summaryrefslogtreecommitdiff
path: root/redis
diff options
context:
space:
mode:
Diffstat (limited to 'redis')
-rw-r--r--redis/__init__.py74
-rw-r--r--redis/backoff.py2
-rwxr-xr-xredis/client.py1032
-rw-r--r--redis/cluster.py769
-rw-r--r--redis/commands/__init__.py12
-rw-r--r--redis/commands/cluster.py406
-rw-r--r--redis/commands/core.py1713
-rw-r--r--redis/commands/helpers.py12
-rw-r--r--redis/commands/json/__init__.py10
-rw-r--r--redis/commands/json/commands.py38
-rw-r--r--redis/commands/json/decoders.py7
-rw-r--r--redis/commands/parser.py41
-rw-r--r--redis/commands/redismodules.py16
-rw-r--r--redis/commands/search/__init__.py4
-rw-r--r--redis/commands/search/commands.py43
-rw-r--r--redis/commands/search/field.py6
-rw-r--r--redis/commands/search/query.py9
-rw-r--r--redis/commands/search/querystring.py7
-rw-r--r--redis/commands/search/result.py2
-rw-r--r--redis/commands/search/suggestion.py6
-rw-r--r--redis/commands/sentinel.py30
-rw-r--r--redis/commands/timeseries/__init__.py11
-rw-r--r--redis/commands/timeseries/commands.py35
-rw-r--r--redis/commands/timeseries/info.py2
-rw-r--r--redis/commands/timeseries/utils.py11
-rwxr-xr-xredis/connection.py429
-rw-r--r--redis/crc.py7
-rw-r--r--redis/exceptions.py13
-rw-r--r--redis/lock.py55
-rw-r--r--redis/retry.py5
-rw-r--r--redis/sentinel.py136
-rw-r--r--redis/utils.py7
32 files changed, 2669 insertions, 2281 deletions
diff --git a/redis/__init__.py b/redis/__init__.py
index daf741b..051b039 100644
--- a/redis/__init__.py
+++ b/redis/__init__.py
@@ -2,18 +2,11 @@ from redis.client import Redis, StrictRedis
from redis.cluster import RedisCluster
from redis.connection import (
BlockingConnectionPool,
- ConnectionPool,
Connection,
+ ConnectionPool,
SSLConnection,
- UnixDomainSocketConnection
-)
-from redis.sentinel import (
- Sentinel,
- SentinelConnectionPool,
- SentinelManagedConnection,
- SentinelManagedSSLConnection,
+ UnixDomainSocketConnection,
)
-from redis.utils import from_url
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
@@ -27,8 +20,15 @@ from redis.exceptions import (
RedisError,
ResponseError,
TimeoutError,
- WatchError
+ WatchError,
+)
+from redis.sentinel import (
+ Sentinel,
+ SentinelConnectionPool,
+ SentinelManagedConnection,
+ SentinelManagedSSLConnection,
)
+from redis.utils import from_url
def int_or_str(value):
@@ -41,33 +41,33 @@ def int_or_str(value):
__version__ = "4.1.0rc2"
-VERSION = tuple(map(int_or_str, __version__.split('.')))
+VERSION = tuple(map(int_or_str, __version__.split(".")))
__all__ = [
- 'AuthenticationError',
- 'AuthenticationWrongNumberOfArgsError',
- 'BlockingConnectionPool',
- 'BusyLoadingError',
- 'ChildDeadlockedError',
- 'Connection',
- 'ConnectionError',
- 'ConnectionPool',
- 'DataError',
- 'from_url',
- 'InvalidResponse',
- 'PubSubError',
- 'ReadOnlyError',
- 'Redis',
- 'RedisCluster',
- 'RedisError',
- 'ResponseError',
- 'Sentinel',
- 'SentinelConnectionPool',
- 'SentinelManagedConnection',
- 'SentinelManagedSSLConnection',
- 'SSLConnection',
- 'StrictRedis',
- 'TimeoutError',
- 'UnixDomainSocketConnection',
- 'WatchError',
+ "AuthenticationError",
+ "AuthenticationWrongNumberOfArgsError",
+ "BlockingConnectionPool",
+ "BusyLoadingError",
+ "ChildDeadlockedError",
+ "Connection",
+ "ConnectionError",
+ "ConnectionPool",
+ "DataError",
+ "from_url",
+ "InvalidResponse",
+ "PubSubError",
+ "ReadOnlyError",
+ "Redis",
+ "RedisCluster",
+ "RedisError",
+ "ResponseError",
+ "Sentinel",
+ "SentinelConnectionPool",
+ "SentinelManagedConnection",
+ "SentinelManagedSSLConnection",
+ "SSLConnection",
+ "StrictRedis",
+ "TimeoutError",
+ "UnixDomainSocketConnection",
+ "WatchError",
]
diff --git a/redis/backoff.py b/redis/backoff.py
index 9162778..cbb4e73 100644
--- a/redis/backoff.py
+++ b/redis/backoff.py
@@ -1,5 +1,5 @@
-from abc import ABC, abstractmethod
import random
+from abc import ABC, abstractmethod
class AbstractBackoff(ABC):
diff --git a/redis/client.py b/redis/client.py
index 9f2907e..14e588a 100755
--- a/redis/client.py
+++ b/redis/client.py
@@ -1,15 +1,18 @@
-from itertools import chain
import copy
import datetime
import re
import threading
import time
import warnings
-from redis.commands import (CoreCommands, RedisModuleCommands,
- SentinelCommands, list_or_args)
-from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
- SSLConnection)
-from redis.lock import Lock
+from itertools import chain
+
+from redis.commands import (
+ CoreCommands,
+ RedisModuleCommands,
+ SentinelCommands,
+ list_or_args,
+)
+from redis.connection import ConnectionPool, SSLConnection, UnixDomainSocketConnection
from redis.exceptions import (
ConnectionError,
ExecAbortError,
@@ -20,13 +23,14 @@ from redis.exceptions import (
TimeoutError,
WatchError,
)
+from redis.lock import Lock
from redis.utils import safe_str, str_if_bytes
-SYM_EMPTY = b''
-EMPTY_RESPONSE = 'EMPTY_RESPONSE'
+SYM_EMPTY = b""
+EMPTY_RESPONSE = "EMPTY_RESPONSE"
# some responses (ie. dump) are binary, and just meant to never be decoded
-NEVER_DECODE = 'NEVER_DECODE'
+NEVER_DECODE = "NEVER_DECODE"
def timestamp_to_datetime(response):
@@ -76,12 +80,12 @@ def parse_debug_object(response):
# The 'type' of the object is the first item in the response, but isn't
# prefixed with a name
response = str_if_bytes(response)
- response = 'type:' + response
- response = dict(kv.split(':') for kv in response.split())
+ response = "type:" + response
+ response = dict(kv.split(":") for kv in response.split())
# parse some expected int values from the string response
# note: this cmd isn't spec'd so these may not appear in all redis versions
- int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
+ int_fields = ("refcount", "serializedlength", "lru", "lru_seconds_idle")
for field in int_fields:
if field in response:
response[field] = int(response[field])
@@ -91,7 +95,7 @@ def parse_debug_object(response):
def parse_object(response, infotype):
"Parse the results of an OBJECT command"
- if infotype in ('idletime', 'refcount'):
+ if infotype in ("idletime", "refcount"):
return int_or_none(response)
return response
@@ -102,9 +106,9 @@ def parse_info(response):
response = str_if_bytes(response)
def get_value(value):
- if ',' not in value or '=' not in value:
+ if "," not in value or "=" not in value:
try:
- if '.' in value:
+ if "." in value:
return float(value)
else:
return int(value)
@@ -112,82 +116,84 @@ def parse_info(response):
return value
else:
sub_dict = {}
- for item in value.split(','):
- k, v = item.rsplit('=', 1)
+ for item in value.split(","):
+ k, v = item.rsplit("=", 1)
sub_dict[k] = get_value(v)
return sub_dict
for line in response.splitlines():
- if line and not line.startswith('#'):
- if line.find(':') != -1:
+ if line and not line.startswith("#"):
+ if line.find(":") != -1:
# Split, the info fields keys and values.
# Note that the value may contain ':'. but the 'host:'
# pseudo-command is the only case where the key contains ':'
- key, value = line.split(':', 1)
- if key == 'cmdstat_host':
- key, value = line.rsplit(':', 1)
+ key, value = line.split(":", 1)
+ if key == "cmdstat_host":
+ key, value = line.rsplit(":", 1)
- if key == 'module':
+ if key == "module":
# Hardcode a list for key 'modules' since there could be
# multiple lines that started with 'module'
- info.setdefault('modules', []).append(get_value(value))
+ info.setdefault("modules", []).append(get_value(value))
else:
info[key] = get_value(value)
else:
# if the line isn't splittable, append it to the "__raw__" key
- info.setdefault('__raw__', []).append(line)
+ info.setdefault("__raw__", []).append(line)
return info
def parse_memory_stats(response, **kwargs):
"Parse the results of MEMORY STATS"
- stats = pairs_to_dict(response,
- decode_keys=True,
- decode_string_values=True)
+ stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)
for key, value in stats.items():
- if key.startswith('db.'):
- stats[key] = pairs_to_dict(value,
- decode_keys=True,
- decode_string_values=True)
+ if key.startswith("db."):
+ stats[key] = pairs_to_dict(
+ value, decode_keys=True, decode_string_values=True
+ )
return stats
SENTINEL_STATE_TYPES = {
- 'can-failover-its-master': int,
- 'config-epoch': int,
- 'down-after-milliseconds': int,
- 'failover-timeout': int,
- 'info-refresh': int,
- 'last-hello-message': int,
- 'last-ok-ping-reply': int,
- 'last-ping-reply': int,
- 'last-ping-sent': int,
- 'master-link-down-time': int,
- 'master-port': int,
- 'num-other-sentinels': int,
- 'num-slaves': int,
- 'o-down-time': int,
- 'pending-commands': int,
- 'parallel-syncs': int,
- 'port': int,
- 'quorum': int,
- 'role-reported-time': int,
- 's-down-time': int,
- 'slave-priority': int,
- 'slave-repl-offset': int,
- 'voted-leader-epoch': int
+ "can-failover-its-master": int,
+ "config-epoch": int,
+ "down-after-milliseconds": int,
+ "failover-timeout": int,
+ "info-refresh": int,
+ "last-hello-message": int,
+ "last-ok-ping-reply": int,
+ "last-ping-reply": int,
+ "last-ping-sent": int,
+ "master-link-down-time": int,
+ "master-port": int,
+ "num-other-sentinels": int,
+ "num-slaves": int,
+ "o-down-time": int,
+ "pending-commands": int,
+ "parallel-syncs": int,
+ "port": int,
+ "quorum": int,
+ "role-reported-time": int,
+ "s-down-time": int,
+ "slave-priority": int,
+ "slave-repl-offset": int,
+ "voted-leader-epoch": int,
}
def parse_sentinel_state(item):
result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
- flags = set(result['flags'].split(','))
- for name, flag in (('is_master', 'master'), ('is_slave', 'slave'),
- ('is_sdown', 's_down'), ('is_odown', 'o_down'),
- ('is_sentinel', 'sentinel'),
- ('is_disconnected', 'disconnected'),
- ('is_master_down', 'master_down')):
+ flags = set(result["flags"].split(","))
+ for name, flag in (
+ ("is_master", "master"),
+ ("is_slave", "slave"),
+ ("is_sdown", "s_down"),
+ ("is_odown", "o_down"),
+ ("is_sentinel", "sentinel"),
+ ("is_disconnected", "disconnected"),
+ ("is_master_down", "master_down"),
+ ):
result[name] = flag in flags
return result
@@ -200,7 +206,7 @@ def parse_sentinel_masters(response):
result = {}
for item in response:
state = parse_sentinel_state(map(str_if_bytes, item))
- result[state['name']] = state
+ result[state["name"]] = state
return result
@@ -251,9 +257,9 @@ def zset_score_pairs(response, **options):
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
- if not response or not options.get('withscores'):
+ if not response or not options.get("withscores"):
return response
- score_cast_func = options.get('score_cast_func', float)
+ score_cast_func = options.get("score_cast_func", float)
it = iter(response)
return list(zip(it, map(score_cast_func, it)))
@@ -263,9 +269,9 @@ def sort_return_tuples(response, **options):
If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups']
"""
- if not response or not options.get('groups'):
+ if not response or not options.get("groups"):
return response
- n = options['groups']
+ n = options["groups"]
return list(zip(*[response[i::n] for i in range(n)]))
@@ -296,34 +302,30 @@ def parse_list_of_dicts(response):
def parse_xclaim(response, **options):
- if options.get('parse_justid', False):
+ if options.get("parse_justid", False):
return response
return parse_stream_list(response)
def parse_xautoclaim(response, **options):
- if options.get('parse_justid', False):
+ if options.get("parse_justid", False):
return response[1]
return parse_stream_list(response[1])
def parse_xinfo_stream(response, **options):
data = pairs_to_dict(response, decode_keys=True)
- if not options.get('full', False):
- first = data['first-entry']
+ if not options.get("full", False):
+ first = data["first-entry"]
if first is not None:
- data['first-entry'] = (first[0], pairs_to_dict(first[1]))
- last = data['last-entry']
+ data["first-entry"] = (first[0], pairs_to_dict(first[1]))
+ last = data["last-entry"]
if last is not None:
- data['last-entry'] = (last[0], pairs_to_dict(last[1]))
+ data["last-entry"] = (last[0], pairs_to_dict(last[1]))
else:
- data['entries'] = {
- _id: pairs_to_dict(entry)
- for _id, entry in data['entries']
- }
- data['groups'] = [
- pairs_to_dict(group, decode_keys=True)
- for group in data['groups']
+ data["entries"] = {_id: pairs_to_dict(entry) for _id, entry in data["entries"]}
+ data["groups"] = [
+ pairs_to_dict(group, decode_keys=True) for group in data["groups"]
]
return data
@@ -335,19 +337,19 @@ def parse_xread(response):
def parse_xpending(response, **options):
- if options.get('parse_detail', False):
+ if options.get("parse_detail", False):
return parse_xpending_range(response)
- consumers = [{'name': n, 'pending': int(p)} for n, p in response[3] or []]
+ consumers = [{"name": n, "pending": int(p)} for n, p in response[3] or []]
return {
- 'pending': response[0],
- 'min': response[1],
- 'max': response[2],
- 'consumers': consumers
+ "pending": response[0],
+ "min": response[1],
+ "max": response[2],
+ "consumers": consumers,
}
def parse_xpending_range(response):
- k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered')
+ k = ("message_id", "consumer", "time_since_delivered", "times_delivered")
return [dict(zip(k, r)) for r in response]
@@ -358,13 +360,13 @@ def float_or_none(response):
def bool_ok(response):
- return str_if_bytes(response) == 'OK'
+ return str_if_bytes(response) == "OK"
def parse_zadd(response, **options):
if response is None:
return None
- if options.get('as_score'):
+ if options.get("as_score"):
return float(response)
return int(response)
@@ -373,7 +375,7 @@ def parse_client_list(response, **options):
clients = []
for c in str_if_bytes(response).splitlines():
# Values might contain '='
- clients.append(dict(pair.split('=', 1) for pair in c.split(' ')))
+ clients.append(dict(pair.split("=", 1) for pair in c.split(" ")))
return clients
@@ -393,7 +395,7 @@ def parse_hscan(response, **options):
def parse_zscan(response, **options):
- score_cast_func = options.get('score_cast_func', float)
+ score_cast_func = options.get("score_cast_func", float)
cursor, r = response
it = iter(r)
return int(cursor), list(zip(it, map(score_cast_func, it)))
@@ -405,23 +407,24 @@ def parse_zmscore(response, **options):
def parse_slowlog_get(response, **options):
- space = ' ' if options.get('decode_responses', False) else b' '
+ space = " " if options.get("decode_responses", False) else b" "
def parse_item(item):
result = {
- 'id': item[0],
- 'start_time': int(item[1]),
- 'duration': int(item[2]),
+ "id": item[0],
+ "start_time": int(item[1]),
+ "duration": int(item[2]),
}
# Redis Enterprise injects another entry at index [3], which has
# the complexity info (i.e. the value N in case the command has
# an O(N) complexity) instead of the command.
if isinstance(item[3], list):
- result['command'] = space.join(item[3])
+ result["command"] = space.join(item[3])
else:
- result['complexity'] = item[3]
- result['command'] = space.join(item[4])
+ result["complexity"] = item[3]
+ result["command"] = space.join(item[4])
return result
+
return [parse_item(item) for item in response]
@@ -437,42 +440,42 @@ def parse_stralgo(response, **options):
When WITHMATCHLEN is given, each array representing a match will
also have the length of the match at the beginning of the array.
"""
- if options.get('len', False):
+ if options.get("len", False):
return int(response)
- if options.get('idx', False):
- if options.get('withmatchlen', False):
- matches = [[(int(match[-1]))] + list(map(tuple, match[:-1]))
- for match in response[1]]
+ if options.get("idx", False):
+ if options.get("withmatchlen", False):
+ matches = [
+ [(int(match[-1]))] + list(map(tuple, match[:-1]))
+ for match in response[1]
+ ]
else:
- matches = [list(map(tuple, match))
- for match in response[1]]
+ matches = [list(map(tuple, match)) for match in response[1]]
return {
str_if_bytes(response[0]): matches,
- str_if_bytes(response[2]): int(response[3])
+ str_if_bytes(response[2]): int(response[3]),
}
return str_if_bytes(response)
def parse_cluster_info(response, **options):
response = str_if_bytes(response)
- return dict(line.split(':') for line in response.splitlines() if line)
+ return dict(line.split(":") for line in response.splitlines() if line)
def _parse_node_line(line):
- line_items = line.split(' ')
- node_id, addr, flags, master_id, ping, pong, epoch, \
- connected = line.split(' ')[:8]
- addr = addr.split('@')[0]
- slots = [sl.split('-') for sl in line_items[8:]]
+ line_items = line.split(" ")
+ node_id, addr, flags, master_id, ping, pong, epoch, connected = line.split(" ")[:8]
+ addr = addr.split("@")[0]
+ slots = [sl.split("-") for sl in line_items[8:]]
node_dict = {
- 'node_id': node_id,
- 'flags': flags,
- 'master_id': master_id,
- 'last_ping_sent': ping,
- 'last_pong_rcvd': pong,
- 'epoch': epoch,
- 'slots': slots,
- 'connected': True if connected == 'connected' else False
+ "node_id": node_id,
+ "flags": flags,
+ "master_id": master_id,
+ "last_ping_sent": ping,
+ "last_pong_rcvd": pong,
+ "epoch": epoch,
+ "slots": slots,
+ "connected": True if connected == "connected" else False,
}
return addr, node_dict
@@ -492,7 +495,7 @@ def parse_geosearch_generic(response, **options):
Parse the response of 'GEOSEARCH', GEORADIUS' and 'GEORADIUSBYMEMBER'
commands according to 'withdist', 'withhash' and 'withcoord' labels.
"""
- if options['store'] or options['store_dist']:
+ if options["store"] or options["store_dist"]:
# `store` and `store_dist` cant be combined
# with other command arguments.
# relevant to 'GEORADIUS' and 'GEORADIUSBYMEMBER'
@@ -503,24 +506,21 @@ def parse_geosearch_generic(response, **options):
else:
response_list = response
- if not options['withdist'] and not options['withcoord'] \
- and not options['withhash']:
+ if not options["withdist"] and not options["withcoord"] and not options["withhash"]:
# just a bunch of places
return response_list
cast = {
- 'withdist': float,
- 'withcoord': lambda ll: (float(ll[0]), float(ll[1])),
- 'withhash': int
+ "withdist": float,
+ "withcoord": lambda ll: (float(ll[0]), float(ll[1])),
+ "withhash": int,
}
# zip all output results with each casting function to get
# the properly native Python value.
f = [lambda x: x]
- f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]]
- return [
- list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list
- ]
+ f += [cast[o] for o in ["withdist", "withhash", "withcoord"] if options[o]]
+ return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]
def parse_command(response, **options):
@@ -528,12 +528,12 @@ def parse_command(response, **options):
for command in response:
cmd_dict = {}
cmd_name = str_if_bytes(command[0])
- cmd_dict['name'] = cmd_name
- cmd_dict['arity'] = int(command[1])
- cmd_dict['flags'] = [str_if_bytes(flag) for flag in command[2]]
- cmd_dict['first_key_pos'] = command[3]
- cmd_dict['last_key_pos'] = command[4]
- cmd_dict['step_count'] = command[5]
+ cmd_dict["name"] = cmd_name
+ cmd_dict["arity"] = int(command[1])
+ cmd_dict["flags"] = [str_if_bytes(flag) for flag in command[2]]
+ cmd_dict["first_key_pos"] = command[3]
+ cmd_dict["last_key_pos"] = command[4]
+ cmd_dict["step_count"] = command[5]
commands[cmd_name] = cmd_dict
return commands
@@ -545,7 +545,7 @@ def parse_pubsub_numsub(response, **options):
def parse_client_kill(response, **options):
if isinstance(response, int):
return response
- return str_if_bytes(response) == 'OK'
+ return str_if_bytes(response) == "OK"
def parse_acl_getuser(response, **options):
@@ -554,21 +554,21 @@ def parse_acl_getuser(response, **options):
data = pairs_to_dict(response, decode_keys=True)
# convert everything but user-defined data in 'keys' to native strings
- data['flags'] = list(map(str_if_bytes, data['flags']))
- data['passwords'] = list(map(str_if_bytes, data['passwords']))
- data['commands'] = str_if_bytes(data['commands'])
+ data["flags"] = list(map(str_if_bytes, data["flags"]))
+ data["passwords"] = list(map(str_if_bytes, data["passwords"]))
+ data["commands"] = str_if_bytes(data["commands"])
# split 'commands' into separate 'categories' and 'commands' lists
commands, categories = [], []
- for command in data['commands'].split(' '):
- if '@' in command:
+ for command in data["commands"].split(" "):
+ if "@" in command:
categories.append(command)
else:
commands.append(command)
- data['commands'] = commands
- data['categories'] = categories
- data['enabled'] = 'on' in data['flags']
+ data["commands"] = commands
+ data["categories"] = categories
+ data["enabled"] = "on" in data["flags"]
return data
@@ -579,7 +579,7 @@ def parse_acl_log(response, **options):
data = []
for log in response:
log_data = pairs_to_dict(log, True, True)
- client_info = log_data.get('client-info', '')
+ client_info = log_data.get("client-info", "")
log_data["client-info"] = parse_client_info(client_info)
# float() is lossy comparing to the "double" in C
@@ -602,9 +602,22 @@ def parse_client_info(value):
client_info[key] = value
# Those fields are defined as int in networking.c
- for int_key in {"id", "age", "idle", "db", "sub", "psub",
- "multi", "qbuf", "qbuf-free", "obl",
- "argv-mem", "oll", "omem", "tot-mem"}:
+ for int_key in {
+ "id",
+ "age",
+ "idle",
+ "db",
+ "sub",
+ "psub",
+ "multi",
+ "qbuf",
+ "qbuf-free",
+ "obl",
+ "argv-mem",
+ "oll",
+ "omem",
+ "tot-mem",
+ }:
client_info[int_key] = int(client_info[int_key])
return client_info
@@ -622,11 +635,11 @@ def parse_set_result(response, **options):
- BOOL
- String when GET argument is used
"""
- if options.get('get'):
+ if options.get("get"):
# Redis will return a getCommand result.
# See `setGenericCommand` in t_string.c
return response
- return response and str_if_bytes(response) == 'OK'
+ return response and str_if_bytes(response) == "OK"
class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
@@ -641,158 +654,156 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
configuration, an instance will either use a ConnectionPool, or
Connection object to talk to redis.
"""
+
RESPONSE_CALLBACKS = {
**string_keys_to_dict(
- 'AUTH COPY EXPIRE EXPIREAT PEXPIRE PEXPIREAT '
- 'HEXISTS HMSET LMOVE BLMOVE MOVE '
- 'MSETNX PERSIST PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
- bool
+ "AUTH COPY EXPIRE EXPIREAT PEXPIRE PEXPIREAT "
+ "HEXISTS HMSET LMOVE BLMOVE MOVE "
+ "MSETNX PERSIST PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX",
+ bool,
),
**string_keys_to_dict(
- 'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN '
- 'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD '
- 'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN '
- 'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM '
- 'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
- int
+ "BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN "
+ "HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD "
+ "SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN "
+ "SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM "
+ "ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE",
+ int,
),
+ **string_keys_to_dict("INCRBYFLOAT HINCRBYFLOAT", float),
**string_keys_to_dict(
- 'INCRBYFLOAT HINCRBYFLOAT',
- float
+ # these return OK, or int if redis-server is >=1.3.4
+ "LPUSH RPUSH",
+ lambda r: isinstance(r, int) and r or str_if_bytes(r) == "OK",
),
+ **string_keys_to_dict("SORT", sort_return_tuples),
+ **string_keys_to_dict("ZSCORE ZINCRBY GEODIST", float_or_none),
**string_keys_to_dict(
- # these return OK, or int if redis-server is >=1.3.4
- 'LPUSH RPUSH',
- lambda r: isinstance(r, int) and r or str_if_bytes(r) == 'OK'
+ "FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE "
+ "RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ",
+ bool_ok,
),
- **string_keys_to_dict('SORT', sort_return_tuples),
- **string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none),
+ **string_keys_to_dict("BLPOP BRPOP", lambda r: r and tuple(r) or None),
**string_keys_to_dict(
- 'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE '
- 'RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ',
- bool_ok
+ "SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set()
),
- **string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
**string_keys_to_dict(
- 'SDIFF SINTER SMEMBERS SUNION',
- lambda r: r and set(r) or set()
+ "ZPOPMAX ZPOPMIN ZINTER ZDIFF ZUNION ZRANGE ZRANGEBYSCORE "
+ "ZREVRANGE ZREVRANGEBYSCORE",
+ zset_score_pairs,
),
**string_keys_to_dict(
- 'ZPOPMAX ZPOPMIN ZINTER ZDIFF ZUNION ZRANGE ZRANGEBYSCORE '
- 'ZREVRANGE ZREVRANGEBYSCORE', zset_score_pairs
+ "BZPOPMIN BZPOPMAX", lambda r: r and (r[0], r[1], float(r[2])) or None
+ ),
+ **string_keys_to_dict("ZRANK ZREVRANK", int_or_none),
+ **string_keys_to_dict("XREVRANGE XRANGE", parse_stream_list),
+ **string_keys_to_dict("XREAD XREADGROUP", parse_xread),
+ **string_keys_to_dict("BGREWRITEAOF BGSAVE", lambda r: True),
+ "ACL CAT": lambda r: list(map(str_if_bytes, r)),
+ "ACL DELUSER": int,
+ "ACL GENPASS": str_if_bytes,
+ "ACL GETUSER": parse_acl_getuser,
+ "ACL HELP": lambda r: list(map(str_if_bytes, r)),
+ "ACL LIST": lambda r: list(map(str_if_bytes, r)),
+ "ACL LOAD": bool_ok,
+ "ACL LOG": parse_acl_log,
+ "ACL SAVE": bool_ok,
+ "ACL SETUSER": bool_ok,
+ "ACL USERS": lambda r: list(map(str_if_bytes, r)),
+ "ACL WHOAMI": str_if_bytes,
+ "CLIENT GETNAME": str_if_bytes,
+ "CLIENT ID": int,
+ "CLIENT KILL": parse_client_kill,
+ "CLIENT LIST": parse_client_list,
+ "CLIENT INFO": parse_client_info,
+ "CLIENT SETNAME": bool_ok,
+ "CLIENT UNBLOCK": lambda r: r and int(r) == 1 or False,
+ "CLIENT PAUSE": bool_ok,
+ "CLIENT GETREDIR": int,
+ "CLIENT TRACKINGINFO": lambda r: list(map(str_if_bytes, r)),
+ "CLUSTER ADDSLOTS": bool_ok,
+ "CLUSTER COUNT-FAILURE-REPORTS": lambda x: int(x),
+ "CLUSTER COUNTKEYSINSLOT": lambda x: int(x),
+ "CLUSTER DELSLOTS": bool_ok,
+ "CLUSTER FAILOVER": bool_ok,
+ "CLUSTER FORGET": bool_ok,
+ "CLUSTER INFO": parse_cluster_info,
+ "CLUSTER KEYSLOT": lambda x: int(x),
+ "CLUSTER MEET": bool_ok,
+ "CLUSTER NODES": parse_cluster_nodes,
+ "CLUSTER REPLICATE": bool_ok,
+ "CLUSTER RESET": bool_ok,
+ "CLUSTER SAVECONFIG": bool_ok,
+ "CLUSTER SET-CONFIG-EPOCH": bool_ok,
+ "CLUSTER SETSLOT": bool_ok,
+ "CLUSTER SLAVES": parse_cluster_nodes,
+ "CLUSTER REPLICAS": parse_cluster_nodes,
+ "COMMAND": parse_command,
+ "COMMAND COUNT": int,
+ "COMMAND GETKEYS": lambda r: list(map(str_if_bytes, r)),
+ "CONFIG GET": parse_config_get,
+ "CONFIG RESETSTAT": bool_ok,
+ "CONFIG SET": bool_ok,
+ "DEBUG OBJECT": parse_debug_object,
+ "GEOHASH": lambda r: list(map(str_if_bytes, r)),
+ "GEOPOS": lambda r: list(
+ map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)
),
- **string_keys_to_dict('BZPOPMIN BZPOPMAX', \
- lambda r:
- r and (r[0], r[1], float(r[2])) or None),
- **string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
- **string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list),
- **string_keys_to_dict('XREAD XREADGROUP', parse_xread),
- **string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
- 'ACL CAT': lambda r: list(map(str_if_bytes, r)),
- 'ACL DELUSER': int,
- 'ACL GENPASS': str_if_bytes,
- 'ACL GETUSER': parse_acl_getuser,
- 'ACL HELP': lambda r: list(map(str_if_bytes, r)),
- 'ACL LIST': lambda r: list(map(str_if_bytes, r)),
- 'ACL LOAD': bool_ok,
- 'ACL LOG': parse_acl_log,
- 'ACL SAVE': bool_ok,
- 'ACL SETUSER': bool_ok,
- 'ACL USERS': lambda r: list(map(str_if_bytes, r)),
- 'ACL WHOAMI': str_if_bytes,
- 'CLIENT GETNAME': str_if_bytes,
- 'CLIENT ID': int,
- 'CLIENT KILL': parse_client_kill,
- 'CLIENT LIST': parse_client_list,
- 'CLIENT INFO': parse_client_info,
- 'CLIENT SETNAME': bool_ok,
- 'CLIENT UNBLOCK': lambda r: r and int(r) == 1 or False,
- 'CLIENT PAUSE': bool_ok,
- 'CLIENT GETREDIR': int,
- 'CLIENT TRACKINGINFO': lambda r: list(map(str_if_bytes, r)),
- 'CLUSTER ADDSLOTS': bool_ok,
- 'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x),
- 'CLUSTER COUNTKEYSINSLOT': lambda x: int(x),
- 'CLUSTER DELSLOTS': bool_ok,
- 'CLUSTER FAILOVER': bool_ok,
- 'CLUSTER FORGET': bool_ok,
- 'CLUSTER INFO': parse_cluster_info,
- 'CLUSTER KEYSLOT': lambda x: int(x),
- 'CLUSTER MEET': bool_ok,
- 'CLUSTER NODES': parse_cluster_nodes,
- 'CLUSTER REPLICATE': bool_ok,
- 'CLUSTER RESET': bool_ok,
- 'CLUSTER SAVECONFIG': bool_ok,
- 'CLUSTER SET-CONFIG-EPOCH': bool_ok,
- 'CLUSTER SETSLOT': bool_ok,
- 'CLUSTER SLAVES': parse_cluster_nodes,
- 'CLUSTER REPLICAS': parse_cluster_nodes,
- 'COMMAND': parse_command,
- 'COMMAND COUNT': int,
- 'COMMAND GETKEYS': lambda r: list(map(str_if_bytes, r)),
- 'CONFIG GET': parse_config_get,
- 'CONFIG RESETSTAT': bool_ok,
- 'CONFIG SET': bool_ok,
- 'DEBUG OBJECT': parse_debug_object,
- 'GEOHASH': lambda r: list(map(str_if_bytes, r)),
- 'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]),
- float(ll[1]))
- if ll is not None else None, r)),
- 'GEOSEARCH': parse_geosearch_generic,
- 'GEORADIUS': parse_geosearch_generic,
- 'GEORADIUSBYMEMBER': parse_geosearch_generic,
- 'HGETALL': lambda r: r and pairs_to_dict(r) or {},
- 'HSCAN': parse_hscan,
- 'INFO': parse_info,
- 'LASTSAVE': timestamp_to_datetime,
- 'MEMORY PURGE': bool_ok,
- 'MEMORY STATS': parse_memory_stats,
- 'MEMORY USAGE': int_or_none,
- 'MODULE LOAD': parse_module_result,
- 'MODULE UNLOAD': parse_module_result,
- 'MODULE LIST': lambda r: [pairs_to_dict(m) for m in r],
- 'OBJECT': parse_object,
- 'PING': lambda r: str_if_bytes(r) == 'PONG',
- 'QUIT': bool_ok,
- 'STRALGO': parse_stralgo,
- 'PUBSUB NUMSUB': parse_pubsub_numsub,
- 'RANDOMKEY': lambda r: r and r or None,
- 'SCAN': parse_scan,
- 'SCRIPT EXISTS': lambda r: list(map(bool, r)),
- 'SCRIPT FLUSH': bool_ok,
- 'SCRIPT KILL': bool_ok,
- 'SCRIPT LOAD': str_if_bytes,
- 'SENTINEL CKQUORUM': bool_ok,
- 'SENTINEL FAILOVER': bool_ok,
- 'SENTINEL FLUSHCONFIG': bool_ok,
- 'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
- 'SENTINEL MASTER': parse_sentinel_master,
- 'SENTINEL MASTERS': parse_sentinel_masters,
- 'SENTINEL MONITOR': bool_ok,
- 'SENTINEL RESET': bool_ok,
- 'SENTINEL REMOVE': bool_ok,
- 'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
- 'SENTINEL SET': bool_ok,
- 'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
- 'SET': parse_set_result,
- 'SLOWLOG GET': parse_slowlog_get,
- 'SLOWLOG LEN': int,
- 'SLOWLOG RESET': bool_ok,
- 'SSCAN': parse_scan,
- 'TIME': lambda x: (int(x[0]), int(x[1])),
- 'XCLAIM': parse_xclaim,
- 'XAUTOCLAIM': parse_xautoclaim,
- 'XGROUP CREATE': bool_ok,
- 'XGROUP DELCONSUMER': int,
- 'XGROUP DESTROY': bool,
- 'XGROUP SETID': bool_ok,
- 'XINFO CONSUMERS': parse_list_of_dicts,
- 'XINFO GROUPS': parse_list_of_dicts,
- 'XINFO STREAM': parse_xinfo_stream,
- 'XPENDING': parse_xpending,
- 'ZADD': parse_zadd,
- 'ZSCAN': parse_zscan,
- 'ZMSCORE': parse_zmscore,
+ "GEOSEARCH": parse_geosearch_generic,
+ "GEORADIUS": parse_geosearch_generic,
+ "GEORADIUSBYMEMBER": parse_geosearch_generic,
+ "HGETALL": lambda r: r and pairs_to_dict(r) or {},
+ "HSCAN": parse_hscan,
+ "INFO": parse_info,
+ "LASTSAVE": timestamp_to_datetime,
+ "MEMORY PURGE": bool_ok,
+ "MEMORY STATS": parse_memory_stats,
+ "MEMORY USAGE": int_or_none,
+ "MODULE LOAD": parse_module_result,
+ "MODULE UNLOAD": parse_module_result,
+ "MODULE LIST": lambda r: [pairs_to_dict(m) for m in r],
+ "OBJECT": parse_object,
+ "PING": lambda r: str_if_bytes(r) == "PONG",
+ "QUIT": bool_ok,
+ "STRALGO": parse_stralgo,
+ "PUBSUB NUMSUB": parse_pubsub_numsub,
+ "RANDOMKEY": lambda r: r and r or None,
+ "SCAN": parse_scan,
+ "SCRIPT EXISTS": lambda r: list(map(bool, r)),
+ "SCRIPT FLUSH": bool_ok,
+ "SCRIPT KILL": bool_ok,
+ "SCRIPT LOAD": str_if_bytes,
+ "SENTINEL CKQUORUM": bool_ok,
+ "SENTINEL FAILOVER": bool_ok,
+ "SENTINEL FLUSHCONFIG": bool_ok,
+ "SENTINEL GET-MASTER-ADDR-BY-NAME": parse_sentinel_get_master,
+ "SENTINEL MASTER": parse_sentinel_master,
+ "SENTINEL MASTERS": parse_sentinel_masters,
+ "SENTINEL MONITOR": bool_ok,
+ "SENTINEL RESET": bool_ok,
+ "SENTINEL REMOVE": bool_ok,
+ "SENTINEL SENTINELS": parse_sentinel_slaves_and_sentinels,
+ "SENTINEL SET": bool_ok,
+ "SENTINEL SLAVES": parse_sentinel_slaves_and_sentinels,
+ "SET": parse_set_result,
+ "SLOWLOG GET": parse_slowlog_get,
+ "SLOWLOG LEN": int,
+ "SLOWLOG RESET": bool_ok,
+ "SSCAN": parse_scan,
+ "TIME": lambda x: (int(x[0]), int(x[1])),
+ "XCLAIM": parse_xclaim,
+ "XAUTOCLAIM": parse_xautoclaim,
+ "XGROUP CREATE": bool_ok,
+ "XGROUP DELCONSUMER": int,
+ "XGROUP DESTROY": bool,
+ "XGROUP SETID": bool_ok,
+ "XINFO CONSUMERS": parse_list_of_dicts,
+ "XINFO GROUPS": parse_list_of_dicts,
+ "XINFO STREAM": parse_xinfo_stream,
+ "XPENDING": parse_xpending,
+ "ZADD": parse_zadd,
+ "ZSCAN": parse_zscan,
+ "ZMSCORE": parse_zmscore,
}
@classmethod
@@ -839,20 +850,38 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
connection_pool = ConnectionPool.from_url(url, **kwargs)
return cls(connection_pool=connection_pool)
- def __init__(self, host='localhost', port=6379,
- db=0, password=None, socket_timeout=None,
- socket_connect_timeout=None,
- socket_keepalive=None, socket_keepalive_options=None,
- connection_pool=None, unix_socket_path=None,
- encoding='utf-8', encoding_errors='strict',
- charset=None, errors=None,
- decode_responses=False, retry_on_timeout=False,
- ssl=False, ssl_keyfile=None, ssl_certfile=None,
- ssl_cert_reqs='required', ssl_ca_certs=None,
- ssl_check_hostname=False,
- max_connections=None, single_connection_client=False,
- health_check_interval=0, client_name=None, username=None,
- retry=None, redis_connect_func=None):
+ def __init__(
+ self,
+ host="localhost",
+ port=6379,
+ db=0,
+ password=None,
+ socket_timeout=None,
+ socket_connect_timeout=None,
+ socket_keepalive=None,
+ socket_keepalive_options=None,
+ connection_pool=None,
+ unix_socket_path=None,
+ encoding="utf-8",
+ encoding_errors="strict",
+ charset=None,
+ errors=None,
+ decode_responses=False,
+ retry_on_timeout=False,
+ ssl=False,
+ ssl_keyfile=None,
+ ssl_certfile=None,
+ ssl_cert_reqs="required",
+ ssl_ca_certs=None,
+ ssl_check_hostname=False,
+ max_connections=None,
+ single_connection_client=False,
+ health_check_interval=0,
+ client_name=None,
+ username=None,
+ retry=None,
+ redis_connect_func=None,
+ ):
"""
Initialize a new Redis client.
To specify a retry policy, first set `retry_on_timeout` to `True`
@@ -860,62 +889,73 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"""
if not connection_pool:
if charset is not None:
- warnings.warn(DeprecationWarning(
- '"charset" is deprecated. Use "encoding" instead'))
+ warnings.warn(
+ DeprecationWarning(
+ '"charset" is deprecated. Use "encoding" instead'
+ )
+ )
encoding = charset
if errors is not None:
- warnings.warn(DeprecationWarning(
- '"errors" is deprecated. Use "encoding_errors" instead'))
+ warnings.warn(
+ DeprecationWarning(
+ '"errors" is deprecated. Use "encoding_errors" instead'
+ )
+ )
encoding_errors = errors
kwargs = {
- 'db': db,
- 'username': username,
- 'password': password,
- 'socket_timeout': socket_timeout,
- 'encoding': encoding,
- 'encoding_errors': encoding_errors,
- 'decode_responses': decode_responses,
- 'retry_on_timeout': retry_on_timeout,
- 'retry': copy.deepcopy(retry),
- 'max_connections': max_connections,
- 'health_check_interval': health_check_interval,
- 'client_name': client_name,
- 'redis_connect_func': redis_connect_func
+ "db": db,
+ "username": username,
+ "password": password,
+ "socket_timeout": socket_timeout,
+ "encoding": encoding,
+ "encoding_errors": encoding_errors,
+ "decode_responses": decode_responses,
+ "retry_on_timeout": retry_on_timeout,
+ "retry": copy.deepcopy(retry),
+ "max_connections": max_connections,
+ "health_check_interval": health_check_interval,
+ "client_name": client_name,
+ "redis_connect_func": redis_connect_func,
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
- kwargs.update({
- 'path': unix_socket_path,
- 'connection_class': UnixDomainSocketConnection
- })
+ kwargs.update(
+ {
+ "path": unix_socket_path,
+ "connection_class": UnixDomainSocketConnection,
+ }
+ )
else:
# TCP specific options
- kwargs.update({
- 'host': host,
- 'port': port,
- 'socket_connect_timeout': socket_connect_timeout,
- 'socket_keepalive': socket_keepalive,
- 'socket_keepalive_options': socket_keepalive_options,
- })
+ kwargs.update(
+ {
+ "host": host,
+ "port": port,
+ "socket_connect_timeout": socket_connect_timeout,
+ "socket_keepalive": socket_keepalive,
+ "socket_keepalive_options": socket_keepalive_options,
+ }
+ )
if ssl:
- kwargs.update({
- 'connection_class': SSLConnection,
- 'ssl_keyfile': ssl_keyfile,
- 'ssl_certfile': ssl_certfile,
- 'ssl_cert_reqs': ssl_cert_reqs,
- 'ssl_ca_certs': ssl_ca_certs,
- 'ssl_check_hostname': ssl_check_hostname,
- })
+ kwargs.update(
+ {
+ "connection_class": SSLConnection,
+ "ssl_keyfile": ssl_keyfile,
+ "ssl_certfile": ssl_certfile,
+ "ssl_cert_reqs": ssl_cert_reqs,
+ "ssl_ca_certs": ssl_ca_certs,
+ "ssl_check_hostname": ssl_check_hostname,
+ }
+ )
connection_pool = ConnectionPool(**kwargs)
self.connection_pool = connection_pool
self.connection = None
if single_connection_client:
- self.connection = self.connection_pool.get_connection('_')
+ self.connection = self.connection_pool.get_connection("_")
- self.response_callbacks = CaseInsensitiveDict(
- self.__class__.RESPONSE_CALLBACKS)
+ self.response_callbacks = CaseInsensitiveDict(self.__class__.RESPONSE_CALLBACKS)
def __repr__(self):
return f"{type(self).__name__}<{repr(self.connection_pool)}>"
@@ -924,8 +964,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
- def load_external_module(self, funcname, func,
- ):
+ def load_external_module(
+ self,
+ funcname,
+ func,
+ ):
"""
This function can be used to add externally defined redis modules,
and their namespaces to the redis client.
@@ -957,10 +1000,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
between the client and server.
"""
return Pipeline(
- self.connection_pool,
- self.response_callbacks,
- transaction,
- shard_hint)
+ self.connection_pool, self.response_callbacks, transaction, shard_hint
+ )
def transaction(self, func, *watches, **kwargs):
"""
@@ -968,9 +1009,9 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
- shard_hint = kwargs.pop('shard_hint', None)
- value_from_callable = kwargs.pop('value_from_callable', False)
- watch_delay = kwargs.pop('watch_delay', None)
+ shard_hint = kwargs.pop("shard_hint", None)
+ value_from_callable = kwargs.pop("value_from_callable", False)
+ watch_delay = kwargs.pop("watch_delay", None)
with self.pipeline(True, shard_hint) as pipe:
while True:
try:
@@ -984,8 +1025,15 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
time.sleep(watch_delay)
continue
- def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
- lock_class=None, thread_local=True):
+ def lock(
+ self,
+ name,
+ timeout=None,
+ sleep=0.1,
+ blocking_timeout=None,
+ lock_class=None,
+ thread_local=True,
+ ):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
@@ -1028,12 +1076,17 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
- thread local storage. """
+ thread local storage."""
if lock_class is None:
lock_class = Lock
- return lock_class(self, name, timeout=timeout, sleep=sleep,
- blocking_timeout=blocking_timeout,
- thread_local=thread_local)
+ return lock_class(
+ self,
+ name,
+ timeout=timeout,
+ sleep=sleep,
+ blocking_timeout=blocking_timeout,
+ thread_local=thread_local,
+ )
def pubsub(self, **kwargs):
"""
@@ -1047,8 +1100,9 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
return Monitor(self.connection_pool)
def client(self):
- return self.__class__(connection_pool=self.connection_pool,
- single_connection_client=True)
+ return self.__class__(
+ connection_pool=self.connection_pool, single_connection_client=True
+ )
def __enter__(self):
return self
@@ -1065,11 +1119,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
self.connection = None
self.connection_pool.release(conn)
- def _send_command_parse_response(self,
- conn,
- command_name,
- *args,
- **options):
+ def _send_command_parse_response(self, conn, command_name, *args, **options):
"""
Send a command and parse the response
"""
@@ -1095,11 +1145,11 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
try:
return conn.retry.call_with_retry(
- lambda: self._send_command_parse_response(conn,
- command_name,
- *args,
- **options),
- lambda error: self._disconnect_raise(conn, error))
+ lambda: self._send_command_parse_response(
+ conn, command_name, *args, **options
+ ),
+ lambda error: self._disconnect_raise(conn, error),
+ )
finally:
if not self.connection:
pool.release(conn)
@@ -1129,19 +1179,20 @@ class Monitor:
next_command() method returns one command from monitor
listen() method yields commands from monitor.
"""
- monitor_re = re.compile(r'\[(\d+) (.*)\] (.*)')
+
+ monitor_re = re.compile(r"\[(\d+) (.*)\] (.*)")
command_re = re.compile(r'"(.*?)(?<!\\)"')
def __init__(self, connection_pool):
self.connection_pool = connection_pool
- self.connection = self.connection_pool.get_connection('MONITOR')
+ self.connection = self.connection_pool.get_connection("MONITOR")
def __enter__(self):
- self.connection.send_command('MONITOR')
+ self.connection.send_command("MONITOR")
# check that monitor returns 'OK', but don't return it to user
response = self.connection.read_response()
if not bool_ok(response):
- raise RedisError(f'MONITOR failed: {response}')
+ raise RedisError(f"MONITOR failed: {response}")
return self
def __exit__(self, *args):
@@ -1153,34 +1204,34 @@ class Monitor:
response = self.connection.read_response()
if isinstance(response, bytes):
response = self.connection.encoder.decode(response, force=True)
- command_time, command_data = response.split(' ', 1)
+ command_time, command_data = response.split(" ", 1)
m = self.monitor_re.match(command_data)
db_id, client_info, command = m.groups()
- command = ' '.join(self.command_re.findall(command))
+ command = " ".join(self.command_re.findall(command))
# Redis escapes double quotes because each piece of the command
# string is surrounded by double quotes. We don't have that
# requirement so remove the escaping and leave the quote.
command = command.replace('\\"', '"')
- if client_info == 'lua':
- client_address = 'lua'
- client_port = ''
- client_type = 'lua'
- elif client_info.startswith('unix'):
- client_address = 'unix'
+ if client_info == "lua":
+ client_address = "lua"
+ client_port = ""
+ client_type = "lua"
+ elif client_info.startswith("unix"):
+ client_address = "unix"
client_port = client_info[5:]
- client_type = 'unix'
+ client_type = "unix"
else:
# use rsplit as ipv6 addresses contain colons
- client_address, client_port = client_info.rsplit(':', 1)
- client_type = 'tcp'
+ client_address, client_port = client_info.rsplit(":", 1)
+ client_type = "tcp"
return {
- 'time': float(command_time),
- 'db': int(db_id),
- 'client_address': client_address,
- 'client_port': client_port,
- 'client_type': client_type,
- 'command': command
+ "time": float(command_time),
+ "db": int(db_id),
+ "client_address": client_address,
+ "client_port": client_port,
+ "client_type": client_type,
+ "command": command,
}
def listen(self):
@@ -1197,12 +1248,18 @@ class PubSub:
until a message arrives on one of the subscribed channels. That message
will be returned and it's safe to start listening again.
"""
- PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
- UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
- HEALTH_CHECK_MESSAGE = 'redis-py-health-check'
- def __init__(self, connection_pool, shard_hint=None,
- ignore_subscribe_messages=False, encoder=None):
+ PUBLISH_MESSAGE_TYPES = ("message", "pmessage")
+ UNSUBSCRIBE_MESSAGE_TYPES = ("unsubscribe", "punsubscribe")
+ HEALTH_CHECK_MESSAGE = "redis-py-health-check"
+
+ def __init__(
+ self,
+ connection_pool,
+ shard_hint=None,
+ ignore_subscribe_messages=False,
+ encoder=None,
+ ):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.ignore_subscribe_messages = ignore_subscribe_messages
@@ -1213,11 +1270,11 @@ class PubSub:
if self.encoder is None:
self.encoder = self.connection_pool.get_encoder()
if self.encoder.decode_responses:
- self.health_check_response = ['pong', self.HEALTH_CHECK_MESSAGE]
+ self.health_check_response = ["pong", self.HEALTH_CHECK_MESSAGE]
else:
self.health_check_response = [
- b'pong',
- self.encoder.encode(self.HEALTH_CHECK_MESSAGE)
+ b"pong",
+ self.encoder.encode(self.HEALTH_CHECK_MESSAGE),
]
self.reset()
@@ -1282,14 +1339,13 @@ class PubSub:
if self.connection is None:
self.connection = self.connection_pool.get_connection(
- 'pubsub',
- self.shard_hint
+ "pubsub", self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection.register_connect_callback(self.on_connect)
connection = self.connection
- kwargs = {'check_health': not self.subscribed}
+ kwargs = {"check_health": not self.subscribed}
self._execute(connection, connection.send_command, *args, **kwargs)
def _disconnect_raise_connect(self, conn, error):
@@ -1313,27 +1369,25 @@ class PubSub:
"""
return conn.retry.call_with_retry(
lambda: command(*args, **kwargs),
- lambda error: self._disconnect_raise_connect(conn, error))
+ lambda error: self._disconnect_raise_connect(conn, error),
+ )
def parse_response(self, block=True, timeout=0):
"""Parse the response from a publish/subscribe command"""
conn = self.connection
if conn is None:
raise RuntimeError(
- 'pubsub connection not set: '
- 'did you forget to call subscribe() or psubscribe()?')
+ "pubsub connection not set: "
+ "did you forget to call subscribe() or psubscribe()?"
+ )
self.check_health()
- if(
- not block
- and not self._execute(conn, conn.can_read, timeout=timeout)
- ):
+ if not block and not self._execute(conn, conn.can_read, timeout=timeout):
return None
response = self._execute(conn, conn.read_response)
- if conn.health_check_interval and \
- response == self.health_check_response:
+ if conn.health_check_interval and response == self.health_check_response:
# ignore the health check message as user might not expect it
return None
return response
@@ -1342,12 +1396,12 @@ class PubSub:
conn = self.connection
if conn is None:
raise RuntimeError(
- 'pubsub connection not set: '
- 'did you forget to call subscribe() or psubscribe()?')
+ "pubsub connection not set: "
+ "did you forget to call subscribe() or psubscribe()?"
+ )
if conn.health_check_interval and time.time() > conn.next_health_check:
- conn.send_command('PING', self.HEALTH_CHECK_MESSAGE,
- check_health=False)
+ conn.send_command("PING", self.HEALTH_CHECK_MESSAGE, check_health=False)
def _normalize_keys(self, data):
"""
@@ -1371,7 +1425,7 @@ class PubSub:
args = list_or_args(args[0], args[1:])
new_patterns = dict.fromkeys(args)
new_patterns.update(kwargs)
- ret_val = self.execute_command('PSUBSCRIBE', *new_patterns.keys())
+ ret_val = self.execute_command("PSUBSCRIBE", *new_patterns.keys())
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
@@ -1391,7 +1445,7 @@ class PubSub:
else:
patterns = self.patterns
self.pending_unsubscribe_patterns.update(patterns)
- return self.execute_command('PUNSUBSCRIBE', *args)
+ return self.execute_command("PUNSUBSCRIBE", *args)
def subscribe(self, *args, **kwargs):
"""
@@ -1405,7 +1459,7 @@ class PubSub:
args = list_or_args(args[0], args[1:])
new_channels = dict.fromkeys(args)
new_channels.update(kwargs)
- ret_val = self.execute_command('SUBSCRIBE', *new_channels.keys())
+ ret_val = self.execute_command("SUBSCRIBE", *new_channels.keys())
# update the channels dict AFTER we send the command. we don't want to
# subscribe twice to these channels, once for the command and again
# for the reconnection.
@@ -1425,7 +1479,7 @@ class PubSub:
else:
channels = self.channels
self.pending_unsubscribe_channels.update(channels)
- return self.execute_command('UNSUBSCRIBE', *args)
+ return self.execute_command("UNSUBSCRIBE", *args)
def listen(self):
"Listen for messages on channels this client has been subscribed to"
@@ -1451,8 +1505,8 @@ class PubSub:
"""
Ping the Redis server
"""
- message = '' if message is None else message
- return self.execute_command('PING', message)
+ message = "" if message is None else message
+ return self.execute_command("PING", message)
def handle_message(self, response, ignore_subscribe_messages=False):
"""
@@ -1461,31 +1515,31 @@ class PubSub:
message being returned.
"""
message_type = str_if_bytes(response[0])
- if message_type == 'pmessage':
+ if message_type == "pmessage":
message = {
- 'type': message_type,
- 'pattern': response[1],
- 'channel': response[2],
- 'data': response[3]
+ "type": message_type,
+ "pattern": response[1],
+ "channel": response[2],
+ "data": response[3],
}
- elif message_type == 'pong':
+ elif message_type == "pong":
message = {
- 'type': message_type,
- 'pattern': None,
- 'channel': None,
- 'data': response[1]
+ "type": message_type,
+ "pattern": None,
+ "channel": None,
+ "data": response[1],
}
else:
message = {
- 'type': message_type,
- 'pattern': None,
- 'channel': response[1],
- 'data': response[2]
+ "type": message_type,
+ "pattern": None,
+ "channel": response[1],
+ "data": response[2],
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
- if message_type == 'punsubscribe':
+ if message_type == "punsubscribe":
pattern = response[1]
if pattern in self.pending_unsubscribe_patterns:
self.pending_unsubscribe_patterns.remove(pattern)
@@ -1498,14 +1552,14 @@ class PubSub:
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
- if message_type == 'pmessage':
- handler = self.patterns.get(message['pattern'], None)
+ if message_type == "pmessage":
+ handler = self.patterns.get(message["pattern"], None)
else:
- handler = self.channels.get(message['channel'], None)
+ handler = self.channels.get(message["channel"], None)
if handler:
handler(message)
return None
- elif message_type != 'pong':
+ elif message_type != "pong":
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
@@ -1513,8 +1567,7 @@ class PubSub:
return message
- def run_in_thread(self, sleep_time=0, daemon=False,
- exception_handler=None):
+ def run_in_thread(self, sleep_time=0, daemon=False, exception_handler=None):
for channel, handler in self.channels.items():
if handler is None:
raise PubSubError(f"Channel: '{channel}' has no handler registered")
@@ -1523,18 +1576,14 @@ class PubSub:
raise PubSubError(f"Pattern: '{pattern}' has no handler registered")
thread = PubSubWorkerThread(
- self,
- sleep_time,
- daemon=daemon,
- exception_handler=exception_handler
+ self, sleep_time, daemon=daemon, exception_handler=exception_handler
)
thread.start()
return thread
class PubSubWorkerThread(threading.Thread):
- def __init__(self, pubsub, sleep_time, daemon=False,
- exception_handler=None):
+ def __init__(self, pubsub, sleep_time, daemon=False, exception_handler=None):
super().__init__()
self.daemon = daemon
self.pubsub = pubsub
@@ -1550,8 +1599,7 @@ class PubSubWorkerThread(threading.Thread):
sleep_time = self.sleep_time
while self._running.is_set():
try:
- pubsub.get_message(ignore_subscribe_messages=True,
- timeout=sleep_time)
+ pubsub.get_message(ignore_subscribe_messages=True, timeout=sleep_time)
except BaseException as e:
if self.exception_handler is None:
raise
@@ -1584,10 +1632,9 @@ class Pipeline(Redis):
on a key of a different datatype.
"""
- UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'}
+ UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
- def __init__(self, connection_pool, response_callbacks, transaction,
- shard_hint):
+ def __init__(self, connection_pool, response_callbacks, transaction, shard_hint):
self.connection_pool = connection_pool
self.connection = None
self.response_callbacks = response_callbacks
@@ -1625,7 +1672,7 @@ class Pipeline(Redis):
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
- self.connection.send_command('UNWATCH')
+ self.connection.send_command("UNWATCH")
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
@@ -1645,15 +1692,15 @@ class Pipeline(Redis):
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
- raise RedisError('Cannot issue nested calls to MULTI')
+ raise RedisError("Cannot issue nested calls to MULTI")
if self.command_stack:
- raise RedisError('Commands without an initial WATCH have already '
- 'been issued')
+ raise RedisError(
+ "Commands without an initial WATCH have already " "been issued"
+ )
self.explicit_transaction = True
def execute_command(self, *args, **kwargs):
- if (self.watching or args[0] == 'WATCH') and \
- not self.explicit_transaction:
+ if (self.watching or args[0] == "WATCH") and not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
@@ -1670,8 +1717,9 @@ class Pipeline(Redis):
# indicates the user should retry this transaction.
if self.watching:
self.reset()
- raise WatchError("A ConnectionError occurred on while "
- "watching one or more keys")
+ raise WatchError(
+ "A ConnectionError occurred on while " "watching one or more keys"
+ )
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(error, TimeoutError)):
@@ -1689,16 +1737,15 @@ class Pipeline(Redis):
conn = self.connection
# if this is the first call, we need a connection
if not conn:
- conn = self.connection_pool.get_connection(command_name,
- self.shard_hint)
+ conn = self.connection_pool.get_connection(command_name, self.shard_hint)
self.connection = conn
return conn.retry.call_with_retry(
- lambda: self._send_command_parse_response(conn,
- command_name,
- *args,
- **options),
- lambda error: self._disconnect_reset_raise(conn, error))
+ lambda: self._send_command_parse_response(
+ conn, command_name, *args, **options
+ ),
+ lambda error: self._disconnect_reset_raise(conn, error),
+ )
def pipeline_execute_command(self, *args, **options):
"""
@@ -1716,9 +1763,10 @@ class Pipeline(Redis):
return self
def _execute_transaction(self, connection, commands, raise_on_error):
- cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
- all_cmds = connection.pack_commands([args for args, options in cmds
- if EMPTY_RESPONSE not in options])
+ cmds = chain([(("MULTI",), {})], commands, [(("EXEC",), {})])
+ all_cmds = connection.pack_commands(
+ [args for args, options in cmds if EMPTY_RESPONSE not in options]
+ )
connection.send_packed_command(all_cmds)
errors = []
@@ -1727,7 +1775,7 @@ class Pipeline(Redis):
# so that we read all the additional command messages from
# the socket
try:
- self.parse_response(connection, '_')
+ self.parse_response(connection, "_")
except ResponseError as e:
errors.append((0, e))
@@ -1737,14 +1785,14 @@ class Pipeline(Redis):
errors.append((i, command[1][EMPTY_RESPONSE]))
else:
try:
- self.parse_response(connection, '_')
+ self.parse_response(connection, "_")
except ResponseError as e:
self.annotate_exception(e, i + 1, command[0])
errors.append((i, e))
# parse the EXEC.
try:
- response = self.parse_response(connection, '_')
+ response = self.parse_response(connection, "_")
except ExecAbortError:
if errors:
raise errors[0][1]
@@ -1762,8 +1810,9 @@ class Pipeline(Redis):
if len(response) != len(commands):
self.connection.disconnect()
- raise ResponseError("Wrong number of response items from "
- "pipeline execution")
+ raise ResponseError(
+ "Wrong number of response items from " "pipeline execution"
+ )
# find any errors in the response and raise if necessary
if raise_on_error:
@@ -1788,8 +1837,7 @@ class Pipeline(Redis):
response = []
for args, options in commands:
try:
- response.append(
- self.parse_response(connection, args[0], **options))
+ response.append(self.parse_response(connection, args[0], **options))
except ResponseError as e:
response.append(e)
@@ -1804,19 +1852,18 @@ class Pipeline(Redis):
raise r
def annotate_exception(self, exception, number, command):
- cmd = ' '.join(map(safe_str, command))
+ cmd = " ".join(map(safe_str, command))
msg = (
- f'Command # {number} ({cmd}) of pipeline '
- f'caused error: {exception.args[0]}'
+ f"Command # {number} ({cmd}) of pipeline "
+ f"caused error: {exception.args[0]}"
)
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
- result = Redis.parse_response(
- self, connection, command_name, **options)
+ result = Redis.parse_response(self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
- elif command_name == 'WATCH':
+ elif command_name == "WATCH":
self.watching = True
return result
@@ -1827,11 +1874,11 @@ class Pipeline(Redis):
shas = [s.sha for s in scripts]
# we can't use the normal script_* methods because they would just
# get buffered in the pipeline.
- exists = immediate('SCRIPT EXISTS', *shas)
+ exists = immediate("SCRIPT EXISTS", *shas)
if not all(exists):
for s, exist in zip(scripts, exists):
if not exist:
- s.sha = immediate('SCRIPT LOAD', s.script)
+ s.sha = immediate("SCRIPT LOAD", s.script)
def _disconnect_raise_reset(self, conn, error):
"""
@@ -1844,8 +1891,9 @@ class Pipeline(Redis):
# since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
- raise WatchError("A ConnectionError occurred on while "
- "watching one or more keys")
+ raise WatchError(
+ "A ConnectionError occurred on while " "watching one or more keys"
+ )
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(error, TimeoutError)):
@@ -1866,8 +1914,7 @@ class Pipeline(Redis):
conn = self.connection
if not conn:
- conn = self.connection_pool.get_connection('MULTI',
- self.shard_hint)
+ conn = self.connection_pool.get_connection("MULTI", self.shard_hint)
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
@@ -1875,7 +1922,8 @@ class Pipeline(Redis):
try:
return conn.retry.call_with_retry(
lambda: execute(conn, stack, raise_on_error),
- lambda error: self._disconnect_raise_reset(conn, error))
+ lambda error: self._disconnect_raise_reset(conn, error),
+ )
finally:
self.reset()
@@ -1888,9 +1936,9 @@ class Pipeline(Redis):
def watch(self, *names):
"Watches the values at keys ``names``"
if self.explicit_transaction:
- raise RedisError('Cannot issue a WATCH after a MULTI')
- return self.execute_command('WATCH', *names)
+ raise RedisError("Cannot issue a WATCH after a MULTI")
+ return self.execute_command("WATCH", *names)
def unwatch(self):
"Unwatches all previously specified keys"
- return self.watching and self.execute_command('UNWATCH') or True
+ return self.watching and self.execute_command("UNWATCH") or True
diff --git a/redis/cluster.py b/redis/cluster.py
index c1853aa..57e8316 100644
--- a/redis/cluster.py
+++ b/redis/cluster.py
@@ -2,18 +2,15 @@ import copy
import logging
import random
import socket
-import time
-import threading
import sys
-
+import threading
+import time
from collections import OrderedDict
-from redis.client import CaseInsensitiveDict, Redis, PubSub
-from redis.commands import (
- ClusterCommands,
- CommandsParser
-)
-from redis.connection import DefaultParser, ConnectionPool, Encoder, parse_url
-from redis.crc import key_slot, REDIS_CLUSTER_HASH_SLOTS
+
+from redis.client import CaseInsensitiveDict, PubSub, Redis
+from redis.commands import ClusterCommands, CommandsParser
+from redis.connection import ConnectionPool, DefaultParser, Encoder, parse_url
+from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
from redis.exceptions import (
AskError,
BusyLoadingError,
@@ -34,15 +31,15 @@ from redis.utils import (
dict_merge,
list_keys_to_dict,
merge_result,
+ safe_str,
str_if_bytes,
- safe_str
)
log = logging.getLogger(__name__)
def get_node_name(host, port):
- return f'{host}:{port}'
+ return f"{host}:{port}"
def get_connection(redis_node, *args, **options):
@@ -67,15 +64,12 @@ def parse_pubsub_numsub(command, res, **options):
except KeyError:
numsub_d[channel] = numsubbed
- ret_numsub = [
- (channel, numsub)
- for channel, numsub in numsub_d.items()
- ]
+ ret_numsub = [(channel, numsub) for channel, numsub in numsub_d.items()]
return ret_numsub
def parse_cluster_slots(resp, **options):
- current_host = options.get('current_host', '')
+ current_host = options.get("current_host", "")
def fix_server(*args):
return str_if_bytes(args[0]) or current_host, args[1]
@@ -85,8 +79,8 @@ def parse_cluster_slots(resp, **options):
start, end, primary = slot[:3]
replicas = slot[3:]
slots[start, end] = {
- 'primary': fix_server(*primary),
- 'replicas': [fix_server(*replica) for replica in replicas],
+ "primary": fix_server(*primary),
+ "replicas": [fix_server(*replica) for replica in replicas],
}
return slots
@@ -132,47 +126,49 @@ KWARGS_DISABLED_KEYS = (
# Not complete, but covers the major ones
# https://redis.io/commands
-READ_COMMANDS = frozenset([
- "BITCOUNT",
- "BITPOS",
- "EXISTS",
- "GEODIST",
- "GEOHASH",
- "GEOPOS",
- "GEORADIUS",
- "GEORADIUSBYMEMBER",
- "GET",
- "GETBIT",
- "GETRANGE",
- "HEXISTS",
- "HGET",
- "HGETALL",
- "HKEYS",
- "HLEN",
- "HMGET",
- "HSTRLEN",
- "HVALS",
- "KEYS",
- "LINDEX",
- "LLEN",
- "LRANGE",
- "MGET",
- "PTTL",
- "RANDOMKEY",
- "SCARD",
- "SDIFF",
- "SINTER",
- "SISMEMBER",
- "SMEMBERS",
- "SRANDMEMBER",
- "STRLEN",
- "SUNION",
- "TTL",
- "ZCARD",
- "ZCOUNT",
- "ZRANGE",
- "ZSCORE",
-])
+READ_COMMANDS = frozenset(
+ [
+ "BITCOUNT",
+ "BITPOS",
+ "EXISTS",
+ "GEODIST",
+ "GEOHASH",
+ "GEOPOS",
+ "GEORADIUS",
+ "GEORADIUSBYMEMBER",
+ "GET",
+ "GETBIT",
+ "GETRANGE",
+ "HEXISTS",
+ "HGET",
+ "HGETALL",
+ "HKEYS",
+ "HLEN",
+ "HMGET",
+ "HSTRLEN",
+ "HVALS",
+ "KEYS",
+ "LINDEX",
+ "LLEN",
+ "LRANGE",
+ "MGET",
+ "PTTL",
+ "RANDOMKEY",
+ "SCARD",
+ "SDIFF",
+ "SINTER",
+ "SISMEMBER",
+ "SMEMBERS",
+ "SRANDMEMBER",
+ "STRLEN",
+ "SUNION",
+ "TTL",
+ "ZCARD",
+ "ZCOUNT",
+ "ZRANGE",
+ "ZSCORE",
+ ]
+)
def cleanup_kwargs(**kwargs):
@@ -190,14 +186,16 @@ def cleanup_kwargs(**kwargs):
class ClusterParser(DefaultParser):
EXCEPTION_CLASSES = dict_merge(
- DefaultParser.EXCEPTION_CLASSES, {
- 'ASK': AskError,
- 'TRYAGAIN': TryAgainError,
- 'MOVED': MovedError,
- 'CLUSTERDOWN': ClusterDownError,
- 'CROSSSLOT': ClusterCrossSlotError,
- 'MASTERDOWN': MasterDownError,
- })
+ DefaultParser.EXCEPTION_CLASSES,
+ {
+ "ASK": AskError,
+ "TRYAGAIN": TryAgainError,
+ "MOVED": MovedError,
+ "CLUSTERDOWN": ClusterDownError,
+ "CROSSSLOT": ClusterCrossSlotError,
+ "MASTERDOWN": MasterDownError,
+ },
+ )
class RedisCluster(ClusterCommands):
@@ -209,13 +207,7 @@ class RedisCluster(ClusterCommands):
RANDOM = "random"
DEFAULT_NODE = "default-node"
- NODE_FLAGS = {
- PRIMARIES,
- REPLICAS,
- ALL_NODES,
- RANDOM,
- DEFAULT_NODE
- }
+ NODE_FLAGS = {PRIMARIES, REPLICAS, ALL_NODES, RANDOM, DEFAULT_NODE}
COMMAND_FLAGS = dict_merge(
list_keys_to_dict(
@@ -292,119 +284,138 @@ class RedisCluster(ClusterCommands):
)
CLUSTER_COMMANDS_RESPONSE_CALLBACKS = {
- 'CLUSTER ADDSLOTS': bool,
- 'CLUSTER COUNT-FAILURE-REPORTS': int,
- 'CLUSTER COUNTKEYSINSLOT': int,
- 'CLUSTER DELSLOTS': bool,
- 'CLUSTER FAILOVER': bool,
- 'CLUSTER FORGET': bool,
- 'CLUSTER GETKEYSINSLOT': list,
- 'CLUSTER KEYSLOT': int,
- 'CLUSTER MEET': bool,
- 'CLUSTER REPLICATE': bool,
- 'CLUSTER RESET': bool,
- 'CLUSTER SAVECONFIG': bool,
- 'CLUSTER SET-CONFIG-EPOCH': bool,
- 'CLUSTER SETSLOT': bool,
- 'CLUSTER SLOTS': parse_cluster_slots,
- 'ASKING': bool,
- 'READONLY': bool,
- 'READWRITE': bool,
+ "CLUSTER ADDSLOTS": bool,
+ "CLUSTER COUNT-FAILURE-REPORTS": int,
+ "CLUSTER COUNTKEYSINSLOT": int,
+ "CLUSTER DELSLOTS": bool,
+ "CLUSTER FAILOVER": bool,
+ "CLUSTER FORGET": bool,
+ "CLUSTER GETKEYSINSLOT": list,
+ "CLUSTER KEYSLOT": int,
+ "CLUSTER MEET": bool,
+ "CLUSTER REPLICATE": bool,
+ "CLUSTER RESET": bool,
+ "CLUSTER SAVECONFIG": bool,
+ "CLUSTER SET-CONFIG-EPOCH": bool,
+ "CLUSTER SETSLOT": bool,
+ "CLUSTER SLOTS": parse_cluster_slots,
+ "ASKING": bool,
+ "READONLY": bool,
+ "READWRITE": bool,
}
RESULT_CALLBACKS = dict_merge(
- list_keys_to_dict([
- "PUBSUB NUMSUB",
- ], parse_pubsub_numsub),
- list_keys_to_dict([
- "PUBSUB NUMPAT",
- ], lambda command, res: sum(list(res.values()))),
- list_keys_to_dict([
- "KEYS",
- "PUBSUB CHANNELS",
- ], merge_result),
- list_keys_to_dict([
- "PING",
- "CONFIG SET",
- "CONFIG REWRITE",
- "CONFIG RESETSTAT",
- "CLIENT SETNAME",
- "BGSAVE",
- "SLOWLOG RESET",
- "SAVE",
- "MEMORY PURGE",
- "CLIENT PAUSE",
- "CLIENT UNPAUSE",
- ], lambda command, res: all(res.values()) if isinstance(res, dict)
- else res),
- list_keys_to_dict([
- "DBSIZE",
- "WAIT",
- ], lambda command, res: sum(res.values()) if isinstance(res, dict)
- else res),
- list_keys_to_dict([
- "CLIENT UNBLOCK",
- ], lambda command, res: 1 if sum(res.values()) > 0 else 0),
- list_keys_to_dict([
- "SCAN",
- ], parse_scan_result)
+ list_keys_to_dict(
+ [
+ "PUBSUB NUMSUB",
+ ],
+ parse_pubsub_numsub,
+ ),
+ list_keys_to_dict(
+ [
+ "PUBSUB NUMPAT",
+ ],
+ lambda command, res: sum(list(res.values())),
+ ),
+ list_keys_to_dict(
+ [
+ "KEYS",
+ "PUBSUB CHANNELS",
+ ],
+ merge_result,
+ ),
+ list_keys_to_dict(
+ [
+ "PING",
+ "CONFIG SET",
+ "CONFIG REWRITE",
+ "CONFIG RESETSTAT",
+ "CLIENT SETNAME",
+ "BGSAVE",
+ "SLOWLOG RESET",
+ "SAVE",
+ "MEMORY PURGE",
+ "CLIENT PAUSE",
+ "CLIENT UNPAUSE",
+ ],
+ lambda command, res: all(res.values()) if isinstance(res, dict) else res,
+ ),
+ list_keys_to_dict(
+ [
+ "DBSIZE",
+ "WAIT",
+ ],
+ lambda command, res: sum(res.values()) if isinstance(res, dict) else res,
+ ),
+ list_keys_to_dict(
+ [
+ "CLIENT UNBLOCK",
+ ],
+ lambda command, res: 1 if sum(res.values()) > 0 else 0,
+ ),
+ list_keys_to_dict(
+ [
+ "SCAN",
+ ],
+ parse_scan_result,
+ ),
)
def __init__(
- self,
- host=None,
- port=6379,
- startup_nodes=None,
- cluster_error_retry_attempts=3,
- require_full_coverage=True,
- skip_full_coverage_check=False,
- reinitialize_steps=10,
- read_from_replicas=False,
- url=None,
- retry_on_timeout=False,
- retry=None,
- **kwargs
+ self,
+ host=None,
+ port=6379,
+ startup_nodes=None,
+ cluster_error_retry_attempts=3,
+ require_full_coverage=True,
+ skip_full_coverage_check=False,
+ reinitialize_steps=10,
+ read_from_replicas=False,
+ url=None,
+ retry_on_timeout=False,
+ retry=None,
+ **kwargs,
):
"""
- Initialize a new RedisCluster client.
-
- :startup_nodes: 'list[ClusterNode]'
- List of nodes from which initial bootstrapping can be done
- :host: 'str'
- Can be used to point to a startup node
- :port: 'int'
- Can be used to point to a startup node
- :require_full_coverage: 'bool'
- If set to True, as it is by default, all slots must be covered.
- If set to False and not all slots are covered, the instance
- creation will succeed only if 'cluster-require-full-coverage'
- configuration is set to 'no' in all of the cluster's nodes.
- Otherwise, RedisClusterException will be thrown.
- :skip_full_coverage_check: 'bool'
- If require_full_coverage is set to False, a check of
- cluster-require-full-coverage config will be executed against all
- nodes. Set skip_full_coverage_check to True to skip this check.
- Useful for clusters without the CONFIG command (like ElastiCache)
- :read_from_replicas: 'bool'
- Enable read from replicas in READONLY mode. You can read possibly
- stale data.
- When set to true, read commands will be assigned between the
- primary and its replications in a Round-Robin manner.
- :cluster_error_retry_attempts: 'int'
- Retry command execution attempts when encountering ClusterDownError
- or ConnectionError
- :retry_on_timeout: 'bool'
- To specify a retry policy, first set `retry_on_timeout` to `True`
- then set `retry` to a valid `Retry` object
- :retry: 'Retry'
- a `Retry` object
- :**kwargs:
- Extra arguments that will be sent into Redis instance when created
- (See Official redis-py doc for supported kwargs
- [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
- Some kwargs are not supported and will raise a
- RedisClusterException:
- - db (Redis do not support database SELECT in cluster mode)
+ Initialize a new RedisCluster client.
+
+ :startup_nodes: 'list[ClusterNode]'
+ List of nodes from which initial bootstrapping can be done
+ :host: 'str'
+ Can be used to point to a startup node
+ :port: 'int'
+ Can be used to point to a startup node
+ :require_full_coverage: 'bool'
+ If set to True, as it is by default, all slots must be covered.
+ If set to False and not all slots are covered, the instance
+ creation will succeed only if 'cluster-require-full-coverage'
+ configuration is set to 'no' in all of the cluster's nodes.
+ Otherwise, RedisClusterException will be thrown.
+ :skip_full_coverage_check: 'bool'
+ If require_full_coverage is set to False, a check of
+ cluster-require-full-coverage config will be executed against all
+ nodes. Set skip_full_coverage_check to True to skip this check.
+ Useful for clusters without the CONFIG command (like ElastiCache)
+ :read_from_replicas: 'bool'
+ Enable read from replicas in READONLY mode. You can read possibly
+ stale data.
+ When set to true, read commands will be assigned between the
+ primary and its replications in a Round-Robin manner.
+ :cluster_error_retry_attempts: 'int'
+ Retry command execution attempts when encountering ClusterDownError
+ or ConnectionError
+ :retry_on_timeout: 'bool'
+ To specify a retry policy, first set `retry_on_timeout` to `True`
+ then set `retry` to a valid `Retry` object
+ :retry: 'Retry'
+ a `Retry` object
+ :**kwargs:
+ Extra arguments that will be sent into Redis instance when created
+ (See Official redis-py doc for supported kwargs
+ [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
+ Some kwargs are not supported and will raise a
+ RedisClusterException:
+ - db (Redis do not support database SELECT in cluster mode)
"""
log.info("Creating a new instance of RedisCluster client")
@@ -418,8 +429,7 @@ class RedisCluster(ClusterCommands):
)
if retry_on_timeout:
- kwargs.update({'retry_on_timeout': retry_on_timeout,
- 'retry': retry})
+ kwargs.update({"retry_on_timeout": retry_on_timeout, "retry": retry})
# Get the startup node/s
from_url = False
@@ -429,15 +439,16 @@ class RedisCluster(ClusterCommands):
if "path" in url_options:
raise RedisClusterException(
"RedisCluster does not currently support Unix Domain "
- "Socket connections")
+ "Socket connections"
+ )
if "db" in url_options and url_options["db"] != 0:
# Argument 'db' is not possible to use in cluster mode
raise RedisClusterException(
"A ``db`` querystring option can only be 0 in cluster mode"
)
kwargs.update(url_options)
- host = kwargs.get('host')
- port = kwargs.get('port', port)
+ host = kwargs.get("host")
+ port = kwargs.get("port", port)
startup_nodes.append(ClusterNode(host, port))
elif host is not None and port is not None:
startup_nodes.append(ClusterNode(host, port))
@@ -450,7 +461,8 @@ class RedisCluster(ClusterCommands):
" RedisCluster(host='localhost', port=6379)\n"
"2. list of startup nodes, for example:\n"
" RedisCluster(startup_nodes=[ClusterNode('localhost', 6379),"
- " ClusterNode('localhost', 6378)])")
+ " ClusterNode('localhost', 6378)])"
+ )
log.debug(f"startup_nodes : {startup_nodes}")
# Update the connection arguments
# Whenever a new connection is established, RedisCluster's on_connect
@@ -482,9 +494,9 @@ class RedisCluster(ClusterCommands):
)
self.cluster_response_callbacks = CaseInsensitiveDict(
- self.__class__.CLUSTER_COMMANDS_RESPONSE_CALLBACKS)
- self.result_callbacks = CaseInsensitiveDict(
- self.__class__.RESULT_CALLBACKS)
+ self.__class__.CLUSTER_COMMANDS_RESPONSE_CALLBACKS
+ )
+ self.result_callbacks = CaseInsensitiveDict(self.__class__.RESULT_CALLBACKS)
self.commands_parser = CommandsParser(self)
self._lock = threading.Lock()
@@ -563,9 +575,9 @@ class RedisCluster(ClusterCommands):
# to a failover, we should establish a READONLY connection
# regardless of the server type. If this is a primary connection,
# READONLY would not affect executing write commands.
- connection.send_command('READONLY')
- if str_if_bytes(connection.read_response()) != 'OK':
- raise ConnectionError('READONLY command failed')
+ connection.send_command("READONLY")
+ if str_if_bytes(connection.read_response()) != "OK":
+ raise ConnectionError("READONLY command failed")
if self.user_on_connect_func is not None:
self.user_on_connect_func(connection)
@@ -601,9 +613,7 @@ class RedisCluster(ClusterCommands):
slot = self.keyslot(key)
slot_cache = self.nodes_manager.slots_cache.get(slot)
if slot_cache is None or len(slot_cache) == 0:
- raise SlotNotCoveredError(
- f'Slot "{slot}" is not covered by the cluster.'
- )
+ raise SlotNotCoveredError(f'Slot "{slot}" is not covered by the cluster.')
if replica and len(self.nodes_manager.slots_cache[slot]) < 2:
return None
elif replica:
@@ -627,8 +637,10 @@ class RedisCluster(ClusterCommands):
:return True if the default node was set, else False
"""
if node is None or self.get_node(node_name=node.name) is None:
- log.info("The requested node does not exist in the cluster, so "
- "the default node was not changed.")
+ log.info(
+ "The requested node does not exist in the cluster, so "
+ "the default node was not changed."
+ )
return False
self.nodes_manager.default_node = node
log.info(f"Changed the default cluster node to {node}")
@@ -651,12 +663,10 @@ class RedisCluster(ClusterCommands):
when calling execute() will only return the result stack.
"""
if shard_hint:
- raise RedisClusterException(
- "shard_hint is deprecated in cluster mode")
+ raise RedisClusterException("shard_hint is deprecated in cluster mode")
if transaction:
- raise RedisClusterException(
- "transaction is deprecated in cluster mode")
+ raise RedisClusterException("transaction is deprecated in cluster mode")
return ClusterPipeline(
nodes_manager=self.nodes_manager,
@@ -665,7 +675,7 @@ class RedisCluster(ClusterCommands):
cluster_response_callbacks=self.cluster_response_callbacks,
cluster_error_retry_attempts=self.cluster_error_retry_attempts,
read_from_replicas=self.read_from_replicas,
- reinitialize_steps=self.reinitialize_steps
+ reinitialize_steps=self.reinitialize_steps,
)
def _determine_nodes(self, *args, **kwargs):
@@ -698,7 +708,8 @@ class RedisCluster(ClusterCommands):
# get the node that holds the key's slot
slot = self.determine_slot(*args)
node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and command in READ_COMMANDS)
+ slot, self.read_from_replicas and command in READ_COMMANDS
+ )
log.debug(f"Target for {args}: slot {slot}")
return [node]
@@ -760,8 +771,7 @@ class RedisCluster(ClusterCommands):
self.nodes_manager.initialize()
def _is_nodes_flag(self, target_nodes):
- return isinstance(target_nodes, str) \
- and target_nodes in self.node_flags
+ return isinstance(target_nodes, str) and target_nodes in self.node_flags
def _parse_target_nodes(self, target_nodes):
if isinstance(target_nodes, list):
@@ -812,8 +822,9 @@ class RedisCluster(ClusterCommands):
# the command execution since the nodes may not be valid anymore
# after the tables were reinitialized. So in case of passed target
# nodes, retry_attempts will be set to 1.
- retry_attempts = 1 if target_nodes_specified else \
- self.cluster_error_retry_attempts
+ retry_attempts = (
+ 1 if target_nodes_specified else self.cluster_error_retry_attempts
+ )
exception = None
for _ in range(0, retry_attempts):
try:
@@ -821,13 +832,14 @@ class RedisCluster(ClusterCommands):
if not target_nodes_specified:
# Determine the nodes to execute the command on
target_nodes = self._determine_nodes(
- *args, **kwargs, nodes_flag=target_nodes)
+ *args, **kwargs, nodes_flag=target_nodes
+ )
if not target_nodes:
raise RedisClusterException(
- f"No targets were found to execute {args} command on")
+ f"No targets were found to execute {args} command on"
+ )
for node in target_nodes:
- res[node.name] = self._execute_command(
- node, *args, **kwargs)
+ res[node.name] = self._execute_command(node, *args, **kwargs)
# Return the processed result
return self._process_result(args[0], res, **kwargs)
except (ClusterDownError, ConnectionError) as e:
@@ -862,9 +874,9 @@ class RedisCluster(ClusterCommands):
# MOVED occurred and the slots cache was updated,
# refresh the target node
slot = self.determine_slot(*args)
- target_node = self.nodes_manager. \
- get_node_from_slot(slot, self.read_from_replicas and
- command in READ_COMMANDS)
+ target_node = self.nodes_manager.get_node_from_slot(
+ slot, self.read_from_replicas and command in READ_COMMANDS
+ )
moved = False
log.debug(
@@ -879,11 +891,11 @@ class RedisCluster(ClusterCommands):
asking = False
connection.send_command(*args)
- response = redis_node.parse_response(connection, command,
- **kwargs)
+ response = redis_node.parse_response(connection, command, **kwargs)
if command in self.cluster_response_callbacks:
response = self.cluster_response_callbacks[command](
- response, **kwargs)
+ response, **kwargs
+ )
return response
except (RedisClusterException, BusyLoadingError):
@@ -997,7 +1009,7 @@ class RedisCluster(ClusterCommands):
class ClusterNode:
def __init__(self, host, port, server_type=None, redis_connection=None):
- if host == 'localhost':
+ if host == "localhost":
host = socket.gethostbyname(host)
self.host = host
@@ -1008,11 +1020,11 @@ class ClusterNode:
def __repr__(self):
return (
- f'[host={self.host},'
- f'port={self.port},'
- f'name={self.name},'
- f'server_type={self.server_type},'
- f'redis_connection={self.redis_connection}]'
+ f"[host={self.host},"
+ f"port={self.port},"
+ f"name={self.name},"
+ f"server_type={self.server_type},"
+ f"redis_connection={self.redis_connection}]"
)
def __eq__(self, obj):
@@ -1029,8 +1041,7 @@ class LoadBalancer:
self.start_index = start_index
def get_server_index(self, primary, list_size):
- server_index = self.primary_to_idx.setdefault(primary,
- self.start_index)
+ server_index = self.primary_to_idx.setdefault(primary, self.start_index)
# Update the index
self.primary_to_idx[primary] = (server_index + 1) % list_size
return server_index
@@ -1040,9 +1051,15 @@ class LoadBalancer:
class NodesManager:
- def __init__(self, startup_nodes, from_url=False,
- require_full_coverage=True, skip_full_coverage_check=False,
- lock=None, **kwargs):
+ def __init__(
+ self,
+ startup_nodes,
+ from_url=False,
+ require_full_coverage=True,
+ skip_full_coverage_check=False,
+ lock=None,
+ **kwargs,
+ ):
self.nodes_cache = {}
self.slots_cache = {}
self.startup_nodes = {}
@@ -1122,8 +1139,7 @@ class NodesManager:
# Reset moved_exception
self._moved_exception = None
- def get_node_from_slot(self, slot, read_from_replicas=False,
- server_type=None):
+ def get_node_from_slot(self, slot, read_from_replicas=False, server_type=None):
"""
Gets a node that servers this hash slot
"""
@@ -1132,8 +1148,7 @@ class NodesManager:
if self._moved_exception:
self._update_moved_slots()
- if self.slots_cache.get(slot) is None or \
- len(self.slots_cache[slot]) == 0:
+ if self.slots_cache.get(slot) is None or len(self.slots_cache[slot]) == 0:
raise SlotNotCoveredError(
f'Slot "{slot}" not covered by the cluster. '
f'"require_full_coverage={self._require_full_coverage}"'
@@ -1143,19 +1158,19 @@ class NodesManager:
# get the server index in a Round-Robin manner
primary_name = self.slots_cache[slot][0].name
node_idx = self.read_load_balancer.get_server_index(
- primary_name, len(self.slots_cache[slot]))
+ primary_name, len(self.slots_cache[slot])
+ )
elif (
- server_type is None
- or server_type == PRIMARY
- or len(self.slots_cache[slot]) == 1
+ server_type is None
+ or server_type == PRIMARY
+ or len(self.slots_cache[slot]) == 1
):
# return a primary
node_idx = 0
else:
# return a replica
# randomly choose one of the replicas
- node_idx = random.randint(
- 1, len(self.slots_cache[slot]) - 1)
+ node_idx = random.randint(1, len(self.slots_cache[slot]) - 1)
return self.slots_cache[slot][node_idx]
@@ -1187,20 +1202,22 @@ class NodesManager:
def node_require_full_coverage(node):
try:
- return ("yes" in node.redis_connection.config_get(
- "cluster-require-full-coverage").values()
+ return (
+ "yes"
+ in node.redis_connection.config_get(
+ "cluster-require-full-coverage"
+ ).values()
)
except ConnectionError:
return False
except Exception as e:
raise RedisClusterException(
'ERROR sending "config get cluster-require-full-coverage"'
- f' command to redis server: {node.name}, {e}'
+ f" command to redis server: {node.name}, {e}"
)
# at least one node should have cluster-require-full-coverage yes
- return any(node_require_full_coverage(node)
- for node in cluster_nodes.values())
+ return any(node_require_full_coverage(node) for node in cluster_nodes.values())
def check_slots_coverage(self, slots_cache):
# Validate if all slots are covered or if we should try next
@@ -1229,11 +1246,7 @@ class NodesManager:
kwargs.update({"port": port})
r = Redis(connection_pool=ConnectionPool(**kwargs))
else:
- r = Redis(
- host=host,
- port=port,
- **kwargs
- )
+ r = Redis(host=host, port=port, **kwargs)
return r
def initialize(self):
@@ -1257,22 +1270,23 @@ class NodesManager:
# Create a new Redis connection and let Redis decode the
# responses so we won't need to handle that
copy_kwargs = copy.deepcopy(kwargs)
- copy_kwargs.update({"decode_responses": True,
- "encoding": "utf-8"})
+ copy_kwargs.update({"decode_responses": True, "encoding": "utf-8"})
r = self.create_redis_node(
- startup_node.host, startup_node.port, **copy_kwargs)
+ startup_node.host, startup_node.port, **copy_kwargs
+ )
self.startup_nodes[startup_node.name].redis_connection = r
cluster_slots = r.execute_command("CLUSTER SLOTS")
startup_nodes_reachable = True
except (ConnectionError, TimeoutError) as e:
msg = e.__str__
- log.exception('An exception occurred while trying to'
- ' initialize the cluster using the seed node'
- f' {startup_node.name}:\n{msg}')
+ log.exception(
+ "An exception occurred while trying to"
+ " initialize the cluster using the seed node"
+ f" {startup_node.name}:\n{msg}"
+ )
continue
except ResponseError as e:
- log.exception(
- 'ReseponseError sending "cluster slots" to redis server')
+ log.exception('ResponseError sending "cluster slots" to redis server')
# Isn't a cluster connection, so it won't parse these
# exceptions automatically
@@ -1282,13 +1296,13 @@ class NodesManager:
else:
raise RedisClusterException(
'ERROR sending "cluster slots" command to redis '
- f'server: {startup_node}. error: {message}'
+ f"server: {startup_node}. error: {message}"
)
except Exception as e:
message = e.__str__()
raise RedisClusterException(
'ERROR sending "cluster slots" command to redis '
- f'server: {startup_node}. error: {message}'
+ f"server: {startup_node}. error: {message}"
)
# CLUSTER SLOTS command results in the following output:
@@ -1298,9 +1312,11 @@ class NodesManager:
# primary node of the first slot section.
# If there's only one server in the cluster, its ``host`` is ''
# Fix it to the host in startup_nodes
- if (len(cluster_slots) == 1
- and len(cluster_slots[0][2][0]) == 0
- and len(self.startup_nodes) == 1):
+ if (
+ len(cluster_slots) == 1
+ and len(cluster_slots[0][2][0]) == 0
+ and len(self.startup_nodes) == 1
+ ):
cluster_slots[0][2][0] = startup_node.host
for slot in cluster_slots:
@@ -1327,10 +1343,10 @@ class NodesManager:
port = replica_node[1]
target_replica_node = tmp_nodes_cache.get(
- get_node_name(host, port))
+ get_node_name(host, port)
+ )
if target_replica_node is None:
- target_replica_node = ClusterNode(
- host, port, REPLICA)
+ target_replica_node = ClusterNode(host, port, REPLICA)
tmp_slots[i].append(target_replica_node)
# add this node to the nodes cache
tmp_nodes_cache[
@@ -1342,12 +1358,12 @@ class NodesManager:
tmp_slot = tmp_slots[i][0]
if tmp_slot.name != target_node.name:
disagreements.append(
- f'{tmp_slot.name} vs {target_node.name} on slot: {i}'
+ f"{tmp_slot.name} vs {target_node.name} on slot: {i}"
)
if len(disagreements) > 5:
raise RedisClusterException(
- f'startup_nodes could not agree on a valid '
+ f"startup_nodes could not agree on a valid "
f'slots cache: {", ".join(disagreements)}'
)
@@ -1366,8 +1382,8 @@ class NodesManager:
# Despite the requirement that the slots be covered, there
# isn't a full coverage
raise RedisClusterException(
- f'All slots are not covered after query all startup_nodes. '
- f'{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} covered...'
+ f"All slots are not covered after query all startup_nodes. "
+ f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} covered..."
)
elif not fully_covered and not self._require_full_coverage:
# The user set require_full_coverage to False.
@@ -1376,15 +1392,17 @@ class NodesManager:
# continue with partial coverage.
# see Redis Cluster configuration parameters in
# https://redis.io/topics/cluster-tutorial
- if not self._skip_full_coverage_check and \
- self.cluster_require_full_coverage(tmp_nodes_cache):
+ if (
+ not self._skip_full_coverage_check
+ and self.cluster_require_full_coverage(tmp_nodes_cache)
+ ):
raise RedisClusterException(
- 'Not all slots are covered but the cluster\'s '
- 'configuration requires full coverage. Set '
- 'cluster-require-full-coverage configuration to no on '
- 'all of the cluster nodes if you wish the cluster to '
- 'be able to serve without being fully covered.'
- f'{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} covered...'
+ "Not all slots are covered but the cluster's "
+ "configuration requires full coverage. Set "
+ "cluster-require-full-coverage configuration to no on "
+ "all of the cluster nodes if you wish the cluster to "
+ "be able to serve without being fully covered."
+ f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} covered..."
)
# Set the tmp variables to the real variables
@@ -1418,8 +1436,7 @@ class ClusterPubSub(PubSub):
https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
"""
- def __init__(self, redis_cluster, node=None, host=None, port=None,
- **kwargs):
+ def __init__(self, redis_cluster, node=None, host=None, port=None, **kwargs):
"""
When a pubsub instance is created without specifying a node, a single
node will be transparently chosen for the pubsub connection on the
@@ -1436,11 +1453,15 @@ class ClusterPubSub(PubSub):
log.info("Creating new instance of ClusterPubSub")
self.node = None
self.set_pubsub_node(redis_cluster, node, host, port)
- connection_pool = None if self.node is None else \
- redis_cluster.get_redis_connection(self.node).connection_pool
+ connection_pool = (
+ None
+ if self.node is None
+ else redis_cluster.get_redis_connection(self.node).connection_pool
+ )
self.cluster = redis_cluster
- super().__init__(**kwargs, connection_pool=connection_pool,
- encoder=redis_cluster.encoder)
+ super().__init__(
+ **kwargs, connection_pool=connection_pool, encoder=redis_cluster.encoder
+ )
def set_pubsub_node(self, cluster, node=None, host=None, port=None):
"""
@@ -1468,8 +1489,7 @@ class ClusterPubSub(PubSub):
pubsub_node = node
elif any([host, port]) is True:
# only 'host' or 'port' passed
- raise DataError('Passing a host requires passing a port, '
- 'and vice versa')
+ raise DataError("Passing a host requires passing a port, and vice versa")
else:
# nothing passed by the user. set node to None
pubsub_node = None
@@ -1489,7 +1509,8 @@ class ClusterPubSub(PubSub):
"""
if node is None or redis_cluster.get_node(node_name=node.name) is None:
raise RedisClusterException(
- f"Node {host}:{port} doesn't exist in the cluster")
+ f"Node {host}:{port} doesn't exist in the cluster"
+ )
def execute_command(self, *args, **kwargs):
"""
@@ -1508,9 +1529,9 @@ class ClusterPubSub(PubSub):
# this slot
channel = args[1]
slot = self.cluster.keyslot(channel)
- node = self.cluster.nodes_manager. \
- get_node_from_slot(slot, self.cluster.
- read_from_replicas)
+ node = self.cluster.nodes_manager.get_node_from_slot(
+ slot, self.cluster.read_from_replicas
+ )
else:
# Get a random node
node = self.cluster.get_random_node()
@@ -1518,8 +1539,7 @@ class ClusterPubSub(PubSub):
redis_connection = self.cluster.get_redis_connection(node)
self.connection_pool = redis_connection.connection_pool
self.connection = self.connection_pool.get_connection(
- 'pubsub',
- self.shard_hint
+ "pubsub", self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
@@ -1535,8 +1555,13 @@ class ClusterPubSub(PubSub):
return self.node.redis_connection
-ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError,
- MovedError, AskError, TryAgainError)
+ERRORS_ALLOW_RETRY = (
+ ConnectionError,
+ TimeoutError,
+ MovedError,
+ AskError,
+ TryAgainError,
+)
class ClusterPipeline(RedisCluster):
@@ -1545,18 +1570,25 @@ class ClusterPipeline(RedisCluster):
in cluster mode
"""
- def __init__(self, nodes_manager, result_callbacks=None,
- cluster_response_callbacks=None, startup_nodes=None,
- read_from_replicas=False, cluster_error_retry_attempts=3,
- reinitialize_steps=10, **kwargs):
- """
- """
+ def __init__(
+ self,
+ nodes_manager,
+ result_callbacks=None,
+ cluster_response_callbacks=None,
+ startup_nodes=None,
+ read_from_replicas=False,
+ cluster_error_retry_attempts=3,
+ reinitialize_steps=10,
+ **kwargs,
+ ):
+ """ """
log.info("Creating new instance of ClusterPipeline")
self.command_stack = []
self.nodes_manager = nodes_manager
self.refresh_table_asap = False
- self.result_callbacks = (result_callbacks or
- self.__class__.RESULT_CALLBACKS.copy())
+ self.result_callbacks = (
+ result_callbacks or self.__class__.RESULT_CALLBACKS.copy()
+ )
self.startup_nodes = startup_nodes if startup_nodes else []
self.read_from_replicas = read_from_replicas
self.command_flags = self.__class__.COMMAND_FLAGS.copy()
@@ -1576,18 +1608,15 @@ class ClusterPipeline(RedisCluster):
self.commands_parser = CommandsParser(super())
def __repr__(self):
- """
- """
+ """ """
return f"{type(self).__name__}"
def __enter__(self):
- """
- """
+ """ """
return self
def __exit__(self, exc_type, exc_value, traceback):
- """
- """
+ """ """
self.reset()
def __del__(self):
@@ -1597,8 +1626,7 @@ class ClusterPipeline(RedisCluster):
pass
def __len__(self):
- """
- """
+ """ """
return len(self.command_stack)
def __nonzero__(self):
@@ -1620,7 +1648,8 @@ class ClusterPipeline(RedisCluster):
Appends the executed command to the pipeline's command stack
"""
self.command_stack.append(
- PipelineCommand(args, options, len(self.command_stack)))
+ PipelineCommand(args, options, len(self.command_stack))
+ )
return self
def raise_first_error(self, stack):
@@ -1637,10 +1666,10 @@ class ClusterPipeline(RedisCluster):
"""
Provides extra context to the exception prior to it being handled
"""
- cmd = ' '.join(map(safe_str, command))
+ cmd = " ".join(map(safe_str, command))
msg = (
- f'Command # {number} ({cmd}) of pipeline '
- f'caused error: {exception.args[0]}'
+ f"Command # {number} ({cmd}) of pipeline "
+ f"caused error: {exception.args[0]}"
)
exception.args = (msg,) + exception.args[1:]
@@ -1686,8 +1715,9 @@ class ClusterPipeline(RedisCluster):
# self.connection_pool.release(self.connection)
# self.connection = None
- def send_cluster_commands(self, stack,
- raise_on_error=True, allow_redirections=True):
+ def send_cluster_commands(
+ self, stack, raise_on_error=True, allow_redirections=True
+ ):
"""
Wrapper for CLUSTERDOWN error handling.
@@ -1720,12 +1750,11 @@ class ClusterPipeline(RedisCluster):
# If it fails the configured number of times then raise
# exception back to caller of this method
- raise ClusterDownError(
- "CLUSTERDOWN error. Unable to rebuild the cluster")
+ raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
- def _send_cluster_commands(self, stack,
- raise_on_error=True,
- allow_redirections=True):
+ def _send_cluster_commands(
+ self, stack, raise_on_error=True, allow_redirections=True
+ ):
"""
Send a bunch of cluster commands to the redis cluster.
@@ -1751,7 +1780,8 @@ class ClusterPipeline(RedisCluster):
# command should route to.
slot = self.determine_slot(*c.args)
node = self.nodes_manager.get_node_from_slot(
- slot, self.read_from_replicas and c.args[0] in READ_COMMANDS)
+ slot, self.read_from_replicas and c.args[0] in READ_COMMANDS
+ )
# now that we know the name of the node
# ( it's just a string in the form of host:port )
@@ -1760,9 +1790,9 @@ class ClusterPipeline(RedisCluster):
if node_name not in nodes:
redis_node = self.get_redis_connection(node)
connection = get_connection(redis_node, c.args)
- nodes[node_name] = NodeCommands(redis_node.parse_response,
- redis_node.connection_pool,
- connection)
+ nodes[node_name] = NodeCommands(
+ redis_node.parse_response, redis_node.connection_pool, connection
+ )
nodes[node_name].append(c)
@@ -1808,9 +1838,10 @@ class ClusterPipeline(RedisCluster):
# if we have more commands to attempt, we've run into problems.
# collect all the commands we are allowed to retry.
# (MOVED, ASK, or connection errors or timeout errors)
- attempt = sorted((c for c in attempt
- if isinstance(c.result, ERRORS_ALLOW_RETRY)),
- key=lambda x: x.position)
+ attempt = sorted(
+ (c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)),
+ key=lambda x: x.position,
+ )
if attempt and allow_redirections:
# RETRY MAGIC HAPPENS HERE!
# send these remaing comamnds one at a time using `execute_command`
@@ -1831,10 +1862,10 @@ class ClusterPipeline(RedisCluster):
# flag to rebuild the slots table from scratch.
# So MOVED errors should correct themselves fairly quickly.
log.exception(
- f'An exception occurred during pipeline execution. '
- f'args: {attempt[-1].args}, '
- f'error: {type(attempt[-1].result).__name__} '
- f'{str(attempt[-1].result)}'
+ f"An exception occurred during pipeline execution. "
+ f"args: {attempt[-1].args}, "
+ f"error: {type(attempt[-1].result).__name__} "
+ f"{attempt[-1].result}"
)
self.reinitialize_counter += 1
if self._should_reinitialized():
@@ -1857,55 +1888,47 @@ class ClusterPipeline(RedisCluster):
return response
def _fail_on_redirect(self, allow_redirections):
- """
- """
+ """ """
if not allow_redirections:
raise RedisClusterException(
- "ASK & MOVED redirection not allowed in this pipeline")
+ "ASK & MOVED redirection not allowed in this pipeline"
+ )
def eval(self):
- """
- """
+ """ """
raise RedisClusterException("method eval() is not implemented")
def multi(self):
- """
- """
+ """ """
raise RedisClusterException("method multi() is not implemented")
def immediate_execute_command(self, *args, **options):
- """
- """
+ """ """
raise RedisClusterException(
- "method immediate_execute_command() is not implemented")
+ "method immediate_execute_command() is not implemented"
+ )
def _execute_transaction(self, *args, **kwargs):
- """
- """
- raise RedisClusterException(
- "method _execute_transaction() is not implemented")
+ """ """
+ raise RedisClusterException("method _execute_transaction() is not implemented")
def load_scripts(self):
- """
- """
- raise RedisClusterException(
- "method load_scripts() is not implemented")
+ """ """
+ raise RedisClusterException("method load_scripts() is not implemented")
def watch(self, *names):
- """
- """
+ """ """
raise RedisClusterException("method watch() is not implemented")
def unwatch(self):
- """
- """
+ """ """
raise RedisClusterException("method unwatch() is not implemented")
def script_load_for_pipeline(self, *args, **kwargs):
- """
- """
+ """ """
raise RedisClusterException(
- "method script_load_for_pipeline() is not implemented")
+ "method script_load_for_pipeline() is not implemented"
+ )
def delete(self, *names):
"""
@@ -1913,10 +1936,10 @@ class ClusterPipeline(RedisCluster):
"""
if len(names) != 1:
raise RedisClusterException(
- "deleting multiple keys is not "
- "implemented in pipeline command")
+ "deleting multiple keys is not " "implemented in pipeline command"
+ )
- return self.execute_command('DEL', names[0])
+ return self.execute_command("DEL", names[0])
def block_pipeline_command(func):
@@ -1928,7 +1951,8 @@ def block_pipeline_command(func):
def inner(*args, **kwargs):
raise RedisClusterException(
f"ERROR: Calling pipelined function {func.__name__} is blocked when "
- f"running redis in cluster mode...")
+ "running redis in cluster mode..."
+ )
return inner
@@ -1936,11 +1960,9 @@ def block_pipeline_command(func):
# Blocked pipeline commands
ClusterPipeline.bitop = block_pipeline_command(RedisCluster.bitop)
ClusterPipeline.brpoplpush = block_pipeline_command(RedisCluster.brpoplpush)
-ClusterPipeline.client_getname = \
- block_pipeline_command(RedisCluster.client_getname)
+ClusterPipeline.client_getname = block_pipeline_command(RedisCluster.client_getname)
ClusterPipeline.client_list = block_pipeline_command(RedisCluster.client_list)
-ClusterPipeline.client_setname = \
- block_pipeline_command(RedisCluster.client_setname)
+ClusterPipeline.client_setname = block_pipeline_command(RedisCluster.client_setname)
ClusterPipeline.config_set = block_pipeline_command(RedisCluster.config_set)
ClusterPipeline.dbsize = block_pipeline_command(RedisCluster.dbsize)
ClusterPipeline.flushall = block_pipeline_command(RedisCluster.flushall)
@@ -1972,8 +1994,7 @@ ClusterPipeline.readonly = block_pipeline_command(RedisCluster.readonly)
class PipelineCommand:
- """
- """
+ """ """
def __init__(self, args, options=None, position=None):
self.args = args
@@ -1987,20 +2008,17 @@ class PipelineCommand:
class NodeCommands:
- """
- """
+ """ """
def __init__(self, parse_response, connection_pool, connection):
- """
- """
+ """ """
self.parse_response = parse_response
self.connection_pool = connection_pool
self.connection = connection
self.commands = []
def append(self, c):
- """
- """
+ """ """
self.commands.append(c)
def write(self):
@@ -2019,14 +2037,14 @@ class NodeCommands:
# send all the commands and catch connection and timeout errors.
try:
connection.send_packed_command(
- connection.pack_commands([c.args for c in commands]))
+ connection.pack_commands([c.args for c in commands])
+ )
except (ConnectionError, TimeoutError) as e:
for c in commands:
c.result = e
def read(self):
- """
- """
+ """ """
connection = self.connection
for c in self.commands:
@@ -2050,8 +2068,7 @@ class NodeCommands:
# explicitly open the connection and all will be well.
if c.result is None:
try:
- c.result = self.parse_response(
- connection, c.args[0], **c.options)
+ c.result = self.parse_response(connection, c.args[0], **c.options)
except (ConnectionError, TimeoutError) as e:
for c in self.commands:
c.result = e
diff --git a/redis/commands/__init__.py b/redis/commands/__init__.py
index a4728d0..bc1e78c 100644
--- a/redis/commands/__init__.py
+++ b/redis/commands/__init__.py
@@ -6,10 +6,10 @@ from .redismodules import RedisModuleCommands
from .sentinel import SentinelCommands
__all__ = [
- 'ClusterCommands',
- 'CommandsParser',
- 'CoreCommands',
- 'list_or_args',
- 'RedisModuleCommands',
- 'SentinelCommands'
+ "ClusterCommands",
+ "CommandsParser",
+ "CoreCommands",
+ "list_or_args",
+ "RedisModuleCommands",
+ "SentinelCommands",
]
diff --git a/redis/commands/cluster.py b/redis/commands/cluster.py
index e6b0a08..0df073a 100644
--- a/redis/commands/cluster.py
+++ b/redis/commands/cluster.py
@@ -1,9 +1,6 @@
-from redis.exceptions import (
- ConnectionError,
- DataError,
- RedisError,
-)
from redis.crc import key_slot
+from redis.exceptions import ConnectionError, DataError, RedisError
+
from .core import DataAccessCommands
from .helpers import list_or_args
@@ -36,6 +33,7 @@ class ClusterMultiKeyCommands:
"""
from redis.client import EMPTY_RESPONSE
+
options = {}
if not args:
options[EMPTY_RESPONSE] = []
@@ -50,8 +48,7 @@ class ClusterMultiKeyCommands:
# We must make sure that the keys are returned in order
all_results = {}
for slot_keys in slots_to_keys.values():
- slot_values = self.execute_command(
- 'MGET', *slot_keys, **options)
+ slot_values = self.execute_command("MGET", *slot_keys, **options)
slot_results = dict(zip(slot_keys, slot_values))
all_results.update(slot_results)
@@ -83,7 +80,7 @@ class ClusterMultiKeyCommands:
# the results (one result per slot)
res = []
for pairs in slots_to_pairs.values():
- res.append(self.execute_command('MSET', *pairs))
+ res.append(self.execute_command("MSET", *pairs))
return res
@@ -108,7 +105,7 @@ class ClusterMultiKeyCommands:
whole cluster. The keys are first split up into slots
and then an EXISTS command is sent for every slot
"""
- return self._split_command_across_slots('EXISTS', *keys)
+ return self._split_command_across_slots("EXISTS", *keys)
def delete(self, *keys):
"""
@@ -119,7 +116,7 @@ class ClusterMultiKeyCommands:
Non-existant keys are ignored.
Returns the number of keys that were deleted.
"""
- return self._split_command_across_slots('DEL', *keys)
+ return self._split_command_across_slots("DEL", *keys)
def touch(self, *keys):
"""
@@ -132,7 +129,7 @@ class ClusterMultiKeyCommands:
Non-existant keys are ignored.
Returns the number of keys that were touched.
"""
- return self._split_command_across_slots('TOUCH', *keys)
+ return self._split_command_across_slots("TOUCH", *keys)
def unlink(self, *keys):
"""
@@ -144,7 +141,7 @@ class ClusterMultiKeyCommands:
Non-existant keys are ignored.
Returns the number of keys that were unlinked.
"""
- return self._split_command_across_slots('UNLINK', *keys)
+ return self._split_command_across_slots("UNLINK", *keys)
class ClusterManagementCommands:
@@ -166,6 +163,7 @@ class ClusterManagementCommands:
r.bgsave(target_nodes=primary)
r.bgsave(target_nodes='primaries')
"""
+
def bgsave(self, schedule=True, target_nodes=None):
"""
Tell the Redis server to save its data to disk. Unlike save(),
@@ -174,9 +172,7 @@ class ClusterManagementCommands:
pieces = []
if schedule:
pieces.append("SCHEDULE")
- return self.execute_command('BGSAVE',
- *pieces,
- target_nodes=target_nodes)
+ return self.execute_command("BGSAVE", *pieces, target_nodes=target_nodes)
def client_getname(self, target_nodes=None):
"""
@@ -184,8 +180,7 @@ class ClusterManagementCommands:
The result will be a dictionary with the IP and
connection name.
"""
- return self.execute_command('CLIENT GETNAME',
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT GETNAME", target_nodes=target_nodes)
def client_getredir(self, target_nodes=None):
"""Returns the ID (an integer) of the client to whom we are
@@ -193,25 +188,29 @@ class ClusterManagementCommands:
see: https://redis.io/commands/client-getredir
"""
- return self.execute_command('CLIENT GETREDIR',
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT GETREDIR", target_nodes=target_nodes)
def client_id(self, target_nodes=None):
"""Returns the current connection id"""
- return self.execute_command('CLIENT ID',
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT ID", target_nodes=target_nodes)
def client_info(self, target_nodes=None):
"""
Returns information and statistics about the current
client connection.
"""
- return self.execute_command('CLIENT INFO',
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT INFO", target_nodes=target_nodes)
- def client_kill_filter(self, _id=None, _type=None, addr=None,
- skipme=None, laddr=None, user=None,
- target_nodes=None):
+ def client_kill_filter(
+ self,
+ _id=None,
+ _type=None,
+ addr=None,
+ skipme=None,
+ laddr=None,
+ user=None,
+ target_nodes=None,
+ ):
"""
Disconnects client(s) using a variety of filter options
:param id: Kills a client by its unique ID field
@@ -226,35 +225,35 @@ class ClusterManagementCommands:
"""
args = []
if _type is not None:
- client_types = ('normal', 'master', 'slave', 'pubsub')
+ client_types = ("normal", "master", "slave", "pubsub")
if str(_type).lower() not in client_types:
raise DataError(f"CLIENT KILL type must be one of {client_types!r}")
- args.extend((b'TYPE', _type))
+ args.extend((b"TYPE", _type))
if skipme is not None:
if not isinstance(skipme, bool):
raise DataError("CLIENT KILL skipme must be a bool")
if skipme:
- args.extend((b'SKIPME', b'YES'))
+ args.extend((b"SKIPME", b"YES"))
else:
- args.extend((b'SKIPME', b'NO'))
+ args.extend((b"SKIPME", b"NO"))
if _id is not None:
- args.extend((b'ID', _id))
+ args.extend((b"ID", _id))
if addr is not None:
- args.extend((b'ADDR', addr))
+ args.extend((b"ADDR", addr))
if laddr is not None:
- args.extend((b'LADDR', laddr))
+ args.extend((b"LADDR", laddr))
if user is not None:
- args.extend((b'USER', user))
+ args.extend((b"USER", user))
if not args:
- raise DataError("CLIENT KILL <filter> <value> ... ... <filter> "
- "<value> must specify at least one filter")
- return self.execute_command('CLIENT KILL', *args,
- target_nodes=target_nodes)
+ raise DataError(
+ "CLIENT KILL <filter> <value> ... ... <filter> "
+ "<value> must specify at least one filter"
+ )
+ return self.execute_command("CLIENT KILL", *args, target_nodes=target_nodes)
def client_kill(self, address, target_nodes=None):
"Disconnects the client at ``address`` (ip:port)"
- return self.execute_command('CLIENT KILL', address,
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT KILL", address, target_nodes=target_nodes)
def client_list(self, _type=None, target_nodes=None):
"""
@@ -264,15 +263,13 @@ class ClusterManagementCommands:
replica, pubsub)
"""
if _type is not None:
- client_types = ('normal', 'master', 'replica', 'pubsub')
+ client_types = ("normal", "master", "replica", "pubsub")
if str(_type).lower() not in client_types:
raise DataError(f"CLIENT LIST _type must be one of {client_types!r}")
- return self.execute_command('CLIENT LIST',
- b'TYPE',
- _type,
- target_noes=target_nodes)
- return self.execute_command('CLIENT LIST',
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLIENT LIST", b"TYPE", _type, target_nodes=target_nodes
+ )
+ return self.execute_command("CLIENT LIST", target_nodes=target_nodes)
def client_pause(self, timeout, target_nodes=None):
"""
@@ -281,8 +278,9 @@ class ClusterManagementCommands:
"""
if not isinstance(timeout, int):
raise DataError("CLIENT PAUSE timeout must be an integer")
- return self.execute_command('CLIENT PAUSE', str(timeout),
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLIENT PAUSE", str(timeout), target_nodes=target_nodes
+ )
def client_reply(self, reply, target_nodes=None):
"""Enable and disable redis server replies.
@@ -298,16 +296,14 @@ class ClusterManagementCommands:
conftest.py has a client with a timeout.
See https://redis.io/commands/client-reply
"""
- replies = ['ON', 'OFF', 'SKIP']
+ replies = ["ON", "OFF", "SKIP"]
if reply not in replies:
- raise DataError(f'CLIENT REPLY must be one of {replies!r}')
- return self.execute_command("CLIENT REPLY", reply,
- target_nodes=target_nodes)
+ raise DataError(f"CLIENT REPLY must be one of {replies!r}")
+ return self.execute_command("CLIENT REPLY", reply, target_nodes=target_nodes)
def client_setname(self, name, target_nodes=None):
"Sets the current connection name"
- return self.execute_command('CLIENT SETNAME', name,
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT SETNAME", name, target_nodes=target_nodes)
def client_trackinginfo(self, target_nodes=None):
"""
@@ -315,8 +311,7 @@ class ClusterManagementCommands:
use of the server assisted client side cache.
See https://redis.io/commands/client-trackinginfo
"""
- return self.execute_command('CLIENT TRACKINGINFO',
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT TRACKINGINFO", target_nodes=target_nodes)
def client_unblock(self, client_id, error=False, target_nodes=None):
"""
@@ -325,56 +320,50 @@ class ClusterManagementCommands:
If ``error`` is False (default), the client is unblocked using the
regular timeout mechanism.
"""
- args = ['CLIENT UNBLOCK', int(client_id)]
+ args = ["CLIENT UNBLOCK", int(client_id)]
if error:
- args.append(b'ERROR')
+ args.append(b"ERROR")
return self.execute_command(*args, target_nodes=target_nodes)
def client_unpause(self, target_nodes=None):
"""
Unpause all redis clients
"""
- return self.execute_command('CLIENT UNPAUSE',
- target_nodes=target_nodes)
+ return self.execute_command("CLIENT UNPAUSE", target_nodes=target_nodes)
def command(self, target_nodes=None):
"""
Returns dict reply of details about all Redis commands.
"""
- return self.execute_command('COMMAND', target_nodes=target_nodes)
+ return self.execute_command("COMMAND", target_nodes=target_nodes)
def command_count(self, target_nodes=None):
"""
Returns Integer reply of number of total commands in this Redis server.
"""
- return self.execute_command('COMMAND COUNT', target_nodes=target_nodes)
+ return self.execute_command("COMMAND COUNT", target_nodes=target_nodes)
def config_get(self, pattern="*", target_nodes=None):
"""
Return a dictionary of configuration based on the ``pattern``
"""
- return self.execute_command('CONFIG GET',
- pattern,
- target_nodes=target_nodes)
+ return self.execute_command("CONFIG GET", pattern, target_nodes=target_nodes)
def config_resetstat(self, target_nodes=None):
"""Reset runtime statistics"""
- return self.execute_command('CONFIG RESETSTAT',
- target_nodes=target_nodes)
+ return self.execute_command("CONFIG RESETSTAT", target_nodes=target_nodes)
def config_rewrite(self, target_nodes=None):
"""
Rewrite config file with the minimal change to reflect running config.
"""
- return self.execute_command('CONFIG REWRITE',
- target_nodes=target_nodes)
+ return self.execute_command("CONFIG REWRITE", target_nodes=target_nodes)
def config_set(self, name, value, target_nodes=None):
"Set config item ``name`` with ``value``"
- return self.execute_command('CONFIG SET',
- name,
- value,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CONFIG SET", name, value, target_nodes=target_nodes
+ )
def dbsize(self, target_nodes=None):
"""
@@ -383,8 +372,7 @@ class ClusterManagementCommands:
:target_nodes: 'ClusterNode' or 'list(ClusterNodes)'
The node/s to execute the command on
"""
- return self.execute_command('DBSIZE',
- target_nodes=target_nodes)
+ return self.execute_command("DBSIZE", target_nodes=target_nodes)
def debug_object(self, key):
raise NotImplementedError(
@@ -398,8 +386,7 @@ class ClusterManagementCommands:
def echo(self, value, target_nodes):
"""Echo the string back from the server"""
- return self.execute_command('ECHO', value,
- target_nodes=target_nodes)
+ return self.execute_command("ECHO", value, target_nodes=target_nodes)
def flushall(self, asynchronous=False, target_nodes=None):
"""
@@ -411,10 +398,8 @@ class ClusterManagementCommands:
"""
args = []
if asynchronous:
- args.append(b'ASYNC')
- return self.execute_command('FLUSHALL',
- *args,
- target_nodes=target_nodes)
+ args.append(b"ASYNC")
+ return self.execute_command("FLUSHALL", *args, target_nodes=target_nodes)
def flushdb(self, asynchronous=False, target_nodes=None):
"""
@@ -425,10 +410,8 @@ class ClusterManagementCommands:
"""
args = []
if asynchronous:
- args.append(b'ASYNC')
- return self.execute_command('FLUSHDB',
- *args,
- target_nodes=target_nodes)
+ args.append(b"ASYNC")
+ return self.execute_command("FLUSHDB", *args, target_nodes=target_nodes)
def info(self, section=None, target_nodes=None):
"""
@@ -441,24 +424,20 @@ class ClusterManagementCommands:
and will generate ResponseError
"""
if section is None:
- return self.execute_command('INFO',
- target_nodes=target_nodes)
+ return self.execute_command("INFO", target_nodes=target_nodes)
else:
- return self.execute_command('INFO',
- section,
- target_nodes=target_nodes)
+ return self.execute_command("INFO", section, target_nodes=target_nodes)
- def keys(self, pattern='*', target_nodes=None):
+ def keys(self, pattern="*", target_nodes=None):
"Returns a list of keys matching ``pattern``"
- return self.execute_command('KEYS', pattern, target_nodes=target_nodes)
+ return self.execute_command("KEYS", pattern, target_nodes=target_nodes)
def lastsave(self, target_nodes=None):
"""
Return a Python datetime object representing the last time the
Redis database was saved to disk
"""
- return self.execute_command('LASTSAVE',
- target_nodes=target_nodes)
+ return self.execute_command("LASTSAVE", target_nodes=target_nodes)
def memory_doctor(self):
raise NotImplementedError(
@@ -472,18 +451,15 @@ class ClusterManagementCommands:
def memory_malloc_stats(self, target_nodes=None):
"""Return an internal statistics report from the memory allocator."""
- return self.execute_command('MEMORY MALLOC-STATS',
- target_nodes=target_nodes)
+ return self.execute_command("MEMORY MALLOC-STATS", target_nodes=target_nodes)
def memory_purge(self, target_nodes=None):
"""Attempts to purge dirty pages for reclamation by allocator"""
- return self.execute_command('MEMORY PURGE',
- target_nodes=target_nodes)
+ return self.execute_command("MEMORY PURGE", target_nodes=target_nodes)
def memory_stats(self, target_nodes=None):
"""Return a dictionary of memory stats"""
- return self.execute_command('MEMORY STATS',
- target_nodes=target_nodes)
+ return self.execute_command("MEMORY STATS", target_nodes=target_nodes)
def memory_usage(self, key, samples=None):
"""
@@ -496,12 +472,12 @@ class ClusterManagementCommands:
"""
args = []
if isinstance(samples, int):
- args.extend([b'SAMPLES', samples])
- return self.execute_command('MEMORY USAGE', key, *args)
+ args.extend([b"SAMPLES", samples])
+ return self.execute_command("MEMORY USAGE", key, *args)
def object(self, infotype, key):
"""Return the encoding, idletime, or refcount about the key"""
- return self.execute_command('OBJECT', infotype, key, infotype=infotype)
+ return self.execute_command("OBJECT", infotype, key, infotype=infotype)
def ping(self, target_nodes=None):
"""
@@ -509,24 +485,22 @@ class ClusterManagementCommands:
If no target nodes are specified, sent to all nodes and returns True if
the ping was successful across all nodes.
"""
- return self.execute_command('PING',
- target_nodes=target_nodes)
+ return self.execute_command("PING", target_nodes=target_nodes)
def randomkey(self, target_nodes=None):
"""
Returns the name of a random key"
"""
- return self.execute_command('RANDOMKEY', target_nodes=target_nodes)
+ return self.execute_command("RANDOMKEY", target_nodes=target_nodes)
def save(self, target_nodes=None):
"""
Tell the Redis server to save its data to disk,
blocking until the save is complete
"""
- return self.execute_command('SAVE', target_nodes=target_nodes)
+ return self.execute_command("SAVE", target_nodes=target_nodes)
- def scan(self, cursor=0, match=None, count=None, _type=None,
- target_nodes=None):
+ def scan(self, cursor=0, match=None, count=None, _type=None, target_nodes=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
@@ -543,12 +517,12 @@ class ClusterManagementCommands:
"""
pieces = [cursor]
if match is not None:
- pieces.extend([b'MATCH', match])
+ pieces.extend([b"MATCH", match])
if count is not None:
- pieces.extend([b'COUNT', count])
+ pieces.extend([b"COUNT", count])
if _type is not None:
- pieces.extend([b'TYPE', _type])
- return self.execute_command('SCAN', *pieces, target_nodes=target_nodes)
+ pieces.extend([b"TYPE", _type])
+ return self.execute_command("SCAN", *pieces, target_nodes=target_nodes)
def scan_iter(self, match=None, count=None, _type=None, target_nodes=None):
"""
@@ -565,11 +539,15 @@ class ClusterManagementCommands:
HASH, LIST, SET, STREAM, STRING, ZSET
Additionally, Redis modules can expose other types as well.
"""
- cursor = '0'
+ cursor = "0"
while cursor != 0:
- cursor, data = self.scan(cursor=cursor, match=match,
- count=count, _type=_type,
- target_nodes=target_nodes)
+ cursor, data = self.scan(
+ cursor=cursor,
+ match=match,
+ count=count,
+ _type=_type,
+ target_nodes=target_nodes,
+ )
yield from data
def shutdown(self, save=False, nosave=False, target_nodes=None):
@@ -580,12 +558,12 @@ class ClusterManagementCommands:
attempted. The "save" and "nosave" options cannot both be set.
"""
if save and nosave:
- raise DataError('SHUTDOWN save and nosave cannot both be set')
- args = ['SHUTDOWN']
+ raise DataError("SHUTDOWN save and nosave cannot both be set")
+ args = ["SHUTDOWN"]
if save:
- args.append('SAVE')
+ args.append("SAVE")
if nosave:
- args.append('NOSAVE')
+ args.append("NOSAVE")
try:
self.execute_command(*args, target_nodes=target_nodes)
except ConnectionError:
@@ -598,26 +576,32 @@ class ClusterManagementCommands:
Get the entries from the slowlog. If ``num`` is specified, get the
most recent ``num`` items.
"""
- args = ['SLOWLOG GET']
+ args = ["SLOWLOG GET"]
if num is not None:
args.append(num)
- return self.execute_command(*args,
- target_nodes=target_nodes)
+ return self.execute_command(*args, target_nodes=target_nodes)
def slowlog_len(self, target_nodes=None):
"Get the number of items in the slowlog"
- return self.execute_command('SLOWLOG LEN',
- target_nodes=target_nodes)
+ return self.execute_command("SLOWLOG LEN", target_nodes=target_nodes)
def slowlog_reset(self, target_nodes=None):
"Remove all items in the slowlog"
- return self.execute_command('SLOWLOG RESET',
- target_nodes=target_nodes)
-
- def stralgo(self, algo, value1, value2, specific_argument='strings',
- len=False, idx=False, minmatchlen=None, withmatchlen=False,
- target_nodes=None):
+ return self.execute_command("SLOWLOG RESET", target_nodes=target_nodes)
+
+ def stralgo(
+ self,
+ algo,
+ value1,
+ value2,
+ specific_argument="strings",
+ len=False,
+ idx=False,
+ minmatchlen=None,
+ withmatchlen=False,
+ target_nodes=None,
+ ):
"""
Implements complex algorithms that operate on strings.
Right now the only algorithm implemented is the LCS algorithm
@@ -636,40 +620,45 @@ class ClusterManagementCommands:
Can be provided only when ``idx`` set to True.
"""
# check validity
- supported_algo = ['LCS']
+ supported_algo = ["LCS"]
if algo not in supported_algo:
- supported_algos_str = ', '.join(supported_algo)
+ supported_algos_str = ", ".join(supported_algo)
raise DataError(f"The supported algorithms are: {supported_algos_str}")
- if specific_argument not in ['keys', 'strings']:
+ if specific_argument not in ["keys", "strings"]:
raise DataError("specific_argument can be only keys or strings")
if len and idx:
raise DataError("len and idx cannot be provided together.")
pieces = [algo, specific_argument.upper(), value1, value2]
if len:
- pieces.append(b'LEN')
+ pieces.append(b"LEN")
if idx:
- pieces.append(b'IDX')
+ pieces.append(b"IDX")
try:
int(minmatchlen)
- pieces.extend([b'MINMATCHLEN', minmatchlen])
+ pieces.extend([b"MINMATCHLEN", minmatchlen])
except TypeError:
pass
if withmatchlen:
- pieces.append(b'WITHMATCHLEN')
- if specific_argument == 'strings' and target_nodes is None:
- target_nodes = 'default-node'
- return self.execute_command('STRALGO', *pieces, len=len, idx=idx,
- minmatchlen=minmatchlen,
- withmatchlen=withmatchlen,
- target_nodes=target_nodes)
+ pieces.append(b"WITHMATCHLEN")
+ if specific_argument == "strings" and target_nodes is None:
+ target_nodes = "default-node"
+ return self.execute_command(
+ "STRALGO",
+ *pieces,
+ len=len,
+ idx=idx,
+ minmatchlen=minmatchlen,
+ withmatchlen=withmatchlen,
+ target_nodes=target_nodes,
+ )
def time(self, target_nodes=None):
"""
Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).
"""
- return self.execute_command('TIME', target_nodes=target_nodes)
+ return self.execute_command("TIME", target_nodes=target_nodes)
def wait(self, num_replicas, timeout, target_nodes=None):
"""
@@ -680,9 +669,9 @@ class ClusterManagementCommands:
If more than one target node are passed the result will be summed up
"""
- return self.execute_command('WAIT', num_replicas,
- timeout,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "WAIT", num_replicas, timeout, target_nodes=target_nodes
+ )
class ClusterPubSubCommands:
@@ -690,38 +679,44 @@ class ClusterPubSubCommands:
Redis PubSub commands for RedisCluster use.
see https://redis.io/topics/pubsub
"""
+
def publish(self, channel, message, target_nodes=None):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
- return self.execute_command('PUBLISH', channel, message,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "PUBLISH", channel, message, target_nodes=target_nodes
+ )
- def pubsub_channels(self, pattern='*', target_nodes=None):
+ def pubsub_channels(self, pattern="*", target_nodes=None):
"""
Return a list of channels that have at least one subscriber
"""
- return self.execute_command('PUBSUB CHANNELS', pattern,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "PUBSUB CHANNELS", pattern, target_nodes=target_nodes
+ )
def pubsub_numpat(self, target_nodes=None):
"""
Returns the number of subscriptions to patterns
"""
- return self.execute_command('PUBSUB NUMPAT', target_nodes=target_nodes)
+ return self.execute_command("PUBSUB NUMPAT", target_nodes=target_nodes)
def pubsub_numsub(self, *args, target_nodes=None):
"""
Return a list of (channel, number of subscribers) tuples
for each channel given in ``*args``
"""
- return self.execute_command('PUBSUB NUMSUB', *args,
- target_nodes=target_nodes)
+ return self.execute_command("PUBSUB NUMSUB", *args, target_nodes=target_nodes)
-class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
- ClusterPubSubCommands, DataAccessCommands):
+class ClusterCommands(
+ ClusterManagementCommands,
+ ClusterMultiKeyCommands,
+ ClusterPubSubCommands,
+ DataAccessCommands,
+):
"""
Redis Cluster commands
@@ -738,6 +733,7 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
for example:
r.cluster_info(target_nodes='all')
"""
+
def cluster_addslots(self, target_node, *slots):
"""
Assign new hash slots to receiving node. Sends to specified node.
@@ -745,22 +741,23 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
:target_node: 'ClusterNode'
The node to execute the command on
"""
- return self.execute_command('CLUSTER ADDSLOTS', *slots,
- target_nodes=target_node)
+ return self.execute_command(
+ "CLUSTER ADDSLOTS", *slots, target_nodes=target_node
+ )
def cluster_countkeysinslot(self, slot_id):
"""
Return the number of local keys in the specified hash slot
Send to node based on specified slot_id
"""
- return self.execute_command('CLUSTER COUNTKEYSINSLOT', slot_id)
+ return self.execute_command("CLUSTER COUNTKEYSINSLOT", slot_id)
def cluster_count_failure_report(self, node_id):
"""
Return the number of failure reports active for a given node
Sends to a random node
"""
- return self.execute_command('CLUSTER COUNT-FAILURE-REPORTS', node_id)
+ return self.execute_command("CLUSTER COUNT-FAILURE-REPORTS", node_id)
def cluster_delslots(self, *slots):
"""
@@ -769,10 +766,7 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
Returns a list of the results for each processed slot.
"""
- return [
- self.execute_command('CLUSTER DELSLOTS', slot)
- for slot in slots
- ]
+ return [self.execute_command("CLUSTER DELSLOTS", slot) for slot in slots]
def cluster_failover(self, target_node, option=None):
"""
@@ -783,15 +777,16 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
The node to execute the command on
"""
if option:
- if option.upper() not in ['FORCE', 'TAKEOVER']:
+ if option.upper() not in ["FORCE", "TAKEOVER"]:
raise RedisError(
- f'Invalid option for CLUSTER FAILOVER command: {option}')
+ f"Invalid option for CLUSTER FAILOVER command: {option}"
+ )
else:
- return self.execute_command('CLUSTER FAILOVER', option,
- target_nodes=target_node)
+ return self.execute_command(
+ "CLUSTER FAILOVER", option, target_nodes=target_node
+ )
else:
- return self.execute_command('CLUSTER FAILOVER',
- target_nodes=target_node)
+ return self.execute_command("CLUSTER FAILOVER", target_nodes=target_node)
def cluster_info(self, target_nodes=None):
"""
@@ -799,22 +794,23 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
The command will be sent to a random node in the cluster if no target
node is specified.
"""
- return self.execute_command('CLUSTER INFO', target_nodes=target_nodes)
+ return self.execute_command("CLUSTER INFO", target_nodes=target_nodes)
def cluster_keyslot(self, key):
"""
Returns the hash slot of the specified key
Sends to random node in the cluster
"""
- return self.execute_command('CLUSTER KEYSLOT', key)
+ return self.execute_command("CLUSTER KEYSLOT", key)
def cluster_meet(self, host, port, target_nodes=None):
"""
Force a node cluster to handshake with another node.
Sends to specified node.
"""
- return self.execute_command('CLUSTER MEET', host, port,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLUSTER MEET", host, port, target_nodes=target_nodes
+ )
def cluster_nodes(self):
"""
@@ -822,14 +818,15 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
Sends to random node in the cluster
"""
- return self.execute_command('CLUSTER NODES')
+ return self.execute_command("CLUSTER NODES")
def cluster_replicate(self, target_nodes, node_id):
"""
Reconfigure a node as a slave of the specified master node
"""
- return self.execute_command('CLUSTER REPLICATE', node_id,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLUSTER REPLICATE", node_id, target_nodes=target_nodes
+ )
def cluster_reset(self, soft=True, target_nodes=None):
"""
@@ -838,29 +835,29 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
If 'soft' is True then it will send 'SOFT' argument
If 'soft' is False then it will send 'HARD' argument
"""
- return self.execute_command('CLUSTER RESET',
- b'SOFT' if soft else b'HARD',
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLUSTER RESET", b"SOFT" if soft else b"HARD", target_nodes=target_nodes
+ )
def cluster_save_config(self, target_nodes=None):
"""
Forces the node to save cluster state on disk
"""
- return self.execute_command('CLUSTER SAVECONFIG',
- target_nodes=target_nodes)
+ return self.execute_command("CLUSTER SAVECONFIG", target_nodes=target_nodes)
def cluster_get_keys_in_slot(self, slot, num_keys):
"""
Returns the number of keys in the specified cluster slot
"""
- return self.execute_command('CLUSTER GETKEYSINSLOT', slot, num_keys)
+ return self.execute_command("CLUSTER GETKEYSINSLOT", slot, num_keys)
def cluster_set_config_epoch(self, epoch, target_nodes=None):
"""
Set the configuration epoch in a new node
"""
- return self.execute_command('CLUSTER SET-CONFIG-EPOCH', epoch,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLUSTER SET-CONFIG-EPOCH", epoch, target_nodes=target_nodes
+ )
def cluster_setslot(self, target_node, node_id, slot_id, state):
"""
@@ -869,47 +866,48 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
:target_node: 'ClusterNode'
The node to execute the command on
"""
- if state.upper() in ('IMPORTING', 'NODE', 'MIGRATING'):
- return self.execute_command('CLUSTER SETSLOT', slot_id, state,
- node_id, target_nodes=target_node)
- elif state.upper() == 'STABLE':
- raise RedisError('For "stable" state please use '
- 'cluster_setslot_stable')
+ if state.upper() in ("IMPORTING", "NODE", "MIGRATING"):
+ return self.execute_command(
+ "CLUSTER SETSLOT", slot_id, state, node_id, target_nodes=target_node
+ )
+ elif state.upper() == "STABLE":
+ raise RedisError('For "stable" state please use ' "cluster_setslot_stable")
else:
- raise RedisError(f'Invalid slot state: {state}')
+ raise RedisError(f"Invalid slot state: {state}")
def cluster_setslot_stable(self, slot_id):
"""
Clears migrating / importing state from the slot.
It determines by it self what node the slot is in and sends it there.
"""
- return self.execute_command('CLUSTER SETSLOT', slot_id, 'STABLE')
+ return self.execute_command("CLUSTER SETSLOT", slot_id, "STABLE")
def cluster_replicas(self, node_id, target_nodes=None):
"""
Provides a list of replica nodes replicating from the specified primary
target node.
"""
- return self.execute_command('CLUSTER REPLICAS', node_id,
- target_nodes=target_nodes)
+ return self.execute_command(
+ "CLUSTER REPLICAS", node_id, target_nodes=target_nodes
+ )
def cluster_slots(self, target_nodes=None):
"""
Get array of Cluster slot to node mappings
"""
- return self.execute_command('CLUSTER SLOTS', target_nodes=target_nodes)
+ return self.execute_command("CLUSTER SLOTS", target_nodes=target_nodes)
def readonly(self, target_nodes=None):
"""
Enables read queries.
The command will be sent to the default cluster node if target_nodes is
not specified.
- """
- if target_nodes == 'replicas' or target_nodes == 'all':
+ """
+ if target_nodes == "replicas" or target_nodes == "all":
# read_from_replicas will only be enabled if the READONLY command
# is sent to all replicas
self.read_from_replicas = True
- return self.execute_command('READONLY', target_nodes=target_nodes)
+ return self.execute_command("READONLY", target_nodes=target_nodes)
def readwrite(self, target_nodes=None):
"""
@@ -919,4 +917,4 @@ class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
"""
# Reset read from replicas flag
self.read_from_replicas = False
- return self.execute_command('READWRITE', target_nodes=target_nodes)
+ return self.execute_command("READWRITE", target_nodes=target_nodes)
diff --git a/redis/commands/core.py b/redis/commands/core.py
index 0285f80..688e1dd 100644
--- a/redis/commands/core.py
+++ b/redis/commands/core.py
@@ -1,15 +1,11 @@
import datetime
+import hashlib
import time
import warnings
-import hashlib
+
+from redis.exceptions import ConnectionError, DataError, NoScriptError, RedisError
from .helpers import list_or_args
-from redis.exceptions import (
- ConnectionError,
- DataError,
- NoScriptError,
- RedisError,
-)
class ACLCommands:
@@ -17,6 +13,7 @@ class ACLCommands:
Redis Access Control List (ACL) commands.
see: https://redis.io/topics/acl
"""
+
def acl_cat(self, category=None):
"""
Returns a list of categories or commands within a category.
@@ -28,7 +25,7 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-cat
"""
pieces = [category] if category else []
- return self.execute_command('ACL CAT', *pieces)
+ return self.execute_command("ACL CAT", *pieces)
def acl_deluser(self, *username):
"""
@@ -36,7 +33,7 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-deluser
"""
- return self.execute_command('ACL DELUSER', *username)
+ return self.execute_command("ACL DELUSER", *username)
def acl_genpass(self, bits=None):
"""Generate a random password value.
@@ -51,9 +48,10 @@ class ACLCommands:
if b < 0 or b > 4096:
raise ValueError
except ValueError:
- raise DataError('genpass optionally accepts a bits argument, '
- 'between 0 and 4096.')
- return self.execute_command('ACL GENPASS', *pieces)
+ raise DataError(
+ "genpass optionally accepts a bits argument, " "between 0 and 4096."
+ )
+ return self.execute_command("ACL GENPASS", *pieces)
def acl_getuser(self, username):
"""
@@ -63,7 +61,7 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-getuser
"""
- return self.execute_command('ACL GETUSER', username)
+ return self.execute_command("ACL GETUSER", username)
def acl_help(self):
"""The ACL HELP command returns helpful text describing
@@ -71,7 +69,7 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-help
"""
- return self.execute_command('ACL HELP')
+ return self.execute_command("ACL HELP")
def acl_list(self):
"""
@@ -79,7 +77,7 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-list
"""
- return self.execute_command('ACL LIST')
+ return self.execute_command("ACL LIST")
def acl_log(self, count=None):
"""
@@ -92,11 +90,10 @@ class ACLCommands:
args = []
if count is not None:
if not isinstance(count, int):
- raise DataError('ACL LOG count must be an '
- 'integer')
+ raise DataError("ACL LOG count must be an " "integer")
args.append(count)
- return self.execute_command('ACL LOG', *args)
+ return self.execute_command("ACL LOG", *args)
def acl_log_reset(self):
"""
@@ -105,8 +102,8 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-log
"""
- args = [b'RESET']
- return self.execute_command('ACL LOG', *args)
+ args = [b"RESET"]
+ return self.execute_command("ACL LOG", *args)
def acl_load(self):
"""
@@ -117,7 +114,7 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-load
"""
- return self.execute_command('ACL LOAD')
+ return self.execute_command("ACL LOAD")
def acl_save(self):
"""
@@ -128,12 +125,22 @@ class ACLCommands:
For more information check https://redis.io/commands/acl-save
"""
- return self.execute_command('ACL SAVE')
-
- def acl_setuser(self, username, enabled=False, nopass=False,
- passwords=None, hashed_passwords=None, categories=None,
- commands=None, keys=None, reset=False, reset_keys=False,
- reset_passwords=False):
+ return self.execute_command("ACL SAVE")
+
+ def acl_setuser(
+ self,
+ username,
+ enabled=False,
+ nopass=False,
+ passwords=None,
+ hashed_passwords=None,
+ categories=None,
+ commands=None,
+ keys=None,
+ reset=False,
+ reset_keys=False,
+ reset_passwords=False,
+ ):
"""
Create or update an ACL user.
@@ -199,22 +206,23 @@ class ACLCommands:
pieces = [username]
if reset:
- pieces.append(b'reset')
+ pieces.append(b"reset")
if reset_keys:
- pieces.append(b'resetkeys')
+ pieces.append(b"resetkeys")
if reset_passwords:
- pieces.append(b'resetpass')
+ pieces.append(b"resetpass")
if enabled:
- pieces.append(b'on')
+ pieces.append(b"on")
else:
- pieces.append(b'off')
+ pieces.append(b"off")
if (passwords or hashed_passwords) and nopass:
- raise DataError('Cannot set \'nopass\' and supply '
- '\'passwords\' or \'hashed_passwords\'')
+ raise DataError(
+ "Cannot set 'nopass' and supply " "'passwords' or 'hashed_passwords'"
+ )
if passwords:
# as most users will have only one password, allow remove_passwords
@@ -222,13 +230,15 @@ class ACLCommands:
passwords = list_or_args(passwords, [])
for i, password in enumerate(passwords):
password = encoder.encode(password)
- if password.startswith(b'+'):
- pieces.append(b'>%s' % password[1:])
- elif password.startswith(b'-'):
- pieces.append(b'<%s' % password[1:])
+ if password.startswith(b"+"):
+ pieces.append(b">%s" % password[1:])
+ elif password.startswith(b"-"):
+ pieces.append(b"<%s" % password[1:])
else:
- raise DataError(f'Password {i} must be prefixed with a '
- f'"+" to add or a "-" to remove')
+ raise DataError(
+ f"Password {i} must be prefixed with a "
+ f'"+" to add or a "-" to remove'
+ )
if hashed_passwords:
# as most users will have only one password, allow remove_passwords
@@ -236,29 +246,31 @@ class ACLCommands:
hashed_passwords = list_or_args(hashed_passwords, [])
for i, hashed_password in enumerate(hashed_passwords):
hashed_password = encoder.encode(hashed_password)
- if hashed_password.startswith(b'+'):
- pieces.append(b'#%s' % hashed_password[1:])
- elif hashed_password.startswith(b'-'):
- pieces.append(b'!%s' % hashed_password[1:])
+ if hashed_password.startswith(b"+"):
+ pieces.append(b"#%s" % hashed_password[1:])
+ elif hashed_password.startswith(b"-"):
+ pieces.append(b"!%s" % hashed_password[1:])
else:
- raise DataError(f'Hashed password {i} must be prefixed with a '
- f'"+" to add or a "-" to remove')
+ raise DataError(
+ f"Hashed password {i} must be prefixed with a "
+ f'"+" to add or a "-" to remove'
+ )
if nopass:
- pieces.append(b'nopass')
+ pieces.append(b"nopass")
if categories:
for category in categories:
category = encoder.encode(category)
# categories can be prefixed with one of (+@, +, -@, -)
- if category.startswith(b'+@'):
+ if category.startswith(b"+@"):
pieces.append(category)
- elif category.startswith(b'+'):
- pieces.append(b'+@%s' % category[1:])
- elif category.startswith(b'-@'):
+ elif category.startswith(b"+"):
+ pieces.append(b"+@%s" % category[1:])
+ elif category.startswith(b"-@"):
pieces.append(category)
- elif category.startswith(b'-'):
- pieces.append(b'-@%s' % category[1:])
+ elif category.startswith(b"-"):
+ pieces.append(b"-@%s" % category[1:])
else:
raise DataError(
f'Category "{encoder.decode(category, force=True)}" '
@@ -267,7 +279,7 @@ class ACLCommands:
if commands:
for cmd in commands:
cmd = encoder.encode(cmd)
- if not cmd.startswith(b'+') and not cmd.startswith(b'-'):
+ if not cmd.startswith(b"+") and not cmd.startswith(b"-"):
raise DataError(
f'Command "{encoder.decode(cmd, force=True)}" '
'must be prefixed with "+" or "-"'
@@ -277,35 +289,36 @@ class ACLCommands:
if keys:
for key in keys:
key = encoder.encode(key)
- pieces.append(b'~%s' % key)
+ pieces.append(b"~%s" % key)
- return self.execute_command('ACL SETUSER', *pieces)
+ return self.execute_command("ACL SETUSER", *pieces)
def acl_users(self):
"""Returns a list of all registered users on the server.
For more information check https://redis.io/commands/acl-users
"""
- return self.execute_command('ACL USERS')
+ return self.execute_command("ACL USERS")
def acl_whoami(self):
"""Get the username for the current connection
For more information check https://redis.io/commands/acl-whoami
"""
- return self.execute_command('ACL WHOAMI')
+ return self.execute_command("ACL WHOAMI")
class ManagementCommands:
"""
Redis management commands
"""
+
def bgrewriteaof(self):
"""Tell the Redis server to rewrite the AOF file from data in memory.
For more information check https://redis.io/commands/bgrewriteaof
"""
- return self.execute_command('BGREWRITEAOF')
+ return self.execute_command("BGREWRITEAOF")
def bgsave(self, schedule=True):
"""
@@ -317,17 +330,18 @@ class ManagementCommands:
pieces = []
if schedule:
pieces.append("SCHEDULE")
- return self.execute_command('BGSAVE', *pieces)
+ return self.execute_command("BGSAVE", *pieces)
def client_kill(self, address):
"""Disconnects the client at ``address`` (ip:port)
For more information check https://redis.io/commands/client-kill
"""
- return self.execute_command('CLIENT KILL', address)
+ return self.execute_command("CLIENT KILL", address)
- def client_kill_filter(self, _id=None, _type=None, addr=None,
- skipme=None, laddr=None, user=None):
+ def client_kill_filter(
+ self, _id=None, _type=None, addr=None, skipme=None, laddr=None, user=None
+ ):
"""
Disconnects client(s) using a variety of filter options
:param id: Kills a client by its unique ID field
@@ -342,29 +356,31 @@ class ManagementCommands:
"""
args = []
if _type is not None:
- client_types = ('normal', 'master', 'slave', 'pubsub')
+ client_types = ("normal", "master", "slave", "pubsub")
if str(_type).lower() not in client_types:
raise DataError(f"CLIENT KILL type must be one of {client_types!r}")
- args.extend((b'TYPE', _type))
+ args.extend((b"TYPE", _type))
if skipme is not None:
if not isinstance(skipme, bool):
raise DataError("CLIENT KILL skipme must be a bool")
if skipme:
- args.extend((b'SKIPME', b'YES'))
+ args.extend((b"SKIPME", b"YES"))
else:
- args.extend((b'SKIPME', b'NO'))
+ args.extend((b"SKIPME", b"NO"))
if _id is not None:
- args.extend((b'ID', _id))
+ args.extend((b"ID", _id))
if addr is not None:
- args.extend((b'ADDR', addr))
+ args.extend((b"ADDR", addr))
if laddr is not None:
- args.extend((b'LADDR', laddr))
+ args.extend((b"LADDR", laddr))
if user is not None:
- args.extend((b'USER', user))
+ args.extend((b"USER", user))
if not args:
- raise DataError("CLIENT KILL <filter> <value> ... ... <filter> "
- "<value> must specify at least one filter")
- return self.execute_command('CLIENT KILL', *args)
+ raise DataError(
+ "CLIENT KILL <filter> <value> ... ... <filter> "
+ "<value> must specify at least one filter"
+ )
+ return self.execute_command("CLIENT KILL", *args)
def client_info(self):
"""
@@ -373,7 +389,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/client-info
"""
- return self.execute_command('CLIENT INFO')
+ return self.execute_command("CLIENT INFO")
def client_list(self, _type=None, client_id=[]):
"""
@@ -387,17 +403,17 @@ class ManagementCommands:
"""
args = []
if _type is not None:
- client_types = ('normal', 'master', 'replica', 'pubsub')
+ client_types = ("normal", "master", "replica", "pubsub")
if str(_type).lower() not in client_types:
raise DataError(f"CLIENT LIST _type must be one of {client_types!r}")
- args.append(b'TYPE')
+ args.append(b"TYPE")
args.append(_type)
if not isinstance(client_id, list):
raise DataError("client_id must be a list")
if client_id != []:
args.append(b"ID")
- args.append(' '.join(client_id))
- return self.execute_command('CLIENT LIST', *args)
+ args.append(" ".join(client_id))
+ return self.execute_command("CLIENT LIST", *args)
def client_getname(self):
"""
@@ -405,7 +421,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/client-getname
"""
- return self.execute_command('CLIENT GETNAME')
+ return self.execute_command("CLIENT GETNAME")
def client_getredir(self):
"""
@@ -414,7 +430,7 @@ class ManagementCommands:
see: https://redis.io/commands/client-getredir
"""
- return self.execute_command('CLIENT GETREDIR')
+ return self.execute_command("CLIENT GETREDIR")
def client_reply(self, reply):
"""
@@ -432,9 +448,9 @@ class ManagementCommands:
See https://redis.io/commands/client-reply
"""
- replies = ['ON', 'OFF', 'SKIP']
+ replies = ["ON", "OFF", "SKIP"]
if reply not in replies:
- raise DataError(f'CLIENT REPLY must be one of {replies!r}')
+ raise DataError(f"CLIENT REPLY must be one of {replies!r}")
return self.execute_command("CLIENT REPLY", reply)
def client_id(self):
@@ -443,7 +459,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/client-id
"""
- return self.execute_command('CLIENT ID')
+ return self.execute_command("CLIENT ID")
def client_trackinginfo(self):
"""
@@ -452,7 +468,7 @@ class ManagementCommands:
See https://redis.io/commands/client-trackinginfo
"""
- return self.execute_command('CLIENT TRACKINGINFO')
+ return self.execute_command("CLIENT TRACKINGINFO")
def client_setname(self, name):
"""
@@ -460,7 +476,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/client-setname
"""
- return self.execute_command('CLIENT SETNAME', name)
+ return self.execute_command("CLIENT SETNAME", name)
def client_unblock(self, client_id, error=False):
"""
@@ -471,9 +487,9 @@ class ManagementCommands:
For more information check https://redis.io/commands/client-unblock
"""
- args = ['CLIENT UNBLOCK', int(client_id)]
+ args = ["CLIENT UNBLOCK", int(client_id)]
if error:
- args.append(b'ERROR')
+ args.append(b"ERROR")
return self.execute_command(*args)
def client_pause(self, timeout):
@@ -485,7 +501,7 @@ class ManagementCommands:
"""
if not isinstance(timeout, int):
raise DataError("CLIENT PAUSE timeout must be an integer")
- return self.execute_command('CLIENT PAUSE', str(timeout))
+ return self.execute_command("CLIENT PAUSE", str(timeout))
def client_unpause(self):
"""
@@ -493,7 +509,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/client-unpause
"""
- return self.execute_command('CLIENT UNPAUSE')
+ return self.execute_command("CLIENT UNPAUSE")
def command_info(self):
raise NotImplementedError(
@@ -501,7 +517,7 @@ class ManagementCommands:
)
def command_count(self):
- return self.execute_command('COMMAND COUNT')
+ return self.execute_command("COMMAND COUNT")
def readwrite(self):
"""
@@ -509,7 +525,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/readwrite
"""
- return self.execute_command('READWRITE')
+ return self.execute_command("READWRITE")
def readonly(self):
"""
@@ -517,7 +533,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/readonly
"""
- return self.execute_command('READONLY')
+ return self.execute_command("READONLY")
def config_get(self, pattern="*"):
"""
@@ -525,14 +541,14 @@ class ManagementCommands:
For more information check https://redis.io/commands/config-get
"""
- return self.execute_command('CONFIG GET', pattern)
+ return self.execute_command("CONFIG GET", pattern)
def config_set(self, name, value):
"""Set config item ``name`` with ``value``
For more information check https://redis.io/commands/config-set
"""
- return self.execute_command('CONFIG SET', name, value)
+ return self.execute_command("CONFIG SET", name, value)
def config_resetstat(self):
"""
@@ -540,7 +556,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/config-resetstat
"""
- return self.execute_command('CONFIG RESETSTAT')
+ return self.execute_command("CONFIG RESETSTAT")
def config_rewrite(self):
"""
@@ -548,10 +564,10 @@ class ManagementCommands:
For more information check https://redis.io/commands/config-rewrite
"""
- return self.execute_command('CONFIG REWRITE')
+ return self.execute_command("CONFIG REWRITE")
def cluster(self, cluster_arg, *args):
- return self.execute_command(f'CLUSTER {cluster_arg.upper()}', *args)
+ return self.execute_command(f"CLUSTER {cluster_arg.upper()}", *args)
def dbsize(self):
"""
@@ -559,7 +575,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/dbsize
"""
- return self.execute_command('DBSIZE')
+ return self.execute_command("DBSIZE")
def debug_object(self, key):
"""
@@ -567,7 +583,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/debug-object
"""
- return self.execute_command('DEBUG OBJECT', key)
+ return self.execute_command("DEBUG OBJECT", key)
def debug_segfault(self):
raise NotImplementedError(
@@ -584,7 +600,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/echo
"""
- return self.execute_command('ECHO', value)
+ return self.execute_command("ECHO", value)
def flushall(self, asynchronous=False):
"""
@@ -597,8 +613,8 @@ class ManagementCommands:
"""
args = []
if asynchronous:
- args.append(b'ASYNC')
- return self.execute_command('FLUSHALL', *args)
+ args.append(b"ASYNC")
+ return self.execute_command("FLUSHALL", *args)
def flushdb(self, asynchronous=False):
"""
@@ -611,8 +627,8 @@ class ManagementCommands:
"""
args = []
if asynchronous:
- args.append(b'ASYNC')
- return self.execute_command('FLUSHDB', *args)
+ args.append(b"ASYNC")
+ return self.execute_command("FLUSHDB", *args)
def swapdb(self, first, second):
"""
@@ -620,7 +636,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/swapdb
"""
- return self.execute_command('SWAPDB', first, second)
+ return self.execute_command("SWAPDB", first, second)
def info(self, section=None):
"""
@@ -635,9 +651,9 @@ class ManagementCommands:
For more information check https://redis.io/commands/info
"""
if section is None:
- return self.execute_command('INFO')
+ return self.execute_command("INFO")
else:
- return self.execute_command('INFO', section)
+ return self.execute_command("INFO", section)
def lastsave(self):
"""
@@ -646,7 +662,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/lastsave
"""
- return self.execute_command('LASTSAVE')
+ return self.execute_command("LASTSAVE")
def lolwut(self, *version_numbers):
"""
@@ -655,12 +671,21 @@ class ManagementCommands:
See: https://redis.io/commands/lolwut
"""
if version_numbers:
- return self.execute_command('LOLWUT VERSION', *version_numbers)
+ return self.execute_command("LOLWUT VERSION", *version_numbers)
else:
- return self.execute_command('LOLWUT')
-
- def migrate(self, host, port, keys, destination_db, timeout,
- copy=False, replace=False, auth=None):
+ return self.execute_command("LOLWUT")
+
+ def migrate(
+ self,
+ host,
+ port,
+ keys,
+ destination_db,
+ timeout,
+ copy=False,
+ replace=False,
+ auth=None,
+ ):
"""
Migrate 1 or more keys from the current Redis server to a different
server specified by the ``host``, ``port`` and ``destination_db``.
@@ -682,25 +707,26 @@ class ManagementCommands:
"""
keys = list_or_args(keys, [])
if not keys:
- raise DataError('MIGRATE requires at least one key')
+ raise DataError("MIGRATE requires at least one key")
pieces = []
if copy:
- pieces.append(b'COPY')
+ pieces.append(b"COPY")
if replace:
- pieces.append(b'REPLACE')
+ pieces.append(b"REPLACE")
if auth:
- pieces.append(b'AUTH')
+ pieces.append(b"AUTH")
pieces.append(auth)
- pieces.append(b'KEYS')
+ pieces.append(b"KEYS")
pieces.extend(keys)
- return self.execute_command('MIGRATE', host, port, '', destination_db,
- timeout, *pieces)
+ return self.execute_command(
+ "MIGRATE", host, port, "", destination_db, timeout, *pieces
+ )
def object(self, infotype, key):
"""
Return the encoding, idletime, or refcount about the key
"""
- return self.execute_command('OBJECT', infotype, key, infotype=infotype)
+ return self.execute_command("OBJECT", infotype, key, infotype=infotype)
def memory_doctor(self):
raise NotImplementedError(
@@ -726,7 +752,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/memory-stats
"""
- return self.execute_command('MEMORY STATS')
+ return self.execute_command("MEMORY STATS")
def memory_malloc_stats(self):
"""
@@ -734,7 +760,7 @@ class ManagementCommands:
See: https://redis.io/commands/memory-malloc-stats
"""
- return self.execute_command('MEMORY MALLOC-STATS')
+ return self.execute_command("MEMORY MALLOC-STATS")
def memory_usage(self, key, samples=None):
"""
@@ -749,8 +775,8 @@ class ManagementCommands:
"""
args = []
if isinstance(samples, int):
- args.extend([b'SAMPLES', samples])
- return self.execute_command('MEMORY USAGE', key, *args)
+ args.extend([b"SAMPLES", samples])
+ return self.execute_command("MEMORY USAGE", key, *args)
def memory_purge(self):
"""
@@ -758,7 +784,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/memory-purge
"""
- return self.execute_command('MEMORY PURGE')
+ return self.execute_command("MEMORY PURGE")
def ping(self):
"""
@@ -766,7 +792,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/ping
"""
- return self.execute_command('PING')
+ return self.execute_command("PING")
def quit(self):
"""
@@ -774,7 +800,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/quit
"""
- return self.execute_command('QUIT')
+ return self.execute_command("QUIT")
def replicaof(self, *args):
"""
@@ -785,7 +811,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/replicaof
"""
- return self.execute_command('REPLICAOF', *args)
+ return self.execute_command("REPLICAOF", *args)
def save(self):
"""
@@ -794,7 +820,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/save
"""
- return self.execute_command('SAVE')
+ return self.execute_command("SAVE")
def shutdown(self, save=False, nosave=False):
"""Shutdown the Redis server. If Redis has persistence configured,
@@ -806,12 +832,12 @@ class ManagementCommands:
For more information check https://redis.io/commands/shutdown
"""
if save and nosave:
- raise DataError('SHUTDOWN save and nosave cannot both be set')
- args = ['SHUTDOWN']
+ raise DataError("SHUTDOWN save and nosave cannot both be set")
+ args = ["SHUTDOWN"]
if save:
- args.append('SAVE')
+ args.append("SAVE")
if nosave:
- args.append('NOSAVE')
+ args.append("NOSAVE")
try:
self.execute_command(*args)
except ConnectionError:
@@ -828,8 +854,8 @@ class ManagementCommands:
For more information check https://redis.io/commands/slaveof
"""
if host is None and port is None:
- return self.execute_command('SLAVEOF', b'NO', b'ONE')
- return self.execute_command('SLAVEOF', host, port)
+ return self.execute_command("SLAVEOF", b"NO", b"ONE")
+ return self.execute_command("SLAVEOF", host, port)
def slowlog_get(self, num=None):
"""
@@ -838,11 +864,12 @@ class ManagementCommands:
For more information check https://redis.io/commands/slowlog-get
"""
- args = ['SLOWLOG GET']
+ args = ["SLOWLOG GET"]
if num is not None:
args.append(num)
decode_responses = self.connection_pool.connection_kwargs.get(
- 'decode_responses', False)
+ "decode_responses", False
+ )
return self.execute_command(*args, decode_responses=decode_responses)
def slowlog_len(self):
@@ -851,7 +878,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/slowlog-len
"""
- return self.execute_command('SLOWLOG LEN')
+ return self.execute_command("SLOWLOG LEN")
def slowlog_reset(self):
"""
@@ -859,7 +886,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/slowlog-reset
"""
- return self.execute_command('SLOWLOG RESET')
+ return self.execute_command("SLOWLOG RESET")
def time(self):
"""
@@ -868,7 +895,7 @@ class ManagementCommands:
For more information check https://redis.io/commands/time
"""
- return self.execute_command('TIME')
+ return self.execute_command("TIME")
def wait(self, num_replicas, timeout):
"""
@@ -879,13 +906,14 @@ class ManagementCommands:
For more information check https://redis.io/commands/wait
"""
- return self.execute_command('WAIT', num_replicas, timeout)
+ return self.execute_command("WAIT", num_replicas, timeout)
class BasicKeyCommands:
"""
Redis basic key-based commands
"""
+
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
@@ -894,7 +922,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/append
"""
- return self.execute_command('APPEND', key, value)
+ return self.execute_command("APPEND", key, value)
def bitcount(self, key, start=None, end=None):
"""
@@ -907,10 +935,9 @@ class BasicKeyCommands:
if start is not None and end is not None:
params.append(start)
params.append(end)
- elif (start is not None and end is None) or \
- (end is not None and start is None):
+ elif (start is not None and end is None) or (end is not None and start is None):
raise DataError("Both start and end must be specified")
- return self.execute_command('BITCOUNT', *params)
+ return self.execute_command("BITCOUNT", *params)
def bitfield(self, key, default_overflow=None):
"""
@@ -928,7 +955,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/bitop
"""
- return self.execute_command('BITOP', operation, dest, *keys)
+ return self.execute_command("BITOP", operation, dest, *keys)
def bitpos(self, key, bit, start=None, end=None):
"""
@@ -940,7 +967,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/bitpos
"""
if bit not in (0, 1):
- raise DataError('bit must be 0 or 1')
+ raise DataError("bit must be 0 or 1")
params = [key, bit]
start is not None and params.append(start)
@@ -948,9 +975,8 @@ class BasicKeyCommands:
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
- raise DataError("start argument is not set, "
- "when end is specified")
- return self.execute_command('BITPOS', *params)
+ raise DataError("start argument is not set, " "when end is specified")
+ return self.execute_command("BITPOS", *params)
def copy(self, source, destination, destination_db=None, replace=False):
"""
@@ -970,7 +996,7 @@ class BasicKeyCommands:
params.extend(["DB", destination_db])
if replace:
params.append("REPLACE")
- return self.execute_command('COPY', *params)
+ return self.execute_command("COPY", *params)
def decr(self, name, amount=1):
"""
@@ -990,13 +1016,13 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/decrby
"""
- return self.execute_command('DECRBY', name, amount)
+ return self.execute_command("DECRBY", name, amount)
def delete(self, *names):
"""
Delete one or more keys specified by ``names``
"""
- return self.execute_command('DEL', *names)
+ return self.execute_command("DEL", *names)
def __delitem__(self, name):
self.delete(name)
@@ -1009,9 +1035,10 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/dump
"""
from redis.client import NEVER_DECODE
+
options = {}
options[NEVER_DECODE] = []
- return self.execute_command('DUMP', name, **options)
+ return self.execute_command("DUMP", name, **options)
def exists(self, *names):
"""
@@ -1019,7 +1046,8 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/exists
"""
- return self.execute_command('EXISTS', *names)
+ return self.execute_command("EXISTS", *names)
+
__contains__ = exists
def expire(self, name, time):
@@ -1031,7 +1059,7 @@ class BasicKeyCommands:
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
- return self.execute_command('EXPIRE', name, time)
+ return self.execute_command("EXPIRE", name, time)
def expireat(self, name, when):
"""
@@ -1042,7 +1070,7 @@ class BasicKeyCommands:
"""
if isinstance(when, datetime.datetime):
when = int(time.mktime(when.timetuple()))
- return self.execute_command('EXPIREAT', name, when)
+ return self.execute_command("EXPIREAT", name, when)
def get(self, name):
"""
@@ -1050,7 +1078,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/get
"""
- return self.execute_command('GET', name)
+ return self.execute_command("GET", name)
def getdel(self, name):
"""
@@ -1061,10 +1089,9 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/getdel
"""
- return self.execute_command('GETDEL', name)
+ return self.execute_command("GETDEL", name)
- def getex(self, name,
- ex=None, px=None, exat=None, pxat=None, persist=False):
+ def getex(self, name, ex=None, px=None, exat=None, pxat=None, persist=False):
"""
Get the value of key and optionally set its expiration.
GETEX is similar to GET, but is a write command with
@@ -1088,38 +1115,40 @@ class BasicKeyCommands:
opset = {ex, px, exat, pxat}
if len(opset) > 2 or len(opset) > 1 and persist:
- raise DataError("``ex``, ``px``, ``exat``, ``pxat``, "
- "and ``persist`` are mutually exclusive.")
+ raise DataError(
+ "``ex``, ``px``, ``exat``, ``pxat``, "
+ "and ``persist`` are mutually exclusive."
+ )
pieces = []
# similar to set command
if ex is not None:
- pieces.append('EX')
+ pieces.append("EX")
if isinstance(ex, datetime.timedelta):
ex = int(ex.total_seconds())
pieces.append(ex)
if px is not None:
- pieces.append('PX')
+ pieces.append("PX")
if isinstance(px, datetime.timedelta):
px = int(px.total_seconds() * 1000)
pieces.append(px)
# similar to pexpireat command
if exat is not None:
- pieces.append('EXAT')
+ pieces.append("EXAT")
if isinstance(exat, datetime.datetime):
s = int(exat.microsecond / 1000000)
exat = int(time.mktime(exat.timetuple())) + s
pieces.append(exat)
if pxat is not None:
- pieces.append('PXAT')
+ pieces.append("PXAT")
if isinstance(pxat, datetime.datetime):
ms = int(pxat.microsecond / 1000)
pxat = int(time.mktime(pxat.timetuple())) * 1000 + ms
pieces.append(pxat)
if persist:
- pieces.append('PERSIST')
+ pieces.append("PERSIST")
- return self.execute_command('GETEX', name, *pieces)
+ return self.execute_command("GETEX", name, *pieces)
def __getitem__(self, name):
"""
@@ -1137,7 +1166,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/getbit
"""
- return self.execute_command('GETBIT', name, offset)
+ return self.execute_command("GETBIT", name, offset)
def getrange(self, key, start, end):
"""
@@ -1146,7 +1175,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/getrange
"""
- return self.execute_command('GETRANGE', key, start, end)
+ return self.execute_command("GETRANGE", key, start, end)
def getset(self, name, value):
"""
@@ -1158,7 +1187,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/getset
"""
- return self.execute_command('GETSET', name, value)
+ return self.execute_command("GETSET", name, value)
def incr(self, name, amount=1):
"""
@@ -1178,7 +1207,7 @@ class BasicKeyCommands:
"""
# An alias for ``incr()``, because it is already implemented
# as INCRBY redis command.
- return self.execute_command('INCRBY', name, amount)
+ return self.execute_command("INCRBY", name, amount)
def incrbyfloat(self, name, amount=1.0):
"""
@@ -1187,15 +1216,15 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/incrbyfloat
"""
- return self.execute_command('INCRBYFLOAT', name, amount)
+ return self.execute_command("INCRBYFLOAT", name, amount)
- def keys(self, pattern='*'):
+ def keys(self, pattern="*"):
"""
Returns a list of keys matching ``pattern``
For more information check https://redis.io/commands/keys
"""
- return self.execute_command('KEYS', pattern)
+ return self.execute_command("KEYS", pattern)
def lmove(self, first_list, second_list, src="LEFT", dest="RIGHT"):
"""
@@ -1208,8 +1237,7 @@ class BasicKeyCommands:
params = [first_list, second_list, src, dest]
return self.execute_command("LMOVE", *params)
- def blmove(self, first_list, second_list, timeout,
- src="LEFT", dest="RIGHT"):
+ def blmove(self, first_list, second_list, timeout, src="LEFT", dest="RIGHT"):
"""
Blocking version of lmove.
@@ -1225,11 +1253,12 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/mget
"""
from redis.client import EMPTY_RESPONSE
+
args = list_or_args(keys, args)
options = {}
if not args:
options[EMPTY_RESPONSE] = []
- return self.execute_command('MGET', *args, **options)
+ return self.execute_command("MGET", *args, **options)
def mset(self, mapping):
"""
@@ -1242,7 +1271,7 @@ class BasicKeyCommands:
items = []
for pair in mapping.items():
items.extend(pair)
- return self.execute_command('MSET', *items)
+ return self.execute_command("MSET", *items)
def msetnx(self, mapping):
"""
@@ -1256,7 +1285,7 @@ class BasicKeyCommands:
items = []
for pair in mapping.items():
items.extend(pair)
- return self.execute_command('MSETNX', *items)
+ return self.execute_command("MSETNX", *items)
def move(self, name, db):
"""
@@ -1264,7 +1293,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/move
"""
- return self.execute_command('MOVE', name, db)
+ return self.execute_command("MOVE", name, db)
def persist(self, name):
"""
@@ -1272,7 +1301,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/persist
"""
- return self.execute_command('PERSIST', name)
+ return self.execute_command("PERSIST", name)
def pexpire(self, name, time):
"""
@@ -1284,7 +1313,7 @@ class BasicKeyCommands:
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds() * 1000)
- return self.execute_command('PEXPIRE', name, time)
+ return self.execute_command("PEXPIRE", name, time)
def pexpireat(self, name, when):
"""
@@ -1297,7 +1326,7 @@ class BasicKeyCommands:
if isinstance(when, datetime.datetime):
ms = int(when.microsecond / 1000)
when = int(time.mktime(when.timetuple())) * 1000 + ms
- return self.execute_command('PEXPIREAT', name, when)
+ return self.execute_command("PEXPIREAT", name, when)
def psetex(self, name, time_ms, value):
"""
@@ -1309,7 +1338,7 @@ class BasicKeyCommands:
"""
if isinstance(time_ms, datetime.timedelta):
time_ms = int(time_ms.total_seconds() * 1000)
- return self.execute_command('PSETEX', name, time_ms, value)
+ return self.execute_command("PSETEX", name, time_ms, value)
def pttl(self, name):
"""
@@ -1317,7 +1346,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/pttl
"""
- return self.execute_command('PTTL', name)
+ return self.execute_command("PTTL", name)
def hrandfield(self, key, count=None, withvalues=False):
"""
@@ -1347,7 +1376,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/randomkey
"""
- return self.execute_command('RANDOMKEY')
+ return self.execute_command("RANDOMKEY")
def rename(self, src, dst):
"""
@@ -1355,7 +1384,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/rename
"""
- return self.execute_command('RENAME', src, dst)
+ return self.execute_command("RENAME", src, dst)
def renamenx(self, src, dst):
"""
@@ -1363,10 +1392,18 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/renamenx
"""
- return self.execute_command('RENAMENX', src, dst)
+ return self.execute_command("RENAMENX", src, dst)
- def restore(self, name, ttl, value, replace=False, absttl=False,
- idletime=None, frequency=None):
+ def restore(
+ self,
+ name,
+ ttl,
+ value,
+ replace=False,
+ absttl=False,
+ idletime=None,
+ frequency=None,
+ ):
"""
Create a key using the provided serialized value, previously obtained
using DUMP.
@@ -1388,28 +1425,38 @@ class BasicKeyCommands:
"""
params = [name, ttl, value]
if replace:
- params.append('REPLACE')
+ params.append("REPLACE")
if absttl:
- params.append('ABSTTL')
+ params.append("ABSTTL")
if idletime is not None:
- params.append('IDLETIME')
+ params.append("IDLETIME")
try:
params.append(int(idletime))
except ValueError:
raise DataError("idletimemust be an integer")
if frequency is not None:
- params.append('FREQ')
+ params.append("FREQ")
try:
params.append(int(frequency))
except ValueError:
raise DataError("frequency must be an integer")
- return self.execute_command('RESTORE', *params)
-
- def set(self, name, value,
- ex=None, px=None, nx=False, xx=False, keepttl=False, get=False,
- exat=None, pxat=None):
+ return self.execute_command("RESTORE", *params)
+
+ def set(
+ self,
+ name,
+ value,
+ ex=None,
+ px=None,
+ nx=False,
+ xx=False,
+ keepttl=False,
+ get=False,
+ exat=None,
+ pxat=None,
+ ):
"""
Set the value at key ``name`` to ``value``
@@ -1441,7 +1488,7 @@ class BasicKeyCommands:
pieces = [name, value]
options = {}
if ex is not None:
- pieces.append('EX')
+ pieces.append("EX")
if isinstance(ex, datetime.timedelta):
pieces.append(int(ex.total_seconds()))
elif isinstance(ex, int):
@@ -1449,7 +1496,7 @@ class BasicKeyCommands:
else:
raise DataError("ex must be datetime.timedelta or int")
if px is not None:
- pieces.append('PX')
+ pieces.append("PX")
if isinstance(px, datetime.timedelta):
pieces.append(int(px.total_seconds() * 1000))
elif isinstance(px, int):
@@ -1457,30 +1504,30 @@ class BasicKeyCommands:
else:
raise DataError("px must be datetime.timedelta or int")
if exat is not None:
- pieces.append('EXAT')
+ pieces.append("EXAT")
if isinstance(exat, datetime.datetime):
s = int(exat.microsecond / 1000000)
exat = int(time.mktime(exat.timetuple())) + s
pieces.append(exat)
if pxat is not None:
- pieces.append('PXAT')
+ pieces.append("PXAT")
if isinstance(pxat, datetime.datetime):
ms = int(pxat.microsecond / 1000)
pxat = int(time.mktime(pxat.timetuple())) * 1000 + ms
pieces.append(pxat)
if keepttl:
- pieces.append('KEEPTTL')
+ pieces.append("KEEPTTL")
if nx:
- pieces.append('NX')
+ pieces.append("NX")
if xx:
- pieces.append('XX')
+ pieces.append("XX")
if get:
- pieces.append('GET')
+ pieces.append("GET")
options["get"] = True
- return self.execute_command('SET', *pieces, **options)
+ return self.execute_command("SET", *pieces, **options)
def __setitem__(self, name, value):
self.set(name, value)
@@ -1493,7 +1540,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/setbit
"""
value = value and 1 or 0
- return self.execute_command('SETBIT', name, offset, value)
+ return self.execute_command("SETBIT", name, offset, value)
def setex(self, name, time, value):
"""
@@ -1505,7 +1552,7 @@ class BasicKeyCommands:
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
- return self.execute_command('SETEX', name, time, value)
+ return self.execute_command("SETEX", name, time, value)
def setnx(self, name, value):
"""
@@ -1513,7 +1560,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/setnx
"""
- return self.execute_command('SETNX', name, value)
+ return self.execute_command("SETNX", name, value)
def setrange(self, name, offset, value):
"""
@@ -1528,10 +1575,19 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/setrange
"""
- return self.execute_command('SETRANGE', name, offset, value)
+ return self.execute_command("SETRANGE", name, offset, value)
- def stralgo(self, algo, value1, value2, specific_argument='strings',
- len=False, idx=False, minmatchlen=None, withmatchlen=False):
+ def stralgo(
+ self,
+ algo,
+ value1,
+ value2,
+ specific_argument="strings",
+ len=False,
+ idx=False,
+ minmatchlen=None,
+ withmatchlen=False,
+ ):
"""
Implements complex algorithms that operate on strings.
Right now the only algorithm implemented is the LCS algorithm
@@ -1552,31 +1608,36 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/stralgo
"""
# check validity
- supported_algo = ['LCS']
+ supported_algo = ["LCS"]
if algo not in supported_algo:
- supported_algos_str = ', '.join(supported_algo)
+ supported_algos_str = ", ".join(supported_algo)
raise DataError(f"The supported algorithms are: {supported_algos_str}")
- if specific_argument not in ['keys', 'strings']:
+ if specific_argument not in ["keys", "strings"]:
raise DataError("specific_argument can be only keys or strings")
if len and idx:
raise DataError("len and idx cannot be provided together.")
pieces = [algo, specific_argument.upper(), value1, value2]
if len:
- pieces.append(b'LEN')
+ pieces.append(b"LEN")
if idx:
- pieces.append(b'IDX')
+ pieces.append(b"IDX")
try:
int(minmatchlen)
- pieces.extend([b'MINMATCHLEN', minmatchlen])
+ pieces.extend([b"MINMATCHLEN", minmatchlen])
except TypeError:
pass
if withmatchlen:
- pieces.append(b'WITHMATCHLEN')
-
- return self.execute_command('STRALGO', *pieces, len=len, idx=idx,
- minmatchlen=minmatchlen,
- withmatchlen=withmatchlen)
+ pieces.append(b"WITHMATCHLEN")
+
+ return self.execute_command(
+ "STRALGO",
+ *pieces,
+ len=len,
+ idx=idx,
+ minmatchlen=minmatchlen,
+ withmatchlen=withmatchlen,
+ )
def strlen(self, name):
"""
@@ -1584,14 +1645,14 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/strlen
"""
- return self.execute_command('STRLEN', name)
+ return self.execute_command("STRLEN", name)
def substr(self, name, start, end=-1):
"""
Return a substring of the string at key ``name``. ``start`` and ``end``
are 0-based integers specifying the portion of the string to return.
"""
- return self.execute_command('SUBSTR', name, start, end)
+ return self.execute_command("SUBSTR", name, start, end)
def touch(self, *args):
"""
@@ -1600,7 +1661,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/touch
"""
- return self.execute_command('TOUCH', *args)
+ return self.execute_command("TOUCH", *args)
def ttl(self, name):
"""
@@ -1608,7 +1669,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/ttl
"""
- return self.execute_command('TTL', name)
+ return self.execute_command("TTL", name)
def type(self, name):
"""
@@ -1616,7 +1677,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/type
"""
- return self.execute_command('TYPE', name)
+ return self.execute_command("TYPE", name)
def watch(self, *names):
"""
@@ -1624,7 +1685,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/type
"""
- warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
+ warnings.warn(DeprecationWarning("Call WATCH from a Pipeline object"))
def unwatch(self):
"""
@@ -1632,8 +1693,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/unwatch
"""
- warnings.warn(
- DeprecationWarning('Call UNWATCH from a Pipeline object'))
+ warnings.warn(DeprecationWarning("Call UNWATCH from a Pipeline object"))
def unlink(self, *names):
"""
@@ -1641,7 +1701,7 @@ class BasicKeyCommands:
For more information check https://redis.io/commands/unlink
"""
- return self.execute_command('UNLINK', *names)
+ return self.execute_command("UNLINK", *names)
class ListCommands:
@@ -1649,6 +1709,7 @@ class ListCommands:
Redis commands for List data type.
see: https://redis.io/topics/data-types#lists
"""
+
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
@@ -1666,7 +1727,7 @@ class ListCommands:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
- return self.execute_command('BLPOP', *keys)
+ return self.execute_command("BLPOP", *keys)
def brpop(self, keys, timeout=0):
"""
@@ -1685,7 +1746,7 @@ class ListCommands:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
- return self.execute_command('BRPOP', *keys)
+ return self.execute_command("BRPOP", *keys)
def brpoplpush(self, src, dst, timeout=0):
"""
@@ -1700,7 +1761,7 @@ class ListCommands:
"""
if timeout is None:
timeout = 0
- return self.execute_command('BRPOPLPUSH', src, dst, timeout)
+ return self.execute_command("BRPOPLPUSH", src, dst, timeout)
def lindex(self, name, index):
"""
@@ -1711,7 +1772,7 @@ class ListCommands:
For more information check https://redis.io/commands/lindex
"""
- return self.execute_command('LINDEX', name, index)
+ return self.execute_command("LINDEX", name, index)
def linsert(self, name, where, refvalue, value):
"""
@@ -1723,7 +1784,7 @@ class ListCommands:
For more information check https://redis.io/commands/linsert
"""
- return self.execute_command('LINSERT', name, where, refvalue, value)
+ return self.execute_command("LINSERT", name, where, refvalue, value)
def llen(self, name):
"""
@@ -1731,7 +1792,7 @@ class ListCommands:
For more information check https://redis.io/commands/llen
"""
- return self.execute_command('LLEN', name)
+ return self.execute_command("LLEN", name)
def lpop(self, name, count=None):
"""
@@ -1744,9 +1805,9 @@ class ListCommands:
For more information check https://redis.io/commands/lpop
"""
if count is not None:
- return self.execute_command('LPOP', name, count)
+ return self.execute_command("LPOP", name, count)
else:
- return self.execute_command('LPOP', name)
+ return self.execute_command("LPOP", name)
def lpush(self, name, *values):
"""
@@ -1754,7 +1815,7 @@ class ListCommands:
For more information check https://redis.io/commands/lpush
"""
- return self.execute_command('LPUSH', name, *values)
+ return self.execute_command("LPUSH", name, *values)
def lpushx(self, name, *values):
"""
@@ -1762,7 +1823,7 @@ class ListCommands:
For more information check https://redis.io/commands/lpushx
"""
- return self.execute_command('LPUSHX', name, *values)
+ return self.execute_command("LPUSHX", name, *values)
def lrange(self, name, start, end):
"""
@@ -1774,7 +1835,7 @@ class ListCommands:
For more information check https://redis.io/commands/lrange
"""
- return self.execute_command('LRANGE', name, start, end)
+ return self.execute_command("LRANGE", name, start, end)
def lrem(self, name, count, value):
"""
@@ -1788,7 +1849,7 @@ class ListCommands:
For more information check https://redis.io/commands/lrem
"""
- return self.execute_command('LREM', name, count, value)
+ return self.execute_command("LREM", name, count, value)
def lset(self, name, index, value):
"""
@@ -1796,7 +1857,7 @@ class ListCommands:
For more information check https://redis.io/commands/lset
"""
- return self.execute_command('LSET', name, index, value)
+ return self.execute_command("LSET", name, index, value)
def ltrim(self, name, start, end):
"""
@@ -1808,7 +1869,7 @@ class ListCommands:
For more information check https://redis.io/commands/ltrim
"""
- return self.execute_command('LTRIM', name, start, end)
+ return self.execute_command("LTRIM", name, start, end)
def rpop(self, name, count=None):
"""
@@ -1821,9 +1882,9 @@ class ListCommands:
For more information check https://redis.io/commands/rpop
"""
if count is not None:
- return self.execute_command('RPOP', name, count)
+ return self.execute_command("RPOP", name, count)
else:
- return self.execute_command('RPOP', name)
+ return self.execute_command("RPOP", name)
def rpoplpush(self, src, dst):
"""
@@ -1832,7 +1893,7 @@ class ListCommands:
For more information check https://redis.io/commands/rpoplpush
"""
- return self.execute_command('RPOPLPUSH', src, dst)
+ return self.execute_command("RPOPLPUSH", src, dst)
def rpush(self, name, *values):
"""
@@ -1840,7 +1901,7 @@ class ListCommands:
For more information check https://redis.io/commands/rpush
"""
- return self.execute_command('RPUSH', name, *values)
+ return self.execute_command("RPUSH", name, *values)
def rpushx(self, name, value):
"""
@@ -1848,7 +1909,7 @@ class ListCommands:
For more information check https://redis.io/commands/rpushx
"""
- return self.execute_command('RPUSHX', name, value)
+ return self.execute_command("RPUSHX", name, value)
def lpos(self, name, value, rank=None, count=None, maxlen=None):
"""
@@ -1878,18 +1939,28 @@ class ListCommands:
"""
pieces = [name, value]
if rank is not None:
- pieces.extend(['RANK', rank])
+ pieces.extend(["RANK", rank])
if count is not None:
- pieces.extend(['COUNT', count])
+ pieces.extend(["COUNT", count])
if maxlen is not None:
- pieces.extend(['MAXLEN', maxlen])
-
- return self.execute_command('LPOS', *pieces)
-
- def sort(self, name, start=None, num=None, by=None, get=None,
- desc=False, alpha=False, store=None, groups=False):
+ pieces.extend(["MAXLEN", maxlen])
+
+ return self.execute_command("LPOS", *pieces)
+
+ def sort(
+ self,
+ name,
+ start=None,
+ num=None,
+ by=None,
+ get=None,
+ desc=False,
+ alpha=False,
+ store=None,
+ groups=False,
+ ):
"""
Sort and return the list, set or sorted set at ``name``.
@@ -1915,39 +1986,40 @@ class ListCommands:
For more information check https://redis.io/commands/sort
"""
- if (start is not None and num is None) or \
- (num is not None and start is None):
+ if (start is not None and num is None) or (num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
- pieces.extend([b'BY', by])
+ pieces.extend([b"BY", by])
if start is not None and num is not None:
- pieces.extend([b'LIMIT', start, num])
+ pieces.extend([b"LIMIT", start, num])
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an interable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, (bytes, str)):
- pieces.extend([b'GET', get])
+ pieces.extend([b"GET", get])
else:
for g in get:
- pieces.extend([b'GET', g])
+ pieces.extend([b"GET", g])
if desc:
- pieces.append(b'DESC')
+ pieces.append(b"DESC")
if alpha:
- pieces.append(b'ALPHA')
+ pieces.append(b"ALPHA")
if store is not None:
- pieces.extend([b'STORE', store])
+ pieces.extend([b"STORE", store])
if groups:
if not get or isinstance(get, (bytes, str)) or len(get) < 2:
- raise DataError('when using "groups" the "get" argument '
- 'must be specified and contain at least '
- 'two keys')
+ raise DataError(
+ 'when using "groups" the "get" argument '
+ "must be specified and contain at least "
+ "two keys"
+ )
- options = {'groups': len(get) if groups else None}
- return self.execute_command('SORT', *pieces, **options)
+ options = {"groups": len(get) if groups else None}
+ return self.execute_command("SORT", *pieces, **options)
class ScanCommands:
@@ -1955,6 +2027,7 @@ class ScanCommands:
Redis SCAN commands.
see: https://redis.io/commands/scan
"""
+
def scan(self, cursor=0, match=None, count=None, _type=None):
"""
Incrementally return lists of key names. Also return a cursor
@@ -1974,12 +2047,12 @@ class ScanCommands:
"""
pieces = [cursor]
if match is not None:
- pieces.extend([b'MATCH', match])
+ pieces.extend([b"MATCH", match])
if count is not None:
- pieces.extend([b'COUNT', count])
+ pieces.extend([b"COUNT", count])
if _type is not None:
- pieces.extend([b'TYPE', _type])
- return self.execute_command('SCAN', *pieces)
+ pieces.extend([b"TYPE", _type])
+ return self.execute_command("SCAN", *pieces)
def scan_iter(self, match=None, count=None, _type=None):
"""
@@ -1996,10 +2069,11 @@ class ScanCommands:
HASH, LIST, SET, STREAM, STRING, ZSET
Additionally, Redis modules can expose other types as well.
"""
- cursor = '0'
+ cursor = "0"
while cursor != 0:
- cursor, data = self.scan(cursor=cursor, match=match,
- count=count, _type=_type)
+ cursor, data = self.scan(
+ cursor=cursor, match=match, count=count, _type=_type
+ )
yield from data
def sscan(self, name, cursor=0, match=None, count=None):
@@ -2015,10 +2089,10 @@ class ScanCommands:
"""
pieces = [name, cursor]
if match is not None:
- pieces.extend([b'MATCH', match])
+ pieces.extend([b"MATCH", match])
if count is not None:
- pieces.extend([b'COUNT', count])
- return self.execute_command('SSCAN', *pieces)
+ pieces.extend([b"COUNT", count])
+ return self.execute_command("SSCAN", *pieces)
def sscan_iter(self, name, match=None, count=None):
"""
@@ -2029,10 +2103,9 @@ class ScanCommands:
``count`` allows for hint the minimum number of returns
"""
- cursor = '0'
+ cursor = "0"
while cursor != 0:
- cursor, data = self.sscan(name, cursor=cursor,
- match=match, count=count)
+ cursor, data = self.sscan(name, cursor=cursor, match=match, count=count)
yield from data
def hscan(self, name, cursor=0, match=None, count=None):
@@ -2048,10 +2121,10 @@ class ScanCommands:
"""
pieces = [name, cursor]
if match is not None:
- pieces.extend([b'MATCH', match])
+ pieces.extend([b"MATCH", match])
if count is not None:
- pieces.extend([b'COUNT', count])
- return self.execute_command('HSCAN', *pieces)
+ pieces.extend([b"COUNT", count])
+ return self.execute_command("HSCAN", *pieces)
def hscan_iter(self, name, match=None, count=None):
"""
@@ -2062,14 +2135,12 @@ class ScanCommands:
``count`` allows for hint the minimum number of returns
"""
- cursor = '0'
+ cursor = "0"
while cursor != 0:
- cursor, data = self.hscan(name, cursor=cursor,
- match=match, count=count)
+ cursor, data = self.hscan(name, cursor=cursor, match=match, count=count)
yield from data.items()
- def zscan(self, name, cursor=0, match=None, count=None,
- score_cast_func=float):
+ def zscan(self, name, cursor=0, match=None, count=None, score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
@@ -2084,14 +2155,13 @@ class ScanCommands:
"""
pieces = [name, cursor]
if match is not None:
- pieces.extend([b'MATCH', match])
+ pieces.extend([b"MATCH", match])
if count is not None:
- pieces.extend([b'COUNT', count])
- options = {'score_cast_func': score_cast_func}
- return self.execute_command('ZSCAN', *pieces, **options)
+ pieces.extend([b"COUNT", count])
+ options = {"score_cast_func": score_cast_func}
+ return self.execute_command("ZSCAN", *pieces, **options)
- def zscan_iter(self, name, match=None, count=None,
- score_cast_func=float):
+ def zscan_iter(self, name, match=None, count=None, score_cast_func=float):
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
@@ -2102,11 +2172,15 @@ class ScanCommands:
``score_cast_func`` a callable used to cast the score return value
"""
- cursor = '0'
+ cursor = "0"
while cursor != 0:
- cursor, data = self.zscan(name, cursor=cursor, match=match,
- count=count,
- score_cast_func=score_cast_func)
+ cursor, data = self.zscan(
+ name,
+ cursor=cursor,
+ match=match,
+ count=count,
+ score_cast_func=score_cast_func,
+ )
yield from data
@@ -2115,13 +2189,14 @@ class SetCommands:
Redis commands for Set data type.
see: https://redis.io/topics/data-types#sets
"""
+
def sadd(self, name, *values):
"""
Add ``value(s)`` to set ``name``
For more information check https://redis.io/commands/sadd
"""
- return self.execute_command('SADD', name, *values)
+ return self.execute_command("SADD", name, *values)
def scard(self, name):
"""
@@ -2129,7 +2204,7 @@ class SetCommands:
For more information check https://redis.io/commands/scard
"""
- return self.execute_command('SCARD', name)
+ return self.execute_command("SCARD", name)
def sdiff(self, keys, *args):
"""
@@ -2138,7 +2213,7 @@ class SetCommands:
For more information check https://redis.io/commands/sdiff
"""
args = list_or_args(keys, args)
- return self.execute_command('SDIFF', *args)
+ return self.execute_command("SDIFF", *args)
def sdiffstore(self, dest, keys, *args):
"""
@@ -2148,7 +2223,7 @@ class SetCommands:
For more information check https://redis.io/commands/sdiffstore
"""
args = list_or_args(keys, args)
- return self.execute_command('SDIFFSTORE', dest, *args)
+ return self.execute_command("SDIFFSTORE", dest, *args)
def sinter(self, keys, *args):
"""
@@ -2157,7 +2232,7 @@ class SetCommands:
For more information check https://redis.io/commands/sinter
"""
args = list_or_args(keys, args)
- return self.execute_command('SINTER', *args)
+ return self.execute_command("SINTER", *args)
def sinterstore(self, dest, keys, *args):
"""
@@ -2167,7 +2242,7 @@ class SetCommands:
For more information check https://redis.io/commands/sinterstore
"""
args = list_or_args(keys, args)
- return self.execute_command('SINTERSTORE', dest, *args)
+ return self.execute_command("SINTERSTORE", dest, *args)
def sismember(self, name, value):
"""
@@ -2175,7 +2250,7 @@ class SetCommands:
For more information check https://redis.io/commands/sismember
"""
- return self.execute_command('SISMEMBER', name, value)
+ return self.execute_command("SISMEMBER", name, value)
def smembers(self, name):
"""
@@ -2183,7 +2258,7 @@ class SetCommands:
For more information check https://redis.io/commands/smembers
"""
- return self.execute_command('SMEMBERS', name)
+ return self.execute_command("SMEMBERS", name)
def smismember(self, name, values, *args):
"""
@@ -2193,7 +2268,7 @@ class SetCommands:
For more information check https://redis.io/commands/smismember
"""
args = list_or_args(values, args)
- return self.execute_command('SMISMEMBER', name, *args)
+ return self.execute_command("SMISMEMBER", name, *args)
def smove(self, src, dst, value):
"""
@@ -2201,7 +2276,7 @@ class SetCommands:
For more information check https://redis.io/commands/smove
"""
- return self.execute_command('SMOVE', src, dst, value)
+ return self.execute_command("SMOVE", src, dst, value)
def spop(self, name, count=None):
"""
@@ -2210,7 +2285,7 @@ class SetCommands:
For more information check https://redis.io/commands/spop
"""
args = (count is not None) and [count] or []
- return self.execute_command('SPOP', name, *args)
+ return self.execute_command("SPOP", name, *args)
def srandmember(self, name, number=None):
"""
@@ -2223,7 +2298,7 @@ class SetCommands:
For more information check https://redis.io/commands/srandmember
"""
args = (number is not None) and [number] or []
- return self.execute_command('SRANDMEMBER', name, *args)
+ return self.execute_command("SRANDMEMBER", name, *args)
def srem(self, name, *values):
"""
@@ -2231,7 +2306,7 @@ class SetCommands:
For more information check https://redis.io/commands/srem
"""
- return self.execute_command('SREM', name, *values)
+ return self.execute_command("SREM", name, *values)
def sunion(self, keys, *args):
"""
@@ -2240,7 +2315,7 @@ class SetCommands:
For more information check https://redis.io/commands/sunion
"""
args = list_or_args(keys, args)
- return self.execute_command('SUNION', *args)
+ return self.execute_command("SUNION", *args)
def sunionstore(self, dest, keys, *args):
"""
@@ -2250,7 +2325,7 @@ class SetCommands:
For more information check https://redis.io/commands/sunionstore
"""
args = list_or_args(keys, args)
- return self.execute_command('SUNIONSTORE', dest, *args)
+ return self.execute_command("SUNIONSTORE", dest, *args)
class StreamCommands:
@@ -2258,6 +2333,7 @@ class StreamCommands:
Redis commands for Stream data type.
see: https://redis.io/topics/streams-intro
"""
+
def xack(self, name, groupname, *ids):
"""
Acknowledges the successful processing of one or more messages.
@@ -2267,10 +2343,19 @@ class StreamCommands:
For more information check https://redis.io/commands/xack
"""
- return self.execute_command('XACK', name, groupname, *ids)
+ return self.execute_command("XACK", name, groupname, *ids)
- def xadd(self, name, fields, id='*', maxlen=None, approximate=True,
- nomkstream=False, minid=None, limit=None):
+ def xadd(
+ self,
+ name,
+ fields,
+ id="*",
+ maxlen=None,
+ approximate=True,
+ nomkstream=False,
+ minid=None,
+ limit=None,
+ ):
"""
Add to a stream.
name: name of the stream
@@ -2288,34 +2373,43 @@ class StreamCommands:
"""
pieces = []
if maxlen is not None and minid is not None:
- raise DataError("Only one of ```maxlen``` or ```minid``` "
- "may be specified")
+ raise DataError(
+ "Only one of ```maxlen``` or ```minid``` " "may be specified"
+ )
if maxlen is not None:
if not isinstance(maxlen, int) or maxlen < 1:
- raise DataError('XADD maxlen must be a positive integer')
- pieces.append(b'MAXLEN')
+ raise DataError("XADD maxlen must be a positive integer")
+ pieces.append(b"MAXLEN")
if approximate:
- pieces.append(b'~')
+ pieces.append(b"~")
pieces.append(str(maxlen))
if minid is not None:
- pieces.append(b'MINID')
+ pieces.append(b"MINID")
if approximate:
- pieces.append(b'~')
+ pieces.append(b"~")
pieces.append(minid)
if limit is not None:
- pieces.extend([b'LIMIT', limit])
+ pieces.extend([b"LIMIT", limit])
if nomkstream:
- pieces.append(b'NOMKSTREAM')
+ pieces.append(b"NOMKSTREAM")
pieces.append(id)
if not isinstance(fields, dict) or len(fields) == 0:
- raise DataError('XADD fields must be a non-empty dict')
+ raise DataError("XADD fields must be a non-empty dict")
for pair in fields.items():
pieces.extend(pair)
- return self.execute_command('XADD', name, *pieces)
-
- def xautoclaim(self, name, groupname, consumername, min_idle_time,
- start_id=0, count=None, justid=False):
+ return self.execute_command("XADD", name, *pieces)
+
+ def xautoclaim(
+ self,
+ name,
+ groupname,
+ consumername,
+ min_idle_time,
+ start_id=0,
+ count=None,
+ justid=False,
+ ):
"""
Transfers ownership of pending stream entries that match the specified
criteria. Conceptually, equivalent to calling XPENDING and then XCLAIM,
@@ -2336,8 +2430,9 @@ class StreamCommands:
"""
try:
if int(min_idle_time) < 0:
- raise DataError("XAUTOCLAIM min_idle_time must be a non"
- "negative integer")
+ raise DataError(
+ "XAUTOCLAIM min_idle_time must be a non" "negative integer"
+ )
except TypeError:
pass
@@ -2347,18 +2442,28 @@ class StreamCommands:
try:
if int(count) < 0:
raise DataError("XPENDING count must be a integer >= 0")
- pieces.extend([b'COUNT', count])
+ pieces.extend([b"COUNT", count])
except TypeError:
pass
if justid:
- pieces.append(b'JUSTID')
- kwargs['parse_justid'] = True
-
- return self.execute_command('XAUTOCLAIM', *pieces, **kwargs)
-
- def xclaim(self, name, groupname, consumername, min_idle_time, message_ids,
- idle=None, time=None, retrycount=None, force=False,
- justid=False):
+ pieces.append(b"JUSTID")
+ kwargs["parse_justid"] = True
+
+ return self.execute_command("XAUTOCLAIM", *pieces, **kwargs)
+
+ def xclaim(
+ self,
+ name,
+ groupname,
+ consumername,
+ min_idle_time,
+ message_ids,
+ idle=None,
+ time=None,
+ retrycount=None,
+ force=False,
+ justid=False,
+ ):
"""
Changes the ownership of a pending message.
name: name of the stream.
@@ -2384,11 +2489,12 @@ class StreamCommands:
For more information check https://redis.io/commands/xclaim
"""
if not isinstance(min_idle_time, int) or min_idle_time < 0:
- raise DataError("XCLAIM min_idle_time must be a non negative "
- "integer")
+ raise DataError("XCLAIM min_idle_time must be a non negative " "integer")
if not isinstance(message_ids, (list, tuple)) or not message_ids:
- raise DataError("XCLAIM message_ids must be a non empty list or "
- "tuple of message IDs to claim")
+ raise DataError(
+ "XCLAIM message_ids must be a non empty list or "
+ "tuple of message IDs to claim"
+ )
kwargs = {}
pieces = [name, groupname, consumername, str(min_idle_time)]
@@ -2397,26 +2503,26 @@ class StreamCommands:
if idle is not None:
if not isinstance(idle, int):
raise DataError("XCLAIM idle must be an integer")
- pieces.extend((b'IDLE', str(idle)))
+ pieces.extend((b"IDLE", str(idle)))
if time is not None:
if not isinstance(time, int):
raise DataError("XCLAIM time must be an integer")
- pieces.extend((b'TIME', str(time)))
+ pieces.extend((b"TIME", str(time)))
if retrycount is not None:
if not isinstance(retrycount, int):
raise DataError("XCLAIM retrycount must be an integer")
- pieces.extend((b'RETRYCOUNT', str(retrycount)))
+ pieces.extend((b"RETRYCOUNT", str(retrycount)))
if force:
if not isinstance(force, bool):
raise DataError("XCLAIM force must be a boolean")
- pieces.append(b'FORCE')
+ pieces.append(b"FORCE")
if justid:
if not isinstance(justid, bool):
raise DataError("XCLAIM justid must be a boolean")
- pieces.append(b'JUSTID')
- kwargs['parse_justid'] = True
- return self.execute_command('XCLAIM', *pieces, **kwargs)
+ pieces.append(b"JUSTID")
+ kwargs["parse_justid"] = True
+ return self.execute_command("XCLAIM", *pieces, **kwargs)
def xdel(self, name, *ids):
"""
@@ -2426,9 +2532,9 @@ class StreamCommands:
For more information check https://redis.io/commands/xdel
"""
- return self.execute_command('XDEL', name, *ids)
+ return self.execute_command("XDEL", name, *ids)
- def xgroup_create(self, name, groupname, id='$', mkstream=False):
+ def xgroup_create(self, name, groupname, id="$", mkstream=False):
"""
Create a new consumer group associated with a stream.
name: name of the stream.
@@ -2437,9 +2543,9 @@ class StreamCommands:
For more information check https://redis.io/commands/xgroup-create
"""
- pieces = ['XGROUP CREATE', name, groupname, id]
+ pieces = ["XGROUP CREATE", name, groupname, id]
if mkstream:
- pieces.append(b'MKSTREAM')
+ pieces.append(b"MKSTREAM")
return self.execute_command(*pieces)
def xgroup_delconsumer(self, name, groupname, consumername):
@@ -2453,8 +2559,7 @@ class StreamCommands:
For more information check https://redis.io/commands/xgroup-delconsumer
"""
- return self.execute_command('XGROUP DELCONSUMER', name, groupname,
- consumername)
+ return self.execute_command("XGROUP DELCONSUMER", name, groupname, consumername)
def xgroup_destroy(self, name, groupname):
"""
@@ -2464,7 +2569,7 @@ class StreamCommands:
For more information check https://redis.io/commands/xgroup-destroy
"""
- return self.execute_command('XGROUP DESTROY', name, groupname)
+ return self.execute_command("XGROUP DESTROY", name, groupname)
def xgroup_createconsumer(self, name, groupname, consumername):
"""
@@ -2477,8 +2582,9 @@ class StreamCommands:
See: https://redis.io/commands/xgroup-createconsumer
"""
- return self.execute_command('XGROUP CREATECONSUMER', name, groupname,
- consumername)
+ return self.execute_command(
+ "XGROUP CREATECONSUMER", name, groupname, consumername
+ )
def xgroup_setid(self, name, groupname, id):
"""
@@ -2489,7 +2595,7 @@ class StreamCommands:
For more information check https://redis.io/commands/xgroup-setid
"""
- return self.execute_command('XGROUP SETID', name, groupname, id)
+ return self.execute_command("XGROUP SETID", name, groupname, id)
def xinfo_consumers(self, name, groupname):
"""
@@ -2499,7 +2605,7 @@ class StreamCommands:
For more information check https://redis.io/commands/xinfo-consumers
"""
- return self.execute_command('XINFO CONSUMERS', name, groupname)
+ return self.execute_command("XINFO CONSUMERS", name, groupname)
def xinfo_groups(self, name):
"""
@@ -2508,7 +2614,7 @@ class StreamCommands:
For more information check https://redis.io/commands/xinfo-groups
"""
- return self.execute_command('XINFO GROUPS', name)
+ return self.execute_command("XINFO GROUPS", name)
def xinfo_stream(self, name, full=False):
"""
@@ -2521,9 +2627,9 @@ class StreamCommands:
pieces = [name]
options = {}
if full:
- pieces.append(b'FULL')
- options = {'full': full}
- return self.execute_command('XINFO STREAM', *pieces, **options)
+ pieces.append(b"FULL")
+ options = {"full": full}
+ return self.execute_command("XINFO STREAM", *pieces, **options)
def xlen(self, name):
"""
@@ -2531,7 +2637,7 @@ class StreamCommands:
For more information check https://redis.io/commands/xlen
"""
- return self.execute_command('XLEN', name)
+ return self.execute_command("XLEN", name)
def xpending(self, name, groupname):
"""
@@ -2541,11 +2647,18 @@ class StreamCommands:
For more information check https://redis.io/commands/xpending
"""
- return self.execute_command('XPENDING', name, groupname)
+ return self.execute_command("XPENDING", name, groupname)
- def xpending_range(self, name, groupname, idle=None,
- min=None, max=None, count=None,
- consumername=None):
+ def xpending_range(
+ self,
+ name,
+ groupname,
+ idle=None,
+ min=None,
+ max=None,
+ count=None,
+ consumername=None,
+ ):
"""
Returns information about pending messages, in a range.
@@ -2560,20 +2673,24 @@ class StreamCommands:
"""
if {min, max, count} == {None}:
if idle is not None or consumername is not None:
- raise DataError("if XPENDING is provided with idle time"
- " or consumername, it must be provided"
- " with min, max and count parameters")
+ raise DataError(
+ "if XPENDING is provided with idle time"
+ " or consumername, it must be provided"
+ " with min, max and count parameters"
+ )
return self.xpending(name, groupname)
pieces = [name, groupname]
if min is None or max is None or count is None:
- raise DataError("XPENDING must be provided with min, max "
- "and count parameters, or none of them.")
+ raise DataError(
+ "XPENDING must be provided with min, max "
+ "and count parameters, or none of them."
+ )
# idle
try:
if int(idle) < 0:
raise DataError("XPENDING idle must be a integer >= 0")
- pieces.extend(['IDLE', idle])
+ pieces.extend(["IDLE", idle])
except TypeError:
pass
# count
@@ -2587,9 +2704,9 @@ class StreamCommands:
if consumername:
pieces.append(consumername)
- return self.execute_command('XPENDING', *pieces, parse_detail=True)
+ return self.execute_command("XPENDING", *pieces, parse_detail=True)
- def xrange(self, name, min='-', max='+', count=None):
+ def xrange(self, name, min="-", max="+", count=None):
"""
Read stream values within an interval.
name: name of the stream.
@@ -2605,11 +2722,11 @@ class StreamCommands:
pieces = [min, max]
if count is not None:
if not isinstance(count, int) or count < 1:
- raise DataError('XRANGE count must be a positive integer')
- pieces.append(b'COUNT')
+ raise DataError("XRANGE count must be a positive integer")
+ pieces.append(b"COUNT")
pieces.append(str(count))
- return self.execute_command('XRANGE', name, *pieces)
+ return self.execute_command("XRANGE", name, *pieces)
def xread(self, streams, count=None, block=None):
"""
@@ -2625,24 +2742,25 @@ class StreamCommands:
pieces = []
if block is not None:
if not isinstance(block, int) or block < 0:
- raise DataError('XREAD block must be a non-negative integer')
- pieces.append(b'BLOCK')
+ raise DataError("XREAD block must be a non-negative integer")
+ pieces.append(b"BLOCK")
pieces.append(str(block))
if count is not None:
if not isinstance(count, int) or count < 1:
- raise DataError('XREAD count must be a positive integer')
- pieces.append(b'COUNT')
+ raise DataError("XREAD count must be a positive integer")
+ pieces.append(b"COUNT")
pieces.append(str(count))
if not isinstance(streams, dict) or len(streams) == 0:
- raise DataError('XREAD streams must be a non empty dict')
- pieces.append(b'STREAMS')
+ raise DataError("XREAD streams must be a non empty dict")
+ pieces.append(b"STREAMS")
keys, values = zip(*streams.items())
pieces.extend(keys)
pieces.extend(values)
- return self.execute_command('XREAD', *pieces)
+ return self.execute_command("XREAD", *pieces)
- def xreadgroup(self, groupname, consumername, streams, count=None,
- block=None, noack=False):
+ def xreadgroup(
+ self, groupname, consumername, streams, count=None, block=None, noack=False
+ ):
"""
Read from a stream via a consumer group.
groupname: name of the consumer group.
@@ -2656,28 +2774,27 @@ class StreamCommands:
For more information check https://redis.io/commands/xreadgroup
"""
- pieces = [b'GROUP', groupname, consumername]
+ pieces = [b"GROUP", groupname, consumername]
if count is not None:
if not isinstance(count, int) or count < 1:
raise DataError("XREADGROUP count must be a positive integer")
- pieces.append(b'COUNT')
+ pieces.append(b"COUNT")
pieces.append(str(count))
if block is not None:
if not isinstance(block, int) or block < 0:
- raise DataError("XREADGROUP block must be a non-negative "
- "integer")
- pieces.append(b'BLOCK')
+ raise DataError("XREADGROUP block must be a non-negative " "integer")
+ pieces.append(b"BLOCK")
pieces.append(str(block))
if noack:
- pieces.append(b'NOACK')
+ pieces.append(b"NOACK")
if not isinstance(streams, dict) or len(streams) == 0:
- raise DataError('XREADGROUP streams must be a non empty dict')
- pieces.append(b'STREAMS')
+ raise DataError("XREADGROUP streams must be a non empty dict")
+ pieces.append(b"STREAMS")
pieces.extend(streams.keys())
pieces.extend(streams.values())
- return self.execute_command('XREADGROUP', *pieces)
+ return self.execute_command("XREADGROUP", *pieces)
- def xrevrange(self, name, max='+', min='-', count=None):
+ def xrevrange(self, name, max="+", min="-", count=None):
"""
Read stream values within an interval, in reverse order.
name: name of the stream
@@ -2693,14 +2810,13 @@ class StreamCommands:
pieces = [max, min]
if count is not None:
if not isinstance(count, int) or count < 1:
- raise DataError('XREVRANGE count must be a positive integer')
- pieces.append(b'COUNT')
+ raise DataError("XREVRANGE count must be a positive integer")
+ pieces.append(b"COUNT")
pieces.append(str(count))
- return self.execute_command('XREVRANGE', name, *pieces)
+ return self.execute_command("XREVRANGE", name, *pieces)
- def xtrim(self, name, maxlen=None, approximate=True, minid=None,
- limit=None):
+ def xtrim(self, name, maxlen=None, approximate=True, minid=None, limit=None):
"""
Trims old messages from a stream.
name: name of the stream.
@@ -2715,15 +2831,14 @@ class StreamCommands:
"""
pieces = []
if maxlen is not None and minid is not None:
- raise DataError("Only one of ``maxlen`` or ``minid`` "
- "may be specified")
+ raise DataError("Only one of ``maxlen`` or ``minid`` " "may be specified")
if maxlen is not None:
- pieces.append(b'MAXLEN')
+ pieces.append(b"MAXLEN")
if minid is not None:
- pieces.append(b'MINID')
+ pieces.append(b"MINID")
if approximate:
- pieces.append(b'~')
+ pieces.append(b"~")
if maxlen is not None:
pieces.append(maxlen)
if minid is not None:
@@ -2732,7 +2847,7 @@ class StreamCommands:
pieces.append(b"LIMIT")
pieces.append(limit)
- return self.execute_command('XTRIM', name, *pieces)
+ return self.execute_command("XTRIM", name, *pieces)
class SortedSetCommands:
@@ -2740,8 +2855,10 @@ class SortedSetCommands:
Redis commands for Sorted Sets data type.
see: https://redis.io/topics/data-types-intro#redis-sorted-sets
"""
- def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False,
- gt=None, lt=None):
+
+ def zadd(
+ self, name, mapping, nx=False, xx=False, ch=False, incr=False, gt=None, lt=None
+ ):
"""
Set any number of element-name, score pairs to the key ``name``. Pairs
are specified as a dict of element-names keys to score values.
@@ -2780,30 +2897,32 @@ class SortedSetCommands:
if nx and xx:
raise DataError("ZADD allows either 'nx' or 'xx', not both")
if incr and len(mapping) != 1:
- raise DataError("ZADD option 'incr' only works when passing a "
- "single element/score pair")
+ raise DataError(
+ "ZADD option 'incr' only works when passing a "
+ "single element/score pair"
+ )
if nx is True and (gt is not None or lt is not None):
raise DataError("Only one of 'nx', 'lt', or 'gr' may be defined.")
pieces = []
options = {}
if nx:
- pieces.append(b'NX')
+ pieces.append(b"NX")
if xx:
- pieces.append(b'XX')
+ pieces.append(b"XX")
if ch:
- pieces.append(b'CH')
+ pieces.append(b"CH")
if incr:
- pieces.append(b'INCR')
- options['as_score'] = True
+ pieces.append(b"INCR")
+ options["as_score"] = True
if gt:
- pieces.append(b'GT')
+ pieces.append(b"GT")
if lt:
- pieces.append(b'LT')
+ pieces.append(b"LT")
for pair in mapping.items():
pieces.append(pair[1])
pieces.append(pair[0])
- return self.execute_command('ZADD', name, *pieces, **options)
+ return self.execute_command("ZADD", name, *pieces, **options)
def zcard(self, name):
"""
@@ -2811,7 +2930,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zcard
"""
- return self.execute_command('ZCARD', name)
+ return self.execute_command("ZCARD", name)
def zcount(self, name, min, max):
"""
@@ -2820,7 +2939,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zcount
"""
- return self.execute_command('ZCOUNT', name, min, max)
+ return self.execute_command("ZCOUNT", name, min, max)
def zdiff(self, keys, withscores=False):
"""
@@ -2850,7 +2969,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zincrby
"""
- return self.execute_command('ZINCRBY', name, amount, value)
+ return self.execute_command("ZINCRBY", name, amount, value)
def zinter(self, keys, aggregate=None, withscores=False):
"""
@@ -2864,8 +2983,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zinter
"""
- return self._zaggregate('ZINTER', None, keys, aggregate,
- withscores=withscores)
+ return self._zaggregate("ZINTER", None, keys, aggregate, withscores=withscores)
def zinterstore(self, dest, keys, aggregate=None):
"""
@@ -2879,7 +2997,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zinterstore
"""
- return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
+ return self._zaggregate("ZINTERSTORE", dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
@@ -2888,7 +3006,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zlexcount
"""
- return self.execute_command('ZLEXCOUNT', name, min, max)
+ return self.execute_command("ZLEXCOUNT", name, min, max)
def zpopmax(self, name, count=None):
"""
@@ -2898,10 +3016,8 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zpopmax
"""
args = (count is not None) and [count] or []
- options = {
- 'withscores': True
- }
- return self.execute_command('ZPOPMAX', name, *args, **options)
+ options = {"withscores": True}
+ return self.execute_command("ZPOPMAX", name, *args, **options)
def zpopmin(self, name, count=None):
"""
@@ -2911,10 +3027,8 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zpopmin
"""
args = (count is not None) and [count] or []
- options = {
- 'withscores': True
- }
- return self.execute_command('ZPOPMIN', name, *args, **options)
+ options = {"withscores": True}
+ return self.execute_command("ZPOPMIN", name, *args, **options)
def zrandmember(self, key, count=None, withscores=False):
"""
@@ -2957,7 +3071,7 @@ class SortedSetCommands:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
- return self.execute_command('BZPOPMAX', *keys)
+ return self.execute_command("BZPOPMAX", *keys)
def bzpopmin(self, keys, timeout=0):
"""
@@ -2976,43 +3090,63 @@ class SortedSetCommands:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
- return self.execute_command('BZPOPMIN', *keys)
-
- def _zrange(self, command, dest, name, start, end, desc=False,
- byscore=False, bylex=False, withscores=False,
- score_cast_func=float, offset=None, num=None):
+ return self.execute_command("BZPOPMIN", *keys)
+
+ def _zrange(
+ self,
+ command,
+ dest,
+ name,
+ start,
+ end,
+ desc=False,
+ byscore=False,
+ bylex=False,
+ withscores=False,
+ score_cast_func=float,
+ offset=None,
+ num=None,
+ ):
if byscore and bylex:
- raise DataError("``byscore`` and ``bylex`` can not be "
- "specified together.")
- if (offset is not None and num is None) or \
- (num is not None and offset is None):
+ raise DataError(
+ "``byscore`` and ``bylex`` can not be " "specified together."
+ )
+ if (offset is not None and num is None) or (num is not None and offset is None):
raise DataError("``offset`` and ``num`` must both be specified.")
if bylex and withscores:
- raise DataError("``withscores`` not supported in combination "
- "with ``bylex``.")
+ raise DataError(
+ "``withscores`` not supported in combination " "with ``bylex``."
+ )
pieces = [command]
if dest:
pieces.append(dest)
pieces.extend([name, start, end])
if byscore:
- pieces.append('BYSCORE')
+ pieces.append("BYSCORE")
if bylex:
- pieces.append('BYLEX')
+ pieces.append("BYLEX")
if desc:
- pieces.append('REV')
+ pieces.append("REV")
if offset is not None and num is not None:
- pieces.extend(['LIMIT', offset, num])
+ pieces.extend(["LIMIT", offset, num])
if withscores:
- pieces.append('WITHSCORES')
- options = {
- 'withscores': withscores,
- 'score_cast_func': score_cast_func
- }
+ pieces.append("WITHSCORES")
+ options = {"withscores": withscores, "score_cast_func": score_cast_func}
return self.execute_command(*pieces, **options)
- def zrange(self, name, start, end, desc=False, withscores=False,
- score_cast_func=float, byscore=False, bylex=False,
- offset=None, num=None):
+ def zrange(
+ self,
+ name,
+ start,
+ end,
+ desc=False,
+ withscores=False,
+ score_cast_func=float,
+ byscore=False,
+ bylex=False,
+ offset=None,
+ num=None,
+ ):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
@@ -3043,16 +3177,25 @@ class SortedSetCommands:
"""
# Need to support ``desc`` also when using old redis version
# because it was supported in 3.5.3 (of redis-py)
- if not byscore and not bylex and (offset is None and num is None) \
- and desc:
- return self.zrevrange(name, start, end, withscores,
- score_cast_func)
-
- return self._zrange('ZRANGE', None, name, start, end, desc, byscore,
- bylex, withscores, score_cast_func, offset, num)
+ if not byscore and not bylex and (offset is None and num is None) and desc:
+ return self.zrevrange(name, start, end, withscores, score_cast_func)
+
+ return self._zrange(
+ "ZRANGE",
+ None,
+ name,
+ start,
+ end,
+ desc,
+ byscore,
+ bylex,
+ withscores,
+ score_cast_func,
+ offset,
+ num,
+ )
- def zrevrange(self, name, start, end, withscores=False,
- score_cast_func=float):
+ def zrevrange(self, name, start, end, withscores=False, score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
@@ -3066,18 +3209,24 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrevrange
"""
- pieces = ['ZREVRANGE', name, start, end]
+ pieces = ["ZREVRANGE", name, start, end]
if withscores:
- pieces.append(b'WITHSCORES')
- options = {
- 'withscores': withscores,
- 'score_cast_func': score_cast_func
- }
+ pieces.append(b"WITHSCORES")
+ options = {"withscores": withscores, "score_cast_func": score_cast_func}
return self.execute_command(*pieces, **options)
- def zrangestore(self, dest, name, start, end,
- byscore=False, bylex=False, desc=False,
- offset=None, num=None):
+ def zrangestore(
+ self,
+ dest,
+ name,
+ start,
+ end,
+ byscore=False,
+ bylex=False,
+ desc=False,
+ offset=None,
+ num=None,
+ ):
"""
Stores in ``dest`` the result of a range of values from sorted set
``name`` between ``start`` and ``end`` sorted in ascending order.
@@ -3101,8 +3250,20 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrangestore
"""
- return self._zrange('ZRANGESTORE', dest, name, start, end, desc,
- byscore, bylex, False, None, offset, num)
+ return self._zrange(
+ "ZRANGESTORE",
+ dest,
+ name,
+ start,
+ end,
+ desc,
+ byscore,
+ bylex,
+ False,
+ None,
+ offset,
+ num,
+ )
def zrangebylex(self, name, min, max, start=None, num=None):
"""
@@ -3114,12 +3275,11 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrangebylex
"""
- if (start is not None and num is None) or \
- (num is not None and start is None):
+ if (start is not None and num is None) or (num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
- pieces = ['ZRANGEBYLEX', name, min, max]
+ pieces = ["ZRANGEBYLEX", name, min, max]
if start is not None and num is not None:
- pieces.extend([b'LIMIT', start, num])
+ pieces.extend([b"LIMIT", start, num])
return self.execute_command(*pieces)
def zrevrangebylex(self, name, max, min, start=None, num=None):
@@ -3132,16 +3292,23 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrevrangebylex
"""
- if (start is not None and num is None) or \
- (num is not None and start is None):
+ if (start is not None and num is None) or (num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
- pieces = ['ZREVRANGEBYLEX', name, max, min]
+ pieces = ["ZREVRANGEBYLEX", name, max, min]
if start is not None and num is not None:
- pieces.extend(['LIMIT', start, num])
+ pieces.extend(["LIMIT", start, num])
return self.execute_command(*pieces)
- def zrangebyscore(self, name, min, max, start=None, num=None,
- withscores=False, score_cast_func=float):
+ def zrangebyscore(
+ self,
+ name,
+ min,
+ max,
+ start=None,
+ num=None,
+ withscores=False,
+ score_cast_func=float,
+ ):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
@@ -3156,22 +3323,26 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrangebyscore
"""
- if (start is not None and num is None) or \
- (num is not None and start is None):
+ if (start is not None and num is None) or (num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
- pieces = ['ZRANGEBYSCORE', name, min, max]
+ pieces = ["ZRANGEBYSCORE", name, min, max]
if start is not None and num is not None:
- pieces.extend(['LIMIT', start, num])
+ pieces.extend(["LIMIT", start, num])
if withscores:
- pieces.append('WITHSCORES')
- options = {
- 'withscores': withscores,
- 'score_cast_func': score_cast_func
- }
+ pieces.append("WITHSCORES")
+ options = {"withscores": withscores, "score_cast_func": score_cast_func}
return self.execute_command(*pieces, **options)
- def zrevrangebyscore(self, name, max, min, start=None, num=None,
- withscores=False, score_cast_func=float):
+ def zrevrangebyscore(
+ self,
+ name,
+ max,
+ min,
+ start=None,
+ num=None,
+ withscores=False,
+ score_cast_func=float,
+ ):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
@@ -3186,18 +3357,14 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrevrangebyscore
"""
- if (start is not None and num is None) or \
- (num is not None and start is None):
+ if (start is not None and num is None) or (num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
- pieces = ['ZREVRANGEBYSCORE', name, max, min]
+ pieces = ["ZREVRANGEBYSCORE", name, max, min]
if start is not None and num is not None:
- pieces.extend(['LIMIT', start, num])
+ pieces.extend(["LIMIT", start, num])
if withscores:
- pieces.append('WITHSCORES')
- options = {
- 'withscores': withscores,
- 'score_cast_func': score_cast_func
- }
+ pieces.append("WITHSCORES")
+ options = {"withscores": withscores, "score_cast_func": score_cast_func}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
@@ -3207,7 +3374,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrank
"""
- return self.execute_command('ZRANK', name, value)
+ return self.execute_command("ZRANK", name, value)
def zrem(self, name, *values):
"""
@@ -3215,7 +3382,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrem
"""
- return self.execute_command('ZREM', name, *values)
+ return self.execute_command("ZREM", name, *values)
def zremrangebylex(self, name, min, max):
"""
@@ -3226,7 +3393,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zremrangebylex
"""
- return self.execute_command('ZREMRANGEBYLEX', name, min, max)
+ return self.execute_command("ZREMRANGEBYLEX", name, min, max)
def zremrangebyrank(self, name, min, max):
"""
@@ -3237,7 +3404,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zremrangebyrank
"""
- return self.execute_command('ZREMRANGEBYRANK', name, min, max)
+ return self.execute_command("ZREMRANGEBYRANK", name, min, max)
def zremrangebyscore(self, name, min, max):
"""
@@ -3246,7 +3413,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zremrangebyscore
"""
- return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
+ return self.execute_command("ZREMRANGEBYSCORE", name, min, max)
def zrevrank(self, name, value):
"""
@@ -3255,7 +3422,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zrevrank
"""
- return self.execute_command('ZREVRANK', name, value)
+ return self.execute_command("ZREVRANK", name, value)
def zscore(self, name, value):
"""
@@ -3263,7 +3430,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zscore
"""
- return self.execute_command('ZSCORE', name, value)
+ return self.execute_command("ZSCORE", name, value)
def zunion(self, keys, aggregate=None, withscores=False):
"""
@@ -3274,8 +3441,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zunion
"""
- return self._zaggregate('ZUNION', None, keys, aggregate,
- withscores=withscores)
+ return self._zaggregate("ZUNION", None, keys, aggregate, withscores=withscores)
def zunionstore(self, dest, keys, aggregate=None):
"""
@@ -3285,7 +3451,7 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zunionstore
"""
- return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
+ return self._zaggregate("ZUNIONSTORE", dest, keys, aggregate)
def zmscore(self, key, members):
"""
@@ -3299,12 +3465,11 @@ class SortedSetCommands:
For more information check https://redis.io/commands/zmscore
"""
if not members:
- raise DataError('ZMSCORE members must be a non-empty list')
+ raise DataError("ZMSCORE members must be a non-empty list")
pieces = [key] + members
- return self.execute_command('ZMSCORE', *pieces)
+ return self.execute_command("ZMSCORE", *pieces)
- def _zaggregate(self, command, dest, keys, aggregate=None,
- **options):
+ def _zaggregate(self, command, dest, keys, aggregate=None, **options):
pieces = [command]
if dest is not None:
pieces.append(dest)
@@ -3315,16 +3480,16 @@ class SortedSetCommands:
weights = None
pieces.extend(keys)
if weights:
- pieces.append(b'WEIGHTS')
+ pieces.append(b"WEIGHTS")
pieces.extend(weights)
if aggregate:
- if aggregate.upper() in ['SUM', 'MIN', 'MAX']:
- pieces.append(b'AGGREGATE')
+ if aggregate.upper() in ["SUM", "MIN", "MAX"]:
+ pieces.append(b"AGGREGATE")
pieces.append(aggregate)
else:
raise DataError("aggregate can be sum, min or max.")
- if options.get('withscores', False):
- pieces.append(b'WITHSCORES')
+ if options.get("withscores", False):
+ pieces.append(b"WITHSCORES")
return self.execute_command(*pieces, **options)
@@ -3333,13 +3498,14 @@ class HyperlogCommands:
Redis commands of HyperLogLogs data type.
see: https://redis.io/topics/data-types-intro#hyperloglogs
"""
+
def pfadd(self, name, *values):
"""
Adds the specified elements to the specified HyperLogLog.
For more information check https://redis.io/commands/pfadd
"""
- return self.execute_command('PFADD', name, *values)
+ return self.execute_command("PFADD", name, *values)
def pfcount(self, *sources):
"""
@@ -3348,7 +3514,7 @@ class HyperlogCommands:
For more information check https://redis.io/commands/pfcount
"""
- return self.execute_command('PFCOUNT', *sources)
+ return self.execute_command("PFCOUNT", *sources)
def pfmerge(self, dest, *sources):
"""
@@ -3356,7 +3522,7 @@ class HyperlogCommands:
For more information check https://redis.io/commands/pfmerge
"""
- return self.execute_command('PFMERGE', dest, *sources)
+ return self.execute_command("PFMERGE", dest, *sources)
class HashCommands:
@@ -3364,13 +3530,14 @@ class HashCommands:
Redis commands for Hash data type.
see: https://redis.io/topics/data-types-intro#redis-hashes
"""
+
def hdel(self, name, *keys):
"""
Delete ``keys`` from hash ``name``
For more information check https://redis.io/commands/hdel
"""
- return self.execute_command('HDEL', name, *keys)
+ return self.execute_command("HDEL", name, *keys)
def hexists(self, name, key):
"""
@@ -3378,7 +3545,7 @@ class HashCommands:
For more information check https://redis.io/commands/hexists
"""
- return self.execute_command('HEXISTS', name, key)
+ return self.execute_command("HEXISTS", name, key)
def hget(self, name, key):
"""
@@ -3386,7 +3553,7 @@ class HashCommands:
For more information check https://redis.io/commands/hget
"""
- return self.execute_command('HGET', name, key)
+ return self.execute_command("HGET", name, key)
def hgetall(self, name):
"""
@@ -3394,7 +3561,7 @@ class HashCommands:
For more information check https://redis.io/commands/hgetall
"""
- return self.execute_command('HGETALL', name)
+ return self.execute_command("HGETALL", name)
def hincrby(self, name, key, amount=1):
"""
@@ -3402,7 +3569,7 @@ class HashCommands:
For more information check https://redis.io/commands/hincrby
"""
- return self.execute_command('HINCRBY', name, key, amount)
+ return self.execute_command("HINCRBY", name, key, amount)
def hincrbyfloat(self, name, key, amount=1.0):
"""
@@ -3410,7 +3577,7 @@ class HashCommands:
For more information check https://redis.io/commands/hincrbyfloat
"""
- return self.execute_command('HINCRBYFLOAT', name, key, amount)
+ return self.execute_command("HINCRBYFLOAT", name, key, amount)
def hkeys(self, name):
"""
@@ -3418,7 +3585,7 @@ class HashCommands:
For more information check https://redis.io/commands/hkeys
"""
- return self.execute_command('HKEYS', name)
+ return self.execute_command("HKEYS", name)
def hlen(self, name):
"""
@@ -3426,7 +3593,7 @@ class HashCommands:
For more information check https://redis.io/commands/hlen
"""
- return self.execute_command('HLEN', name)
+ return self.execute_command("HLEN", name)
def hset(self, name, key=None, value=None, mapping=None):
"""
@@ -3446,7 +3613,7 @@ class HashCommands:
for pair in mapping.items():
items.extend(pair)
- return self.execute_command('HSET', name, *items)
+ return self.execute_command("HSET", name, *items)
def hsetnx(self, name, key, value):
"""
@@ -3455,7 +3622,7 @@ class HashCommands:
For more information check https://redis.io/commands/hsetnx
"""
- return self.execute_command('HSETNX', name, key, value)
+ return self.execute_command("HSETNX", name, key, value)
def hmset(self, name, mapping):
"""
@@ -3465,8 +3632,8 @@ class HashCommands:
For more information check https://redis.io/commands/hmset
"""
warnings.warn(
- f'{self.__class__.__name__}.hmset() is deprecated. '
- f'Use {self.__class__.__name__}.hset() instead.',
+ f"{self.__class__.__name__}.hmset() is deprecated. "
+ f"Use {self.__class__.__name__}.hset() instead.",
DeprecationWarning,
stacklevel=2,
)
@@ -3475,7 +3642,7 @@ class HashCommands:
items = []
for pair in mapping.items():
items.extend(pair)
- return self.execute_command('HMSET', name, *items)
+ return self.execute_command("HMSET", name, *items)
def hmget(self, name, keys, *args):
"""
@@ -3484,7 +3651,7 @@ class HashCommands:
For more information check https://redis.io/commands/hmget
"""
args = list_or_args(keys, args)
- return self.execute_command('HMGET', name, *args)
+ return self.execute_command("HMGET", name, *args)
def hvals(self, name):
"""
@@ -3492,7 +3659,7 @@ class HashCommands:
For more information check https://redis.io/commands/hvals
"""
- return self.execute_command('HVALS', name)
+ return self.execute_command("HVALS", name)
def hstrlen(self, name, key):
"""
@@ -3501,7 +3668,7 @@ class HashCommands:
For more information check https://redis.io/commands/hstrlen
"""
- return self.execute_command('HSTRLEN', name, key)
+ return self.execute_command("HSTRLEN", name, key)
class PubSubCommands:
@@ -3509,6 +3676,7 @@ class PubSubCommands:
Redis PubSub commands.
see https://redis.io/topics/pubsub
"""
+
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
@@ -3516,15 +3684,15 @@ class PubSubCommands:
For more information check https://redis.io/commands/publish
"""
- return self.execute_command('PUBLISH', channel, message)
+ return self.execute_command("PUBLISH", channel, message)
- def pubsub_channels(self, pattern='*'):
+ def pubsub_channels(self, pattern="*"):
"""
Return a list of channels that have at least one subscriber
For more information check https://redis.io/commands/pubsub-channels
"""
- return self.execute_command('PUBSUB CHANNELS', pattern)
+ return self.execute_command("PUBSUB CHANNELS", pattern)
def pubsub_numpat(self):
"""
@@ -3532,7 +3700,7 @@ class PubSubCommands:
For more information check https://redis.io/commands/pubsub-numpat
"""
- return self.execute_command('PUBSUB NUMPAT')
+ return self.execute_command("PUBSUB NUMPAT")
def pubsub_numsub(self, *args):
"""
@@ -3541,7 +3709,7 @@ class PubSubCommands:
For more information check https://redis.io/commands/pubsub-numsub
"""
- return self.execute_command('PUBSUB NUMSUB', *args)
+ return self.execute_command("PUBSUB NUMSUB", *args)
class ScriptCommands:
@@ -3549,6 +3717,7 @@ class ScriptCommands:
Redis Lua script commands. see:
https://redis.com/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/
"""
+
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
@@ -3560,7 +3729,7 @@ class ScriptCommands:
For more information check https://redis.io/commands/eval
"""
- return self.execute_command('EVAL', script, numkeys, *keys_and_args)
+ return self.execute_command("EVAL", script, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""
@@ -3574,7 +3743,7 @@ class ScriptCommands:
For more information check https://redis.io/commands/evalsha
"""
- return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
+ return self.execute_command("EVALSHA", sha, numkeys, *keys_and_args)
def script_exists(self, *args):
"""
@@ -3584,7 +3753,7 @@ class ScriptCommands:
For more information check https://redis.io/commands/script-exists
"""
- return self.execute_command('SCRIPT EXISTS', *args)
+ return self.execute_command("SCRIPT EXISTS", *args)
def script_debug(self, *args):
raise NotImplementedError(
@@ -3600,14 +3769,16 @@ class ScriptCommands:
# Redis pre 6 had no sync_type.
if sync_type not in ["SYNC", "ASYNC", None]:
- raise DataError("SCRIPT FLUSH defaults to SYNC in redis > 6.2, or "
- "accepts SYNC/ASYNC. For older versions, "
- "of redis leave as None.")
+ raise DataError(
+ "SCRIPT FLUSH defaults to SYNC in redis > 6.2, or "
+ "accepts SYNC/ASYNC. For older versions, "
+ "of redis leave as None."
+ )
if sync_type is None:
pieces = []
else:
pieces = [sync_type]
- return self.execute_command('SCRIPT FLUSH', *pieces)
+ return self.execute_command("SCRIPT FLUSH", *pieces)
def script_kill(self):
"""
@@ -3615,7 +3786,7 @@ class ScriptCommands:
For more information check https://redis.io/commands/script-kill
"""
- return self.execute_command('SCRIPT KILL')
+ return self.execute_command("SCRIPT KILL")
def script_load(self, script):
"""
@@ -3623,7 +3794,7 @@ class ScriptCommands:
For more information check https://redis.io/commands/script-load
"""
- return self.execute_command('SCRIPT LOAD', script)
+ return self.execute_command("SCRIPT LOAD", script)
def register_script(self, script):
"""
@@ -3640,6 +3811,7 @@ class GeoCommands:
Redis Geospatial commands.
see: https://redis.com/redis-best-practices/indexing-patterns/geospatial/
"""
+
def geoadd(self, name, values, nx=False, xx=False, ch=False):
"""
Add the specified geospatial items to the specified key identified
@@ -3664,17 +3836,16 @@ class GeoCommands:
if nx and xx:
raise DataError("GEOADD allows either 'nx' or 'xx', not both")
if len(values) % 3 != 0:
- raise DataError("GEOADD requires places with lon, lat and name"
- " values")
+ raise DataError("GEOADD requires places with lon, lat and name" " values")
pieces = [name]
if nx:
- pieces.append('NX')
+ pieces.append("NX")
if xx:
- pieces.append('XX')
+ pieces.append("XX")
if ch:
- pieces.append('CH')
+ pieces.append("CH")
pieces.extend(values)
- return self.execute_command('GEOADD', *pieces)
+ return self.execute_command("GEOADD", *pieces)
def geodist(self, name, place1, place2, unit=None):
"""
@@ -3686,11 +3857,11 @@ class GeoCommands:
For more information check https://redis.io/commands/geodist
"""
pieces = [name, place1, place2]
- if unit and unit not in ('m', 'km', 'mi', 'ft'):
+ if unit and unit not in ("m", "km", "mi", "ft"):
raise DataError("GEODIST invalid unit")
elif unit:
pieces.append(unit)
- return self.execute_command('GEODIST', *pieces)
+ return self.execute_command("GEODIST", *pieces)
def geohash(self, name, *values):
"""
@@ -3699,7 +3870,7 @@ class GeoCommands:
For more information check https://redis.io/commands/geohash
"""
- return self.execute_command('GEOHASH', name, *values)
+ return self.execute_command("GEOHASH", name, *values)
def geopos(self, name, *values):
"""
@@ -3709,11 +3880,24 @@ class GeoCommands:
For more information check https://redis.io/commands/geopos
"""
- return self.execute_command('GEOPOS', name, *values)
-
- def georadius(self, name, longitude, latitude, radius, unit=None,
- withdist=False, withcoord=False, withhash=False, count=None,
- sort=None, store=None, store_dist=None, any=False):
+ return self.execute_command("GEOPOS", name, *values)
+
+ def georadius(
+ self,
+ name,
+ longitude,
+ latitude,
+ radius,
+ unit=None,
+ withdist=False,
+ withcoord=False,
+ withhash=False,
+ count=None,
+ sort=None,
+ store=None,
+ store_dist=None,
+ any=False,
+ ):
"""
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
@@ -3744,17 +3928,38 @@ class GeoCommands:
For more information check https://redis.io/commands/georadius
"""
- return self._georadiusgeneric('GEORADIUS',
- name, longitude, latitude, radius,
- unit=unit, withdist=withdist,
- withcoord=withcoord, withhash=withhash,
- count=count, sort=sort, store=store,
- store_dist=store_dist, any=any)
+ return self._georadiusgeneric(
+ "GEORADIUS",
+ name,
+ longitude,
+ latitude,
+ radius,
+ unit=unit,
+ withdist=withdist,
+ withcoord=withcoord,
+ withhash=withhash,
+ count=count,
+ sort=sort,
+ store=store,
+ store_dist=store_dist,
+ any=any,
+ )
- def georadiusbymember(self, name, member, radius, unit=None,
- withdist=False, withcoord=False, withhash=False,
- count=None, sort=None, store=None, store_dist=None,
- any=False):
+ def georadiusbymember(
+ self,
+ name,
+ member,
+ radius,
+ unit=None,
+ withdist=False,
+ withcoord=False,
+ withhash=False,
+ count=None,
+ sort=None,
+ store=None,
+ store_dist=None,
+ any=False,
+ ):
"""
This command is exactly like ``georadius`` with the sole difference
that instead of taking, as the center of the area to query, a longitude
@@ -3763,61 +3968,85 @@ class GeoCommands:
For more information check https://redis.io/commands/georadiusbymember
"""
- return self._georadiusgeneric('GEORADIUSBYMEMBER',
- name, member, radius, unit=unit,
- withdist=withdist, withcoord=withcoord,
- withhash=withhash, count=count,
- sort=sort, store=store,
- store_dist=store_dist, any=any)
+ return self._georadiusgeneric(
+ "GEORADIUSBYMEMBER",
+ name,
+ member,
+ radius,
+ unit=unit,
+ withdist=withdist,
+ withcoord=withcoord,
+ withhash=withhash,
+ count=count,
+ sort=sort,
+ store=store,
+ store_dist=store_dist,
+ any=any,
+ )
def _georadiusgeneric(self, command, *args, **kwargs):
pieces = list(args)
- if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'):
+ if kwargs["unit"] and kwargs["unit"] not in ("m", "km", "mi", "ft"):
raise DataError("GEORADIUS invalid unit")
- elif kwargs['unit']:
- pieces.append(kwargs['unit'])
+ elif kwargs["unit"]:
+ pieces.append(kwargs["unit"])
else:
- pieces.append('m',)
+ pieces.append(
+ "m",
+ )
- if kwargs['any'] and kwargs['count'] is None:
+ if kwargs["any"] and kwargs["count"] is None:
raise DataError("``any`` can't be provided without ``count``")
for arg_name, byte_repr in (
- ('withdist', 'WITHDIST'),
- ('withcoord', 'WITHCOORD'),
- ('withhash', 'WITHHASH')):
+ ("withdist", "WITHDIST"),
+ ("withcoord", "WITHCOORD"),
+ ("withhash", "WITHHASH"),
+ ):
if kwargs[arg_name]:
pieces.append(byte_repr)
- if kwargs['count'] is not None:
- pieces.extend(['COUNT', kwargs['count']])
- if kwargs['any']:
- pieces.append('ANY')
+ if kwargs["count"] is not None:
+ pieces.extend(["COUNT", kwargs["count"]])
+ if kwargs["any"]:
+ pieces.append("ANY")
- if kwargs['sort']:
- if kwargs['sort'] == 'ASC':
- pieces.append('ASC')
- elif kwargs['sort'] == 'DESC':
- pieces.append('DESC')
+ if kwargs["sort"]:
+ if kwargs["sort"] == "ASC":
+ pieces.append("ASC")
+ elif kwargs["sort"] == "DESC":
+ pieces.append("DESC")
else:
raise DataError("GEORADIUS invalid sort")
- if kwargs['store'] and kwargs['store_dist']:
- raise DataError("GEORADIUS store and store_dist cant be set"
- " together")
+ if kwargs["store"] and kwargs["store_dist"]:
+ raise DataError("GEORADIUS store and store_dist cant be set" " together")
- if kwargs['store']:
- pieces.extend([b'STORE', kwargs['store']])
+ if kwargs["store"]:
+ pieces.extend([b"STORE", kwargs["store"]])
- if kwargs['store_dist']:
- pieces.extend([b'STOREDIST', kwargs['store_dist']])
+ if kwargs["store_dist"]:
+ pieces.extend([b"STOREDIST", kwargs["store_dist"]])
return self.execute_command(command, *pieces, **kwargs)
- def geosearch(self, name, member=None, longitude=None, latitude=None,
- unit='m', radius=None, width=None, height=None, sort=None,
- count=None, any=False, withcoord=False,
- withdist=False, withhash=False):
+ def geosearch(
+ self,
+ name,
+ member=None,
+ longitude=None,
+ latitude=None,
+ unit="m",
+ radius=None,
+ width=None,
+ height=None,
+ sort=None,
+ count=None,
+ any=False,
+ withcoord=False,
+ withdist=False,
+ withhash=False,
+ ):
"""
Return the members of specified key identified by the
``name`` argument, which are within the borders of the
@@ -3853,19 +4082,42 @@ class GeoCommands:
For more information check https://redis.io/commands/geosearch
"""
- return self._geosearchgeneric('GEOSEARCH',
- name, member=member, longitude=longitude,
- latitude=latitude, unit=unit,
- radius=radius, width=width,
- height=height, sort=sort, count=count,
- any=any, withcoord=withcoord,
- withdist=withdist, withhash=withhash,
- store=None, store_dist=None)
+ return self._geosearchgeneric(
+ "GEOSEARCH",
+ name,
+ member=member,
+ longitude=longitude,
+ latitude=latitude,
+ unit=unit,
+ radius=radius,
+ width=width,
+ height=height,
+ sort=sort,
+ count=count,
+ any=any,
+ withcoord=withcoord,
+ withdist=withdist,
+ withhash=withhash,
+ store=None,
+ store_dist=None,
+ )
- def geosearchstore(self, dest, name, member=None, longitude=None,
- latitude=None, unit='m', radius=None, width=None,
- height=None, sort=None, count=None, any=False,
- storedist=False):
+ def geosearchstore(
+ self,
+ dest,
+ name,
+ member=None,
+ longitude=None,
+ latitude=None,
+ unit="m",
+ radius=None,
+ width=None,
+ height=None,
+ sort=None,
+ count=None,
+ any=False,
+ storedist=False,
+ ):
"""
This command is like GEOSEARCH, but stores the result in
``dest``. By default, it stores the results in the destination
@@ -3876,74 +4128,86 @@ class GeoCommands:
For more information check https://redis.io/commands/geosearchstore
"""
- return self._geosearchgeneric('GEOSEARCHSTORE',
- dest, name, member=member,
- longitude=longitude, latitude=latitude,
- unit=unit, radius=radius, width=width,
- height=height, sort=sort, count=count,
- any=any, withcoord=None,
- withdist=None, withhash=None,
- store=None, store_dist=storedist)
+ return self._geosearchgeneric(
+ "GEOSEARCHSTORE",
+ dest,
+ name,
+ member=member,
+ longitude=longitude,
+ latitude=latitude,
+ unit=unit,
+ radius=radius,
+ width=width,
+ height=height,
+ sort=sort,
+ count=count,
+ any=any,
+ withcoord=None,
+ withdist=None,
+ withhash=None,
+ store=None,
+ store_dist=storedist,
+ )
def _geosearchgeneric(self, command, *args, **kwargs):
pieces = list(args)
# FROMMEMBER or FROMLONLAT
- if kwargs['member'] is None:
- if kwargs['longitude'] is None or kwargs['latitude'] is None:
- raise DataError("GEOSEARCH must have member or"
- " longitude and latitude")
- if kwargs['member']:
- if kwargs['longitude'] or kwargs['latitude']:
- raise DataError("GEOSEARCH member and longitude or latitude"
- " cant be set together")
- pieces.extend([b'FROMMEMBER', kwargs['member']])
- if kwargs['longitude'] and kwargs['latitude']:
- pieces.extend([b'FROMLONLAT',
- kwargs['longitude'], kwargs['latitude']])
+ if kwargs["member"] is None:
+ if kwargs["longitude"] is None or kwargs["latitude"] is None:
+ raise DataError(
+ "GEOSEARCH must have member or" " longitude and latitude"
+ )
+ if kwargs["member"]:
+ if kwargs["longitude"] or kwargs["latitude"]:
+ raise DataError(
+ "GEOSEARCH member and longitude or latitude" " cant be set together"
+ )
+ pieces.extend([b"FROMMEMBER", kwargs["member"]])
+ if kwargs["longitude"] and kwargs["latitude"]:
+ pieces.extend([b"FROMLONLAT", kwargs["longitude"], kwargs["latitude"]])
# BYRADIUS or BYBOX
- if kwargs['radius'] is None:
- if kwargs['width'] is None or kwargs['height'] is None:
- raise DataError("GEOSEARCH must have radius or"
- " width and height")
- if kwargs['unit'] is None:
+ if kwargs["radius"] is None:
+ if kwargs["width"] is None or kwargs["height"] is None:
+ raise DataError("GEOSEARCH must have radius or" " width and height")
+ if kwargs["unit"] is None:
raise DataError("GEOSEARCH must have unit")
- if kwargs['unit'].lower() not in ('m', 'km', 'mi', 'ft'):
+ if kwargs["unit"].lower() not in ("m", "km", "mi", "ft"):
raise DataError("GEOSEARCH invalid unit")
- if kwargs['radius']:
- if kwargs['width'] or kwargs['height']:
- raise DataError("GEOSEARCH radius and width or height"
- " cant be set together")
- pieces.extend([b'BYRADIUS', kwargs['radius'], kwargs['unit']])
- if kwargs['width'] and kwargs['height']:
- pieces.extend([b'BYBOX',
- kwargs['width'], kwargs['height'], kwargs['unit']])
+ if kwargs["radius"]:
+ if kwargs["width"] or kwargs["height"]:
+ raise DataError(
+ "GEOSEARCH radius and width or height" " cant be set together"
+ )
+ pieces.extend([b"BYRADIUS", kwargs["radius"], kwargs["unit"]])
+ if kwargs["width"] and kwargs["height"]:
+ pieces.extend([b"BYBOX", kwargs["width"], kwargs["height"], kwargs["unit"]])
# sort
- if kwargs['sort']:
- if kwargs['sort'].upper() == 'ASC':
- pieces.append(b'ASC')
- elif kwargs['sort'].upper() == 'DESC':
- pieces.append(b'DESC')
+ if kwargs["sort"]:
+ if kwargs["sort"].upper() == "ASC":
+ pieces.append(b"ASC")
+ elif kwargs["sort"].upper() == "DESC":
+ pieces.append(b"DESC")
else:
raise DataError("GEOSEARCH invalid sort")
# count any
- if kwargs['count']:
- pieces.extend([b'COUNT', kwargs['count']])
- if kwargs['any']:
- pieces.append(b'ANY')
- elif kwargs['any']:
- raise DataError("GEOSEARCH ``any`` can't be provided "
- "without count")
+ if kwargs["count"]:
+ pieces.extend([b"COUNT", kwargs["count"]])
+ if kwargs["any"]:
+ pieces.append(b"ANY")
+ elif kwargs["any"]:
+ raise DataError("GEOSEARCH ``any`` can't be provided " "without count")
# other properties
for arg_name, byte_repr in (
- ('withdist', b'WITHDIST'),
- ('withcoord', b'WITHCOORD'),
- ('withhash', b'WITHHASH'),
- ('store_dist', b'STOREDIST')):
+ ("withdist", b"WITHDIST"),
+ ("withcoord", b"WITHCOORD"),
+ ("withhash", b"WITHHASH"),
+ ("store_dist", b"STOREDIST"),
+ ):
if kwargs[arg_name]:
pieces.append(byte_repr)
@@ -3955,6 +4219,7 @@ class ModuleCommands:
Redis Module commands.
see: https://redis.io/topics/modules-intro
"""
+
def module_load(self, path, *args):
"""
Loads the module from ``path``.
@@ -3963,7 +4228,7 @@ class ModuleCommands:
For more information check https://redis.io/commands/module-load
"""
- return self.execute_command('MODULE LOAD', path, *args)
+ return self.execute_command("MODULE LOAD", path, *args)
def module_unload(self, name):
"""
@@ -3972,7 +4237,7 @@ class ModuleCommands:
For more information check https://redis.io/commands/module-unload
"""
- return self.execute_command('MODULE UNLOAD', name)
+ return self.execute_command("MODULE UNLOAD", name)
def module_list(self):
"""
@@ -3981,7 +4246,7 @@ class ModuleCommands:
For more information check https://redis.io/commands/module-list
"""
- return self.execute_command('MODULE LIST')
+ return self.execute_command("MODULE LIST")
def command_info(self):
raise NotImplementedError(
@@ -3989,13 +4254,13 @@ class ModuleCommands:
)
def command_count(self):
- return self.execute_command('COMMAND COUNT')
+ return self.execute_command("COMMAND COUNT")
def command_getkeys(self, *args):
- return self.execute_command('COMMAND GETKEYS', *args)
+ return self.execute_command("COMMAND GETKEYS", *args)
def command(self):
- return self.execute_command('COMMAND')
+ return self.execute_command("COMMAND")
class Script:
@@ -4022,6 +4287,7 @@ class Script:
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
from redis.client import Pipeline
+
if isinstance(client, Pipeline):
# Make sure the pipeline can register the script before executing.
client.scripts.add(self)
@@ -4039,6 +4305,7 @@ class BitFieldOperation:
"""
Command builder for BITFIELD commands.
"""
+
def __init__(self, client, key, default_overflow=None):
self.client = client
self.key = key
@@ -4050,7 +4317,7 @@ class BitFieldOperation:
Reset the state of the instance to when it was constructed
"""
self.operations = []
- self._last_overflow = 'WRAP'
+ self._last_overflow = "WRAP"
self.overflow(self._default_overflow or self._last_overflow)
def overflow(self, overflow):
@@ -4063,7 +4330,7 @@ class BitFieldOperation:
overflow = overflow.upper()
if overflow != self._last_overflow:
self._last_overflow = overflow
- self.operations.append(('OVERFLOW', overflow))
+ self.operations.append(("OVERFLOW", overflow))
return self
def incrby(self, fmt, offset, increment, overflow=None):
@@ -4083,7 +4350,7 @@ class BitFieldOperation:
if overflow is not None:
self.overflow(overflow)
- self.operations.append(('INCRBY', fmt, offset, increment))
+ self.operations.append(("INCRBY", fmt, offset, increment))
return self
def get(self, fmt, offset):
@@ -4096,7 +4363,7 @@ class BitFieldOperation:
fmt='u8', offset='#2', the offset will be 16.
:returns: a :py:class:`BitFieldOperation` instance.
"""
- self.operations.append(('GET', fmt, offset))
+ self.operations.append(("GET", fmt, offset))
return self
def set(self, fmt, offset, value):
@@ -4110,12 +4377,12 @@ class BitFieldOperation:
:param int value: value to set at the given position.
:returns: a :py:class:`BitFieldOperation` instance.
"""
- self.operations.append(('SET', fmt, offset, value))
+ self.operations.append(("SET", fmt, offset, value))
return self
@property
def command(self):
- cmd = ['BITFIELD', self.key]
+ cmd = ["BITFIELD", self.key]
for ops in self.operations:
cmd.extend(ops)
return cmd
@@ -4132,19 +4399,31 @@ class BitFieldOperation:
return self.client.execute_command(*command)
-class DataAccessCommands(BasicKeyCommands, ListCommands,
- ScanCommands, SetCommands, StreamCommands,
- SortedSetCommands,
- HyperlogCommands, HashCommands, GeoCommands,
- ):
+class DataAccessCommands(
+ BasicKeyCommands,
+ ListCommands,
+ ScanCommands,
+ SetCommands,
+ StreamCommands,
+ SortedSetCommands,
+ HyperlogCommands,
+ HashCommands,
+ GeoCommands,
+):
"""
A class containing all of the implemented data access redis commands.
This class is to be used as a mixin.
"""
-class CoreCommands(ACLCommands, DataAccessCommands, ManagementCommands,
- ModuleCommands, PubSubCommands, ScriptCommands):
+class CoreCommands(
+ ACLCommands,
+ DataAccessCommands,
+ ManagementCommands,
+ ModuleCommands,
+ PubSubCommands,
+ ScriptCommands,
+):
"""
A class containing all of the implemented redis commands. This class is
to be used as a mixin.
diff --git a/redis/commands/helpers.py b/redis/commands/helpers.py
index dc5705b..80dfd76 100644
--- a/redis/commands/helpers.py
+++ b/redis/commands/helpers.py
@@ -22,7 +22,7 @@ def list_or_args(keys, args):
def nativestr(x):
"""Return the decoded binary string, or a string, depending on type."""
r = x.decode("utf-8", "replace") if isinstance(x, bytes) else x
- if r == 'null':
+ if r == "null":
return
return r
@@ -58,14 +58,14 @@ def parse_list_to_dict(response):
res = {}
for i in range(0, len(response), 2):
if isinstance(response[i], list):
- res['Child iterators'].append(parse_list_to_dict(response[i]))
- elif isinstance(response[i+1], list):
- res['Child iterators'] = [parse_list_to_dict(response[i+1])]
+ res["Child iterators"].append(parse_list_to_dict(response[i]))
+ elif isinstance(response[i + 1], list):
+ res["Child iterators"] = [parse_list_to_dict(response[i + 1])]
else:
try:
- res[response[i]] = float(response[i+1])
+ res[response[i]] = float(response[i + 1])
except (TypeError, ValueError):
- res[response[i]] = response[i+1]
+ res[response[i]] = response[i + 1]
return res
diff --git a/redis/commands/json/__init__.py b/redis/commands/json/__init__.py
index d634dbd..12c0648 100644
--- a/redis/commands/json/__init__.py
+++ b/redis/commands/json/__init__.py
@@ -1,12 +1,10 @@
-from json import JSONDecoder, JSONEncoder, JSONDecodeError
+from json import JSONDecodeError, JSONDecoder, JSONEncoder
+
+import redis
-from .decoders import (
- decode_list,
- bulk_of_jsons,
-)
from ..helpers import nativestr
from .commands import JSONCommands
-import redis
+from .decoders import bulk_of_jsons, decode_list
class JSON(JSONCommands):
diff --git a/redis/commands/json/commands.py b/redis/commands/json/commands.py
index 1affaaf..e7f07b6 100644
--- a/redis/commands/json/commands.py
+++ b/redis/commands/json/commands.py
@@ -1,8 +1,10 @@
-from .path import Path
-from .decoders import decode_dict_keys
from deprecated import deprecated
+
from redis.exceptions import DataError
+from .decoders import decode_dict_keys
+from .path import Path
+
class JSONCommands:
"""json commands."""
@@ -29,8 +31,7 @@ class JSONCommands:
For more information: https://oss.redis.com/redisjson/commands/#jsonarrindex
""" # noqa
return self.execute_command(
- "JSON.ARRINDEX", name, str(path), self._encode(scalar),
- start, stop
+ "JSON.ARRINDEX", name, str(path), self._encode(scalar), start, stop
)
def arrinsert(self, name, path, index, *args):
@@ -66,8 +67,7 @@ class JSONCommands:
For more information: https://oss.redis.com/redisjson/commands/#jsonarrtrim
""" # noqa
- return self.execute_command("JSON.ARRTRIM", name, str(path),
- start, stop)
+ return self.execute_command("JSON.ARRTRIM", name, str(path), start, stop)
def type(self, name, path=Path.rootPath()):
"""Get the type of the JSON value under ``path`` from key ``name``.
@@ -109,7 +109,7 @@ class JSONCommands:
"JSON.NUMINCRBY", name, str(path), self._encode(number)
)
- @deprecated(version='4.0.0', reason='deprecated since redisjson 1.0.0')
+ @deprecated(version="4.0.0", reason="deprecated since redisjson 1.0.0")
def nummultby(self, name, path, number):
"""Multiply the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``.
@@ -218,7 +218,7 @@ class JSONCommands:
``name``.
For more information: https://oss.redis.com/redisjson/commands/#jsonstrlen
- """ # noqa
+ """ # noqa
pieces = [name]
if path is not None:
pieces.append(str(path))
@@ -240,9 +240,7 @@ class JSONCommands:
For more information: https://oss.redis.com/redisjson/commands/#jsonstrappend
""" # noqa
pieces = [name, str(path), self._encode(value)]
- return self.execute_command(
- "JSON.STRAPPEND", *pieces
- )
+ return self.execute_command("JSON.STRAPPEND", *pieces)
def debug(self, subcommand, key=None, path=Path.rootPath()):
"""Return the memory usage in bytes of a value under ``path`` from
@@ -252,8 +250,7 @@ class JSONCommands:
""" # noqa
valid_subcommands = ["MEMORY", "HELP"]
if subcommand not in valid_subcommands:
- raise DataError("The only valid subcommands are ",
- str(valid_subcommands))
+ raise DataError("The only valid subcommands are ", str(valid_subcommands))
pieces = [subcommand]
if subcommand == "MEMORY":
if key is None:
@@ -262,17 +259,20 @@ class JSONCommands:
pieces.append(str(path))
return self.execute_command("JSON.DEBUG", *pieces)
- @deprecated(version='4.0.0',
- reason='redisjson-py supported this, call get directly.')
+ @deprecated(
+ version="4.0.0", reason="redisjson-py supported this, call get directly."
+ )
def jsonget(self, *args, **kwargs):
return self.get(*args, **kwargs)
- @deprecated(version='4.0.0',
- reason='redisjson-py supported this, call get directly.')
+ @deprecated(
+ version="4.0.0", reason="redisjson-py supported this, call get directly."
+ )
def jsonmget(self, *args, **kwargs):
return self.mget(*args, **kwargs)
- @deprecated(version='4.0.0',
- reason='redisjson-py supported this, call get directly.')
+ @deprecated(
+ version="4.0.0", reason="redisjson-py supported this, call get directly."
+ )
def jsonset(self, *args, **kwargs):
return self.set(*args, **kwargs)
diff --git a/redis/commands/json/decoders.py b/redis/commands/json/decoders.py
index b19395c..b938471 100644
--- a/redis/commands/json/decoders.py
+++ b/redis/commands/json/decoders.py
@@ -1,6 +1,7 @@
-from ..helpers import nativestr
-import re
import copy
+import re
+
+from ..helpers import nativestr
def bulk_of_jsons(d):
@@ -33,7 +34,7 @@ def unstring(obj):
One can't simply call int/float in a try/catch because there is a
semantic difference between (for example) 15.0 and 15.
"""
- floatreg = '^\\d+.\\d+$'
+ floatreg = "^\\d+.\\d+$"
match = re.findall(floatreg, obj)
if match != []:
return float(match[0])
diff --git a/redis/commands/parser.py b/redis/commands/parser.py
index 26b190c..dadf3c6 100644
--- a/redis/commands/parser.py
+++ b/redis/commands/parser.py
@@ -1,7 +1,4 @@
-from redis.exceptions import (
- RedisError,
- ResponseError
-)
+from redis.exceptions import RedisError, ResponseError
from redis.utils import str_if_bytes
@@ -13,6 +10,7 @@ class CommandsParser:
'movablekeys', and these commands' keys are determined by the command
'COMMAND GETKEYS'.
"""
+
def __init__(self, redis_connection):
self.initialized = False
self.commands = {}
@@ -51,20 +49,24 @@ class CommandsParser:
)
command = self.commands.get(cmd_name)
- if 'movablekeys' in command['flags']:
+ if "movablekeys" in command["flags"]:
keys = self._get_moveable_keys(redis_conn, *args)
- elif 'pubsub' in command['flags']:
+ elif "pubsub" in command["flags"]:
keys = self._get_pubsub_keys(*args)
else:
- if command['step_count'] == 0 and command['first_key_pos'] == 0 \
- and command['last_key_pos'] == 0:
+ if (
+ command["step_count"] == 0
+ and command["first_key_pos"] == 0
+ and command["last_key_pos"] == 0
+ ):
# The command doesn't have keys in it
return None
- last_key_pos = command['last_key_pos']
+ last_key_pos = command["last_key_pos"]
if last_key_pos < 0:
last_key_pos = len(args) - abs(last_key_pos)
- keys_pos = list(range(command['first_key_pos'], last_key_pos + 1,
- command['step_count']))
+ keys_pos = list(
+ range(command["first_key_pos"], last_key_pos + 1, command["step_count"])
+ )
keys = [args[pos] for pos in keys_pos]
return keys
@@ -77,11 +79,13 @@ class CommandsParser:
pieces = pieces + cmd_name.split()
pieces = pieces + list(args[1:])
try:
- keys = redis_conn.execute_command('COMMAND GETKEYS', *pieces)
+ keys = redis_conn.execute_command("COMMAND GETKEYS", *pieces)
except ResponseError as e:
message = e.__str__()
- if 'Invalid arguments' in message or \
- 'The command has no key arguments' in message:
+ if (
+ "Invalid arguments" in message
+ or "The command has no key arguments" in message
+ ):
return None
else:
raise e
@@ -99,18 +103,17 @@ class CommandsParser:
return None
args = [str_if_bytes(arg) for arg in args]
command = args[0].upper()
- if command == 'PUBSUB':
+ if command == "PUBSUB":
# the second argument is a part of the command name, e.g.
# ['PUBSUB', 'NUMSUB', 'foo'].
pubsub_type = args[1].upper()
- if pubsub_type in ['CHANNELS', 'NUMSUB']:
+ if pubsub_type in ["CHANNELS", "NUMSUB"]:
keys = args[2:]
- elif command in ['SUBSCRIBE', 'PSUBSCRIBE', 'UNSUBSCRIBE',
- 'PUNSUBSCRIBE']:
+ elif command in ["SUBSCRIBE", "PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE"]:
# format example:
# SUBSCRIBE channel [channel ...]
keys = list(args[1:])
- elif command == 'PUBLISH':
+ elif command == "PUBLISH":
# format example:
# PUBLISH channel message
keys = [args[1]]
diff --git a/redis/commands/redismodules.py b/redis/commands/redismodules.py
index 5f629fb..2420d7b 100644
--- a/redis/commands/redismodules.py
+++ b/redis/commands/redismodules.py
@@ -1,4 +1,4 @@
-from json import JSONEncoder, JSONDecoder
+from json import JSONDecoder, JSONEncoder
class RedisModuleCommands:
@@ -7,21 +7,18 @@ class RedisModuleCommands:
"""
def json(self, encoder=JSONEncoder(), decoder=JSONDecoder()):
- """Access the json namespace, providing support for redis json.
- """
+ """Access the json namespace, providing support for redis json."""
from .json import JSON
- jj = JSON(
- client=self,
- encoder=encoder,
- decoder=decoder)
+
+ jj = JSON(client=self, encoder=encoder, decoder=decoder)
return jj
def ft(self, index_name="idx"):
- """Access the search namespace, providing support for redis search.
- """
+ """Access the search namespace, providing support for redis search."""
from .search import Search
+
s = Search(client=self, index_name=index_name)
return s
@@ -31,5 +28,6 @@ class RedisModuleCommands:
"""
from .timeseries import TimeSeries
+
s = TimeSeries(client=self)
return s
diff --git a/redis/commands/search/__init__.py b/redis/commands/search/__init__.py
index a30cebe..94bc037 100644
--- a/redis/commands/search/__init__.py
+++ b/redis/commands/search/__init__.py
@@ -35,7 +35,7 @@ class Search(SearchCommands):
replace=False,
partial=False,
no_create=False,
- **fields
+ **fields,
):
"""
Add a document to the batch query
@@ -49,7 +49,7 @@ class Search(SearchCommands):
replace=replace,
partial=partial,
no_create=no_create,
- **fields
+ **fields,
)
self.current_chunk += 1
self.total += 1
diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py
index 553bc39..4ec6fc9 100644
--- a/redis/commands/search/commands.py
+++ b/redis/commands/search/commands.py
@@ -1,13 +1,13 @@
import itertools
import time
-from .document import Document
-from .result import Result
-from .query import Query
+from ..helpers import parse_to_dict
from ._util import to_string
from .aggregation import AggregateRequest, AggregateResult, Cursor
+from .document import Document
+from .query import Query
+from .result import Result
from .suggestion import SuggestionParser
-from ..helpers import parse_to_dict
NUMERIC = "NUMERIC"
@@ -148,7 +148,7 @@ class SearchCommands:
partial=False,
language=None,
no_create=False,
- **fields
+ **fields,
):
"""
Internal add_document used for both batch and single doc indexing
@@ -211,7 +211,7 @@ class SearchCommands:
partial=False,
language=None,
no_create=False,
- **fields
+ **fields,
):
"""
Add a single document to the index.
@@ -253,7 +253,7 @@ class SearchCommands:
partial=partial,
language=language,
no_create=no_create,
- **fields
+ **fields,
)
def add_document_hash(
@@ -274,7 +274,7 @@ class SearchCommands:
- **replace**: if True, and the document already is in the index, we
perform an update and reindex the document
- **language**: Specify the language used for document tokenization.
-
+
For more information: https://oss.redis.com/redisearch/Commands/#ftaddhash
""" # noqa
return self._add_document_hash(
@@ -294,7 +294,7 @@ class SearchCommands:
- **delete_actual_document**: if set to True, RediSearch also delete
the actual document if it is in the index
-
+
For more information: https://oss.redis.com/redisearch/Commands/#ftdel
""" # noqa
args = [DEL_CMD, self.index_name, doc_id]
@@ -453,7 +453,7 @@ class SearchCommands:
cmd = [PROFILE_CMD, self.index_name, ""]
if limited:
cmd.append("LIMITED")
- cmd.append('QUERY')
+ cmd.append("QUERY")
if isinstance(query, AggregateRequest):
cmd[2] = "AGGREGATE"
@@ -462,19 +462,20 @@ class SearchCommands:
cmd[2] = "SEARCH"
cmd += query.get_args()
else:
- raise ValueError("Must provide AggregateRequest object or "
- "Query object.")
+ raise ValueError("Must provide AggregateRequest object or " "Query object.")
res = self.execute_command(*cmd)
if isinstance(query, AggregateRequest):
result = self._get_AggregateResult(res[0], query, query._cursor)
else:
- result = Result(res[0],
- not query._no_content,
- duration=(time.time() - st) * 1000.0,
- has_payload=query._with_payloads,
- with_scores=query._with_scores,)
+ result = Result(
+ res[0],
+ not query._no_content,
+ duration=(time.time() - st) * 1000.0,
+ has_payload=query._with_payloads,
+ with_scores=query._with_scores,
+ )
return result, parse_to_dict(res[1])
@@ -535,8 +536,7 @@ class SearchCommands:
# ]
# }
corrections[_correction[1]] = [
- {"score": _item[0], "suggestion": _item[1]}
- for _item in _correction[2]
+ {"score": _item[0], "suggestion": _item[1]} for _item in _correction[2]
]
return corrections
@@ -704,8 +704,7 @@ class SearchCommands:
return self.execute_command(SUGDEL_COMMAND, key, string)
def sugget(
- self, key, prefix, fuzzy=False, num=10, with_scores=False,
- with_payloads=False
+ self, key, prefix, fuzzy=False, num=10, with_scores=False, with_payloads=False
):
"""
Get a list of suggestions from the AutoCompleter, for a given prefix.
@@ -769,7 +768,7 @@ class SearchCommands:
If set to true, we do not scan and index.
terms :
The terms.
-
+
For more information: https://oss.redis.com/redisearch/Commands/#ftsynupdate
""" # noqa
cmd = [SYNUPDATE_CMD, self.index_name, groupid]
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
index 076c872..69e3908 100644
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -9,8 +9,7 @@ class Field:
NOINDEX = "NOINDEX"
AS = "AS"
- def __init__(self, name, args=[], sortable=False,
- no_index=False, as_name=None):
+ def __init__(self, name, args=[], sortable=False, no_index=False, as_name=None):
self.name = name
self.args = args
self.args_suffix = list()
@@ -47,8 +46,7 @@ class TextField(Field):
def __init__(
self, name, weight=1.0, no_stem=False, phonetic_matcher=None, **kwargs
):
- Field.__init__(self, name,
- args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
+ Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
if no_stem:
Field.append_arg(self, self.NOSTEM)
diff --git a/redis/commands/search/query.py b/redis/commands/search/query.py
index 5534f7b..2bb8347 100644
--- a/redis/commands/search/query.py
+++ b/redis/commands/search/query.py
@@ -62,11 +62,9 @@ class Query:
def _mk_field_list(self, fields):
if not fields:
return []
- return \
- [fields] if isinstance(fields, str) else list(fields)
+ return [fields] if isinstance(fields, str) else list(fields)
- def summarize(self, fields=None, context_len=None,
- num_frags=None, sep=None):
+ def summarize(self, fields=None, context_len=None, num_frags=None, sep=None):
"""
Return an abridged format of the field, containing only the segments of
the field which contain the matching term(s).
@@ -300,8 +298,7 @@ class NumericFilter(Filter):
INF = "+inf"
NEG_INF = "-inf"
- def __init__(self, field, minval, maxval, minExclusive=False,
- maxExclusive=False):
+ def __init__(self, field, minval, maxval, minExclusive=False, maxExclusive=False):
args = [
minval if not minExclusive else f"({minval}",
maxval if not maxExclusive else f"({maxval}",
diff --git a/redis/commands/search/querystring.py b/redis/commands/search/querystring.py
index ffba542..1da0387 100644
--- a/redis/commands/search/querystring.py
+++ b/redis/commands/search/querystring.py
@@ -15,8 +15,7 @@ def between(a, b, inclusive_min=True, inclusive_max=True):
"""
Indicate that value is a numeric range
"""
- return RangeValue(a, b, inclusive_min=inclusive_min,
- inclusive_max=inclusive_max)
+ return RangeValue(a, b, inclusive_min=inclusive_min, inclusive_max=inclusive_max)
def equal(n):
@@ -200,9 +199,7 @@ class Node:
return [BaseNode(f"@{key}:{vals[0].to_string()}")]
if not vals[0].combinable:
return [BaseNode(f"@{key}:{v.to_string()}") for v in vals]
- s = BaseNode(
- f"@{key}:({self.JOINSTR.join(v.to_string() for v in vals)})"
- )
+ s = BaseNode(f"@{key}:({self.JOINSTR.join(v.to_string() for v in vals)})")
return [s]
@classmethod
diff --git a/redis/commands/search/result.py b/redis/commands/search/result.py
index 57ba53d..5f4aca6 100644
--- a/redis/commands/search/result.py
+++ b/redis/commands/search/result.py
@@ -1,5 +1,5 @@
-from .document import Document
from ._util import to_string
+from .document import Document
class Result:
diff --git a/redis/commands/search/suggestion.py b/redis/commands/search/suggestion.py
index 6d295a6..5d1eba6 100644
--- a/redis/commands/search/suggestion.py
+++ b/redis/commands/search/suggestion.py
@@ -46,8 +46,6 @@ class SuggestionParser:
def __iter__(self):
for i in range(0, len(self._sugs), self.sugsize):
ss = self._sugs[i]
- score = float(self._sugs[i + self._scoreidx]) \
- if self.with_scores else 1.0
- payload = self._sugs[i + self._payloadidx] \
- if self.with_payloads else None
+ score = float(self._sugs[i + self._scoreidx]) if self.with_scores else 1.0
+ payload = self._sugs[i + self._payloadidx] if self.with_payloads else None
yield Suggestion(ss, score, payload)
diff --git a/redis/commands/sentinel.py b/redis/commands/sentinel.py
index 1f02984..a9b06c2 100644
--- a/redis/commands/sentinel.py
+++ b/redis/commands/sentinel.py
@@ -9,41 +9,39 @@ class SentinelCommands:
def sentinel(self, *args):
"Redis Sentinel's SENTINEL command."
- warnings.warn(
- DeprecationWarning('Use the individual sentinel_* methods'))
+ warnings.warn(DeprecationWarning("Use the individual sentinel_* methods"))
def sentinel_get_master_addr_by_name(self, service_name):
"Returns a (host, port) pair for the given ``service_name``"
- return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
- service_name)
+ return self.execute_command("SENTINEL GET-MASTER-ADDR-BY-NAME", service_name)
def sentinel_master(self, service_name):
"Returns a dictionary containing the specified masters state."
- return self.execute_command('SENTINEL MASTER', service_name)
+ return self.execute_command("SENTINEL MASTER", service_name)
def sentinel_masters(self):
"Returns a list of dictionaries containing each master's state."
- return self.execute_command('SENTINEL MASTERS')
+ return self.execute_command("SENTINEL MASTERS")
def sentinel_monitor(self, name, ip, port, quorum):
"Add a new master to Sentinel to be monitored"
- return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
+ return self.execute_command("SENTINEL MONITOR", name, ip, port, quorum)
def sentinel_remove(self, name):
"Remove a master from Sentinel's monitoring"
- return self.execute_command('SENTINEL REMOVE', name)
+ return self.execute_command("SENTINEL REMOVE", name)
def sentinel_sentinels(self, service_name):
"Returns a list of sentinels for ``service_name``"
- return self.execute_command('SENTINEL SENTINELS', service_name)
+ return self.execute_command("SENTINEL SENTINELS", service_name)
def sentinel_set(self, name, option, value):
"Set Sentinel monitoring parameters for a given master"
- return self.execute_command('SENTINEL SET', name, option, value)
+ return self.execute_command("SENTINEL SET", name, option, value)
def sentinel_slaves(self, service_name):
"Returns a list of slaves for ``service_name``"
- return self.execute_command('SENTINEL SLAVES', service_name)
+ return self.execute_command("SENTINEL SLAVES", service_name)
def sentinel_reset(self, pattern):
"""
@@ -54,7 +52,7 @@ class SentinelCommands:
failover in progress), and removes every slave and sentinel already
discovered and associated with the master.
"""
- return self.execute_command('SENTINEL RESET', pattern, once=True)
+ return self.execute_command("SENTINEL RESET", pattern, once=True)
def sentinel_failover(self, new_master_name):
"""
@@ -63,7 +61,7 @@ class SentinelCommands:
configuration will be published so that the other Sentinels will
update their configurations).
"""
- return self.execute_command('SENTINEL FAILOVER', new_master_name)
+ return self.execute_command("SENTINEL FAILOVER", new_master_name)
def sentinel_ckquorum(self, new_master_name):
"""
@@ -74,9 +72,7 @@ class SentinelCommands:
This command should be used in monitoring systems to check if a
Sentinel deployment is ok.
"""
- return self.execute_command('SENTINEL CKQUORUM',
- new_master_name,
- once=True)
+ return self.execute_command("SENTINEL CKQUORUM", new_master_name, once=True)
def sentinel_flushconfig(self):
"""
@@ -94,4 +90,4 @@ class SentinelCommands:
This command works even if the previous configuration file is
completely missing.
"""
- return self.execute_command('SENTINEL FLUSHCONFIG')
+ return self.execute_command("SENTINEL FLUSHCONFIG")
diff --git a/redis/commands/timeseries/__init__.py b/redis/commands/timeseries/__init__.py
index 5ce538f..5b1f151 100644
--- a/redis/commands/timeseries/__init__.py
+++ b/redis/commands/timeseries/__init__.py
@@ -1,19 +1,12 @@
import redis.client
-from .utils import (
- parse_range,
- parse_get,
- parse_m_range,
- parse_m_get,
-)
-from .info import TSInfo
from ..helpers import parse_to_list
from .commands import (
ALTER_CMD,
CREATE_CMD,
CREATERULE_CMD,
- DELETERULE_CMD,
DEL_CMD,
+ DELETERULE_CMD,
GET_CMD,
INFO_CMD,
MGET_CMD,
@@ -24,6 +17,8 @@ from .commands import (
REVRANGE_CMD,
TimeSeriesCommands,
)
+from .info import TSInfo
+from .utils import parse_get, parse_m_get, parse_m_range, parse_range
class TimeSeries(TimeSeriesCommands):
diff --git a/redis/commands/timeseries/commands.py b/redis/commands/timeseries/commands.py
index 460ba76..c86e0b9 100644
--- a/redis/commands/timeseries/commands.py
+++ b/redis/commands/timeseries/commands.py
@@ -1,6 +1,5 @@
from redis.exceptions import DataError
-
ADD_CMD = "TS.ADD"
ALTER_CMD = "TS.ALTER"
CREATERULE_CMD = "TS.CREATERULE"
@@ -58,7 +57,7 @@ class TimeSeriesCommands:
- 'min': only override if the value is lower than the existing value.
- 'max': only override if the value is higher than the existing value.
When this is not set, the server-wide default will be used.
-
+
For more information: https://oss.redis.com/redistimeseries/commands/#tscreate
""" # noqa
retention_msecs = kwargs.get("retention_msecs", None)
@@ -81,7 +80,7 @@ class TimeSeriesCommands:
For more information see
The parameters are the same as TS.CREATE.
-
+
For more information: https://oss.redis.com/redistimeseries/commands/#tsalter
""" # noqa
retention_msecs = kwargs.get("retention_msecs", None)
@@ -129,7 +128,7 @@ class TimeSeriesCommands:
- 'min': only override if the value is lower than the existing value.
- 'max': only override if the value is higher than the existing value.
When this is not set, the server-wide default will be used.
-
+
For more information: https://oss.redis.com/redistimeseries/master/commands/#tsadd
""" # noqa
retention_msecs = kwargs.get("retention_msecs", None)
@@ -276,13 +275,7 @@ class TimeSeriesCommands:
""" # noqa
return self.execute_command(DEL_CMD, key, from_time, to_time)
- def createrule(
- self,
- source_key,
- dest_key,
- aggregation_type,
- bucket_size_msec
- ):
+ def createrule(self, source_key, dest_key, aggregation_type, bucket_size_msec):
"""
Create a compaction rule from values added to `source_key` into `dest_key`.
Aggregating for `bucket_size_msec` where an `aggregation_type` can be
@@ -321,11 +314,7 @@ class TimeSeriesCommands:
"""Create TS.RANGE and TS.REVRANGE arguments."""
params = [key, from_time, to_time]
self._appendFilerByTs(params, filter_by_ts)
- self._appendFilerByValue(
- params,
- filter_by_min_value,
- filter_by_max_value
- )
+ self._appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
self._appendCount(params, count)
self._appendAlign(params, align)
self._appendAggregation(params, aggregation_type, bucket_size_msec)
@@ -471,11 +460,7 @@ class TimeSeriesCommands:
"""Create TS.MRANGE and TS.MREVRANGE arguments."""
params = [from_time, to_time]
self._appendFilerByTs(params, filter_by_ts)
- self._appendFilerByValue(
- params,
- filter_by_min_value,
- filter_by_max_value
- )
+ self._appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
self._appendCount(params, count)
self._appendAlign(params, align)
self._appendAggregation(params, aggregation_type, bucket_size_msec)
@@ -654,7 +639,7 @@ class TimeSeriesCommands:
return self.execute_command(MREVRANGE_CMD, *params)
def get(self, key):
- """ # noqa
+ """# noqa
Get the last sample of `key`.
For more information: https://oss.redis.com/redistimeseries/master/commands/#tsget
@@ -662,7 +647,7 @@ class TimeSeriesCommands:
return self.execute_command(GET_CMD, key)
def mget(self, filters, with_labels=False):
- """ # noqa
+ """# noqa
Get the last samples matching the specific `filter`.
For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmget
@@ -674,7 +659,7 @@ class TimeSeriesCommands:
return self.execute_command(MGET_CMD, *params)
def info(self, key):
- """ # noqa
+ """# noqa
Get information of `key`.
For more information: https://oss.redis.com/redistimeseries/master/commands/#tsinfo
@@ -682,7 +667,7 @@ class TimeSeriesCommands:
return self.execute_command(INFO_CMD, key)
def queryindex(self, filters):
- """ # noqa
+ """# noqa
Get all the keys matching the `filter` list.
For more information: https://oss.redis.com/redistimeseries/master/commands/#tsqueryindex
diff --git a/redis/commands/timeseries/info.py b/redis/commands/timeseries/info.py
index 2b8acd1..fba7f09 100644
--- a/redis/commands/timeseries/info.py
+++ b/redis/commands/timeseries/info.py
@@ -1,5 +1,5 @@
-from .utils import list_to_dict
from ..helpers import nativestr
+from .utils import list_to_dict
class TSInfo:
diff --git a/redis/commands/timeseries/utils.py b/redis/commands/timeseries/utils.py
index c33b7c5..c49b040 100644
--- a/redis/commands/timeseries/utils.py
+++ b/redis/commands/timeseries/utils.py
@@ -2,9 +2,7 @@ from ..helpers import nativestr
def list_to_dict(aList):
- return {
- nativestr(aList[i][0]): nativestr(aList[i][1])
- for i in range(len(aList))}
+ return {nativestr(aList[i][0]): nativestr(aList[i][1]) for i in range(len(aList))}
def parse_range(response):
@@ -16,9 +14,7 @@ def parse_m_range(response):
"""Parse multi range response. Used by TS.MRANGE and TS.MREVRANGE."""
res = []
for item in response:
- res.append(
- {nativestr(item[0]):
- [list_to_dict(item[1]), parse_range(item[2])]})
+ res.append({nativestr(item[0]): [list_to_dict(item[1]), parse_range(item[2])]})
return sorted(res, key=lambda d: list(d.keys()))
@@ -34,8 +30,7 @@ def parse_m_get(response):
res = []
for item in response:
if not item[2]:
- res.append(
- {nativestr(item[0]): [list_to_dict(item[1]), None, None]})
+ res.append({nativestr(item[0]): [list_to_dict(item[1]), None, None]})
else:
res.append(
{
diff --git a/redis/connection.py b/redis/connection.py
index ef3a667..d13fe65 100755
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -1,8 +1,3 @@
-from packaging.version import Version
-from itertools import chain
-from time import time
-from queue import LifoQueue, Empty, Full
-from urllib.parse import parse_qs, unquote, urlparse
import copy
import errno
import io
@@ -10,6 +5,12 @@ import os
import socket
import threading
import weakref
+from itertools import chain
+from queue import Empty, Full, LifoQueue
+from time import time
+from urllib.parse import parse_qs, unquote, urlparse
+
+from packaging.version import Version
from redis.backoff import NoBackoff
from redis.exceptions import (
@@ -21,20 +22,20 @@ from redis.exceptions import (
DataError,
ExecAbortError,
InvalidResponse,
+ ModuleError,
NoPermissionError,
NoScriptError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
- ModuleError,
)
-
from redis.retry import Retry
from redis.utils import HIREDIS_AVAILABLE, str_if_bytes
try:
import ssl
+
ssl_available = True
except ImportError:
ssl_available = False
@@ -44,7 +45,7 @@ NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
}
if ssl_available:
- if hasattr(ssl, 'SSLWantReadError'):
+ if hasattr(ssl, "SSLWantReadError"):
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2
else:
@@ -56,34 +57,31 @@ if HIREDIS_AVAILABLE:
import hiredis
hiredis_version = Version(hiredis.__version__)
- HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
- hiredis_version >= Version('0.1.3')
- HIREDIS_SUPPORTS_BYTE_BUFFER = \
- hiredis_version >= Version('0.1.4')
- HIREDIS_SUPPORTS_ENCODING_ERRORS = \
- hiredis_version >= Version('1.0.0')
+ HIREDIS_SUPPORTS_CALLABLE_ERRORS = hiredis_version >= Version("0.1.3")
+ HIREDIS_SUPPORTS_BYTE_BUFFER = hiredis_version >= Version("0.1.4")
+ HIREDIS_SUPPORTS_ENCODING_ERRORS = hiredis_version >= Version("1.0.0")
HIREDIS_USE_BYTE_BUFFER = True
# only use byte buffer if hiredis supports it
if not HIREDIS_SUPPORTS_BYTE_BUFFER:
HIREDIS_USE_BYTE_BUFFER = False
-SYM_STAR = b'*'
-SYM_DOLLAR = b'$'
-SYM_CRLF = b'\r\n'
-SYM_EMPTY = b''
+SYM_STAR = b"*"
+SYM_DOLLAR = b"$"
+SYM_CRLF = b"\r\n"
+SYM_EMPTY = b""
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
SENTINEL = object()
-MODULE_LOAD_ERROR = 'Error loading the extension. ' \
- 'Please check the server logs.'
-NO_SUCH_MODULE_ERROR = 'Error unloading module: no such module with that name'
-MODULE_UNLOAD_NOT_POSSIBLE_ERROR = 'Error unloading module: operation not ' \
- 'possible.'
-MODULE_EXPORTS_DATA_TYPES_ERROR = "Error unloading module: the module " \
- "exports one or more module-side data " \
- "types, can't unload"
+MODULE_LOAD_ERROR = "Error loading the extension. " "Please check the server logs."
+NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
+MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not " "possible."
+MODULE_EXPORTS_DATA_TYPES_ERROR = (
+ "Error unloading module: the module "
+ "exports one or more module-side data "
+ "types, can't unload"
+)
class Encoder:
@@ -100,15 +98,19 @@ class Encoder:
return value
elif isinstance(value, bool):
# special case bool since it is a subclass of int
- raise DataError("Invalid input of type: 'bool'. Convert to a "
- "bytes, string, int or float first.")
+ raise DataError(
+ "Invalid input of type: 'bool'. Convert to a "
+ "bytes, string, int or float first."
+ )
elif isinstance(value, (int, float)):
value = repr(value).encode()
elif not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = type(value).__name__
- raise DataError(f"Invalid input of type: '{typename}'. "
- f"Convert to a bytes, string, int or float first.")
+ raise DataError(
+ f"Invalid input of type: '{typename}'. "
+ f"Convert to a bytes, string, int or float first."
+ )
if isinstance(value, str):
value = value.encode(self.encoding, self.encoding_errors)
return value
@@ -125,36 +127,36 @@ class Encoder:
class BaseParser:
EXCEPTION_CLASSES = {
- 'ERR': {
- 'max number of clients reached': ConnectionError,
- 'Client sent AUTH, but no password is set': AuthenticationError,
- 'invalid password': AuthenticationError,
+ "ERR": {
+ "max number of clients reached": ConnectionError,
+ "Client sent AUTH, but no password is set": AuthenticationError,
+ "invalid password": AuthenticationError,
# some Redis server versions report invalid command syntax
# in lowercase
- 'wrong number of arguments for \'auth\' command':
- AuthenticationWrongNumberOfArgsError,
+ "wrong number of arguments "
+ "for 'auth' command": AuthenticationWrongNumberOfArgsError,
# some Redis server versions report invalid command syntax
# in uppercase
- 'wrong number of arguments for \'AUTH\' command':
- AuthenticationWrongNumberOfArgsError,
+ "wrong number of arguments "
+ "for 'AUTH' command": AuthenticationWrongNumberOfArgsError,
MODULE_LOAD_ERROR: ModuleError,
MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
NO_SUCH_MODULE_ERROR: ModuleError,
MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
},
- 'EXECABORT': ExecAbortError,
- 'LOADING': BusyLoadingError,
- 'NOSCRIPT': NoScriptError,
- 'READONLY': ReadOnlyError,
- 'NOAUTH': AuthenticationError,
- 'NOPERM': NoPermissionError,
+ "EXECABORT": ExecAbortError,
+ "LOADING": BusyLoadingError,
+ "NOSCRIPT": NoScriptError,
+ "READONLY": ReadOnlyError,
+ "NOAUTH": AuthenticationError,
+ "NOPERM": NoPermissionError,
}
def parse_error(self, response):
"Parse an error response"
- error_code = response.split(' ')[0]
+ error_code = response.split(" ")[0]
if error_code in self.EXCEPTION_CLASSES:
- response = response[len(error_code) + 1:]
+ response = response[len(error_code) + 1 :]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
@@ -177,8 +179,7 @@ class SocketBuffer:
def length(self):
return self.bytes_written - self.bytes_read
- def _read_from_socket(self, length=None, timeout=SENTINEL,
- raise_on_timeout=True):
+ def _read_from_socket(self, length=None, timeout=SENTINEL, raise_on_timeout=True):
sock = self._sock
socket_read_size = self.socket_read_size
buf = self._buffer
@@ -220,9 +221,9 @@ class SocketBuffer:
sock.settimeout(self.socket_timeout)
def can_read(self, timeout):
- return bool(self.length) or \
- self._read_from_socket(timeout=timeout,
- raise_on_timeout=False)
+ return bool(self.length) or self._read_from_socket(
+ timeout=timeout, raise_on_timeout=False
+ )
def read(self, length):
length = length + 2 # make sure to read the \r\n terminator
@@ -283,6 +284,7 @@ class SocketBuffer:
class PythonParser(BaseParser):
"Plain Python parsing class"
+
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self.encoder = None
@@ -298,9 +300,9 @@ class PythonParser(BaseParser):
def on_connect(self, connection):
"Called when the socket connects"
self._sock = connection._sock
- self._buffer = SocketBuffer(self._sock,
- self.socket_read_size,
- connection.socket_timeout)
+ self._buffer = SocketBuffer(
+ self._sock, self.socket_read_size, connection.socket_timeout
+ )
self.encoder = connection.encoder
def on_disconnect(self):
@@ -321,12 +323,12 @@ class PythonParser(BaseParser):
byte, response = raw[:1], raw[1:]
- if byte not in (b'-', b'+', b':', b'$', b'*'):
+ if byte not in (b"-", b"+", b":", b"$", b"*"):
raise InvalidResponse(f"Protocol Error: {raw!r}")
# server returned an error
- if byte == b'-':
- response = response.decode('utf-8', errors='replace')
+ if byte == b"-":
+ response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
@@ -338,24 +340,26 @@ class PythonParser(BaseParser):
# necessary, so just return the exception instance here.
return error
# single value
- elif byte == b'+':
+ elif byte == b"+":
pass
# int value
- elif byte == b':':
+ elif byte == b":":
response = int(response)
# bulk response
- elif byte == b'$':
+ elif byte == b"$":
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
# multi-bulk response
- elif byte == b'*':
+ elif byte == b"*":
length = int(response)
if length == -1:
return None
- response = [self.read_response(disable_decoding=disable_decoding)
- for i in range(length)]
+ response = [
+ self.read_response(disable_decoding=disable_decoding)
+ for i in range(length)
+ ]
if isinstance(response, bytes) and disable_decoding is False:
response = self.encoder.decode(response)
return response
@@ -363,6 +367,7 @@ class PythonParser(BaseParser):
class HiredisParser(BaseParser):
"Parser class for connections using Hiredis"
+
def __init__(self, socket_read_size):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not installed")
@@ -381,18 +386,18 @@ class HiredisParser(BaseParser):
self._sock = connection._sock
self._socket_timeout = connection.socket_timeout
kwargs = {
- 'protocolError': InvalidResponse,
- 'replyError': self.parse_error,
+ "protocolError": InvalidResponse,
+ "replyError": self.parse_error,
}
# hiredis < 0.1.3 doesn't support functions that create exceptions
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
- kwargs['replyError'] = ResponseError
+ kwargs["replyError"] = ResponseError
if connection.encoder.decode_responses:
- kwargs['encoding'] = connection.encoder.encoding
+ kwargs["encoding"] = connection.encoder.encoding
if HIREDIS_SUPPORTS_ENCODING_ERRORS:
- kwargs['errors'] = connection.encoder.encoding_errors
+ kwargs["errors"] = connection.encoder.encoding_errors
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
@@ -408,8 +413,7 @@ class HiredisParser(BaseParser):
if self._next_response is False:
self._next_response = self._reader.gets()
if self._next_response is False:
- return self.read_from_socket(timeout=timeout,
- raise_on_timeout=False)
+ return self.read_from_socket(timeout=timeout, raise_on_timeout=False)
return True
def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
@@ -468,16 +472,22 @@ class HiredisParser(BaseParser):
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
if isinstance(response, ResponseError):
response = self.parse_error(response.args[0])
- elif isinstance(response, list) and response and \
- isinstance(response[0], ResponseError):
+ elif (
+ isinstance(response, list)
+ and response
+ and isinstance(response[0], ResponseError)
+ ):
response[0] = self.parse_error(response[0].args[0])
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
- elif isinstance(response, list) and response and \
- isinstance(response[0], ConnectionError):
+ elif (
+ isinstance(response, list)
+ and response
+ and isinstance(response[0], ConnectionError)
+ ):
raise response[0]
return response
@@ -491,14 +501,29 @@ else:
class Connection:
"Manages TCP communication to and from a Redis server"
- def __init__(self, host='localhost', port=6379, db=0, password=None,
- socket_timeout=None, socket_connect_timeout=None,
- socket_keepalive=False, socket_keepalive_options=None,
- socket_type=0, retry_on_timeout=False, encoding='utf-8',
- encoding_errors='strict', decode_responses=False,
- parser_class=DefaultParser, socket_read_size=65536,
- health_check_interval=0, client_name=None, username=None,
- retry=None, redis_connect_func=None):
+ def __init__(
+ self,
+ host="localhost",
+ port=6379,
+ db=0,
+ password=None,
+ socket_timeout=None,
+ socket_connect_timeout=None,
+ socket_keepalive=False,
+ socket_keepalive_options=None,
+ socket_type=0,
+ retry_on_timeout=False,
+ encoding="utf-8",
+ encoding_errors="strict",
+ decode_responses=False,
+ parser_class=DefaultParser,
+ socket_read_size=65536,
+ health_check_interval=0,
+ client_name=None,
+ username=None,
+ retry=None,
+ redis_connect_func=None,
+ ):
"""
Initialize a new Connection.
To specify a retry policy, first set `retry_on_timeout` to `True`
@@ -536,17 +561,13 @@ class Connection:
self._buffer_cutoff = 6000
def __repr__(self):
- repr_args = ','.join([f'{k}={v}' for k, v in self.repr_pieces()])
- return f'{self.__class__.__name__}<{repr_args}>'
+ repr_args = ",".join([f"{k}={v}" for k, v in self.repr_pieces()])
+ return f"{self.__class__.__name__}<{repr_args}>"
def repr_pieces(self):
- pieces = [
- ('host', self.host),
- ('port', self.port),
- ('db', self.db)
- ]
+ pieces = [("host", self.host), ("port", self.port), ("db", self.db)]
if self.client_name:
- pieces.append(('client_name', self.client_name))
+ pieces.append(("client_name", self.client_name))
return pieces
def __del__(self):
@@ -606,8 +627,9 @@ class Connection:
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
- for res in socket.getaddrinfo(self.host, self.port, self.socket_type,
- socket.SOCK_STREAM):
+ for res in socket.getaddrinfo(
+ self.host, self.port, self.socket_type, socket.SOCK_STREAM
+ ):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
@@ -658,12 +680,12 @@ class Connection:
# if username and/or password are set, authenticate
if self.username or self.password:
if self.username:
- auth_args = (self.username, self.password or '')
+ auth_args = (self.username, self.password or "")
else:
auth_args = (self.password,)
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
- self.send_command('AUTH', *auth_args, check_health=False)
+ self.send_command("AUTH", *auth_args, check_health=False)
try:
auth_response = self.read_response()
@@ -672,23 +694,23 @@ class Connection:
# server seems to be < 6.0.0 which expects a single password
# arg. retry auth with just the password.
# https://github.com/andymccurdy/redis-py/issues/1274
- self.send_command('AUTH', self.password, check_health=False)
+ self.send_command("AUTH", self.password, check_health=False)
auth_response = self.read_response()
- if str_if_bytes(auth_response) != 'OK':
- raise AuthenticationError('Invalid Username or Password')
+ if str_if_bytes(auth_response) != "OK":
+ raise AuthenticationError("Invalid Username or Password")
# if a client_name is given, set it
if self.client_name:
- self.send_command('CLIENT', 'SETNAME', self.client_name)
- if str_if_bytes(self.read_response()) != 'OK':
- raise ConnectionError('Error setting client name')
+ self.send_command("CLIENT", "SETNAME", self.client_name)
+ if str_if_bytes(self.read_response()) != "OK":
+ raise ConnectionError("Error setting client name")
# if a database is specified, switch to it
if self.db:
- self.send_command('SELECT', self.db)
- if str_if_bytes(self.read_response()) != 'OK':
- raise ConnectionError('Invalid Database')
+ self.send_command("SELECT", self.db)
+ if str_if_bytes(self.read_response()) != "OK":
+ raise ConnectionError("Invalid Database")
def disconnect(self):
"Disconnects from the Redis server"
@@ -705,9 +727,9 @@ class Connection:
def _send_ping(self):
"""Send PING, expect PONG in return"""
- self.send_command('PING', check_health=False)
- if str_if_bytes(self.read_response()) != 'PONG':
- raise ConnectionError('Bad response from PING health check')
+ self.send_command("PING", check_health=False)
+ if str_if_bytes(self.read_response()) != "PONG":
+ raise ConnectionError("Bad response from PING health check")
def _ping_failed(self, error):
"""Function to call when PING fails"""
@@ -736,7 +758,7 @@ class Connection:
except OSError as e:
self.disconnect()
if len(e.args) == 1:
- errno, errmsg = 'UNKNOWN', e.args[0]
+ errno, errmsg = "UNKNOWN", e.args[0]
else:
errno = e.args[0]
errmsg = e.args[1]
@@ -747,8 +769,9 @@ class Connection:
def send_command(self, *args, **kwargs):
"""Pack and send a command to the Redis server"""
- self.send_packed_command(self.pack_command(*args),
- check_health=kwargs.get('check_health', True))
+ self.send_packed_command(
+ self.pack_command(*args), check_health=kwargs.get("check_health", True)
+ )
def can_read(self, timeout=0):
"""Poll the socket to see if there's data that can be read."""
@@ -760,17 +783,15 @@ class Connection:
def read_response(self, disable_decoding=False):
"""Read the response from a previously sent command"""
try:
- response = self._parser.read_response(
- disable_decoding=disable_decoding
- )
+ response = self._parser.read_response(disable_decoding=disable_decoding)
except socket.timeout:
self.disconnect()
raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
except OSError as e:
self.disconnect()
raise ConnectionError(
- f"Error while reading from {self.host}:{self.port}"
- f" : {e.args}")
+ f"Error while reading from {self.host}:{self.port}" f" : {e.args}"
+ )
except BaseException:
self.disconnect()
raise
@@ -792,7 +813,7 @@ class Connection:
# not encoded.
if isinstance(args[0], str):
args = tuple(args[0].encode().split()) + args[1:]
- elif b' ' in args[0]:
+ elif b" " in args[0]:
args = tuple(args[0].split()) + args[1:]
buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
@@ -802,17 +823,28 @@ class Connection:
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values or memoryviews
arg_length = len(arg)
- if (len(buff) > buffer_cutoff or arg_length > buffer_cutoff
- or isinstance(arg, memoryview)):
+ if (
+ len(buff) > buffer_cutoff
+ or arg_length > buffer_cutoff
+ or isinstance(arg, memoryview)
+ ):
buff = SYM_EMPTY.join(
- (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF))
+ (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
+ )
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join(
- (buff, SYM_DOLLAR, str(arg_length).encode(),
- SYM_CRLF, arg, SYM_CRLF))
+ (
+ buff,
+ SYM_DOLLAR,
+ str(arg_length).encode(),
+ SYM_CRLF,
+ arg,
+ SYM_CRLF,
+ )
+ )
output.append(buff)
return output
@@ -826,8 +858,11 @@ class Connection:
for cmd in commands:
for chunk in self.pack_command(*cmd):
chunklen = len(chunk)
- if (buffer_length > buffer_cutoff or chunklen > buffer_cutoff
- or isinstance(chunk, memoryview)):
+ if (
+ buffer_length > buffer_cutoff
+ or chunklen > buffer_cutoff
+ or isinstance(chunk, memoryview)
+ ):
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
@@ -844,10 +879,15 @@ class Connection:
class SSLConnection(Connection):
-
- def __init__(self, ssl_keyfile=None, ssl_certfile=None,
- ssl_cert_reqs='required', ssl_ca_certs=None,
- ssl_check_hostname=False, **kwargs):
+ def __init__(
+ self,
+ ssl_keyfile=None,
+ ssl_certfile=None,
+ ssl_cert_reqs="required",
+ ssl_ca_certs=None,
+ ssl_check_hostname=False,
+ **kwargs,
+ ):
if not ssl_available:
raise RedisError("Python wasn't built with SSL support")
@@ -859,13 +899,14 @@ class SSLConnection(Connection):
ssl_cert_reqs = ssl.CERT_NONE
elif isinstance(ssl_cert_reqs, str):
CERT_REQS = {
- 'none': ssl.CERT_NONE,
- 'optional': ssl.CERT_OPTIONAL,
- 'required': ssl.CERT_REQUIRED
+ "none": ssl.CERT_NONE,
+ "optional": ssl.CERT_OPTIONAL,
+ "required": ssl.CERT_REQUIRED,
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
- f"Invalid SSL Certificate Requirements Flag: {ssl_cert_reqs}")
+ f"Invalid SSL Certificate Requirements Flag: {ssl_cert_reqs}"
+ )
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
@@ -878,22 +919,30 @@ class SSLConnection(Connection):
context.check_hostname = self.check_hostname
context.verify_mode = self.cert_reqs
if self.certfile and self.keyfile:
- context.load_cert_chain(certfile=self.certfile,
- keyfile=self.keyfile)
+ context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
if self.ca_certs:
context.load_verify_locations(self.ca_certs)
return context.wrap_socket(sock, server_hostname=self.host)
class UnixDomainSocketConnection(Connection):
-
- def __init__(self, path='', db=0, username=None, password=None,
- socket_timeout=None, encoding='utf-8',
- encoding_errors='strict', decode_responses=False,
- retry_on_timeout=False,
- parser_class=DefaultParser, socket_read_size=65536,
- health_check_interval=0, client_name=None,
- retry=None):
+ def __init__(
+ self,
+ path="",
+ db=0,
+ username=None,
+ password=None,
+ socket_timeout=None,
+ encoding="utf-8",
+ encoding_errors="strict",
+ decode_responses=False,
+ retry_on_timeout=False,
+ parser_class=DefaultParser,
+ socket_read_size=65536,
+ health_check_interval=0,
+ client_name=None,
+ retry=None,
+ ):
"""
Initialize a new UnixDomainSocketConnection.
To specify a retry policy, first set `retry_on_timeout` to `True`
@@ -926,11 +975,11 @@ class UnixDomainSocketConnection(Connection):
def repr_pieces(self):
pieces = [
- ('path', self.path),
- ('db', self.db),
+ ("path", self.path),
+ ("db", self.db),
]
if self.client_name:
- pieces.append(('client_name', self.client_name))
+ pieces.append(("client_name", self.client_name))
return pieces
def _connect(self):
@@ -952,11 +1001,11 @@ class UnixDomainSocketConnection(Connection):
)
-FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')
+FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
def to_bool(value):
- if value is None or value == '':
+ if value is None or value == "":
return None
if isinstance(value, str) and value.upper() in FALSE_STRINGS:
return False
@@ -964,14 +1013,14 @@ def to_bool(value):
URL_QUERY_ARGUMENT_PARSERS = {
- 'db': int,
- 'socket_timeout': float,
- 'socket_connect_timeout': float,
- 'socket_keepalive': to_bool,
- 'retry_on_timeout': to_bool,
- 'max_connections': int,
- 'health_check_interval': int,
- 'ssl_check_hostname': to_bool,
+ "db": int,
+ "socket_timeout": float,
+ "socket_connect_timeout": float,
+ "socket_keepalive": to_bool,
+ "retry_on_timeout": to_bool,
+ "max_connections": int,
+ "health_check_interval": int,
+ "ssl_check_hostname": to_bool,
}
@@ -987,42 +1036,42 @@ def parse_url(url):
try:
kwargs[name] = parser(value)
except (TypeError, ValueError):
- raise ValueError(
- f"Invalid value for `{name}` in connection URL."
- )
+ raise ValueError(f"Invalid value for `{name}` in connection URL.")
else:
kwargs[name] = value
if url.username:
- kwargs['username'] = unquote(url.username)
+ kwargs["username"] = unquote(url.username)
if url.password:
- kwargs['password'] = unquote(url.password)
+ kwargs["password"] = unquote(url.password)
# We only support redis://, rediss:// and unix:// schemes.
- if url.scheme == 'unix':
+ if url.scheme == "unix":
if url.path:
- kwargs['path'] = unquote(url.path)
- kwargs['connection_class'] = UnixDomainSocketConnection
+ kwargs["path"] = unquote(url.path)
+ kwargs["connection_class"] = UnixDomainSocketConnection
- elif url.scheme in ('redis', 'rediss'):
+ elif url.scheme in ("redis", "rediss"):
if url.hostname:
- kwargs['host'] = unquote(url.hostname)
+ kwargs["host"] = unquote(url.hostname)
if url.port:
- kwargs['port'] = int(url.port)
+ kwargs["port"] = int(url.port)
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
- if url.path and 'db' not in kwargs:
+ if url.path and "db" not in kwargs:
try:
- kwargs['db'] = int(unquote(url.path).replace('/', ''))
+ kwargs["db"] = int(unquote(url.path).replace("/", ""))
except (AttributeError, ValueError):
pass
- if url.scheme == 'rediss':
- kwargs['connection_class'] = SSLConnection
+ if url.scheme == "rediss":
+ kwargs["connection_class"] = SSLConnection
else:
- raise ValueError('Redis URL must specify one of the following '
- 'schemes (redis://, rediss://, unix://)')
+ raise ValueError(
+ "Redis URL must specify one of the following "
+ "schemes (redis://, rediss://, unix://)"
+ )
return kwargs
@@ -1040,6 +1089,7 @@ class ConnectionPool:
Any additional keyword arguments are passed to the constructor of
``connection_class``.
"""
+
@classmethod
def from_url(cls, url, **kwargs):
"""
@@ -1084,8 +1134,9 @@ class ConnectionPool:
kwargs.update(url_options)
return cls(**kwargs)
- def __init__(self, connection_class=Connection, max_connections=None,
- **connection_kwargs):
+ def __init__(
+ self, connection_class=Connection, max_connections=None, **connection_kwargs
+ ):
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, int) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
@@ -1194,12 +1245,12 @@ class ConnectionPool:
# closed. either way, reconnect and verify everything is good.
try:
if connection.can_read():
- raise ConnectionError('Connection has data')
+ raise ConnectionError("Connection has data")
except ConnectionError:
connection.disconnect()
connection.connect()
if connection.can_read():
- raise ConnectionError('Connection not ready')
+ raise ConnectionError("Connection not ready")
except BaseException:
# release the connection back to the pool so that we don't
# leak it
@@ -1212,9 +1263,9 @@ class ConnectionPool:
"Return an encoder based on encoding settings"
kwargs = self.connection_kwargs
return Encoder(
- encoding=kwargs.get('encoding', 'utf-8'),
- encoding_errors=kwargs.get('encoding_errors', 'strict'),
- decode_responses=kwargs.get('decode_responses', False)
+ encoding=kwargs.get("encoding", "utf-8"),
+ encoding_errors=kwargs.get("encoding_errors", "strict"),
+ decode_responses=kwargs.get("decode_responses", False),
)
def make_connection(self):
@@ -1259,8 +1310,9 @@ class ConnectionPool:
self._checkpid()
with self._lock:
if inuse_connections:
- connections = chain(self._available_connections,
- self._in_use_connections)
+ connections = chain(
+ self._available_connections, self._in_use_connections
+ )
else:
connections = self._available_connections
@@ -1301,16 +1353,23 @@ class BlockingConnectionPool(ConnectionPool):
>>> # not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
- def __init__(self, max_connections=50, timeout=20,
- connection_class=Connection, queue_class=LifoQueue,
- **connection_kwargs):
+
+ def __init__(
+ self,
+ max_connections=50,
+ timeout=20,
+ connection_class=Connection,
+ queue_class=LifoQueue,
+ **connection_kwargs,
+ ):
self.queue_class = queue_class
self.timeout = timeout
super().__init__(
connection_class=connection_class,
max_connections=max_connections,
- **connection_kwargs)
+ **connection_kwargs,
+ )
def reset(self):
# Create and fill up a thread safe queue with ``None`` values.
@@ -1381,12 +1440,12 @@ class BlockingConnectionPool(ConnectionPool):
# closed. either way, reconnect and verify everything is good.
try:
if connection.can_read():
- raise ConnectionError('Connection has data')
+ raise ConnectionError("Connection has data")
except ConnectionError:
connection.disconnect()
connection.connect()
if connection.can_read():
- raise ConnectionError('Connection not ready')
+ raise ConnectionError("Connection not ready")
except BaseException:
# release the connection back to the pool so that we don't leak it
self.release(connection)
diff --git a/redis/crc.py b/redis/crc.py
index 7d2ee50..c47e2ac 100644
--- a/redis/crc.py
+++ b/redis/crc.py
@@ -4,10 +4,7 @@ from binascii import crc_hqx
# For more information see: https://github.com/redis/redis/issues/2576
REDIS_CLUSTER_HASH_SLOTS = 16384
-__all__ = [
- "key_slot",
- "REDIS_CLUSTER_HASH_SLOTS"
-]
+__all__ = ["key_slot", "REDIS_CLUSTER_HASH_SLOTS"]
def key_slot(key, bucket=REDIS_CLUSTER_HASH_SLOTS):
@@ -20,5 +17,5 @@ def key_slot(key, bucket=REDIS_CLUSTER_HASH_SLOTS):
if start > -1:
end = key.find(b"}", start + 1)
if end > -1 and end != start + 1:
- key = key[start + 1: end]
+ key = key[start + 1 : end]
return crc_hqx(key, 0) % bucket
diff --git a/redis/exceptions.py b/redis/exceptions.py
index eb6ecc2..e37cad3 100644
--- a/redis/exceptions.py
+++ b/redis/exceptions.py
@@ -83,6 +83,7 @@ class AuthenticationWrongNumberOfArgsError(ResponseError):
An error to indicate that the wrong number of args
were sent to the AUTH command
"""
+
pass
@@ -90,6 +91,7 @@ class RedisClusterException(Exception):
"""
Base exception for the RedisCluster client
"""
+
pass
@@ -98,6 +100,7 @@ class ClusterError(RedisError):
Cluster errors occurred multiple times, resulting in an exhaustion of the
command execution TTL
"""
+
pass
@@ -111,6 +114,7 @@ class ClusterDownError(ClusterError, ResponseError):
unavailable. It automatically returns available as soon as all the slots
are covered again.
"""
+
def __init__(self, resp):
self.args = (resp,)
self.message = resp
@@ -135,8 +139,8 @@ class AskError(ResponseError):
"""should only redirect to master node"""
self.args = (resp,)
self.message = resp
- slot_id, new_node = resp.split(' ')
- host, port = new_node.rsplit(':', 1)
+ slot_id, new_node = resp.split(" ")
+ host, port = new_node.rsplit(":", 1)
self.slot_id = int(slot_id)
self.node_addr = self.host, self.port = host, int(port)
@@ -147,6 +151,7 @@ class TryAgainError(ResponseError):
Operations on keys that don't exist or are - during resharding - split
between the source and destination nodes, will generate a -TRYAGAIN error.
"""
+
def __init__(self, *args, **kwargs):
pass
@@ -157,6 +162,7 @@ class ClusterCrossSlotError(ResponseError):
A CROSSSLOT error is generated when keys in a request don't hash to the
same slot.
"""
+
message = "Keys in request don't hash to the same slot"
@@ -166,6 +172,7 @@ class MovedError(AskError):
A request sent to a node that doesn't serve this key will be replayed with
a MOVED error that points to the correct node.
"""
+
pass
@@ -174,6 +181,7 @@ class MasterDownError(ClusterDownError):
Error indicated MASTERDOWN error received from cluster.
Link with MASTER is down and replica-serve-stale-data is set to 'no'.
"""
+
pass
@@ -185,4 +193,5 @@ class SlotNotCoveredError(RedisClusterException):
If this error is raised the client should drop the current node layout and
attempt to reconnect and refresh the node layout again
"""
+
pass
diff --git a/redis/lock.py b/redis/lock.py
index d229752..95bb413 100644
--- a/redis/lock.py
+++ b/redis/lock.py
@@ -2,6 +2,7 @@ import threading
import time as mod_time
import uuid
from types import SimpleNamespace
+
from redis.exceptions import LockError, LockNotOwnedError
@@ -70,8 +71,16 @@ class Lock:
return 1
"""
- def __init__(self, redis, name, timeout=None, sleep=0.1,
- blocking=True, blocking_timeout=None, thread_local=True):
+ def __init__(
+ self,
+ redis,
+ name,
+ timeout=None,
+ sleep=0.1,
+ blocking=True,
+ blocking_timeout=None,
+ thread_local=True,
+ ):
"""
Create a new Lock instance named ``name`` using the Redis client
supplied by ``redis``.
@@ -129,11 +138,7 @@ class Lock:
self.blocking = blocking
self.blocking_timeout = blocking_timeout
self.thread_local = bool(thread_local)
- self.local = (
- threading.local()
- if self.thread_local
- else SimpleNamespace()
- )
+ self.local = threading.local() if self.thread_local else SimpleNamespace()
self.local.token = None
self.register_scripts()
@@ -145,8 +150,7 @@ class Lock:
if cls.lua_extend is None:
cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
if cls.lua_reacquire is None:
- cls.lua_reacquire = \
- client.register_script(cls.LUA_REACQUIRE_SCRIPT)
+ cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)
def __enter__(self):
if self.acquire():
@@ -222,8 +226,7 @@ class Lock:
if stored_token and not isinstance(stored_token, bytes):
encoder = self.redis.connection_pool.get_encoder()
stored_token = encoder.encode(stored_token)
- return self.local.token is not None and \
- stored_token == self.local.token
+ return self.local.token is not None and stored_token == self.local.token
def release(self):
"Releases the already acquired lock"
@@ -234,11 +237,10 @@ class Lock:
self.do_release(expected_token)
def do_release(self, expected_token):
- if not bool(self.lua_release(keys=[self.name],
- args=[expected_token],
- client=self.redis)):
- raise LockNotOwnedError("Cannot release a lock"
- " that's no longer owned")
+ if not bool(
+ self.lua_release(keys=[self.name], args=[expected_token], client=self.redis)
+ ):
+ raise LockNotOwnedError("Cannot release a lock" " that's no longer owned")
def extend(self, additional_time, replace_ttl=False):
"""
@@ -262,17 +264,11 @@ class Lock:
if not bool(
self.lua_extend(
keys=[self.name],
- args=[
- self.local.token,
- additional_time,
- replace_ttl and "1" or "0"
- ],
+ args=[self.local.token, additional_time, replace_ttl and "1" or "0"],
client=self.redis,
)
):
- raise LockNotOwnedError(
- "Cannot extend a lock that's" " no longer owned"
- )
+ raise LockNotOwnedError("Cannot extend a lock that's" " no longer owned")
return True
def reacquire(self):
@@ -287,9 +283,10 @@ class Lock:
def do_reacquire(self):
timeout = int(self.timeout * 1000)
- if not bool(self.lua_reacquire(keys=[self.name],
- args=[self.local.token, timeout],
- client=self.redis)):
- raise LockNotOwnedError("Cannot reacquire a lock that's"
- " no longer owned")
+ if not bool(
+ self.lua_reacquire(
+ keys=[self.name], args=[self.local.token, timeout], client=self.redis
+ )
+ ):
+ raise LockNotOwnedError("Cannot reacquire a lock that's" " no longer owned")
return True
diff --git a/redis/retry.py b/redis/retry.py
index cd06a23..75504c7 100644
--- a/redis/retry.py
+++ b/redis/retry.py
@@ -6,8 +6,9 @@ from redis.exceptions import ConnectionError, TimeoutError
class Retry:
"""Retry a specific number of times after a failure"""
- def __init__(self, backoff, retries,
- supported_errors=(ConnectionError, TimeoutError)):
+ def __init__(
+ self, backoff, retries, supported_errors=(ConnectionError, TimeoutError)
+ ):
"""
Initialize a `Retry` object with a `Backoff` object
that retries a maximum of `retries` times.
diff --git a/redis/sentinel.py b/redis/sentinel.py
index 06877bd..c9383d3 100644
--- a/redis/sentinel.py
+++ b/redis/sentinel.py
@@ -3,9 +3,8 @@ import weakref
from redis.client import Redis
from redis.commands import SentinelCommands
-from redis.connection import ConnectionPool, Connection, SSLConnection
-from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,
- TimeoutError)
+from redis.connection import Connection, ConnectionPool, SSLConnection
+from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError
from redis.utils import str_if_bytes
@@ -19,14 +18,14 @@ class SlaveNotFoundError(ConnectionError):
class SentinelManagedConnection(Connection):
def __init__(self, **kwargs):
- self.connection_pool = kwargs.pop('connection_pool')
+ self.connection_pool = kwargs.pop("connection_pool")
super().__init__(**kwargs)
def __repr__(self):
pool = self.connection_pool
- s = f'{type(self).__name__}<service={pool.service_name}%s>'
+ s = f"{type(self).__name__}<service={pool.service_name}%s>"
if self.host:
- host_info = f',host={self.host},port={self.port}'
+ host_info = f",host={self.host},port={self.port}"
s = s % host_info
return s
@@ -34,9 +33,9 @@ class SentinelManagedConnection(Connection):
self.host, self.port = address
super().connect()
if self.connection_pool.check_connection:
- self.send_command('PING')
- if str_if_bytes(self.read_response()) != 'PONG':
- raise ConnectionError('PING failed')
+ self.send_command("PING")
+ if str_if_bytes(self.read_response()) != "PONG":
+ raise ConnectionError("PING failed")
def connect(self):
if self._sock:
@@ -62,7 +61,7 @@ class SentinelManagedConnection(Connection):
# calling disconnect will force the connection to re-query
# sentinel during the next connect() attempt.
self.disconnect()
- raise ConnectionError('The previous master is now a slave')
+ raise ConnectionError("The previous master is now a slave")
raise
@@ -79,19 +78,21 @@ class SentinelConnectionPool(ConnectionPool):
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
- kwargs['connection_class'] = kwargs.get(
- 'connection_class',
- SentinelManagedSSLConnection if kwargs.pop('ssl', False)
- else SentinelManagedConnection)
- self.is_master = kwargs.pop('is_master', True)
- self.check_connection = kwargs.pop('check_connection', False)
+ kwargs["connection_class"] = kwargs.get(
+ "connection_class",
+ SentinelManagedSSLConnection
+ if kwargs.pop("ssl", False)
+ else SentinelManagedConnection,
+ )
+ self.is_master = kwargs.pop("is_master", True)
+ self.check_connection = kwargs.pop("check_connection", False)
super().__init__(**kwargs)
- self.connection_kwargs['connection_pool'] = weakref.proxy(self)
+ self.connection_kwargs["connection_pool"] = weakref.proxy(self)
self.service_name = service_name
self.sentinel_manager = sentinel_manager
def __repr__(self):
- role = 'master' if self.is_master else 'slave'
+ role = "master" if self.is_master else "slave"
return f"{type(self).__name__}<service={self.service_name}({role})"
def reset(self):
@@ -100,15 +101,14 @@ class SentinelConnectionPool(ConnectionPool):
self.slave_rr_counter = None
def owns_connection(self, connection):
- check = not self.is_master or \
- (self.is_master and
- self.master_address == (connection.host, connection.port))
+ check = not self.is_master or (
+ self.is_master and self.master_address == (connection.host, connection.port)
+ )
parent = super()
return check and parent.owns_connection(connection)
def get_master_address(self):
- master_address = self.sentinel_manager.discover_master(
- self.service_name)
+ master_address = self.sentinel_manager.discover_master(self.service_name)
if self.is_master:
if self.master_address != master_address:
self.master_address = master_address
@@ -124,8 +124,7 @@ class SentinelConnectionPool(ConnectionPool):
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in range(len(slaves)):
- self.slave_rr_counter = (
- self.slave_rr_counter + 1) % len(slaves)
+ self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
slave = slaves[self.slave_rr_counter]
yield slave
# Fallback to the master connection
@@ -133,7 +132,7 @@ class SentinelConnectionPool(ConnectionPool):
yield self.get_master_address()
except MasterNotFoundError:
pass
- raise SlaveNotFoundError(f'No slave found for {self.service_name!r}')
+ raise SlaveNotFoundError(f"No slave found for {self.service_name!r}")
class Sentinel(SentinelCommands):
@@ -165,20 +164,25 @@ class Sentinel(SentinelCommands):
establishing a connection to a Redis server.
"""
- def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
- **connection_kwargs):
+ def __init__(
+ self,
+ sentinels,
+ min_other_sentinels=0,
+ sentinel_kwargs=None,
+ **connection_kwargs,
+ ):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = {
- k: v
- for k, v in connection_kwargs.items()
- if k.startswith('socket_')
+ k: v for k, v in connection_kwargs.items() if k.startswith("socket_")
}
self.sentinel_kwargs = sentinel_kwargs
- self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
- for hostname, port in sentinels]
+ self.sentinels = [
+ Redis(hostname, port, **self.sentinel_kwargs)
+ for hostname, port in sentinels
+ ]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
@@ -188,9 +192,9 @@ class Sentinel(SentinelCommands):
once - If set to True, then execute the resulting command on a single
node at random, rather than across the entire sentinel cluster.
"""
- once = bool(kwargs.get('once', False))
- if 'once' in kwargs.keys():
- kwargs.pop('once')
+ once = bool(kwargs.get("once", False))
+ if "once" in kwargs.keys():
+ kwargs.pop("once")
if once:
for sentinel in self.sentinels:
@@ -202,16 +206,18 @@ class Sentinel(SentinelCommands):
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
- sentinel_addresses.append('{host}:{port}'.format_map(
- sentinel.connection_pool.connection_kwargs,
- ))
+ sentinel_addresses.append(
+ "{host}:{port}".format_map(
+ sentinel.connection_pool.connection_kwargs,
+ )
+ )
return f'{type(self).__name__}<sentinels=[{",".join(sentinel_addresses)}]>'
def check_master_state(self, state, service_name):
- if not state['is_master'] or state['is_sdown'] or state['is_odown']:
+ if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
return False
# Check if our sentinel doesn't see other nodes
- if state['num-other-sentinels'] < self.min_other_sentinels:
+ if state["num-other-sentinels"] < self.min_other_sentinels:
return False
return True
@@ -232,17 +238,19 @@ class Sentinel(SentinelCommands):
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
- sentinel, self.sentinels[0])
- return state['ip'], state['port']
+ sentinel,
+ self.sentinels[0],
+ )
+ return state["ip"], state["port"]
raise MasterNotFoundError(f"No master found for {service_name!r}")
def filter_slaves(self, slaves):
"Remove slaves that are in an ODOWN or SDOWN state"
slaves_alive = []
for slave in slaves:
- if slave['is_odown'] or slave['is_sdown']:
+ if slave["is_odown"] or slave["is_sdown"]:
continue
- slaves_alive.append((slave['ip'], slave['port']))
+ slaves_alive.append((slave["ip"], slave["port"]))
return slaves_alive
def discover_slaves(self, service_name):
@@ -257,8 +265,13 @@ class Sentinel(SentinelCommands):
return slaves
return []
- def master_for(self, service_name, redis_class=Redis,
- connection_pool_class=SentinelConnectionPool, **kwargs):
+ def master_for(
+ self,
+ service_name,
+ redis_class=Redis,
+ connection_pool_class=SentinelConnectionPool,
+ **kwargs,
+ ):
"""
Returns a redis client instance for the ``service_name`` master.
@@ -281,14 +294,22 @@ class Sentinel(SentinelCommands):
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
- kwargs['is_master'] = True
+ kwargs["is_master"] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
- return redis_class(connection_pool=connection_pool_class(
- service_name, self, **connection_kwargs))
-
- def slave_for(self, service_name, redis_class=Redis,
- connection_pool_class=SentinelConnectionPool, **kwargs):
+ return redis_class(
+ connection_pool=connection_pool_class(
+ service_name, self, **connection_kwargs
+ )
+ )
+
+ def slave_for(
+ self,
+ service_name,
+ redis_class=Redis,
+ connection_pool_class=SentinelConnectionPool,
+ **kwargs,
+ ):
"""
Returns redis client instance for the ``service_name`` slave(s).
@@ -306,8 +327,11 @@ class Sentinel(SentinelCommands):
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
- kwargs['is_master'] = False
+ kwargs["is_master"] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
- return redis_class(connection_pool=connection_pool_class(
- service_name, self, **connection_kwargs))
+ return redis_class(
+ connection_pool=connection_pool_class(
+ service_name, self, **connection_kwargs
+ )
+ )
diff --git a/redis/utils.py b/redis/utils.py
index 0e78cc5..50961cb 100644
--- a/redis/utils.py
+++ b/redis/utils.py
@@ -1,8 +1,8 @@
from contextlib import contextmanager
-
try:
import hiredis # noqa
+
HIREDIS_AVAILABLE = True
except ImportError:
HIREDIS_AVAILABLE = False
@@ -16,6 +16,7 @@ def from_url(url, **kwargs):
none is provided.
"""
from redis.client import Redis
+
return Redis.from_url(url, **kwargs)
@@ -28,9 +29,7 @@ def pipeline(redis_obj):
def str_if_bytes(value):
return (
- value.decode('utf-8', errors='replace')
- if isinstance(value, bytes)
- else value
+ value.decode("utf-8", errors="replace") if isinstance(value, bytes) else value
)