author    Chayim <chayim@users.noreply.github.com>  2021-10-25 17:06:04 +0300
committer GitHub <noreply@github.com>  2021-10-25 17:06:04 +0300
commit    3946da29d7e451a20289fb6e282516fa24e402af (patch)
tree      25cf4b73b4e00d66c75288790616ea882823e2b7
parent    0ef4c0711693b4b313ce97261214bd151d8261d5 (diff)
download  redis-py-3946da29d7e451a20289fb6e282516fa24e402af.tar.gz
redisjson support (#1636)
-rw-r--r--  docker-compose.yml  58
-rw-r--r--  docker/base/Dockerfile  4
-rw-r--r--  docker/base/Dockerfile.sentinel  3
-rw-r--r--  docker/base/README.md  1
-rw-r--r--  docker/master/Dockerfile  7
-rw-r--r--  docker/master/redis.conf  1
-rw-r--r--  docker/replica/Dockerfile  7
-rw-r--r--  docker/replica/redis.conf  1
-rw-r--r--  docker/sentinel_1/Dockerfile  7
-rw-r--r--  docker/sentinel_1/sentinel.conf  3
-rw-r--r--  docker/sentinel_2/Dockerfile  7
-rw-r--r--  docker/sentinel_2/sentinel.conf  3
-rw-r--r--  docker/sentinel_3/Dockerfile  7
-rw-r--r--  docker/sentinel_3/sentinel.conf  3
-rwxr-xr-x  redis/client.py  48
-rw-r--r--  redis/commands/__init__.py  11
-rw-r--r--  redis/commands/core.py (renamed from redis/commands.py)  124
-rw-r--r--  redis/commands/helpers.py  25
-rw-r--r--  redis/commands/json/__init__.py  84
-rw-r--r--  redis/commands/json/commands.py  197
-rw-r--r--  redis/commands/json/helpers.py  25
-rw-r--r--  redis/commands/json/path.py  21
-rw-r--r--  redis/commands/redismodules.py  17
-rw-r--r--  redis/commands/sentinel.py  97
-rw-r--r--  tests/conftest.py  33
-rw-r--r--  tests/test_commands.py  9
-rw-r--r--  tests/test_connection.py  33
-rw-r--r--  tests/test_json.py  235
-rw-r--r--  tox.ini  28
29 files changed, 855 insertions, 244 deletions
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index 103a51b..0000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-version: "3.8"
-
-services:
- master:
- build: docker/master
- ports:
- - "6379:6379"
-
- slave:
- build: docker/slave
- depends_on:
- - "master"
- ports:
- - "6380:6380"
-
- sentinel_1:
- build: docker/sentinel_1
- depends_on:
- - "slave"
- ports:
- - "26379:26379"
-
- sentinel_2:
- build: docker/sentinel_2
- depends_on:
- - "slave"
- ports:
- - "26380:26380"
-
- sentinel_3:
- build: docker/sentinel_3
- depends_on:
- - "slave"
- ports:
- - "26381:26381"
-
- test:
- build: .
- depends_on:
- - "sentinel_3"
- environment:
- REDIS_MASTER_HOST: master
- REDIS_MASTER_PORT: 6379
- CI:
- CI_BUILD_ID:
- CI_BUILD_URL:
- CI_JOB_ID:
- CODECOV_ENV:
- CODECOV_SLUG:
- CODECOV_TOKEN:
- CODECOV_URL:
- SHIPPABLE:
- GITHUB_ACTIONS:
- VCS_BRANCH_NAME:
- VCS_COMMIT_ID:
- VCS_PULL_REQUEST:
- VCS_SLUG:
- VCS_TAG:
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 60e4141..60be374 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -1 +1,3 @@
-FROM redis:6.2.5-buster \ No newline at end of file
+FROM redis:6.2.6-buster
+
+CMD ["redis-server", "/redis.conf"]
diff --git a/docker/base/Dockerfile.sentinel b/docker/base/Dockerfile.sentinel
new file mode 100644
index 0000000..93c16a7
--- /dev/null
+++ b/docker/base/Dockerfile.sentinel
@@ -0,0 +1,3 @@
+FROM redis:6.2.6-buster
+
+CMD ["redis-sentinel", "/sentinel.conf"]
diff --git a/docker/base/README.md b/docker/base/README.md
new file mode 100644
index 0000000..a2f26a8
--- /dev/null
+++ b/docker/base/README.md
@@ -0,0 +1 @@
+Docker images in this folder are built and uploaded to the redisfab Docker Hub repository.
diff --git a/docker/master/Dockerfile b/docker/master/Dockerfile
deleted file mode 100644
index 1f4bd73..0000000
--- a/docker/master/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM redis-py-base:latest
-
-COPY redis.conf /usr/local/etc/redis/redis.conf
-
-EXPOSE 6379
-
-CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ]
diff --git a/docker/master/redis.conf b/docker/master/redis.conf
index ed00766..15a31b5 100644
--- a/docker/master/redis.conf
+++ b/docker/master/redis.conf
@@ -1,3 +1,2 @@
-bind master 127.0.0.1
port 6379
save ""
diff --git a/docker/replica/Dockerfile b/docker/replica/Dockerfile
deleted file mode 100644
index 7b6bdbe..0000000
--- a/docker/replica/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM redis-py-base:latest
-
-COPY redis.conf /usr/local/etc/redis/redis.conf
-
-EXPOSE 6380
-
-CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ]
diff --git a/docker/replica/redis.conf b/docker/replica/redis.conf
index 4a1dcd7..a76d402 100644
--- a/docker/replica/redis.conf
+++ b/docker/replica/redis.conf
@@ -1,4 +1,3 @@
-bind replica 127.0.0.1
port 6380
save ""
replicaof master 6379
diff --git a/docker/sentinel_1/Dockerfile b/docker/sentinel_1/Dockerfile
deleted file mode 100644
index 66f6a75..0000000
--- a/docker/sentinel_1/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM redis-py-base:latest
-
-COPY sentinel.conf /usr/local/etc/redis/sentinel.conf
-
-EXPOSE 26379
-
-CMD [ "redis-sentinel", "/usr/local/etc/redis/sentinel.conf" ]
diff --git a/docker/sentinel_1/sentinel.conf b/docker/sentinel_1/sentinel.conf
index fc9aa68..bd2d830 100644
--- a/docker/sentinel_1/sentinel.conf
+++ b/docker/sentinel_1/sentinel.conf
@@ -1,7 +1,6 @@
-bind sentinel_1 127.0.0.1
port 26379
-sentinel monitor redis-py-test master 6379 2
+sentinel monitor redis-py-test 127.0.0.1 6379 2
sentinel down-after-milliseconds redis-py-test 5000
sentinel failover-timeout redis-py-test 60000
sentinel parallel-syncs redis-py-test 1
diff --git a/docker/sentinel_2/Dockerfile b/docker/sentinel_2/Dockerfile
deleted file mode 100644
index 1c0bb92..0000000
--- a/docker/sentinel_2/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM redis-py-base:latest
-
-COPY sentinel.conf /usr/local/etc/redis/sentinel.conf
-
-EXPOSE 26380
-
-CMD [ "redis-sentinel", "/usr/local/etc/redis/sentinel.conf" ]
diff --git a/docker/sentinel_2/sentinel.conf b/docker/sentinel_2/sentinel.conf
index 264443c..955621b 100644
--- a/docker/sentinel_2/sentinel.conf
+++ b/docker/sentinel_2/sentinel.conf
@@ -1,7 +1,6 @@
-bind sentinel_2 127.0.0.1
port 26380
-sentinel monitor redis-py-test master 6379 2
+sentinel monitor redis-py-test 127.0.0.1 6379 2
sentinel down-after-milliseconds redis-py-test 5000
sentinel failover-timeout redis-py-test 60000
sentinel parallel-syncs redis-py-test 1
diff --git a/docker/sentinel_3/Dockerfile b/docker/sentinel_3/Dockerfile
deleted file mode 100644
index cf1ec21..0000000
--- a/docker/sentinel_3/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM redis-py-base:latest
-
-COPY sentinel.conf /usr/local/etc/redis/sentinel.conf
-
-EXPOSE 26381
-
-CMD [ "redis-sentinel", "/usr/local/etc/redis/sentinel.conf" ]
diff --git a/docker/sentinel_3/sentinel.conf b/docker/sentinel_3/sentinel.conf
index b0827f1..62c4051 100644
--- a/docker/sentinel_3/sentinel.conf
+++ b/docker/sentinel_3/sentinel.conf
@@ -1,7 +1,6 @@
-bind sentinel_3 127.0.0.1
port 26381
-sentinel monitor redis-py-test master 6379 2
+sentinel monitor redis-py-test 127.0.0.1 6379 2
sentinel down-after-milliseconds redis-py-test 5000
sentinel failover-timeout redis-py-test 60000
sentinel parallel-syncs redis-py-test 1
diff --git a/redis/client.py b/redis/client.py
index a6bd183..4db9887 100755
--- a/redis/client.py
+++ b/redis/client.py
@@ -6,10 +6,7 @@ import re
import threading
import time
import warnings
-from redis.commands import (
- list_or_args,
- Commands
-)
+from redis.commands import CoreCommands, RedisModuleCommands, list_or_args
from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
SSLConnection)
from redis.lock import Lock
@@ -609,7 +606,7 @@ def parse_set_result(response, **options):
return response and str_if_bytes(response) == 'OK'
-class Redis(Commands, object):
+class Redis(RedisModuleCommands, CoreCommands, object):
"""
Implementation of the Redis protocol.
@@ -898,6 +895,47 @@ class Redis(Commands, object):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
+ def load_external_module(self, modname, funcname, func):
+ """
+ This function can be used to add externally defined redis modules,
+ and their namespaces to the redis client.
+ modname - A string containing the name of the redis module to look for
+ in the redis info block.
+ funcname - A string containing the name of the function to create
+ func - The function, being added to this class.
+
+ ex: Assume that one has a custom redis module named foomod that
+ creates commands named 'foo.dothing' and 'foo.anotherthing' in redis.
+ To load these functions into this namespace:
+
+ from redis import Redis
+ from foomodule import F
+ r = Redis()
+ r.load_external_module("foomod", "foo", F)
+ r.foo().dothing('your', 'arguments')
+
+ For a concrete example see the reimport of the redisjson module in
+ tests/test_connection.py::test_loading_external_modules
+ """
+ mods = self.loaded_modules
+ if modname.lower() not in mods:
+ raise ModuleError("{} is not loaded in redis.".format(modname))
+ setattr(self, funcname, func)
+
+ @property
+ def loaded_modules(self):
+ key = '__redis_modules__'
+ mods = getattr(self, key, None)
+ if mods is not None:
+ return mods
+
+ try:
+ mods = [f.get('name').lower() for f in self.info().get('modules')]
+ except TypeError:
+ mods = []
+ setattr(self, key, mods)
+ return mods
+
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
diff --git a/redis/commands/__init__.py b/redis/commands/__init__.py
new file mode 100644
index 0000000..f1ddaaa
--- /dev/null
+++ b/redis/commands/__init__.py
@@ -0,0 +1,11 @@
+from .core import CoreCommands
+from .redismodules import RedisModuleCommands
+from .helpers import list_or_args
+from .sentinel import SentinelCommands
+
+__all__ = [
+ 'CoreCommands',
+ 'RedisModuleCommands',
+ 'SentinelCommands',
+ 'list_or_args'
+]
diff --git a/redis/commands.py b/redis/commands/core.py
index 2697e78..6512b45 100644
--- a/redis/commands.py
+++ b/redis/commands/core.py
@@ -3,6 +3,7 @@ import time
import warnings
import hashlib
+from .helpers import list_or_args
from redis.exceptions import (
ConnectionError,
DataError,
@@ -11,24 +12,7 @@ from redis.exceptions import (
)
-def list_or_args(keys, args):
- # returns a single new list combining keys and args
- try:
- iter(keys)
- # a string or bytes instance can be iterated, but indicates
- # keys wasn't passed as a list
- if isinstance(keys, (bytes, str)):
- keys = [keys]
- else:
- keys = list(keys)
- except TypeError:
- keys = [keys]
- if args:
- keys.extend(args)
- return keys
-
-
-class Commands:
+class CoreCommands:
"""
A class containing all of the implemented redis commands. This class is
to be used as a mixin.
@@ -1173,16 +1157,16 @@ class Commands:
if ex is not None:
pieces.append('EX')
if isinstance(ex, datetime.timedelta):
- ex = int(ex.total_seconds())
- if isinstance(ex, int):
+ pieces.append(int(ex.total_seconds()))
+ elif isinstance(ex, int):
pieces.append(ex)
else:
raise DataError("ex must be datetime.timedelta or int")
if px is not None:
pieces.append('PX')
if isinstance(px, datetime.timedelta):
- px = int(px.total_seconds() * 1000)
- if isinstance(px, int):
+ pieces.append(int(px.total_seconds() * 1000))
+ elif isinstance(px, int):
pieces.append(px)
else:
raise DataError("px must be datetime.timedelta or int")
@@ -3413,99 +3397,3 @@ class BitFieldOperation:
command = self.command
self.reset()
return self.client.execute_command(*command)
-
-
-class SentinelCommands:
- """
- A class containing the commands specific to redis sentinal. This class is
- to be used as a mixin.
- """
-
- def sentinel(self, *args):
- "Redis Sentinel's SENTINEL command."
- warnings.warn(
- DeprecationWarning('Use the individual sentinel_* methods'))
-
- def sentinel_get_master_addr_by_name(self, service_name):
- "Returns a (host, port) pair for the given ``service_name``"
- return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
- service_name)
-
- def sentinel_master(self, service_name):
- "Returns a dictionary containing the specified masters state."
- return self.execute_command('SENTINEL MASTER', service_name)
-
- def sentinel_masters(self):
- "Returns a list of dictionaries containing each master's state."
- return self.execute_command('SENTINEL MASTERS')
-
- def sentinel_monitor(self, name, ip, port, quorum):
- "Add a new master to Sentinel to be monitored"
- return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
-
- def sentinel_remove(self, name):
- "Remove a master from Sentinel's monitoring"
- return self.execute_command('SENTINEL REMOVE', name)
-
- def sentinel_sentinels(self, service_name):
- "Returns a list of sentinels for ``service_name``"
- return self.execute_command('SENTINEL SENTINELS', service_name)
-
- def sentinel_set(self, name, option, value):
- "Set Sentinel monitoring parameters for a given master"
- return self.execute_command('SENTINEL SET', name, option, value)
-
- def sentinel_slaves(self, service_name):
- "Returns a list of slaves for ``service_name``"
- return self.execute_command('SENTINEL SLAVES', service_name)
-
- def sentinel_reset(self, pattern):
- """
- This command will reset all the masters with matching name.
- The pattern argument is a glob-style pattern.
-
- The reset process clears any previous state in a master (including a
- failover in progress), and removes every slave and sentinel already
- discovered and associated with the master.
- """
- return self.execute_command('SENTINEL RESET', pattern, once=True)
-
- def sentinel_failover(self, new_master_name):
- """
- Force a failover as if the master was not reachable, and without
- asking for agreement to other Sentinels (however a new version of the
- configuration will be published so that the other Sentinels will
- update their configurations).
- """
- return self.execute_command('SENTINEL FAILOVER', new_master_name)
-
- def sentinel_ckquorum(self, new_master_name):
- """
- Check if the current Sentinel configuration is able to reach the
- quorum needed to failover a master, and the majority needed to
- authorize the failover.
-
- This command should be used in monitoring systems to check if a
- Sentinel deployment is ok.
- """
- return self.execute_command('SENTINEL CKQUORUM',
- new_master_name,
- once=True)
-
- def sentinel_flushconfig(self):
- """
- Force Sentinel to rewrite its configuration on disk, including the
- current Sentinel state.
-
- Normally Sentinel rewrites the configuration every time something
- changes in its state (in the context of the subset of the state which
- is persisted on disk across restart).
- However sometimes it is possible that the configuration file is lost
- because of operation errors, disk failures, package upgrade scripts or
- configuration managers. In those cases a way to to force Sentinel to
- rewrite the configuration file is handy.
-
- This command works even if the previous configuration file is
- completely missing.
- """
- return self.execute_command('SENTINEL FLUSHCONFIG')
diff --git a/redis/commands/helpers.py b/redis/commands/helpers.py
new file mode 100644
index 0000000..b012621
--- /dev/null
+++ b/redis/commands/helpers.py
@@ -0,0 +1,25 @@
+def list_or_args(keys, args):
+ # returns a single new list combining keys and args
+ try:
+ iter(keys)
+ # a string or bytes instance can be iterated, but indicates
+ # keys wasn't passed as a list
+ if isinstance(keys, (bytes, str)):
+ keys = [keys]
+ else:
+ keys = list(keys)
+ except TypeError:
+ keys = [keys]
+ if args:
+ keys.extend(args)
+ return keys
+
+
+def nativestr(x):
+ """Return the decoded binary string, or a string, depending on type."""
+ return x.decode("utf-8", "replace") if isinstance(x, bytes) else x
+
+
+def delist(x):
+ """Given a list of binaries, return the stringified version."""
+ return [nativestr(obj) for obj in x]
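For reference, a minimal sketch (plain Python, no Redis connection needed) of how these helpers behave:

    from redis.commands.helpers import list_or_args, nativestr, delist

    # a single key plus extra args is flattened into one list
    assert list_or_args("k1", ["k2", "k3"]) == ["k1", "k2", "k3"]
    # an iterable of keys is copied rather than mutated in place
    assert list_or_args(["k1", "k2"], None) == ["k1", "k2"]
    # bytes responses are decoded to str, other values pass through
    assert nativestr(b"value") == "value"
    assert delist([b"a", b"b"]) == ["a", "b"]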
diff --git a/redis/commands/json/__init__.py b/redis/commands/json/__init__.py
new file mode 100644
index 0000000..92f1199
--- /dev/null
+++ b/redis/commands/json/__init__.py
@@ -0,0 +1,84 @@
+# from typing import Optional
+from json import JSONDecoder, JSONEncoder
+
+# # from redis.client import Redis
+
+from .helpers import bulk_of_jsons
+from ..helpers import nativestr, delist
+from .commands import JSONCommands
+# from ..feature import AbstractFeature
+
+
+class JSON(JSONCommands):
+ """
+ Create a client for talking to json.
+
+ :param decoder:
+ :type json.JSONDecoder: An instance of json.JSONDecoder
+
+ :param encoder:
+ :type json.JSONEncoder: An instance of json.JSONEncoder
+ """
+
+ def __init__(
+ self,
+ client,
+ decoder=JSONDecoder(),
+ encoder=JSONEncoder(),
+ ):
+ """
+ Create a client for talking to json.
+
+ :param decoder: An instance of json.JSONDecoder
+ :type decoder: json.JSONDecoder
+
+ :param encoder: An instance of json.JSONEncoder
+ :type encoder: json.JSONEncoder
+ """
+ # Set the module commands' callbacks
+ self.MODULE_CALLBACKS = {
+ "JSON.CLEAR": int,
+ "JSON.DEL": int,
+ "JSON.FORGET": int,
+ "JSON.GET": self._decode,
+ "JSON.MGET": bulk_of_jsons(self._decode),
+ "JSON.SET": lambda r: r and nativestr(r) == "OK",
+ "JSON.NUMINCRBY": self._decode,
+ "JSON.NUMMULTBY": self._decode,
+ "JSON.TOGGLE": lambda b: b == b"true",
+ "JSON.STRAPPEND": int,
+ "JSON.STRLEN": int,
+ "JSON.ARRAPPEND": int,
+ "JSON.ARRINDEX": int,
+ "JSON.ARRINSERT": int,
+ "JSON.ARRLEN": int,
+ "JSON.ARRPOP": self._decode,
+ "JSON.ARRTRIM": int,
+ "JSON.OBJLEN": int,
+ "JSON.OBJKEYS": delist,
+ # "JSON.RESP": delist,
+ "JSON.DEBUG": int,
+ }
+
+ self.client = client
+ self.execute_command = client.execute_command
+
+ for key, value in self.MODULE_CALLBACKS.items():
+ self.client.set_response_callback(key, value)
+
+ self.__encoder__ = encoder
+ self.__decoder__ = decoder
+
+ def _decode(self, obj):
+ """Get the decoder."""
+ if obj is None:
+ return obj
+
+ try:
+ return self.__decoder__.decode(obj)
+ except TypeError:
+ return self.__decoder__.decode(obj.decode())
+
+ def _encode(self, obj):
+ """Get the encoder."""
+ return self.__encoder__.encode(obj)
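As a rough usage sketch of the wrapper above (the port and the custom encoder settings are assumptions, not part of this commit), the JSON class can be constructed directly when non-default (de)serialization is wanted:

    import json
    import redis
    from redis.commands.json import JSON

    r = redis.Redis(port=36379)  # assumed RedisJSON-enabled server
    j = JSON(client=r,
             encoder=json.JSONEncoder(separators=(",", ":")),  # compact encoding
             decoder=json.JSONDecoder())
    j.set("user:1", ".", {"name": "alice", "age": 30})
    assert j.get("user:1") == {"name": "alice", "age": 30}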
diff --git a/redis/commands/json/commands.py b/redis/commands/json/commands.py
new file mode 100644
index 0000000..2f8039f
--- /dev/null
+++ b/redis/commands/json/commands.py
@@ -0,0 +1,197 @@
+from .path import Path, str_path
+from .helpers import decode_dict_keys
+
+
+class JSONCommands:
+ """json commands."""
+
+ def arrappend(self, name, path=Path.rootPath(), *args):
+ """Append the objects ``args`` to the array under the
+ ``path`` in key ``name``.
+ """
+ pieces = [name, str_path(path)]
+ for o in args:
+ pieces.append(self._encode(o))
+ return self.execute_command("JSON.ARRAPPEND", *pieces)
+
+ def arrindex(self, name, path, scalar, start=0, stop=-1):
+ """
+ Return the index of ``scalar`` in the JSON array under ``path`` at key
+ ``name``.
+
+ The search can be limited using the optional inclusive ``start``
+ and exclusive ``stop`` indices.
+ """
+ return self.execute_command(
+ "JSON.ARRINDEX", name, str_path(path), self._encode(scalar),
+ start, stop
+ )
+
+ def arrinsert(self, name, path, index, *args):
+ """Insert the objects ``args`` to the array at index ``index``
+ under the ``path` in key ``name``.
+ """
+ pieces = [name, str_path(path), index]
+ for o in args:
+ pieces.append(self._encode(o))
+ return self.execute_command("JSON.ARRINSERT", *pieces)
+
+ def forget(self, name, path=Path.rootPath()):
+ """Alias for jsondel (delete the JSON value)."""
+ return self.execute_command("JSON.FORGET", name, str_path(path))
+
+ def arrlen(self, name, path=Path.rootPath()):
+ """Return the length of the array JSON value under ``path``
+ at key ``name``.
+ """
+ return self.execute_command("JSON.ARRLEN", name, str_path(path))
+
+ def arrpop(self, name, path=Path.rootPath(), index=-1):
+ """Pop the element at ``index`` in the array JSON value under
+ ``path`` at key ``name``.
+ """
+ return self.execute_command("JSON.ARRPOP", name, str_path(path), index)
+
+ def arrtrim(self, name, path, start, stop):
+ """Trim the array JSON value under ``path`` at key ``name`` to the
+ inclusive range given by ``start`` and ``stop``.
+ """
+ return self.execute_command("JSON.ARRTRIM", name, str_path(path),
+ start, stop)
+
+ def type(self, name, path=Path.rootPath()):
+ """Get the type of the JSON value under ``path`` from key ``name``."""
+ return self.execute_command("JSON.TYPE", name, str_path(path))
+
+ def resp(self, name, path=Path.rootPath()):
+ """Return the JSON value under ``path`` at key ``name``."""
+ return self.execute_command("JSON.RESP", name, str_path(path))
+
+ def objkeys(self, name, path=Path.rootPath()):
+ """Return the key names in the dictionary JSON value under ``path`` at
+ key ``name``."""
+ return self.execute_command("JSON.OBJKEYS", name, str_path(path))
+
+ def objlen(self, name, path=Path.rootPath()):
+ """Return the length of the dictionary JSON value under ``path`` at key
+ ``name``.
+ """
+ return self.execute_command("JSON.OBJLEN", name, str_path(path))
+
+ def numincrby(self, name, path, number):
+ """Increment the numeric (integer or floating point) JSON value under
+ ``path`` at key ``name`` by the provided ``number``.
+ """
+ return self.execute_command(
+ "JSON.NUMINCRBY", name, str_path(path), self._encode(number)
+ )
+
+ def nummultby(self, name, path, number):
+ """Multiply the numeric (integer or floating point) JSON value under
+ ``path`` at key ``name`` with the provided ``number``.
+ """
+ return self.execute_command(
+ "JSON.NUMMULTBY", name, str_path(path), self._encode(number)
+ )
+
+ def clear(self, name, path=Path.rootPath()):
+ """
+ Empty arrays and objects (to have zero slots/keys without deleting the
+ array/object).
+
+ Return the count of cleared paths (ignoring non-array and non-objects
+ paths).
+ """
+ return self.execute_command("JSON.CLEAR", name, str_path(path))
+
+ def delete(self, name, path=Path.rootPath()):
+ """Delete the JSON value stored at key ``name`` under ``path``."""
+ return self.execute_command("JSON.DEL", name, str_path(path))
+
+ def get(self, name, *args, no_escape=False):
+ """
+ Get the object stored as a JSON value at key ``name``.
+
+ ``args`` is zero or more paths, and defaults to the root path.
+ ``no_escape`` is a boolean flag to add the no_escape option to get
+ non-ASCII characters.
+ """
+ pieces = [name]
+ if no_escape:
+ pieces.append("noescape")
+
+ if len(args) == 0:
+ pieces.append(Path.rootPath())
+
+ else:
+ for p in args:
+ pieces.append(str_path(p))
+
+ # Handle case where key doesn't exist. The JSONDecoder would raise a
+ # TypeError exception since it can't decode None
+ try:
+ return self.execute_command("JSON.GET", *pieces)
+ except TypeError:
+ return None
+
+ def mget(self, path, *args):
+ """Get the objects stored as a JSON values under ``path`` from keys
+ ``args``.
+ """
+ pieces = []
+ pieces.extend(args)
+ pieces.append(str_path(path))
+ return self.execute_command("JSON.MGET", *pieces)
+
+ def set(self, name, path, obj, nx=False, xx=False, decode_keys=False):
+ """
+ Set the JSON value at key ``name`` under the ``path`` to ``obj``.
+
+ ``nx`` if set to True, set ``value`` only if it does not exist.
+ ``xx`` if set to True, set ``value`` only if it exists.
+ ``decode_keys`` If set to True, the keys of ``obj`` will be decoded
+ with utf-8.
+ """
+ if decode_keys:
+ obj = decode_dict_keys(obj)
+
+ pieces = [name, str_path(path), self._encode(obj)]
+
+ # Handle existential modifiers
+ if nx and xx:
+ raise Exception(
+ "nx and xx are mutually exclusive: use one, the "
+ "other or neither - but not both"
+ )
+ elif nx:
+ pieces.append("NX")
+ elif xx:
+ pieces.append("XX")
+ return self.execute_command("JSON.SET", *pieces)
+
+ def strlen(self, name, path=Path.rootPath()):
+ """Return the length of the string JSON value under ``path`` at key
+ ``name``.
+ """
+ return self.execute_command("JSON.STRLEN", name, str_path(path))
+
+ def toggle(self, name, path=Path.rootPath()):
+ """Toggle boolean value under ``path`` at key ``name``.
+ returning the new value.
+ """
+ return self.execute_command("JSON.TOGGLE", name, str_path(path))
+
+ def strappend(self, name, string, path=Path.rootPath()):
+ """Append to the string JSON value under ``path`` at key ``name``
+ the provided ``string``.
+ """
+ return self.execute_command(
+ "JSON.STRAPPEND", name, str_path(path), self._encode(string)
+ )
+
+ def debug(self, name, path=Path.rootPath()):
+ """Return the memory usage in bytes of a value under ``path`` from
+ key ``name``.
+ """
+ return self.execute_command("JSON.DEBUG", "MEMORY",
+ name, str_path(path))
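A hedged sketch of the array commands above in action; the server address and the key name are illustrative only:

    import redis
    from redis.commands.json.path import Path

    r = redis.Redis(port=36379)  # assumed RedisJSON-enabled server
    r.json().set("scores", Path.rootPath(), [10, 20])
    assert r.json().arrappend("scores", Path.rootPath(), 30) == 3   # new length
    assert r.json().arrinsert("scores", Path.rootPath(), 0, 5) == 4
    assert r.json().arrindex("scores", Path.rootPath(), 20) == 2
    assert r.json().arrlen("scores") == 4
    assert r.json().arrpop("scores") == 30                          # pops the last element
    assert r.json().arrtrim("scores", Path.rootPath(), 0, 1) == 2
    assert r.json().get("scores") == [5, 10]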
diff --git a/redis/commands/json/helpers.py b/redis/commands/json/helpers.py
new file mode 100644
index 0000000..8fb20d9
--- /dev/null
+++ b/redis/commands/json/helpers.py
@@ -0,0 +1,25 @@
+import copy
+
+
+def bulk_of_jsons(d):
+ """Replace serialized JSON values with objects in a
+ bulk array response (list).
+ """
+
+ def _f(b):
+ for index, item in enumerate(b):
+ if item is not None:
+ b[index] = d(item)
+ return b
+
+ return _f
+
+
+def decode_dict_keys(obj):
+ """Decode the keys of the given dictionary with utf-8."""
+ newobj = copy.copy(obj)
+ for k in obj.keys():
+ if isinstance(k, bytes):
+ newobj[k.decode("utf-8")] = newobj[k]
+ newobj.pop(k)
+ return newobj
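To illustrate (plain Python, no server required): decode_dict_keys rewrites only top-level bytes keys on a shallow copy, and bulk_of_jsons wraps a decoder so it skips None entries in a bulk reply:

    import json
    from redis.commands.json.helpers import bulk_of_jsons, decode_dict_keys

    raw = {b"name": "alice", "age": 30, b"meta": {b"x": 1}}
    assert decode_dict_keys(raw) == {"name": "alice", "age": 30, "meta": {b"x": 1}}

    decode = bulk_of_jsons(json.loads)
    assert decode(['{"a": 1}', None]) == [{"a": 1}, None]  # missing keys stay None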
diff --git a/redis/commands/json/path.py b/redis/commands/json/path.py
new file mode 100644
index 0000000..dff8648
--- /dev/null
+++ b/redis/commands/json/path.py
@@ -0,0 +1,21 @@
+def str_path(p):
+ """Return the string representation of a path if it is of class Path."""
+ if isinstance(p, Path):
+ return p.strPath
+ else:
+ return p
+
+
+class Path(object):
+ """This class represents a path in a JSON value."""
+
+ strPath = ""
+
+ @staticmethod
+ def rootPath():
+ """Return the root path's string representation."""
+ return "."
+
+ def __init__(self, path):
+ """Make a new path based on the string representation in `path`."""
+ self.strPath = path
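For example, str_path passes plain strings through unchanged and unwraps Path instances (no server required):

    from redis.commands.json.path import Path, str_path

    assert str_path(Path.rootPath()) == "."   # rootPath() already returns a str
    assert str_path(Path("friends[0].name")) == "friends[0].name"
    assert str_path(".address.city") == ".address.city"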
diff --git a/redis/commands/redismodules.py b/redis/commands/redismodules.py
new file mode 100644
index 0000000..fb53107
--- /dev/null
+++ b/redis/commands/redismodules.py
@@ -0,0 +1,17 @@
+from json import JSONEncoder, JSONDecoder
+from redis.exceptions import ModuleError
+
+
+class RedisModuleCommands:
+ """This class contains the wrapper functions to bring supported redis
+ modules into the command namepsace.
+ """
+
+ def json(self, encoder=JSONEncoder(), decoder=JSONDecoder()):
+ """Access the json namespace, providing support for redis json."""
+ if 'rejson' not in self.loaded_modules:
+ raise ModuleError("rejson is not a loaded in the redis instance.")
+
+ from .json import JSON
+ jj = JSON(client=self, encoder=encoder, decoder=decoder)
+ return jj
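An end-to-end sketch of the new entry point (host and port are assumptions; the target server must have ReJSON loaded, otherwise ModuleError is raised as above):

    import redis
    from redis.exceptions import ModuleError

    r = redis.Redis(host="localhost", port=36379)  # assumed module-enabled server
    try:
        r.json().set("doc", ".", {"greeting": "hello"})
        print(r.json().get("doc"))                 # {'greeting': 'hello'}
    except ModuleError:
        print("rejson is not loaded on this server")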
diff --git a/redis/commands/sentinel.py b/redis/commands/sentinel.py
new file mode 100644
index 0000000..1f02984
--- /dev/null
+++ b/redis/commands/sentinel.py
@@ -0,0 +1,97 @@
+import warnings
+
+
+class SentinelCommands:
+ """
+ A class containing the commands specific to redis sentinel. This class is
+ to be used as a mixin.
+ """
+
+ def sentinel(self, *args):
+ "Redis Sentinel's SENTINEL command."
+ warnings.warn(
+ DeprecationWarning('Use the individual sentinel_* methods'))
+
+ def sentinel_get_master_addr_by_name(self, service_name):
+ "Returns a (host, port) pair for the given ``service_name``"
+ return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
+ service_name)
+
+ def sentinel_master(self, service_name):
+ "Returns a dictionary containing the specified masters state."
+ return self.execute_command('SENTINEL MASTER', service_name)
+
+ def sentinel_masters(self):
+ "Returns a list of dictionaries containing each master's state."
+ return self.execute_command('SENTINEL MASTERS')
+
+ def sentinel_monitor(self, name, ip, port, quorum):
+ "Add a new master to Sentinel to be monitored"
+ return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
+
+ def sentinel_remove(self, name):
+ "Remove a master from Sentinel's monitoring"
+ return self.execute_command('SENTINEL REMOVE', name)
+
+ def sentinel_sentinels(self, service_name):
+ "Returns a list of sentinels for ``service_name``"
+ return self.execute_command('SENTINEL SENTINELS', service_name)
+
+ def sentinel_set(self, name, option, value):
+ "Set Sentinel monitoring parameters for a given master"
+ return self.execute_command('SENTINEL SET', name, option, value)
+
+ def sentinel_slaves(self, service_name):
+ "Returns a list of slaves for ``service_name``"
+ return self.execute_command('SENTINEL SLAVES', service_name)
+
+ def sentinel_reset(self, pattern):
+ """
+ This command will reset all the masters with a matching name.
+ The pattern argument is a glob-style pattern.
+
+ The reset process clears any previous state in a master (including a
+ failover in progress), and removes every slave and sentinel already
+ discovered and associated with the master.
+ """
+ return self.execute_command('SENTINEL RESET', pattern, once=True)
+
+ def sentinel_failover(self, new_master_name):
+ """
+ Force a failover as if the master was not reachable, and without
+ asking for agreement to other Sentinels (however a new version of the
+ configuration will be published so that the other Sentinels will
+ update their configurations).
+ """
+ return self.execute_command('SENTINEL FAILOVER', new_master_name)
+
+ def sentinel_ckquorum(self, new_master_name):
+ """
+ Check if the current Sentinel configuration is able to reach the
+ quorum needed to failover a master, and the majority needed to
+ authorize the failover.
+
+ This command should be used in monitoring systems to check if a
+ Sentinel deployment is ok.
+ """
+ return self.execute_command('SENTINEL CKQUORUM',
+ new_master_name,
+ once=True)
+
+ def sentinel_flushconfig(self):
+ """
+ Force Sentinel to rewrite its configuration on disk, including the
+ current Sentinel state.
+
+ Normally Sentinel rewrites the configuration every time something
+ changes in its state (in the context of the subset of the state which
+ is persisted on disk across restart).
+ However sometimes it is possible that the configuration file is lost
+ because of operation errors, disk failures, package upgrade scripts or
+ configuration managers. In those cases a way to force Sentinel to
+ rewrite the configuration file is handy.
+
+ This command works even if the previous configuration file is
+ completely missing.
+ """
+ return self.execute_command('SENTINEL FLUSHCONFIG')
diff --git a/tests/conftest.py b/tests/conftest.py
index c099463..9ca429d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -11,6 +11,7 @@ from urllib.parse import urlparse
REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/9"
+default_redismod_url = "redis://localhost:36379/9"
def pytest_addoption(parser):
@@ -19,6 +20,12 @@ def pytest_addoption(parser):
help="Redis connection string,"
" defaults to `%(default)s`")
+ parser.addoption('--redismod-url', default=default_redismod_url,
+ action="store",
+ help="Connection string to redis server"
+ " with loaded modules,"
+ " defaults to `%(default)s`")
+
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
@@ -35,6 +42,11 @@ def pytest_sessionstart(session):
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
+ # module info
+ redismod_url = session.config.getoption("--redismod-url")
+ info = _get_info(redismod_url)
+ REDIS_INFO["modules"] = info["modules"]
+
def skip_if_server_version_lt(min_version):
redis_version = REDIS_INFO["version"]
@@ -57,6 +69,21 @@ def skip_unless_arch_bits(arch_bits):
reason="server is not {}-bit".format(arch_bits))
+def skip_ifmodversion_lt(min_version: str, module_name: str):
+ modules = REDIS_INFO["modules"]
+ if modules == []:
+ return pytest.mark.skipif(True, reason="No redis modules found")
+
+ for j in modules:
+ if module_name == j.get('name'):
+ version = j.get('ver')
+ mv = int(min_version.replace(".", ""))
+ check = version < mv
+ return pytest.mark.skipif(check, reason="Redis module version")
+
+ raise AttributeError("No redis module named {}".format(module_name))
+
+
def _get_client(cls, request, single_connection_client=True, flushdb=True,
**kwargs):
"""
@@ -89,6 +116,12 @@ def _get_client(cls, request, single_connection_client=True, flushdb=True,
@pytest.fixture()
+def modclient(request, port=36379, **kwargs):
+ with _get_client(redis.Redis, request, port=port, **kwargs) as client:
+ yield client
+
+
+@pytest.fixture()
def r(request):
with _get_client(redis.Redis, request) as client:
yield client
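A sketch of how a test could combine the new modclient fixture with skip_ifmodversion_lt (assuming the test module sits in tests/ next to conftest.py; the version string is illustrative):

    import pytest
    from redis.commands.json.path import Path
    from .conftest import skip_ifmodversion_lt

    @pytest.mark.json
    @skip_ifmodversion_lt("1.0.0", "ReJSON")  # skip when the loaded module is older
    def test_json_roundtrip(modclient):
        assert modclient.json().set("k", Path.rootPath(), {"ok": True})
        assert modclient.json().get("k") == {"ok": True}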
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 694090e..6d65931 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -2552,7 +2552,7 @@ class TestRedisCommands:
assert r.geosearch('barcelona', member='place3', radius=100,
unit='km', count=2) == [b'place3', b'\x80place2']
assert r.geosearch('barcelona', member='place3', radius=100,
- unit='km', count=1, any=True)[0] \
+ unit='km', count=1, any=1)[0] \
in [b'place1', b'place3', b'\x80place2']
@skip_unless_arch_bits(64)
@@ -2657,8 +2657,7 @@ class TestRedisCommands:
# use any without count
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', member='place3',
- radius=100, any=True)
+ assert r.geosearch('barcelona', member='place3', radius=100, any=1)
@skip_if_server_version_lt('6.2.0')
def test_geosearchstore(self, r):
@@ -3239,7 +3238,6 @@ class TestRedisCommands:
response = r.xpending_range(stream, group,
min='-', max='+', count=5,
consumername=consumer1)
- assert len(response) == 1
assert response[0]['message_id'] == m1
assert response[0]['consumer'] == consumer1.encode()
@@ -3604,7 +3602,8 @@ class TestRedisCommands:
@skip_if_server_version_lt('4.0.0')
def test_module_list(self, r):
assert isinstance(r.module_list(), list)
- assert not r.module_list()
+ for x in r.module_list():
+ assert isinstance(x, dict)
@skip_if_server_version_lt('2.8.13')
def test_command_count(self, r):
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 128bac7..6728e0a 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -1,8 +1,10 @@
from unittest import mock
+import types
import pytest
-from redis.exceptions import InvalidResponse
+from redis.exceptions import InvalidResponse, ModuleError
from redis.utils import HIREDIS_AVAILABLE
+from .conftest import skip_if_server_version_lt
@pytest.mark.skipif(HIREDIS_AVAILABLE, reason='PythonParser only')
@@ -13,3 +15,32 @@ def test_invalid_response(r):
with pytest.raises(InvalidResponse) as cm:
parser.read_response()
assert str(cm.value) == 'Protocol Error: %r' % raw
+
+
+@skip_if_server_version_lt('4.0.0')
+def test_loaded_modules(r, modclient):
+ assert r.loaded_modules == []
+ assert 'rejson' in modclient.loaded_modules
+
+
+@skip_if_server_version_lt('4.0.0')
+def test_loading_external_modules(r, modclient):
+ def inner():
+ pass
+
+ with pytest.raises(ModuleError):
+ r.load_external_module('rejson', 'myfuncname', inner)
+
+ modclient.load_external_module('rejson', 'myfuncname', inner)
+ assert getattr(modclient, 'myfuncname') == inner
+ assert isinstance(getattr(modclient, 'myfuncname'), types.FunctionType)
+
+ # and call it
+ from redis.commands import RedisModuleCommands
+ j = RedisModuleCommands.json
+ modclient.load_external_module('rejson', 'sometestfuncname', j)
+
+ d = {'hello': 'world!'}
+ mod = j(modclient)
+ mod.set("fookey", ".", d)
+ assert mod.get('fookey') == d
diff --git a/tests/test_json.py b/tests/test_json.py
new file mode 100644
index 0000000..96675f1
--- /dev/null
+++ b/tests/test_json.py
@@ -0,0 +1,235 @@
+import pytest
+import redis
+from redis.commands.json.path import Path
+from .conftest import skip_ifmodversion_lt
+
+
+@pytest.fixture
+def client(modclient):
+ modclient.flushdb()
+ return modclient
+
+
+@pytest.mark.json
+def test_json_setbinarykey(client):
+ d = {"hello": "world", b"some": "value"}
+ with pytest.raises(TypeError):
+ client.json().set("somekey", Path.rootPath(), d)
+ assert client.json().set("somekey", Path.rootPath(), d, decode_keys=True)
+
+
+@pytest.mark.json
+def test_json_setgetdeleteforget(client):
+ assert client.json().set("foo", Path.rootPath(), "bar")
+ assert client.json().get("foo") == "bar"
+ assert client.json().get("baz") is None
+ assert client.json().delete("foo") == 1
+ assert client.json().forget("foo") == 0 # second delete
+ assert client.exists("foo") == 0
+
+
+@pytest.mark.json
+def test_justaget(client):
+ client.json().set("foo", Path.rootPath(), "bar")
+ assert client.json().get("foo") == "bar"
+
+
+@pytest.mark.json
+def test_json_get_jset(client):
+ assert client.json().set("foo", Path.rootPath(), "bar")
+ assert "bar" == client.json().get("foo")
+ assert client.json().get("baz") is None
+ assert 1 == client.json().delete("foo")
+ assert client.exists("foo") == 0
+
+
+@pytest.mark.json
+def test_nonascii_setgetdelete(client):
+ assert client.json().set("notascii", Path.rootPath(),
+ "hyvää-élève") is True
+ assert "hyvää-élève" == client.json().get("notascii", no_escape=True)
+ assert 1 == client.json().delete("notascii")
+ assert client.exists("notascii") == 0
+
+
+@pytest.mark.json
+def test_jsonsetexistentialmodifiersshouldsucceed(client):
+ obj = {"foo": "bar"}
+ assert client.json().set("obj", Path.rootPath(), obj)
+
+ # Test that flags prevent updates when conditions are unmet
+ assert client.json().set("obj", Path("foo"), "baz", nx=True) is None
+ assert client.json().set("obj", Path("qaz"), "baz", xx=True) is None
+
+ # Test that flags allow updates when conditions are met
+ assert client.json().set("obj", Path("foo"), "baz", xx=True)
+ assert client.json().set("obj", Path("qaz"), "baz", nx=True)
+
+ # Test that flags are mutually exclusive
+ with pytest.raises(Exception):
+ client.json().set("obj", Path("foo"), "baz", nx=True, xx=True)
+
+
+@pytest.mark.json
+def test_mgetshouldsucceed(client):
+ client.json().set("1", Path.rootPath(), 1)
+ client.json().set("2", Path.rootPath(), 2)
+ r = client.json().mget(Path.rootPath(), "1", "2")
+ e = [1, 2]
+ assert e == r
+
+
+@pytest.mark.json
+@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
+def test_clearShouldSucceed(client):
+ client.json().set("arr", Path.rootPath(), [0, 1, 2, 3, 4])
+ assert 1 == client.json().clear("arr", Path.rootPath())
+ assert [] == client.json().get("arr")
+
+
+@pytest.mark.json
+def test_typeshouldsucceed(client):
+ client.json().set("1", Path.rootPath(), 1)
+ assert b"integer" == client.json().type("1")
+
+
+@pytest.mark.json
+def test_numincrbyshouldsucceed(client):
+ client.json().set("num", Path.rootPath(), 1)
+ assert 2 == client.json().numincrby("num", Path.rootPath(), 1)
+ assert 2.5 == client.json().numincrby("num", Path.rootPath(), 0.5)
+ assert 1.25 == client.json().numincrby("num", Path.rootPath(), -1.25)
+
+
+@pytest.mark.json
+def test_nummultbyshouldsucceed(client):
+ client.json().set("num", Path.rootPath(), 1)
+ assert 2 == client.json().nummultby("num", Path.rootPath(), 2)
+ assert 5 == client.json().nummultby("num", Path.rootPath(), 2.5)
+ assert 2.5 == client.json().nummultby("num", Path.rootPath(), 0.5)
+
+
+@pytest.mark.json
+@skip_ifmodversion_lt("99.99.99", "ReJSON") # todo: update after the release
+def test_toggleShouldSucceed(client):
+ client.json().set("bool", Path.rootPath(), False)
+ assert client.json().toggle("bool", Path.rootPath())
+ assert not client.json().toggle("bool", Path.rootPath())
+ # check non-boolean value
+ client.json().set("num", Path.rootPath(), 1)
+ with pytest.raises(redis.exceptions.ResponseError):
+ client.json().toggle("num", Path.rootPath())
+
+
+@pytest.mark.json
+def test_strappendshouldsucceed(client):
+ client.json().set("str", Path.rootPath(), "foo")
+ assert 6 == client.json().strappend("str", "bar", Path.rootPath())
+ assert "foobar" == client.json().get("str", Path.rootPath())
+
+
+@pytest.mark.json
+def test_debug(client):
+ client.json().set("str", Path.rootPath(), "foo")
+ assert 24 == client.json().debug("str", Path.rootPath())
+
+
+@pytest.mark.json
+def test_strlenshouldsucceed(client):
+ client.json().set("str", Path.rootPath(), "foo")
+ assert 3 == client.json().strlen("str", Path.rootPath())
+ client.json().strappend("str", "bar", Path.rootPath())
+ assert 6 == client.json().strlen("str", Path.rootPath())
+
+
+@pytest.mark.json
+def test_arrappendshouldsucceed(client):
+ client.json().set("arr", Path.rootPath(), [1])
+ assert 2 == client.json().arrappend("arr", Path.rootPath(), 2)
+ assert 4 == client.json().arrappend("arr", Path.rootPath(), 3, 4)
+ assert 7 == client.json().arrappend("arr", Path.rootPath(), *[5, 6, 7])
+
+
+@pytest.mark.json
+def testArrIndexShouldSucceed(client):
+ client.json().set("arr", Path.rootPath(), [0, 1, 2, 3, 4])
+ assert 1 == client.json().arrindex("arr", Path.rootPath(), 1)
+ assert -1 == client.json().arrindex("arr", Path.rootPath(), 1, 2)
+
+
+@pytest.mark.json
+def test_arrinsertshouldsucceed(client):
+ client.json().set("arr", Path.rootPath(), [0, 4])
+ assert 5 == client.json().arrinsert(
+ "arr",
+ Path.rootPath(),
+ 1,
+ *[
+ 1,
+ 2,
+ 3,
+ ]
+ )
+ assert [0, 1, 2, 3, 4] == client.json().get("arr")
+
+
+@pytest.mark.json
+def test_arrlenshouldsucceed(client):
+ client.json().set("arr", Path.rootPath(), [0, 1, 2, 3, 4])
+ assert 5 == client.json().arrlen("arr", Path.rootPath())
+
+
+@pytest.mark.json
+def test_arrpopshouldsucceed(client):
+ client.json().set("arr", Path.rootPath(), [0, 1, 2, 3, 4])
+ assert 4 == client.json().arrpop("arr", Path.rootPath(), 4)
+ assert 3 == client.json().arrpop("arr", Path.rootPath(), -1)
+ assert 2 == client.json().arrpop("arr", Path.rootPath())
+ assert 0 == client.json().arrpop("arr", Path.rootPath(), 0)
+ assert [1] == client.json().get("arr")
+
+
+@pytest.mark.json
+def test_arrtrimshouldsucceed(client):
+ client.json().set("arr", Path.rootPath(), [0, 1, 2, 3, 4])
+ assert 3 == client.json().arrtrim("arr", Path.rootPath(), 1, 3)
+ assert [1, 2, 3] == client.json().get("arr")
+
+
+@pytest.mark.json
+def test_respshouldsucceed(client):
+ obj = {"foo": "bar", "baz": 1, "qaz": True}
+ client.json().set("obj", Path.rootPath(), obj)
+ assert b"bar" == client.json().resp("obj", Path("foo"))
+ assert 1 == client.json().resp("obj", Path("baz"))
+ assert client.json().resp("obj", Path("qaz"))
+
+
+@pytest.mark.json
+def test_objkeysshouldsucceed(client):
+ obj = {"foo": "bar", "baz": "qaz"}
+ client.json().set("obj", Path.rootPath(), obj)
+ keys = client.json().objkeys("obj", Path.rootPath())
+ keys.sort()
+ exp = list(obj.keys())
+ exp.sort()
+ assert exp == keys
+
+
+@pytest.mark.json
+def test_objlenshouldsucceed(client):
+ obj = {"foo": "bar", "baz": "qaz"}
+ client.json().set("obj", Path.rootPath(), obj)
+ assert len(obj) == client.json().objlen("obj", Path.rootPath())
+
+
+# @pytest.mark.pipeline
+# @pytest.mark.json
+# def test_pipelineshouldsucceed(client):
+# p = client.json().pipeline()
+# p.set("foo", Path.rootPath(), "bar")
+# p.get("foo")
+# p.delete("foo")
+# assert [True, "bar", 1] == p.execute()
+# assert client.keys() == []
+# assert client.get("foo") is None
diff --git a/tox.ini b/tox.ini
index bfe937f..7805296 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,7 @@
[pytest]
addopts = -s
+markers =
+ json: run only the redisjson module tests
[tox]
minversion = 3.2.0
@@ -8,63 +10,63 @@ envlist = {py35,py36,py37,py38,py39,pypy3}-{plain,hiredis}, flake8
[docker:master]
name = master
-image = redis:6.2-bullseye
+image = redisfab/redis-py:6.2.6-buster
ports =
6379:6379/tcp
healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6379)) else False"
volumes =
- bind:rw:{toxinidir}/docker/master/redis.conf:/usr/local/etc/redis/redis.conf
+ bind:rw:{toxinidir}/docker/master/redis.conf:/redis.conf
[docker:replica]
name = replica
-image = redis:6.2-bullseye
+image = redisfab/redis-py:6.2.6-buster
links =
master:master
ports =
6380:6380/tcp
healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6380)) else False"
volumes =
- bind:rw:{toxinidir}/docker/replica/redis.conf:/usr/local/etc/redis/redis.conf
+ bind:rw:{toxinidir}/docker/replica/redis.conf:/redis.conf
[docker:sentinel_1]
name = sentinel_1
-image = redis:6.2-bullseye
+image = redisfab/redis-py-sentinel:6.2.6-buster
links =
master:master
ports =
26379:26379/tcp
healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26379)) else False"
volumes =
- bind:rw:{toxinidir}/docker/sentinel_1/sentinel.conf:/usr/local/etc/redis/sentinel.conf
+ bind:rw:{toxinidir}/docker/sentinel_1/sentinel.conf:/sentinel.conf
[docker:sentinel_2]
name = sentinel_2
-image = redis:6.2-bullseye
+image = redisfab/redis-py-sentinel:6.2.6-buster
links =
master:master
ports =
26380:26380/tcp
healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26380)) else False"
volumes =
- bind:rw:{toxinidir}/docker/sentinel_2/sentinel.conf:/usr/local/etc/redis/sentinel.conf
+ bind:rw:{toxinidir}/docker/sentinel_2/sentinel.conf:/sentinel.conf
[docker:sentinel_3]
name = sentinel_3
-image = redis:6.2-bullseye
+image = redisfab/redis-py-sentinel:6.2.6-buster
links =
master:master
ports =
26381:26381/tcp
healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',26381)) else False"
volumes =
- bind:rw:{toxinidir}/docker/sentinel_3/sentinel.conf:/usr/local/etc/redis/sentinel.conf
+ bind:rw:{toxinidir}/docker/sentinel_3/sentinel.conf:/sentinel.conf
[docker:redismod]
name = redismod
image = redislabs/redismod:edge
ports =
- 16379:16379/tcp
-healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',16379)) else False"
+ 36379:6379/tcp
+healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',36379)) else False"
[docker:lots-of-pythons]
name = lots-of-pythons
@@ -98,7 +100,7 @@ docker =
sentinel_3
redismod
lots-of-pythons
-commands = echo
+commands = /usr/bin/echo
[testenv:flake8]
deps_files = dev_requirements.txt