author     Oran Agra <oran@redislabs.com>  2021-04-20 08:03:58 +0300
committer  GitHub <noreply@github.com>     2021-04-20 08:03:58 +0300
commit     959d6035e5a92e68a50b5c8443ffac537c46027b (patch)
tree       19805c04dc350b956d8dae779bcfcfe43fd66d3a
parent     92bde124caaedef0e1572765854b2deb9aa22a35 (diff)
parent     aa730ef1eadf7e7e603315f8a204927d33eb2be1 (diff)
download   redis-959d6035e5a92e68a50b5c8443ffac537c46027b.tar.gz
Merge 6.2.2 release (tag: 6.2.2)
Release 6.2.2
-rw-r--r--  .github/workflows/daily.yml  16
-rw-r--r--  00-RELEASENOTES  74
-rw-r--r--  README.md  38
-rw-r--r--  redis.conf  62
-rw-r--r--  src/Makefile  22
-rw-r--r--  src/acl.c  31
-rw-r--r--  src/ae.c  17
-rw-r--r--  src/ae_epoll.c  2
-rw-r--r--  src/anet.c  73
-rw-r--r--  src/anet.h  7
-rw-r--r--  src/aof.c  22
-rw-r--r--  src/asciilogo.h  4
-rw-r--r--  src/bio.c  18
-rw-r--r--  src/blocked.c  29
-rw-r--r--  src/childinfo.c  49
-rw-r--r--  src/cluster.c  241
-rw-r--r--  src/cluster.h  12
-rw-r--r--  src/config.c  118
-rw-r--r--  src/crc64.c  3
-rw-r--r--  src/crc64.h  2
-rw-r--r--  src/db.c  21
-rw-r--r--  src/debug.c  8
-rw-r--r--  src/defrag.c  4
-rw-r--r--  src/dict.c  64
-rw-r--r--  src/dict.h  4
-rw-r--r--  src/endianconv.c  3
-rw-r--r--  src/endianconv.h  2
-rw-r--r--  src/help.h  2
-rw-r--r--  src/intset.c  15
-rw-r--r--  src/intset.h  2
-rw-r--r--  src/latency.c  4
-rw-r--r--  src/listpack.c  2
-rw-r--r--  src/module.c  326
-rw-r--r--  src/modules/gendoc.rb  134
-rw-r--r--  src/multi.c  37
-rw-r--r--  src/networking.c  69
-rw-r--r--  src/notify.c  2
-rw-r--r--  src/object.c  6
-rw-r--r--  src/pubsub.c  18
-rw-r--r--  src/quicklist.c  340
-rw-r--r--  src/quicklist.h  2
-rw-r--r--  src/rdb.c  18
-rw-r--r--  src/redis-benchmark.c  6
-rw-r--r--  src/redis-check-rdb.c  2
-rw-r--r--  src/redis-cli.c  150
-rw-r--r--  src/redismodule.h  17
-rw-r--r--  src/replication.c  84
-rw-r--r--  src/rio.c  6
-rw-r--r--  src/scripting.c  17
-rw-r--r--  src/sds.c  3
-rw-r--r--  src/sds.h  2
-rw-r--r--  src/sentinel.c  91
-rw-r--r--  src/server.c  348
-rw-r--r--  src/server.h  77
-rw-r--r--  src/sha1.c  3
-rw-r--r--  src/sha1.h  2
-rw-r--r--  src/t_hash.c  2
-rw-r--r--  src/t_list.c  14
-rw-r--r--  src/t_stream.c  22
-rw-r--r--  src/t_string.c  2
-rw-r--r--  src/t_zset.c  131
-rw-r--r--  src/tls.c  48
-rw-r--r--  src/util.c  3
-rw-r--r--  src/util.h  2
-rw-r--r--  src/version.h  4
-rw-r--r--  src/ziplist.c  24
-rw-r--r--  src/ziplist.h  2
-rw-r--r--  src/zipmap.c  6
-rw-r--r--  src/zipmap.h  2
-rw-r--r--  src/zmalloc.c  14
-rw-r--r--  src/zmalloc.h  11
-rw-r--r--  tests/assets/nodefaultuser.acl  2
-rw-r--r--  tests/assets/user.acl  3
-rw-r--r--  tests/cluster/cluster.tcl  32
-rw-r--r--  tests/cluster/tests/04-resharding.tcl  17
-rw-r--r--  tests/cluster/tests/12-replica-migration-2.tcl  6
-rw-r--r--  tests/cluster/tests/12.1-replica-migration-3.tcl  71
-rw-r--r--  tests/cluster/tests/15-cluster-slots.tcl  13
-rw-r--r--  tests/cluster/tests/17-diskless-load-swapdb.tcl  9
-rw-r--r--  tests/cluster/tests/19-cluster-nodes-slots.tcl  9
-rw-r--r--  tests/cluster/tests/20-half-migrated-slot.tcl  98
-rw-r--r--  tests/cluster/tests/21-many-slot-migration.tcl  64
-rw-r--r--  tests/cluster/tests/includes/utils.tcl  25
-rw-r--r--  tests/instances.tcl  17
-rw-r--r--  tests/integration/corrupt-dump.tcl  11
-rw-r--r--  tests/integration/psync2-pingoff.tcl  18
-rw-r--r--  tests/integration/redis-cli.tcl  36
-rw-r--r--  tests/integration/replication-4.tcl  16
-rw-r--r--  tests/integration/replication.tcl  30
-rw-r--r--  tests/modules/keyspace_events.c  69
-rw-r--r--  tests/modules/propagate.c  94
-rw-r--r--  tests/sentinel/tests/00-base.tcl  4
-rw-r--r--  tests/sentinel/tests/01-conf-update.tcl  2
-rw-r--r--  tests/sentinel/tests/02-slaves-reconf.tcl  4
-rw-r--r--  tests/sentinel/tests/05-manual.tcl  2
-rw-r--r--  tests/sentinel/tests/10-replica-priority.tcl  73
-rw-r--r--  tests/support/cluster.tcl  43
-rw-r--r--  tests/support/redis.tcl  6
-rw-r--r--  tests/support/server.tcl  25
-rw-r--r--  tests/support/test.tcl  2
-rw-r--r--  tests/support/util.tcl  91
-rw-r--r--  tests/unit/acl.tcl  165
-rw-r--r--  tests/unit/expire.tcl  5
-rw-r--r--  tests/unit/introspection.tcl  20
-rw-r--r--  tests/unit/maxmemory.tcl  2
-rw-r--r--  tests/unit/moduleapi/keyspace_events.tcl  15
-rw-r--r--  tests/unit/moduleapi/propagate.tcl  129
-rw-r--r--  tests/unit/obuf-limits.tcl  12
-rw-r--r--  tests/unit/other.tcl  8
-rw-r--r--  tests/unit/pendingquerybuf.tcl  11
-rw-r--r--  tests/unit/pubsub.tcl  48
-rw-r--r--  tests/unit/scripting.tcl  65
-rw-r--r--  tests/unit/slowlog.tcl  32
-rw-r--r--  tests/unit/tls.tcl  15
-rw-r--r--  tests/unit/type/hash.tcl  6
-rw-r--r--  tests/unit/type/set.tcl  6
-rw-r--r--  tests/unit/type/stream-cgroups.tcl  16
-rw-r--r--  tests/unit/type/zset.tcl  120
-rwxr-xr-x  utils/whatisdoing.sh  2
119 files changed, 3298 insertions, 1289 deletions
diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml
index 59d236d9a..ee9ac1bbf 100644
--- a/.github/workflows/daily.yml
+++ b/.github/workflows/daily.yml
@@ -17,7 +17,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: make
- run: make
+ run: make REDIS_CFLAGS='-Werror -DREDIS_TEST'
- name: test
run: |
sudo apt-get install tcl8.6
@@ -28,6 +28,8 @@ jobs:
run: ./runtest-sentinel
- name: cluster tests
run: ./runtest-cluster
+ - name: unittest
+ run: ./src/redis-server test all
test-ubuntu-libc-malloc:
runs-on: ubuntu-latest
@@ -76,7 +78,7 @@ jobs:
- name: make
run: |
sudo apt-get update && sudo apt-get install libc6-dev-i386
- make 32bit
+ make 32bit REDIS_CFLAGS='-Werror -DREDIS_TEST'
- name: test
run: |
sudo apt-get install tcl8.6
@@ -89,6 +91,8 @@ jobs:
run: ./runtest-sentinel
- name: cluster tests
run: ./runtest-cluster
+ - name: unittest
+ run: ./src/redis-server test all
test-ubuntu-tls:
runs-on: ubuntu-latest
@@ -142,7 +146,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: make
- run: make valgrind
+ run: make valgrind REDIS_CFLAGS='-Werror -DREDIS_TEST'
- name: test
run: |
sudo apt-get update
@@ -150,6 +154,10 @@ jobs:
./runtest --valgrind --verbose --clients 1 --dump-logs
- name: module api test
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1
+ - name: unittest
+ run: |
+ valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
+ if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-valgrind-no-malloc-usable-size:
runs-on: ubuntu-latest
@@ -259,6 +267,7 @@ jobs:
test-alpine-jemalloc:
runs-on: ubuntu-latest
+ if: github.repository == 'redis/redis'
container: alpine:latest
steps:
- uses: actions/checkout@v2
@@ -279,6 +288,7 @@ jobs:
test-alpine-libc-malloc:
runs-on: ubuntu-latest
+ if: github.repository == 'redis/redis'
container: alpine:latest
steps:
- uses: actions/checkout@v2
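
The workflow hunks above build with `REDIS_CFLAGS='-Werror -DREDIS_TEST'` and add a step that runs `./src/redis-server test all`: the flag compiles unit tests into the server binary, and the `test` argument dispatches to them instead of starting the server. A generic sketch of that embedded self-test pattern, with illustrative names rather than the actual Redis dispatcher:

```c
/* Sketch of an embedded self-test dispatcher: tests are compiled in only
 * when REDIS_TEST is defined, and a "test" command-line argument runs them
 * instead of the normal program. Names here are illustrative. */
#include <stdio.h>
#include <string.h>

#ifdef REDIS_TEST
static int ziplistSelfTest(void) { /* real assertions would go here */ return 0; }
static int intsetSelfTest(void)  { /* real assertions would go here */ return 0; }

static int runSelfTests(const char *which) {
    int failed = 0;
    if (!strcmp(which, "all") || !strcmp(which, "ziplist")) failed += ziplistSelfTest();
    if (!strcmp(which, "all") || !strcmp(which, "intset"))  failed += intsetSelfTest();
    return failed;
}
#endif

int main(int argc, char **argv) {
#ifdef REDIS_TEST
    /* "./binary test all" runs the built-in tests and exits. */
    if (argc >= 3 && !strcmp(argv[1], "test"))
        return runSelfTests(argv[2]);
#endif
    (void)argc; (void)argv;
    printf("normal startup would continue here\n");
    return 0;
}
```
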
diff --git a/00-RELEASENOTES b/00-RELEASENOTES
index d94c60465..8a1405e41 100644
--- a/00-RELEASENOTES
+++ b/00-RELEASENOTES
@@ -12,6 +12,80 @@ SECURITY: There are security fixes in the release.
--------------------------------------------------------------------------------
================================================================================
+Redis 6.2.2 Released Mon April 19 19:00:00 IST 2021
+================================================================================
+
+Upgrade urgency: HIGH, if you're using ACL and pub/sub, CONFIG REWRITE, or
+suffering from performance regression. See below.
+
+Bug fixes for regressions in previous releases of Redis 6.2:
+* Fix BGSAVE, AOFRW, and replication slowdown due to child reporting CoW (#8645)
+* Fix short busy loop when timer event is about to fire (#8764)
+* Fix default user, overwritten and reset users losing pubsub channel permissions (#8723)
+* Fix config rewrite with an empty `save` config resulting in default `save` values (#8719)
+* Fix not starting on alpine/libmusl without IPv6 (#8655)
+* Fix issues with propagation and MULTI/EXEC in modules (#8617)
+ Several issues around nested calls and thread safe contexts
+
+Bug fixes that are only applicable to previous releases of Redis 6.2:
+* ACL Pub/Sub channels permission handling for save/load scenario (#8794)
+* Fix early rejection of PUBLISH inside MULTI-EXEC transaction (#8534)
+* Fix missing SLOWLOG records for blocked commands (#8632)
+* Allow RESET command during busy scripts (#8629)
+* Fix some error replies were not counted on stats (#8659)
+
+Bug fixes:
+* Add a timeout mechanism for replicas stuck in fullsync (#8762)
+* Process HELLO command even if the default user has no permissions (#8633)
+* Client issuing a long running script and using a pipeline, got disconnected (#8715)
+* Fix script kill to work also on scripts that use `pcall` (#8661)
+* Fix list-compress-depth may compress more node than required (#8311)
+* Fix redis-cli handling of rediss:// URL scheme (#8705)
+* Cluster: Skip unnecessary check which may prevent failure detection (#8585)
+* Cluster: Fix hang manual failover when replica just started (#8651)
+* Sentinel: Fix info-refresh time field before sentinel get first response (#8567)
+* Sentinel: Fix possible crash on failed connection attempt (#8627)
+* Systemd: Send the readiness notification when a replica is ready to accept connections (#8409)
+
+Command behavior changes:
+* ZADD: fix wrong reply when INCR used with GT/LT which blocked the update (#8717)
+ It was responding with the incremented value rather than nil
+* XAUTOCLAIM: fix response to return the next available id as the cursor (#8725)
+  Previous behavior was returning the last one which was already scanned
+* XAUTOCLAIM: fix JUSTID to prevent incrementing delivery_count (#8724)
+
+New config options:
+* Add cluster-allow-replica-migration config option (#5285)
+* Add replica-announced config option (#8653)
+* Add support for plaintext clients in TLS cluster (#8587)
+* Add support for reading encrypted keyfiles (#8644)
+
+Improvements:
+* Fix performance regression in BRPOP on Redis 6.0 (#8689)
+* Avoid adding slowlog entries for config with sensitive data (#8584)
+* Improve redis-cli non-binary safe string handling (#8566)
+* Optimize CLUSTER SLOTS reply (#8541)
+* Handle remaining fsync errors (#8419)
+
+Info fields and introspection changes:
+* Strip % sign from current_fork_perc info field (#8628)
+* Fix RSS memory info on FreeBSD (#8620)
+* Fix client_recent_max_input/output_buffer in 'INFO CLIENTS' when all clients drop (#8588)
+* Fix invalid master_link_down_since_seconds in info replication (#8785)
+
+Platform and deployment-related changes:
+* Fix FreeBSD <12.x builds (#8603)
+
+Modules:
+* Add macros for RedisModule_log logging levels (#4246)
+* Add RedisModule_GetAbsExpire / RedisModule_SetAbsExpire (#8564)
+* Add a module type for key space notification (#8759)
+* Set module eviction context flag only in masters (#8631)
+* Fix unusable RedisModule_IsAOFClient API (#8596)
+* Fix missing EXEC on modules propagation after failed EVAL execution (#8654)
+* Fix edge-case when a module client is unblocked (#8618)
+
+================================================================================
Redis 6.2.1 Released Mon Mar 1 17:51:36 IST 2021
================================================================================
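
Of the command behavior changes listed above, the ZADD one (#8717) is easiest to see from a client: when GT or LT blocks the update, `ZADD ... INCR` now replies nil instead of the score the increment would have produced. A minimal hiredis sketch, assuming a local Redis 6.2.2 on the default port (the key name is illustrative):

```c
/* Demonstrates the corrected ZADD GT/INCR reply: a blocked update yields
 * a nil reply. Assumes a local server on 127.0.0.1:6379. */
#include <stdio.h>
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) { fprintf(stderr, "connect failed\n"); return 1; }

    redisReply *r = redisCommand(c, "DEL ztest");
    freeReplyObject(r);
    r = redisCommand(c, "ZADD ztest 5 member");          /* current score: 5 */
    freeReplyObject(r);

    /* The increment would lower the score to 4, so GT blocks the update. */
    r = redisCommand(c, "ZADD ztest GT INCR -1 member");
    if (r->type == REDIS_REPLY_NIL)
        printf("nil reply: update blocked by GT (6.2.2 behavior)\n");
    else
        printf("reply: %s\n", r->str ? r->str : "(non-string)");
    freeReplyObject(r);
    redisFree(c);
    return 0;
}
```
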
diff --git a/README.md b/README.md
index d892f4881..87225b34b 100644
--- a/README.md
+++ b/README.md
@@ -15,10 +15,10 @@ Another good example is to think of Redis as a more complex version of memcached
If you want to know more, this is a list of selected starting points:
-* Introduction to Redis data types. http://redis.io/topics/data-types-intro
+* Introduction to Redis data types. https://redis.io/topics/data-types-intro
* Try Redis directly inside your browser. http://try.redis.io
-* The full list of Redis commands. http://redis.io/commands
-* There is much more inside the official Redis documentation. http://redis.io/documentation
+* The full list of Redis commands. https://redis.io/commands
+* There is much more inside the official Redis documentation. https://redis.io/documentation
Building Redis
--------------
@@ -49,7 +49,7 @@ To append a suffix to Redis program names, use:
% make PROG_SUFFIX="-alt"
-You can run a 32 bit Redis binary using:
+You can build a 32 bit Redis binary using:
% make 32bit
@@ -184,7 +184,7 @@ then in another terminal try the following:
(integer) 2
redis>
-You can find the list of all the available commands at http://redis.io/commands.
+You can find the list of all the available commands at https://redis.io/commands.
Installing Redis
-----------------
@@ -294,19 +294,19 @@ the structure definition.
Another important Redis data structure is the one defining a client.
In the past it was called `redisClient`, now just `client`. The structure
has many fields, here we'll just show the main ones:
-
- struct client {
- int fd;
- sds querybuf;
- int argc;
- robj **argv;
- redisDb *db;
- int flags;
- list *reply;
- char buf[PROTO_REPLY_CHUNK_BYTES];
- ... many other fields ...
- }
-
+```c
+struct client {
+ int fd;
+ sds querybuf;
+ int argc;
+ robj **argv;
+ redisDb *db;
+ int flags;
+ list *reply;
+ char buf[PROTO_REPLY_CHUNK_BYTES];
+ // ... many other fields ...
+}
+```
The client structure defines a *connected client*:
* The `fd` field is the client socket file descriptor.
@@ -453,7 +453,7 @@ Other C files
* `scripting.c` implements Lua scripting. It is completely self-contained and isolated from the rest of the Redis implementation and is simple enough to understand if you are familiar with the Lua API.
* `cluster.c` implements the Redis Cluster. Probably a good read only after being very familiar with the rest of the Redis code base. If you want to read `cluster.c` make sure to read the [Redis Cluster specification][3].
-[3]: http://redis.io/topics/cluster-spec
+[3]: https://redis.io/topics/cluster-spec
Anatomy of a Redis command
---
diff --git a/redis.conf b/redis.conf
index 465d56fc0..e8eff2774 100644
--- a/redis.conf
+++ b/redis.conf
@@ -150,6 +150,11 @@ tcp-keepalive 300
#
# tls-cert-file redis.crt
# tls-key-file redis.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
# Normally Redis uses the same certificate for both server functions (accepting
# connections) and client functions (replicating from a master, establishing
@@ -162,6 +167,11 @@ tcp-keepalive 300
#
# tls-client-cert-file client.crt
# tls-client-key-file client.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-client-key-file-pass secret
# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
#
@@ -657,6 +667,18 @@ repl-disable-tcp-nodelay no
# By default the priority is 100.
replica-priority 100
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
# It is possible for a master to stop accepting writes if there are less than
# N replicas connected, having a lag less or equal than M seconds.
#
@@ -895,7 +917,7 @@ acllog-max-len 128
# order to provide better out-of-the-box Pub/Sub security. Therefore, it is
# recommended that you explicitly define Pub/Sub permissions for all users
# rather then rely on implicit default values. Once you've set explicit
-# Pub/Sub for all exisitn users, you should uncomment the following line.
+# Pub/Sub for all existing users, you should uncomment the following line.
#
# acl-pubsub-default resetchannels
@@ -1225,7 +1247,7 @@ disable-thp yes
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
-# Please check http://redis.io/topics/persistence for more information.
+# Please check https://redis.io/topics/persistence for more information.
appendonly no
@@ -1434,12 +1456,21 @@ lua-time-limit 5000
# master in your cluster.
#
# Default is 1 (replicas migrate only if their masters remain with at least
-# one replica). To disable migration just set it to a very large value.
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1
+# Turning off this option allows to use less automatic cluster configuration.
+# It both disables migration to orphaned masters and migration from masters
+# that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
# By default Redis Cluster nodes stop accepting queries if they detect there
# is at least a hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
@@ -1480,7 +1511,7 @@ lua-time-limit 5000
# cluster-allow-reads-when-down no
# In order to setup your cluster make sure to read the documentation
-# available at http://redis.io web site.
+# available at https://redis.io web site.
########################## CLUSTER DOCKER/NAT support ########################
@@ -1490,16 +1521,21 @@ lua-time-limit 5000
#
# In order to make Redis Cluster working in such environments, a static
# configuration where each node knows its public address is needed. The
-# following two options are used for this scope, and are:
+# following four options are used for this scope, and are:
#
# * cluster-announce-ip
# * cluster-announce-port
+# * cluster-announce-tls-port
# * cluster-announce-bus-port
#
-# Each instructs the node about its address, client port, and cluster message
-# bus port. The information is then published in the header of the bus packets
-# so that other nodes will be able to correctly map the address of the node
-# publishing the information.
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
#
# If the above options are not used, the normal Redis Cluster auto-detection
# will be used instead.
@@ -1512,7 +1548,8 @@ lua-time-limit 5000
# Example:
#
# cluster-announce-ip 10.1.1.5
-# cluster-announce-port 6379
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
# cluster-announce-bus-port 6380
################################## SLOW LOG ###################################
@@ -1563,7 +1600,7 @@ latency-monitor-threshold 0
############################# EVENT NOTIFICATION ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
-# This feature is documented at http://redis.io/topics/notifications
+# This feature is documented at https://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
@@ -1586,8 +1623,9 @@ latency-monitor-threshold 0
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# t Stream commands
+# d Module key type events
# m Key-miss events (Note: It is not included in the 'A' class)
-# A Alias for g$lshzxet, so that the "AKE" string means all the events
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
# (Except key-miss events which are excluded from 'A' due to their
# unique nature).
#
diff --git a/src/Makefile b/src/Makefile
index ecd69295f..28d50da02 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -256,6 +256,17 @@ endif
FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a $(LIBSSL_LIBS) $(LIBCRYPTO_LIBS)
endif
+ifndef V
+ define MAKE_INSTALL
+ @printf ' %b %b\n' $(LINKCOLOR)INSTALL$(ENDCOLOR) $(BINCOLOR)$(1)$(ENDCOLOR) 1>&2
+ @$(INSTALL) $(1) $(2)
+ endef
+else
+ define MAKE_INSTALL
+ $(INSTALL) $(1) $(2)
+ endef
+endif
+
REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS)
REDIS_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS)
REDIS_INSTALL=$(QUIET_INSTALL)$(INSTALL)
@@ -351,9 +362,6 @@ $(REDIS_CLI_NAME): $(REDIS_CLI_OBJ)
$(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/hdr_histogram/hdr_histogram.o $(FINAL_LIBS)
-dict-benchmark: dict.c zmalloc.c sds.c siphash.c mt19937-64.c
- $(REDIS_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS)
-
DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ:%.o=%.d)
-include $(DEP)
@@ -364,7 +372,7 @@ DEP = $(REDIS_SERVER_OBJ:%.o=%.d) $(REDIS_CLI_OBJ:%.o=%.d) $(REDIS_BENCHMARK_OBJ
$(REDIS_CC) -MMD -o $@ -c $<
clean:
- rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
+ rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep
rm -f $(DEP)
.PHONY: clean
@@ -417,9 +425,9 @@ src/help.h:
install: all
@mkdir -p $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_BENCHMARK_NAME) $(INSTALL_BIN)
- $(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN)
+ $(call MAKE_INSTALL,$(REDIS_SERVER_NAME),$(INSTALL_BIN))
+ $(call MAKE_INSTALL,$(REDIS_BENCHMARK_NAME),$(INSTALL_BIN))
+ $(call MAKE_INSTALL,$(REDIS_CLI_NAME),$(INSTALL_BIN))
@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_RDB_NAME)
@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_AOF_NAME)
@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME)
diff --git a/src/acl.c b/src/acl.c
index 445409ecd..6a2ade646 100644
--- a/src/acl.c
+++ b/src/acl.c
@@ -245,7 +245,7 @@ user *ACLCreateUser(const char *name, size_t namelen) {
if (raxFind(Users,(unsigned char*)name,namelen) != raxNotFound) return NULL;
user *u = zmalloc(sizeof(*u));
u->name = sdsnewlen(name,namelen);
- u->flags = USER_FLAG_DISABLED | server.acl_pubusub_default;
+ u->flags = USER_FLAG_DISABLED | server.acl_pubsub_default;
u->allowed_subcommands = NULL;
u->passwords = listCreate();
u->patterns = listCreate();
@@ -652,6 +652,7 @@ sds ACLDescribeUser(user *u) {
if (u->flags & USER_FLAG_ALLCHANNELS) {
res = sdscatlen(res,"&* ",3);
} else {
+ res = sdscatlen(res,"resetchannels ",14);
listRewind(u->channels,&li);
while((ln = listNext(&li))) {
sds thispat = listNodeValue(ln);
@@ -1000,6 +1001,8 @@ int ACLSetUser(user *u, const char *op, ssize_t oplen) {
serverAssert(ACLSetUser(u,"resetpass",-1) == C_OK);
serverAssert(ACLSetUser(u,"resetkeys",-1) == C_OK);
serverAssert(ACLSetUser(u,"resetchannels",-1) == C_OK);
+ if (server.acl_pubsub_default & USER_FLAG_ALLCHANNELS)
+ serverAssert(ACLSetUser(u,"allchannels",-1) == C_OK);
serverAssert(ACLSetUser(u,"off",-1) == C_OK);
serverAssert(ACLSetUser(u,"sanitize-payload",-1) == C_OK);
serverAssert(ACLSetUser(u,"-@all",-1) == C_OK);
@@ -1180,9 +1183,9 @@ int ACLCheckCommandPerm(client *c, int *keyidxptr) {
/* If there is no associated user, the connection can run anything. */
if (u == NULL) return ACL_OK;
- /* Check if the user can execute this command. */
- if (!(u->flags & USER_FLAG_ALLCOMMANDS) &&
- c->cmd->proc != authCommand)
+ /* Check if the user can execute this command or if the command
+ * doesn't need to be authenticated (hello, auth). */
+ if (!(u->flags & USER_FLAG_ALLCOMMANDS) && !(c->cmd->flags & CMD_NO_AUTH))
{
/* If the bit is not set we have to check further, in case the
* command is allowed just with that specific subcommand. */
@@ -1360,6 +1363,22 @@ int ACLCheckPubsubPerm(client *c, int idx, int count, int literal, int *idxptr)
}
+/* Check whether the command is ready to be executed by ACLCheckCommandPerm.
+ * If that check passes, also check whether the command's pub/sub channels
+ * are allowed by ACLCheckPubsubPerm. */
+int ACLCheckAllPerm(client *c, int *idxptr) {
+ int acl_retval = ACLCheckCommandPerm(c,idxptr);
+ if (acl_retval != ACL_OK)
+ return acl_retval;
+ if (c->cmd->proc == publishCommand)
+ acl_retval = ACLCheckPubsubPerm(c,1,1,0,idxptr);
+ else if (c->cmd->proc == subscribeCommand)
+ acl_retval = ACLCheckPubsubPerm(c,1,c->argc-1,0,idxptr);
+ else if (c->cmd->proc == psubscribeCommand)
+ acl_retval = ACLCheckPubsubPerm(c,1,c->argc-1,1,idxptr);
+ return acl_retval;
+}
+
/* =============================================================================
* ACL loading / saving functions
* ==========================================================================*/
@@ -1873,6 +1892,10 @@ void addACLLogEntry(client *c, int reason, int argpos, sds username) {
void aclCommand(client *c) {
char *sub = c->argv[1]->ptr;
if (!strcasecmp(sub,"setuser") && c->argc >= 3) {
+ /* Consider information about passwords or permissions
+ * to be sensitive, which will be the arguments for this
+ * subcommand. */
+ preventCommandLogging(c);
sds username = c->argv[2]->ptr;
/* Check username validity. */
if (ACLStringHasSpaces(username,sdslen(username))) {
diff --git a/src/ae.c b/src/ae.c
index 283f51438..48fb63ed9 100644
--- a/src/ae.c
+++ b/src/ae.c
@@ -239,7 +239,7 @@ int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id)
return AE_ERR; /* NO event with the specified ID found */
}
-/* How many milliseconds until the first timer should fire.
+/* How many microseconds until the first timer should fire.
* If there are no timers, -1 is returned.
*
* Note that's O(N) since time events are unsorted.
@@ -248,7 +248,7 @@ int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id)
* Much better but still insertion or deletion of timers is O(N).
* 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)).
*/
-static long msUntilEarliestTimer(aeEventLoop *eventLoop) {
+static int64_t usUntilEarliestTimer(aeEventLoop *eventLoop) {
aeTimeEvent *te = eventLoop->timeEventHead;
if (te == NULL) return -1;
@@ -260,8 +260,7 @@ static long msUntilEarliestTimer(aeEventLoop *eventLoop) {
}
monotime now = getMonotonicUs();
- return (now >= earliest->when)
- ? 0 : (long)((earliest->when - now) / 1000);
+ return (now >= earliest->when) ? 0 : earliest->when - now;
}
/* Process time events */
@@ -361,14 +360,14 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags)
((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) {
int j;
struct timeval tv, *tvp;
- long msUntilTimer = -1;
+ int64_t usUntilTimer = -1;
if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT))
- msUntilTimer = msUntilEarliestTimer(eventLoop);
+ usUntilTimer = usUntilEarliestTimer(eventLoop);
- if (msUntilTimer >= 0) {
- tv.tv_sec = msUntilTimer / 1000;
- tv.tv_usec = (msUntilTimer % 1000) * 1000;
+ if (usUntilTimer >= 0) {
+ tv.tv_sec = usUntilTimer / 1000000;
+ tv.tv_usec = usUntilTimer % 1000000;
tvp = &tv;
} else {
/* If we have to check for events but need to return
diff --git a/src/ae_epoll.c b/src/ae_epoll.c
index 07ca8ca41..023c93a17 100644
--- a/src/ae_epoll.c
+++ b/src/ae_epoll.c
@@ -111,7 +111,7 @@ static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
int retval, numevents = 0;
retval = epoll_wait(state->epfd,state->events,eventLoop->setsize,
- tvp ? (tvp->tv_sec*1000 + tvp->tv_usec/1000) : -1);
+ tvp ? (tvp->tv_sec*1000 + (tvp->tv_usec + 999)/1000) : -1);
if (retval > 0) {
int j;
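
The ae.c and ae_epoll.c hunks above track the next timer in microseconds and round the epoll timeout up instead of truncating it. The rounding is the fix for the short busy loop (#8764): a wait shorter than one millisecond, truncated to 0, turns epoll_wait() into a non-blocking poll, so the event loop spins until the timer fires. A standalone illustration of the two conversions (not Redis code):

```c
/* Illustration only: converting a microsecond wait into epoll's millisecond
 * timeout. Truncation turns any sub-millisecond wait into a 0 ms
 * (non-blocking) poll, which busy-loops; rounding up sleeps at least 1 ms. */
#include <stdio.h>
#include <stdint.h>

static int timeout_truncated(int64_t us)  { return (int)(us / 1000); }
static int timeout_rounded_up(int64_t us) { return (int)((us + 999) / 1000); }

int main(void) {
    int64_t wait_us = 300; /* the next timer fires in 0.3 ms */
    printf("truncated:  %d ms -> epoll_wait returns immediately\n",
           timeout_truncated(wait_us));
    printf("rounded up: %d ms -> sleep until the timer is due\n",
           timeout_rounded_up(wait_us));
    return 0;
}
```
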
diff --git a/src/anet.c b/src/anet.c
index 0bfa575f5..a121c2768 100644
--- a/src/anet.c
+++ b/src/anet.c
@@ -186,27 +186,6 @@ int anetDisableTcpNoDelay(char *err, int fd)
return anetSetTcpNoDelay(err, fd, 0);
}
-
-int anetSetSendBuffer(char *err, int fd, int buffsize)
-{
- if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffsize, sizeof(buffsize)) == -1)
- {
- anetSetError(err, "setsockopt SO_SNDBUF: %s", strerror(errno));
- return ANET_ERR;
- }
- return ANET_OK;
-}
-
-int anetTcpKeepAlive(char *err, int fd)
-{
- int yes = 1;
- if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)) == -1) {
- anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno));
- return ANET_ERR;
- }
- return ANET_OK;
-}
-
/* Set the socket send timeout (SO_SNDTIMEO socket option) to the specified
* number of milliseconds, or disable it if the 'ms' argument is zero. */
int anetSendTimeout(char *err, int fd, long long ms) {
@@ -378,23 +357,11 @@ end:
}
}
-int anetTcpConnect(char *err, const char *addr, int port)
-{
- return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONE);
-}
-
int anetTcpNonBlockConnect(char *err, const char *addr, int port)
{
return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONBLOCK);
}
-int anetTcpNonBlockBindConnect(char *err, const char *addr, int port,
- const char *source_addr)
-{
- return anetTcpGenericConnect(err,addr,port,source_addr,
- ANET_CONNECT_NONBLOCK);
-}
-
int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port,
const char *source_addr)
{
@@ -430,46 +397,6 @@ int anetUnixGenericConnect(char *err, const char *path, int flags)
return s;
}
-int anetUnixConnect(char *err, const char *path)
-{
- return anetUnixGenericConnect(err,path,ANET_CONNECT_NONE);
-}
-
-int anetUnixNonBlockConnect(char *err, const char *path)
-{
- return anetUnixGenericConnect(err,path,ANET_CONNECT_NONBLOCK);
-}
-
-/* Like read(2) but make sure 'count' is read before to return
- * (unless error or EOF condition is encountered) */
-int anetRead(int fd, char *buf, int count)
-{
- ssize_t nread, totlen = 0;
- while(totlen != count) {
- nread = read(fd,buf,count-totlen);
- if (nread == 0) return totlen;
- if (nread == -1) return -1;
- totlen += nread;
- buf += nread;
- }
- return totlen;
-}
-
-/* Like write(2) but make sure 'count' is written before to return
- * (unless error is encountered) */
-int anetWrite(int fd, char *buf, int count)
-{
- ssize_t nwritten, totlen = 0;
- while(totlen != count) {
- nwritten = write(fd,buf,count-totlen);
- if (nwritten == 0) return totlen;
- if (nwritten == -1) return -1;
- totlen += nwritten;
- buf += nwritten;
- }
- return totlen;
-}
-
static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog) {
if (bind(s,sa,len) == -1) {
anetSetError(err, "bind: %s", strerror(errno));
diff --git a/src/anet.h b/src/anet.h
index 5da2f3b46..2a685cc01 100644
--- a/src/anet.h
+++ b/src/anet.h
@@ -53,26 +53,19 @@
#define FD_TO_PEER_NAME 0
#define FD_TO_SOCK_NAME 1
-int anetTcpConnect(char *err, const char *addr, int port);
int anetTcpNonBlockConnect(char *err, const char *addr, int port);
-int anetTcpNonBlockBindConnect(char *err, const char *addr, int port, const char *source_addr);
int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port, const char *source_addr);
-int anetUnixConnect(char *err, const char *path);
-int anetUnixNonBlockConnect(char *err, const char *path);
-int anetRead(int fd, char *buf, int count);
int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, int flags);
int anetTcpServer(char *err, int port, char *bindaddr, int backlog);
int anetTcp6Server(char *err, int port, char *bindaddr, int backlog);
int anetUnixServer(char *err, char *path, mode_t perm, int backlog);
int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port);
int anetUnixAccept(char *err, int serversock);
-int anetWrite(int fd, char *buf, int count);
int anetNonBlock(char *err, int fd);
int anetBlock(char *err, int fd);
int anetCloexec(int fd);
int anetEnableTcpNoDelay(char *err, int fd);
int anetDisableTcpNoDelay(char *err, int fd);
-int anetTcpKeepAlive(char *err, int fd);
int anetSendTimeout(char *err, int fd, long long ms);
int anetRecvTimeout(char *err, int fd, long long ms);
int anetFdToString(int fd, char *ip, size_t ip_len, int *port, int fd_to_str_type);
diff --git a/src/aof.c b/src/aof.c
index f1586cf90..0d2971eac 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -218,7 +218,7 @@ void killAppendOnlyChild(void) {
serverLog(LL_NOTICE,"Killing running AOF rewrite child: %ld",
(long) server.child_pid);
if (kill(server.child_pid,SIGUSR1) != -1) {
- while(wait3(&statloc,0,NULL) != server.child_pid);
+ while(waitpid(-1, &statloc, 0) != server.child_pid);
}
/* Reset the buffer accumulating changes while the child saves. */
aofRewriteBufferReset();
@@ -234,9 +234,12 @@ void killAppendOnlyChild(void) {
void stopAppendOnly(void) {
serverAssert(server.aof_state != AOF_OFF);
flushAppendOnlyFile(1);
- redis_fsync(server.aof_fd);
- server.aof_fsync_offset = server.aof_current_size;
- server.aof_last_fsync = server.unixtime;
+ if (redis_fsync(server.aof_fd) == -1) {
+ serverLog(LL_WARNING,"Fail to fsync the AOF file: %s",strerror(errno));
+ } else {
+ server.aof_fsync_offset = server.aof_current_size;
+ server.aof_last_fsync = server.unixtime;
+ }
close(server.aof_fd);
server.aof_fd = -1;
@@ -290,6 +293,15 @@ int startAppendOnly(void) {
server.aof_last_fsync = server.unixtime;
server.aof_fd = newfd;
+ /* If AOF fsync error in bio job, we just ignore it and log the event. */
+ int aof_bio_fsync_status;
+ atomicGet(server.aof_bio_fsync_status, aof_bio_fsync_status);
+ if (aof_bio_fsync_status == C_ERR) {
+ serverLog(LL_WARNING,
+ "AOF reopen, just ignore the AOF fsync error in bio job");
+ atomicSet(server.aof_bio_fsync_status,C_OK);
+ }
+
/* If AOF was in error state, we just ignore it and log the event. */
if (server.aof_last_write_status == C_ERR) {
serverLog(LL_WARNING,"AOF reopen, just ignore the last error.");
@@ -1590,7 +1602,7 @@ int rewriteAppendOnlyFile(char *filename) {
if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr;
if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK)
goto werr;
- /* We read the ACK from the server using a 10 seconds timeout. Normally
+ /* We read the ACK from the server using a 5 seconds timeout. Normally
* it should reply ASAP, but just in case we lose its reply, we are sure
* the child will eventually get terminated. */
if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 ||
diff --git a/src/asciilogo.h b/src/asciilogo.h
index 044ca0c55..a62f68cf9 100644
--- a/src/asciilogo.h
+++ b/src/asciilogo.h
@@ -31,13 +31,13 @@ const char *ascii_logo =
" _._ \n"
" _.-``__ ''-._ \n"
" _.-`` `. `_. ''-._ Redis %s (%s/%d) %s bit\n"
-" .-`` .-```. ```\\/ _.,_ ''-._ \n"
+" .-`` .-```. ```\\/ _.,_ ''-._ \n"
" ( ' , .-` | `, ) Running in %s mode\n"
" |`-._`-...-` __...-.``-._|'` _.-'| Port: %d\n"
" | `-._ `._ / _.-' | PID: %ld\n"
" `-._ `-._ `-./ _.-' _.-' \n"
" |`-._`-._ `-.__.-' _.-'_.-'| \n"
-" | `-._`-._ _.-'_.-' | http://redis.io \n"
+" | `-._`-._ _.-'_.-' | https://redis.io \n"
" `-._ `-._`-.__.-'_.-' _.-' \n"
" |`-._`-._ `-.__.-' _.-'_.-'| \n"
" | `-._`-._ _.-'_.-' | \n"
diff --git a/src/bio.c b/src/bio.c
index c6e17f49d..ff18c4fe7 100644
--- a/src/bio.c
+++ b/src/bio.c
@@ -220,7 +220,23 @@ void *bioProcessBackgroundJobs(void *arg) {
if (type == BIO_CLOSE_FILE) {
close(job->fd);
} else if (type == BIO_AOF_FSYNC) {
- redis_fsync(job->fd);
+ /* The fd may be closed by main thread and reused for another
+ * socket, pipe, or file. We just ignore these errno because
+ * aof fsync did not really fail. */
+ if (redis_fsync(job->fd) == -1 &&
+ errno != EBADF && errno != EINVAL)
+ {
+ int last_status;
+ atomicGet(server.aof_bio_fsync_status,last_status);
+ atomicSet(server.aof_bio_fsync_status,C_ERR);
+ atomicSet(server.aof_bio_fsync_errno,errno);
+ if (last_status == C_OK) {
+ serverLog(LL_WARNING,
+ "Fail to fsync the AOF file: %s",strerror(errno));
+ }
+ } else {
+ atomicSet(server.aof_bio_fsync_status,C_OK);
+ }
} else if (type == BIO_LAZY_FREE) {
job->free_fn(job->free_args);
} else {
diff --git a/src/blocked.c b/src/blocked.c
index 09e17213c..eb110bd35 100644
--- a/src/blocked.c
+++ b/src/blocked.c
@@ -106,12 +106,11 @@ void blockClient(client *c, int btype) {
void updateStatsOnUnblock(client *c, long blocked_us, long reply_us){
const ustime_t total_cmd_duration = c->duration + blocked_us + reply_us;
c->lastcmd->microseconds += total_cmd_duration;
+
/* Log the command into the Slow log if needed. */
- if (!(c->lastcmd->flags & CMD_SKIP_SLOWLOG)) {
- slowlogPushEntryIfNeeded(c,c->argv,c->argc,total_cmd_duration);
- /* Log the reply duration event. */
- latencyAddSampleIfNeeded("command-unblocking",reply_us/1000);
- }
+ slowlogPushCurrentCommand(c, c->lastcmd, total_cmd_duration);
+ /* Log the reply duration event. */
+ latencyAddSampleIfNeeded("command-unblocking",reply_us/1000);
}
/* This function is called in the beforeSleep() function of the event loop
@@ -188,6 +187,16 @@ void unblockClient(client *c) {
} else {
serverPanic("Unknown btype in unblockClient().");
}
+
+ /* Reset the client for a new query since, for blocking commands
+ * we do not do it immediately after the command returns (when the
+ * client got blocked) in order to be still able to access the argument
+ * vector from module callbacks and updateStatsOnUnblock. */
+ if (c->btype != BLOCKED_PAUSE) {
+ freeClientOriginalArgv(c);
+ resetClient(c);
+ }
+
/* Clear the flags, and put the client in the unblocked list so that
* we'll process new commands in its query buffer ASAP. */
server.blocked_clients--;
@@ -279,7 +288,6 @@ void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
* freed by the next unblockClient()
* call. */
if (dstkey) incrRefCount(dstkey);
- unblockClient(receiver);
monotime replyTimer;
elapsedStart(&replyTimer);
@@ -292,6 +300,7 @@ void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
listTypePush(o,value,wherefrom);
}
updateStatsOnUnblock(receiver, 0, elapsedUs(replyTimer));
+ unblockClient(receiver);
if (dstkey) decrRefCount(dstkey);
decrRefCount(value);
@@ -335,11 +344,11 @@ void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
int where = (receiver->lastcmd &&
receiver->lastcmd->proc == bzpopminCommand)
? ZSET_MIN : ZSET_MAX;
- unblockClient(receiver);
monotime replyTimer;
elapsedStart(&replyTimer);
genericZpopCommand(receiver,&rl->key,1,where,1,NULL);
updateStatsOnUnblock(receiver, 0, elapsedUs(replyTimer));
+ unblockClient(receiver);
zcard--;
/* Replicate the command. */
@@ -471,6 +480,10 @@ void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) {
void serveClientsBlockedOnKeyByModule(readyList *rl) {
dictEntry *de;
+ /* Optimization: If no clients are in type BLOCKED_MODULE,
+ * we can skip this loop. */
+ if (!server.blocked_clients_by_type[BLOCKED_MODULE]) return;
+
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
de = dictFind(rl->db->blocking_keys,rl->key);
@@ -553,7 +566,7 @@ void handleClientsBlockedOnKeys(void) {
* way we can lookup an object multiple times (BLMOVE does
* that) without the risk of it being freed in the second
* lookup, invalidating the first one.
- * See https://github.com/antirez/redis/pull/6554. */
+ * See https://github.com/redis/redis/pull/6554. */
server.fixed_time_expire++;
updateCachedTime(0);
diff --git a/src/childinfo.c b/src/childinfo.c
index e3f33a96c..4f0a42001 100644
--- a/src/childinfo.c
+++ b/src/childinfo.c
@@ -33,6 +33,7 @@
typedef struct {
size_t keys;
size_t cow;
+ monotime cow_updated;
double progress;
childInfoType information_type; /* Type of information */
} child_info_data;
@@ -69,18 +70,39 @@ void closeChildInfoPipe(void) {
void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress, char *pname) {
if (server.child_info_pipe[1] == -1) return;
- child_info_data data = {0}; /* zero everything, including padding to sattisfy valgrind */
+ static monotime cow_updated = 0;
+ static uint64_t cow_update_cost = 0;
+ static size_t cow = 0;
+
+ child_info_data data = {0}; /* zero everything, including padding to satisfy valgrind */
+
+ /* When called to report current info, we need to throttle down CoW updates as they
+ * can be very expensive. To do that, we measure the time it takes to get a reading
+ * and schedule the next reading to happen not before time*CHILD_COW_COST_FACTOR
+ * passes. */
+
+ monotime now = getMonotonicUs();
+ if (info_type != CHILD_INFO_TYPE_CURRENT_INFO ||
+ !cow_updated ||
+ now - cow_updated > cow_update_cost * CHILD_COW_DUTY_CYCLE)
+ {
+ cow = zmalloc_get_private_dirty(-1);
+ cow_updated = getMonotonicUs();
+ cow_update_cost = cow_updated - now;
+
+ if (cow) {
+ serverLog((info_type == CHILD_INFO_TYPE_CURRENT_INFO) ? LL_VERBOSE : LL_NOTICE,
+ "%s: %zu MB of memory used by copy-on-write",
+ pname, data.cow / (1024 * 1024));
+ }
+ }
+
data.information_type = info_type;
data.keys = keys;
- data.cow = zmalloc_get_private_dirty(-1);
+ data.cow = cow;
+ data.cow_updated = cow_updated;
data.progress = progress;
- if (data.cow) {
- serverLog((info_type == CHILD_INFO_TYPE_CURRENT_INFO) ? LL_VERBOSE : LL_NOTICE,
- "%s: %zu MB of memory used by copy-on-write",
- pname, data.cow/(1024*1024));
- }
-
ssize_t wlen = sizeof(data);
if (write(server.child_info_pipe[1], &data, wlen) != wlen) {
@@ -89,9 +111,10 @@ void sendChildInfoGeneric(childInfoType info_type, size_t keys, double progress,
}
/* Update Child info. */
-void updateChildInfo(childInfoType information_type, size_t cow, size_t keys, double progress) {
+void updateChildInfo(childInfoType information_type, size_t cow, monotime cow_updated, size_t keys, double progress) {
if (information_type == CHILD_INFO_TYPE_CURRENT_INFO) {
server.stat_current_cow_bytes = cow;
+ server.stat_current_cow_updated = cow_updated;
server.stat_current_save_keys_processed = keys;
if (progress != -1) server.stat_module_progress = progress;
} else if (information_type == CHILD_INFO_TYPE_AOF_COW_SIZE) {
@@ -107,7 +130,7 @@ void updateChildInfo(childInfoType information_type, size_t cow, size_t keys, do
* if complete data read into the buffer,
* data is stored into *buffer, and returns 1.
* otherwise, the partial data is left in the buffer, waiting for the next read, and returns 0. */
-int readChildInfo(childInfoType *information_type, size_t *cow, size_t *keys, double* progress) {
+int readChildInfo(childInfoType *information_type, size_t *cow, monotime *cow_updated, size_t *keys, double* progress) {
/* We are using here a static buffer in combination with the server.child_info_nread to handle short reads */
static child_info_data buffer;
ssize_t wlen = sizeof(buffer);
@@ -124,6 +147,7 @@ int readChildInfo(childInfoType *information_type, size_t *cow, size_t *keys, do
if (server.child_info_nread == wlen) {
*information_type = buffer.information_type;
*cow = buffer.cow;
+ *cow_updated = buffer.cow_updated;
*keys = buffer.keys;
*progress = buffer.progress;
return 1;
@@ -137,12 +161,13 @@ void receiveChildInfo(void) {
if (server.child_info_pipe[0] == -1) return;
size_t cow;
+ monotime cow_updated;
size_t keys;
double progress;
childInfoType information_type;
/* Drain the pipe and update child info so that we get the final message. */
- while (readChildInfo(&information_type, &cow, &keys, &progress)) {
- updateChildInfo(information_type, cow, keys, progress);
+ while (readChildInfo(&information_type, &cow, &cow_updated, &keys, &progress)) {
+ updateChildInfo(information_type, cow, cow_updated, keys, progress);
}
}
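
The childinfo.c hunk above throttles copy-on-write readings with a duty cycle: it measures how long one reading costs and takes the next one only after CHILD_COW_DUTY_CYCLE times that cost has elapsed, so reporting never dominates the child's runtime. A standalone sketch of the same pattern with hypothetical names (not the Redis implementation):

```c
/* Duty-cycle throttle: an expensive sampling routine is re-run only after
 * SAMPLE_DUTY_CYCLE times its own measured cost has elapsed; otherwise the
 * cached value is reused. Names are hypothetical. */
#include <stdint.h>
#include <stddef.h>
#include <time.h>

#define SAMPLE_DUTY_CYCLE 100   /* spend at most ~1% of wall time sampling */

static uint64_t now_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
}

/* Stand-in for the expensive reading (Redis reads CoW from the OS). */
static size_t expensive_sample(void) { return 42; }

size_t throttled_sample(int force) {
    static uint64_t last_taken = 0, last_cost = 0;
    static size_t cached = 0;

    uint64_t start = now_us();
    if (force || last_taken == 0 ||
        start - last_taken > last_cost * SAMPLE_DUTY_CYCLE)
    {
        cached = expensive_sample();
        last_taken = now_us();
        last_cost = last_taken - start;   /* cost of this reading */
    }
    return cached;
}

int main(void) {
    size_t v1 = throttled_sample(0);   /* takes a real reading */
    size_t v2 = throttled_sample(0);   /* reuses the cached value */
    return (v1 == v2) ? 0 : 1;
}
```
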
diff --git a/src/cluster.c b/src/cluster.c
index 4605e3ea9..0f0ab737e 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -55,7 +55,7 @@ void clusterSendFail(char *nodename);
void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request);
void clusterUpdateState(void);
int clusterNodeGetSlotBit(clusterNode *n, int slot);
-sds clusterGenNodesDescription(int filter);
+sds clusterGenNodesDescription(int filter, int use_pport);
clusterNode *clusterLookupNode(const char *name);
int clusterNodeAddSlave(clusterNode *master, clusterNode *slave);
int clusterAddSlot(clusterNode *n, int slot);
@@ -190,6 +190,9 @@ int clusterLoadConfig(char *filename) {
* base port. */
n->cport = busp ? atoi(busp) : n->port + CLUSTER_PORT_INCR;
+ /* The plaintext port for client in a TLS cluster (n->pport) is not
+ * stored in nodes.conf. It is received later over the bus protocol. */
+
/* Parse flags */
p = s = argv[2];
while(p) {
@@ -336,7 +339,7 @@ int clusterSaveConfig(int do_fsync) {
/* Get the nodes description and concatenate our "vars" directive to
* save currentEpoch and lastVoteEpoch. */
- ci = clusterGenNodesDescription(CLUSTER_NODE_HANDSHAKE);
+ ci = clusterGenNodesDescription(CLUSTER_NODE_HANDSHAKE, 0);
ci = sdscatprintf(ci,"vars currentEpoch %llu lastVoteEpoch %llu\n",
(unsigned long long) server.cluster->currentEpoch,
(unsigned long long) server.cluster->lastVoteEpoch);
@@ -355,7 +358,7 @@ int clusterSaveConfig(int do_fsync) {
if (write(fd,ci,sdslen(ci)) != (ssize_t)sdslen(ci)) goto err;
if (do_fsync) {
server.cluster->todo_before_sleep &= ~CLUSTER_TODO_FSYNC_CONFIG;
- fsync(fd);
+ if (fsync(fd) == -1) goto err;
}
/* Truncate the file if needed to remove the final \n padding that
@@ -437,6 +440,26 @@ int clusterLockConfig(char *filename) {
return C_OK;
}
+/* Derives our ports to be announced in the cluster bus. */
+void deriveAnnouncedPorts(int *announced_port, int *announced_pport,
+ int *announced_cport) {
+ int port = server.tls_cluster ? server.tls_port : server.port;
+ /* Default announced ports. */
+ *announced_port = port;
+ *announced_pport = server.tls_cluster ? server.port : 0;
+ *announced_cport = port + CLUSTER_PORT_INCR;
+ /* Config overriding announced ports. */
+ if (server.tls_cluster && server.cluster_announce_tls_port) {
+ *announced_port = server.cluster_announce_tls_port;
+ *announced_pport = server.cluster_announce_port;
+ } else if (server.cluster_announce_port) {
+ *announced_port = server.cluster_announce_port;
+ }
+ if (server.cluster_announce_bus_port) {
+ *announced_cport = server.cluster_announce_bus_port;
+ }
+}
+
/* Some flags (currently just the NOFAILOVER flag) may need to be updated
* in the "myself" node based on the current configuration of the node,
* that may change at runtime via CONFIG SET. This function changes the
@@ -524,14 +547,9 @@ void clusterInit(void) {
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
- /* Set myself->port / cport to my listening ports, we'll just need to
+ /* Set myself->port/cport/pport to my listening ports, we'll just need to
* discover the IP address via MEET messages. */
- myself->port = port;
- myself->cport = port+CLUSTER_PORT_INCR;
- if (server.cluster_announce_port)
- myself->port = server.cluster_announce_port;
- if (server.cluster_announce_bus_port)
- myself->cport = server.cluster_announce_bus_port;
+ deriveAnnouncedPorts(&myself->port, &myself->pport, &myself->cport);
server.cluster->mf_end = 0;
resetManualFailover();
@@ -782,6 +800,7 @@ clusterNode *createClusterNode(char *nodename, int flags) {
memset(node->ip,0,sizeof(node->ip));
node->port = 0;
node->cport = 0;
+ node->pport = 0;
node->fail_reports = listCreate();
node->voted_time = 0;
node->orphaned_time = 0;
@@ -1488,6 +1507,7 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
if (node->link) freeClusterLink(node->link);
memcpy(node->ip,g->ip,NET_IP_STR_LEN);
node->port = ntohs(g->port);
+ node->pport = ntohs(g->pport);
node->cport = ntohs(g->cport);
node->flags &= ~CLUSTER_NODE_NOADDR;
}
@@ -1509,6 +1529,7 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
node = createClusterNode(g->nodename, flags);
memcpy(node->ip,g->ip,NET_IP_STR_LEN);
node->port = ntohs(g->port);
+ node->pport = ntohs(g->pport);
node->cport = ntohs(g->cport);
clusterAddNode(node);
}
@@ -1548,6 +1569,7 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link,
{
char ip[NET_IP_STR_LEN] = {0};
int port = ntohs(hdr->port);
+ int pport = ntohs(hdr->pport);
int cport = ntohs(hdr->cport);
/* We don't proceed if the link is the same as the sender link, as this
@@ -1559,12 +1581,13 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link,
if (link == node->link) return 0;
nodeIp2String(ip,link,hdr->myip);
- if (node->port == port && node->cport == cport &&
+ if (node->port == port && node->cport == cport && node->pport == pport &&
strcmp(ip,node->ip) == 0) return 0;
/* IP / port is different, update it. */
memcpy(node->ip,ip,sizeof(ip));
node->port = port;
+ node->pport = pport;
node->cport = cport;
if (node->link) freeClusterLink(node->link);
node->flags &= ~CLUSTER_NODE_NOADDR;
@@ -1610,7 +1633,7 @@ void clusterSetNodeAsMaster(clusterNode *n) {
* case we receive the info via an UPDATE packet. */
void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) {
int j;
- clusterNode *curmaster, *newmaster = NULL;
+ clusterNode *curmaster = NULL, *newmaster = NULL;
/* The dirty slots list is a list of slots for which we lose the ownership
* while having still keys inside. This usually happens after a failover
* or after a manual cluster reconfiguration operated by the admin.
@@ -1621,6 +1644,12 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
uint16_t dirty_slots[CLUSTER_SLOTS];
int dirty_slots_count = 0;
+ /* We should detect if sender is new master of our shard.
+ * We will know it if all our slots were migrated to sender, and sender
+ * has no slots except ours */
+ int sender_slots = 0;
+ int migrated_our_slots = 0;
+
/* Here we set curmaster to this node or the node this node
* replicates to if it's a slave. In the for loop we are
* interested to check if slots are taken away from curmaster. */
@@ -1633,6 +1662,8 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
for (j = 0; j < CLUSTER_SLOTS; j++) {
if (bitmapTestBit(slots,j)) {
+ sender_slots++;
+
/* The slot is already bound to the sender of this message. */
if (server.cluster->slots[j] == sender) continue;
@@ -1659,8 +1690,10 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
dirty_slots_count++;
}
- if (server.cluster->slots[j] == curmaster)
+ if (server.cluster->slots[j] == curmaster) {
newmaster = sender;
+ migrated_our_slots++;
+ }
clusterDelSlot(j);
clusterAddSlot(sender,j);
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
@@ -1683,7 +1716,9 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
* master.
* 2) We are a slave and our master is left without slots. We need
* to replicate to the new slots owner. */
- if (newmaster && curmaster->numslots == 0) {
+ if (newmaster && curmaster->numslots == 0 &&
+ (server.cluster_allow_replica_migration ||
+ sender_slots == migrated_our_slots)) {
serverLog(LL_WARNING,
"Configuration change detected. Reconfiguring myself "
"as a replica of %.40s", sender->name);
@@ -1811,7 +1846,7 @@ int clusterProcessPacket(clusterLink *link) {
nodeIsSlave(myself) &&
myself->slaveof == sender &&
hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED &&
- server.cluster->mf_master_offset == 0)
+ server.cluster->mf_master_offset == -1)
{
server.cluster->mf_master_offset = sender->repl_offset;
clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_MANUALFAILOVER);
@@ -1862,6 +1897,7 @@ int clusterProcessPacket(clusterLink *link) {
node = createClusterNode(NULL,CLUSTER_NODE_HANDSHAKE);
nodeIp2String(node->ip,link,hdr->myip);
node->port = ntohs(hdr->port);
+ node->pport = ntohs(hdr->pport);
node->cport = ntohs(hdr->cport);
clusterAddNode(node);
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG);
@@ -1924,6 +1960,7 @@ int clusterProcessPacket(clusterLink *link) {
link->node->flags |= CLUSTER_NODE_NOADDR;
link->node->ip[0] = '\0';
link->node->port = 0;
+ link->node->pport = 0;
link->node->cport = 0;
freeClusterLink(link);
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG);
@@ -2423,19 +2460,16 @@ void clusterBuildMessageHdr(clusterMsg *hdr, int type) {
hdr->myip[NET_IP_STR_LEN-1] = '\0';
}
- /* Handle cluster-announce-port as well. */
- int port = server.tls_cluster ? server.tls_port : server.port;
- int announced_port = server.cluster_announce_port ?
- server.cluster_announce_port : port;
- int announced_cport = server.cluster_announce_bus_port ?
- server.cluster_announce_bus_port :
- (port + CLUSTER_PORT_INCR);
+ /* Handle cluster-announce-[tls-|bus-]port. */
+ int announced_port, announced_pport, announced_cport;
+ deriveAnnouncedPorts(&announced_port, &announced_pport, &announced_cport);
memcpy(hdr->myslots,master->slots,sizeof(hdr->myslots));
memset(hdr->slaveof,0,CLUSTER_NAMELEN);
if (myself->slaveof != NULL)
memcpy(hdr->slaveof,myself->slaveof->name, CLUSTER_NAMELEN);
hdr->port = htons(announced_port);
+ hdr->pport = htons(announced_pport);
hdr->cport = htons(announced_cport);
hdr->flags = htons(myself->flags);
hdr->state = server.cluster->state;
@@ -2492,6 +2526,7 @@ void clusterSetGossipEntry(clusterMsg *hdr, int i, clusterNode *n) {
gossip->port = htons(n->port);
gossip->cport = htons(n->cport);
gossip->flags = htons(n->flags);
+ gossip->pport = htons(n->pport);
gossip->notused1 = 0;
}
@@ -3090,7 +3125,7 @@ void clusterFailoverReplaceYourMaster(void) {
/* This function is called if we are a slave node and our master serving
* a non-zero amount of hash slots is in FAIL state.
*
- * The gaol of this function is:
+ * The goal of this function is:
* 1) To check if we are able to perform a failover, is our data updated?
* 2) Try to get elected by masters.
* 3) Perform the failover informing all the other nodes.
@@ -3135,7 +3170,7 @@ void clusterHandleSlaveFailover(void) {
return;
}
- /* Set data_age to the number of seconds we are disconnected from
+ /* Set data_age to the number of milliseconds we are disconnected from
* the master. */
if (server.repl_state == REPL_STATE_CONNECTED) {
data_age = (mstime_t)(server.unixtime - server.master->lastinteraction)
@@ -3419,7 +3454,7 @@ void resetManualFailover(void) {
server.cluster->mf_end = 0; /* No manual failover in progress. */
server.cluster->mf_can_start = 0;
server.cluster->mf_slave = NULL;
- server.cluster->mf_master_offset = 0;
+ server.cluster->mf_master_offset = -1;
}
/* If a manual failover timed out, abort it. */
@@ -3440,7 +3475,7 @@ void clusterHandleManualFailover(void) {
* next steps are performed by clusterHandleSlaveFailover(). */
if (server.cluster->mf_can_start) return;
- if (server.cluster->mf_master_offset == 0) return; /* Wait for offset... */
+ if (server.cluster->mf_master_offset == -1) return; /* Wait for offset... */
if (server.cluster->mf_master_offset == replicationGetSlaveOffset()) {
/* Our replication offset matches the master replication offset
@@ -3630,7 +3665,6 @@ void clusterCron(void) {
now - node->link->ctime >
server.cluster_node_timeout && /* was not already reconnected */
node->ping_sent && /* we already sent a ping */
- node->pong_received < node->ping_sent && /* still waiting pong */
/* and we are waiting for the pong more than timeout/2 */
ping_delay > server.cluster_node_timeout/2 &&
/* and in such interval we are not seeing any traffic at all. */
@@ -3713,7 +3747,8 @@ void clusterCron(void) {
* the orphaned masters. Note that it does not make sense to try
* a migration if there is no master with at least *two* working
* slaves. */
- if (orphaned_masters && max_slaves >= 2 && this_slaves == max_slaves)
+ if (orphaned_masters && max_slaves >= 2 && this_slaves == max_slaves &&
+ server.cluster_allow_replica_migration)
clusterHandleSlaveMigration(max_slaves);
}
@@ -3823,7 +3858,7 @@ int clusterNodeSetSlotBit(clusterNode *n, int slot) {
* However new masters with slots assigned are considered valid
* migration targets if the rest of the cluster is not a slave-less.
*
- * See https://github.com/antirez/redis/issues/3043 for more info. */
+ * See https://github.com/redis/redis/issues/3043 for more info. */
if (n->numslots == 1 && clusterMastersHaveSlaves())
n->flags |= CLUSTER_NODE_MIGRATE_TO;
}
@@ -4131,15 +4166,16 @@ sds representClusterNodeFlags(sds ci, uint16_t flags) {
* See clusterGenNodesDescription() top comment for more information.
*
* The function returns the string representation as an SDS string. */
-sds clusterGenNodeDescription(clusterNode *node) {
+sds clusterGenNodeDescription(clusterNode *node, int use_pport) {
int j, start;
sds ci;
+ int port = use_pport && node->pport ? node->pport : node->port;
/* Node coordinates */
ci = sdscatlen(sdsempty(),node->name,CLUSTER_NAMELEN);
ci = sdscatfmt(ci," %s:%i@%i ",
node->ip,
- node->port,
+ port,
node->cport);
/* Flags */
@@ -4250,10 +4286,13 @@ void clusterGenNodesSlotsInfo(int filter) {
* include all the known nodes in the representation, including nodes in
* the HANDSHAKE state.
*
+ * Setting use_pport to 1 in a TLS cluster makes the result contain the
+ * plaintext client port rather than the TLS client port of each node.
+ *
* The representation obtained using this function is used for the output
* of the CLUSTER NODES function, and as format for the cluster
* configuration file (nodes.conf) for a given node. */
-sds clusterGenNodesDescription(int filter) {
+sds clusterGenNodesDescription(int filter, int use_pport) {
sds ci = sdsempty(), ni;
dictIterator *di;
dictEntry *de;
@@ -4266,7 +4305,7 @@ sds clusterGenNodesDescription(int filter) {
clusterNode *node = dictGetVal(de);
if (node->flags & filter) continue;
- ni = clusterGenNodeDescription(node);
+ ni = clusterGenNodeDescription(node, use_pport);
ci = sdscatsds(ci,ni);
sdsfree(ni);
ci = sdscatlen(ci,"\n",1);
@@ -4313,7 +4352,37 @@ int getSlotOrReply(client *c, robj *o) {
return (int) slot;
}
-void clusterReplyMultiBulkSlots(client *c) {
+void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, int end_slot) {
+ int i, nested_elements = 3; /* slots (2) + master addr (1) */
+ void *nested_replylen = addReplyDeferredLen(c);
+ addReplyLongLong(c, start_slot);
+ addReplyLongLong(c, end_slot);
+ addReplyArrayLen(c, 3);
+ addReplyBulkCString(c, node->ip);
+ /* Report non-TLS ports to non-TLS client in TLS cluster if available. */
+ int use_pport = (server.tls_cluster &&
+ c->conn && connGetType(c->conn) != CONN_TYPE_TLS);
+ addReplyLongLong(c, use_pport && node->pport ? node->pport : node->port);
+ addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
+
+ /* Remaining nodes in reply are replicas for slot range */
+ for (i = 0; i < node->numslaves; i++) {
+ /* This loop is copy/pasted from clusterGenNodeDescription()
+ * with modifications for per-slot node aggregation. */
+ if (nodeFailed(node->slaves[i])) continue;
+ addReplyArrayLen(c, 3);
+ addReplyBulkCString(c, node->slaves[i]->ip);
+ /* Report slave's non-TLS port to non-TLS client in TLS cluster */
+ addReplyLongLong(c, (use_pport && node->slaves[i]->pport ?
+ node->slaves[i]->pport :
+ node->slaves[i]->port));
+ addReplyBulkCBuffer(c, node->slaves[i]->name, CLUSTER_NAMELEN);
+ nested_elements++;
+ }
+ setDeferredArrayLen(c, nested_replylen, nested_elements);
+}
+
+void clusterReplyMultiBulkSlots(client *c) {
/* Format: 1) 1) start slot
* 2) end slot
* 3) 1) master IP
@@ -4324,69 +4393,29 @@ void clusterReplyMultiBulkSlots(client *c) {
* 3) node ID
* ... continued until done
*/
-
- int num_masters = 0;
+ clusterNode *n = NULL;
+ int num_masters = 0, start = -1;
void *slot_replylen = addReplyDeferredLen(c);
- dictEntry *de;
- dictIterator *di = dictGetSafeIterator(server.cluster->nodes);
- while((de = dictNext(di)) != NULL) {
- clusterNode *node = dictGetVal(de);
- int j = 0, start = -1;
- int i, nested_elements = 0;
-
- /* Skip slaves (that are iterated when producing the output of their
- * master) and masters not serving any slot. */
- if (!nodeIsMaster(node) || node->numslots == 0) continue;
-
- for(i = 0; i < node->numslaves; i++) {
- if (nodeFailed(node->slaves[i])) continue;
- nested_elements++;
+ for (int i = 0; i <= CLUSTER_SLOTS; i++) {
+ /* Find start node and slot id. */
+ if (n == NULL) {
+ if (i == CLUSTER_SLOTS) break;
+ n = server.cluster->slots[i];
+ start = i;
+ continue;
}
- for (j = 0; j < CLUSTER_SLOTS; j++) {
- int bit, i;
-
- if ((bit = clusterNodeGetSlotBit(node,j)) != 0) {
- if (start == -1) start = j;
- }
- if (start != -1 && (!bit || j == CLUSTER_SLOTS-1)) {
- addReplyArrayLen(c, nested_elements + 3); /* slots (2) + master addr (1). */
-
- if (bit && j == CLUSTER_SLOTS-1) j++;
-
- /* If slot exists in output map, add to it's list.
- * else, create a new output map for this slot */
- if (start == j-1) {
- addReplyLongLong(c, start); /* only one slot; low==high */
- addReplyLongLong(c, start);
- } else {
- addReplyLongLong(c, start); /* low */
- addReplyLongLong(c, j-1); /* high */
- }
- start = -1;
-
- /* First node reply position is always the master */
- addReplyArrayLen(c, 3);
- addReplyBulkCString(c, node->ip);
- addReplyLongLong(c, node->port);
- addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
-
- /* Remaining nodes in reply are replicas for slot range */
- for (i = 0; i < node->numslaves; i++) {
- /* This loop is copy/pasted from clusterGenNodeDescription()
- * with modifications for per-slot node aggregation */
- if (nodeFailed(node->slaves[i])) continue;
- addReplyArrayLen(c, 3);
- addReplyBulkCString(c, node->slaves[i]->ip);
- addReplyLongLong(c, node->slaves[i]->port);
- addReplyBulkCBuffer(c, node->slaves[i]->name, CLUSTER_NAMELEN);
- }
- num_masters++;
- }
+ /* Add cluster slots info when we reach a slot owned by a different
+ * node than the current range, or the end of the slot space. */
+ if (i == CLUSTER_SLOTS || n != server.cluster->slots[i]) {
+ addNodeReplyForClusterSlot(c, n, start, i-1);
+ num_masters++;
+ if (i == CLUSTER_SLOTS) break;
+ n = server.cluster->slots[i];
+ start = i;
}
}
- dictReleaseIterator(di);
setDeferredArrayLen(c, slot_replylen, num_masters);
}
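The rewritten clusterReplyMultiBulkSlots() above no longer scans each node's slot bitmap; it walks the 16384-entry slot ownership array once and emits one reply entry (via addNodeReplyForClusterSlot) every time the owning node changes. A standalone sketch of that range-aggregation idea over a toy ownership array, illustrative only and not the server's code:

    #include <stdio.h>

    #define SLOTS 16   /* toy stand-in for CLUSTER_SLOTS (16384) */

    int main(void) {
        /* slot index -> owner id; 0 means the slot is unassigned */
        int owner[SLOTS] = {1,1,1,2,2,0,0,3,3,3,3,1,1,0,2,2};
        int cur = 0, start = -1;

        for (int i = 0; i <= SLOTS; i++) {
            int o = (i == SLOTS) ? 0 : owner[i];
            if (cur == 0) {                   /* not inside a range yet */
                if (i == SLOTS) break;
                if (o) { cur = o; start = i; }
                continue;
            }
            if (i == SLOTS || o != cur) {     /* owner changed: emit the range */
                printf("slots %d-%d -> node %d\n", start, i - 1, cur);
                cur = 0;
                if (i == SLOTS) break;
                if (o) { cur = o; start = i; }
            }
        }
        return 0;
    }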
@@ -4475,7 +4504,11 @@ NULL
}
} else if (!strcasecmp(c->argv[1]->ptr,"nodes") && c->argc == 2) {
/* CLUSTER NODES */
- sds nodes = clusterGenNodesDescription(0);
+ /* Report plaintext ports, only if cluster is TLS but client is known to
+ * be non-TLS. */
+ int use_pport = (server.tls_cluster &&
+ c->conn && connGetType(c->conn) != CONN_TYPE_TLS);
+ sds nodes = clusterGenNodesDescription(0, use_pport);
addReplyVerbatim(c,nodes,sdslen(nodes),"txt");
sdsfree(nodes);
} else if (!strcasecmp(c->argv[1]->ptr,"myid") && c->argc == 2) {
@@ -4851,9 +4884,12 @@ NULL
return;
}
+ /* Use plaintext port if cluster is TLS but client is non-TLS. */
+ int use_pport = (server.tls_cluster &&
+ c->conn && connGetType(c->conn) != CONN_TYPE_TLS);
addReplyArrayLen(c,n->numslaves);
for (j = 0; j < n->numslaves; j++) {
- sds ni = clusterGenNodeDescription(n->slaves[j]);
+ sds ni = clusterGenNodeDescription(n->slaves[j], use_pport);
addReplyBulkCString(c,ni);
sdsfree(ni);
}
@@ -5824,18 +5860,14 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
* cluster is down. */
if (error_code) *error_code = CLUSTER_REDIR_DOWN_STATE;
return NULL;
- } else if ((cmd->flags & CMD_WRITE) && !(cmd->proc == evalCommand)
- && !(cmd->proc == evalShaCommand))
- {
- /* The cluster is configured to allow read only commands
- * but this command is neither readonly, nor EVAL or
- * EVALSHA. */
+ } else if (cmd->flags & CMD_WRITE) {
+ /* The cluster is configured to allow read only commands, but this is a write command. */
if (error_code) *error_code = CLUSTER_REDIR_DOWN_RO_STATE;
return NULL;
} else {
/* Fall through and allow the command to be executed:
* this happens when server.cluster_allow_reads_when_down is
- * true and the command is a readonly command or EVAL / EVALSHA. */
+ * true and the command is not a write command */
}
}
@@ -5876,7 +5908,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in
int is_write_command = (c->cmd->flags & CMD_WRITE) ||
(c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE));
if (c->flags & CLIENT_READONLY &&
- (!is_write_command || cmd->proc == evalCommand || cmd->proc == evalShaCommand) &&
+ !is_write_command &&
nodeIsSlave(myself) &&
myself->slaveof == n)
{
@@ -5913,10 +5945,15 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co
} else if (error_code == CLUSTER_REDIR_MOVED ||
error_code == CLUSTER_REDIR_ASK)
{
+ /* Redirect to IP:port. Include plaintext port if cluster is TLS but
+ * client is non-TLS. */
+ int use_pport = (server.tls_cluster &&
+ c->conn && connGetType(c->conn) != CONN_TYPE_TLS);
+ int port = use_pport && n->pport ? n->pport : n->port;
addReplyErrorSds(c,sdscatprintf(sdsempty(),
"-%s %d %s:%d",
(error_code == CLUSTER_REDIR_ASK) ? "ASK" : "MOVED",
- hashslot,n->ip,n->port));
+ hashslot, n->ip, port));
} else {
serverPanic("getNodeByQuery() unknown error.");
}
diff --git a/src/cluster.h b/src/cluster.h
index 716c0d49c..f476a50a0 100644
--- a/src/cluster.h
+++ b/src/cluster.h
@@ -135,7 +135,9 @@ typedef struct clusterNode {
mstime_t orphaned_time; /* Starting time of orphaned master condition */
long long repl_offset; /* Last known repl offset for this node. */
char ip[NET_IP_STR_LEN]; /* Latest known IP address of this node */
- int port; /* Latest known clients port of this node */
+ int port; /* Latest known clients port (TLS or plain). */
+ int pport; /* Latest known clients plaintext port. Only used
+ if the main clients port is for TLS. */
int cport; /* Latest known cluster port of this node. */
clusterLink *link; /* TCP/IP link with this node */
list *fail_reports; /* List of nodes signaling this as failing */
@@ -168,7 +170,7 @@ typedef struct clusterState {
clusterNode *mf_slave; /* Slave performing the manual failover. */
/* Manual failover state of slave. */
long long mf_master_offset; /* Master offset the slave needs to start MF
- or zero if still not received. */
+ or -1 if still not received. */
int mf_can_start; /* If non-zero signal that the manual failover
can start requesting masters vote. */
/* The following fields are used by masters to take state on elections. */
@@ -194,7 +196,8 @@ typedef struct {
uint16_t port; /* base port last time it was seen */
uint16_t cport; /* cluster port last time it was seen */
uint16_t flags; /* node->flags copy */
- uint32_t notused1;
+ uint16_t pport; /* plaintext-port, when base port is TLS */
+ uint16_t notused1;
} clusterMsgDataGossip;
typedef struct {
@@ -267,7 +270,8 @@ typedef struct {
unsigned char myslots[CLUSTER_SLOTS/8];
char slaveof[CLUSTER_NAMELEN];
char myip[NET_IP_STR_LEN]; /* Sender IP, if not all zeroed. */
- char notused1[34]; /* 34 bytes reserved for future usage. */
+ char notused1[32]; /* 32 bytes reserved for future usage. */
+ uint16_t pport; /* Sender TCP plaintext port, if base port is TLS */
uint16_t cport; /* Sender TCP cluster bus port */
uint16_t flags; /* Sender node flags */
unsigned char state; /* Cluster state from the POV of the sender */
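The new pport field above, together with the extra gossip and header fields, lets a TLS cluster advertise a plaintext client port alongside the TLS one: port carries whatever the node serves cluster clients on (the TLS port when tls-cluster is enabled), while pport carries the plaintext port, or 0 when none is announced. The selection rule repeated at the reply sites in cluster.c boils down to the following standalone sketch; the type and function names are illustrative, not the server's:

    #include <stdio.h>

    struct node_addr {
        int port;    /* advertised client port (the TLS port when the cluster uses TLS) */
        int pport;   /* plaintext client port, or 0 if not announced */
    };

    /* Plaintext clients of a TLS cluster get pport when the node announces one;
     * everyone else gets the regular port. */
    static int port_to_report(const struct node_addr *n, int cluster_uses_tls,
                              int client_is_tls) {
        int use_pport = cluster_uses_tls && !client_is_tls;
        return (use_pport && n->pport) ? n->pport : n->port;
    }

    int main(void) {
        struct node_addr n = { .port = 6390 /* TLS */, .pport = 6379 /* plaintext */ };
        printf("TLS client sees:       %d\n", port_to_report(&n, 1, 1));  /* 6390 */
        printf("plaintext client sees: %d\n", port_to_report(&n, 1, 0));  /* 6379 */
        return 0;
    }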
diff --git a/src/config.c b/src/config.c
index 1d6ce6b8c..9861c5f52 100644
--- a/src/config.c
+++ b/src/config.c
@@ -229,8 +229,6 @@ typedef union typeData {
typedef struct typeInterface {
/* Called on server start, to init the server with default value */
void (*init)(typeData data);
- /* Called on server start, should return 1 on success, 0 on error and should set err */
- int (*load)(typeData data, sds *argc, int argv, const char **err);
/* Called on server startup and CONFIG SET, returns 1 on success, 0 on error
* and can set a verbose err string, update is true when called from CONFIG SET */
int (*set)(typeData data, sds value, int update, const char **err);
@@ -243,11 +241,16 @@ typedef struct typeInterface {
typedef struct standardConfig {
const char *name; /* The user visible name of this config */
const char *alias; /* An alias that can also be used for this config */
- const int modifiable; /* Can this value be updated by CONFIG SET? */
+ const unsigned int flags; /* Flags for this specific config */
typeInterface interface; /* The function pointers that define the type interface */
typeData data; /* The type specific data exposed used by the interface */
} standardConfig;
+#define MODIFIABLE_CONFIG 0 /* This is the implied default for a standard
+ * config, which is mutable. */
+#define IMMUTABLE_CONFIG (1ULL<<0) /* Can this value only be set at startup? */
+#define SENSITIVE_CONFIG (1ULL<<1) /* Does this value contain sensitive information */
+
standardConfig configs[];
/*-----------------------------------------------------------------------------
@@ -618,6 +621,10 @@ void loadServerConfigFromString(char *config) {
goto loaderr;
}
+ /* To ensure backward compatibility, clamp hz when it is configured out of range */
+ if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ;
+ if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ;
+
sdsfreesplitres(lines,totlines);
return;
@@ -714,9 +721,13 @@ void configSetCommand(client *c) {
/* Iterate the configs that are standard */
for (standardConfig *config = configs; config->name != NULL; config++) {
- if(config->modifiable && (!strcasecmp(c->argv[2]->ptr,config->name) ||
+ if (!(config->flags & IMMUTABLE_CONFIG) &&
+ (!strcasecmp(c->argv[2]->ptr,config->name) ||
(config->alias && !strcasecmp(c->argv[2]->ptr,config->alias))))
{
+ if (config->flags & SENSITIVE_CONFIG) {
+ preventCommandLogging(c);
+ }
if (!config->interface.set(config->data,o->ptr,1,&errstr)) {
goto badfmt;
}
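The boolean modifiable field becomes a flags bitmask, so independent properties can be combined on a single config: IMMUTABLE_CONFIG makes CONFIG SET reject it, and SENSITIVE_CONFIG (applied to masteruser, masterauth and requirepass further down) keeps the value out of command logging via preventCommandLogging() as shown just above. A small standalone sketch of how such flag tests compose; the struct and sample entries are illustrative, only the flag values mirror the patch:

    #include <stdio.h>

    #define MODIFIABLE_CONFIG 0           /* implied default: mutable */
    #define IMMUTABLE_CONFIG  (1ULL<<0)   /* may only be set at startup */
    #define SENSITIVE_CONFIG  (1ULL<<1)   /* value must not end up in logs */

    struct cfg { const char *name; unsigned long long flags; };

    int main(void) {
        struct cfg configs[] = {
            { "maxmemory",   MODIFIABLE_CONFIG },
            { "aclfile",     IMMUTABLE_CONFIG },
            { "requirepass", MODIFIABLE_CONFIG | SENSITIVE_CONFIG },
        };
        for (size_t i = 0; i < sizeof(configs)/sizeof(configs[0]); i++) {
            printf("%-12s runtime-settable: %s, redact in logs: %s\n",
                   configs[i].name,
                   (configs[i].flags & IMMUTABLE_CONFIG) ? "no" : "yes",
                   (configs[i].flags & SENSITIVE_CONFIG) ? "yes" : "no");
        }
        return 0;
    }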
@@ -1370,14 +1381,19 @@ void rewriteConfigSaveOption(struct rewriteConfigState *state) {
return;
}
- /* Note that if there are no save parameters at all, all the current
- * config line with "save" will be detected as orphaned and deleted,
- * resulting into no RDB persistence as expected. */
- for (j = 0; j < server.saveparamslen; j++) {
- line = sdscatprintf(sdsempty(),"save %ld %d",
- (long) server.saveparams[j].seconds, server.saveparams[j].changes);
- rewriteConfigRewriteLine(state,"save",line,1);
+ /* Rewrite save parameters, or an empty 'save ""' line to prevent the
+ * defaults from being used.
+ */
+ if (!server.saveparamslen) {
+ rewriteConfigRewriteLine(state,"save",sdsnew("save \"\""),1);
+ } else {
+ for (j = 0; j < server.saveparamslen; j++) {
+ line = sdscatprintf(sdsempty(),"save %ld %d",
+ (long) server.saveparams[j].seconds, server.saveparams[j].changes);
+ rewriteConfigRewriteLine(state,"save",line,1);
+ }
}
+
/* Mark "save" as processed in case server.saveparamslen is zero. */
rewriteConfigMarkAsProcessed(state,"save");
}
@@ -1711,13 +1727,10 @@ int rewriteConfig(char *path, int force_all) {
#define LOADBUF_SIZE 256
static char loadbuf[LOADBUF_SIZE];
-#define MODIFIABLE_CONFIG 1
-#define IMMUTABLE_CONFIG 0
-
-#define embedCommonConfig(config_name, config_alias, is_modifiable) \
+#define embedCommonConfig(config_name, config_alias, config_flags) \
.name = (config_name), \
.alias = (config_alias), \
- .modifiable = (is_modifiable),
+ .flags = (config_flags),
#define embedConfigInterface(initfn, setfn, getfn, rewritefn) .interface = { \
.init = (initfn), \
@@ -1768,8 +1781,8 @@ static void boolConfigRewrite(typeData data, const char *name, struct rewriteCon
rewriteConfigYesNoOption(state, name,*(data.yesno.config), data.yesno.default_value);
}
-#define createBoolConfig(name, alias, modifiable, config_addr, default, is_valid, update) { \
- embedCommonConfig(name, alias, modifiable) \
+#define createBoolConfig(name, alias, flags, config_addr, default, is_valid, update) { \
+ embedCommonConfig(name, alias, flags) \
embedConfigInterface(boolConfigInit, boolConfigSet, boolConfigGet, boolConfigRewrite) \
.data.yesno = { \
.config = &(config_addr), \
@@ -1841,8 +1854,8 @@ static void sdsConfigRewrite(typeData data, const char *name, struct rewriteConf
#define ALLOW_EMPTY_STRING 0
#define EMPTY_STRING_IS_NULL 1
-#define createStringConfig(name, alias, modifiable, empty_to_null, config_addr, default, is_valid, update) { \
- embedCommonConfig(name, alias, modifiable) \
+#define createStringConfig(name, alias, flags, empty_to_null, config_addr, default, is_valid, update) { \
+ embedCommonConfig(name, alias, flags) \
embedConfigInterface(stringConfigInit, stringConfigSet, stringConfigGet, stringConfigRewrite) \
.data.string = { \
.config = &(config_addr), \
@@ -1853,8 +1866,8 @@ static void sdsConfigRewrite(typeData data, const char *name, struct rewriteConf
} \
}
-#define createSDSConfig(name, alias, modifiable, empty_to_null, config_addr, default, is_valid, update) { \
- embedCommonConfig(name, alias, modifiable) \
+#define createSDSConfig(name, alias, flags, empty_to_null, config_addr, default, is_valid, update) { \
+ embedCommonConfig(name, alias, flags) \
embedConfigInterface(sdsConfigInit, sdsConfigSet, sdsConfigGet, sdsConfigRewrite) \
.data.sds = { \
.config = &(config_addr), \
@@ -1909,8 +1922,8 @@ static void enumConfigRewrite(typeData data, const char *name, struct rewriteCon
rewriteConfigEnumOption(state, name,*(data.enumd.config), data.enumd.enum_value, data.enumd.default_value);
}
-#define createEnumConfig(name, alias, modifiable, enum, config_addr, default, is_valid, update) { \
- embedCommonConfig(name, alias, modifiable) \
+#define createEnumConfig(name, alias, flags, enum, config_addr, default, is_valid, update) { \
+ embedCommonConfig(name, alias, flags) \
embedConfigInterface(enumConfigInit, enumConfigSet, enumConfigGet, enumConfigRewrite) \
.data.enumd = { \
.config = &(config_addr), \
@@ -2063,8 +2076,8 @@ static void numericConfigRewrite(typeData data, const char *name, struct rewrite
#define INTEGER_CONFIG 0
#define MEMORY_CONFIG 1
-#define embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) { \
- embedCommonConfig(name, alias, modifiable) \
+#define embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) { \
+ embedCommonConfig(name, alias, flags) \
embedConfigInterface(numericConfigInit, numericConfigSet, numericConfigGet, numericConfigRewrite) \
.data.numeric = { \
.lower_bound = (lower), \
@@ -2074,71 +2087,71 @@ static void numericConfigRewrite(typeData data, const char *name, struct rewrite
.update_fn = (update), \
.is_memory = (memory),
-#define createIntConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createIntConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_INT, \
.config.i = &(config_addr) \
} \
}
-#define createUIntConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createUIntConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_UINT, \
.config.ui = &(config_addr) \
} \
}
-#define createLongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createLongConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_LONG, \
.config.l = &(config_addr) \
} \
}
-#define createULongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createULongConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_ULONG, \
.config.ul = &(config_addr) \
} \
}
-#define createLongLongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createLongLongConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_LONG_LONG, \
.config.ll = &(config_addr) \
} \
}
-#define createULongLongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createULongLongConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_ULONG_LONG, \
.config.ull = &(config_addr) \
} \
}
-#define createSizeTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createSizeTConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_SIZE_T, \
.config.st = &(config_addr) \
} \
}
-#define createSSizeTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createSSizeTConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_SSIZE_T, \
.config.sst = &(config_addr) \
} \
}
-#define createTimeTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createTimeTConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_TIME_T, \
.config.tt = &(config_addr) \
} \
}
-#define createOffTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
- embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
+#define createOffTConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
+ embedCommonNumericalConfig(name, alias, flags, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_OFF_T, \
.config.ot = &(config_addr) \
} \
@@ -2425,13 +2438,15 @@ standardConfig configs[] = {
createBoolConfig("crash-memcheck-enabled", NULL, MODIFIABLE_CONFIG, server.memcheck_enabled, 1, NULL, NULL),
createBoolConfig("use-exit-on-panic", NULL, MODIFIABLE_CONFIG, server.use_exit_on_panic, 0, NULL, NULL),
createBoolConfig("disable-thp", NULL, MODIFIABLE_CONFIG, server.disable_thp, 1, NULL, NULL),
+ createBoolConfig("cluster-allow-replica-migration", NULL, MODIFIABLE_CONFIG, server.cluster_allow_replica_migration, 1, NULL, NULL),
+ createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, server.replica_announced, 1, NULL, NULL),
/* String Configs */
createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL),
createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL),
createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL),
createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.slave_announce_ip, NULL, NULL, NULL),
- createStringConfig("masteruser", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL),
+ createStringConfig("masteruser", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL),
createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, NULL),
createStringConfig("syslog-ident", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.syslog_ident, "redis", NULL, NULL),
createStringConfig("dbfilename", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.rdb_filename, "dump.rdb", isValidDBfilename, NULL),
@@ -2444,8 +2459,8 @@ standardConfig configs[] = {
createStringConfig("proc-title-template", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.proc_title_template, CONFIG_DEFAULT_PROC_TITLE_TEMPLATE, isValidProcTitleTemplate, updateProcTitleTemplate),
/* SDS Configs */
- createSDSConfig("masterauth", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL),
- createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass),
+ createSDSConfig("masterauth", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL),
+ createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass),
/* Enum Configs */
createEnumConfig("supervised", NULL, IMMUTABLE_CONFIG, supervised_mode_enum, server.supervised_mode, SUPERVISED_NONE, NULL, NULL),
@@ -2455,7 +2470,7 @@ standardConfig configs[] = {
createEnumConfig("maxmemory-policy", NULL, MODIFIABLE_CONFIG, maxmemory_policy_enum, server.maxmemory_policy, MAXMEMORY_NO_EVICTION, NULL, NULL),
createEnumConfig("appendfsync", NULL, MODIFIABLE_CONFIG, aof_fsync_enum, server.aof_fsync, AOF_FSYNC_EVERYSEC, NULL, NULL),
createEnumConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, oom_score_adj_enum, server.oom_score_adj, OOM_SCORE_ADJ_NO, NULL, updateOOMScoreAdj),
- createEnumConfig("acl-pubsub-default", NULL, MODIFIABLE_CONFIG, acl_pubsub_default_enum, server.acl_pubusub_default, USER_FLAG_ALLCHANNELS, NULL, NULL),
+ createEnumConfig("acl-pubsub-default", NULL, MODIFIABLE_CONFIG, acl_pubsub_default_enum, server.acl_pubsub_default, USER_FLAG_ALLCHANNELS, NULL, NULL),
createEnumConfig("sanitize-dump-payload", NULL, MODIFIABLE_CONFIG, sanitize_dump_payload_enum, server.sanitize_dump_payload, SANITIZE_DUMP_NO, NULL, NULL),
/* Integer configs */
@@ -2482,6 +2497,7 @@ standardConfig configs[] = {
createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */
createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, NULL), /* Default: Use +10000 offset. */
createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, NULL), /* Use server.port */
+ createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* Use server.tls_port */
createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL),
createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_slave_period, 10, INTEGER_CONFIG, NULL, NULL),
createIntConfig("list-compress-depth", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL),
@@ -2539,8 +2555,10 @@ standardConfig configs[] = {
createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool),
createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-key-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, updateTlsCfg),
+ createStringConfig("tls-key-file-pass", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file_pass, NULL, NULL, updateTlsCfg),
createStringConfig("tls-client-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_cert_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-client-key-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_key_file, NULL, NULL, updateTlsCfg),
+ createStringConfig("tls-client-key-file-pass", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_key_file_pass, NULL, NULL, updateTlsCfg),
createStringConfig("tls-dh-params-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-ca-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-ca-cert-dir", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_dir, NULL, NULL, updateTlsCfg),
diff --git a/src/crc64.c b/src/crc64.c
index 6c9432c4a..d4db4158e 100644
--- a/src/crc64.c
+++ b/src/crc64.c
@@ -127,9 +127,10 @@ uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l) {
#include <stdio.h>
#define UNUSED(x) (void)(x)
-int crc64Test(int argc, char *argv[]) {
+int crc64Test(int argc, char *argv[], int accurate) {
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
crc64_init();
printf("[calcula]: e9c6d914c4b8d9ca == %016" PRIx64 "\n",
(uint64_t)_crc64(0, "123456789", 9));
diff --git a/src/crc64.h b/src/crc64.h
index 60c42345f..38b0b6387 100644
--- a/src/crc64.h
+++ b/src/crc64.h
@@ -7,7 +7,7 @@ void crc64_init(void);
uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l);
#ifdef REDIS_TEST
-int crc64Test(int argc, char *argv[]);
+int crc64Test(int argc, char *argv[], int accurate);
#endif
#endif
diff --git a/src/db.c b/src/db.c
index 57705f003..ec68c228c 100644
--- a/src/db.c
+++ b/src/db.c
@@ -139,9 +139,9 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
keymiss:
if (!(flags & LOOKUP_NONOTIFY)) {
- server.stat_keyspace_misses++;
notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id);
}
+ server.stat_keyspace_misses++;
return NULL;
}
@@ -164,16 +164,24 @@ robj *lookupKeyWriteWithFlags(redisDb *db, robj *key, int flags) {
robj *lookupKeyWrite(redisDb *db, robj *key) {
return lookupKeyWriteWithFlags(db, key, LOOKUP_NONE);
}
-
+static void SentReplyOnKeyMiss(client *c, robj *reply){
+ serverAssert(sdsEncodedObject(reply));
+ sds rep = reply->ptr;
+ if (sdslen(rep) > 1 && rep[0] == '-'){
+ addReplyErrorObject(c, reply);
+ } else {
+ addReply(c,reply);
+ }
+}
robj *lookupKeyReadOrReply(client *c, robj *key, robj *reply) {
robj *o = lookupKeyRead(c->db, key);
- if (!o) addReply(c,reply);
+ if (!o) SentReplyOnKeyMiss(c, reply);
return o;
}
robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply) {
robj *o = lookupKeyWrite(c->db, key);
- if (!o) addReply(c,reply);
+ if (!o) SentReplyOnKeyMiss(c, reply);
return o;
}
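The SentReplyOnKeyMiss() helper above lets callers pass either a normal shared reply (typically a null reply) or a prebuilt error object as the "key missing" reply: RESP error strings start with '-', which is what the rep[0] check keys on. A standalone sketch of the same classification, illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* RESP error replies start with '-'; anything else is a regular reply. */
    static const char *classify_reply(const char *rep) {
        return (strlen(rep) > 1 && rep[0] == '-') ? "error reply" : "plain reply";
    }

    int main(void) {
        printf("%s\n", classify_reply("$-1\r\n"));            /* null bulk -> plain reply */
        printf("%s\n", classify_reply("-WRONGTYPE bad key"));  /* -> error reply */
        return 0;
    }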
@@ -1445,7 +1453,12 @@ void propagateExpire(redisDb *db, robj *key, int lazy) {
incrRefCount(argv[0]);
incrRefCount(argv[1]);
+ /* If the master decided to expire a key we must propagate it to replicas no matter what,
+ * even if a module executed a command without asking for propagation. */
+ int prev_replication_allowed = server.replication_allowed;
+ server.replication_allowed = 1;
propagate(server.delCommand,db->id,argv,2,PROPAGATE_AOF|PROPAGATE_REPL);
+ server.replication_allowed = prev_replication_allowed;
decrRefCount(argv[0]);
decrRefCount(argv[1]);
diff --git a/src/debug.c b/src/debug.c
index e7fec293a..098ce6ef7 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -249,7 +249,7 @@ void xorObjectDigest(redisDb *db, robj *keyobj, unsigned char *digest, robj *o)
}
streamIteratorStop(&si);
} else if (o->type == OBJ_MODULE) {
- RedisModuleDigest md;
+ RedisModuleDigest md = {{0},{0}};
moduleValue *mv = o->ptr;
moduleType *mt = mv->type;
moduleInitDigestContext(md);
@@ -441,7 +441,7 @@ void debugCommand(client *c) {
" conflicting keys will generate an exception and kill the server."
" * NOSAVE: the database will be loaded from an existing RDB file.",
" Examples:",
-" * DEBUG RELOAD: verify that the server is able to persist, flsuh and reload",
+" * DEBUG RELOAD: verify that the server is able to persist, flush and reload",
" the database.",
" * DEBUG RELOAD NOSAVE: replace the current database with the contents of an",
" existing RDB file.",
@@ -473,7 +473,7 @@ NULL
} else if (!strcasecmp(c->argv[1]->ptr,"segfault")) {
*((char*)-1) = 'x';
} else if (!strcasecmp(c->argv[1]->ptr,"panic")) {
- serverPanic("DEBUG PANIC called at Unix time %ld", time(NULL));
+ serverPanic("DEBUG PANIC called at Unix time %lld", (long long)time(NULL));
} else if (!strcasecmp(c->argv[1]->ptr,"restart") ||
!strcasecmp(c->argv[1]->ptr,"crash-and-recover"))
{
@@ -1809,7 +1809,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
serverLog(LL_WARNING,
"Accessing address: %p", (void*)info->si_addr);
}
- if (info->si_pid != -1) {
+ if (info->si_code <= SI_USER && info->si_pid != -1) {
serverLog(LL_WARNING, "Killed by PID: %ld, UID: %d", (long) info->si_pid, info->si_uid);
}
diff --git a/src/defrag.c b/src/defrag.c
index 2b28d523d..3dfb8679c 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -347,7 +347,9 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) {
if ((newsds = activeDefragSds(sdsele))) {
/* When defragging an sds value, we need to update the dict key */
uint64_t hash = dictGetHash(d, newsds);
- replaceSatelliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged);
+ dictEntry **deref = dictFindEntryRefByPtrAndHash(d, sdsele, hash);
+ if (deref)
+ (*deref)->key = newsds;
ln->value = newsds;
defragged++;
}
diff --git a/src/dict.c b/src/dict.c
index 4a9f3fb0a..21c616e6f 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -45,11 +45,7 @@
#include "dict.h"
#include "zmalloc.h"
-#ifndef DICT_BENCHMARK_MAIN
#include "redisassert.h"
-#else
-#include <assert.h>
-#endif
/* Using dictEnableResize() / dictDisableResize() we make possible to
* enable/disable resizing of the hash table as needed. This is very important
@@ -1175,20 +1171,18 @@ void dictGetStats(char *buf, size_t bufsize, dict *d) {
/* ------------------------------- Benchmark ---------------------------------*/
-#ifdef DICT_BENCHMARK_MAIN
-
-#include "sds.h"
+#ifdef REDIS_TEST
uint64_t hashCallback(const void *key) {
- return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
+ return dictGenHashFunction((unsigned char*)key, strlen((char*)key));
}
int compareCallback(void *privdata, const void *key1, const void *key2) {
int l1,l2;
DICT_NOTUSED(privdata);
- l1 = sdslen((sds)key1);
- l2 = sdslen((sds)key2);
+ l1 = strlen((char*)key1);
+ l2 = strlen((char*)key2);
if (l1 != l2) return 0;
return memcmp(key1, key2, l1) == 0;
}
@@ -1196,7 +1190,19 @@ int compareCallback(void *privdata, const void *key1, const void *key2) {
void freeCallback(void *privdata, void *val) {
DICT_NOTUSED(privdata);
- sdsfree(val);
+ zfree(val);
+}
+
+char *stringFromLongLong(long long value) {
+ char buf[32];
+ int len;
+ char *s;
+
+ len = sprintf(buf,"%lld",value);
+ s = zmalloc(len+1);
+ memcpy(s, buf, len);
+ s[len] = '\0';
+ return s;
}
dictType BenchmarkDictType = {
@@ -1215,22 +1221,26 @@ dictType BenchmarkDictType = {
printf(msg ": %ld items in %lld ms\n", count, elapsed); \
} while(0)
-/* dict-benchmark [count] */
-int main(int argc, char **argv) {
+/* ./redis-server test dict [<count> | --accurate] */
+int dictTest(int argc, char **argv, int accurate) {
long j;
long long start, elapsed;
dict *dict = dictCreate(&BenchmarkDictType,NULL);
long count = 0;
- if (argc == 2) {
- count = strtol(argv[1],NULL,10);
+ if (argc == 4) {
+ if (accurate) {
+ count = 5000000;
+ } else {
+ count = strtol(argv[3],NULL,10);
+ }
} else {
- count = 5000000;
+ count = 5000;
}
start_benchmark();
for (j = 0; j < count; j++) {
- int retval = dictAdd(dict,sdsfromlonglong(j),(void*)j);
+ int retval = dictAdd(dict,stringFromLongLong(j),(void*)j);
assert(retval == DICT_OK);
}
end_benchmark("Inserting");
@@ -1243,28 +1253,28 @@ int main(int argc, char **argv) {
start_benchmark();
for (j = 0; j < count; j++) {
- sds key = sdsfromlonglong(j);
+ char *key = stringFromLongLong(j);
dictEntry *de = dictFind(dict,key);
assert(de != NULL);
- sdsfree(key);
+ zfree(key);
}
end_benchmark("Linear access of existing elements");
start_benchmark();
for (j = 0; j < count; j++) {
- sds key = sdsfromlonglong(j);
+ char *key = stringFromLongLong(j);
dictEntry *de = dictFind(dict,key);
assert(de != NULL);
- sdsfree(key);
+ zfree(key);
}
end_benchmark("Linear access of existing elements (2nd round)");
start_benchmark();
for (j = 0; j < count; j++) {
- sds key = sdsfromlonglong(rand() % count);
+ char *key = stringFromLongLong(rand() % count);
dictEntry *de = dictFind(dict,key);
assert(de != NULL);
- sdsfree(key);
+ zfree(key);
}
end_benchmark("Random access of existing elements");
@@ -1277,17 +1287,17 @@ int main(int argc, char **argv) {
start_benchmark();
for (j = 0; j < count; j++) {
- sds key = sdsfromlonglong(rand() % count);
+ char *key = stringFromLongLong(rand() % count);
key[0] = 'X';
dictEntry *de = dictFind(dict,key);
assert(de == NULL);
- sdsfree(key);
+ zfree(key);
}
end_benchmark("Accessing missing");
start_benchmark();
for (j = 0; j < count; j++) {
- sds key = sdsfromlonglong(j);
+ char *key = stringFromLongLong(j);
int retval = dictDelete(dict,key);
assert(retval == DICT_OK);
key[0] += 17; /* Change first number to letter. */
@@ -1295,5 +1305,7 @@ int main(int argc, char **argv) {
assert(retval == DICT_OK);
}
end_benchmark("Removing and adding");
+ dictRelease(dict);
+ return 0;
}
#endif
diff --git a/src/dict.h b/src/dict.h
index bd57f859e..7e2258960 100644
--- a/src/dict.h
+++ b/src/dict.h
@@ -201,4 +201,8 @@ extern dictType dictTypeHeapStringCopyKey;
extern dictType dictTypeHeapStrings;
extern dictType dictTypeHeapStringCopyKeyValue;
+#ifdef REDIS_TEST
+int dictTest(int argc, char *argv[], int accurate);
+#endif
+
#endif /* __DICT_H */
diff --git a/src/endianconv.c b/src/endianconv.c
index 918844e25..98ed405a5 100644
--- a/src/endianconv.c
+++ b/src/endianconv.c
@@ -105,11 +105,12 @@ uint64_t intrev64(uint64_t v) {
#include <stdio.h>
#define UNUSED(x) (void)(x)
-int endianconvTest(int argc, char *argv[]) {
+int endianconvTest(int argc, char *argv[], int accurate) {
char buf[32];
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
sprintf(buf,"ciaoroma");
memrev16(buf);
diff --git a/src/endianconv.h b/src/endianconv.h
index 475f72b08..004749786 100644
--- a/src/endianconv.h
+++ b/src/endianconv.h
@@ -72,7 +72,7 @@ uint64_t intrev64(uint64_t v);
#endif
#ifdef REDIS_TEST
-int endianconvTest(int argc, char *argv[]);
+int endianconvTest(int argc, char *argv[], int accurate);
#endif
#endif
diff --git a/src/help.h b/src/help.h
index 97de00231..c14b53aba 100644
--- a/src/help.h
+++ b/src/help.h
@@ -184,7 +184,7 @@ struct commandHelp {
8,
"6.2.0" },
{ "CLIENT KILL",
- "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [SKIPME yes/no]",
+ "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [LADDR ip:port] [SKIPME yes/no]",
"Kill the connection of a client",
8,
"2.4.0" },
diff --git a/src/intset.c b/src/intset.c
index 74de87acb..1a64ecae8 100644
--- a/src/intset.c
+++ b/src/intset.c
@@ -284,7 +284,7 @@ size_t intsetBlobLen(intset *is) {
return sizeof(intset)+intrev32ifbe(is->length)*intrev32ifbe(is->encoding);
}
-/* Validate the integrity of the data stracture.
+/* Validate the integrity of the data structure.
* when `deep` is 0, only the integrity of the header is validated.
* when `deep` is 1, we make sure there are no duplicate or out of order records. */
int intsetValidateIntegrity(const unsigned char *p, size_t size, int deep) {
@@ -392,7 +392,7 @@ static void checkConsistency(intset *is) {
}
#define UNUSED(x) (void)(x)
-int intsetTest(int argc, char **argv) {
+int intsetTest(int argc, char **argv, int accurate) {
uint8_t success;
int i;
intset *is;
@@ -400,6 +400,7 @@ int intsetTest(int argc, char **argv) {
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
printf("Value encodings: "); {
assert(_intsetValueEncoding(-32768) == INTSET_ENC_INT16);
@@ -424,6 +425,7 @@ int intsetTest(int argc, char **argv) {
is = intsetAdd(is,4,&success); assert(success);
is = intsetAdd(is,4,&success); assert(!success);
ok();
+ zfree(is);
}
printf("Large number of random adds: "); {
@@ -436,6 +438,7 @@ int intsetTest(int argc, char **argv) {
assert(intrev32ifbe(is->length) == inserts);
checkConsistency(is);
ok();
+ zfree(is);
}
printf("Upgrade from int16 to int32: "); {
@@ -447,6 +450,7 @@ int intsetTest(int argc, char **argv) {
assert(intsetFind(is,32));
assert(intsetFind(is,65535));
checkConsistency(is);
+ zfree(is);
is = intsetNew();
is = intsetAdd(is,32,NULL);
@@ -457,6 +461,7 @@ int intsetTest(int argc, char **argv) {
assert(intsetFind(is,-65535));
checkConsistency(is);
ok();
+ zfree(is);
}
printf("Upgrade from int16 to int64: "); {
@@ -468,6 +473,7 @@ int intsetTest(int argc, char **argv) {
assert(intsetFind(is,32));
assert(intsetFind(is,4294967295));
checkConsistency(is);
+ zfree(is);
is = intsetNew();
is = intsetAdd(is,32,NULL);
@@ -478,6 +484,7 @@ int intsetTest(int argc, char **argv) {
assert(intsetFind(is,-4294967295));
checkConsistency(is);
ok();
+ zfree(is);
}
printf("Upgrade from int32 to int64: "); {
@@ -489,6 +496,7 @@ int intsetTest(int argc, char **argv) {
assert(intsetFind(is,65535));
assert(intsetFind(is,4294967295));
checkConsistency(is);
+ zfree(is);
is = intsetNew();
is = intsetAdd(is,65535,NULL);
@@ -499,6 +507,7 @@ int intsetTest(int argc, char **argv) {
assert(intsetFind(is,-4294967295));
checkConsistency(is);
ok();
+ zfree(is);
}
printf("Stress lookups: "); {
@@ -512,6 +521,7 @@ int intsetTest(int argc, char **argv) {
for (i = 0; i < num; i++) intsetSearch(is,rand() % ((1<<bits)-1),NULL);
printf("%ld lookups, %ld element set, %lldusec\n",
num,size,usec()-start);
+ zfree(is);
}
printf("Stress add+delete: "); {
@@ -528,6 +538,7 @@ int intsetTest(int argc, char **argv) {
}
checkConsistency(is);
ok();
+ zfree(is);
}
return 0;
diff --git a/src/intset.h b/src/intset.h
index 08260fc94..22ea5febf 100644
--- a/src/intset.h
+++ b/src/intset.h
@@ -49,7 +49,7 @@ size_t intsetBlobLen(intset *is);
int intsetValidateIntegrity(const unsigned char *is, size_t size, int deep);
#ifdef REDIS_TEST
-int intsetTest(int argc, char *argv[]);
+int intsetTest(int argc, char *argv[], int accurate);
#endif
#endif // __INTSET_H
diff --git a/src/latency.c b/src/latency.c
index d447b2b5b..a5dfc5a7d 100644
--- a/src/latency.c
+++ b/src/latency.c
@@ -256,7 +256,7 @@ sds createLatencyReport(void) {
if (dictSize(server.latency_events) == 0 &&
server.latency_monitor_threshold == 0)
{
- report = sdscat(report,"I'm sorry, Dave, I can't do that. Latency monitoring is disabled in this Redis instance. You may use \"CONFIG SET latency-monitor-threshold <milliseconds>.\" in order to enable it. If we weren't in a deep space mission I'd suggest to take a look at http://redis.io/topics/latency-monitor.\n");
+ report = sdscat(report,"I'm sorry, Dave, I can't do that. Latency monitoring is disabled in this Redis instance. You may use \"CONFIG SET latency-monitor-threshold <milliseconds>.\" in order to enable it. If we weren't in a deep space mission I'd suggest to take a look at https://redis.io/topics/latency-monitor.\n");
return report;
}
@@ -426,7 +426,7 @@ sds createLatencyReport(void) {
}
if (advise_slowlog_inspect) {
- report = sdscat(report,"- Check your Slow Log to understand what are the commands you are running which are too slow to execute. Please check http://redis.io/commands/slowlog for more information.\n");
+ report = sdscat(report,"- Check your Slow Log to understand what are the commands you are running which are too slow to execute. Please check https://redis.io/commands/slowlog for more information.\n");
}
/* Intrinsic latency. */
diff --git a/src/listpack.c b/src/listpack.c
index a2255f0d7..ee256bad3 100644
--- a/src/listpack.c
+++ b/src/listpack.c
@@ -908,7 +908,7 @@ int lpValidateNext(unsigned char *lp, unsigned char **pp, size_t lpbytes) {
#undef OUT_OF_RANGE
}
-/* Validate the integrity of the data stracture.
+/* Validate the integrity of the data structure.
* when `deep` is 0, only the integrity of the header is validated.
* when `deep` is 1, we scan all the entries one by one. */
int lpValidateIntegrity(unsigned char *lp, size_t size, int deep){
diff --git a/src/module.c b/src/module.c
index 274210590..05bf3a275 100644
--- a/src/module.c
+++ b/src/module.c
@@ -27,6 +27,30 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+/* --------------------------------------------------------------------------
+ * Modules API documentation information
+ *
+ * The comments in this file are used to generate the API documentation on the
+ * Redis website.
+ *
+ * Each function starting with RM_ and preceded by a block comment is included
+ * in the API documentation. To hide an RM_ function, put a blank line between
+ * the comment and the function definition or put the comment inside the
+ * function body.
+ *
+ * The functions are divided into sections. Each section is preceded by a
+ * documentation block, which is a comment block starting with a markdown level 2
+ * heading, i.e. a line starting with ##, on the first line of the comment block
+ * (with the exception of a ----- line which can appear first). Other comment
+ * blocks, which are not intended for the modules API user, such as this comment
+ * block, do NOT start with a markdown level 2 heading, so they are not included
+ * in the generated API documentation.
+ *
+ * The documentation comments may contain markdown formatting. Some automatic
+ * replacements are done, such as the replacement of RM with RedisModule in
+ * function names. For details, see the script src/modules/gendoc.rb.
+ * -------------------------------------------------------------------------- */
+
#include "server.h"
#include "cluster.h"
#include "slowlog.h"
@@ -169,6 +193,7 @@ typedef struct RedisModuleCtx RedisModuleCtx;
#define REDISMODULE_CTX_THREAD_SAFE (1<<4)
#define REDISMODULE_CTX_BLOCKED_DISCONNECTED (1<<5)
#define REDISMODULE_CTX_MODULE_COMMAND_CALL (1<<6)
+#define REDISMODULE_CTX_MULTI_EMITTED (1<<7)
/* This represents a Redis key opened with RM_OpenKey(). */
struct RedisModuleKey {
@@ -396,7 +421,10 @@ void RM_FreeDict(RedisModuleCtx *ctx, RedisModuleDict *d);
void RM_FreeServerInfo(RedisModuleCtx *ctx, RedisModuleServerInfoData *data);
/* --------------------------------------------------------------------------
- * Heap allocation raw functions
+ * ## Heap allocation raw functions
+ *
+ * Memory allocated with these functions is taken into account by Redis key
+ * eviction algorithms and is reported in Redis memory usage information.
* -------------------------------------------------------------------------- */
/* Use like malloc(). Memory allocated with this function is reported in
@@ -579,13 +607,13 @@ int moduleDelKeyIfEmpty(RedisModuleKey *key) {
* defined in the main executable having the same names.
* -------------------------------------------------------------------------- */
-/* Lookup the requested module API and store the function pointer into the
- * target pointer. The function returns REDISMODULE_ERR if there is no such
- * named API, otherwise REDISMODULE_OK.
- *
- * This function is not meant to be used by modules developer, it is only
- * used implicitly by including redismodule.h. */
int RM_GetApi(const char *funcname, void **targetPtrPtr) {
+ /* Lookup the requested module API and store the function pointer into the
+ * target pointer. The function returns REDISMODULE_ERR if there is no such
+ * named API, otherwise REDISMODULE_OK.
+ *
+ * This function is not meant to be used by module developers; it is only
+ * used implicitly by including redismodule.h. */
dictEntry *he = dictFind(server.moduleapi, funcname);
if (!he) return REDISMODULE_ERR;
*targetPtrPtr = dictGetVal(he);
@@ -599,17 +627,21 @@ void moduleHandlePropagationAfterCommandCallback(RedisModuleCtx *ctx) {
/* We don't need to do anything here if the context was never used
* in order to propagate commands. */
+ if (!(ctx->flags & REDISMODULE_CTX_MULTI_EMITTED)) return;
+
+ /* We don't need to do anything here if the server isn't inside
+ * a transaction. */
if (!server.propagate_in_transaction) return;
- /* If this command is executed from with Lua or MULTI/EXEC we do noy
+ /* If this command is executed from within Lua or MULTI/EXEC we do not
* need to propagate EXEC */
if (server.in_eval || server.in_exec) return;
/* Handle the replication of the final EXEC, since whatever a command
* emits is always wrapped around MULTI/EXEC. */
- beforePropagateMultiOrExec(0);
alsoPropagate(server.execCommand,c->db->id,&shared.exec,1,
PROPAGATE_AOF|PROPAGATE_REPL);
+ afterPropagateExec();
/* If this is not a module command context (but is instead a simple
* callback context), we have to handle directly the "also propagate"
@@ -708,6 +740,14 @@ int moduleGetCommandKeysViaAPI(struct redisCommand *cmd, robj **argv, int argc,
return result->numkeys;
}
+/* --------------------------------------------------------------------------
+ * ## Commands API
+ *
+ * These functions are used to implement custom Redis commands.
+ *
+ * For examples, see https://redis.io/topics/modules-intro.
+ * -------------------------------------------------------------------------- */
+
/* Return non-zero if a module command, that was declared with the
* flag "getkeys-api", is called in a special way to get the keys positions
* and not to get executed. Otherwise zero is returned. */
@@ -882,11 +922,15 @@ int RM_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc c
return REDISMODULE_OK;
}
-/* Called by RM_Init() to setup the `ctx->module` structure.
- *
- * This is an internal function, Redis modules developers don't need
- * to use it. */
+/* --------------------------------------------------------------------------
+ * ## Module information and time measurement
+ * -------------------------------------------------------------------------- */
+
void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int apiver) {
+ /* Called by RM_Init() to setup the `ctx->module` structure.
+ *
+ * This is an internal function, Redis modules developers don't need
+ * to use it. */
RedisModule *module;
if (ctx->module != NULL) return;
@@ -952,20 +996,29 @@ int RM_BlockedClientMeasureTimeEnd(RedisModuleBlockedClient *bc) {
* repl-diskless-load to work if enabled.
* The module should use RedisModule_IsIOError after reads, before using the
* data that was read, and in case of error, propagate it upwards, and also be
- * able to release the partially populated value and all it's allocations. */
+ * able to release the partially populated value and all its allocations.
+ *
+ * REDISMODULE_OPTION_NO_IMPLICIT_SIGNAL_MODIFIED:
+ * See RM_SignalModifiedKey().
+ */
void RM_SetModuleOptions(RedisModuleCtx *ctx, int options) {
ctx->module->options = options;
}
/* Signals that the key is modified from user's perspective (i.e. invalidate WATCH
- * and client side caching). */
+ * and client side caching).
+ *
+ * This is done automatically when a key opened for writing is closed, unless
+ * the option REDISMODULE_OPTION_NO_IMPLICIT_SIGNAL_MODIFIED has been set using
+ * RM_SetModuleOptions().
+*/
int RM_SignalModifiedKey(RedisModuleCtx *ctx, RedisModuleString *keyname) {
signalModifiedKey(ctx->client,ctx->client->db,keyname);
return REDISMODULE_OK;
}
/* --------------------------------------------------------------------------
- * Automatic memory management for modules
+ * ## Automatic memory management for modules
* -------------------------------------------------------------------------- */
/* Enable automatic memory management.
@@ -1061,7 +1114,7 @@ void autoMemoryCollect(RedisModuleCtx *ctx) {
}
/* --------------------------------------------------------------------------
- * String objects APIs
+ * ## String objects APIs
* -------------------------------------------------------------------------- */
/* Create a new module string object. The returned string must be freed
@@ -1330,14 +1383,6 @@ int RM_StringToLongDouble(const RedisModuleString *str, long double *ld) {
* Returns REDISMODULE_OK on success and returns REDISMODULE_ERR if the string
* is not a valid string representation of a stream ID. The special IDs "+" and
* "-" are allowed.
- *
- * RedisModuleStreamID is a struct with two 64-bit fields, which is used in
- * stream functions and defined as
- *
- * typedef struct RedisModuleStreamID {
- * uint64_t ms;
- * uint64_t seq;
- * } RedisModuleStreamID;
*/
int RM_StringToStreamID(const RedisModuleString *str, RedisModuleStreamID *id) {
streamID streamid;
@@ -1392,13 +1437,15 @@ int RM_StringAppendBuffer(RedisModuleCtx *ctx, RedisModuleString *str, const cha
}
/* --------------------------------------------------------------------------
- * Reply APIs
+ * ## Reply APIs
+ *
+ * These functions are used for sending replies to the client.
*
* Most functions always return REDISMODULE_OK so you can use it with
* 'return' in order to return from the command implementation with:
*
* if (... some condition ...)
- * return RM_ReplyWithLongLong(ctx,mycount);
+ * return RedisModule_ReplyWithLongLong(ctx,mycount);
* -------------------------------------------------------------------------- */
/* Send an error about the number of arguments given to the command,
@@ -1687,7 +1734,7 @@ int RM_ReplyWithLongDouble(RedisModuleCtx *ctx, long double ld) {
}
/* --------------------------------------------------------------------------
- * Commands replication API
+ * ## Commands replication API
* -------------------------------------------------------------------------- */
/* Helper function to replicate MULTI the first time we replicate something
@@ -1696,7 +1743,7 @@ int RM_ReplyWithLongDouble(RedisModuleCtx *ctx, long double ld) {
void moduleReplicateMultiIfNeeded(RedisModuleCtx *ctx) {
/* Skip this if client explicitly wrap the command with MULTI, or if
* the module command was called by a script. */
- if (server.lua_caller || server.in_exec) return;
+ if (server.in_eval || server.in_exec) return;
/* If we already emitted MULTI return ASAP. */
if (server.propagate_in_transaction) return;
/* If this is a thread safe context, we do not want to wrap commands
@@ -1707,10 +1754,12 @@ void moduleReplicateMultiIfNeeded(RedisModuleCtx *ctx) {
* context, we have to setup the op array for the "also propagate" API
* so that RM_Replicate() will work. */
if (!(ctx->flags & REDISMODULE_CTX_MODULE_COMMAND_CALL)) {
+ serverAssert(ctx->saved_oparray.ops == NULL);
ctx->saved_oparray = server.also_propagate;
redisOpArrayInit(&server.also_propagate);
}
execCommandPropagateMulti(ctx->client->db->id);
+ ctx->flags |= REDISMODULE_CTX_MULTI_EMITTED;
}
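The REDISMODULE_CTX_MULTI_EMITTED flag set here is what moduleHandlePropagationAfterCommandCallback() checks further up, so the closing EXEC is only propagated by the context that actually opened the transaction. A simplified standalone sketch of that flag-gated wrapping; the function names are made up and puts() stands in for real propagation:

    #include <stdio.h>

    #define CTX_MULTI_EMITTED (1<<7)   /* mirrors REDISMODULE_CTX_MULTI_EMITTED */

    /* Emit MULTI the first time this context replicates something and remember it. */
    static void ctx_replicate(int *ctx_flags, const char *cmd) {
        if (!(*ctx_flags & CTX_MULTI_EMITTED)) {
            puts("MULTI");
            *ctx_flags |= CTX_MULTI_EMITTED;
        }
        puts(cmd);
    }

    /* After the callback: close the transaction only if this context opened one. */
    static void ctx_after_callback(int ctx_flags) {
        if (ctx_flags & CTX_MULTI_EMITTED) puts("EXEC");
    }

    int main(void) {
        int writer = 0, readonly = 0;
        ctx_replicate(&writer, "SET key val");
        ctx_replicate(&writer, "DEL other");
        ctx_after_callback(writer);     /* MULTI, SET key val, DEL other, EXEC */
        ctx_after_callback(readonly);   /* nothing: no stray EXEC */
        return 0;
    }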
/* Replicate the specified command and arguments to slaves and AOF, as effect
@@ -1734,7 +1783,7 @@ void moduleReplicateMultiIfNeeded(RedisModuleCtx *ctx) {
* the AOF or the replicas from the propagation of the specified command.
* Otherwise, by default, the command will be propagated in both channels.
*
- * ## Note about calling this function from a thread safe context:
+ * #### Note about calling this function from a thread safe context:
*
* Normally when you call this function from the callback implementing a
* module command, or any other callback provided by the Redis Module API,
@@ -1746,7 +1795,7 @@ void moduleReplicateMultiIfNeeded(RedisModuleCtx *ctx) {
* and the command specified is inserted in the AOF and replication stream
* immediately.
*
- * ## Return value
+ * #### Return value
*
* The command returns REDISMODULE_ERR if the format specifiers are invalid
* or the command name does not belong to a known command. */
@@ -1810,7 +1859,7 @@ int RM_ReplicateVerbatim(RedisModuleCtx *ctx) {
}
/* --------------------------------------------------------------------------
- * DB and Key APIs -- Generic API
+ * ## DB and Key APIs -- Generic API
* -------------------------------------------------------------------------- */
/* Return the ID of the current client calling the currently active module
@@ -2077,7 +2126,7 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) {
flags |= REDISMODULE_CTX_FLAGS_LOADING;
/* Maxmemory and eviction policy */
- if (server.maxmemory > 0) {
+ if (server.maxmemory > 0 && (!server.masterhost || !server.repl_slave_ignore_maxmemory)) {
flags |= REDISMODULE_CTX_FLAGS_MAXMEMORY;
if (server.maxmemory_policy != MAXMEMORY_NO_EVICTION)
@@ -2327,7 +2376,7 @@ mstime_t RM_GetExpire(RedisModuleKey *key) {
* The function returns REDISMODULE_OK on success or REDISMODULE_ERR if
* the key was not open for writing or is an empty key. */
int RM_SetExpire(RedisModuleKey *key, mstime_t expire) {
- if (!(key->mode & REDISMODULE_WRITE) || key->value == NULL)
+ if (!(key->mode & REDISMODULE_WRITE) || key->value == NULL || (expire < 0 && expire != REDISMODULE_NO_EXPIRE))
return REDISMODULE_ERR;
if (expire != REDISMODULE_NO_EXPIRE) {
expire += mstime();
@@ -2338,6 +2387,36 @@ int RM_SetExpire(RedisModuleKey *key, mstime_t expire) {
return REDISMODULE_OK;
}
+/* Return the key expire value, as an absolute Unix timestamp.
+ * If no TTL is associated with the key or if the key is empty,
+ * REDISMODULE_NO_EXPIRE is returned. */
+mstime_t RM_GetAbsExpire(RedisModuleKey *key) {
+ mstime_t expire = getExpire(key->db,key->key);
+ if (expire == -1 || key->value == NULL)
+ return REDISMODULE_NO_EXPIRE;
+ return expire;
+}
+
+/* Set a new expire for the key. If the special expire
+ * REDISMODULE_NO_EXPIRE is set, the expire is cancelled if there was
+ * one (the same as the PERSIST command).
+ *
+ * Note that the expire must be provided as a positive integer representing
+ * the absolute Unix timestamp the key should have.
+ *
+ * The function returns REDISMODULE_OK on success or REDISMODULE_ERR if
+ * the key was not open for writing or is an empty key. */
+int RM_SetAbsExpire(RedisModuleKey *key, mstime_t expire) {
+ if (!(key->mode & REDISMODULE_WRITE) || key->value == NULL || (expire < 0 && expire != REDISMODULE_NO_EXPIRE))
+ return REDISMODULE_ERR;
+ if (expire != REDISMODULE_NO_EXPIRE) {
+ setExpire(key->ctx->client,key->db,key->key,expire);
+ } else {
+ removeExpire(key->db,key->key);
+ }
+ return REDISMODULE_OK;
+}
+
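A minimal sketch of the new pair used from a command callback (ctx and argv come from the callback; the one-hour window is an arbitrary choice):

    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ | REDISMODULE_WRITE);
    if (RedisModule_GetAbsExpire(key) == REDISMODULE_NO_EXPIRE) {
        /* No TTL yet: expire the key one hour from now, expressed as an
         * absolute Unix timestamp in milliseconds. */
        RedisModule_SetAbsExpire(key, RedisModule_Milliseconds() + 3600 * 1000);
    }
    RedisModule_CloseKey(key);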
/* Performs a similar operation to FLUSHALL, and optionally starts a new AOF file (if enabled).
* If restart_aof is true, you must make sure the command that triggered this call is not
* propagated to the AOF file.
@@ -2361,7 +2440,9 @@ RedisModuleString *RM_RandomKey(RedisModuleCtx *ctx) {
}
/* --------------------------------------------------------------------------
- * Key API for String type
+ * ## Key API for String type
+ *
+ * See also RM_ValueLength(), which returns the length of a string.
* -------------------------------------------------------------------------- */
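A minimal sketch, assuming 'key' was opened for writing and the new value arrives in argv[2]:

    if (RedisModule_StringSet(key, argv[2]) == REDISMODULE_OK) {
        size_t len;
        /* Map the stored value for direct read access; the pointer is only
         * valid until the next call that may modify this key. */
        char *data = RedisModule_StringDMA(key, &len, REDISMODULE_READ);
        RedisModule_Log(ctx, "notice", "stored %zu bytes starting with '%.1s'", len, data);
    }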
/* If the key is open for writing, set the specified string 'str' as the
@@ -2471,7 +2552,9 @@ int RM_StringTruncate(RedisModuleKey *key, size_t newlen) {
}
/* --------------------------------------------------------------------------
- * Key API for List type
+ * ## Key API for List type
+ *
+ * See also RM_ValueLength(), which returns the length of a list.
* -------------------------------------------------------------------------- */
/* Push an element into a list, on head or tail depending on 'where' argument.
@@ -2509,26 +2592,28 @@ RedisModuleString *RM_ListPop(RedisModuleKey *key, int where) {
}
/* --------------------------------------------------------------------------
- * Key API for Sorted Set type
+ * ## Key API for Sorted Set type
+ *
+ * See also RM_ValueLength(), which returns the length of a sorted set.
* -------------------------------------------------------------------------- */
/* Conversion from/to public flags of the Modules API and our private flags,
* so that we have everything decoupled. */
int moduleZsetAddFlagsToCoreFlags(int flags) {
int retflags = 0;
- if (flags & REDISMODULE_ZADD_XX) retflags |= ZADD_XX;
- if (flags & REDISMODULE_ZADD_NX) retflags |= ZADD_NX;
- if (flags & REDISMODULE_ZADD_GT) retflags |= ZADD_GT;
- if (flags & REDISMODULE_ZADD_LT) retflags |= ZADD_LT;
+ if (flags & REDISMODULE_ZADD_XX) retflags |= ZADD_IN_XX;
+ if (flags & REDISMODULE_ZADD_NX) retflags |= ZADD_IN_NX;
+ if (flags & REDISMODULE_ZADD_GT) retflags |= ZADD_IN_GT;
+ if (flags & REDISMODULE_ZADD_LT) retflags |= ZADD_IN_LT;
return retflags;
}
/* See previous function comment. */
int moduleZsetAddFlagsFromCoreFlags(int flags) {
int retflags = 0;
- if (flags & ZADD_ADDED) retflags |= REDISMODULE_ZADD_ADDED;
- if (flags & ZADD_UPDATED) retflags |= REDISMODULE_ZADD_UPDATED;
- if (flags & ZADD_NOP) retflags |= REDISMODULE_ZADD_NOP;
+ if (flags & ZADD_OUT_ADDED) retflags |= REDISMODULE_ZADD_ADDED;
+ if (flags & ZADD_OUT_UPDATED) retflags |= REDISMODULE_ZADD_UPDATED;
+ if (flags & ZADD_OUT_NOP) retflags |= REDISMODULE_ZADD_NOP;
return retflags;
}
@@ -2565,16 +2650,16 @@ int moduleZsetAddFlagsFromCoreFlags(int flags) {
* * 'score' double value is not a number (NaN).
*/
int RM_ZsetAdd(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr) {
- int flags = 0;
+ int in_flags = 0, out_flags = 0;
if (!(key->mode & REDISMODULE_WRITE)) return REDISMODULE_ERR;
if (key->value && key->value->type != OBJ_ZSET) return REDISMODULE_ERR;
if (key->value == NULL) moduleCreateEmptyKey(key,REDISMODULE_KEYTYPE_ZSET);
- if (flagsptr) flags = moduleZsetAddFlagsToCoreFlags(*flagsptr);
- if (zsetAdd(key->value,score,ele->ptr,&flags,NULL) == 0) {
+ if (flagsptr) in_flags = moduleZsetAddFlagsToCoreFlags(*flagsptr);
+ if (zsetAdd(key->value,score,ele->ptr,in_flags,&out_flags,NULL) == 0) {
if (flagsptr) *flagsptr = 0;
return REDISMODULE_ERR;
}
- if (flagsptr) *flagsptr = moduleZsetAddFlagsFromCoreFlags(flags);
+ if (flagsptr) *flagsptr = moduleZsetAddFlagsFromCoreFlags(out_flags);
return REDISMODULE_OK;
}
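A minimal sketch of the flags round trip, assuming 'key' is open for writing and 'ele' is an existing RedisModuleString:

    int flags = REDISMODULE_ZADD_GT;   /* only move an existing score upwards */
    if (RedisModule_ZsetAdd(key, 42.0, ele, &flags) == REDISMODULE_OK) {
        if (flags & REDISMODULE_ZADD_UPDATED)
            RedisModule_Log(ctx, "notice", "score was raised to 42");
        else if (flags & REDISMODULE_ZADD_NOP)
            RedisModule_Log(ctx, "notice", "existing score was already higher");
    }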
@@ -2592,22 +2677,17 @@ int RM_ZsetAdd(RedisModuleKey *key, double score, RedisModuleString *ele, int *f
* with the new score of the element after the increment, if no error
* is returned. */
int RM_ZsetIncrby(RedisModuleKey *key, double score, RedisModuleString *ele, int *flagsptr, double *newscore) {
- int flags = 0;
+ int in_flags = 0, out_flags = 0;
if (!(key->mode & REDISMODULE_WRITE)) return REDISMODULE_ERR;
if (key->value && key->value->type != OBJ_ZSET) return REDISMODULE_ERR;
if (key->value == NULL) moduleCreateEmptyKey(key,REDISMODULE_KEYTYPE_ZSET);
- if (flagsptr) flags = moduleZsetAddFlagsToCoreFlags(*flagsptr);
- flags |= ZADD_INCR;
- if (zsetAdd(key->value,score,ele->ptr,&flags,newscore) == 0) {
+ if (flagsptr) in_flags = moduleZsetAddFlagsToCoreFlags(*flagsptr);
+ in_flags |= ZADD_IN_INCR;
+ if (zsetAdd(key->value,score,ele->ptr,in_flags,&out_flags,newscore) == 0) {
if (flagsptr) *flagsptr = 0;
return REDISMODULE_ERR;
}
- /* zsetAdd() may signal back that the resulting score is not a number. */
- if (flagsptr && (*flagsptr & ZADD_NAN)) {
- *flagsptr = 0;
- return REDISMODULE_ERR;
- }
- if (flagsptr) *flagsptr = moduleZsetAddFlagsFromCoreFlags(flags);
+ if (flagsptr) *flagsptr = moduleZsetAddFlagsFromCoreFlags(out_flags);
return REDISMODULE_OK;
}
@@ -2657,7 +2737,7 @@ int RM_ZsetScore(RedisModuleKey *key, RedisModuleString *ele, double *score) {
}
/* --------------------------------------------------------------------------
- * Key API for Sorted Set iterator
+ * ## Key API for Sorted Set iterator
* -------------------------------------------------------------------------- */
void zsetKeyReset(RedisModuleKey *key) {
@@ -2964,7 +3044,9 @@ int RM_ZsetRangePrev(RedisModuleKey *key) {
}
/* --------------------------------------------------------------------------
- * Key API for Hash type
+ * ## Key API for Hash type
+ *
+ * See also RM_ValueLength(), which returns the number of fields in a hash.
* -------------------------------------------------------------------------- */
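A minimal sketch using C-string field names, assuming 'key' is open for writing and the value arrives in argv[2] (the field name is illustrative):

    RedisModuleString *old = NULL;
    RedisModule_HashSet(key, REDISMODULE_HASH_CFIELDS, "color", argv[2], NULL);
    RedisModule_HashGet(key, REDISMODULE_HASH_CFIELDS, "color", &old, NULL);
    if (old) {
        RedisModule_ReplyWithString(ctx, old);
        RedisModule_FreeString(ctx, old);
    } else {
        RedisModule_ReplyWithNull(ctx);
    }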
/* Set the specified hash field to the specified value.
@@ -3199,7 +3281,20 @@ int RM_HashGet(RedisModuleKey *key, int flags, ...) {
}
/* --------------------------------------------------------------------------
- * Key API for the stream type.
+ * ## Key API for Stream type
+ *
+ * For an introduction to streams, see https://redis.io/topics/streams-intro.
+ *
+ * The type RedisModuleStreamID, which is used in stream functions, is a struct
+ * with two 64-bit fields and is defined as
+ *
+ * typedef struct RedisModuleStreamID {
+ * uint64_t ms;
+ * uint64_t seq;
+ * } RedisModuleStreamID;
+ *
+ * See also RM_ValueLength(), which returns the length of a stream, and the
+ * conversion functions RM_StringToStreamID() and RM_CreateStringFromStreamID().
* -------------------------------------------------------------------------- */
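As a small illustration, an ID can be built from the current server time and converted with the helpers mentioned above (the seq value 0 is arbitrary):

    RedisModuleStreamID id = { .ms = (uint64_t)RedisModule_Milliseconds(), .seq = 0 };
    RedisModuleString *idstr = RedisModule_CreateStringFromStreamID(ctx, &id);
    RedisModule_ReplyWithString(ctx, idstr);
    RedisModule_FreeString(ctx, idstr);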
/* Adds an entry to a stream. Like XADD without trimming.
@@ -3366,8 +3461,8 @@ int RM_StreamDelete(RedisModuleKey *key, RedisModuleStreamID *id) {
* //
* // ... Do stuff ...
* //
- * RedisModule_Free(field);
- * RedisModule_Free(value);
+ * RedisModule_FreeString(ctx, field);
+ * RedisModule_FreeString(ctx, value);
* }
* }
* RedisModule_StreamIteratorStop(key);
@@ -3648,7 +3743,9 @@ long long RM_StreamTrimByID(RedisModuleKey *key, int flags, RedisModuleStreamID
}
/* --------------------------------------------------------------------------
- * Redis <-> Modules generic Call() API
+ * ## Calling Redis commands from modules
+ *
+ * RM_Call() sends a command to Redis. The remaining functions handle the reply.
* -------------------------------------------------------------------------- */
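A minimal sketch of the call/reply cycle, assuming the key name arrives in argv[1]; adding '!' to the format string (e.g. "!s") would also propagate the command:

    RedisModuleCallReply *reply = RedisModule_Call(ctx, "GET", "s", argv[1]);
    if (reply && RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_STRING) {
        size_t len;
        const char *val = RedisModule_CallReplyStringPtr(reply, &len);
        RedisModule_Log(ctx, "notice", "GET returned %.*s", (int)len, val);
    }
    if (reply) RedisModule_FreeCallReply(reply);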
/* Create a new RedisModuleCallReply object. The processing of the reply
@@ -4067,20 +4164,30 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
}
}
- /* If we are using single commands replication, we need to wrap what
- * we propagate into a MULTI/EXEC block, so that it will be atomic like
- * a Lua script in the context of AOF and slaves. */
- if (replicate) moduleReplicateMultiIfNeeded(ctx);
+ /* We need to use a global replication_allowed flag in order to prevent
+ * replication of nested RM_Calls. Example:
+ * 1. module1.foo does RM_Call of module2.bar without replication (i.e. no '!')
+ * 2. module2.bar internally calls RM_Call of INCR with '!'
+ * 3. at the end of module1.foo we call RM_ReplicateVerbatim
+ * We want the replica/AOF to see only module1.foo and not the INCR from module2.bar */
+ int prev_replication_allowed = server.replication_allowed;
+ server.replication_allowed = replicate && server.replication_allowed;
/* Run the command */
int call_flags = CMD_CALL_SLOWLOG | CMD_CALL_STATS | CMD_CALL_NOWRAP;
if (replicate) {
+ /* If we are using single commands replication, we need to wrap what
+ * we propagate into a MULTI/EXEC block, so that it will be atomic like
+ * a Lua script in the context of AOF and slaves. */
+ moduleReplicateMultiIfNeeded(ctx);
+
if (!(flags & REDISMODULE_ARGV_NO_AOF))
call_flags |= CMD_CALL_PROPAGATE_AOF;
if (!(flags & REDISMODULE_ARGV_NO_REPLICAS))
call_flags |= CMD_CALL_PROPAGATE_REPL;
}
call(c,call_flags);
+ server.replication_allowed = prev_replication_allowed;
serverAssert((c->flags & CLIENT_BLOCKED) == 0);
@@ -4121,7 +4228,7 @@ const char *RM_CallReplyProto(RedisModuleCallReply *reply, size_t *len) {
}
/* --------------------------------------------------------------------------
- * Modules data types
+ * ## Modules data types
*
* When String DMA or using existing data structures is not enough, it is
* possible to create new data types from scratch and export them to
@@ -4264,6 +4371,12 @@ void moduleTypeNameByID(char *name, uint64_t moduleid) {
}
}
+/* Return the name of the module that owns the specified moduleType. */
+const char *moduleTypeModuleName(moduleType *mt) {
+ if (!mt || !mt->module) return NULL;
+ return mt->module->name;
+}
+
/* Create a copy of a module type value using the copy callback. If failed
* or not supported, produce an error reply and return NULL.
*/
@@ -4479,7 +4592,7 @@ void *RM_ModuleTypeGetValue(RedisModuleKey *key) {
}
/* --------------------------------------------------------------------------
- * RDB loading and saving functions
+ * ## RDB loading and saving functions
* -------------------------------------------------------------------------- */
/* Called when there is a load error in the context of a module. On some
@@ -4791,7 +4904,7 @@ ssize_t rdbSaveModulesAux(rio *rdb, int when) {
}
/* --------------------------------------------------------------------------
- * Key digest API (DEBUG DIGEST interface for modules types)
+ * ## Key digest API (DEBUG DIGEST interface for modules types)
* -------------------------------------------------------------------------- */
/* Add a new element to the digest. This function can be called multiple times
@@ -4912,7 +5025,7 @@ RedisModuleString *RM_SaveDataTypeToString(RedisModuleCtx *ctx, void *data, cons
}
/* --------------------------------------------------------------------------
- * AOF API for modules data types
+ * ## AOF API for modules data types
* -------------------------------------------------------------------------- */
/* Emits a command into the AOF during the AOF rewriting process. This function
@@ -4967,7 +5080,7 @@ void RM_EmitAOF(RedisModuleIO *io, const char *cmdname, const char *fmt, ...) {
}
/* --------------------------------------------------------------------------
- * IO context handling
+ * ## IO context handling
* -------------------------------------------------------------------------- */
RedisModuleCtx *RM_GetContextFromIO(RedisModuleIO *io) {
@@ -4994,7 +5107,7 @@ const RedisModuleString *RM_GetKeyNameFromModuleKey(RedisModuleKey *key) {
}
/* --------------------------------------------------------------------------
- * Logging
+ * ## Logging
* -------------------------------------------------------------------------- */
/* This is the low level function implementing both:
@@ -5025,10 +5138,10 @@ void moduleLogRaw(RedisModule *module, const char *levelstr, const char *fmt, va
* printf-alike specifiers, while level is a string describing the log
* level to use when emitting the log, and must be one of the following:
*
- * * "debug"
- * * "verbose"
- * * "notice"
- * * "warning"
+ * * "debug" (`REDISMODULE_LOGLEVEL_DEBUG`)
+ * * "verbose" (`REDISMODULE_LOGLEVEL_VERBOSE`)
+ * * "notice" (`REDISMODULE_LOGLEVEL_NOTICE`)
+ * * "warning" (`REDISMODULE_LOGLEVEL_WARNING`)
*
* If the specified log level is invalid, verbose is used by default.
* There is a fixed limit to the length of the log line this function is able
@@ -5079,7 +5192,10 @@ void RM_LatencyAddSample(const char *event, mstime_t latency) {
}
/* --------------------------------------------------------------------------
- * Blocking clients from modules
+ * ## Blocking clients from modules
+ *
+ * For a guide about blocking commands in modules, see
+ * https://redis.io/topics/modules-blocking-ops.
* -------------------------------------------------------------------------- */
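A minimal sketch of the block/unblock handshake (callback and command names are illustrative; the worker thread doing the slow job is only hinted at in the comment):

    int Block_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        (void)argv; (void)argc;
        return RedisModule_ReplyWithSimpleString(ctx, "done");
    }

    int Block_Timeout(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        (void)argv; (void)argc;
        return RedisModule_ReplyWithError(ctx, "ERR timed out");
    }

    int MyBlocking_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        (void)argv; (void)argc;
        RedisModuleBlockedClient *bc =
            RedisModule_BlockClient(ctx, Block_Reply, Block_Timeout, NULL, 1000);
        /* A worker thread would do the slow job and then call:
         *     RedisModule_UnblockClient(bc, NULL); */
        (void)bc;
        return REDISMODULE_OK;
    }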
/* Readable handler for the awake pipe. We do nothing here, the awake bytes
@@ -5140,11 +5256,6 @@ void unblockClientFromModule(client *c) {
moduleUnblockClient(c);
bc->client = NULL;
- /* Reset the client for a new query since, for blocking commands implemented
- * into modules, we do not it immediately after the command returns (and
- * the client blocks) in order to be still able to access the argument
- * vector from callbacks. */
- resetClient(c);
}
/* Block a client in the context of a module: this function implements both
@@ -5544,6 +5655,12 @@ void moduleHandleBlockedClients(void) {
* API to unblock the client and the memory will be released. */
void moduleBlockedClientTimedOut(client *c) {
RedisModuleBlockedClient *bc = c->bpop.module_blocked_handle;
+
+ /* Protect against re-processing: don't serve clients that are already
+ * in the unblocking list for any reason (including RM_UnblockClient()
+ * explicit call). See #6798. */
+ if (bc->unblocked) return;
+
RedisModuleCtx ctx = REDISMODULE_CTX_INIT;
ctx.flags |= REDISMODULE_CTX_BLOCKED_TIMEOUT;
ctx.module = bc->module;
@@ -5600,7 +5717,7 @@ int RM_BlockedClientDisconnected(RedisModuleCtx *ctx) {
}
/* --------------------------------------------------------------------------
- * Thread Safe Contexts
+ * ## Thread Safe Contexts
* -------------------------------------------------------------------------- */
/* Return a context which can be used inside threads to make Redis context
@@ -5710,7 +5827,7 @@ void moduleReleaseGIL(void) {
/* --------------------------------------------------------------------------
- * Module Keyspace Notifications API
+ * ## Module Keyspace Notifications API
* -------------------------------------------------------------------------- */
/* Subscribe to keyspace notifications. This is a low-level version of the
@@ -5734,6 +5851,7 @@ void moduleReleaseGIL(void) {
* - REDISMODULE_NOTIFY_EXPIRED: Expiration events
* - REDISMODULE_NOTIFY_EVICTED: Eviction events
* - REDISMODULE_NOTIFY_STREAM: Stream events
+ * - REDISMODULE_NOTIFY_MODULE: Module types events
* - REDISMODULE_NOTIFY_KEYMISS: Key-miss events
* - REDISMODULE_NOTIFY_ALL: All events (Excluding REDISMODULE_NOTIFY_KEYMISS)
* - REDISMODULE_NOTIFY_LOADED: A special notification available only for modules,
@@ -5843,7 +5961,7 @@ void moduleUnsubscribeNotifications(RedisModule *module) {
}
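Tying the notification classes above together, a minimal sketch of a subscription covering module-type and expiration events (the callback name is illustrative):

    int KeyEventCallback(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
        (void)type;
        RedisModule_Log(ctx, "notice", "event '%s' on key %s",
                        event, RedisModule_StringPtrLen(key, NULL));
        return REDISMODULE_OK;
    }

    /* Typically called from RedisModule_OnLoad(): */
    RedisModule_SubscribeToKeyspaceEvents(ctx,
        REDISMODULE_NOTIFY_MODULE | REDISMODULE_NOTIFY_EXPIRED, KeyEventCallback);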
/* --------------------------------------------------------------------------
- * Modules Cluster API
+ * ## Modules Cluster API
* -------------------------------------------------------------------------- */
/* The Cluster message callback function pointer type. */
@@ -6098,7 +6216,7 @@ void RM_SetClusterFlags(RedisModuleCtx *ctx, uint64_t flags) {
}
/* --------------------------------------------------------------------------
- * Modules Timers API
+ * ## Modules Timers API
*
* Module timers are a high precision "green timers" abstraction where
* every module can register even millions of timers without problems, even if
@@ -6272,7 +6390,7 @@ int RM_GetTimerInfo(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remain
}
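A minimal sketch of arming a timer from a command or from OnLoad (the one-second period, callback name and payload are arbitrary):

    void TimerHandler(RedisModuleCtx *ctx, void *data) {
        RedisModule_Log(ctx, "notice", "timer fired for %s", (char *)data);
    }

    /* Fire once, one second from now. The returned id can later be passed
     * to RedisModule_StopTimer() or RedisModule_GetTimerInfo(). */
    RedisModuleTimerID tid = RedisModule_CreateTimer(ctx, 1000, TimerHandler, "my-job");
    (void)tid;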
/* --------------------------------------------------------------------------
- * Modules ACL API
+ * ## Modules ACL API
*
* Implements a hook into the authentication and authorization within Redis.
* --------------------------------------------------------------------------*/
@@ -6502,7 +6620,7 @@ RedisModuleString *RM_GetClientCertificate(RedisModuleCtx *ctx, uint64_t client_
}
/* --------------------------------------------------------------------------
- * Modules Dictionary API
+ * ## Modules Dictionary API
*
* Implements a sorted dictionary (actually backed by a radix tree) with
* the usual get / set / del / num-items API, together with an iterator
@@ -6756,7 +6874,7 @@ int RM_DictCompare(RedisModuleDictIter *di, const char *op, RedisModuleString *k
/* --------------------------------------------------------------------------
- * Modules Info fields
+ * ## Modules Info fields
* -------------------------------------------------------------------------- */
int RM_InfoEndDictField(RedisModuleInfoCtx *ctx);
@@ -7070,7 +7188,7 @@ double RM_ServerInfoGetFieldDouble(RedisModuleServerInfoData *data, const char*
}
/* --------------------------------------------------------------------------
- * Modules utility APIs
+ * ## Modules utility APIs
* -------------------------------------------------------------------------- */
/* Return random bytes using SHA1 in counter mode with a /dev/urandom
@@ -7089,7 +7207,7 @@ void RM_GetRandomHexChars(char *dst, size_t len) {
}
/* --------------------------------------------------------------------------
- * Modules API exporting / importing
+ * ## Modules API exporting / importing
* -------------------------------------------------------------------------- */
/* This function is called by a module in order to export some API with a
@@ -7226,7 +7344,7 @@ int moduleUnregisterFilters(RedisModule *module) {
}
/* --------------------------------------------------------------------------
- * Module Command Filter API
+ * ## Module Command Filter API
* -------------------------------------------------------------------------- */
/* Register a new command filter function.
@@ -7436,7 +7554,7 @@ float RM_GetUsedMemoryRatio(){
}
/* --------------------------------------------------------------------------
- * Scanning keyspace and hashes
+ * ## Scanning keyspace and hashes
* -------------------------------------------------------------------------- */
typedef void (*RedisModuleScanCB)(RedisModuleCtx *ctx, RedisModuleString *keyname, RedisModuleKey *key, void *privdata);
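A minimal sketch of a full keyspace scan built on this callback type (the callback only logs key names; privdata is unused):

    void ScanCallback(RedisModuleCtx *ctx, RedisModuleString *keyname,
                      RedisModuleKey *key, void *privdata) {
        (void)key; (void)privdata;
        RedisModule_Log(ctx, "notice", "scanned key %s",
                        RedisModule_StringPtrLen(keyname, NULL));
    }

    RedisModuleScanCursor *cursor = RedisModule_ScanCursorCreate();
    while (RedisModule_Scan(ctx, cursor, ScanCallback, NULL)) { /* keep scanning */ }
    RedisModule_ScanCursorDestroy(cursor);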
@@ -7707,7 +7825,7 @@ int RM_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleSc
/* --------------------------------------------------------------------------
- * Module fork API
+ * ## Module fork API
* -------------------------------------------------------------------------- */
/* Create a background child process with the current frozen snapshot of the
@@ -7767,7 +7885,7 @@ int TerminateModuleForkChild(int child_pid, int wait) {
serverLog(LL_VERBOSE,"Killing running module fork child: %ld",
(long) server.child_pid);
if (kill(server.child_pid,SIGUSR1) != -1 && wait) {
- while(wait4(server.child_pid,&statloc,0,NULL) !=
+ while(waitpid(server.child_pid, &statloc, 0) !=
server.child_pid);
}
/* Reset the buffer accumulating changes while the child saves. */
@@ -7801,7 +7919,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) {
}
/* --------------------------------------------------------------------------
- * Server hooks implementation
+ * ## Server hooks implementation
* -------------------------------------------------------------------------- */
/* Register to be notified, via a callback, when the specified server event
@@ -8682,6 +8800,10 @@ size_t moduleCount(void) {
return dictSize(modules);
}
+/* --------------------------------------------------------------------------
+ * ## Key eviction API
+ * -------------------------------------------------------------------------- */
+
/* Set the key last access time for LRU based eviction. Not relevant if the
* server's maxmemory policy is LFU based. Value is idle time in milliseconds.
* Returns REDISMODULE_OK if the LRU was updated, REDISMODULE_ERR otherwise. */
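A minimal sketch, assuming 'key' is already open and that RM_SetLRU() takes the idle time in milliseconds as described above (the 5-second value is arbitrary):

    RedisModule_SetLRU(key, 5000);
    long long freq;
    if (RedisModule_GetLFU(key, &freq) == REDISMODULE_OK)
        RedisModule_Log(ctx, "notice", "LFU frequency counter: %lld", freq);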
@@ -8732,6 +8854,10 @@ int RM_GetLFU(RedisModuleKey *key, long long *lfu_freq) {
return REDISMODULE_OK;
}
+/* --------------------------------------------------------------------------
+ * ## Miscellaneous APIs
+ * -------------------------------------------------------------------------- */
+
/**
* Returns the full ContextFlags mask; using the return value,
* the module can check whether a certain set of flags is supported
@@ -8880,6 +9006,10 @@ int *RM_GetCommandKeys(RedisModuleCtx *ctx, RedisModuleString **argv, int argc,
return res;
}
+/* --------------------------------------------------------------------------
+ * ## Defrag API
+ * -------------------------------------------------------------------------- */
+
/* The defrag context, used to manage state during calls to the data type
* defrag callback.
*/
diff --git a/src/modules/gendoc.rb b/src/modules/gendoc.rb
index 2fd2ec5d7..f83b1ad9d 100644
--- a/src/modules/gendoc.rb
+++ b/src/modules/gendoc.rb
@@ -1,3 +1,4 @@
+# coding: utf-8
# gendoc.rb -- Converts the top-comments inside module.c to modules API
# reference documentation in markdown format.
@@ -21,15 +22,48 @@ def markdown(s)
l = l.gsub(/(?<![`A-z])[a-z_]+\(\)/, '`\0`')
# Add backquotes around macro and var names containing underscores.
l = l.gsub(/(?<![`A-z\*])[A-Za-z]+_[A-Za-z0-9_]+/){|x| "`#{x}`"}
- # Link URLs preceded by space (i.e. when not already linked)
- l = l.gsub(/ (https?:\/\/[A-Za-z0-9_\/\.\-]+[A-Za-z0-9\/])/,
- ' [\1](\1)')
+ # Link URLs preceded by space or newline (not already linked)
+ l = l.gsub(/(^| )(https?:\/\/[A-Za-z0-9_\/\.\-]+[A-Za-z0-9\/])/,
+ '\1[\2](\2)')
+ # Replace double-dash with unicode ndash
+ l = l.gsub(/ -- /, ' – ')
end
+ # Link function names to their definition within the page
+ l = l.gsub(/`(RedisModule_[A-z0-9]+)[()]*`/) {|x|
+ $index[$1] ? "[#{x}](\##{$1})" : x
+ }
newlines << l
}
return newlines.join("\n")
end
+# Linebreak a prototype longer than 80 characters on the commas, but only
+# between balanced parentheses so that we don't linebreak args which are
+# function pointers, and then align each arg under the one before it.
+def linebreak_proto(proto, indent)
+ if proto.bytesize <= 80
+ return proto
+ end
+ parts = proto.split(/,\s*/);
+ if parts.length == 1
+ return proto;
+ end
+ align_pos = proto.index("(") + 1;
+ align = " " * align_pos
+ result = parts.shift;
+ bracket_balance = 0;
+ parts.each{|part|
+ if bracket_balance == 0
+ result += ",\n" + indent + align
+ else
+ result += ", "
+ end
+ result += part
+ bracket_balance += part.count("(") - part.count(")")
+ }
+ return result;
+end
+
# Given the source code array and the index at which an exported symbol was
# detected, extracts and outputs the documentation.
def docufy(src,i)
@@ -38,7 +72,11 @@ def docufy(src,i)
name = name.sub("RM_","RedisModule_")
proto = src[i].sub("{","").strip+";\n"
proto = proto.sub("RM_","RedisModule_")
- puts "## `#{name}`\n\n"
+ proto = linebreak_proto(proto, " ");
+ # Add a link target with the function name. (We don't trust the exact id of
+ # the generated one, which depends on the Markdown implementation.)
+ puts "<span id=\"#{name}\"></span>\n\n"
+ puts "### `#{name}`\n\n"
puts " #{proto}\n"
comment = ""
while true
@@ -50,13 +88,87 @@ def docufy(src,i)
puts comment+"\n\n"
end
+# Print a comment block, from the given line until */ is found, as markdown.
+def section_doc(src, i)
+ name = get_section_heading(src, i)
+ comment = "<span id=\"#{section_name_to_id(name)}\"></span>\n\n"
+ while true
+ # append line, except if it's a horizontal divider
+ comment = comment + src[i] if src[i] !~ /^[\/ ]?\*{1,2} ?-{50,}/
+ break if src[i] =~ /\*\//
+ i = i+1
+ end
+ comment = markdown(comment)
+ puts comment+"\n\n"
+end
+
+# generates an id suitable for links within the page
+def section_name_to_id(name)
+ return "section-" +
+ name.strip.downcase.gsub(/[^a-z0-9]+/, '-').gsub(/^-+|-+$/, '')
+end
+
+# Returns the name of the first section heading in the comment block for which
+# is_section_doc(src, i) is true
+def get_section_heading(src, i)
+ if src[i] =~ /^\/\*\*? \#+ *(.*)/
+ heading = $1
+ elsif src[i+1] =~ /^ ?\* \#+ *(.*)/
+ heading = $1
+ end
+ return heading.gsub(' -- ', ' – ')
+end
+
+# Returns true if the line is the start of a generic documentation section. Such a
+# section must start with the # symbol, i.e. a markdown heading, on the first or
+# the second line.
+def is_section_doc(src, i)
+ return src[i] =~ /^\/\*\*? \#/ ||
+ (src[i] =~ /^\/\*/ && src[i+1] =~ /^ ?\* \#/)
+end
+
+def is_func_line(src, i)
+ line = src[i]
+ return line =~ /RM_/ &&
+ line[0] != ' ' && line[0] != '#' && line[0] != '/' &&
+ src[i-1] =~ /\*\//
+end
+
puts "# Modules API reference\n\n"
puts "<!-- This file is generated from module.c using gendoc.rb -->\n\n"
-src = File.open("../module.c").to_a
-src.each_with_index{|line,i|
- if line =~ /RM_/ && line[0] != ' ' && line[0] != '#' && line[0] != '/'
- if src[i-1] =~ /\*\//
- docufy(src,i)
- end
+src = File.open(File.dirname(__FILE__) ++ "/../module.c").to_a
+
+# Build function index
+$index = {}
+src.each_with_index do |line,i|
+ if is_func_line(src, i)
+ line =~ /RM_([A-z0-9]+)/
+ name = "RedisModule_#{$1}"
+ $index[name] = true
end
-}
+end
+
+# Print TOC
+puts "## Sections\n\n"
+src.each_with_index do |_line,i|
+ if is_section_doc(src, i)
+ name = get_section_heading(src, i)
+ puts "* [#{name}](\##{section_name_to_id(name)})\n"
+ end
+end
+puts "* [Function index](#section-function-index)\n\n"
+
+# Docufy: Print function prototype and markdown docs
+src.each_with_index do |_line,i|
+ if is_func_line(src, i)
+ docufy(src, i)
+ elsif is_section_doc(src, i)
+ section_doc(src, i)
+ end
+end
+
+# Print function index
+puts "<span id=\"section-function-index\"></span>\n\n"
+puts "## Function index\n\n"
+$index.keys.sort.each{|x| puts "* [`#{x}`](\##{x})\n"}
+puts "\n"
diff --git a/src/multi.c b/src/multi.c
index d88c5f1b8..902c919c7 100644
--- a/src/multi.c
+++ b/src/multi.c
@@ -113,34 +113,34 @@ void discardCommand(client *c) {
addReply(c,shared.ok);
}
-void beforePropagateMultiOrExec(int multi) {
- if (multi) {
- /* Propagating MULTI */
- serverAssert(!server.propagate_in_transaction);
- server.propagate_in_transaction = 1;
- } else {
- /* Propagating EXEC */
- serverAssert(server.propagate_in_transaction == 1);
- server.propagate_in_transaction = 0;
- }
+void beforePropagateMulti() {
+ /* Propagating MULTI */
+ serverAssert(!server.propagate_in_transaction);
+ server.propagate_in_transaction = 1;
+}
+
+void afterPropagateExec() {
+ /* Propagating EXEC */
+ serverAssert(server.propagate_in_transaction == 1);
+ server.propagate_in_transaction = 0;
}
/* Send a MULTI command to all the slaves and AOF file. Check the execCommand
* implementation for more information. */
void execCommandPropagateMulti(int dbid) {
- beforePropagateMultiOrExec(1);
+ beforePropagateMulti();
propagate(server.multiCommand,dbid,&shared.multi,1,
PROPAGATE_AOF|PROPAGATE_REPL);
}
void execCommandPropagateExec(int dbid) {
- beforePropagateMultiOrExec(0);
propagate(server.execCommand,dbid,&shared.exec,1,
PROPAGATE_AOF|PROPAGATE_REPL);
+ afterPropagateExec();
}
/* Aborts a transaction, with a specific error message.
- * The transaction is always aboarted with -EXECABORT so that the client knows
+ * The transaction is always aborted with -EXECABORT so that the client knows
* the server exited the multi state, but the actual reason for the abort is
* included too.
* Note: 'error' may or may not end with \r\n. see addReplyErrorFormat. */
@@ -202,11 +202,9 @@ void execCommand(client *c) {
c->cmd = c->mstate.commands[j].cmd;
/* ACL permissions are also checked at the time of execution in case
- * they were changed after the commands were ququed. */
+ * they were changed after the commands were queued. */
int acl_errpos;
- int acl_retval = ACLCheckCommandPerm(c,&acl_errpos);
- if (acl_retval == ACL_OK && c->cmd->proc == publishCommand)
- acl_retval = ACLCheckPubsubPerm(c,1,1,0,&acl_errpos);
+ int acl_retval = ACLCheckAllPerm(c,&acl_errpos);
if (acl_retval != ACL_OK) {
char *reason;
switch (acl_retval) {
@@ -217,7 +215,8 @@ void execCommand(client *c) {
reason = "no permission to touch the specified keys";
break;
case ACL_DENIED_CHANNEL:
- reason = "no permission to publish to the specified channel";
+ reason = "no permission to access one of the channels used "
+ "as arguments";
break;
default:
reason = "no permission";
@@ -254,7 +253,6 @@ void execCommand(client *c) {
if (server.propagate_in_transaction) {
int is_master = server.masterhost == NULL;
server.dirty++;
- beforePropagateMultiOrExec(0);
/* If inside the MULTI/EXEC block this instance was suddenly
* switched from master to slave (using the SLAVEOF command), the
* initial MULTI was propagated into the replication backlog, but the
@@ -264,6 +262,7 @@ void execCommand(client *c) {
char *execcmd = "*1\r\n$4\r\nEXEC\r\n";
feedReplicationBacklog(execcmd,strlen(execcmd));
}
+ afterPropagateExec();
}
server.in_exec = 0;
diff --git a/src/networking.c b/src/networking.c
index 50e4b71bc..2355a376b 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -154,6 +154,7 @@ client *createClient(connection *conn) {
c->read_reploff = 0;
c->repl_ack_off = 0;
c->repl_ack_time = 0;
+ c->repl_last_partial_write = 0;
c->slave_listening_port = 0;
c->slave_addr = NULL;
c->slave_capa = SLAVE_CAPA_NONE;
@@ -331,8 +332,9 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) {
memcpy(tail->buf, s, len);
listAddNodeTail(c->reply, tail);
c->reply_bytes += tail->size;
+
+ asyncCloseClientOnOutputBufferLimitReached(c);
}
- asyncCloseClientOnOutputBufferLimitReached(c);
}
/* -----------------------------------------------------------------------------
@@ -562,7 +564,7 @@ void *addReplyDeferredLen(client *c) {
void setDeferredReply(client *c, void *node, const char *s, size_t length) {
listNode *ln = (listNode*)node;
- clientReplyBlock *next;
+ clientReplyBlock *next, *prev;
/* Abort when *node is NULL: when the client should not accept writes
* we return NULL in addReplyDeferredLen() */
@@ -571,14 +573,31 @@ void setDeferredReply(client *c, void *node, const char *s, size_t length) {
/* Normally we fill this dummy NULL node, added by addReplyDeferredLen(),
* with a new buffer structure containing the protocol needed to specify
- * the length of the array following. However sometimes when there is
- * little memory to move, we may instead remove this NULL node, and prefix
- * our protocol in the node immediately after to it, in order to save a
- * write(2) syscall later. Conditions needed to do it:
+ * the length of the array following. However sometimes there might be room
+ * in the previous/next node so we can instead remove this NULL node, and
+ * suffix/prefix our data in the node immediately before/after it, in order
+ * to save a write(2) syscall later. Conditions needed to do it:
*
+ * - The prev node is non-NULL and has space in it, or
* - The next node is non-NULL,
* - It has enough room already allocated
* - And not too large (avoid large memmove) */
+ if (ln->prev != NULL && (prev = listNodeValue(ln->prev)) &&
+ prev->size - prev->used > 0)
+ {
+ size_t len_to_copy = prev->size - prev->used;
+ if (len_to_copy > length)
+ len_to_copy = length;
+ memcpy(prev->buf + prev->used, s, len_to_copy);
+ prev->used += len_to_copy;
+ length -= len_to_copy;
+ if (length == 0) {
+ listDelNode(c->reply, ln);
+ return;
+ }
+ s += len_to_copy;
+ }
+
if (ln->next != NULL && (next = listNodeValue(ln->next)) &&
next->size - next->used >= length &&
next->used < PROTO_REPLY_CHUNK_BYTES * 4)
@@ -596,8 +615,9 @@ void setDeferredReply(client *c, void *node, const char *s, size_t length) {
memcpy(buf->buf, s, length);
listNodeValue(ln) = buf;
c->reply_bytes += buf->size;
+
+ asyncCloseClientOnOutputBufferLimitReached(c);
}
- asyncCloseClientOnOutputBufferLimitReached(c);
}
/* Populate the length object and try gluing it to the next chunk. */
@@ -1531,9 +1551,7 @@ int writeToClient(client *c, int handler_installed) {
}
atomicIncr(server.stat_net_output_bytes, totwritten);
if (nwritten == -1) {
- if (connGetState(c->conn) == CONN_STATE_CONNECTED) {
- nwritten = 0;
- } else {
+ if (connGetState(c->conn) != CONN_STATE_CONNECTED) {
serverLog(LL_VERBOSE,
"Error writing to client: %s", connGetLastError(c->conn));
freeClientAsync(c);
@@ -1645,6 +1663,9 @@ void resetClient(client *c) {
c->flags |= CLIENT_REPLY_SKIP;
c->flags &= ~CLIENT_REPLY_SKIP_NEXT;
}
+
+ /* Always clear the prevent logging field. */
+ c->flags &= ~CLIENT_PREVENT_LOGGING;
}
/* This function is used when we want to re-enter the event loop but there
@@ -1954,13 +1975,10 @@ void commandProcessed(client *c) {
c->reploff = c->read_reploff - sdslen(c->querybuf) + c->qb_pos;
}
- /* Don't reset the client structure for clients blocked in a
- * module blocking command, so that the reply callback will
- * still be able to access the client argv and argc field.
- * The client will be reset in unblockClientFromModule(). */
- if (!(c->flags & CLIENT_BLOCKED) ||
- (c->btype != BLOCKED_MODULE && c->btype != BLOCKED_PAUSE))
- {
+ /* Don't reset the client structure for blocked clients, so that the reply
+ * callback will still be able to access the client argv and argc fields.
+ * The client will be reset in unblockClient(). */
+ if (!(c->flags & CLIENT_BLOCKED)) {
resetClient(c);
}
@@ -1990,12 +2008,20 @@ void commandProcessed(client *c) {
* of processing the command, otherwise C_OK is returned. */
int processCommandAndResetClient(client *c) {
int deadclient = 0;
+ client *old_client = server.current_client;
server.current_client = c;
if (processCommand(c) == C_OK) {
commandProcessed(c);
}
if (server.current_client == NULL) deadclient = 1;
- server.current_client = NULL;
+ /*
+ * Restore the old client. This is needed because when a script
+ * times out, we reach this code from processEventsWhileBlocked,
+ * which sets server.current_client. If it is not restored, we would
+ * return 1 to our caller, falsely indicating the client is dead,
+ * and stop reading from its buffer.
+ */
+ server.current_client = old_client;
/* performEvictions may flush slave output buffers. This may
* result in a slave, that may be the active client, to be
* freed. */
@@ -2941,6 +2967,7 @@ void helloCommand(client *c) {
int moreargs = (c->argc-1) - j;
const char *opt = c->argv[j]->ptr;
if (!strcasecmp(opt,"AUTH") && moreargs >= 2) {
+ preventCommandLogging(c);
if (ACLAuthenticateUser(c, c->argv[j+1], c->argv[j+2]) == C_ERR) {
addReplyError(c,"-WRONGPASS invalid username-password pair or user is disabled.");
return;
@@ -3007,7 +3034,7 @@ void securityWarningCommand(client *c) {
static time_t logged_time;
time_t now = time(NULL);
- if (labs(now-logged_time) > 60) {
+ if (llabs(now-logged_time) > 60) {
serverLog(LL_WARNING,"Possible SECURITY ATTACK detected. It looks like somebody is sending POST or Host: commands to Redis. This is likely due to an attacker attempting to use Cross Protocol Scripting to compromise your Redis instance. Connection aborted.");
logged_time = now;
}
@@ -3304,6 +3331,8 @@ int areClientsPaused(void) {
* if it has. Also returns true if clients are now paused and false
* otherwise. */
int checkClientPauseTimeoutAndReturnIfPaused(void) {
+ if (!areClientsPaused())
+ return 0;
if (server.client_pause_end_time < server.mstime) {
unpauseClients();
}
@@ -3332,7 +3361,7 @@ void processEventsWhileBlocked(void) {
/* Note: when we are processing events while blocked (for instance during
* busy Lua scripts), we set a global flag. When such flag is set, we
* avoid handling the read part of clients using threaded I/O.
- * See https://github.com/antirez/redis/issues/6988 for more info. */
+ * See https://github.com/redis/redis/issues/6988 for more info. */
ProcessingEventsWhileBlocked = 1;
while (iterations--) {
long long startval = server.events_processed_while_blocked;
diff --git a/src/notify.c b/src/notify.c
index 5c7634bce..afaddbfca 100644
--- a/src/notify.c
+++ b/src/notify.c
@@ -56,6 +56,7 @@ int keyspaceEventsStringToFlags(char *classes) {
case 'E': flags |= NOTIFY_KEYEVENT; break;
case 't': flags |= NOTIFY_STREAM; break;
case 'm': flags |= NOTIFY_KEY_MISS; break;
+ case 'd': flags |= NOTIFY_MODULE; break;
default: return -1;
}
}
@@ -82,6 +83,7 @@ sds keyspaceEventsFlagsToString(int flags) {
if (flags & NOTIFY_EXPIRED) res = sdscatlen(res,"x",1);
if (flags & NOTIFY_EVICTED) res = sdscatlen(res,"e",1);
if (flags & NOTIFY_STREAM) res = sdscatlen(res,"t",1);
+ if (flags & NOTIFY_MODULE) res = sdscatlen(res,"d",1);
}
if (flags & NOTIFY_KEYSPACE) res = sdscatlen(res,"K",1);
if (flags & NOTIFY_KEYEVENT) res = sdscatlen(res,"E",1);
diff --git a/src/object.c b/src/object.c
index ce2f3dc14..b75e547b9 100644
--- a/src/object.c
+++ b/src/object.c
@@ -727,7 +727,11 @@ int getRangeLongFromObjectOrReply(client *c, robj *o, long min, long max, long *
}
int getPositiveLongFromObjectOrReply(client *c, robj *o, long *target, const char *msg) {
- return getRangeLongFromObjectOrReply(c, o, 0, LONG_MAX, target, msg);
+ if (msg) {
+ return getRangeLongFromObjectOrReply(c, o, 0, LONG_MAX, target, msg);
+ } else {
+ return getRangeLongFromObjectOrReply(c, o, 0, LONG_MAX, target, "value is out of range, must be positive");
+ }
}
int getIntFromObjectOrReply(client *c, robj *o, int *target, const char *msg) {
diff --git a/src/pubsub.c b/src/pubsub.c
index 5f7335bbe..3409deac2 100644
--- a/src/pubsub.c
+++ b/src/pubsub.c
@@ -331,21 +331,6 @@ int pubsubPublishMessage(robj *channel, robj *message) {
return receivers;
}
-/* This wraps handling ACL channel permissions for the given client. */
-int pubsubCheckACLPermissionsOrReply(client *c, int idx, int count, int literal) {
- /* Check if the user can run the command according to the current
- * ACLs. */
- int acl_chanpos;
- int acl_retval = ACLCheckPubsubPerm(c,idx,count,literal,&acl_chanpos);
- if (acl_retval == ACL_DENIED_CHANNEL) {
- addACLLogEntry(c,acl_retval,acl_chanpos,NULL);
- addReplyError(c,
- "-NOPERM this user has no permissions to access "
- "one of the channels used as arguments");
- }
- return acl_retval;
-}
-
/*-----------------------------------------------------------------------------
* Pubsub commands implementation
*----------------------------------------------------------------------------*/
@@ -353,7 +338,6 @@ int pubsubCheckACLPermissionsOrReply(client *c, int idx, int count, int literal)
/* SUBSCRIBE channel [channel ...] */
void subscribeCommand(client *c) {
int j;
- if (pubsubCheckACLPermissionsOrReply(c,1,c->argc-1,0) != ACL_OK) return;
if ((c->flags & CLIENT_DENY_BLOCKING) && !(c->flags & CLIENT_MULTI)) {
/**
* A client that has CLIENT_DENY_BLOCKING flag on
@@ -387,7 +371,6 @@ void unsubscribeCommand(client *c) {
/* PSUBSCRIBE pattern [pattern ...] */
void psubscribeCommand(client *c) {
int j;
- if (pubsubCheckACLPermissionsOrReply(c,1,c->argc-1,1) != ACL_OK) return;
if ((c->flags & CLIENT_DENY_BLOCKING) && !(c->flags & CLIENT_MULTI)) {
/**
* A client that has CLIENT_DENY_BLOCKING flag on
@@ -420,7 +403,6 @@ void punsubscribeCommand(client *c) {
/* PUBLISH <channel> <message> */
void publishCommand(client *c) {
- if (pubsubCheckACLPermissionsOrReply(c,1,1,0) != ACL_OK) return;
int receivers = pubsubPublishMessage(c->argv[1],c->argv[2]);
if (server.cluster_enabled)
clusterPropagatePublish(c->argv[1],c->argv[2]);
diff --git a/src/quicklist.c b/src/quicklist.c
index 7b7aa7839..5a1e41dcc 100644
--- a/src/quicklist.c
+++ b/src/quicklist.c
@@ -315,7 +315,9 @@ REDIS_STATIC void __quicklistCompress(const quicklist *quicklist,
if (forward == node || reverse == node)
in_depth = 1;
- if (forward == reverse)
+ /* We have reached the compress depth of the opposite side of the quicklist,
+ * so there's no need to compress anything and we can exit. */
+ if (forward == reverse || forward->next == reverse)
return;
forward = forward->next;
@@ -325,11 +327,9 @@ REDIS_STATIC void __quicklistCompress(const quicklist *quicklist,
if (!in_depth)
quicklistCompressNode(node);
- if (depth > 2) {
- /* At this point, forward and reverse are one node beyond depth */
- quicklistCompressNode(forward);
- quicklistCompressNode(reverse);
- }
+ /* At this point, forward and reverse are one node beyond depth */
+ quicklistCompressNode(forward);
+ quicklistCompressNode(reverse);
}
#define quicklistCompress(_ql, _node) \
@@ -380,10 +380,11 @@ REDIS_STATIC void __quicklistInsertNode(quicklist *quicklist,
quicklist->head = quicklist->tail = new_node;
}
+ /* Update len first, so that __quicklistCompress knows the exact len */
+ quicklist->len++;
+
if (old_node)
quicklistCompress(quicklist, old_node);
-
- quicklist->len++;
}
/* Wrappers for node inserting around existing node. */
@@ -602,15 +603,16 @@ REDIS_STATIC void __quicklistDelNode(quicklist *quicklist,
quicklist->head = node->next;
}
+ /* Update len first, so that __quicklistCompress knows the exact len */
+ quicklist->len--;
+ quicklist->count -= node->count;
+
/* If we deleted a node within our compress depth, we
* now have compressed nodes needing to be decompressed. */
__quicklistCompress(quicklist, NULL);
- quicklist->count -= node->count;
-
zfree(node->zl);
zfree(node);
- quicklist->len--;
}
/* Delete one entry from list given the node for the entry and a pointer
@@ -1296,17 +1298,24 @@ void quicklistRotate(quicklist *quicklist) {
/* First, get the tail entry */
unsigned char *p = ziplistIndex(quicklist->tail->zl, -1);
- unsigned char *value;
+ unsigned char *value, *tmp;
long long longval;
unsigned int sz;
char longstr[32] = {0};
- ziplistGet(p, &value, &sz, &longval);
+ ziplistGet(p, &tmp, &sz, &longval);
/* If value found is NULL, then ziplistGet populated longval instead */
- if (!value) {
+ if (!tmp) {
/* Write the longval as a string so we can re-add it */
sz = ll2string(longstr, sizeof(longstr), longval);
value = (unsigned char *)longstr;
+ } else if (quicklist->len == 1) {
+ /* Copy buffer since there could be a memory overlap when moving
+ * the entry from tail to head in the same ziplist. */
+ value = zmalloc(sz);
+ memcpy(value, tmp, sz);
+ } else {
+ value = tmp;
}
/* Add tail entry to head (must happen before tail is deleted). */
@@ -1321,6 +1330,8 @@ void quicklistRotate(quicklist *quicklist) {
/* Remove tail entry. */
quicklistDelIndex(quicklist, quicklist->tail, &p);
+ if (value != (unsigned char*)longstr && value != tmp)
+ zfree(value);
}
/* pop from quicklist and return result in 'data' ptr. Value of 'data'
@@ -1509,8 +1520,6 @@ void quicklistBookmarksClear(quicklist *ql) {
#define yell(str, ...) printf("ERROR! " str "\n\n", __VA_ARGS__)
-#define OK printf("\tOK\n")
-
#define ERROR \
do { \
printf("\tERROR!\n"); \
@@ -1630,7 +1639,6 @@ static int _ql_verify(quicklist *ql, uint32_t len, uint32_t count,
}
if (ql->len == 0 && !errors) {
- OK;
return errors;
}
@@ -1679,8 +1687,6 @@ static int _ql_verify(quicklist *ql, uint32_t len, uint32_t count,
}
}
- if (!errors)
- OK;
return errors;
}
@@ -1692,9 +1698,10 @@ static char *genstr(char *prefix, int i) {
}
/* main test, but callable from other files */
-int quicklistTest(int argc, char *argv[]) {
+int quicklistTest(int argc, char *argv[], int accurate) {
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
unsigned int err = 0;
int optimize_start =
@@ -1703,11 +1710,14 @@ int quicklistTest(int argc, char *argv[]) {
printf("Starting optimization offset at: %d\n", optimize_start);
int options[] = {0, 1, 2, 3, 4, 5, 6, 10};
+ int fills[] = {-5, -4, -3, -2, -1, 0,
+ 1, 2, 32, 66, 128, 999};
size_t option_count = sizeof(options) / sizeof(*options);
+ int fill_count = (int)(sizeof(fills) / sizeof(*fills));
long long runtime[option_count];
for (int _i = 0; _i < (int)option_count; _i++) {
- printf("Testing Option %d\n", options[_i]);
+ printf("Testing Compression option %d\n", options[_i]);
long long start = mstime();
TEST("create list") {
@@ -1732,57 +1742,53 @@ int quicklistTest(int argc, char *argv[]) {
quicklistRelease(ql);
}
- for (int f = optimize_start; f < 32; f++) {
- TEST_DESC("add to tail 5x at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("add to tail 5x at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 5; i++)
quicklistPushTail(ql, genstr("hello", i), 32);
if (ql->count != 5)
ERROR;
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 1, 5, 5, 5);
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 32; f++) {
- TEST_DESC("add to head 5x at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("add to head 5x at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 5; i++)
quicklistPushHead(ql, genstr("hello", i), 32);
if (ql->count != 5)
ERROR;
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 1, 5, 5, 5);
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 512; f++) {
- TEST_DESC("add to tail 500x at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("add to tail 500x at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushTail(ql, genstr("hello", i), 64);
if (ql->count != 500)
ERROR;
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 16, 500, 32, 20);
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 512; f++) {
- TEST_DESC("add to head 500x at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("add to head 500x at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushHead(ql, genstr("hello", i), 32);
if (ql->count != 500)
ERROR;
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 16, 500, 20, 32);
quicklistRelease(ql);
}
@@ -1795,9 +1801,9 @@ int quicklistTest(int argc, char *argv[]) {
quicklistRelease(ql);
}
- for (int f = optimize_start; f < 32; f++) {
- TEST("rotate one val once") {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST("rotate one val once") {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
quicklistPushHead(ql, "hello", 6);
quicklistRotate(ql);
/* Ignore compression verify because ziplist is
@@ -1807,10 +1813,9 @@ int quicklistTest(int argc, char *argv[]) {
}
}
- for (int f = optimize_start; f < 3; f++) {
- TEST_DESC("rotate 500 val 5000 times at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("rotate 500 val 5000 times at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
quicklistPushHead(ql, "900", 3);
quicklistPushHead(ql, "7000", 4);
quicklistPushHead(ql, "-1200", 5);
@@ -1822,11 +1827,11 @@ int quicklistTest(int argc, char *argv[]) {
ql_info(ql);
quicklistRotate(ql);
}
- if (f == 1)
+ if (fills[f] == 1)
ql_verify(ql, 504, 504, 1, 1);
- else if (f == 2)
+ else if (fills[f] == 2)
ql_verify(ql, 252, 504, 2, 2);
- else if (f == 32)
+ else if (fills[f] == 32)
ql_verify(ql, 16, 504, 32, 24);
quicklistRelease(ql);
}
@@ -2003,11 +2008,10 @@ int quicklistTest(int argc, char *argv[]) {
quicklistRelease(ql);
}
- for (int f = optimize_start; f < 12; f++) {
- TEST_DESC("insert once in elements while iterating at fill %d at "
- "compress %d\n",
- f, options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("insert once in elements while iterating at compress %d",
+ options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
quicklistPushTail(ql, "abc", 3);
quicklistSetFill(ql, 1);
quicklistPushTail(ql, "def", 3); /* force to unique node */
@@ -2059,12 +2063,10 @@ int quicklistTest(int argc, char *argv[]) {
}
}
- for (int f = optimize_start; f < 1024; f++) {
- TEST_DESC(
- "insert [before] 250 new in middle of 500 elements at fill"
- " %d at compress %d",
- f, options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("insert [before] 250 new in middle of 500 elements at compress %d",
+ options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushTail(ql, genstr("hello", i), 32);
for (int i = 0; i < 250; i++) {
@@ -2072,17 +2074,16 @@ int quicklistTest(int argc, char *argv[]) {
quicklistIndex(ql, 250, &entry);
quicklistInsertBefore(ql, &entry, genstr("abc", i), 32);
}
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 25, 750, 32, 20);
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 1024; f++) {
- TEST_DESC("insert [after] 250 new in middle of 500 elements at "
- "fill %d at compress %d",
- f, options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("insert [after] 250 new in middle of 500 elements at compress %d",
+ options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushHead(ql, genstr("hello", i), 32);
for (int i = 0; i < 250; i++) {
@@ -2094,7 +2095,7 @@ int quicklistTest(int argc, char *argv[]) {
if (ql->count != 750)
ERR("List size not 750, but rather %ld", ql->count);
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 26, 750, 20, 32);
quicklistRelease(ql);
}
@@ -2132,70 +2133,58 @@ int quicklistTest(int argc, char *argv[]) {
quicklistRelease(copy);
}
- for (int f = optimize_start; f < 512; f++) {
+ for (int f = 0; f < fill_count; f++) {
TEST_DESC("index 1,200 from 500 list at fill %d at compress %d", f,
options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushTail(ql, genstr("hello", i + 1), 32);
quicklistEntry entry;
quicklistIndex(ql, 1, &entry);
- if (!strcmp((char *)entry.value, "hello2"))
- OK;
- else
+ if (strcmp((char *)entry.value, "hello2") != 0)
ERR("Value: %s", entry.value);
quicklistIndex(ql, 200, &entry);
- if (!strcmp((char *)entry.value, "hello201"))
- OK;
- else
+ if (strcmp((char *)entry.value, "hello201") != 0)
ERR("Value: %s", entry.value);
quicklistRelease(ql);
}
- TEST_DESC("index -1,-2 from 500 list at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("index -1,-2 from 500 list at fill %d at compress %d",
+ fills[f], options[_i]) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushTail(ql, genstr("hello", i + 1), 32);
quicklistEntry entry;
quicklistIndex(ql, -1, &entry);
- if (!strcmp((char *)entry.value, "hello500"))
- OK;
- else
+ if (strcmp((char *)entry.value, "hello500") != 0)
ERR("Value: %s", entry.value);
quicklistIndex(ql, -2, &entry);
- if (!strcmp((char *)entry.value, "hello499"))
- OK;
- else
+ if (strcmp((char *)entry.value, "hello499") != 0)
ERR("Value: %s", entry.value);
quicklistRelease(ql);
}
- TEST_DESC("index -100 from 500 list at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("index -100 from 500 list at fill %d at compress %d",
+ fills[f], options[_i]) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 500; i++)
quicklistPushTail(ql, genstr("hello", i + 1), 32);
quicklistEntry entry;
quicklistIndex(ql, -100, &entry);
- if (!strcmp((char *)entry.value, "hello401"))
- OK;
- else
+ if (strcmp((char *)entry.value, "hello401") != 0)
ERR("Value: %s", entry.value);
quicklistRelease(ql);
}
TEST_DESC("index too big +1 from 50 list at fill %d at compress %d",
- f, options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ fills[f], options[_i]) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
for (int i = 0; i < 50; i++)
quicklistPushTail(ql, genstr("hello", i + 1), 32);
quicklistEntry entry;
if (quicklistIndex(ql, 50, &entry))
ERR("Index found at 50 with 50 list: %.*s", entry.sz,
entry.value);
- else
- OK;
quicklistRelease(ql);
}
}
@@ -2367,12 +2356,11 @@ int quicklistTest(int argc, char *argv[]) {
quicklistReplaceAtIndex(ql, 1, "foo", 3);
quicklistReplaceAtIndex(ql, -1, "bar", 3);
quicklistRelease(ql);
- OK;
}
- for (int f = optimize_start; f < 16; f++) {
- TEST_DESC("lrem test at fill %d at compress %d", f, options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("lrem test at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
char *words[] = {"abc", "foo", "bar", "foobar", "foobared",
"zap", "bar", "test", "foo"};
char *result[] = {"abc", "foo", "foobar", "foobared",
@@ -2397,14 +2385,12 @@ int quicklistTest(int argc, char *argv[]) {
/* check result of lrem 0 bar */
iter = quicklistGetIterator(ql, AL_START_HEAD);
i = 0;
- int ok = 1;
while (quicklistNext(iter, &entry)) {
/* Result must be: abc, foo, foobar, foobared, zap, test,
* foo */
if (strncmp((char *)entry.value, result[i], entry.sz)) {
ERR("No match at position %d, got %.*s instead of %s",
i, entry.sz, entry.value, result[i]);
- ok = 0;
}
i++;
}
@@ -2441,23 +2427,18 @@ int quicklistTest(int argc, char *argv[]) {
entry.sz)) {
ERR("No match at position %d, got %.*s instead of %s",
i, entry.sz, entry.value, resultB[resB - 1 - i]);
- ok = 0;
}
i++;
}
quicklistReleaseIterator(iter);
- /* final result of all tests */
- if (ok)
- OK;
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 16; f++) {
- TEST_DESC("iterate reverse + delete at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("iterate reverse + delete at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
quicklistPushTail(ql, "abc", 3);
quicklistPushTail(ql, "def", 3);
quicklistPushTail(ql, "hij", 3);
@@ -2494,10 +2475,9 @@ int quicklistTest(int argc, char *argv[]) {
}
}
- for (int f = optimize_start; f < 800; f++) {
- TEST_DESC("iterator at index test at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("iterator at index test at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
char num[32];
long long nums[5000];
for (int i = 0; i < 760; i++) {
@@ -2521,10 +2501,9 @@ int quicklistTest(int argc, char *argv[]) {
}
}
- for (int f = optimize_start; f < 40; f++) {
- TEST_DESC("ltrim test A at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("ltrim test A at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
char num[32];
long long nums[5000];
for (int i = 0; i < 32; i++) {
@@ -2532,7 +2511,7 @@ int quicklistTest(int argc, char *argv[]) {
int sz = ll2string(num, sizeof(num), nums[i]);
quicklistPushTail(ql, num, sz);
}
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 1, 32, 32, 32);
/* ltrim 25 53 (keep [25,32] inclusive = 7 remaining) */
quicklistDelRange(ql, 0, 25);
@@ -2545,18 +2524,17 @@ int quicklistTest(int argc, char *argv[]) {
"%lld",
entry.longval, nums[25 + i]);
}
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 1, 7, 7, 7);
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 40; f++) {
- TEST_DESC("ltrim test B at fill %d at compress %d", f,
- options[_i]) {
+ TEST_DESC("ltrim test B at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
/* Force-disable compression because our 33 sequential
* integers don't compress and the check always fails. */
- quicklist *ql = quicklistNew(f, QUICKLIST_NOCOMPRESS);
+ quicklist *ql = quicklistNew(fills[f], QUICKLIST_NOCOMPRESS);
char num[32];
long long nums[5000];
for (int i = 0; i < 33; i++) {
@@ -2564,24 +2542,20 @@ int quicklistTest(int argc, char *argv[]) {
int sz = ll2string(num, sizeof(num), nums[i]);
quicklistPushTail(ql, num, sz);
}
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 2, 33, 32, 1);
/* ltrim 5 16 (keep [5,16] inclusive = 12 remaining) */
quicklistDelRange(ql, 0, 5);
quicklistDelRange(ql, -16, 16);
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 1, 12, 12, 12);
quicklistEntry entry;
quicklistIndex(ql, 0, &entry);
if (entry.longval != 5)
ERR("A: longval not 5, but %lld", entry.longval);
- else
- OK;
quicklistIndex(ql, -1, &entry);
if (entry.longval != 16)
ERR("B! got instead: %lld", entry.longval);
- else
- OK;
quicklistPushTail(ql, "bobobob", 7);
quicklistIndex(ql, -1, &entry);
if (strncmp((char *)entry.value, "bobobob", 7))
@@ -2598,10 +2572,9 @@ int quicklistTest(int argc, char *argv[]) {
}
}
- for (int f = optimize_start; f < 40; f++) {
- TEST_DESC("ltrim test C at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("ltrim test C at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
char num[32];
long long nums[5000];
for (int i = 0; i < 33; i++) {
@@ -2609,28 +2582,25 @@ int quicklistTest(int argc, char *argv[]) {
int sz = ll2string(num, sizeof(num), nums[i]);
quicklistPushTail(ql, num, sz);
}
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 2, 33, 32, 1);
/* ltrim 3 3 (keep [3,3] inclusive = 1 remaining) */
quicklistDelRange(ql, 0, 3);
quicklistDelRange(ql, -29,
                             4000); /* make sure we don't loop forever */
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 1, 1, 1, 1);
quicklistEntry entry;
quicklistIndex(ql, 0, &entry);
if (entry.longval != -5157318210846258173)
ERROR;
- else
- OK;
quicklistRelease(ql);
}
}
- for (int f = optimize_start; f < 40; f++) {
- TEST_DESC("ltrim test D at fill %d at compress %d", f,
- options[_i]) {
- quicklist *ql = quicklistNew(f, options[_i]);
+ TEST_DESC("ltrim test D at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
+ quicklist *ql = quicklistNew(fills[f], options[_i]);
char num[32];
long long nums[5000];
for (int i = 0; i < 33; i++) {
@@ -2638,7 +2608,7 @@ int quicklistTest(int argc, char *argv[]) {
int sz = ll2string(num, sizeof(num), nums[i]);
quicklistPushTail(ql, num, sz);
}
- if (f == 32)
+ if (fills[f] == 32)
ql_verify(ql, 2, 33, 32, 1);
quicklistDelRange(ql, -12, 3);
if (ql->count != 30)
@@ -2648,9 +2618,8 @@ int quicklistTest(int argc, char *argv[]) {
}
}
- for (int f = optimize_start; f < 72; f++) {
- TEST_DESC("create quicklist from ziplist at fill %d at compress %d",
- f, options[_i]) {
+ TEST_DESC("create quicklist from ziplist at compress %d", options[_i]) {
+ for (int f = 0; f < fill_count; f++) {
unsigned char *zl = ziplistNew();
long long nums[64];
char num[64];
@@ -2664,12 +2633,12 @@ int quicklistTest(int argc, char *argv[]) {
zl = ziplistPush(zl, (unsigned char *)genstr("hello", i),
32, ZIPLIST_TAIL);
}
- quicklist *ql = quicklistCreateFromZiplist(f, options[_i], zl);
- if (f == 1)
+ quicklist *ql = quicklistCreateFromZiplist(fills[f], options[_i], zl);
+ if (fills[f] == 1)
ql_verify(ql, 66, 66, 1, 1);
- else if (f == 32)
+ else if (fills[f] == 32)
ql_verify(ql, 3, 66, 32, 2);
- else if (f == 66)
+ else if (fills[f] == 66)
ql_verify(ql, 1, 66, 66, 66);
quicklistRelease(ql);
}
@@ -2682,45 +2651,56 @@ int quicklistTest(int argc, char *argv[]) {
/* Run a longer test of compression depth outside of primary test loop. */
int list_sizes[] = {250, 251, 500, 999, 1000};
long long start = mstime();
- for (int list = 0; list < (int)(sizeof(list_sizes) / sizeof(*list_sizes));
- list++) {
- for (int f = optimize_start; f < 128; f++) {
- for (int depth = 1; depth < 40; depth++) {
- /* skip over many redundant test cases */
- TEST_DESC("verify specific compression of interior nodes with "
- "%d list "
- "at fill %d at compress %d",
- list_sizes[list], f, depth) {
- quicklist *ql = quicklistNew(f, depth);
+ int list_count = accurate ? (int)(sizeof(list_sizes) / sizeof(*list_sizes)) : 1;
+ for (int list = 0; list < list_count; list++) {
+ TEST_DESC("verify specific compression of interior nodes with %d list ",
+ list_sizes[list]) {
+ for (int f = 0; f < fill_count; f++) {
+ for (int depth = 1; depth < 40; depth++) {
+ /* skip over many redundant test cases */
+ quicklist *ql = quicklistNew(fills[f], depth);
for (int i = 0; i < list_sizes[list]; i++) {
quicklistPushTail(ql, genstr("hello TAIL", i + 1), 64);
quicklistPushHead(ql, genstr("hello HEAD", i + 1), 64);
}
- quicklistNode *node = ql->head;
- unsigned int low_raw = ql->compress;
- unsigned int high_raw = ql->len - ql->compress;
-
- for (unsigned int at = 0; at < ql->len;
- at++, node = node->next) {
- if (at < low_raw || at >= high_raw) {
- if (node->encoding != QUICKLIST_NODE_ENCODING_RAW) {
- ERR("Incorrect compression: node %d is "
- "compressed at depth %d ((%u, %u); total "
- "nodes: %lu; size: %u)",
- at, depth, low_raw, high_raw, ql->len,
- node->sz);
+ for (int step = 0; step < 2; step++) {
+ /* test remove node */
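+                        /* On the second pass, pop half of the entries from
+                         * both ends so the node count shrinks, then re-check
+                         * the compression depth invariant below. */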
+ if (step == 1) {
+ for (int i = 0; i < list_sizes[list] / 2; i++) {
+ unsigned char *data;
+ quicklistPop(ql, QUICKLIST_HEAD, &data, NULL, NULL);
+ zfree(data);
+ quicklistPop(ql, QUICKLIST_TAIL, &data, NULL, NULL);
+ zfree(data);
}
- } else {
- if (node->encoding != QUICKLIST_NODE_ENCODING_LZF) {
- ERR("Incorrect non-compression: node %d is NOT "
- "compressed at depth %d ((%u, %u); total "
- "nodes: %lu; size: %u; attempted: %d)",
- at, depth, low_raw, high_raw, ql->len,
- node->sz, node->attempted_compress);
+ }
+ quicklistNode *node = ql->head;
+ unsigned int low_raw = ql->compress;
+ unsigned int high_raw = ql->len - ql->compress;
+
+ for (unsigned int at = 0; at < ql->len;
+ at++, node = node->next) {
+ if (at < low_raw || at >= high_raw) {
+ if (node->encoding != QUICKLIST_NODE_ENCODING_RAW) {
+ ERR("Incorrect compression: node %d is "
+ "compressed at depth %d ((%u, %u); total "
+ "nodes: %lu; size: %u)",
+ at, depth, low_raw, high_raw, ql->len,
+ node->sz);
+ }
+ } else {
+ if (node->encoding != QUICKLIST_NODE_ENCODING_LZF) {
+ ERR("Incorrect non-compression: node %d is NOT "
+ "compressed at depth %d ((%u, %u); total "
+ "nodes: %lu; size: %u; attempted: %d)",
+ at, depth, low_raw, high_raw, ql->len,
+ node->sz, node->attempted_compress);
+ }
}
}
}
+
quicklistRelease(ql);
}
}
diff --git a/src/quicklist.h b/src/quicklist.h
index fd9878af0..c9f493d80 100644
--- a/src/quicklist.h
+++ b/src/quicklist.h
@@ -199,7 +199,7 @@ quicklistNode *quicklistBookmarkFind(quicklist *ql, const char *name);
void quicklistBookmarksClear(quicklist *ql);
#ifdef REDIS_TEST
-int quicklistTest(int argc, char *argv[]);
+int quicklistTest(int argc, char *argv[], int accurate);
#endif
/* Directions for iterators */
diff --git a/src/rdb.c b/src/rdb.c
index 630417302..6f2f5165e 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1073,8 +1073,7 @@ size_t rdbSavedObjectLen(robj *o, robj *key) {
/* Save a key-value pair, with expire time, type, key, value.
* On error -1 is returned.
- * On success if the key was actually saved 1 is returned, otherwise 0
- * is returned (the key was already expired). */
+ * On success, if the key was actually saved, 1 is returned. */
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) {
int savelru = server.maxmemory_policy & MAXMEMORY_FLAG_LRU;
int savelfu = server.maxmemory_policy & MAXMEMORY_FLAG_LFU;
@@ -2176,16 +2175,17 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
return NULL;
}
moduleType *mt = moduleTypeLookupModuleByID(moduleid);
- char name[10];
if (rdbCheckMode && rdbtype == RDB_TYPE_MODULE_2) {
+ char name[10];
moduleTypeNameByID(name,moduleid);
return rdbLoadCheckModuleValue(rdb,name);
}
if (mt == NULL) {
+ char name[10];
moduleTypeNameByID(name,moduleid);
- rdbReportCorruptRDB("The RDB file contains module data I can't load: no matching module '%s'", name);
+ rdbReportCorruptRDB("The RDB file contains module data I can't load: no matching module type '%s'", name);
return NULL;
}
RedisModuleIO io;
@@ -2212,7 +2212,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
return NULL;
}
if (eof != RDB_MODULE_OPCODE_EOF) {
- rdbReportCorruptRDB("The RDB file contains module data for the module '%s' that is not terminated by the proper module value EOF marker", name);
+ rdbReportCorruptRDB("The RDB file contains module data for the module '%s' that is not terminated by "
+ "the proper module value EOF marker", moduleTypeModuleName(mt));
if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */
decrRefCount(o);
@@ -2222,8 +2223,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
}
if (ptr == NULL) {
- moduleTypeNameByID(name,moduleid);
- rdbReportCorruptRDB("The RDB file contains module data for the module type '%s', that the responsible module is not able to load. Check for modules log above for additional clues.", name);
+ rdbReportCorruptRDB("The RDB file contains module data for the module type '%s', that the responsible "
+ "module is not able to load. Check for modules log above for additional clues.",
+ moduleTypeModuleName(mt));
return NULL;
}
o = createModuleObject(mt,ptr);
@@ -2729,7 +2731,7 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) {
* the cleanup needed. */
void killRDBChild(void) {
kill(server.child_pid, SIGUSR1);
- /* Because we are not using here wait4 (like we have in killAppendOnlyChild
+    /* Because we are not using waitpid here (like we have in killAppendOnlyChild
     * and TerminateModuleForkChild), all the cleanup operations are done by
     * checkChildrenDone, which will later find that the process was killed.
* This includes:
diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c
index 186739766..351335862 100644
--- a/src/redis-benchmark.c
+++ b/src/redis-benchmark.c
@@ -102,7 +102,6 @@ static struct config {
int showerrors;
long long start;
long long totlatency;
- long long *latency;
const char *title;
list *clients;
int quiet;
@@ -1656,7 +1655,10 @@ int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData
const float instantaneous_rps = (float)(requests_finished-previous_requests_finished)/instantaneous_dt;
config.previous_tick = current_tick;
atomicSet(config.previous_requests_finished,requests_finished);
- config.last_printed_bytes = printf("%s: rps=%.1f (overall: %.1f) avg_msec=%.3f (overall: %.3f)\r", config.title, instantaneous_rps, rps, hdr_mean(config.current_sec_latency_histogram)/1000.0f, hdr_mean(config.latency_histogram)/1000.0f);
+ int printed_bytes = printf("%s: rps=%.1f (overall: %.1f) avg_msec=%.3f (overall: %.3f)\r", config.title, instantaneous_rps, rps, hdr_mean(config.current_sec_latency_histogram)/1000.0f, hdr_mean(config.latency_histogram)/1000.0f);
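+    /* Keep track of the widest status line printed so far. */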
+ if (printed_bytes > config.last_printed_bytes){
+ config.last_printed_bytes = printed_bytes;
+ }
hdr_reset(config.current_sec_latency_histogram);
fflush(stdout);
return 250; /* every 250ms */
diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c
index 335e35189..4f451969a 100644
--- a/src/redis-check-rdb.c
+++ b/src/redis-check-rdb.c
@@ -192,6 +192,7 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) {
int closefile = (fp == NULL);
if (fp == NULL && (fp = fopen(rdbfilename,"r")) == NULL) return 1;
+ startLoadingFile(fp, rdbfilename, RDBFLAGS_NONE);
rioInitWithFile(&rdb,fp);
rdbstate.rio = &rdb;
rdb.update_cksum = rdbLoadProgressCallback;
@@ -208,7 +209,6 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) {
}
expiretime = -1;
- startLoadingFile(fp, rdbfilename, RDBFLAGS_NONE);
while(1) {
robj *key, *val;
diff --git a/src/redis-cli.c b/src/redis-cli.c
index 63d31f79a..7e1fe3934 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -227,7 +227,7 @@ static struct config {
int scan_mode;
int intrinsic_latency_mode;
int intrinsic_latency_duration;
- char *pattern;
+ sds pattern;
char *rdb_filename;
int bigkeys;
int memkeys;
@@ -237,6 +237,7 @@ static struct config {
char *auth;
int askpass;
char *user;
+ int quoted_input; /* Force input args to be treated as quoted strings */
int output; /* output mode, see OUTPUT_* defines */
int push_output; /* Should we display spontaneous PUSH replies */
sds mb_delim;
@@ -405,15 +406,17 @@ static void parseRedisUri(const char *uri) {
if (!strncasecmp(tlsscheme, curr, strlen(tlsscheme))) {
#ifdef USE_OPENSSL
config.tls = 1;
+ curr += strlen(tlsscheme);
#else
fprintf(stderr,"rediss:// is only supported when redis-cli is compiled with OpenSSL\n");
exit(1);
#endif
- } else if (strncasecmp(scheme, curr, strlen(scheme))) {
+ } else if (!strncasecmp(scheme, curr, strlen(scheme))) {
+ curr += strlen(scheme);
+ } else {
fprintf(stderr,"Invalid URI scheme\n");
exit(1);
}
- curr += strlen(scheme);
if (curr == end) return;
/* Extract user info. */
@@ -763,6 +766,23 @@ static void freeHintsCallback(void *ptr) {
* Networking / parsing
*--------------------------------------------------------------------------- */
+/* Unquote a null-terminated string and return it as a binary-safe sds. */
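+/* For example (hypothetical input), the text "foo\x00bar" (double quotes
+ * included) unquotes to a 7-byte sds containing an embedded NUL, because
+ * sdssplitargs() handles the usual quoting and hex escapes. */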
+static sds unquoteCString(char *str) {
+ int count;
+ sds *unquoted = sdssplitargs(str, &count);
+ sds res = NULL;
+
+ if (unquoted && count == 1) {
+ res = unquoted[0];
+ unquoted[0] = NULL;
+ }
+
+ if (unquoted)
+ sdsfreesplitres(unquoted, count);
+
+ return res;
+}
+
/* Send AUTH command to the server */
static int cliAuth(redisContext *ctx, char *user, char *auth) {
redisReply *reply;
@@ -1533,6 +1553,8 @@ static int parseOptions(int argc, char **argv) {
config.output = OUTPUT_RAW;
} else if (!strcmp(argv[i],"--no-raw")) {
config.output = OUTPUT_STANDARD;
+ } else if (!strcmp(argv[i],"--quoted-input")) {
+ config.quoted_input = 1;
} else if (!strcmp(argv[i],"--csv")) {
config.output = OUTPUT_CSV;
} else if (!strcmp(argv[i],"--latency")) {
@@ -1557,7 +1579,15 @@ static int parseOptions(int argc, char **argv) {
} else if (!strcmp(argv[i],"--scan")) {
config.scan_mode = 1;
} else if (!strcmp(argv[i],"--pattern") && !lastarg) {
- config.pattern = argv[++i];
+ sdsfree(config.pattern);
+ config.pattern = sdsnew(argv[++i]);
+ } else if (!strcmp(argv[i],"--quoted-pattern") && !lastarg) {
+ sdsfree(config.pattern);
+ config.pattern = unquoteCString(argv[++i]);
+ if (!config.pattern) {
+ fprintf(stderr,"Invalid quoted string specified for --quoted-pattern.\n");
+ exit(1);
+ }
} else if (!strcmp(argv[i],"--intrinsic-latency") && !lastarg) {
config.intrinsic_latency_mode = 1;
config.intrinsic_latency_duration = atoi(argv[++i]);
@@ -1841,6 +1871,7 @@ static void usage(void) {
" --raw Use raw formatting for replies (default when STDOUT is\n"
" not a tty).\n"
" --no-raw Force formatted output even when STDOUT is not a tty.\n"
+" --quoted-input Force input to be handled as quoted strings.\n"
" --csv Output in CSV format.\n"
" --show-pushes <yn> Whether to print RESP3 PUSH messages. Enabled by default when\n"
" STDOUT is a tty but can be overriden with --show-pushes no.\n"
@@ -1876,6 +1907,8 @@ static void usage(void) {
" --scan List all keys using the SCAN command.\n"
" --pattern <pat> Keys pattern when using the --scan, --bigkeys or --hotkeys\n"
" options (default: *).\n"
+" --quoted-pattern <pat> Same as --pattern, but the specified string can be\n"
+" quoted, in order to pass an otherwise non binary-safe string.\n"
" --intrinsic-latency <sec> Run a test to measure intrinsic system latency.\n"
" The test will run for the specified amount of seconds.\n"
" --eval <file> Send an EVAL command using the Lua script at <file>.\n"
@@ -1901,6 +1934,7 @@ static void usage(void) {
" redis-cli get mypasswd\n"
" redis-cli -r 100 lpush mylist x\n"
" redis-cli -r 100 -i 1 info | grep used_memory_human:\n"
+" redis-cli --quoted-input set '\"null-\\x00-separated\"' value\n"
" redis-cli --eval myscript.lua key1 key2 , arg1 arg2 arg3\n"
" redis-cli --scan --pattern '*:12345*'\n"
"\n"
@@ -1930,22 +1964,28 @@ static int confirmWithYes(char *msg, int ignore_force) {
return (nread != 0 && !strcmp("yes", buf));
}
-/* Turn the plain C strings into Sds strings */
-static char **convertToSds(int count, char** args) {
- int j;
- char **sds = zmalloc(sizeof(char*)*count);
-
- for(j = 0; j < count; j++)
- sds[j] = sdsnew(args[j]);
-
- return sds;
-}
+/* Create an sds array from argv, either as-is or by dequoting every
+ * element. When quoted is non-zero, the function may return NULL to indicate an
+ * invalid quoted string.
+ */
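+/* For example (hypothetical input), with quoted != 0 the argv entry
+ * "a\x61" (double quotes included) becomes the two-byte sds "aa"; with
+ * quoted == 0 every entry is simply copied verbatim. */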
+static sds *getSdsArrayFromArgv(int argc, char **argv, int quoted) {
+ sds *res = sds_malloc(sizeof(sds) * argc);
+
+ for (int j = 0; j < argc; j++) {
+ if (quoted) {
+ sds unquoted = unquoteCString(argv[j]);
+ if (!unquoted) {
+ while (--j >= 0) sdsfree(res[j]);
+ sds_free(res);
+ return NULL;
+ }
+ res[j] = unquoted;
+ } else {
+ res[j] = sdsnew(argv[j]);
+ }
+ }
-static void freeConvertedSds(int count, char **sds) {
- int j;
- for (j = 0; j < count; j++)
- sdsfree(sds[j]);
- zfree(sds);
+ return res;
}
static int issueCommandRepeat(int argc, char **argv, long repeat) {
@@ -2178,17 +2218,19 @@ static void repl(void) {
static int noninteractive(int argc, char **argv) {
int retval = 0;
-
- argv = convertToSds(argc, argv);
+ sds *sds_args = getSdsArrayFromArgv(argc, argv, config.quoted_input);
+ if (!sds_args) {
+ printf("Invalid quoted string\n");
+ return 1;
+ }
if (config.stdinarg) {
- argv = zrealloc(argv, (argc+1)*sizeof(char*));
- argv[argc] = readArgFromStdin();
- retval = issueCommand(argc+1, argv);
- sdsfree(argv[argc]);
- } else {
- retval = issueCommand(argc, argv);
+ sds_args = sds_realloc(sds_args, (argc + 1) * sizeof(sds));
+ sds_args[argc] = readArgFromStdin();
+ argc++;
}
- freeConvertedSds(argc, argv);
+
+ retval = issueCommand(argc, sds_args);
+ sdsfreesplitres(sds_args, argc);
return retval;
}
@@ -2913,8 +2955,12 @@ static int clusterManagerGetAntiAffinityScore(clusterManagerNodeArray *ipnodes,
else types = sdsempty();
/* Master type 'm' is always set as the first character of the
* types string. */
- if (!node->replicate) types = sdscatprintf(types, "m%s", types);
- else types = sdscat(types, "s");
+ if (node->replicate) types = sdscat(types, "s");
+ else {
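+            /* Prepend the master marker 'm' by building a fresh string
+             * instead of appending an sds to itself. */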
+ sds s = sdscatsds(sdsnew("m"), types);
+ sdsfree(types);
+ types = s;
+ }
dictReplace(related, key, types);
}
/* Now it's trivial to check, for each related group having the
@@ -7156,7 +7202,10 @@ static void getRDB(clusterManagerNode *node) {
redisFree(s); /* Close the connection ASAP as fsync() may take time. */
if (node)
node->context = NULL;
- fsync(fd);
+ if (fsync(fd) == -1) {
+        fprintf(stderr,"Failed to fsync '%s': %s\n", filename, strerror(errno));
+ exit(1);
+ }
close(fd);
if (node) {
sdsfree(filename);
@@ -7332,8 +7381,8 @@ static redisReply *sendScan(unsigned long long *it) {
redisReply *reply;
if (config.pattern)
- reply = redisCommand(context,"SCAN %llu MATCH %s",
- *it,config.pattern);
+ reply = redisCommand(context, "SCAN %llu MATCH %b",
+ *it, config.pattern, sdslen(config.pattern));
else
reply = redisCommand(context,"SCAN %llu",*it);
@@ -7368,8 +7417,14 @@ static int getDbSize(void) {
reply = redisCommand(context, "DBSIZE");
- if(reply == NULL || reply->type != REDIS_REPLY_INTEGER) {
- fprintf(stderr, "Couldn't determine DBSIZE!\n");
+ if (reply == NULL) {
+ fprintf(stderr, "\nI/O error\n");
+ exit(1);
+ } else if (reply->type == REDIS_REPLY_ERROR) {
+ fprintf(stderr, "Couldn't determine DBSIZE: %s\n", reply->str);
+ exit(1);
+ } else if (reply->type != REDIS_REPLY_INTEGER) {
+ fprintf(stderr, "Non INTEGER response from DBSIZE!\n");
exit(1);
}
@@ -7945,23 +8000,16 @@ static void scanMode(void) {
unsigned long long cur = 0;
do {
- if (config.pattern)
- reply = redisCommand(context,"SCAN %llu MATCH %s",
- cur,config.pattern);
- else
- reply = redisCommand(context,"SCAN %llu",cur);
- if (reply == NULL) {
- printf("I/O error\n");
- exit(1);
- } else if (reply->type == REDIS_REPLY_ERROR) {
- printf("ERROR: %s\n", reply->str);
- exit(1);
- } else {
- unsigned int j;
-
- cur = strtoull(reply->element[0]->str,NULL,10);
- for (j = 0; j < reply->element[1]->elements; j++)
+ reply = sendScan(&cur);
+ for (unsigned int j = 0; j < reply->element[1]->elements; j++) {
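+                /* In standard output mode, print the key escaped with
+                 * sdscatrepr() so binary-unsafe key names remain readable. */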
+ if (config.output == OUTPUT_STANDARD) {
+ sds out = sdscatrepr(sdsempty(), reply->element[1]->element[j]->str,
+ reply->element[1]->element[j]->len);
+ printf("%s\n", out);
+ sdsfree(out);
+ } else {
printf("%s\n", reply->element[1]->element[j]->str);
+ }
}
freeReplyObject(reply);
} while(cur != 0);
diff --git a/src/redismodule.h b/src/redismodule.h
index ea271b82b..5520ca3cc 100644
--- a/src/redismodule.h
+++ b/src/redismodule.h
@@ -160,13 +160,14 @@ This flag should not be used directly by the module.
#define REDISMODULE_NOTIFY_STREAM (1<<10) /* t */
#define REDISMODULE_NOTIFY_KEY_MISS (1<<11) /* m (Note: This one is excluded from REDISMODULE_NOTIFY_ALL on purpose) */
#define REDISMODULE_NOTIFY_LOADED (1<<12) /* module only key space notification, indicate a key loaded from rdb */
+#define REDISMODULE_NOTIFY_MODULE (1<<13) /* d, module key space notification */
/* Next notification flag, must be updated when adding new flags above!
This flag should not be used directly by the module.
* Use RedisModule_GetKeyspaceNotificationFlagsAll instead. */
-#define _REDISMODULE_NOTIFY_NEXT (1<<13)
+#define _REDISMODULE_NOTIFY_NEXT (1<<14)
-#define REDISMODULE_NOTIFY_ALL (REDISMODULE_NOTIFY_GENERIC | REDISMODULE_NOTIFY_STRING | REDISMODULE_NOTIFY_LIST | REDISMODULE_NOTIFY_SET | REDISMODULE_NOTIFY_HASH | REDISMODULE_NOTIFY_ZSET | REDISMODULE_NOTIFY_EXPIRED | REDISMODULE_NOTIFY_EVICTED | REDISMODULE_NOTIFY_STREAM) /* A */
+#define REDISMODULE_NOTIFY_ALL (REDISMODULE_NOTIFY_GENERIC | REDISMODULE_NOTIFY_STRING | REDISMODULE_NOTIFY_LIST | REDISMODULE_NOTIFY_SET | REDISMODULE_NOTIFY_HASH | REDISMODULE_NOTIFY_ZSET | REDISMODULE_NOTIFY_EXPIRED | REDISMODULE_NOTIFY_EVICTED | REDISMODULE_NOTIFY_STREAM | REDISMODULE_NOTIFY_MODULE) /* A */
/* A special pointer that we can use between the core and the module to signal
* field deletion, and that is impossible to be a valid pointer. */
@@ -193,6 +194,12 @@ This flag should not be used directly by the module.
#define REDISMODULE_NOT_USED(V) ((void) V)
+/* Logging level strings */
+#define REDISMODULE_LOGLEVEL_DEBUG "debug"
+#define REDISMODULE_LOGLEVEL_VERBOSE "verbose"
+#define REDISMODULE_LOGLEVEL_NOTICE "notice"
+#define REDISMODULE_LOGLEVEL_WARNING "warning"
+
/* Bit flags for aux_save_triggers and the aux_load and aux_save callbacks */
#define REDISMODULE_AUX_BEFORE_RDB (1<<0)
#define REDISMODULE_AUX_AFTER_RDB (1<<1)
@@ -635,6 +642,8 @@ REDISMODULE_API char * (*RedisModule_StringDMA)(RedisModuleKey *key, size_t *len
REDISMODULE_API int (*RedisModule_StringTruncate)(RedisModuleKey *key, size_t newlen) REDISMODULE_ATTR;
REDISMODULE_API mstime_t (*RedisModule_GetExpire)(RedisModuleKey *key) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_SetExpire)(RedisModuleKey *key, mstime_t expire) REDISMODULE_ATTR;
+REDISMODULE_API mstime_t (*RedisModule_GetAbsExpire)(RedisModuleKey *key) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_SetAbsExpire)(RedisModuleKey *key, mstime_t expire) REDISMODULE_ATTR;
REDISMODULE_API void (*RedisModule_ResetDataset)(int restart_aof, int async) REDISMODULE_ATTR;
REDISMODULE_API unsigned long long (*RedisModule_DbSize)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
REDISMODULE_API RedisModuleString * (*RedisModule_RandomKey)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
@@ -835,7 +844,7 @@ REDISMODULE_API int (*RedisModule_DefragCursorSet)(RedisModuleDefragCtx *ctx, un
REDISMODULE_API int (*RedisModule_DefragCursorGet)(RedisModuleDefragCtx *ctx, unsigned long *cursor) REDISMODULE_ATTR;
#endif
-#define RedisModule_IsAOFClient(id) ((id) == CLIENT_ID_AOF)
+#define RedisModule_IsAOFClient(id) ((id) == UINT64_MAX)
/* This is included inline inside each Redis module. */
static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) REDISMODULE_ATTR_UNUSED;
@@ -907,6 +916,8 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
REDISMODULE_GET_API(StringTruncate);
REDISMODULE_GET_API(GetExpire);
REDISMODULE_GET_API(SetExpire);
+ REDISMODULE_GET_API(GetAbsExpire);
+ REDISMODULE_GET_API(SetAbsExpire);
REDISMODULE_GET_API(ResetDataset);
REDISMODULE_GET_API(DbSize);
REDISMODULE_GET_API(RandomKey);
diff --git a/src/replication.c b/src/replication.c
index eb5fa54c0..8177eb073 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -892,17 +892,34 @@ void syncCommand(client *c) {
}
/* REPLCONF <option> <value> <option> <value> ...
- * This command is used by a slave in order to configure the replication
+ * This command is used by a replica in order to configure the replication
* process before starting it with the SYNC command.
+ * This command is also used by a master in order to get the replication
+ * offset from a replica.
*
- * Currently the only use of this command is to communicate to the master
- * what is the listening port of the Slave redis instance, so that the
- * master can accurately list slaves and their listening ports in
- * the INFO output.
+ * Currently we support these options:
*
- * In the future the same command can be used in order to configure
- * the replication to initiate an incremental replication instead of a
- * full resync. */
+ * - listening-port <port>
+ * - ip-address <ip>
+ *   What are the listening IP and port of the replica Redis instance, so that
+ *   the master can accurately list replicas and their listening ports in the
+ * INFO output.
+ *
+ * - capa <eof|psync2>
+ *   What are the capabilities of this instance.
+ * eof: supports EOF-style RDB transfer for diskless replication.
+ * psync2: supports PSYNC v2, so understands +CONTINUE <new repl ID>.
+ *
+ * - ack <offset>
+ *   The replica informs the master of the amount of replication stream it has
+ *   processed so far.
+ *
+ * - getack
+ *   Unlike the other subcommands, this one is used by the master to get the
+ *   replication offset from a replica.
+ *
+ * - rdb-only
+ *   The replica only wants the RDB snapshot, without the replication buffer. */
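+
+/* A hypothetical handshake, seen from the replica side, might therefore look
+ * like:
+ *
+ *   REPLCONF listening-port 6380
+ *   REPLCONF capa eof capa psync2
+ *
+ * followed later by periodic "REPLCONF ack <offset>" messages. */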
void replconfCommand(client *c) {
int j;
@@ -1136,6 +1153,8 @@ void rdbPipeWriteHandlerConnRemoved(struct connection *conn) {
if (!connHasWriteHandler(conn))
return;
connSetWriteHandler(conn, NULL);
+ client *slave = connGetPrivateData(conn);
+ slave->repl_last_partial_write = 0;
server.rdb_pipe_numconns_writing--;
/* if there are no more writes for now for this conn, or write error: */
if (server.rdb_pipe_numconns_writing == 0) {
@@ -1163,8 +1182,10 @@ void rdbPipeWriteHandler(struct connection *conn) {
} else {
slave->repldboff += nwritten;
atomicIncr(server.stat_net_output_bytes, nwritten);
- if (slave->repldboff < server.rdb_pipe_bufflen)
+ if (slave->repldboff < server.rdb_pipe_bufflen) {
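+            /* Remember when the last partial write happened, so that
+             * replicationCron() can detect a replica stuck in a diskless
+             * full sync. */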
+ slave->repl_last_partial_write = server.unixtime;
return; /* more data to write.. */
+ }
}
rdbPipeWriteHandlerConnRemoved(conn);
}
@@ -1245,6 +1266,7 @@ void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData,
/* If we were unable to write all the data to one of the replicas,
* setup write handler (and disable pipe read handler, below) */
if (nwritten != server.rdb_pipe_bufflen) {
+ slave->repl_last_partial_write = server.unixtime;
server.rdb_pipe_numconns_writing++;
connSetWriteHandler(conn, rdbPipeWriteHandler);
}
@@ -1873,8 +1895,7 @@ void readSyncBulkPayload(connection *conn) {
serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Finished with success");
if (server.supervised_mode == SUPERVISED_SYSTEMD) {
- redisCommunicateSystemd("STATUS=MASTER <-> REPLICA sync: Finished with success. Ready to accept connections.\n");
- redisCommunicateSystemd("READY=1\n");
+ redisCommunicateSystemd("STATUS=MASTER <-> REPLICA sync: Finished with success. Ready to accept connections in read-write mode.\n");
}
/* Send the initial ACK immediately to put this replica in online state. */
@@ -2434,8 +2455,7 @@ void syncWithMaster(connection *conn) {
if (psync_result == PSYNC_CONTINUE) {
serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Master accepted a Partial Resynchronization.");
if (server.supervised_mode == SUPERVISED_SYSTEMD) {
- redisCommunicateSystemd("STATUS=MASTER <-> REPLICA sync: Partial Resynchronization accepted. Ready to accept connections.\n");
- redisCommunicateSystemd("READY=1\n");
+ redisCommunicateSystemd("STATUS=MASTER <-> REPLICA sync: Partial Resynchronization accepted. Ready to accept connections in read-write mode.\n");
}
return;
}
@@ -2683,6 +2703,9 @@ void replicationUnsetMaster(void) {
* starting from now. Otherwise the backlog will be freed after a
* failover if slaves do not connect immediately. */
server.repl_no_slaves_since = server.unixtime;
+
+    /* Reset the down time so it'll be ready for when we turn into a replica again. */
+ server.repl_down_since = 0;
/* Fire the role change modules event. */
moduleFireServerEvent(REDISMODULE_EVENT_REPLICATION_ROLE_CHANGED,
@@ -2758,7 +2781,7 @@ void replicaofCommand(client *c) {
if ((getLongFromObjectOrReply(c, c->argv[2], &port, NULL) != C_OK))
return;
- /* Check if we are already attached to the specified slave */
+ /* Check if we are already attached to the specified master */
if (server.masterhost && !strcasecmp(server.masterhost,c->argv[1]->ptr)
&& server.masterport == port) {
serverLog(LL_NOTICE,"REPLICAOF would result into synchronization "
@@ -3375,13 +3398,28 @@ void replicationCron(void) {
while((ln = listNext(&li))) {
client *slave = ln->value;
- if (slave->replstate != SLAVE_STATE_ONLINE) continue;
- if (slave->flags & CLIENT_PRE_PSYNC) continue;
- if ((server.unixtime - slave->repl_ack_time) > server.repl_timeout)
- {
- serverLog(LL_WARNING, "Disconnecting timedout replica: %s",
- replicationGetSlaveName(slave));
- freeClient(slave);
+ if (slave->replstate == SLAVE_STATE_ONLINE) {
+ if (slave->flags & CLIENT_PRE_PSYNC)
+ continue;
+ if ((server.unixtime - slave->repl_ack_time) > server.repl_timeout) {
+ serverLog(LL_WARNING, "Disconnecting timedout replica (streaming sync): %s",
+ replicationGetSlaveName(slave));
+ freeClient(slave);
+ continue;
+ }
+ }
+            /* We only consider disconnecting diskless replicas, because disk-based replicas aren't fed
+             * by the fork child, so a stuck disk-based replica doesn't prevent the fork child
+             * from terminating. */
+ if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END && server.rdb_child_type == RDB_CHILD_TYPE_SOCKET) {
+ if (slave->repl_last_partial_write != 0 &&
+ (server.unixtime - slave->repl_last_partial_write) > server.repl_timeout)
+ {
+ serverLog(LL_WARNING, "Disconnecting timedout replica (full sync): %s",
+ replicationGetSlaveName(slave));
+ freeClient(slave);
+ continue;
+ }
}
}
}
@@ -3546,7 +3584,7 @@ void abortFailover(const char *err) {
}
/*
- * FAILOVER [TO <HOST> <IP> [FORCE]] [ABORT] [TIMEOUT <timeout>]
+ * FAILOVER [TO <HOST> <PORT> [FORCE]] [ABORT] [TIMEOUT <timeout>]
*
* This command will coordinate a failover between the master and one
* of its replicas. The happy path contains the following steps:
@@ -3649,7 +3687,7 @@ void failoverCommand(client *c) {
client *replica = findReplica(host, port);
if (replica == NULL) {
- addReplyError(c,"FAILOVER target HOST and IP is not "
+ addReplyError(c,"FAILOVER target HOST and PORT is not "
"a replica.");
return;
}
diff --git a/src/rio.c b/src/rio.c
index bbef81b68..0d107708f 100644
--- a/src/rio.c
+++ b/src/rio.c
@@ -117,7 +117,7 @@ static size_t rioFileWrite(rio *r, const void *buf, size_t len) {
r->io.file.buffered >= r->io.file.autosync)
{
fflush(r->io.file.fp);
- redis_fsync(fileno(r->io.file.fp));
+ if (redis_fsync(fileno(r->io.file.fp)) == -1) return 0;
r->io.file.buffered = 0;
}
return retval;
@@ -160,7 +160,7 @@ void rioInitWithFile(rio *r, FILE *fp) {
}
/* ------------------- Connection implementation -------------------
- * We use this RIO implemetnation when reading an RDB file directly from
+ * We use this RIO implementation when reading an RDB file directly from
* the connection to the memory via rdbLoadRio(), thus this implementation
* only implements reading from a connection that is, normally,
* just a socket. */
@@ -262,7 +262,7 @@ void rioInitWithConn(rio *r, connection *conn, size_t read_limit) {
sdsclear(r->io.conn.buf);
}
-/* Release the RIO tream. Optionally returns the unread buffered data
+/* Release the RIO stream. Optionally returns the unread buffered data
* when the SDS pointer 'remaining' is passed. */
void rioFreeConn(rio *r, sds *remaining) {
if (remaining && (size_t)r->io.conn.pos < sdslen(r->io.conn.buf)) {
diff --git a/src/scripting.c b/src/scripting.c
index 6830e7a70..299e60810 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -604,9 +604,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) {
/* Check the ACLs. */
int acl_errpos;
- int acl_retval = ACLCheckCommandPerm(c,&acl_errpos);
- if (acl_retval == ACL_OK && c->cmd->proc == publishCommand)
- acl_retval = ACLCheckPubsubPerm(c,1,1,0,&acl_errpos);
+ int acl_retval = ACLCheckAllPerm(c,&acl_errpos);
if (acl_retval != ACL_OK) {
addACLLogEntry(c,acl_retval,acl_errpos,NULL);
switch (acl_retval) {
@@ -1453,6 +1451,14 @@ void luaMaskCountHook(lua_State *lua, lua_Debug *ar) {
if (server.lua_timedout) processEventsWhileBlocked();
if (server.lua_kill) {
serverLog(LL_WARNING,"Lua script killed by user with SCRIPT KILL.");
+
+        /*
+         * Set the hook to fire on every line so the user will not be able
+         * to catch the error with pcall and invoke pcall again, which would
+         * prevent the script from ever being killed.
+         */
+ lua_sethook(lua, luaMaskCountHook, LUA_MASKLINE, 0);
+
lua_pushstring(lua,"Script killed by user with SCRIPT KILL...");
lua_error(lua);
}
@@ -1498,7 +1504,6 @@ void evalGenericCommand(client *c, int evalsha) {
server.lua_replicate_commands = server.lua_always_replicate_commands;
server.lua_multi_emitted = 0;
server.lua_repl = PROPAGATE_AOF|PROPAGATE_REPL;
- server.in_eval = 1;
/* Get the number of arguments that are keys */
if (getLongLongFromObjectOrReply(c,c->argv[2],&numkeys,NULL) != C_OK)
@@ -1570,6 +1575,7 @@ void evalGenericCommand(client *c, int evalsha) {
*
* If we are debugging, we set instead a "line" hook so that the
* debugger is call-back at every line executed by the script. */
+ server.in_eval = 1;
server.lua_caller = c;
server.lua_cur_script = funcname + 2;
server.lua_time_start = mstime();
@@ -1602,6 +1608,7 @@ void evalGenericCommand(client *c, int evalsha) {
if (server.masterhost && server.master)
queueClientForReprocessing(server.master);
}
+ server.in_eval = 0;
server.lua_caller = NULL;
server.lua_cur_script = NULL;
@@ -1678,8 +1685,6 @@ void evalGenericCommand(client *c, int evalsha) {
forceCommandPropagation(c,PROPAGATE_REPL|PROPAGATE_AOF);
}
}
-
- server.in_eval = 0;
}
void evalCommand(client *c) {
diff --git a/src/sds.c b/src/sds.c
index 6385ab14b..2ec3aa733 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -1234,9 +1234,10 @@ static sds sdsTestTemplateCallback(sds varname, void *arg) {
else return NULL;
}
-int sdsTest(int argc, char **argv) {
+int sdsTest(int argc, char **argv, int accurate) {
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
{
sds x = sdsnew("foo"), y;
diff --git a/src/sds.h b/src/sds.h
index 85dc0b680..7f8710745 100644
--- a/src/sds.h
+++ b/src/sds.h
@@ -277,7 +277,7 @@ void *sds_realloc(void *ptr, size_t size);
void sds_free(void *ptr);
#ifdef REDIS_TEST
-int sdsTest(int argc, char *argv[]);
+int sdsTest(int argc, char *argv[], int accurate);
#endif
#endif
diff --git a/src/sentinel.c b/src/sentinel.c
index 8597a10df..a56cd8b15 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -215,6 +215,7 @@ typedef struct sentinelRedisInstance {
/* Slave specific. */
mstime_t master_link_down_time; /* Slave replication link down time. */
int slave_priority; /* Slave priority according to its INFO output. */
+ int replica_announced; /* Replica announcing according to its INFO output. */
mstime_t slave_reconf_sent_time; /* Time at which we sent SLAVE OF <new> */
struct sentinelRedisInstance *master; /* Master instance if it's slave. */
char *slave_master_host; /* Master host as reported by INFO */
@@ -548,14 +549,12 @@ void initSentinel(void) {
server.sentinel_config = NULL;
}
-/* This function gets called when the server is in Sentinel mode, started,
- * loaded the configuration, and is ready for normal operations. */
-void sentinelIsRunning(void) {
- int j;
-
+/* This function checks whether the sentinel config file has been set and
+ * whether we have write permissions to it. */
+void sentinelCheckConfigFile(void) {
if (server.configfile == NULL) {
serverLog(LL_WARNING,
- "Sentinel started without a config file. Exiting...");
+ "Sentinel needs config file on disk to save state. Exiting...");
exit(1);
} else if (access(server.configfile,W_OK) == -1) {
serverLog(LL_WARNING,
@@ -563,6 +562,12 @@ void sentinelIsRunning(void) {
server.configfile,strerror(errno));
exit(1);
}
+}
+
+/* This function gets called when the server is in Sentinel mode, started,
+ * loaded the configuration, and is ready for normal operations. */
+void sentinelIsRunning(void) {
+ int j;
/* If this Sentinel has yet no ID set in the configuration file, we
* pick a random one and persist the config on disk. From now on this
@@ -870,6 +875,7 @@ void sentinelRunPendingScripts(void) {
sj->pid = 0;
} else if (pid == 0) {
/* Child */
+ tlsCleanup();
execve(sj->argv[0],sj->argv,environ);
/* If we are here an error occurred. */
_exit(2); /* Don't retry execution. */
@@ -903,7 +909,7 @@ void sentinelCollectTerminatedScripts(void) {
int statloc;
pid_t pid;
- while ((pid = wait3(&statloc,WNOHANG,NULL)) > 0) {
+ while ((pid = waitpid(-1, &statloc, WNOHANG)) > 0) {
int exitcode = WEXITSTATUS(statloc);
int bysignal = 0;
listNode *ln;
@@ -915,7 +921,7 @@ void sentinelCollectTerminatedScripts(void) {
ln = sentinelGetScriptListNodeByPid(pid);
if (ln == NULL) {
- serverLog(LL_WARNING,"wait3() returned a pid (%ld) we can't find in our scripts execution queue!", (long)pid);
+ serverLog(LL_WARNING,"waitpid() returned a pid (%ld) we can't find in our scripts execution queue!", (long)pid);
continue;
}
sj = ln->value;
@@ -1334,6 +1340,7 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *
ri->auth_pass = NULL;
ri->auth_user = NULL;
ri->slave_priority = SENTINEL_DEFAULT_SLAVE_PRIORITY;
+ ri->replica_announced = 1;
ri->slave_reconf_sent_time = 0;
ri->slave_master_host = NULL;
ri->slave_master_port = 0;
@@ -1719,19 +1726,12 @@ void sentinelPropagateDownAfterPeriod(sentinelRedisInstance *master) {
}
}
-char *sentinelGetInstanceTypeString(sentinelRedisInstance *ri) {
- if (ri->flags & SRI_MASTER) return "master";
- else if (ri->flags & SRI_SLAVE) return "slave";
- else if (ri->flags & SRI_SENTINEL) return "sentinel";
- else return "unknown";
-}
-
/* This function is used in order to send commands to Redis instances: the
* commands we send from Sentinel may be renamed, a common case is a master
* with CONFIG and SLAVEOF commands renamed for security concerns. In that
* case we check the ri->renamed_command table (or if the instance is a slave,
* we check the one of the master), and map the command that we should send
- * to the set of renamed commads. However, if the command was not renamed,
+ * to the set of renamed commands. However, if the command was not renamed,
* we just return "command" itself. */
char *sentinelInstanceMapCommand(sentinelRedisInstance *ri, char *command) {
sds sc = sdsnew(command);
@@ -2045,12 +2045,12 @@ const char *sentinelHandleConfiguration(char **argv, int argc) {
} else if (!strcasecmp(argv[0],"resolve-hostnames") && argc == 2) {
/* resolve-hostnames <yes|no> */
if ((sentinel.resolve_hostnames = yesnotoi(argv[1])) == -1) {
- return "Please specify yes or not for the resolve-hostnames option.";
+ return "Please specify yes or no for the resolve-hostnames option.";
}
} else if (!strcasecmp(argv[0],"announce-hostnames") && argc == 2) {
/* announce-hostnames <yes|no> */
if ((sentinel.announce_hostnames = yesnotoi(argv[1])) == -1) {
- return "Please specify yes or not for the announce-hostnames option.";
+ return "Please specify yes or no for the announce-hostnames option.";
}
} else {
return "Unrecognized sentinel configuration statement.";
@@ -2320,8 +2320,8 @@ void sentinelFlushConfig(void) {
return;
werr:
- if (fd != -1) close(fd);
serverLog(LL_WARNING,"WARNING: Sentinel was not able to save the new configuration on disk!!!: %s", strerror(errno));
+ if (fd != -1) close(fd);
}
/* ====================== hiredis connection handling ======================= */
@@ -2422,8 +2422,10 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) {
/* Commands connection. */
if (link->cc == NULL) {
link->cc = redisAsyncConnectBind(ri->addr->ip,ri->addr->port,NET_FIRST_BIND_ADDR);
- if (!link->cc->err) anetCloexec(link->cc->c.fd);
- if (!link->cc->err && server.tls_replication &&
+ if (link->cc && !link->cc->err) anetCloexec(link->cc->c.fd);
+ if (!link->cc) {
+ sentinelEvent(LL_DEBUG,"-cmd-link-reconnection",ri,"%@ #Failed to establish connection");
+ } else if (!link->cc->err && server.tls_replication &&
(instanceLinkNegotiateTLS(link->cc) == C_ERR)) {
sentinelEvent(LL_DEBUG,"-cmd-link-reconnection",ri,"%@ #Failed to initialize TLS");
instanceLinkCloseConnection(link,link->cc);
@@ -2450,8 +2452,10 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) {
/* Pub / Sub */
if ((ri->flags & (SRI_MASTER|SRI_SLAVE)) && link->pc == NULL) {
link->pc = redisAsyncConnectBind(ri->addr->ip,ri->addr->port,NET_FIRST_BIND_ADDR);
- if (!link->pc->err) anetCloexec(link->pc->c.fd);
- if (!link->pc->err && server.tls_replication &&
+ if (link->pc && !link->pc->err) anetCloexec(link->pc->c.fd);
+ if (!link->pc) {
+ sentinelEvent(LL_DEBUG,"-pubsub-link-reconnection",ri,"%@ #Failed to establish connection");
+ } else if (!link->pc->err && server.tls_replication &&
(instanceLinkNegotiateTLS(link->pc) == C_ERR)) {
sentinelEvent(LL_DEBUG,"-pubsub-link-reconnection",ri,"%@ #Failed to initialize TLS");
} else if (link->pc->err) {
@@ -2624,6 +2628,10 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) {
/* slave_repl_offset:<offset> */
if (sdslen(l) >= 18 && !memcmp(l,"slave_repl_offset:",18))
ri->slave_repl_offset = strtoull(l+18,NULL,10);
+
+ /* replica_announced:<announcement> */
+ if (sdslen(l) >= 18 && !memcmp(l,"replica_announced:",18))
+ ri->replica_announced = atoi(l+18);
}
}
ri->info_refresh = mstime();
@@ -2644,8 +2652,7 @@ void sentinelRefreshInstanceInfo(sentinelRedisInstance *ri, const char *info) {
((ri->flags & (SRI_MASTER|SRI_SLAVE)) == role) ?
"+role-change" : "-role-change",
ri, "%@ new reported role is %s",
- role == SRI_MASTER ? "master" : "slave",
- ri->flags & SRI_MASTER ? "master" : "slave");
+ role == SRI_MASTER ? "master" : "slave");
}
/* None of the following conditions are processed when in tilt mode, so
@@ -3291,6 +3298,8 @@ void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) {
if (ri->flags & SRI_RECONF_SENT) flags = sdscat(flags,"reconf_sent,");
if (ri->flags & SRI_RECONF_INPROG) flags = sdscat(flags,"reconf_inprog,");
if (ri->flags & SRI_RECONF_DONE) flags = sdscat(flags,"reconf_done,");
+ if (ri->flags & SRI_FORCE_FAILOVER) flags = sdscat(flags,"force_failover,");
+ if (ri->flags & SRI_SCRIPT_KILL_SENT) flags = sdscat(flags,"script_kill_sent,");
if (sdslen(flags) != 0) sdsrange(flags,0,-2); /* remove last "," */
addReplyBulkCString(c,flags);
@@ -3343,7 +3352,8 @@ void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) {
/* Masters and Slaves */
if (ri->flags & (SRI_MASTER|SRI_SLAVE)) {
addReplyBulkCString(c,"info-refresh");
- addReplyBulkLongLong(c,mstime() - ri->info_refresh);
+ addReplyBulkLongLong(c,
+ ri->info_refresh ? (mstime() - ri->info_refresh) : 0);
fields++;
addReplyBulkCString(c,"role-reported");
@@ -3423,6 +3433,10 @@ void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) {
addReplyBulkCString(c,"slave-repl-offset");
addReplyBulkLongLong(c,ri->slave_repl_offset);
fields++;
+
+ addReplyBulkCString(c,"replica-announced");
+ addReplyBulkLongLong(c,ri->replica_announced);
+ fields++;
}
/* Only sentinels */
@@ -3448,15 +3462,20 @@ void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) {
void addReplyDictOfRedisInstances(client *c, dict *instances) {
dictIterator *di;
dictEntry *de;
+ long slaves = 0;
+ void *replylen = addReplyDeferredLen(c);
di = dictGetIterator(instances);
- addReplyArrayLen(c,dictSize(instances));
while((de = dictNext(di)) != NULL) {
sentinelRedisInstance *ri = dictGetVal(de);
+        /* Skip replicas that are configured not to be announced. */
+ if (ri->flags & SRI_SLAVE && !ri->replica_announced) continue;
addReplySentinelRedisInstance(c,ri);
+ slaves++;
}
dictReleaseIterator(di);
+ setDeferredArrayLen(c, replylen, slaves);
}
/* Lookup the named master into sentinel.masters.
@@ -3710,17 +3729,7 @@ NULL
ri = createSentinelRedisInstance(c->argv[2]->ptr,SRI_MASTER,
c->argv[3]->ptr,port,quorum,NULL);
if (ri == NULL) {
- switch(errno) {
- case EBUSY:
- addReplyError(c,"Duplicated master name");
- break;
- case EINVAL:
- addReplyError(c,"Invalid port number");
- break;
- default:
- addReplyError(c,"Unspecified error adding the instance");
- break;
- }
+ addReplyError(c,sentinelCheckCreateInstanceErrors(SRI_MASTER));
} else {
sentinelFlushConfig();
sentinelEvent(LL_WARNING,"+monitor",ri,"%@ quorum %d",ri->quorum);
@@ -3820,7 +3829,8 @@ NULL
addReplyBulkCBuffer(c,ri->name,strlen(ri->name));
addReplyArrayLen(c,dictSize(ri->slaves) + 1); /* +1 for self */
addReplyArrayLen(c,2);
- addReplyLongLong(c, now - ri->info_refresh);
+ addReplyLongLong(c,
+ ri->info_refresh ? (now - ri->info_refresh) : 0);
if (ri->info)
addReplyBulkCBuffer(c,ri->info,sdslen(ri->info));
else
@@ -3832,7 +3842,8 @@ NULL
while ((sde = dictNext(sdi)) != NULL) {
sentinelRedisInstance *sri = dictGetVal(sde);
addReplyArrayLen(c,2);
- addReplyLongLong(c, now - sri->info_refresh);
+ addReplyLongLong(c,
+ ri->info_refresh ? (now - sri->info_refresh) : 0);
if (sri->info)
addReplyBulkCBuffer(c,sri->info,sdslen(sri->info));
else
diff --git a/src/server.c b/src/server.c
index 4784431ba..993260619 100644
--- a/src/server.c
+++ b/src/server.c
@@ -901,7 +901,7 @@ struct redisCommand redisCommandTable[] = {
0,NULL,0,0,0,0,0,0},
{"hello",helloCommand,-1,
- "no-auth no-script fast no-monitor ok-loading ok-stale no-slowlog @connection",
+ "no-auth no-script fast no-monitor ok-loading ok-stale @connection",
0,NULL,0,0,0,0,0,0},
/* EVAL can modify the dataset, however it is not flagged as a write
@@ -1091,7 +1091,7 @@ struct redisCommand redisCommandTable[] = {
0,NULL,0,0,0,0,0,0},
{"acl",aclCommand,-2,
- "admin no-script no-slowlog ok-loading ok-stale",
+ "admin no-script ok-loading ok-stale",
0,NULL,0,0,0,0,0,0},
{"stralgo",stralgoCommand,-2,
@@ -1161,12 +1161,10 @@ void serverLogRaw(int level, const char *msg) {
/* Like serverLogRaw() but with printf-alike support. This is the function that
* is used across the code. The raw version is only used in order to dump
* the INFO output on crash. */
-void serverLog(int level, const char *fmt, ...) {
+void _serverLog(int level, const char *fmt, ...) {
va_list ap;
char msg[LOG_MAX_LEN];
- if ((level&0xff) < server.verbosity) return;
-
va_start(ap, fmt);
vsnprintf(msg, sizeof(msg), fmt, ap);
va_end(ap);
@@ -1332,21 +1330,14 @@ uint64_t dictEncObjHash(const void *key) {
if (sdsEncodedObject(o)) {
return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
- } else {
- if (o->encoding == OBJ_ENCODING_INT) {
- char buf[32];
- int len;
+ } else if (o->encoding == OBJ_ENCODING_INT) {
+ char buf[32];
+ int len;
- len = ll2string(buf,32,(long)o->ptr);
- return dictGenHashFunction((unsigned char*)buf, len);
- } else {
- uint64_t hash;
-
- o = getDecodedObject(o);
- hash = dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
- decrRefCount(o);
- return hash;
- }
+ len = ll2string(buf,32,(long)o->ptr);
+ return dictGenHashFunction((unsigned char*)buf, len);
+ } else {
+ serverPanic("Unknown string encoding");
}
}
@@ -1621,6 +1612,7 @@ void resetChildState() {
server.child_type = CHILD_TYPE_NONE;
server.child_pid = -1;
server.stat_current_cow_bytes = 0;
+ server.stat_current_cow_updated = 0;
server.stat_current_save_keys_processed = 0;
server.stat_module_progress = 0;
server.stat_current_save_keys_total = 0;
@@ -1729,33 +1721,17 @@ int clientsCronResizeQueryBuffer(client *c) {
* When we want to know what was recently the peak memory usage, we just scan
* such few slots searching for the maximum value. */
#define CLIENTS_PEAK_MEM_USAGE_SLOTS 8
-size_t ClientsPeakMemInput[CLIENTS_PEAK_MEM_USAGE_SLOTS];
-size_t ClientsPeakMemOutput[CLIENTS_PEAK_MEM_USAGE_SLOTS];
+size_t ClientsPeakMemInput[CLIENTS_PEAK_MEM_USAGE_SLOTS] = {0};
+size_t ClientsPeakMemOutput[CLIENTS_PEAK_MEM_USAGE_SLOTS] = {0};
-int clientsCronTrackExpansiveClients(client *c) {
+int clientsCronTrackExpansiveClients(client *c, int time_idx) {
size_t in_usage = sdsZmallocSize(c->querybuf) + c->argv_len_sum +
(c->argv ? zmalloc_size(c->argv) : 0);
size_t out_usage = getClientOutputBufferMemoryUsage(c);
- int i = server.unixtime % CLIENTS_PEAK_MEM_USAGE_SLOTS;
- int zeroidx = (i+1) % CLIENTS_PEAK_MEM_USAGE_SLOTS;
-
- /* Always zero the next sample, so that when we switch to that second, we'll
- * only register samples that are greater in that second without considering
- * the history of such slot.
- *
- * Note: our index may jump to any random position if serverCron() is not
- * called for some reason with the normal frequency, for instance because
- * some slow command is called taking multiple seconds to execute. In that
- * case our array may end containing data which is potentially older
- * than CLIENTS_PEAK_MEM_USAGE_SLOTS seconds: however this is not a problem
- * since here we want just to track if "recently" there were very expansive
- * clients from the POV of memory usage. */
- ClientsPeakMemInput[zeroidx] = 0;
- ClientsPeakMemOutput[zeroidx] = 0;
/* Track the biggest values observed so far in this slot. */
- if (in_usage > ClientsPeakMemInput[i]) ClientsPeakMemInput[i] = in_usage;
- if (out_usage > ClientsPeakMemOutput[i]) ClientsPeakMemOutput[i] = out_usage;
+ if (in_usage > ClientsPeakMemInput[time_idx]) ClientsPeakMemInput[time_idx] = in_usage;
+ if (out_usage > ClientsPeakMemOutput[time_idx]) ClientsPeakMemOutput[time_idx] = out_usage;
return 0; /* This function never terminates the client. */
}
@@ -1828,6 +1804,24 @@ void clientsCron(void) {
iterations = (numclients < CLIENTS_CRON_MIN_ITERATIONS) ?
numclients : CLIENTS_CRON_MIN_ITERATIONS;
+
+ int curr_peak_mem_usage_slot = server.unixtime % CLIENTS_PEAK_MEM_USAGE_SLOTS;
+ /* Always zero the next sample, so that when we switch to that second, we'll
+ * only register samples that are greater in that second without considering
+ * the history of such slot.
+ *
+ * Note: our index may jump to any random position if serverCron() is not
+ * called for some reason with the normal frequency, for instance because
+ * some slow command is called taking multiple seconds to execute. In that
+     * case our array may end up containing data which is potentially older
+ * than CLIENTS_PEAK_MEM_USAGE_SLOTS seconds: however this is not a problem
+ * since here we want just to track if "recently" there were very expansive
+ * clients from the POV of memory usage. */
+ int zeroidx = (curr_peak_mem_usage_slot+1) % CLIENTS_PEAK_MEM_USAGE_SLOTS;
+ ClientsPeakMemInput[zeroidx] = 0;
+ ClientsPeakMemOutput[zeroidx] = 0;
+
+
while(listLength(server.clients) && iterations--) {
client *c;
listNode *head;
@@ -1843,7 +1837,7 @@ void clientsCron(void) {
* terminated. */
if (clientsCronHandleTimeout(c,now)) continue;
if (clientsCronResizeQueryBuffer(c)) continue;
- if (clientsCronTrackExpansiveClients(c)) continue;
+ if (clientsCronTrackExpansiveClients(c, curr_peak_mem_usage_slot)) continue;
if (clientsCronTrackClientsMemUsage(c)) continue;
}
}
@@ -1934,11 +1928,11 @@ void updateCachedTime(int update_daylight_info) {
}
void checkChildrenDone(void) {
- int statloc;
+ int statloc = 0;
pid_t pid;
- if ((pid = wait3(&statloc,WNOHANG,NULL)) != 0) {
- int exitcode = WEXITSTATUS(statloc);
+ if ((pid = waitpid(-1, &statloc, WNOHANG)) != 0) {
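+        /* WEXITSTATUS() is only meaningful when the child exited normally,
+         * so use -1 when it was terminated by a signal. */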
+ int exitcode = WIFEXITED(statloc) ? WEXITSTATUS(statloc) : -1;
int bysignal = 0;
if (WIFSIGNALED(statloc)) bysignal = WTERMSIG(statloc);
@@ -1946,15 +1940,14 @@ void checkChildrenDone(void) {
/* sigKillChildHandler catches the signal and calls exit(), but we
* must make sure not to flag lastbgsave_status, etc incorrectly.
* We could directly terminate the child process via SIGUSR1
- * without handling it, but in this case Valgrind will log an
- * annoying error. */
+         * without handling it. */
if (exitcode == SERVER_CHILD_NOERROR_RETVAL) {
bysignal = SIGUSR1;
exitcode = 1;
}
if (pid == -1) {
- serverLog(LL_WARNING,"wait3() returned an error: %s. "
+ serverLog(LL_WARNING,"waitpid() returned an error: %s. "
"child_type: %s, child_pid = %d",
strerror(errno),
strChildType(server.child_type),
@@ -2662,6 +2655,7 @@ void initServerConfig(void) {
server.aof_rewrite_scheduled = 0;
server.aof_flush_sleep = 0;
server.aof_last_fsync = time(NULL);
+ atomicSet(server.aof_bio_fsync_status,C_OK);
server.aof_rewrite_time_last = -1;
server.aof_rewrite_time_start = -1;
server.aof_lastbgrewrite_status = C_OK;
@@ -3058,14 +3052,15 @@ int listenToPort(int port, socketFds *sfd) {
sfd->fd[sfd->count] = anetTcpServer(server.neterr,port,addr,server.tcp_backlog);
}
if (sfd->fd[sfd->count] == ANET_ERR) {
+ int net_errno = errno;
serverLog(LL_WARNING,
- "Could not create server TCP listening socket %s:%d: %s",
+ "Warning: Could not create server TCP listening socket %s:%d: %s",
addr, port, server.neterr);
- if (errno == EADDRNOTAVAIL && optional)
+ if (net_errno == EADDRNOTAVAIL && optional)
continue;
- if (errno == ENOPROTOOPT || errno == EPROTONOSUPPORT ||
- errno == ESOCKTNOSUPPORT || errno == EPFNOSUPPORT ||
- errno == EAFNOSUPPORT)
+ if (net_errno == ENOPROTOOPT || net_errno == EPROTONOSUPPORT ||
+ net_errno == ESOCKTNOSUPPORT || net_errno == EPFNOSUPPORT ||
+ net_errno == EAFNOSUPPORT)
continue;
/* Rollback successful listens before exiting */
@@ -3163,6 +3158,7 @@ void initServer(void) {
server.clients_pending_write = listCreate();
server.clients_pending_read = listCreate();
server.clients_timeout_table = raxNew();
+ server.replication_allowed = 1;
server.slaveseldb = -1; /* Force to emit the first SELECT command. */
server.unblocked_clients = listCreate();
server.ready_keys = listCreate();
@@ -3196,11 +3192,15 @@ void initServer(void) {
/* Open the TCP listening socket for the user commands. */
if (server.port != 0 &&
- listenToPort(server.port,&server.ipfd) == C_ERR)
+ listenToPort(server.port,&server.ipfd) == C_ERR) {
+ serverLog(LL_WARNING, "Failed listening on port %u (TCP), aborting.", server.port);
exit(1);
+ }
if (server.tls_port != 0 &&
- listenToPort(server.tls_port,&server.tlsfd) == C_ERR)
+ listenToPort(server.tls_port,&server.tlsfd) == C_ERR) {
+ serverLog(LL_WARNING, "Failed listening on port %u (TLS), aborting.", server.tls_port);
exit(1);
+ }
/* Open the listening Unix domain socket. */
if (server.unixsocket != NULL) {
@@ -3266,6 +3266,7 @@ void initServer(void) {
server.stat_starttime = time(NULL);
server.stat_peak_memory = 0;
server.stat_current_cow_bytes = 0;
+ server.stat_current_cow_updated = 0;
server.stat_current_save_keys_processed = 0;
server.stat_current_save_keys_total = 0;
server.stat_rdb_cow_bytes = 0;
@@ -3507,6 +3508,7 @@ void redisOpArrayFree(redisOpArray *oa) {
zfree(op->argv);
}
zfree(oa->ops);
+ oa->ops = NULL;
}
/* ====================== Commands lookup and execution ===================== */
@@ -3557,6 +3559,9 @@ struct redisCommand *lookupCommandOrOriginal(sds name) {
void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc,
int flags)
{
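+    /* When propagation has been globally disabled (replication_allowed is 0),
+     * silently drop the command. */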
+ if (!server.replication_allowed)
+ return;
+
/* Propagate a MULTI request once we encounter the first command which
* is a write command.
* This way we'll deliver the MULTI/..../EXEC block as a whole and
@@ -3619,6 +3624,12 @@ void preventCommandPropagation(client *c) {
c->flags |= CLIENT_PREVENT_PROP;
}
+/* Avoid logging any information about this client's arguments
+ * since they contain sensitive information. */
+void preventCommandLogging(client *c) {
+ c->flags |= CLIENT_PREVENT_LOGGING;
+}
+
/* AOF specific version of preventCommandPropagation(). */
void preventCommandAOF(client *c) {
c->flags |= CLIENT_PREVENT_AOF_PROP;
@@ -3629,6 +3640,19 @@ void preventCommandReplication(client *c) {
c->flags |= CLIENT_PREVENT_REPL_PROP;
}
+/* Log the last command a client executed into the slowlog. */
+void slowlogPushCurrentCommand(client *c, struct redisCommand *cmd, ustime_t duration) {
+ /* Some commands may contain sensitive data that should not be available in the slowlog. */
+ if ((c->flags & CLIENT_PREVENT_LOGGING) || (cmd->flags & CMD_SKIP_SLOWLOG))
+ return;
+
+ /* If command argument vector was rewritten, use the original
+ * arguments. */
+ robj **argv = c->original_argv ? c->original_argv : c->argv;
+ int argc = c->original_argv ? c->original_argc : c->argc;
+ slowlogPushEntryIfNeeded(c,argv,argc,duration);
+}
+
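Editor's note: the helper above prefers the pre-rewrite argument vector so the slowlog shows what the user actually typed, even when the command rewrote its own argv for propagation. A minimal standalone sketch of that selection logic, with hypothetical names (not Redis code):

    #include <stdio.h>

    /* Hypothetical, simplified client: original_argv stays NULL unless the
     * command rewrote its arguments (as e.g. EXPIRE may do). */
    typedef struct {
        int argc, original_argc;
        const char **argv, **original_argv;
    } mini_client;

    /* Log the command as the user typed it: fall back to argv only when no
     * original vector was saved. */
    static void log_user_command(const mini_client *c) {
        const char **argv = c->original_argv ? c->original_argv : c->argv;
        int argc = c->original_argv ? c->original_argc : c->argc;
        for (int i = 0; i < argc; i++)
            printf("%s%s", argv[i], i == argc - 1 ? "\n" : " ");
    }

    int main(void) {
        const char *typed[] = {"EXPIRE", "key", "100"};
        const char *rewritten[] = {"PEXPIREAT", "key", "1700000000000"};
        mini_client c = {3, 3, rewritten, typed};
        log_user_command(&c);   /* prints the EXPIRE form, not the rewritten one */
        return 0;
    }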
/* Call() is the core of Redis execution of a command.
*
* The following flags can be passed:
@@ -3731,27 +3755,31 @@ void call(client *c, int flags) {
server.lua_caller->flags |= CLIENT_FORCE_AOF;
}
- /* Log the command into the Slow log if needed, and populate the
- * per-command statistics that we show in INFO commandstats. */
- if (flags & CMD_CALL_SLOWLOG && !(c->cmd->flags & CMD_SKIP_SLOWLOG)) {
- char *latency_event = (c->cmd->flags & CMD_FAST) ?
- "fast-command" : "command";
+ /* Note: the code below uses the real command that was executed;
+ * c->cmd and c->lastcmd may be different, in case of MULTI-EXEC or
+ * re-written commands such as EXPIRE, GEOADD, etc. */
+
+ /* Record the latency this command induced on the main thread,
+ * unless instructed by the caller not to log (this happens when
+ * processing a MULTI-EXEC from inside an AOF). */
+ if (flags & CMD_CALL_SLOWLOG) {
+ char *latency_event = (real_cmd->flags & CMD_FAST) ?
+ "fast-command" : "command";
latencyAddSampleIfNeeded(latency_event,duration/1000);
- /* If command argument vector was rewritten, use the original
- * arguments. */
- robj **argv = c->original_argv ? c->original_argv : c->argv;
- int argc = c->original_argv ? c->original_argc : c->argc;
- /* If the client is blocked we will handle slowlog when it is unblocked . */
- if (!(c->flags & CLIENT_BLOCKED)) {
- slowlogPushEntryIfNeeded(c,argv,argc,duration);
- }
}
- freeClientOriginalArgv(c);
+ /* Log the command into the Slow log if needed.
+ * If the client is blocked we will handle slowlog when it is unblocked. */
+ if ((flags & CMD_CALL_SLOWLOG) && !(c->flags & CLIENT_BLOCKED))
+ slowlogPushCurrentCommand(c, real_cmd, duration);
+
+ /* Clear the original argv.
+ * If the client is blocked we will handle slowlog when it is unblocked. */
+ if (!(c->flags & CLIENT_BLOCKED))
+ freeClientOriginalArgv(c);
+
+ /* populate the per-command statistics that we show in INFO commandstats. */
if (flags & CMD_CALL_STATS) {
- /* use the real command that was executed (cmd and lastamc) may be
- * different, in case of MULTI-EXEC or re-written commands such as
- * EXPIRE, GEOADD, etc. */
real_cmd->microseconds += duration;
real_cmd->calls++;
}
@@ -3916,6 +3944,16 @@ static int cmdHasMovableKeys(struct redisCommand *cmd) {
* other operations can be performed by the caller. Otherwise
* if C_ERR is returned the client was destroyed (i.e. after QUIT). */
int processCommand(client *c) {
+ if (!server.lua_timedout) {
+ /* Both EXEC and EVAL call call() directly, so there should be
+ * no way in_exec, in_eval or propagate_in_transaction is 1.
+ * That is, unless lua_timedout is set, in which case the client may
+ * run some commands. */
+ serverAssert(!server.propagate_in_transaction);
+ serverAssert(!server.in_exec);
+ serverAssert(!server.in_eval);
+ }
+
moduleCallCommandFilters(c);
/* The QUIT command is handled separately. Normal command procs will
@@ -3974,18 +4012,30 @@ int processCommand(client *c) {
/* Check if the user can run this command according to the current
* ACLs. */
- int acl_keypos;
- int acl_retval = ACLCheckCommandPerm(c,&acl_keypos);
+ int acl_errpos;
+ int acl_retval = ACLCheckAllPerm(c,&acl_errpos);
if (acl_retval != ACL_OK) {
- addACLLogEntry(c,acl_retval,acl_keypos,NULL);
- if (acl_retval == ACL_DENIED_CMD)
+ addACLLogEntry(c,acl_retval,acl_errpos,NULL);
+ switch (acl_retval) {
+ case ACL_DENIED_CMD:
rejectCommandFormat(c,
"-NOPERM this user has no permissions to run "
"the '%s' command or its subcommand", c->cmd->name);
- else
+ break;
+ case ACL_DENIED_KEY:
rejectCommandFormat(c,
"-NOPERM this user has no permissions to access "
"one of the keys used as arguments");
+ break;
+ case ACL_DENIED_CHANNEL:
+ rejectCommandFormat(c,
+ "-NOPERM this user has no permissions to access "
+ "one of the channels used as arguments");
+ break;
+ default:
+ rejectCommandFormat(c, "no permission");
+ break;
+ }
return C_OK;
}
@@ -4144,6 +4194,7 @@ int processCommand(client *c) {
c->cmd->proc != discardCommand &&
c->cmd->proc != watchCommand &&
c->cmd->proc != unwatchCommand &&
+ c->cmd->proc != resetCommand &&
!(c->cmd->proc == shutdownCommand &&
c->argc == 2 &&
tolower(((char*)c->argv[1]->ptr)[0]) == 'n') &&
@@ -4272,7 +4323,10 @@ int prepareForShutdown(int flags) {
/* Append only file: flush buffers and fsync() the AOF at exit */
serverLog(LL_NOTICE,"Calling fsync() on the AOF file.");
flushAppendOnlyFile(1);
- redis_fsync(server.aof_fd);
+ if (redis_fsync(server.aof_fd) == -1) {
+ serverLog(LL_WARNING,"Fail to fsync the AOF file: %s.",
+ strerror(errno));
+ }
}
/* Create a new RDB file before exiting. */
@@ -4335,13 +4389,20 @@ int writeCommandsDeniedByDiskError(void) {
server.lastbgsave_status == C_ERR)
{
return DISK_ERROR_TYPE_RDB;
- } else if (server.aof_state != AOF_OFF &&
- server.aof_last_write_status == C_ERR)
- {
- return DISK_ERROR_TYPE_AOF;
- } else {
- return DISK_ERROR_TYPE_NONE;
+ } else if (server.aof_state != AOF_OFF) {
+ if (server.aof_last_write_status == C_ERR) {
+ return DISK_ERROR_TYPE_AOF;
+ }
+ /* AOF fsync error. */
+ int aof_bio_fsync_status;
+ atomicGet(server.aof_bio_fsync_status,aof_bio_fsync_status);
+ if (aof_bio_fsync_status == C_ERR) {
+ atomicGet(server.aof_bio_fsync_errno,server.aof_last_write_errno);
+ return DISK_ERROR_TYPE_AOF;
+ }
}
+
+ return DISK_ERROR_TYPE_NONE;
}
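Editor's note: the new branch above reads a status and an errno published by the background fsync job through the atomicGet/atomicSet wrappers. A minimal sketch of that producer/consumer pattern using plain C11 atomics and hypothetical names (not the actual bio implementation):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <errno.h>

    /* Values published by a background fsync job, read by the main thread. */
    static atomic_int bio_fsync_status = 0;     /* 0 = ok, -1 = error */
    static atomic_int bio_fsync_errno  = 0;

    /* Background side: record errno first and status last, so a reader that
     * sees the error status also sees a valid errno. */
    static void bio_report_fsync_result(int ret) {
        if (ret == -1) {
            atomic_store(&bio_fsync_errno, errno);
            atomic_store(&bio_fsync_status, -1);
        } else {
            atomic_store(&bio_fsync_status, 0);
        }
    }

    /* Main-thread side: mirrors the shape of the check added above. */
    static int disk_error_from_bio(void) {
        if (atomic_load(&bio_fsync_status) == -1) {
            int err = atomic_load(&bio_fsync_errno);
            fprintf(stderr, "AOF fsync failed in background: errno=%d\n", err);
            return 1;
        }
        return 0;
    }

    int main(void) {
        errno = EIO;
        bio_report_fsync_result(-1);   /* simulate a failed fsync in the bio thread */
        return disk_error_from_bio() ? 1 : 0;
    }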
/* The PING command. It works in a different way if the client is in
@@ -4801,12 +4862,15 @@ sds genRedisInfoString(const char *section) {
} else if (server.stat_current_save_keys_total) {
fork_perc = ((double)server.stat_current_save_keys_processed / server.stat_current_save_keys_total) * 100;
}
-
+ int aof_bio_fsync_status;
+ atomicGet(server.aof_bio_fsync_status,aof_bio_fsync_status);
+
info = sdscatprintf(info,
"# Persistence\r\n"
"loading:%d\r\n"
"current_cow_size:%zu\r\n"
- "current_fork_perc:%.2f%%\r\n"
+ "current_cow_size_age:%lu\r\n"
+ "current_fork_perc:%.2f\r\n"
"current_save_keys_processed:%zu\r\n"
"current_save_keys_total:%zu\r\n"
"rdb_changes_since_last_save:%lld\r\n"
@@ -4828,6 +4892,7 @@ sds genRedisInfoString(const char *section) {
"module_fork_last_cow_size:%zu\r\n",
(int)server.loading,
server.stat_current_cow_bytes,
+ server.stat_current_cow_updated ? (unsigned long) elapsedMs(server.stat_current_cow_updated) / 1000 : 0,
fork_perc,
server.stat_current_save_keys_processed,
server.stat_current_save_keys_total,
@@ -4846,7 +4911,8 @@ sds genRedisInfoString(const char *section) {
(intmax_t)((server.child_type != CHILD_TYPE_AOF) ?
-1 : time(NULL)-server.aof_rewrite_time_start),
(server.aof_lastbgrewrite_status == C_OK) ? "ok" : "err",
- (server.aof_last_write_status == C_OK) ? "ok" : "err",
+ (server.aof_last_write_status == C_OK &&
+ aof_bio_fsync_status == C_OK) ? "ok" : "err",
server.stat_aof_cow_bytes,
server.child_type == CHILD_TYPE_MODULE,
server.stat_module_cow_bytes);
@@ -5054,13 +5120,16 @@ sds genRedisInfoString(const char *section) {
if (server.repl_state != REPL_STATE_CONNECTED) {
info = sdscatprintf(info,
"master_link_down_since_seconds:%jd\r\n",
- (intmax_t)(server.unixtime-server.repl_down_since));
+ server.repl_down_since ?
+ (intmax_t)(server.unixtime-server.repl_down_since) : -1);
}
info = sdscatprintf(info,
"slave_priority:%d\r\n"
- "slave_read_only:%d\r\n",
+ "slave_read_only:%d\r\n"
+ "replica_announced:%d\r\n",
server.slave_priority,
- server.repl_slave_ro);
+ server.repl_slave_ro,
+ server.replica_announced);
}
info = sdscatprintf(info,
@@ -5800,6 +5869,7 @@ int redisFork(int purpose) {
server.child_pid = childpid;
server.child_type = purpose;
server.stat_current_cow_bytes = 0;
+ server.stat_current_cow_updated = 0;
server.stat_current_save_keys_processed = 0;
server.stat_module_progress = 0;
server.stat_current_save_keys_total = dbTotalServerKeyCount();
@@ -6035,36 +6105,78 @@ int iAmMaster(void) {
(server.cluster_enabled && nodeIsMaster(server.cluster->myself)));
}
+#ifdef REDIS_TEST
+typedef int redisTestProc(int argc, char **argv, int accurate);
+struct redisTest {
+ char *name;
+ redisTestProc *proc;
+ int failed;
+} redisTests[] = {
+ {"ziplist", ziplistTest},
+ {"quicklist", quicklistTest},
+ {"intset", intsetTest},
+ {"zipmap", zipmapTest},
+ {"sha1test", sha1Test},
+ {"util", utilTest},
+ {"endianconv", endianconvTest},
+ {"crc64", crc64Test},
+ {"zmalloc", zmalloc_test},
+ {"sds", sdsTest},
+ {"dict", dictTest}
+};
+redisTestProc *getTestProcByName(const char *name) {
+ int numtests = sizeof(redisTests)/sizeof(struct redisTest);
+ for (int j = 0; j < numtests; j++) {
+ if (!strcasecmp(name,redisTests[j].name)) {
+ return redisTests[j].proc;
+ }
+ }
+ return NULL;
+}
+#endif
+
int main(int argc, char **argv) {
struct timeval tv;
int j;
char config_from_stdin = 0;
#ifdef REDIS_TEST
- if (argc == 3 && !strcasecmp(argv[1], "test")) {
- if (!strcasecmp(argv[2], "ziplist")) {
- return ziplistTest(argc, argv);
- } else if (!strcasecmp(argv[2], "quicklist")) {
- quicklistTest(argc, argv);
- } else if (!strcasecmp(argv[2], "intset")) {
- return intsetTest(argc, argv);
- } else if (!strcasecmp(argv[2], "zipmap")) {
- return zipmapTest(argc, argv);
- } else if (!strcasecmp(argv[2], "sha1test")) {
- return sha1Test(argc, argv);
- } else if (!strcasecmp(argv[2], "util")) {
- return utilTest(argc, argv);
- } else if (!strcasecmp(argv[2], "endianconv")) {
- return endianconvTest(argc, argv);
- } else if (!strcasecmp(argv[2], "crc64")) {
- return crc64Test(argc, argv);
- } else if (!strcasecmp(argv[2], "zmalloc")) {
- return zmalloc_test(argc, argv);
- } else if (!strcasecmp(argv[2], "sds")) {
- return sdsTest(argc, argv);
+ if (argc >= 3 && !strcasecmp(argv[1], "test")) {
+ int accurate = 0;
+ for (j = 3; j < argc; j++) {
+ if (!strcasecmp(argv[j], "--accurate")) {
+ accurate = 1;
+ }
}
- return -1; /* test not found */
+ if (!strcasecmp(argv[2], "all")) {
+ int numtests = sizeof(redisTests)/sizeof(struct redisTest);
+ for (j = 0; j < numtests; j++) {
+ redisTests[j].failed = (redisTests[j].proc(argc,argv,accurate) != 0);
+ }
+
+ /* Report tests result */
+ int failed_num = 0;
+ for (j = 0; j < numtests; j++) {
+ if (redisTests[j].failed) {
+ failed_num++;
+ printf("[failed] Test - %s\n", redisTests[j].name);
+ } else {
+ printf("[ok] Test - %s\n", redisTests[j].name);
+ }
+ }
+
+ printf("%d tests, %d passed, %d failed\n", numtests,
+ numtests-failed_num, failed_num);
+
+ return failed_num == 0 ? 0 : 1;
+ } else {
+ redisTestProc *proc = getTestProcByName(argv[2]);
+ if (!proc) return -1; /* test not found */
+ return proc(argc,argv,accurate);
+ }
+
+ return 0;
}
#endif
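Editor's note: with this change the embedded unit tests can be dispatched by name or all at once. Assuming a build compiled with -DREDIS_TEST (for example via the Makefile's REDIS_CFLAGS variable), invocation looks roughly like:

    ./redis-server test all --accurate     # run every registered test in slow/accurate mode
    ./redis-server test ziplist            # run a single test by name

The --accurate flag is scanned from the remaining arguments (see the loop above), and in the "all" mode the process exits non-zero if any test failed.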
@@ -6150,7 +6262,6 @@ int main(int argc, char **argv) {
server.exec_argv[1] = zstrdup(server.configfile);
j = 2; // Skip this arg when parsing options
}
-
while(j < argc) {
/* Either first or last argument - Should we read config from stdin? */
if (argv[j][0] == '-' && argv[j][1] == '\0' && (j == 1 || j == argc-1)) {
@@ -6173,16 +6284,11 @@ int main(int argc, char **argv) {
j++;
}
- if (server.sentinel_mode && ! server.configfile) {
- serverLog(LL_WARNING,
- "Sentinel needs config file on disk to save state. Exiting...");
- exit(1);
- }
loadServerConfig(server.configfile, config_from_stdin, options);
if (server.sentinel_mode) loadSentinelConfigFromQueue();
sdsfree(options);
}
-
+ if (server.sentinel_mode) sentinelCheckConfigFile();
server.supervised = redisIsSupervised(server.supervised_mode);
int background = server.daemonize && !server.supervised;
if (background) daemonize();
@@ -6197,7 +6303,7 @@ int main(int argc, char **argv) {
(int)getpid());
if (argc == 1) {
- serverLog(LL_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis");
+ serverLog(LL_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/redis.conf", argv[0]);
} else {
serverLog(LL_WARNING, "Configuration loaded");
}
@@ -6251,10 +6357,10 @@ int main(int argc, char **argv) {
if (server.supervised_mode == SUPERVISED_SYSTEMD) {
if (!server.masterhost) {
redisCommunicateSystemd("STATUS=Ready to accept connections\n");
- redisCommunicateSystemd("READY=1\n");
} else {
- redisCommunicateSystemd("STATUS=Waiting for MASTER <-> REPLICA sync\n");
+ redisCommunicateSystemd("STATUS=Ready to accept connections in read-only mode. Waiting for MASTER <-> REPLICA sync\n");
}
+ redisCommunicateSystemd("READY=1\n");
}
} else {
ACLLoadUsersAtStartup();
diff --git a/src/server.h b/src/server.h
index e241bad70..d35eaa425 100644
--- a/src/server.h
+++ b/src/server.h
@@ -130,6 +130,11 @@ typedef long long ustime_t; /* microsecond time type. */
* special code. */
#define SERVER_CHILD_NOERROR_RETVAL 255
+/* Reading copy-on-write info is sometimes expensive and may slow down child
+ * processes that report it continuously. We measure the cost of obtaining it
+ * and hold back additional reading based on this factor. */
+#define CHILD_COW_DUTY_CYCLE 100
+
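Editor's note: a small sketch of the duty-cycle idea the comment above describes, with hypothetical names (the real code lives in the child-info reporting path, not shown in this hunk). If one probe took T microseconds, the next probe is allowed only after roughly T * duty_cycle microseconds have passed:

    #include <stdint.h>
    #include <stddef.h>
    #include <time.h>

    #define COW_DUTY_CYCLE 100   /* keep probe cost to ~1/100 of elapsed time */

    static uint64_t now_us(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
    }

    /* Returns a fresh value when a probe is due, otherwise the cached one. */
    static size_t cow_bytes_throttled(size_t (*expensive_probe)(void)) {
        static uint64_t next_allowed = 0;
        static size_t cached = 0;
        uint64_t start = now_us();

        if (start < next_allowed) return cached;      /* still inside back-off window */
        cached = expensive_probe();
        uint64_t cost = now_us() - start;
        next_allowed = start + cost * COW_DUTY_CYCLE; /* costly probes back off longer */
        return cached;
    }

    static size_t fake_probe(void) { return 4096; }

    int main(void) {
        return cow_bytes_throttled(fake_probe) == 4096 ? 0 : 1;
    }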
/* Instantaneous metrics tracking. */
#define STATS_METRIC_SAMPLES 16 /* Number of samples per metric. */
#define STATS_METRIC_COMMAND 0 /* Number of commands executed. */
@@ -274,6 +279,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
and AOF client */
#define CLIENT_REPL_RDBONLY (1ULL<<42) /* This client is a replica that only wants
RDB without replication buffer. */
+#define CLIENT_PREVENT_LOGGING (1ULL<<43) /* Prevent logging of command to slowlog */
/* Client block type (btype field in client structure)
* if CLIENT_BLOCKED flag is set. */
@@ -476,7 +482,8 @@ typedef enum {
#define NOTIFY_STREAM (1<<10) /* t */
#define NOTIFY_KEY_MISS (1<<11) /* m (Note: This one is excluded from NOTIFY_ALL on purpose) */
#define NOTIFY_LOADED (1<<12) /* module only key space notification, indicate a key loaded from rdb */
-#define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED | NOTIFY_STREAM) /* A flag */
+#define NOTIFY_MODULE (1<<13) /* d, module key space notification */
+#define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED | NOTIFY_STREAM | NOTIFY_MODULE) /* A flag */
/* Get the first bind addr or NULL */
#define NET_FIRST_BIND_ADDR (server.bindaddr_count ? server.bindaddr[0] : NULL)
@@ -735,8 +742,6 @@ typedef struct multiState {
int cmd_inv_flags; /* Same as cmd_flags, OR-ing the ~flags. so that it
is possible to know if all the commands have a
certain flag. */
- int minreplicas; /* MINREPLICAS for synchronous replication */
- time_t minreplicas_timeout; /* MINREPLICAS timeout as unixtime. */
} multiState;
/* This structure holds the blocking operation state for a client.
@@ -762,7 +767,6 @@ typedef struct blockingState {
size_t xread_count; /* XREAD COUNT option. */
robj *xread_group; /* XREADGROUP group name. */
robj *xread_consumer; /* XREADGROUP consumer name. */
- mstime_t xread_retry_time, xread_retry_ttl;
int xread_group_noack;
/* BLOCKED_WAIT */
@@ -841,7 +845,7 @@ typedef struct {
the flag ALLKEYS is set in the user. */
list *channels; /* A list of allowed Pub/Sub channel patterns. If this
field is NULL the user cannot mention any channel in a
- `PUBLISH` or [P][UNSUSBSCRIBE] command, unless the flag
+ `PUBLISH` or [P][UNSUBSCRIBE] command, unless the flag
ALLCHANNELS is set in the user. */
} user;
@@ -897,6 +901,7 @@ typedef struct client {
long long reploff; /* Applied replication offset if this is a master. */
long long repl_ack_off; /* Replication ack offset, if this is a slave. */
long long repl_ack_time;/* Replication ack time, if this is a slave. */
+ long long repl_last_partial_write; /* The last time the server did a partial write from the RDB child pipe to this replica */
long long psync_initial_offset; /* FULLRESYNC reply offset other slaves
copying this slave output buffer
should use. */
@@ -1116,8 +1121,10 @@ typedef struct socketFds {
typedef struct redisTLSContextConfig {
char *cert_file; /* Server side and optionally client side cert file name */
char *key_file; /* Private key filename for cert_file */
+ char *key_file_pass; /* Optional password for key_file */
char *client_cert_file; /* Certificate to use as a client; if none, use cert_file */
char *client_key_file; /* Private key filename for client_cert_file */
+ char *client_key_file_pass; /* Optional password for client_key_file */
char *dh_params_file;
char *ca_cert_file;
char *ca_cert_dir;
@@ -1281,6 +1288,7 @@ struct redisServer {
redisAtomic long long stat_net_input_bytes; /* Bytes read from network. */
redisAtomic long long stat_net_output_bytes; /* Bytes written to network. */
size_t stat_current_cow_bytes; /* Copy on write bytes while child is active. */
+ monotime stat_current_cow_updated; /* Last update time of stat_current_cow_bytes */
size_t stat_current_save_keys_processed; /* Processed keys while child is active. */
size_t stat_current_save_keys_total; /* Number of keys when child started. */
size_t stat_rdb_cow_bytes; /* Copy on write bytes during RDB saving. */
@@ -1353,9 +1361,11 @@ struct redisServer {
int aof_rewrite_incremental_fsync;/* fsync incrementally while aof rewriting? */
int rdb_save_incremental_fsync; /* fsync incrementally while rdb saving? */
int aof_last_write_status; /* C_OK or C_ERR */
- int aof_last_write_errno; /* Valid if aof_last_write_status is ERR */
+ int aof_last_write_errno; /* Valid if aof write/fsync status is ERR */
int aof_load_truncated; /* Don't stop on unexpected AOF EOF. */
int aof_use_rdb_preamble; /* Use RDB preamble on AOF rewrites. */
+ redisAtomic int aof_bio_fsync_status; /* Status of AOF fsync in bio job. */
+ redisAtomic int aof_bio_fsync_errno; /* Errno of AOF fsync in bio job. */
/* AOF pipes used to communicate between parent and child during rewrite. */
int aof_pipe_write_data_to_child;
int aof_pipe_read_data_from_parent;
@@ -1403,6 +1413,7 @@ struct redisServer {
int child_info_nread; /* Num of bytes of the last read from pipe */
/* Propagation of commands in AOF / replication */
redisOpArray also_propagate; /* Additional command to propagate. */
+ int replication_allowed; /* Are we allowed to replicate? */
/* Logging */
char *logfile; /* Path of log file */
int syslog_enabled; /* Is syslog enabled? */
@@ -1461,6 +1472,7 @@ struct redisServer {
time_t repl_down_since; /* Unix time at which link with master went down */
int repl_disable_tcp_nodelay; /* Disable TCP_NODELAY after SYNC? */
int slave_priority; /* Reported in INFO and used by Sentinel. */
+ int replica_announced; /* If true, replica is announced by Sentinel */
int slave_announce_port; /* Give the master this listening port. */
char *slave_announce_ip; /* Give the master this ip address. */
/* The following two fields is where we store master PSYNC replid/offset
@@ -1534,6 +1546,7 @@ struct redisServer {
char *cluster_configfile; /* Cluster auto-generated config file name. */
struct clusterState *cluster; /* State of the cluster */
int cluster_migration_barrier; /* Cluster replicas migration barrier. */
+ int cluster_allow_replica_migration; /* Automatic replica migrations to orphaned masters and from empty masters */
int cluster_slave_validity_factor; /* Slave max data age for failover. */
int cluster_require_full_coverage; /* If true, put the cluster down if
there is at least an uncovered slot.*/
@@ -1541,6 +1554,7 @@ struct redisServer {
if the master is in failure state. */
char *cluster_announce_ip; /* IP address to announce on cluster bus. */
int cluster_announce_port; /* base port to announce on cluster bus. */
+ int cluster_announce_tls_port; /* TLS port to announce on cluster bus. */
int cluster_announce_bus_port; /* bus port to announce on cluster bus. */
int cluster_module_flags; /* Set of flags that Redis modules are able
to set in order to suppress certain
@@ -1585,7 +1599,7 @@ struct redisServer {
sds requirepass; /* Remember the cleartext password set with
the old "requirepass" directive for
backward compatibility with Redis <= 5. */
- int acl_pubusub_default; /* Default ACL pub/sub channels flag */
+ int acl_pubsub_default; /* Default ACL pub/sub channels flag */
/* Assert & bug reporting */
int watchdog_period; /* Software watchdog period in ms. 0 = off */
/* System hardware info */
@@ -1744,6 +1758,7 @@ void moduleLoadFromQueue(void);
int moduleGetCommandKeysViaAPI(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result);
moduleType *moduleTypeLookupModuleByID(uint64_t id);
void moduleTypeNameByID(char *name, uint64_t moduleid);
+const char *moduleTypeModuleName(moduleType *mt);
void moduleFreeContext(struct RedisModuleCtx *ctx);
void unblockClientFromModule(client *c);
void moduleHandleBlockedClients(void);
@@ -1934,7 +1949,8 @@ void flagTransaction(client *c);
void execCommandAbort(client *c, sds error);
void execCommandPropagateMulti(int dbid);
void execCommandPropagateExec(int dbid);
-void beforePropagateMultiOrExec(int multi);
+void beforePropagateMulti();
+void afterPropagateExec();
/* Redis object implementation */
void decrRefCount(robj *o);
@@ -2083,7 +2099,7 @@ int isMutuallyExclusiveChildType(int type);
extern rax *Users;
extern user *DefaultUser;
void ACLInit(void);
-/* Return values for ACLCheckCommandPerm() and ACLCheckPubsubPerm(). */
+/* Return values for ACLCheckAllPerm(). */
#define ACL_OK 0
#define ACL_DENIED_CMD 1
#define ACL_DENIED_KEY 2
@@ -2094,8 +2110,7 @@ int ACLAuthenticateUser(client *c, robj *username, robj *password);
unsigned long ACLGetCommandID(const char *cmdname);
void ACLClearCommandID(void);
user *ACLGetUserByName(const char *name, size_t namelen);
-int ACLCheckCommandPerm(client *c, int *keyidxptr);
-int ACLCheckPubsubPerm(client *c, int idx, int count, int literal, int *idxptr);
+int ACLCheckAllPerm(client *c, int *idxptr);
int ACLSetUser(user *u, const char *op, ssize_t oplen);
sds ACLDefaultUserFirstPassword(void);
uint64_t ACLGetCommandCategoryFlagByName(const char *name);
@@ -2113,21 +2128,18 @@ void ACLUpdateDefaultUserPassword(sds password);
/* Sorted sets data type */
/* Input flags. */
-#define ZADD_NONE 0
-#define ZADD_INCR (1<<0) /* Increment the score instead of setting it. */
-#define ZADD_NX (1<<1) /* Don't touch elements not already existing. */
-#define ZADD_XX (1<<2) /* Only touch elements already existing. */
-#define ZADD_GT (1<<7) /* Only update existing when new scores are higher. */
-#define ZADD_LT (1<<8) /* Only update existing when new scores are lower. */
+#define ZADD_IN_NONE 0
+#define ZADD_IN_INCR (1<<0) /* Increment the score instead of setting it. */
+#define ZADD_IN_NX (1<<1) /* Don't touch elements not already existing. */
+#define ZADD_IN_XX (1<<2) /* Only touch elements already existing. */
+#define ZADD_IN_GT (1<<3) /* Only update existing when new scores are higher. */
+#define ZADD_IN_LT (1<<4) /* Only update existing when new scores are lower. */
/* Output flags. */
-#define ZADD_NOP (1<<3) /* Operation not performed because of conditionals.*/
-#define ZADD_NAN (1<<4) /* Only touch elements already existing. */
-#define ZADD_ADDED (1<<5) /* The element was new and was added. */
-#define ZADD_UPDATED (1<<6) /* The element already existed, score updated. */
-
-/* Flags only used by the ZADD command but not by zsetAdd() API: */
-#define ZADD_CH (1<<16) /* Return num of elements added or updated. */
+#define ZADD_OUT_NOP (1<<0) /* Operation not performed because of conditionals.*/
+#define ZADD_OUT_NAN (1<<1) /* Only touch elements already existing. */
+#define ZADD_OUT_ADDED (1<<2) /* The element was new and was added. */
+#define ZADD_OUT_UPDATED (1<<3) /* The element already existed, score updated. */
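Editor's note: the rename separates caller-supplied options (ZADD_IN_*) from function-reported results (ZADD_OUT_*), so the two sets no longer share one bit space. A tiny sketch of the resulting calling convention, with hypothetical names:

    #include <stdio.h>

    /* Input options (what the caller asks for). */
    #define IN_NX       (1<<0)   /* only add, never update */
    /* Output results (what actually happened); the numbering may overlap with
     * the inputs because they live in a separate variable now. */
    #define OUT_ADDED   (1<<0)
    #define OUT_UPDATED (1<<1)
    #define OUT_NOP     (1<<2)

    static void upsert(int exists, int in_flags, int *out_flags) {
        *out_flags = 0;                        /* results are rebuilt from scratch */
        if (exists) {
            if (in_flags & IN_NX) { *out_flags |= OUT_NOP; return; }
            *out_flags |= OUT_UPDATED;
        } else {
            *out_flags |= OUT_ADDED;
        }
    }

    int main(void) {
        int out;
        upsert(1, IN_NX, &out);
        printf("nop=%d\n", (out & OUT_NOP) != 0);   /* prints nop=1 */
        return 0;
    }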
/* Struct to hold an inclusive/exclusive range spec by score comparison. */
typedef struct {
@@ -2158,7 +2170,7 @@ void zsetConvert(robj *zobj, int encoding);
void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
int zsetScore(robj *zobj, sds member, double *score);
unsigned long zslGetRank(zskiplist *zsl, double score, sds o);
-int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore);
+int zsetAdd(robj *zobj, double score, sds ele, int in_flags, int *out_flags, double *newscore);
long zsetRank(robj *zobj, sds ele, int reverse);
int zsetDel(robj *zobj, sds ele);
robj *zsetDup(robj *o);
@@ -2199,14 +2211,16 @@ void redisOpArrayInit(redisOpArray *oa);
void redisOpArrayFree(redisOpArray *oa);
void forceCommandPropagation(client *c, int flags);
void preventCommandPropagation(client *c);
+void preventCommandLogging(client *c);
void preventCommandAOF(client *c);
void preventCommandReplication(client *c);
+void slowlogPushCurrentCommand(client *c, struct redisCommand *cmd, ustime_t duration);
int prepareForShutdown(int flags);
#ifdef __GNUC__
-void serverLog(int level, const char *fmt, ...)
+void _serverLog(int level, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
#else
-void serverLog(int level, const char *fmt, ...);
+void _serverLog(int level, const char *fmt, ...);
#endif
void serverLogRaw(int level, const char *msg);
void serverLogFromHandler(int level, const char *msg);
@@ -2395,6 +2409,7 @@ const char *sentinelHandleConfiguration(char **argv, int argc);
void queueSentinelConfig(sds *argv, int argc, int linenum, sds line);
void loadSentinelConfigFromQueue(void);
void sentinelIsRunning(void);
+void sentinelCheckConfigFile(void);
/* redis-check-rdb & aof */
int redis_check_rdb(char *rdbfilename, FILE *fp);
@@ -2712,8 +2727,16 @@ void killIOThreads(void);
void killThreads(void);
void makeThreadKillable(void);
+/* Use a macro for the log-level check so that the arguments are not evaluated
+ * when the log entry would be ignored due to a low level. */
+#define serverLog(level, ...) do {\
+ if (((level)&0xff) < server.verbosity) break;\
+ _serverLog(level, __VA_ARGS__);\
+ } while(0)
+
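Editor's note: because the level check happens inside the macro before _serverLog is reached, the format arguments are never evaluated for filtered-out levels. A standalone sketch of the same pattern (hypothetical names, not the Redis macro):

    #include <stdio.h>
    #include <stdarg.h>

    static int verbosity = 2;   /* messages below this level are dropped */

    static void log_raw(int level, const char *fmt, ...) {
        va_list ap;
        (void)level;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    /* The if/break runs before the variadic arguments exist, so an expensive
     * argument expression is skipped entirely when the level is filtered out. */
    #define LOG(level, ...) do { \
            if ((level) < verbosity) break; \
            log_raw((level), __VA_ARGS__); \
        } while (0)

    static const char *expensive_summary(void) {
        fputs("summary computed\n", stderr);   /* side effect makes skipping visible */
        return "state: ...";
    }

    int main(void) {
        LOG(0, "debug: %s\n", expensive_summary());   /* filtered: summary never computed */
        LOG(3, "warn: %s\n",  expensive_summary());   /* logged: summary computed once */
        return 0;
    }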
/* TLS stuff */
void tlsInit(void);
+void tlsCleanup(void);
int tlsConfigure(redisTLSContextConfig *ctx_config);
#define redisDebug(fmt, ...) \
diff --git a/src/sha1.c b/src/sha1.c
index ce487e367..f2423c052 100644
--- a/src/sha1.c
+++ b/src/sha1.c
@@ -201,7 +201,7 @@ void SHA1Final(unsigned char digest[20], SHA1_CTX* context)
#define BUFSIZE 4096
#define UNUSED(x) (void)(x)
-int sha1Test(int argc, char **argv)
+int sha1Test(int argc, char **argv, int accurate)
{
SHA1_CTX ctx;
unsigned char hash[20], buf[BUFSIZE];
@@ -209,6 +209,7 @@ int sha1Test(int argc, char **argv)
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
for(i=0;i<BUFSIZE;i++)
buf[i] = i;
diff --git a/src/sha1.h b/src/sha1.h
index f41691258..9056f864a 100644
--- a/src/sha1.h
+++ b/src/sha1.h
@@ -19,6 +19,6 @@ void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len);
void SHA1Final(unsigned char digest[20], SHA1_CTX* context);
#ifdef REDIS_TEST
-int sha1Test(int argc, char **argv);
+int sha1Test(int argc, char **argv, int accurate);
#endif
#endif
diff --git a/src/t_hash.c b/src/t_hash.c
index eb980578c..d88b80b0f 100644
--- a/src/t_hash.c
+++ b/src/t_hash.c
@@ -572,7 +572,7 @@ static int _hashZiplistEntryValidation(unsigned char *p, void *userdata) {
return 1;
}
-/* Validate the integrity of the data stracture.
+/* Validate the integrity of the data structure.
* when `deep` is 0, only the integrity of the header is validated.
* when `deep` is 1, we scan all the entries one by one. */
int hashZiplistValidateIntegrity(unsigned char *zl, size_t size, int deep) {
diff --git a/src/t_list.c b/src/t_list.c
index 961f59ae4..f8ca27458 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -592,20 +592,14 @@ void lposCommand(client *c) {
}
} else if (!strcasecmp(opt,"COUNT") && moreargs) {
j++;
- if (getLongFromObjectOrReply(c, c->argv[j], &count, NULL) != C_OK)
+ if (getPositiveLongFromObjectOrReply(c, c->argv[j], &count,
+ "COUNT can't be negative") != C_OK)
return;
- if (count < 0) {
- addReplyError(c,"COUNT can't be negative");
- return;
- }
} else if (!strcasecmp(opt,"MAXLEN") && moreargs) {
j++;
- if (getLongFromObjectOrReply(c, c->argv[j], &maxlen, NULL) != C_OK)
+ if (getPositiveLongFromObjectOrReply(c, c->argv[j], &maxlen,
+ "MAXLEN can't be negative") != C_OK)
return;
- if (maxlen < 0) {
- addReplyError(c,"MAXLEN can't be negative");
- return;
- }
} else {
addReplyErrorObject(c,shared.syntaxerr);
return;
diff --git a/src/t_stream.c b/src/t_stream.c
index 18138bec5..1b2fe3262 100644
--- a/src/t_stream.c
+++ b/src/t_stream.c
@@ -861,7 +861,7 @@ int64_t streamTrimByID(stream *s, streamID minid, int approx) {
return streamTrim(s, &args);
}
-/* Parse the arguements of XADD/XTRIM.
+/* Parse the arguments of XADD/XTRIM.
*
* See streamAddTrimArgs for more details about the arguments handled.
*
@@ -1313,7 +1313,8 @@ void streamLastValidID(stream *s, streamID *maxid)
streamIterator si;
streamIteratorStart(&si,s,NULL,NULL,1);
int64_t numfields;
- streamIteratorGetID(&si,maxid,&numfields);
+ if (!streamIteratorGetID(&si,maxid,&numfields) && s->length)
+ serverPanic("Corrupt stream, length is %llu, but no max id", (unsigned long long)s->length);
streamIteratorStop(&si);
}
@@ -3050,12 +3051,8 @@ void xautoclaimCommand(client *c) {
int moreargs = (c->argc-1) - j; /* Number of additional arguments. */
char *opt = c->argv[j]->ptr;
if (!strcasecmp(opt,"COUNT") && moreargs) {
- if (getPositiveLongFromObjectOrReply(c,c->argv[j+1],&count,NULL) != C_OK)
+ if (getRangeLongFromObjectOrReply(c,c->argv[j+1],1,LONG_MAX,&count,"COUNT must be > 0") != C_OK)
return;
- if (count == 0) {
- addReplyError(c,"COUNT must be > 0");
- return;
- }
j++;
} else if (!strcasecmp(opt,"JUSTID")) {
justid = 1;
@@ -3120,7 +3117,9 @@ void xautoclaimCommand(client *c) {
/* Update the consumer and idle time. */
nack->delivery_time = now;
- nack->delivery_count++;
+ /* Increment the delivery attempts counter unless the JUSTID option is provided */
+ if (!justid)
+ nack->delivery_count++;
if (nack->consumer != consumer) {
/* Add the entry in the new consumer local PEL. */
@@ -3148,6 +3147,9 @@ void xautoclaimCommand(client *c) {
server.dirty++;
}
+ /* We need to return the next entry as a cursor for the next XAUTOCLAIM call */
+ raxNext(&ri);
+
streamID endid;
if (raxEOF(&ri)) {
endid.ms = endid.seq = 0;
@@ -3545,8 +3547,8 @@ NULL
}
}
-/* Validate the integrity stream listpack entries stracture. Both in term of a
- * valid listpack, but also that the stracture of the entires matches a valid
+/* Validate the integrity of the stream listpack entries structure. Both in terms of a
+ * valid listpack, but also that the structure of the entries matches a valid
* stream. return 1 if valid 0 if not valid. */
int streamValidateListpackIntegrity(unsigned char *lp, size_t size, int deep) {
int valid_record;
diff --git a/src/t_string.c b/src/t_string.c
index 3f73363e0..0967e30e1 100644
--- a/src/t_string.c
+++ b/src/t_string.c
@@ -723,7 +723,7 @@ void stralgoCommand(client *c) {
}
}
-/* STRALGO <algo> [IDX] [MINMATCHLEN <len>] [WITHMATCHLEN]
+/* STRALGO <algo> [IDX] [LEN] [MINMATCHLEN <len>] [WITHMATCHLEN]
* STRINGS <string> <string> | KEYS <keya> <keyb>
*/
void stralgoLCS(client *c) {
diff --git a/src/t_zset.c b/src/t_zset.c
index 6df21e300..fb402816d 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -388,9 +388,8 @@ unsigned long zslDeleteRangeByScore(zskiplist *zsl, zrangespec *range, dict *dic
x = zsl->header;
for (i = zsl->level-1; i >= 0; i--) {
- while (x->level[i].forward && (range->minex ?
- x->level[i].forward->score <= range->min :
- x->level[i].forward->score < range->min))
+ while (x->level[i].forward &&
+ !zslValueGteMin(x->level[i].forward->score, range))
x = x->level[i].forward;
update[i] = x;
}
@@ -399,9 +398,7 @@ unsigned long zslDeleteRangeByScore(zskiplist *zsl, zrangespec *range, dict *dic
x = x->level[0].forward;
/* Delete nodes while in range. */
- while (x &&
- (range->maxex ? x->score < range->max : x->score <= range->max))
- {
+ while (x && zslValueLteMax(x->score, range)) {
zskiplistNode *next = x->level[0].forward;
zslDeleteNode(zsl,x,update);
dictDelete(dict,x->ele);
@@ -1279,9 +1276,7 @@ int zsetScore(robj *zobj, sds member, double *score) {
/* Add a new element or update the score of an existing element in a sorted
* set, regardless of its encoding.
*
- * The set of flags change the command behavior. They are passed with an integer
- * pointer since the function will clear the flags and populate them with
- * other flags to indicate different conditions.
+ * The set of flags changes the command behavior.
*
* The input flags are the following:
*
@@ -1323,19 +1318,19 @@ int zsetScore(robj *zobj, sds member, double *score) {
*
* The function does not take ownership of the 'ele' SDS string, but copies
* it if needed. */
-int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) {
+int zsetAdd(robj *zobj, double score, sds ele, int in_flags, int *out_flags, double *newscore) {
/* Turn options into simple to check vars. */
- int incr = (*flags & ZADD_INCR) != 0;
- int nx = (*flags & ZADD_NX) != 0;
- int xx = (*flags & ZADD_XX) != 0;
- int gt = (*flags & ZADD_GT) != 0;
- int lt = (*flags & ZADD_LT) != 0;
- *flags = 0; /* We'll return our response flags. */
+ int incr = (in_flags & ZADD_IN_INCR) != 0;
+ int nx = (in_flags & ZADD_IN_NX) != 0;
+ int xx = (in_flags & ZADD_IN_XX) != 0;
+ int gt = (in_flags & ZADD_IN_GT) != 0;
+ int lt = (in_flags & ZADD_IN_LT) != 0;
+ *out_flags = 0; /* We'll return our response flags. */
double curscore;
/* NaN as input is an error regardless of all the other parameters. */
if (isnan(score)) {
- *flags = ZADD_NAN;
+ *out_flags = ZADD_OUT_NAN;
return 0;
}
@@ -1346,7 +1341,7 @@ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) {
if ((eptr = zzlFind(zobj->ptr,ele,&curscore)) != NULL) {
/* NX? Return, same element already exists. */
if (nx) {
- *flags |= ZADD_NOP;
+ *out_flags |= ZADD_OUT_NOP;
return 1;
}
@@ -1354,22 +1349,24 @@ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) {
if (incr) {
score += curscore;
if (isnan(score)) {
- *flags |= ZADD_NAN;
+ *out_flags |= ZADD_OUT_NAN;
return 0;
}
- if (newscore) *newscore = score;
}
+ /* GT/LT? Only update if score is greater/less than current. */
+ if ((lt && score >= curscore) || (gt && score <= curscore)) {
+ *out_flags |= ZADD_OUT_NOP;
+ return 1;
+ }
+
+ if (newscore) *newscore = score;
+
/* Remove and re-insert when score changed. */
- if (score != curscore &&
- /* LT? Only update if score is less than current. */
- (!lt || score < curscore) &&
- /* GT? Only update if score is greater than current. */
- (!gt || score > curscore))
- {
+ if (score != curscore) {
zobj->ptr = zzlDelete(zobj->ptr,eptr);
zobj->ptr = zzlInsert(zobj->ptr,ele,score);
- *flags |= ZADD_UPDATED;
+ *out_flags |= ZADD_OUT_UPDATED;
}
return 1;
} else if (!xx) {
@@ -1380,10 +1377,10 @@ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) {
sdslen(ele) > server.zset_max_ziplist_value)
zsetConvert(zobj,OBJ_ENCODING_SKIPLIST);
if (newscore) *newscore = score;
- *flags |= ZADD_ADDED;
+ *out_flags |= ZADD_OUT_ADDED;
return 1;
} else {
- *flags |= ZADD_NOP;
+ *out_flags |= ZADD_OUT_NOP;
return 1;
}
} else if (zobj->encoding == OBJ_ENCODING_SKIPLIST) {
@@ -1395,45 +1392,48 @@ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) {
if (de != NULL) {
/* NX? Return, same element already exists. */
if (nx) {
- *flags |= ZADD_NOP;
+ *out_flags |= ZADD_OUT_NOP;
return 1;
}
+
curscore = *(double*)dictGetVal(de);
/* Prepare the score for the increment if needed. */
if (incr) {
score += curscore;
if (isnan(score)) {
- *flags |= ZADD_NAN;
+ *out_flags |= ZADD_OUT_NAN;
return 0;
}
- if (newscore) *newscore = score;
}
+ /* GT/LT? Only update if score is greater/less than current. */
+ if ((lt && score >= curscore) || (gt && score <= curscore)) {
+ *out_flags |= ZADD_OUT_NOP;
+ return 1;
+ }
+
+ if (newscore) *newscore = score;
+
/* Remove and re-insert when score changes. */
- if (score != curscore &&
- /* LT? Only update if score is less than current. */
- (!lt || score < curscore) &&
- /* GT? Only update if score is greater than current. */
- (!gt || score > curscore))
- {
+ if (score != curscore) {
znode = zslUpdateScore(zs->zsl,curscore,ele,score);
/* Note that we did not remove the original element from
* the hash table representing the sorted set, so we just
* update the score. */
dictGetVal(de) = &znode->score; /* Update score ptr. */
- *flags |= ZADD_UPDATED;
+ *out_flags |= ZADD_OUT_UPDATED;
}
return 1;
} else if (!xx) {
ele = sdsdup(ele);
znode = zslInsert(zs->zsl,score,ele);
serverAssert(dictAdd(zs->dict,ele,&znode->score) == DICT_OK);
- *flags |= ZADD_ADDED;
+ *out_flags |= ZADD_OUT_ADDED;
if (newscore) *newscore = score;
return 1;
} else {
- *flags |= ZADD_NOP;
+ *out_flags |= ZADD_OUT_NOP;
return 1;
}
} else {
@@ -1636,7 +1636,7 @@ static int _zsetZiplistValidateIntegrity(unsigned char *p, void *userdata) {
return 1;
}
-/* Validate the integrity of the data stracture.
+/* Validate the integrity of the data structure.
* when `deep` is 0, only the integrity of the header is validated.
* when `deep` is 1, we scan all the entries one by one. */
int zsetZiplistValidateIntegrity(unsigned char *zl, size_t size, int deep) {
@@ -1712,7 +1712,7 @@ void zaddGenericCommand(client *c, int flags) {
robj *zobj;
sds ele;
double score = 0, *scores = NULL;
- int j, elements;
+ int j, elements, ch = 0;
int scoreidx = 0;
/* The following vars are used in order to track what the command actually
* did during the execution, to reply to the client and to trigger the
@@ -1727,23 +1727,22 @@ void zaddGenericCommand(client *c, int flags) {
scoreidx = 2;
while(scoreidx < c->argc) {
char *opt = c->argv[scoreidx]->ptr;
- if (!strcasecmp(opt,"nx")) flags |= ZADD_NX;
- else if (!strcasecmp(opt,"xx")) flags |= ZADD_XX;
- else if (!strcasecmp(opt,"ch")) flags |= ZADD_CH;
- else if (!strcasecmp(opt,"incr")) flags |= ZADD_INCR;
- else if (!strcasecmp(opt,"gt")) flags |= ZADD_GT;
- else if (!strcasecmp(opt,"lt")) flags |= ZADD_LT;
+ if (!strcasecmp(opt,"nx")) flags |= ZADD_IN_NX;
+ else if (!strcasecmp(opt,"xx")) flags |= ZADD_IN_XX;
+ else if (!strcasecmp(opt,"ch")) ch = 1; /* Return num of elements added or updated. */
+ else if (!strcasecmp(opt,"incr")) flags |= ZADD_IN_INCR;
+ else if (!strcasecmp(opt,"gt")) flags |= ZADD_IN_GT;
+ else if (!strcasecmp(opt,"lt")) flags |= ZADD_IN_LT;
else break;
scoreidx++;
}
/* Turn options into simple to check vars. */
- int incr = (flags & ZADD_INCR) != 0;
- int nx = (flags & ZADD_NX) != 0;
- int xx = (flags & ZADD_XX) != 0;
- int ch = (flags & ZADD_CH) != 0;
- int gt = (flags & ZADD_GT) != 0;
- int lt = (flags & ZADD_LT) != 0;
+ int incr = (flags & ZADD_IN_INCR) != 0;
+ int nx = (flags & ZADD_IN_NX) != 0;
+ int xx = (flags & ZADD_IN_XX) != 0;
+ int gt = (flags & ZADD_IN_GT) != 0;
+ int lt = (flags & ZADD_IN_LT) != 0;
/* After the options, we expect to have an even number of args, since
* we expect any number of score-element pairs. */
@@ -1801,17 +1800,17 @@ void zaddGenericCommand(client *c, int flags) {
for (j = 0; j < elements; j++) {
double newscore;
score = scores[j];
- int retflags = flags;
+ int retflags = 0;
ele = c->argv[scoreidx+1+j*2]->ptr;
- int retval = zsetAdd(zobj, score, ele, &retflags, &newscore);
+ int retval = zsetAdd(zobj, score, ele, flags, &retflags, &newscore);
if (retval == 0) {
addReplyError(c,nanerr);
goto cleanup;
}
- if (retflags & ZADD_ADDED) added++;
- if (retflags & ZADD_UPDATED) updated++;
- if (!(retflags & ZADD_NOP)) processed++;
+ if (retflags & ZADD_OUT_ADDED) added++;
+ if (retflags & ZADD_OUT_UPDATED) updated++;
+ if (!(retflags & ZADD_OUT_NOP)) processed++;
score = newscore;
}
server.dirty += (added+updated);
@@ -1836,11 +1835,11 @@ cleanup:
}
void zaddCommand(client *c) {
- zaddGenericCommand(c,ZADD_NONE);
+ zaddGenericCommand(c,ZADD_IN_NONE);
}
void zincrbyCommand(client *c) {
- zaddGenericCommand(c,ZADD_INCR);
+ zaddGenericCommand(c,ZADD_IN_INCR);
}
void zremCommand(client *c) {
@@ -2577,8 +2576,8 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in
return;
if (setnum < 1) {
- addReplyError(c,
- "at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE/ZDIFFSTORE");
+ addReplyErrorFormat(c,
+ "at least 1 input key is needed for %s", c->cmd->name);
return;
}
@@ -2941,7 +2940,7 @@ static void zrangeResultEmitCBufferForStore(zrange_result_handler *handler,
double newscore;
int retflags = 0;
sds ele = sdsnewlen(value, value_length_in_bytes);
- int retval = zsetAdd(handler->dstobj, score, ele, &retflags, &newscore);
+ int retval = zsetAdd(handler->dstobj, score, ele, ZADD_IN_NONE, &retflags, &newscore);
sdsfree(ele);
serverAssert(retval);
}
@@ -2952,7 +2951,7 @@ static void zrangeResultEmitLongLongForStore(zrange_result_handler *handler,
double newscore;
int retflags = 0;
sds ele = sdsfromlonglong(value);
- int retval = zsetAdd(handler->dstobj, score, ele, &retflags, &newscore);
+ int retval = zsetAdd(handler->dstobj, score, ele, ZADD_IN_NONE, &retflags, &newscore);
sdsfree(ele);
serverAssert(retval);
}
diff --git a/src/tls.c b/src/tls.c
index bcfe53a35..ffd3b0ad0 100644
--- a/src/tls.c
+++ b/src/tls.c
@@ -147,7 +147,7 @@ void tlsInit(void) {
#if OPENSSL_VERSION_NUMBER < 0x10100000L
OPENSSL_config(NULL);
#else
- OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CONFIG, NULL);
+ OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CONFIG|OPENSSL_INIT_ATFORK, NULL);
#endif
ERR_load_crypto_strings();
SSL_load_error_strings();
@@ -164,11 +164,43 @@ void tlsInit(void) {
pending_list = listCreate();
}
+void tlsCleanup(void) {
+ if (redis_tls_ctx) {
+ SSL_CTX_free(redis_tls_ctx);
+ redis_tls_ctx = NULL;
+ }
+ if (redis_tls_client_ctx) {
+ SSL_CTX_free(redis_tls_client_ctx);
+ redis_tls_client_ctx = NULL;
+ }
+
+ #if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ OPENSSL_cleanup();
+ #endif
+}
+
+/* Callback for passing a keyfile password stored as an sds to OpenSSL */
+static int tlsPasswordCallback(char *buf, int size, int rwflag, void *u) {
+ UNUSED(rwflag);
+
+ const char *pass = u;
+ size_t pass_len;
+
+ if (!pass) return -1;
+ pass_len = strlen(pass);
+ if (pass_len > (size_t) size) return -1;
+ memcpy(buf, pass, pass_len);
+
+ return (int) pass_len;
+}
+
/* Create a *base* SSL_CTX using the SSL configuration provided. The base context
* includes everything that's common for both client-side and server-side connections.
*/
-static SSL_CTX *createSSLContext(redisTLSContextConfig *ctx_config, int protocols,
- const char *cert_file, const char *key_file) {
+static SSL_CTX *createSSLContext(redisTLSContextConfig *ctx_config, int protocols, int client) {
+ const char *cert_file = client ? ctx_config->client_cert_file : ctx_config->cert_file;
+ const char *key_file = client ? ctx_config->client_key_file : ctx_config->key_file;
+ const char *key_file_pass = client ? ctx_config->client_key_file_pass : ctx_config->key_file_pass;
char errbuf[256];
SSL_CTX *ctx = NULL;
@@ -200,6 +232,9 @@ static SSL_CTX *createSSLContext(redisTLSContextConfig *ctx_config, int protocol
SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE|SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
+ SSL_CTX_set_default_passwd_cb(ctx, tlsPasswordCallback);
+ SSL_CTX_set_default_passwd_cb_userdata(ctx, (void *) key_file_pass);
+
if (SSL_CTX_use_certificate_chain_file(ctx, cert_file) <= 0) {
ERR_error_string_n(ERR_get_error(), errbuf, sizeof(errbuf));
serverLog(LL_WARNING, "Failed to load certificate: %s: %s", cert_file, errbuf);
@@ -266,7 +301,7 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) {
if (protocols == -1) goto error;
/* Create server side/general context */
- ctx = createSSLContext(ctx_config, protocols, ctx_config->cert_file, ctx_config->key_file);
+ ctx = createSSLContext(ctx_config, protocols, 0);
if (!ctx) goto error;
if (ctx_config->session_caching) {
@@ -317,7 +352,7 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) {
/* If a client-side certificate is configured, create an explicit client context */
if (ctx_config->client_cert_file && ctx_config->client_key_file) {
- client_ctx = createSSLContext(ctx_config, protocols, ctx_config->client_cert_file, ctx_config->client_key_file);
+ client_ctx = createSSLContext(ctx_config, protocols, 1);
if (!client_ctx) goto error;
}
@@ -948,6 +983,9 @@ sds connTLSGetPeerCert(connection *conn_) {
void tlsInit(void) {
}
+void tlsCleanup(void) {
+}
+
int tlsConfigure(redisTLSContextConfig *ctx_config) {
UNUSED(ctx_config);
return C_OK;
diff --git a/src/util.c b/src/util.c
index 8087c8b7a..df7a489a5 100644
--- a/src/util.c
+++ b/src/util.c
@@ -946,9 +946,10 @@ static void test_ll2string(void) {
}
#define UNUSED(x) (void)(x)
-int utilTest(int argc, char **argv) {
+int utilTest(int argc, char **argv, int accurate) {
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
test_string2ll();
test_string2l();
diff --git a/src/util.h b/src/util.h
index 3a15c793e..3bf8907c0 100644
--- a/src/util.h
+++ b/src/util.h
@@ -66,7 +66,7 @@ long getTimeZone(void);
int pathIsBaseName(char *path);
#ifdef REDIS_TEST
-int utilTest(int argc, char **argv);
+int utilTest(int argc, char **argv, int accurate);
#endif
#endif
diff --git a/src/version.h b/src/version.h
index 64fdbe89b..3c5dc02c5 100644
--- a/src/version.h
+++ b/src/version.h
@@ -1,2 +1,2 @@
-#define REDIS_VERSION "6.2.1"
-#define REDIS_VERSION_NUM 0x00060201
+#define REDIS_VERSION "6.2.2"
+#define REDIS_VERSION_NUM 0x00060202
diff --git a/src/ziplist.c b/src/ziplist.c
index b66f97ef8..85cb50991 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -1472,7 +1472,7 @@ void ziplistRepr(unsigned char *zl) {
printf("{end}\n\n");
}
-/* Validate the integrity of the data stracture.
+/* Validate the integrity of the data structure.
* when `deep` is 0, only the integrity of the header is validated.
* when `deep` is 1, we scan all the entries one by one. */
int ziplistValidateIntegrity(unsigned char *zl, size_t size, int deep,
@@ -1823,15 +1823,17 @@ static size_t strEntryBytesLarge(size_t slen) {
return slen + zipStorePrevEntryLength(NULL, ZIP_BIG_PREVLEN) + zipStoreEntryEncoding(NULL, 0, slen);
}
-int ziplistTest(int argc, char **argv) {
+/* ./redis-server test ziplist <randomseed> --accurate */
+int ziplistTest(int argc, char **argv, int accurate) {
unsigned char *zl, *p;
unsigned char *entry;
unsigned int elen;
long long value;
+ int iteration;
/* If an argument is given, use it as the random seed. */
- if (argc == 2)
- srand(atoi(argv[1]));
+ if (argc >= 4)
+ srand(atoi(argv[3]));
zl = createIntList();
ziplistRepr(zl);
@@ -2339,7 +2341,8 @@ int ziplistTest(int argc, char **argv) {
unsigned int slen;
long long sval;
- for (i = 0; i < 20000; i++) {
+ iteration = accurate ? 20000 : 20;
+ for (i = 0; i < iteration; i++) {
zl = ziplistNew();
ref = listCreate();
listSetFreeMethod(ref,(void (*)(void*))sdsfree);
@@ -2405,15 +2408,17 @@ int ziplistTest(int argc, char **argv) {
printf("Stress with variable ziplist size:\n");
{
unsigned long long start = usec();
- stress(ZIPLIST_HEAD,100000,16384,256);
- stress(ZIPLIST_TAIL,100000,16384,256);
+ int maxsize = accurate ? 16384 : 16;
+ stress(ZIPLIST_HEAD,100000,maxsize,256);
+ stress(ZIPLIST_TAIL,100000,maxsize,256);
printf("Done. usec=%lld\n\n", usec()-start);
}
/* Benchmarks */
{
zl = ziplistNew();
- for (int i=0; i<100000; i++) {
+ iteration = accurate ? 100000 : 100;
+ for (int i=0; i<iteration; i++) {
char buf[4096] = "asdf";
zl = ziplistPush(zl, (unsigned char*)buf, 4, ZIPLIST_TAIL);
zl = ziplistPush(zl, (unsigned char*)buf, 40, ZIPLIST_TAIL);
@@ -2462,7 +2467,8 @@ int ziplistTest(int argc, char **argv) {
{
char data[ZIP_BIG_PREVLEN];
zl = ziplistNew();
- for (int i = 0; i < 100000; i++) {
+ iteration = accurate ? 100000 : 100;
+ for (int i = 0; i < iteration; i++) {
zl = ziplistPush(zl, (unsigned char*)data, ZIP_BIG_PREVLEN-4, ZIPLIST_TAIL);
}
unsigned long long start = usec();
diff --git a/src/ziplist.h b/src/ziplist.h
index 9dc1061b0..9e7997ad8 100644
--- a/src/ziplist.h
+++ b/src/ziplist.h
@@ -67,7 +67,7 @@ void ziplistRandomPairs(unsigned char *zl, unsigned int count, ziplistEntry *key
unsigned int ziplistRandomPairsUnique(unsigned char *zl, unsigned int count, ziplistEntry *keys, ziplistEntry *vals);
#ifdef REDIS_TEST
-int ziplistTest(int argc, char *argv[]);
+int ziplistTest(int argc, char *argv[], int accurate);
#endif
#endif /* _ZIPLIST_H */
diff --git a/src/zipmap.c b/src/zipmap.c
index bd41fe6a5..c24e81355 100644
--- a/src/zipmap.c
+++ b/src/zipmap.c
@@ -374,7 +374,7 @@ size_t zipmapBlobLen(unsigned char *zm) {
return totlen;
}
-/* Validate the integrity of the data stracture.
+/* Validate the integrity of the data structure.
* when `deep` is 0, only the integrity of the header is validated.
* when `deep` is 1, we scan all the entries one by one. */
int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) {
@@ -473,11 +473,12 @@ static void zipmapRepr(unsigned char *p) {
}
#define UNUSED(x) (void)(x)
-int zipmapTest(int argc, char *argv[]) {
+int zipmapTest(int argc, char *argv[], int accurate) {
unsigned char *zm;
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
zm = zipmapNew();
@@ -532,6 +533,7 @@ int zipmapTest(int argc, char *argv[]) {
printf(" %d:%.*s => %d:%.*s\n", klen, klen, key, vlen, vlen, value);
}
}
+ zfree(zm);
return 0;
}
#endif
diff --git a/src/zipmap.h b/src/zipmap.h
index daf8430a0..1b34a32ae 100644
--- a/src/zipmap.h
+++ b/src/zipmap.h
@@ -48,7 +48,7 @@ void zipmapRepr(unsigned char *p);
int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep);
#ifdef REDIS_TEST
-int zipmapTest(int argc, char *argv[]);
+int zipmapTest(int argc, char *argv[], int accurate);
#endif
#endif
diff --git a/src/zmalloc.c b/src/zmalloc.c
index 1dc662e57..3645efcf1 100644
--- a/src/zmalloc.c
+++ b/src/zmalloc.c
@@ -414,9 +414,9 @@ size_t zmalloc_get_rss(void) {
if (sysctl(mib, 4, &info, &infolen, NULL, 0) == 0)
#if defined(__FreeBSD__)
- return (size_t)info.ki_rssize;
+ return (size_t)info.ki_rssize * getpagesize();
#else
- return (size_t)info.kp_vm_rssize;
+ return (size_t)info.kp_vm_rssize * getpagesize();
#endif
return 0L;
@@ -436,7 +436,7 @@ size_t zmalloc_get_rss(void) {
mib[4] = sizeof(info);
mib[5] = 1;
if (sysctl(mib, 4, &info, &infolen, NULL, 0) == 0)
- return (size_t)info.p_vm_rssize;
+ return (size_t)info.p_vm_rssize * getpagesize();
return 0L;
}
@@ -613,6 +613,11 @@ size_t zmalloc_get_smap_bytes_by_field(char *field, long pid) {
}
#endif
+/* Return the total number of bytes in pages marked as Private Dirty.
+ *
+ * Note: depending on the platform and memory footprint of the process, this
+ * call can be slow, exceeding 1000ms!
+ */
size_t zmalloc_get_private_dirty(long pid) {
return zmalloc_get_smap_bytes_by_field("Private_Dirty:",pid);
}
@@ -675,11 +680,12 @@ size_t zmalloc_get_memory_size(void) {
#ifdef REDIS_TEST
#define UNUSED(x) ((void)(x))
-int zmalloc_test(int argc, char **argv) {
+int zmalloc_test(int argc, char **argv, int accurate) {
void *ptr;
UNUSED(argc);
UNUSED(argv);
+ UNUSED(accurate);
printf("Malloc prefix size: %d\n", (int) PREFIX_SIZE);
printf("Initial used memory: %zu\n", zmalloc_used_memory());
ptr = zmalloc(123);
diff --git a/src/zmalloc.h b/src/zmalloc.h
index d44c7b389..bb4cbddbb 100644
--- a/src/zmalloc.h
+++ b/src/zmalloc.h
@@ -71,12 +71,21 @@
*/
#ifndef ZMALLOC_LIB
#define ZMALLOC_LIB "libc"
+
#if !defined(NO_MALLOC_USABLE_SIZE) && \
(defined(__GLIBC__) || defined(__FreeBSD__) || \
defined(USE_MALLOC_USABLE_SIZE))
+
+/* Includes for malloc_usable_size() */
+#ifdef __FreeBSD__
+#include <malloc_np.h>
+#else
#include <malloc.h>
+#endif
+
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) malloc_usable_size(p)
+
#endif
#endif
@@ -126,7 +135,7 @@ size_t zmalloc_usable_size(void *ptr);
#endif
#ifdef REDIS_TEST
-int zmalloc_test(int argc, char **argv);
+int zmalloc_test(int argc, char **argv, int accurate);
#endif
#endif /* __ZMALLOC_H */
diff --git a/tests/assets/nodefaultuser.acl b/tests/assets/nodefaultuser.acl
new file mode 100644
index 000000000..2557c7fe7
--- /dev/null
+++ b/tests/assets/nodefaultuser.acl
@@ -0,0 +1,2 @@
+user alice on nopass ~* +@all
+user bob on nopass ~* &* +@all
\ No newline at end of file
diff --git a/tests/assets/user.acl b/tests/assets/user.acl
index 2f065dab6..67303512c 100644
--- a/tests/assets/user.acl
+++ b/tests/assets/user.acl
@@ -1,2 +1,3 @@
user alice on allcommands allkeys >alice
-user bob on -@all +@set +acl ~set* >bob
\ No newline at end of file
+user bob on -@all +@set +acl ~set* >bob
+user default on nopass ~* +@all
diff --git a/tests/cluster/cluster.tcl b/tests/cluster/cluster.tcl
index ffb268561..e95789282 100644
--- a/tests/cluster/cluster.tcl
+++ b/tests/cluster/cluster.tcl
@@ -4,6 +4,10 @@
# This software is released under the BSD License. See the COPYING file for
# more information.
+# Track cluster configuration as created by create_cluster below
+set ::cluster_master_nodes 0
+set ::cluster_replica_nodes 0
+
# Returns a parsed CLUSTER NODES output as a list of dictionaries.
proc get_cluster_nodes id {
set lines [split [R $id cluster nodes] "\r\n"]
@@ -120,6 +124,9 @@ proc create_cluster {masters slaves} {
cluster_allocate_slaves $masters $slaves
}
assert_cluster_state ok
+
+ set ::cluster_master_nodes $masters
+ set ::cluster_replica_nodes $slaves
}
# Set the cluster node-timeout to all the reachable nodes.
@@ -143,3 +150,28 @@ proc cluster_write_test {id} {
}
$cluster close
}
+
+# Check if cluster configuration is consistent.
+proc cluster_config_consistent {} {
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ if {$j == 0} {
+ set base_cfg [R $j cluster slots]
+ } else {
+ set cfg [R $j cluster slots]
+ if {$cfg != $base_cfg} {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+# Wait for cluster configuration to propagate and be consistent across nodes.
+proc wait_for_cluster_propagation {} {
+ wait_for_condition 50 100 {
+ [cluster_config_consistent] eq 1
+ } else {
+ fail "cluster config did not reach a consistent state"
+ }
+}
diff --git a/tests/cluster/tests/04-resharding.tcl b/tests/cluster/tests/04-resharding.tcl
index 1dcdb5a2c..4d31d314c 100644
--- a/tests/cluster/tests/04-resharding.tcl
+++ b/tests/cluster/tests/04-resharding.tcl
@@ -54,7 +54,17 @@ proc process_is_running {pid} {
set numkeys 50000
set numops 200000
-set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+set start_node_port [get_instance_attrib redis 0 port]
+set cluster [redis_cluster 127.0.0.1:$start_node_port]
+if {$::tls} {
+ # setup a non-TLS cluster client to the TLS cluster
+ set plaintext_port [get_instance_attrib redis 0 plaintext-port]
+ set cluster_plaintext [redis_cluster 127.0.0.1:$plaintext_port 0]
+ puts "Testing TLS cluster on start node 127.0.0.1:$start_node_port, plaintext port $plaintext_port"
+} else {
+ set cluster_plaintext $cluster
+ puts "Testing using non-TLS cluster"
+}
catch {unset content}
array set content {}
set tribpid {}
@@ -94,8 +104,11 @@ test "Cluster consistency during live resharding" {
# This way we are able to stress Lua -> Redis command invocation
# as well, that has tests to prevent Lua to write into wrong
# hash slots.
- if {$listid % 2} {
+ # We also use both TLS and plaintext connections.
+ if {$listid % 3 == 0} {
$cluster rpush $key $ele
+ } elseif {$listid % 3 == 1} {
+ $cluster_plaintext rpush $key $ele
} else {
$cluster eval {redis.call("rpush",KEYS[1],ARGV[1])} 1 $key $ele
}
diff --git a/tests/cluster/tests/12-replica-migration-2.tcl b/tests/cluster/tests/12-replica-migration-2.tcl
index dd18a979a..ea80d81d3 100644
--- a/tests/cluster/tests/12-replica-migration-2.tcl
+++ b/tests/cluster/tests/12-replica-migration-2.tcl
@@ -29,6 +29,12 @@ test "Each master should have at least two replicas attached" {
}
}
+test "Set allow-replica-migration yes" {
+ foreach_redis_id id {
+ R $id CONFIG SET cluster-allow-replica-migration yes
+ }
+}
+
set master0_id [dict get [get_myself 0] id]
test "Resharding all the master #0 slots away from it" {
set output [exec \
diff --git a/tests/cluster/tests/12.1-replica-migration-3.tcl b/tests/cluster/tests/12.1-replica-migration-3.tcl
new file mode 100644
index 000000000..46a9f79e3
--- /dev/null
+++ b/tests/cluster/tests/12.1-replica-migration-3.tcl
@@ -0,0 +1,71 @@
+# Replica migration test #2.
+#
+# Check that if 'cluster-allow-replica-migration' is set to 'no', slaves do not
+# migrate when master becomes empty.
+
+source "../tests/includes/init-tests.tcl"
+
+# Create a cluster with 5 masters and 15 slaves, to make sure there are no
+# empty masters and make rebalancing simpler to handle during the test.
+test "Create a 5 nodes cluster" {
+ create_cluster 5 15
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Each master should have at least two replicas attached" {
+ foreach_redis_id id {
+ if {$id < 5} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] >= 2
+ } else {
+ fail "Master #$id does not have 2 slaves as expected"
+ }
+ }
+ }
+}
+
+test "Set allow-replica-migration no" {
+ foreach_redis_id id {
+ R $id CONFIG SET cluster-allow-replica-migration no
+ }
+}
+
+set master0_id [dict get [get_myself 0] id]
+test "Resharding all the master #0 slots away from it" {
+ set output [exec \
+ ../../../src/redis-cli --cluster rebalance \
+ 127.0.0.1:[get_instance_attrib redis 0 port] \
+ {*}[rediscli_tls_config "../../../tests"] \
+ --cluster-weight ${master0_id}=0 >@ stdout ]
+}
+
+test "Wait cluster to be stable" {
+ wait_for_condition 1000 50 {
+ [catch {exec ../../../src/redis-cli --cluster \
+ check 127.0.0.1:[get_instance_attrib redis 0 port] \
+ {*}[rediscli_tls_config "../../../tests"] \
+ }] == 0
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+}
+
+test "Master #0 stil should have its replicas" {
+ assert { [llength [lindex [R 0 role] 2]] >= 2 }
+}
+
+test "Each master should have at least two replicas attached" {
+ foreach_redis_id id {
+ if {$id < 5} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] >= 2
+ } else {
+ fail "Master #$id does not have 2 slaves as expected"
+ }
+ }
+ }
+}
+
diff --git a/tests/cluster/tests/15-cluster-slots.tcl b/tests/cluster/tests/15-cluster-slots.tcl
index 1b33c57bd..f154b7270 100644
--- a/tests/cluster/tests/15-cluster-slots.tcl
+++ b/tests/cluster/tests/15-cluster-slots.tcl
@@ -48,3 +48,16 @@ test "client can handle keys with hash tag" {
$cluster set foo{tag} bar
$cluster close
}
+
+if {$::tls} {
+ test {CLUSTER SLOTS from non-TLS client in TLS cluster} {
+ set slots_tls [R 0 cluster slots]
+ set host [get_instance_attrib redis 0 host]
+ set plaintext_port [get_instance_attrib redis 0 plaintext-port]
+ set client_plain [redis $host $plaintext_port 0 0]
+ set slots_plain [$client_plain cluster slots]
+ $client_plain close
+ # The ports in the first row should differ (TLS port vs plaintext port)
+ assert_no_match [lindex $slots_tls 0 3 1] [lindex $slots_plain 0 3 1]
+ }
+}
diff --git a/tests/cluster/tests/17-diskless-load-swapdb.tcl b/tests/cluster/tests/17-diskless-load-swapdb.tcl
index 612818cb7..516e5170c 100644
--- a/tests/cluster/tests/17-diskless-load-swapdb.tcl
+++ b/tests/cluster/tests/17-diskless-load-swapdb.tcl
@@ -36,7 +36,7 @@ test "Right to restore backups when fail to diskless load " {
# Write a key that belongs to slot 0
set slot0_key "06S"
$master set $slot0_key 1
- after 100
+ wait_for_ofs_sync $master $replica
assert_equal {1} [$replica get $slot0_key]
assert_equal $slot0_key [$replica CLUSTER GETKEYSINSLOT 0 1]
@@ -73,6 +73,13 @@ test "Right to restore backups when fail to diskless load " {
# Kill master, abort full sync
kill_instance redis $master_id
+ # Start full sync, wait till the replica detects the disconnection
+ wait_for_condition 500 10 {
+ [s $replica_id loading] eq 0
+ } else {
+ fail "Fail to full sync"
+ }
+
# Replica keys and keys to slots map still both are right
assert_equal {1} [$replica get $slot0_key]
assert_equal $slot0_key [$replica CLUSTER GETKEYSINSLOT 0 1]
diff --git a/tests/cluster/tests/19-cluster-nodes-slots.tcl b/tests/cluster/tests/19-cluster-nodes-slots.tcl
index ca0b3ce0d..80f68d5d0 100644
--- a/tests/cluster/tests/19-cluster-nodes-slots.tcl
+++ b/tests/cluster/tests/19-cluster-nodes-slots.tcl
@@ -37,26 +37,35 @@ set master2 [Rn 1]
test "Continuous slots distribution" {
assert_match "* 0-8191*" [$master1 CLUSTER NODES]
assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
+ assert_match "*0 8191*" [$master1 CLUSTER SLOTS]
+ assert_match "*8192 16383*" [$master2 CLUSTER SLOTS]
$master1 CLUSTER DELSLOTS 4096
assert_match "* 0-4095 4097-8191*" [$master1 CLUSTER NODES]
+ assert_match "*0 4095*4097 8191*" [$master1 CLUSTER SLOTS]
+
$master2 CLUSTER DELSLOTS 12288
assert_match "* 8192-12287 12289-16383*" [$master2 CLUSTER NODES]
+ assert_match "*8192 12287*12289 16383*" [$master2 CLUSTER SLOTS]
}
test "Discontinuous slots distribution" {
# Remove middle slots
$master1 CLUSTER DELSLOTS 4092 4094
assert_match "* 0-4091 4093 4095 4097-8191*" [$master1 CLUSTER NODES]
+ assert_match "*0 4091*4093 4093*4095 4095*4097 8191*" [$master1 CLUSTER SLOTS]
$master2 CLUSTER DELSLOTS 12284 12286
assert_match "* 8192-12283 12285 12287 12289-16383*" [$master2 CLUSTER NODES]
+ assert_match "*8192 12283*12285 12285*12287 12287*12289 16383*" [$master2 CLUSTER SLOTS]
# Remove head slots
$master1 CLUSTER DELSLOTS 0 2
assert_match "* 1 3-4091 4093 4095 4097-8191*" [$master1 CLUSTER NODES]
+ assert_match "*1 1*3 4091*4093 4093*4095 4095*4097 8191*" [$master1 CLUSTER SLOTS]
# Remove tail slots
$master2 CLUSTER DELSLOTS 16380 16382 16383
assert_match "* 8192-12283 12285 12287 12289-16379 16381*" [$master2 CLUSTER NODES]
+ assert_match "*8192 12283*12285 12285*12287 12287*12289 16379*16381 16381*" [$master2 CLUSTER SLOTS]
}
diff --git a/tests/cluster/tests/20-half-migrated-slot.tcl b/tests/cluster/tests/20-half-migrated-slot.tcl
new file mode 100644
index 000000000..229b3a86d
--- /dev/null
+++ b/tests/cluster/tests/20-half-migrated-slot.tcl
@@ -0,0 +1,98 @@
+# Tests for fixing a migrating slot at all stages:
+# 1. when migration is half initialized on the "migrating" node
+# 2. when migration is half initialized on the "importing" node
+# 3. migration initialized, but not finished
+# 4. migration is half finished on the "migrating" node
+# 5. migration is half finished on the "importing" node
+
+# TODO: Test is currently disabled until it is stabilized (fixing the test
+# itself or real issues in Redis).
+
+if {false} {
+source "../tests/includes/init-tests.tcl"
+source "../tests/includes/utils.tcl"
+
+test "Create a 2 nodes cluster" {
+ create_cluster 2 0
+ config_set_all_nodes cluster-allow-replica-migration no
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+catch {unset nodefrom}
+catch {unset nodeto}
+
+proc reset_cluster {} {
+ uplevel 1 {
+ $cluster refresh_nodes_map
+ array set nodefrom [$cluster masternode_for_slot 609]
+ array set nodeto [$cluster masternode_notfor_slot 609]
+ }
+}
+
+reset_cluster
+
+$cluster set aga xyz
+
+test "Half init migration in 'migrating' is fixable" {
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Half init migration in 'importing' is fixable" {
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Init migration and move key" {
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ assert_equal {OK} [$nodefrom(link) migrate $nodeto(host) $nodeto(port) aga 0 10000]
+ wait_for_cluster_propagation
+ assert_equal "xyz" [$cluster get aga]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+reset_cluster
+
+test "Move key again" {
+ wait_for_cluster_propagation
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ assert_equal {OK} [$nodefrom(link) migrate $nodeto(host) $nodeto(port) aga 0 10000]
+ wait_for_cluster_propagation
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Half-finish migration" {
+ # half finish migration on 'migrating' node
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 node $nodeto(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+reset_cluster
+
+test "Move key back" {
+ # 'aga' key is in 609 slot
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ assert_equal {OK} [$nodefrom(link) migrate $nodeto(host) $nodeto(port) aga 0 10000]
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Half-finish importing" {
+ # Now we half finish 'importing' node
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 node $nodeto(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+config_set_all_nodes cluster-allow-replica-migration yes
+}
diff --git a/tests/cluster/tests/21-many-slot-migration.tcl b/tests/cluster/tests/21-many-slot-migration.tcl
new file mode 100644
index 000000000..ccfff74e0
--- /dev/null
+++ b/tests/cluster/tests/21-many-slot-migration.tcl
@@ -0,0 +1,64 @@
+# Tests for many simultaneous migrations.
+
+# TODO: Test is currently disabled until it is stabilized (fixing the test
+# itself or real issues in Redis).
+
+if {false} {
+
+source "../tests/includes/init-tests.tcl"
+source "../tests/includes/utils.tcl"
+
+# TODO: This test currently runs without replicas, as failovers (which may
+# happen on lower-end CI platforms) are still not handled properly by the
+# cluster during slot migration (related to #6339).
+
+test "Create a 10 nodes cluster" {
+ create_cluster 10 0
+ config_set_all_nodes cluster-allow-replica-migration no
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+catch {unset nodefrom}
+catch {unset nodeto}
+
+$cluster refresh_nodes_map
+
+test "Set many keys" {
+ for {set i 0} {$i < 40000} {incr i} {
+ $cluster set key:$i val:$i
+ }
+}
+
+test "Keys are accessible" {
+ for {set i 0} {$i < 40000} {incr i} {
+ assert { [$cluster get key:$i] eq "val:$i" }
+ }
+}
+
+test "Init migration of many slots" {
+ for {set slot 0} {$slot < 1000} {incr slot} {
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ $nodefrom(link) cluster setslot $slot migrating $nodeto(id)
+ $nodeto(link) cluster setslot $slot importing $nodefrom(id)
+ }
+}
+
+test "Fix cluster" {
+ wait_for_cluster_propagation
+ fix_cluster $nodefrom(addr)
+}
+
+test "Keys are accessible" {
+ for {set i 0} {$i < 40000} {incr i} {
+ assert { [$cluster get key:$i] eq "val:$i" }
+ }
+}
+
+config_set_all_nodes cluster-allow-replica-migration yes
+}
diff --git a/tests/cluster/tests/includes/utils.tcl b/tests/cluster/tests/includes/utils.tcl
new file mode 100644
index 000000000..48c40a050
--- /dev/null
+++ b/tests/cluster/tests/includes/utils.tcl
@@ -0,0 +1,25 @@
+source "../../../tests/support/cli.tcl"
+
+proc config_set_all_nodes {keyword value} {
+ foreach_redis_id id {
+ R $id config set $keyword $value
+ }
+}
+
+proc fix_cluster {addr} {
+ set code [catch {
+ exec ../../../src/redis-cli {*}[rediscli_tls_config "../../../tests"] --cluster fix $addr << yes
+ } result]
+ if {$code != 0} {
+ puts "redis-cli --cluster fix returns non-zero exit code, output below:\n$result"
+ }
+ # Note: redis-cli --cluster fix may return a non-zero exit code if nodes don't agree,
+ # but we can ignore that and rely on the check below.
+ assert_cluster_state ok
+ wait_for_condition 100 100 {
+ [catch {exec ../../../src/redis-cli {*}[rediscli_tls_config "../../../tests"] --cluster check $addr} result] == 0
+ } else {
+ puts "redis-cli --cluster check returns non-zero exit code, output below:\n$result"
+ fail "Cluster could not settle with configuration"
+ }
+}
diff --git a/tests/instances.tcl b/tests/instances.tcl
index 255d9740f..1395efa0c 100644
--- a/tests/instances.tcl
+++ b/tests/instances.tcl
@@ -64,6 +64,8 @@ proc exec_instance {type dirname cfgfile} {
proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} {
for {set j 0} {$j < $count} {incr j} {
set port [find_available_port $base_port $::redis_port_count]
+ # plaintext port (only used for TLS cluster)
+ set pport 0
# Create a directory for this instance.
set dirname "${type}_${j}"
lappend ::dirs $dirname
@@ -83,7 +85,9 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} {
puts $cfg "tls-port $port"
puts $cfg "tls-replication yes"
puts $cfg "tls-cluster yes"
- puts $cfg "port 0"
+ # plaintext port, only used by plaintext clients in a TLS cluster
+ set pport [find_available_port $base_port $::redis_port_count]
+ puts $cfg "port $pport"
puts $cfg [format "tls-cert-file %s/../../tls/server.crt" [pwd]]
puts $cfg [format "tls-key-file %s/../../tls/server.key" [pwd]]
puts $cfg [format "tls-client-cert-file %s/../../tls/client.crt" [pwd]]
@@ -118,6 +122,8 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} {
set cfg [open $cfgfile a+]
if {$::tls} {
puts $cfg "tls-port $port"
+ set pport [find_available_port $base_port $::redis_port_count]
+ puts $cfg "port $pport"
} else {
puts $cfg "port $port"
}
@@ -143,6 +149,7 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} {
pid $pid \
host $::host \
port $port \
+ plaintext-port $pport \
link $link \
]
}
@@ -492,6 +499,14 @@ proc RI {n field} {
get_info_field [R $n info] $field
}
+proc RPort {n} {
+ if {$::tls} {
+ return [lindex [R $n config get tls-port] 1]
+ } else {
+ return [lindex [R $n config get port] 1]
+ }
+}
+
# Iterate over IDs of sentinel or redis instances.
proc foreach_instance_id {instances idvar code} {
upvar 1 $idvar id
diff --git a/tests/integration/corrupt-dump.tcl b/tests/integration/corrupt-dump.tcl
index f5079e5ed..fe2537b03 100644
--- a/tests/integration/corrupt-dump.tcl
+++ b/tests/integration/corrupt-dump.tcl
@@ -518,5 +518,16 @@ test {corrupt payload: fuzzer findings - HRANDFIELD on bad ziplist} {
}
}
+test {corrupt payload: fuzzer findings - stream with no records} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x78\x4D\x55\x68\x09\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x3E\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x50\x01\x00\x01\x01\x01\x02\x01\x05\x23\xFF\x02\x81\x00\x00\x01\x78\x4D\x55\x68\x59\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x78\x4D\x55\x68\x47\x00\x01\x00\x00\x01\x78\x4D\x55\x68\x47\x00\x00\x00\x00\x00\x00\x00\x00\x9F\x68\x55\x4D\x78\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x85\x68\x55\x4D\x78\x01\x00\x00\x01\x00\x00\x01\x78\x4D\x55\x68\x47\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xF1\xC0\x72\x70\x39\x40\x1E\xA9" replace
+ catch {r XREAD STREAMS _stream $}
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "Guru Meditation"] 1
+ }
+}
+
} ;# tags
diff --git a/tests/integration/psync2-pingoff.tcl b/tests/integration/psync2-pingoff.tcl
index cdecfc5c6..7744010a4 100644
--- a/tests/integration/psync2-pingoff.tcl
+++ b/tests/integration/psync2-pingoff.tcl
@@ -111,7 +111,13 @@ start_server {} {
$replica1 replicaof no one
$replica2 replicaof 127.0.0.1 1 ;# we can't promote it to master since that will cycle the replication id
$master config set repl-ping-replica-period 1
- after 1500
+ set replofs [status $master master_repl_offset]
+ wait_for_condition 50 100 {
+ [status $replica3 master_repl_offset] > $replofs &&
+ [status $replica4 master_repl_offset] > $replofs
+ } else {
+ fail "replica didn't sync in time"
+ }
# make everyone sync from the replica1 that didn't get the last ping from the old master
# replica4 will keep syncing from the old master which now syncs from replica1
@@ -195,10 +201,16 @@ start_server {} {
fail "Chained replica not replicating from its master"
}
- # Do a write on the master, and wait for 3 seconds for the master to
+ # Do a write on the master, and wait for the master to
# send some PINGs to its replica
$R(0) INCR counter2
- after 2000
+ set replofs [status $R(0) master_repl_offset]
+ wait_for_condition 50 100 {
+ [status $R(1) master_repl_offset] > $replofs &&
+ [status $R(2) master_repl_offset] > $replofs
+ } else {
+ fail "replica didn't sync in time"
+ }
set sync_partial_master [status $R(0) sync_partial_ok]
set sync_partial_replica [status $R(1) sync_partial_ok]
$R(0) CONFIG SET repl-ping-replica-period 100
diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl
index d877542ed..7e8b41fca 100644
--- a/tests/integration/redis-cli.tcl
+++ b/tests/integration/redis-cli.tcl
@@ -207,6 +207,28 @@ start_server {tags {"cli"}} {
assert_equal "foo\nbar" [run_cli lrange list 0 -1]
}
+ test_nontty_cli "Quoted input arguments" {
+ r set "\x00\x00" "value"
+ assert_equal "value" [run_cli --quoted-input get {"\x00\x00"}]
+ }
+
+ test_nontty_cli "No accidental unquoting of input arguments" {
+ run_cli --quoted-input set {"\x41\x41"} quoted-val
+ run_cli set {"\x41\x41"} unquoted-val
+
+ assert_equal "quoted-val" [r get AA]
+ assert_equal "unquoted-val" [r get {"\x41\x41"}]
+ }
+
+ test_nontty_cli "Invalid quoted input arguments" {
+ catch {run_cli --quoted-input set {"Unterminated}} err
+ assert_match {*exited abnormally*} $err
+
+ # A single arg that unquotes to two arguments is also not expected
+ catch {run_cli --quoted-input set {"arg1" "arg2"}} err
+ assert_match {*exited abnormally*} $err
+ }
+
test_nontty_cli "Read last argument from pipe" {
assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key]
assert_equal "foo\n" [r get key]
@@ -247,6 +269,20 @@ start_server {tags {"cli"}} {
test_redis_cli_rdb_dump
}
+ test "Scan mode" {
+ r flushdb
+ populate 1000 key: 1
+
+ # basic use
+ assert_equal 1000 [llength [split [run_cli --scan]]]
+
+ # pattern
+ assert_equal {key:2} [run_cli --scan --pattern "*:2"]
+
+ # pattern matching with a quoted string
+ assert_equal {key:2} [run_cli --scan --quoted-pattern {"*:\x32"}]
+ }
+
test "Connecting as a replica" {
set fd [open_cli "--replica"]
wait_for_condition 500 500 {
diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl
index c867001b8..8715ae999 100644
--- a/tests/integration/replication-4.tcl
+++ b/tests/integration/replication-4.tcl
@@ -79,12 +79,16 @@ start_server {tags {"repl"}} {
$master config set min-slaves-max-lag 2
$master config set min-slaves-to-write 1
assert {[$master set foo bar] eq {OK}}
- $slave deferred 1
- $slave debug sleep 6
- after 4000
- catch {$master set foo bar} e
- set e
- } {NOREPLICAS*}
+ exec kill -SIGSTOP [srv 0 pid]
+ wait_for_condition 100 100 {
+ [catch {$master set foo bar}] != 0
+ } else {
+ fail "Master didn't become readonly"
+ }
+ catch {$master set foo bar} err
+ assert_match {NOREPLICAS*} $err
+ exec kill -SIGCONT [srv 0 pid]
+ }
}
}
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
index 8d09c68c1..1a089ef4b 100644
--- a/tests/integration/replication.tcl
+++ b/tests/integration/replication.tcl
@@ -196,9 +196,11 @@ start_server {tags {"repl"}} {
} {master}
test {SLAVEOF should start with link status "down"} {
+ r multi
r slaveof [srv -1 host] [srv -1 port]
- s master_link_status
- } {down}
+ r info replication
+ r exec
+ } {*master_link_status:down*}
test {The role should immediately be changed to "replica"} {
s role
@@ -595,9 +597,9 @@ start_server {tags {"repl"}} {
$master debug populate 20000 test 10000
$master config set rdbcompression no
# If running on Linux, we also measure utime/stime to detect possible I/O handling issues
- set os [catch {exec unamee}]
+ catch {exec uname} os
set measure_time [expr {$os == "Linux"} ? 1 : 0]
- foreach all_drop {no slow fast all} {
+ foreach all_drop {no slow fast all timeout} {
test "diskless $all_drop replicas drop during rdb pipe" {
set replicas {}
set replicas_alive {}
@@ -614,7 +616,7 @@ start_server {tags {"repl"}} {
# so that the whole rdb generation process is bound to that
set loglines [count_log_lines -1]
[lindex $replicas 0] config set repl-diskless-load swapdb
- [lindex $replicas 0] config set key-load-delay 100
+ [lindex $replicas 0] config set key-load-delay 100 ;# 20k keys and 100 microseconds sleep means at least 2 seconds
[lindex $replicas 0] replicaof $master_host $master_port
[lindex $replicas 1] replicaof $master_host $master_port
@@ -645,6 +647,12 @@ start_server {tags {"repl"}} {
exec kill [srv -1 pid]
set replicas_alive [lreplace $replicas_alive 0 0]
}
+ if {$all_drop == "timeout"} {
+ $master config set repl-timeout 2
+ # we want the slow replica to hang on a key for a long time so it'll reach repl-timeout
+ exec kill -SIGSTOP [srv -1 pid]
+ after 2000
+ }
# wait for rdb child to exit
wait_for_condition 500 100 {
@@ -663,6 +671,14 @@ start_server {tags {"repl"}} {
if {$all_drop == "slow" || $all_drop == "fast"} {
wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 1 replicas still up*"} $loglines 1 1
}
+ if {$all_drop == "timeout"} {
+ wait_for_log_messages -2 {"*Disconnecting timedout replica (full sync)*"} $loglines 1 1
+ wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 1 replicas still up*"} $loglines 1 1
+ # master disconnected the slow replica, remove from array
+ set replicas_alive [lreplace $replicas_alive 0 0]
+ # release it
+ exec kill -SIGCONT [srv -1 pid]
+ }
# make sure we don't have a busy loop going thought epoll_wait
if {$measure_time} {
@@ -676,7 +692,7 @@ start_server {tags {"repl"}} {
puts "master utime: $master_utime"
puts "master stime: $master_stime"
}
- if {!$::no_latency && ($all_drop == "all" || $all_drop == "slow")} {
+ if {!$::no_latency && ($all_drop == "all" || $all_drop == "slow" || $all_drop == "timeout")} {
assert {$master_utime < 70}
assert {$master_stime < 70}
}
@@ -720,7 +736,7 @@ start_server {tags {"repl"}} {
test "diskless replication child being killed is collected" {
# when diskless master is waiting for the replica to become writable
# it removes the read event from the rdb pipe so if the child gets killed
- # the replica will hung. and the master may not collect the pid with wait3
+ # the replica will hang, and the master may not collect the pid with waitpid
start_server {tags {"repl"}} {
set master [srv 0 client]
set master_host [srv 0 host]
diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c
index be259d738..9305774cd 100644
--- a/tests/modules/keyspace_events.c
+++ b/tests/modules/keyspace_events.c
@@ -38,6 +38,8 @@
/** strores all the keys on which we got 'loaded' keyspace notification **/
RedisModuleDict *loaded_event_log = NULL;
+/** stores all the keys on which we got 'module' keyspace notification **/
+RedisModuleDict *module_event_log = NULL;
static int KeySpace_NotificationLoaded(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
REDISMODULE_NOT_USED(ctx);
@@ -78,6 +80,50 @@ static int KeySpace_NotificationGeneric(RedisModuleCtx *ctx, int type, const cha
return REDISMODULE_OK;
}
+static int KeySpace_NotificationModule(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char* keyName = RedisModule_StringPtrLen(key, NULL);
+ int nokey;
+ RedisModule_DictGetC(module_event_log, (void*)keyName, strlen(keyName), &nokey);
+ if(nokey){
+ RedisModule_DictSetC(module_event_log, (void*)keyName, strlen(keyName), RedisModule_HoldString(ctx, key));
+ }
+ return REDISMODULE_OK;
+}
+
+static int cmdNotify(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ RedisModule_NotifyKeyspaceEvent(ctx, REDISMODULE_NOTIFY_MODULE, "notify", argv[1]);
+ RedisModule_ReplyWithNull(ctx);
+ return REDISMODULE_OK;
+}
+
+static int cmdIsModuleKeyNotified(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* key = RedisModule_StringPtrLen(argv[1], NULL);
+
+ int nokey;
+ RedisModuleString* keyStr = RedisModule_DictGetC(module_event_log, (void*)key, strlen(key), &nokey);
+
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithLongLong(ctx, !nokey);
+ if(nokey){
+ RedisModule_ReplyWithNull(ctx);
+ }else{
+ RedisModule_ReplyWithString(ctx, keyStr);
+ }
+ return REDISMODULE_OK;
+}
+
static int cmdIsKeyLoaded(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
if(argc != 2){
return RedisModule_WrongArity(ctx);
@@ -171,6 +217,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
}
loaded_event_log = RedisModule_CreateDict(ctx);
+ module_event_log = RedisModule_CreateDict(ctx);
int keySpaceAll = RedisModule_GetKeyspaceNotificationFlagsAll();
@@ -187,6 +234,18 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
return REDISMODULE_ERR;
}
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_MODULE, KeySpace_NotificationModule) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx,"keyspace.notify", cmdNotify,"",0,0,0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx,"keyspace.is_module_key_notified", cmdIsModuleKeyNotified,"",0,0,0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
if (RedisModule_CreateCommand(ctx,"keyspace.is_key_loaded", cmdIsKeyLoaded,"",0,0,0) == REDISMODULE_ERR){
return REDISMODULE_ERR;
}
@@ -219,6 +278,16 @@ int RedisModule_OnUnload(RedisModuleCtx *ctx) {
RedisModule_FreeString(ctx, val);
}
RedisModule_FreeDict(ctx, loaded_event_log);
+ RedisModule_DictIteratorStop(iter);
loaded_event_log = NULL;
+
+ iter = RedisModule_DictIteratorStartC(module_event_log, "^", NULL, 0);
+ while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){
+ RedisModule_FreeString(ctx, val);
+ }
+ RedisModule_FreeDict(ctx, module_event_log);
+ RedisModule_DictIteratorStop(iter);
+ module_event_log = NULL;
+
return REDISMODULE_OK;
}
diff --git a/tests/modules/propagate.c b/tests/modules/propagate.c
index 70cddacbd..ac04d4f9d 100644
--- a/tests/modules/propagate.c
+++ b/tests/modules/propagate.c
@@ -70,6 +70,44 @@ int propagateTestTimerCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int
return REDISMODULE_OK;
}
+/* Timer callback. */
+void timerNestedHandler(RedisModuleCtx *ctx, void *data) {
+ int repl = (long long)data;
+
+ /* The goal is to trigger a module command that calls RM_Replicate
+ * in order to test the MULTI/EXEC structure */
+ RedisModule_Replicate(ctx,"INCRBY","cc","timer-nested-start","1");
+ RedisModuleCallReply *reply = RedisModule_Call(ctx,"propagate-test.nested", repl? "!" : "");
+ RedisModule_FreeCallReply(reply);
+ RedisModule_Replicate(ctx,"INCRBY","cc","timer-nested-end","1");
+}
+
+int propagateTestTimerNestedCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerNestedHandler,(void*)0);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+int propagateTestTimerNestedReplCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerNestedHandler,(void*)1);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
/* The thread entry point. */
void *threadMain(void *arg) {
REDISMODULE_NOT_USED(arg);
@@ -131,6 +169,42 @@ int propagateTestMixedCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int
return REDISMODULE_OK;
}
+int propagateTestNestedCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleCallReply *reply;
+
+ /* This test mixes multiple propagation systems. */
+ reply = RedisModule_Call(ctx, "INCR", "c!", "using-call");
+ RedisModule_FreeCallReply(reply);
+
+ reply = RedisModule_Call(ctx,"propagate-test.simple", "!");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_Replicate(ctx,"INCR","c","counter-3");
+ RedisModule_Replicate(ctx,"INCR","c","counter-4");
+
+ reply = RedisModule_Call(ctx, "INCR", "c!", "after-call");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+int propagateTestIncr(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleCallReply *reply;
+
+ /* This test propagates the module command, not the INCR it executes. */
+ reply = RedisModule_Call(ctx, "INCR", "s", argv[1]);
+ RedisModule_ReplyWithCallReply(ctx,reply);
+ RedisModule_FreeCallReply(reply);
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
@@ -143,6 +217,16 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
"",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer-nested",
+ propagateTestTimerNestedCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer-nested-repl",
+ propagateTestTimerNestedReplCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
if (RedisModule_CreateCommand(ctx,"propagate-test.thread",
propagateTestThreadCommand,
"",1,1,1) == REDISMODULE_ERR)
@@ -158,5 +242,15 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
"",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"propagate-test.nested",
+ propagateTestNestedCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.incr",
+ propagateTestIncr,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
return REDISMODULE_OK;
}
diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl
index 75baf9817..a1bbb2789 100644
--- a/tests/sentinel/tests/00-base.tcl
+++ b/tests/sentinel/tests/00-base.tcl
@@ -9,7 +9,7 @@ if {$::simulate_error} {
}
test "Basic failover works if the master is down" {
- set old_port [RI $master_id tcp_port]
+ set old_port [RPort $master_id]
set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
assert {[lindex $addr 1] == $old_port}
kill_instance redis $master_id
@@ -53,7 +53,7 @@ test "ODOWN is not possible without N (quorum) Sentinels reports" {
foreach_sentinel_id id {
S $id SENTINEL SET mymaster quorum [expr $sentinels+1]
}
- set old_port [RI $master_id tcp_port]
+ set old_port [RPort $master_id]
set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
assert {[lindex $addr 1] == $old_port}
kill_instance redis $master_id
diff --git a/tests/sentinel/tests/01-conf-update.tcl b/tests/sentinel/tests/01-conf-update.tcl
index d45b1b08e..5dca55601 100644
--- a/tests/sentinel/tests/01-conf-update.tcl
+++ b/tests/sentinel/tests/01-conf-update.tcl
@@ -3,7 +3,7 @@
source "../tests/includes/init-tests.tcl"
test "We can failover with Sentinel 1 crashed" {
- set old_port [RI $master_id tcp_port]
+ set old_port [RPort $master_id]
set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
assert {[lindex $addr 1] == $old_port}
diff --git a/tests/sentinel/tests/02-slaves-reconf.tcl b/tests/sentinel/tests/02-slaves-reconf.tcl
index 28964c968..9edf775ec 100644
--- a/tests/sentinel/tests/02-slaves-reconf.tcl
+++ b/tests/sentinel/tests/02-slaves-reconf.tcl
@@ -10,7 +10,7 @@ source "../tests/includes/init-tests.tcl"
proc 02_test_slaves_replication {} {
uplevel 1 {
test "Check that slaves replicate from current master" {
- set master_port [RI $master_id tcp_port]
+ set master_port [RPort $master_id]
foreach_redis_id id {
if {$id == $master_id} continue
if {[instance_is_killed redis $id]} continue
@@ -28,7 +28,7 @@ proc 02_test_slaves_replication {} {
proc 02_crash_and_failover {} {
uplevel 1 {
test "Crash the master and force a failover" {
- set old_port [RI $master_id tcp_port]
+ set old_port [RPort $master_id]
set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
assert {[lindex $addr 1] == $old_port}
kill_instance redis $master_id
diff --git a/tests/sentinel/tests/05-manual.tcl b/tests/sentinel/tests/05-manual.tcl
index ed568aa03..c97a0e53c 100644
--- a/tests/sentinel/tests/05-manual.tcl
+++ b/tests/sentinel/tests/05-manual.tcl
@@ -3,7 +3,7 @@
source "../tests/includes/init-tests.tcl"
test "Manual failover works" {
- set old_port [RI $master_id tcp_port]
+ set old_port [RPort $master_id]
set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
assert {[lindex $addr 1] == $old_port}
catch {S 0 SENTINEL FAILOVER mymaster} reply
diff --git a/tests/sentinel/tests/10-replica-priority.tcl b/tests/sentinel/tests/10-replica-priority.tcl
new file mode 100644
index 000000000..00248a734
--- /dev/null
+++ b/tests/sentinel/tests/10-replica-priority.tcl
@@ -0,0 +1,73 @@
+source "../tests/includes/init-tests.tcl"
+
+test "Check acceptable replica-priority values" {
+ foreach_redis_id id {
+ if {$id == $master_id} continue
+
+ # ensure replica-announced accepts yes and no
+ catch {R $id CONFIG SET replica-announced no} e
+ if {$e ne "OK"} {
+ fail "Unable to set replica-announced to no"
+ }
+ catch {R $id CONFIG SET replica-announced yes} e
+ if {$e ne "OK"} {
+ fail "Unable to set replica-announced to yes"
+ }
+
+ # ensure a random value throws an error
+ catch {R $id CONFIG SET replica-announced 321} e
+ if {$e eq "OK"} {
+ fail "Able to set replica-announced with something else than yes or no (321) whereas it should not be possible"
+ }
+ catch {R $id CONFIG SET replica-announced a3b2c1} e
+ if {$e eq "OK"} {
+ fail "Able to set replica-announced with something else than yes or no (a3b2c1) whereas it should not be possible"
+ }
+
+ # test only the first redis replica, no need to double test
+ break
+ }
+}
+
+proc 10_test_number_of_replicas {n_replicas_expected} {
+ test "Check sentinel replies with $n_replicas_expected replicas" {
+ # ensure sentinels reply with the right number of replicas
+ foreach_sentinel_id id {
+ # retries 40 x 500ms = 20s as SENTINEL_INFO_PERIOD = 10s
+ set len [llength [S $id SENTINEL REPLICAS mymaster]]
+ wait_for_condition 40 500 {
+ [llength [S $id SENTINEL REPLICAS mymaster]] == $n_replicas_expected
+ } else {
+ fail "Sentinel replies with a wrong number of replicas with replica-announced=yes (expected $n_replicas_expected but got $len) on sentinel $id"
+ }
+ }
+ }
+}
+
+proc 10_set_replica_announced {master_id announced n_replicas} {
+ test "Set replica-announced=$announced on $n_replicas replicas" {
+ set i 0
+ foreach_redis_id id {
+ if {$id == $master_id} continue
+ #puts "set replica-announce=$announced on redis #$id"
+ R $id CONFIG SET replica-announced "$announced"
+ incr i
+ if { $n_replicas!="all" && $i >= $n_replicas } { break }
+ }
+ }
+}
+
+# ensure all replicas are announced
+10_set_replica_announced $master_id "yes" "all"
+# ensure all replicas are announced by sentinels
+10_test_number_of_replicas 4
+
+# ensure the first 2 replicas are not announced
+10_set_replica_announced $master_id "no" 2
+# ensure sentinels are not announcing the first 2 replicas that have been set unannounced
+10_test_number_of_replicas 2
+
+# ensure all replicas are announced
+10_set_replica_announced $master_id "yes" "all"
+# ensure all replicas are announced by sentinels again
+10_test_number_of_replicas 4
diff --git a/tests/support/cluster.tcl b/tests/support/cluster.tcl
index fb8c46e75..df4b7f3d0 100644
--- a/tests/support/cluster.tcl
+++ b/tests/support/cluster.tcl
@@ -4,7 +4,7 @@
#
# Example usage:
#
-# set c [redis_cluster 127.0.0.1 6379 127.0.0.1 6380]
+# set c [redis_cluster {127.0.0.1:6379 127.0.0.1:6380}]
# $c set foo
# $c get foo
# $c close
@@ -17,6 +17,7 @@ set ::redis_cluster::id 0
array set ::redis_cluster::startup_nodes {}
array set ::redis_cluster::nodes {}
array set ::redis_cluster::slots {}
+array set ::redis_cluster::tls {}
# List of "plain" commands, which are commands where the sole key is always
# the first argument.
@@ -34,11 +35,14 @@ set ::redis_cluster::plain_commands {
dump bitcount bitpos pfadd pfcount
}
-proc redis_cluster {nodes} {
+# Create a cluster client. The nodes are given as a list of host:port. The TLS
+# parameter (1 or 0) is optional and defaults to the global $::tls.
+proc redis_cluster {nodes {tls -1}} {
set id [incr ::redis_cluster::id]
set ::redis_cluster::startup_nodes($id) $nodes
set ::redis_cluster::nodes($id) {}
set ::redis_cluster::slots($id) {}
+ set ::redis_cluster::tls($id) [expr $tls == -1 ? $::tls : $tls]
set handle [interp alias {} ::redis_cluster::instance$id {} ::redis_cluster::__dispatch__ $id]
$handle refresh_nodes_map
return $handle
@@ -60,9 +64,10 @@ proc ::redis_cluster::__method__refresh_nodes_map {id} {
foreach start_node $::redis_cluster::startup_nodes($id) {
set ip_port [lindex [split $start_node @] 0]
lassign [split $ip_port :] start_host start_port
+ set tls $::redis_cluster::tls($id)
if {[catch {
set r {}
- set r [redis $start_host $start_port 0 $::tls]
+ set r [redis $start_host $start_port 0 $tls]
set nodes_descr [$r cluster nodes]
$r close
} e]} {
@@ -107,7 +112,8 @@ proc ::redis_cluster::__method__refresh_nodes_map {id} {
# Connect to the node
set link {}
- catch {set link [redis $host $port 0 $::tls]}
+ set tls $::redis_cluster::tls($id)
+ catch {set link [redis $host $port 0 $tls]}
# Build this node description as an hash.
set node [dict create \
@@ -161,9 +167,32 @@ proc ::redis_cluster::__method__close {id} {
catch {unset ::redis_cluster::startup_nodes($id)}
catch {unset ::redis_cluster::nodes($id)}
catch {unset ::redis_cluster::slots($id)}
+ catch {unset ::redis_cluster::tls($id)}
catch {interp alias {} ::redis_cluster::instance$id {}}
}
+proc ::redis_cluster::__method__masternode_for_slot {id slot} {
+ # Get the node mapped to this slot.
+ set node_addr [dict get $::redis_cluster::slots($id) $slot]
+ if {$node_addr eq {}} {
+ error "No mapped node for slot $slot."
+ }
+ return [dict get $::redis_cluster::nodes($id) $node_addr]
+}
+
+proc ::redis_cluster::__method__masternode_notfor_slot {id slot} {
+ # Get a node that is not mapped to this slot.
+ set node_addr [dict get $::redis_cluster::slots($id) $slot]
+ set addrs [dict keys $::redis_cluster::nodes($id)]
+ foreach addr [lshuffle $addrs] {
+ set node [dict get $::redis_cluster::nodes($id) $addr]
+ if {$node_addr ne $addr && [dict get $node slaveof] eq "-"} {
+ return $node
+ }
+ }
+ error "Slot $slot is everywhere"
+}
+
proc ::redis_cluster::__dispatch__ {id method args} {
if {[info command ::redis_cluster::__method__$method] eq {}} {
# Get the keys from the command.
@@ -186,10 +215,15 @@ proc ::redis_cluster::__dispatch__ {id method args} {
# Execute the command in the node we think is the slot owner.
set retry 100
+ set asking 0
while {[incr retry -1]} {
if {$retry < 5} {after 100}
set node [dict get $::redis_cluster::nodes($id) $node_addr]
set link [dict get $node link]
+ if {$asking} {
+ $link ASKING
+ set asking 0
+ }
if {[catch {$link $method {*}$args} e]} {
if {$link eq {} || \
[string range $e 0 4] eq {MOVED} || \
@@ -202,6 +236,7 @@ proc ::redis_cluster::__dispatch__ {id method args} {
} elseif {[string range $e 0 2] eq {ASK}} {
# ASK redirection.
set node_addr [lindex $e 2]
+ set asking 1
continue
} else {
# Non redirecting error.
diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl
index 373058daf..4d321c975 100644
--- a/tests/support/redis.tcl
+++ b/tests/support/redis.tcl
@@ -35,6 +35,7 @@ array set ::redis::addr {}
array set ::redis::blocking {}
array set ::redis::deferred {}
array set ::redis::reconnect {}
+array set ::redis::tls {}
array set ::redis::callback {}
array set ::redis::state {} ;# State in non-blocking reply reading
array set ::redis::statestack {} ;# Stack of states, for nested mbulks
@@ -58,7 +59,7 @@ proc redis {{server 127.0.0.1} {port 6379} {defer 0} {tls 0} {tlsoptions {}}} {
set ::redis::blocking($id) 1
set ::redis::deferred($id) $defer
set ::redis::reconnect($id) 0
- set ::redis::tls $tls
+ set ::redis::tls($id) $tls
::redis::redis_reset_state $id
interp alias {} ::redis::redisHandle$id {} ::redis::__dispatch__ $id
}
@@ -83,7 +84,7 @@ proc ::redis::__dispatch__raw__ {id method argv} {
# Reconnect the link if needed.
if {$fd eq {}} {
lassign $::redis::addr($id) host port
- if {$::redis::tls} {
+ if {$::redis::tls($id)} {
set ::redis::fd($id) [::tls::socket $host $port]
} else {
set ::redis::fd($id) [socket $host $port]
@@ -158,6 +159,7 @@ proc ::redis::__method__close {id fd} {
catch {unset ::redis::blocking($id)}
catch {unset ::redis::deferred($id)}
catch {unset ::redis::reconnect($id)}
+ catch {unset ::redis::tls($id)}
catch {unset ::redis::state($id)}
catch {unset ::redis::statestack($id)}
catch {unset ::redis::callback($id)}
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
index 3ff923d7e..e8fa3e6f2 100644
--- a/tests/support/server.tcl
+++ b/tests/support/server.tcl
@@ -253,7 +253,7 @@ proc wait_server_started {config_file stdout pid} {
# Check if the port is actually busy and the server failed
# for this reason.
- if {[regexp {Could not create server TCP} [exec cat $stdout]]} {
+ if {[regexp {Failed listening on port} [exec cat $stdout]]} {
set port_busy 1
break
}
@@ -508,6 +508,7 @@ proc start_server {options {code undefined}} {
set num_tests $::num_tests
if {[catch { uplevel 1 $code } error]} {
set backtrace $::errorInfo
+ set assertion [string match "assertion:*" $error]
# fetch srv back from the server list, in case it was restarted by restart_server (new PID)
set srv [lindex $::servers end]
@@ -519,17 +520,23 @@ proc start_server {options {code undefined}} {
dict set srv "skipleaks" 1
kill_server $srv
- # Print warnings from log
- puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]]
- set warnings [warnings_from_file [dict get $srv "stdout"]]
- if {[string length $warnings] > 0} {
- puts "$warnings"
+ if {$::dump_logs && $assertion} {
+ # if we caught an assertion ($::num_failed isn't incremented yet)
+ # this happens when the test spawns a server and not the other way around
+ dump_server_log $srv
} else {
- puts "(none)"
+ # Print crash report from log
+ set crashlog [crashlog_from_file [dict get $srv "stdout"]]
+ if {[string length $crashlog] > 0} {
+ puts [format "\nLogged crash report (pid %d):" [dict get $srv "pid"]]
+ puts "$crashlog"
+ puts ""
+ }
}
- puts ""
- if {$::durable} {
+ if {!$assertion && $::durable} {
+ # durable is meant to prevent the whole tcl test from exiting on
+ # an exception. an assertion will be caught by the test proc.
set msg [string range $error 10 end]
lappend details $msg
lappend details $backtrace
diff --git a/tests/support/test.tcl b/tests/support/test.tcl
index 39aebe156..29d0cbf41 100644
--- a/tests/support/test.tcl
+++ b/tests/support/test.tcl
@@ -165,6 +165,8 @@ proc test {name code {okpattern undefined} {options undefined}} {
if {[catch {set retval [uplevel 1 $code]} error]} {
set assertion [string match "assertion:*" $error]
if {$assertion || $::durable} {
+ # durable prevents the whole tcl test from exiting on an exception.
+ # an assertion is handled gracefully anyway.
set msg [string range $error 10 end]
lappend details $msg
if {!$assertion} {
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
index 886ef5020..b00aa159a 100644
--- a/tests/support/util.tcl
+++ b/tests/support/util.tcl
@@ -31,7 +31,7 @@ proc zlistAlikeSort {a b} {
# Return all log lines starting with the first line that contains a warning.
# Generally, this will be an assertion error with a stack trace.
-proc warnings_from_file {filename} {
+proc crashlog_from_file {filename} {
set lines [split [exec cat $filename] "\n"]
set matched 0
set logall 0
@@ -506,18 +506,18 @@ proc stop_write_load {handle} {
proc K { x y } { set x }
-# Shuffle a list. From Tcl wiki. Originally from Steve Cohen that improved
-# other versions. Code should be under public domain.
+# Shuffle a list with the Fisher-Yates algorithm.
proc lshuffle {list} {
set n [llength $list]
- while {$n>0} {
+ while {$n>1} {
set j [expr {int(rand()*$n)}]
- lappend slist [lindex $list $j]
incr n -1
- set temp [lindex $list $n]
- set list [lreplace [K $list [set list {}]] $j $j $temp]
+ if {$n==$j} continue
+ set v [lindex $list $j]
+ lset list $j [lindex $list $n]
+ lset list $n $v
}
- return $slist
+ return $list
}
# Execute a background process writing complex data for the specified number
@@ -682,20 +682,83 @@ proc string2printable s {
return $res
}
-# Check that probability of each element are between {min_prop} and {max_prop}.
-proc check_histogram_distribution {res min_prop max_prop} {
+# Calculate the Chi-Square value of a random sample. This value
+# lets us verify the confidence of the random distribution sample.
+# Based on the following wiki:
+# https://en.wikipedia.org/wiki/Chi-square_distribution
+#
+# param res Random sample list
+# return Chi-Square value of the sample
+#
+# x2_value: return value of the chi_square_value function
+# df: degrees of freedom, i.e. the number of independent values minus 1
+#
+# By looking up x2_value and df in a chi-square table, we can
+# determine the confidence of the random sample.
+proc chi_square_value {res} {
unset -nocomplain mydict
foreach key $res {
dict incr mydict $key 1
}
+ set x2_value 0
+ set p [expr [llength $res] / [dict size $mydict]]
foreach key [dict keys $mydict] {
set value [dict get $mydict $key]
- set probability [expr {double($value) / [llength $res]}]
- if {$probability < $min_prop || $probability > $max_prop} {
- return false
+
+ # Aggregate the chi-square value of each element
+ set v [expr {pow($value - $p, 2) / $p}]
+ set x2_value [expr {$x2_value + $v}]
+ }
+
+ return $x2_value
+}
+
+# Subscribe to Pub/Sub channels and consume the confirmation messages
+proc consume_subscribe_messages {client type channels} {
+ set numsub -1
+ set counts {}
+
+ for {set i [llength $channels]} {$i > 0} {incr i -1} {
+ set msg [$client read]
+ assert_equal $type [lindex $msg 0]
+
+ # when receiving subscribe messages the channel names
+ # are ordered. When receiving unsubscribe messages
+ # they are unordered
+ set idx [lsearch -exact $channels [lindex $msg 1]]
+ if {[string match "*unsubscribe" $type]} {
+ assert {$idx >= 0}
+ } else {
+ assert {$idx == 0}
}
+ set channels [lreplace $channels $idx $idx]
+
+ # aggregate the subscription count to return to the caller
+ lappend counts [lindex $msg 2]
}
- return true
+ # we should have received messages for all channels
+ assert {[llength $channels] == 0}
+ return $counts
+}
+
+proc subscribe {client channels} {
+ $client subscribe {*}$channels
+ consume_subscribe_messages $client subscribe $channels
+}
+
+proc unsubscribe {client {channels {}}} {
+ $client unsubscribe {*}$channels
+ consume_subscribe_messages $client unsubscribe $channels
+}
+
+proc psubscribe {client channels} {
+ $client psubscribe {*}$channels
+ consume_subscribe_messages $client psubscribe $channels
}
+
+proc punsubscribe {client {channels {}}} {
+ $client punsubscribe {*}$channels
+ consume_subscribe_messages $client punsubscribe $channels
+} \ No newline at end of file
diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl
index 7c09195a1..3424c044f 100644
--- a/tests/unit/acl.tcl
+++ b/tests/unit/acl.tcl
@@ -113,6 +113,46 @@ start_server {tags {"acl"}} {
set e
} {*NOPERM*channel*}
+ test {Validate subset of channels is prefixed with resetchannels flag} {
+ r ACL setuser hpuser on nopass resetchannels &foo +@all
+
+ # Verify the resetchannels flag is placed before the channel name(s)
+ set users [r ACL LIST]
+ set curruser "hpuser"
+ foreach user [lshuffle $users] {
+ if {[string first $curruser $user] != -1} {
+ assert_equal {user hpuser on nopass resetchannels &foo +@all} $user
+ }
+ }
+
+ # authenticate as hpuser
+ r AUTH hpuser pass
+
+ assert_equal {0} [r PUBLISH foo bar]
+ catch {r PUBLISH bar game} e
+
+ # Falling back to psuser for the below tests
+ r AUTH psuser pspass
+ r ACL deluser hpuser
+ set e
+ } {*NOPERM*channel*}
+
+ test {In transaction queue publish/subscribe/psubscribe to unauthorized channel will fail} {
+ r ACL setuser psuser +multi +discard
+ r MULTI
+ catch {r PUBLISH notexits helloworld} e
+ r DISCARD
+ assert_match {*NOPERM*} $e
+ r MULTI
+ catch {r SUBSCRIBE notexits foo:1} e
+ r DISCARD
+ assert_match {*NOPERM*} $e
+ r MULTI
+ catch {r PSUBSCRIBE notexits:* bar:*} e
+ r DISCARD
+ assert_match {*NOPERM*} $e
+ }
+
test {It's possible to allow subscribing to a subset of channels} {
set rd [redis_deferring_client]
$rd AUTH psuser pspass
@@ -409,6 +449,14 @@ start_server {tags {"acl"}} {
set e
} {*NOAUTH*}
+ test {When default user has no command permission, hello command still works for other users} {
+ r ACL setuser secure-user >supass on +@all
+ r ACL setuser default -@all
+ r HELLO 2 AUTH secure-user supass
+ r ACL setuser default nopass +@all
+ r AUTH default ""
+ }
+
test {ACL HELP should not have unexpected options} {
catch {r ACL help xxx} e
assert_match "*Unknown subcommand or wrong number of arguments*" $e
@@ -437,14 +485,44 @@ exec cp -f tests/assets/user.acl $server_path
start_server [list overrides [list "dir" $server_path "aclfile" "user.acl"]] {
# user alice on allcommands allkeys >alice
# user bob on -@all +@set +acl ~set* >bob
+ # user default on nopass ~* +@all
+
+ test {default: load from include file, can access any channels} {
+ r SUBSCRIBE foo
+ r PSUBSCRIBE bar*
+ r UNSUBSCRIBE
+ r PUNSUBSCRIBE
+ r PUBLISH hello world
+ }
- test "Alice: can excute all command" {
+ test {default: with config acl-pubsub-default allchannels after reset, can access any channels} {
+ r ACL setuser default reset on nopass ~* +@all
+ r SUBSCRIBE foo
+ r PSUBSCRIBE bar*
+ r UNSUBSCRIBE
+ r PUNSUBSCRIBE
+ r PUBLISH hello world
+ }
+
+ test {default: with config acl-pubsub-default resetchannels after reset, can not access any channels} {
+ r CONFIG SET acl-pubsub-default resetchannels
+ r ACL setuser default reset on nopass ~* +@all
+ catch {r SUBSCRIBE foo} e
+ assert_match {*NOPERM*} $e
+ catch {r PSUBSCRIBE bar*} e
+ assert_match {*NOPERM*} $e
+ catch {r PUBLISH hello world} e
+ assert_match {*NOPERM*} $e
+ r CONFIG SET acl-pubsub-default resetchannels
+ }
+
+ test {Alice: can execute all commands} {
r AUTH alice alice
assert_equal "alice" [r acl whoami]
r SET key value
}
- test "Bob: just excute @set and acl command" {
+ test {Bob: just execute @set and acl commands} {
r AUTH bob bob
assert_equal "bob" [r acl whoami]
assert_equal "3" [r sadd set 1 2 3]
@@ -452,7 +530,7 @@ start_server [list overrides [list "dir" $server_path "aclfile" "user.acl"]] {
set e
} {*NOPERM*}
- test "ACL load and save" {
+ test {ACL load and save} {
r ACL setuser eve +get allkeys >eve on
r ACL save
@@ -469,4 +547,85 @@ start_server [list overrides [list "dir" $server_path "aclfile" "user.acl"]] {
catch {r SET key value} e
set e
} {*NOPERM*}
+
+ test {ACL load and save with restricted channels} {
+ r AUTH alice alice
+ r ACL setuser harry on nopass resetchannels &test +@all ~*
+ r ACL save
+
+ # ACL load will free user and kill clients
+ r ACL load
+ catch {r ACL LIST} e
+ assert_match {*I/O error*} $e
+
+ reconnect
+ r AUTH harry anything
+ r publish test bar
+ catch {r publish test1 bar} e
+ r ACL deluser harry
+ set e
+ } {*NOPERM*}
+}
+
+set server_path [tmpdir "resetchannels.acl"]
+exec cp -f tests/assets/nodefaultuser.acl $server_path
+exec cp -f tests/assets/default.conf $server_path
+start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "resetchannels" "aclfile" "nodefaultuser.acl"]] {
+
+ test {Default user has access to all channels irrespective of flag} {
+ set channelinfo [dict get [r ACL getuser default] channels]
+ assert_equal "*" $channelinfo
+ set channelinfo [dict get [r ACL getuser alice] channels]
+ assert_equal "" $channelinfo
+ }
+
+ test {Update acl-pubsub-default, existing users shouldn't get affected} {
+ set channelinfo [dict get [r ACL getuser default] channels]
+ assert_equal "*" $channelinfo
+ r CONFIG set acl-pubsub-default allchannels
+ r ACL setuser mydefault
+ set channelinfo [dict get [r ACL getuser mydefault] channels]
+ assert_equal "*" $channelinfo
+ r CONFIG set acl-pubsub-default resetchannels
+ set channelinfo [dict get [r ACL getuser mydefault] channels]
+ assert_equal "*" $channelinfo
+ }
+
+ test {Single channel is valid} {
+ r ACL setuser onechannel &test
+ set channelinfo [dict get [r ACL getuser onechannel] channels]
+ assert_equal test $channelinfo
+ r ACL deluser onechannel
+ }
+
+ test {Single channel is not valid with allchannels} {
+ r CONFIG set acl-pubsub-default allchannels
+ catch {r ACL setuser onechannel &test} err
+ r CONFIG set acl-pubsub-default resetchannels
+ set err
+ } {*start with an empty list of channels*}
+}
+
+set server_path [tmpdir "resetchannels.acl"]
+exec cp -f tests/assets/nodefaultuser.acl $server_path
+exec cp -f tests/assets/default.conf $server_path
+start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "resetchannels" "aclfile" "nodefaultuser.acl"]] {
+
+ test {Only default user has access to all channels irrespective of flag} {
+ set channelinfo [dict get [r ACL getuser default] channels]
+ assert_equal "*" $channelinfo
+ set channelinfo [dict get [r ACL getuser alice] channels]
+ assert_equal "" $channelinfo
+ }
+}
+
+
+start_server {overrides {user "default on nopass ~* +@all"}} {
+ test {default: load from config file, can access any channels} {
+ r SUBSCRIBE foo
+ r PSUBSCRIBE bar*
+ r UNSUBSCRIBE
+ r PUNSUBSCRIBE
+ r PUBLISH hello world
+ }
}
diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl
index 900856848..459503adc 100644
--- a/tests/unit/expire.tcl
+++ b/tests/unit/expire.tcl
@@ -76,7 +76,7 @@ start_server {tags {"expire"}} {
# This test is very likely to do a false positive if the
# server is under pressure, so if it does not work give it a few more
# chances.
- for {set j 0} {$j < 3} {incr j} {
+ for {set j 0} {$j < 10} {incr j} {
r del x
r setex x 1 somevalue
after 900
@@ -85,6 +85,9 @@ start_server {tags {"expire"}} {
set b [r get x]
if {$a eq {somevalue} && $b eq {}} break
}
+ if {$::verbose} {
+ puts "millisecond expire test attempts: $j"
+ }
list $a $b
} {somevalue {}}
diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl
index 698ed7789..7ce89aa01 100644
--- a/tests/unit/introspection.tcl
+++ b/tests/unit/introspection.tcl
@@ -180,6 +180,26 @@ start_server {tags {"introspection"}} {
}
}
+ test {CONFIG REWRITE handles save properly} {
+ r config set save "3600 1 300 100 60 10000"
+ r config rewrite
+ restart_server 0 true false
+ assert_equal [r config get save] {save {3600 1 300 100 60 10000}}
+
+ r config set save ""
+ r config rewrite
+ restart_server 0 true false
+ assert_equal [r config get save] {save {}}
+
+ start_server {config "minimal.conf"} {
+ assert_equal [r config get save] {save {3600 1 300 100 60 10000}}
+ r config set save ""
+ r config rewrite
+ restart_server 0 true false
+ assert_equal [r config get save] {save {}}
+ }
+ }
+
# Config file at this point is at a wierd state, and includes all
# known keywords. Might be a good idea to avoid adding tests here.
}
diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl
index ed5657860..99321acfe 100644
--- a/tests/unit/maxmemory.tcl
+++ b/tests/unit/maxmemory.tcl
@@ -178,7 +178,7 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline}
set orig_client_buf [s -1 mem_clients_normal]
set orig_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
set orig_used_no_repl [expr {$orig_used - $orig_mem_not_counted_for_evict}]
- set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 20*1024}]
+ set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 32*1024}]
if {$limit_memory==1} {
$master config set maxmemory $limit
diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl
index 293a62e4e..60800bbff 100644
--- a/tests/unit/moduleapi/keyspace_events.tcl
+++ b/tests/unit/moduleapi/keyspace_events.tcl
@@ -67,5 +67,20 @@ tags "modules" {
assert_equal {1} [r get lua]
r get x
} {3}
+
+ test {Test module key space event} {
+ r keyspace.notify x
+ assert_equal {1 x} [r keyspace.is_module_key_notified x]
+ }
+
+ test "Keyspace notifications: module events test" {
+ r config set notify-keyspace-events Kd
+ r del x
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r keyspace.notify x
+ assert_equal {pmessage * __keyspace@9__:x notify} [$rd1 read]
+ $rd1 close
+ }
}
}
diff --git a/tests/unit/moduleapi/propagate.tcl b/tests/unit/moduleapi/propagate.tcl
index adebd37a6..a8c710074 100644
--- a/tests/unit/moduleapi/propagate.tcl
+++ b/tests/unit/moduleapi/propagate.tcl
@@ -2,7 +2,7 @@ set testmodule [file normalize tests/modules/propagate.so]
tags "modules" {
test {Modules can propagate in async and threaded contexts} {
- start_server {} {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
@@ -42,6 +42,59 @@ tags "modules" {
close_replication_stream $repl
}
+ test {module propagates nested ctx case1} {
+ set repl [attach_to_replication_stream]
+
+ $master del timer-nested-start
+ $master del timer-nested-end
+ $master propagate-test.timer-nested
+
+ wait_for_condition 5000 10 {
+ [$replica get timer-nested-end] eq "1"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {select *}
+ {multi}
+ {incrby timer-nested-start 1}
+ {incrby timer-nested-end 1}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates nested ctx case2} {
+ set repl [attach_to_replication_stream]
+
+ $master del timer-nested-start
+ $master del timer-nested-end
+ $master propagate-test.timer-nested-repl
+
+ wait_for_condition 5000 10 {
+ [$replica get timer-nested-end] eq "1"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ # Note the 'after-call' and 'timer-nested-start' propagation below is out of order (known limitation)
+ assert_replication_stream $repl {
+ {select *}
+ {multi}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr after-call}
+ {incr counter-3}
+ {incr counter-4}
+ {incrby timer-nested-start 1}
+ {incrby timer-nested-end 1}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
test {module propagates from thread} {
set repl [attach_to_replication_stream]
@@ -88,6 +141,55 @@ tags "modules" {
close_replication_stream $repl
}
+ test {module propagates from command after good EVAL} {
+ set repl [attach_to_replication_stream]
+
+ assert_equal [ $master eval { return "hello" } 0 ] {hello}
+ $master propagate-test.simple
+ $master propagate-test.mixed
+
+ # Note the 'after-call' propagation below is out of order (known limitation)
+ assert_replication_stream $repl {
+ {select *}
+ {multi}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ {multi}
+ {incr using-call}
+ {incr after-call}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from command after bad EVAL} {
+ set repl [attach_to_replication_stream]
+
+ catch { $master eval { return "hello" } -12 } e
+ assert_equal $e {ERR Number of keys can't be negative}
+ $master propagate-test.simple
+ $master propagate-test.mixed
+
+ # Note the 'after-call' propagation below is out of order (known limitation)
+ assert_replication_stream $repl {
+ {select *}
+ {multi}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ {multi}
+ {incr using-call}
+ {incr after-call}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
test {module propagates from multi-exec} {
set repl [attach_to_replication_stream]
@@ -111,6 +213,31 @@ tags "modules" {
}
close_replication_stream $repl
}
+
+ test {module RM_Call of expired key propagation} {
+ $master debug set-active-expire 0
+
+ $master set k1 900 px 100
+ wait_for_ofs_sync $master $replica
+ after 110
+
+ set repl [attach_to_replication_stream]
+ $master propagate-test.incr k1
+ wait_for_ofs_sync $master $replica
+
+ assert_replication_stream $repl {
+ {select *}
+ {del k1}
+ {propagate-test.incr k1}
+ }
+ close_replication_stream $repl
+
+ assert_equal [$master get k1] 1
+ assert_equal [$master ttl k1] -1
+ assert_equal [$replica get k1] 1
+ assert_equal [$replica ttl k1] -1
+ }
+
assert_equal [s -1 unexpected_error_replies] 0
}
}
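All of the propagate.tcl tests above share one capture pattern; the isolated sketch below shows it with a plain SET, using attach_to_replication_stream, assert_replication_stream and close_replication_stream as the same test-suite helpers (a fake replica recording what the master propagates) and $master as the client already set up by the test:

    set repl [attach_to_replication_stream]
    $master set foo bar
    assert_replication_stream $repl {
        {select *}
        {set foo bar}
    }
    close_replication_stream $repl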
diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl
index 456d3ac82..38a643385 100644
--- a/tests/unit/obuf-limits.tcl
+++ b/tests/unit/obuf-limits.tcl
@@ -31,7 +31,11 @@ start_server {tags {"obuf-limits"}} {
set start_time 0
set time_elapsed 0
while 1 {
- r publish foo bar
+ if {$start_time != 0} {
+ # Slow down the loop once omem has reached the limit.
+ after 10
+ }
+ r publish foo [string repeat "x" 1000]
set clients [split [r client list] "\r\n"]
set c [split [lindex $clients 1] " "]
if {![regexp {omem=([0-9]+)} $c - omem]} break
@@ -57,7 +61,11 @@ start_server {tags {"obuf-limits"}} {
set start_time 0
set time_elapsed 0
while 1 {
- r publish foo bar
+ if {$start_time != 0} {
+ # Slow down the loop once omem has reached the limit.
+ after 10
+ }
+ r publish foo [string repeat "x" 1000]
set clients [split [r client list] "\r\n"]
set c [split [lindex $clients 1] " "]
if {![regexp {omem=([0-9]+)} $c - omem]} break
diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl
index 16ed092be..a6b0d0132 100644
--- a/tests/unit/other.tcl
+++ b/tests/unit/other.tcl
@@ -1,4 +1,4 @@
-start_server {tags {"other"}} {
+start_server {overrides {save ""} tags {"other"}} {
if {$::force_failure} {
# This is used just for test suite development purposes.
test {Failing test} {
@@ -309,6 +309,12 @@ start_server {tags {"other"}} {
populate 4096 "" 1
r bgsave
+ wait_for_condition 10 100 {
+ [s rdb_bgsave_in_progress] eq 1
+ } else {
+ fail "bgsave did not start in time"
+ }
+
r mset k1 v1 k2 v2
# Hash table should not rehash
assert_no_match "*table size: 8192*" [r debug HTSTATS 9]
diff --git a/tests/unit/pendingquerybuf.tcl b/tests/unit/pendingquerybuf.tcl
index bee85db36..ebf17fdf8 100644
--- a/tests/unit/pendingquerybuf.tcl
+++ b/tests/unit/pendingquerybuf.tcl
@@ -25,11 +25,12 @@ start_server {} {
$slave slaveof $master_host $master_port
set _v [prepare_value [expr 32*1024*1024]]
$master set key $_v
- after 2000
- set m_usedmemory [info_memory $master used_memory]
- set s_usedmemory [info_memory $slave used_memory]
- if { $s_usedmemory > $m_usedmemory + 10*1024*1024 } {
- fail "the used_memory of replica is much larger than master. Master:$m_usedmemory Replica:$s_usedmemory"
+ wait_for_ofs_sync $master $slave
+
+ wait_for_condition 50 100 {
+ [info_memory $slave used_memory] <= [info_memory $master used_memory] + 10*1024*1024
+ } else {
+ fail "the used_memory of replica is much larger than master."
}
}
}}
diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl
index 966565ae1..1906805a7 100644
--- a/tests/unit/pubsub.tcl
+++ b/tests/unit/pubsub.tcl
@@ -1,52 +1,4 @@
start_server {tags {"pubsub network"}} {
- proc __consume_subscribe_messages {client type channels} {
- set numsub -1
- set counts {}
-
- for {set i [llength $channels]} {$i > 0} {incr i -1} {
- set msg [$client read]
- assert_equal $type [lindex $msg 0]
-
- # when receiving subscribe messages the channels names
- # are ordered. when receiving unsubscribe messages
- # they are unordered
- set idx [lsearch -exact $channels [lindex $msg 1]]
- if {[string match "*unsubscribe" $type]} {
- assert {$idx >= 0}
- } else {
- assert {$idx == 0}
- }
- set channels [lreplace $channels $idx $idx]
-
- # aggregate the subscription count to return to the caller
- lappend counts [lindex $msg 2]
- }
-
- # we should have received messages for channels
- assert {[llength $channels] == 0}
- return $counts
- }
-
- proc subscribe {client channels} {
- $client subscribe {*}$channels
- __consume_subscribe_messages $client subscribe $channels
- }
-
- proc unsubscribe {client {channels {}}} {
- $client unsubscribe {*}$channels
- __consume_subscribe_messages $client unsubscribe $channels
- }
-
- proc psubscribe {client channels} {
- $client psubscribe {*}$channels
- __consume_subscribe_messages $client psubscribe $channels
- }
-
- proc punsubscribe {client {channels {}}} {
- $client punsubscribe {*}$channels
- __consume_subscribe_messages $client punsubscribe $channels
- }
-
test "Pub/Sub PING" {
set rd1 [redis_deferring_client]
subscribe $rd1 somechannel
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
index c44ec74f5..0efe34cad 100644
--- a/tests/unit/scripting.tcl
+++ b/tests/unit/scripting.tcl
@@ -612,6 +612,71 @@ start_server {tags {"scripting"}} {
assert_equal [r ping] "PONG"
}
+ test {Timedout read-only scripts can be killed by SCRIPT KILL even when they use pcall} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+ $rd eval {local f = function() while 1 do redis.call('ping') end end while 1 do pcall(f) end} 0
+
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 1
+ } else {
+ fail "Can't wait for script to start running"
+ }
+ catch {r ping} e
+ assert_match {BUSY*} $e
+
+ r script kill
+
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 0
+ } else {
+ fail "Can't wait for script to be killed"
+ }
+ assert_equal [r ping] "PONG"
+
+ catch {$rd read} res
+ $rd close
+
+ assert_match {*killed by user*} $res
+ }
+
+ test {Timedout script does not cause a false dead client} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+
+ # sending (in a pipeline):
+ # 1. eval "while 1 do redis.call('ping') end" 0
+ # 2. ping
+ set buf "*3\r\n\$4\r\neval\r\n\$33\r\nwhile 1 do redis.call('ping') end\r\n\$1\r\n0\r\n"
+ append buf "*1\r\n\$4\r\nping\r\n"
+ $rd write $buf
+ $rd flush
+
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 1
+ } else {
+ fail "Can't wait for script to start running"
+ }
+ catch {r ping} e
+ assert_match {BUSY*} $e
+
+ r script kill
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 0
+ } else {
+ fail "Can't wait for script to be killed"
+ }
+ assert_equal [r ping] "PONG"
+
+ catch {$rd read} res
+ assert_match {*killed by user*} $res
+
+ set res [$rd read]
+ assert_match {*PONG*} $res
+
+ $rd close
+ }
+
test {Timedout script link is still usable after Lua returns} {
r config set lua-time-limit 10
r eval {for i=1,100000 do redis.call('ping') end return 'ok'} 0
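The "false dead client" test above hand-rolls the RESP bytes for a two-command pipeline. The hypothetical helper below (not part of the test suite) spells out the framing rule it encodes: an array header *<argc>, then one $<length>-prefixed bulk string per argument, each terminated by CRLF.

    proc format_resp_command {args} {
        # RESP array of bulk strings; lengths are byte counts (the test strings are ASCII).
        set out "*[llength $args]\r\n"
        foreach arg $args {
            append out "\$[string length $arg]\r\n$arg\r\n"
        }
        return $out
    }
    # [format_resp_command eval {while 1 do redis.call('ping') end} 0] followed by
    # [format_resp_command ping] yields the same bytes as the hardcoded $buf above.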
diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl
index e782682e4..eb9dfc65d 100644
--- a/tests/unit/slowlog.tcl
+++ b/tests/unit/slowlog.tcl
@@ -41,6 +41,22 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
assert_equal {foobar} [lindex $e 5]
}
+ test {SLOWLOG - Commands that contain sensitive information are omitted} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ r config set masterauth ""
+ r acl setuser slowlog-test-user
+ r config set slowlog-log-slower-than 0
+ r config set slowlog-log-slower-than 10000
+ set slowlog_resp [r slowlog get]
+
+ # Make sure normal configs work, but the two sensitive
+ # commands are omitted
+ assert_equal 2 [llength $slowlog_resp]
+ assert_equal {slowlog reset} [lindex [lindex [r slowlog get] 1] 3]
+ assert_equal {config set slowlog-log-slower-than 0} [lindex [lindex [r slowlog get] 0] 3]
+ }
+
test {SLOWLOG - Rewritten commands are logged as their original command} {
r config set slowlog-log-slower-than 0
@@ -74,6 +90,22 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
# INCRBYFLOAT is replicated as SET
r INCRBYFLOAT A 1.0
assert_equal {INCRBYFLOAT A 1.0} [lindex [lindex [r slowlog get] 0] 3]
+
+ # blocked BLPOP is replicated as LPOP
+ set rd [redis_deferring_client]
+ $rd blpop l 0
+ wait_for_condition 50 100 {
+ [s blocked_clients] eq {1}
+ } else {
+ fail "Clients are not blocked"
+ }
+ r multi
+ r lpush l foo
+ r slowlog reset
+ r exec
+ $rd read
+ $rd close
+ assert_equal {blpop l 0} [lindex [lindex [r slowlog get] 0] 3]
}
test {SLOWLOG - commands with too many arguments are trimmed} {
diff --git a/tests/unit/tls.tcl b/tests/unit/tls.tcl
index 14e06fcdf..29fe39fbf 100644
--- a/tests/unit/tls.tcl
+++ b/tests/unit/tls.tcl
@@ -139,5 +139,20 @@ start_server {tags {"tls"}} {
$rd PING
$rd close
}
+
+ test {TLS: Working with an encrypted keyfile} {
+ # Create an encrypted version
+ set keyfile [lindex [r config get tls-key-file] 1]
+ set keyfile_encrypted "$keyfile.encrypted"
+ exec -ignorestderr openssl rsa -in $keyfile -out $keyfile_encrypted -aes256 -passout pass:1234 2>/dev/null
+
+ # Using it without a password fails
+ catch {r config set tls-key-file $keyfile_encrypted} e
+ assert_match {*Unable to update TLS*} $e
+
+ # Now use a password
+ r config set tls-key-file-pass 1234
+ r config set tls-key-file $keyfile_encrypted
+ }
}
}
diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl
index 2eea98890..fcf97eed7 100644
--- a/tests/unit/type/hash.tcl
+++ b/tests/unit/type/hash.tcl
@@ -105,8 +105,9 @@ start_server {tags {"hash"}} {
assert_equal [llength $res] 2002
# Test random uniform distribution
+ # df = 9; a chi-square value of 40 corresponds to ~0.00001 probability
set res [r hrandfield myhash -1000]
- assert_equal [check_histogram_distribution $res 0.05 0.15] true
+ assert_lessthan [chi_square_value $res] 40
# 2) Check that all the elements actually belong to the original hash.
foreach {key val} $res {
@@ -199,7 +200,8 @@ start_server {tags {"hash"}} {
}
}
assert_equal $all_ele_return true
- assert_equal [check_histogram_distribution $allkey 0.05 0.15] true
+ # df = 9; a chi-square value of 40 corresponds to ~0.00001 probability
+ assert_lessthan [chi_square_value $allkey] 40
}
}
r config set hash-max-ziplist-value $original_max_value
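The "df = 9" comments introduced above refer to a chi-square goodness-of-fit check: with 10 distinct members the statistic has 9 degrees of freedom, and a value above roughly 40 has a probability around 0.00001 under uniform sampling. A minimal sketch of such a statistic follows, on the assumption that chi_square_value is a test-suite helper along these lines (the real helper may differ, for instance in how it handles field/value pairs when values are returned alongside members):

    proc chi_square_value_sketch {sample} {
        # Pearson chi-square of observed member counts against a uniform expectation.
        set counts [dict create]
        foreach ele $sample { dict incr counts $ele }
        set expected [expr {double([llength $sample]) / [dict size $counts]}]
        set chi 0.0
        foreach {ele observed} $counts {
            set d [expr {$observed - $expected}]
            set chi [expr {$chi + $d * $d / $expected}]
        }
        return $chi
    }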
diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl
index 4eb93a21e..5548ca3a2 100644
--- a/tests/unit/type/set.tcl
+++ b/tests/unit/type/set.tcl
@@ -533,8 +533,9 @@ start_server {
}
# Use negative count (PATH 1).
+ # df = 9; a chi-square value of 40 corresponds to ~0.00001 probability
set res [r srandmember myset -1000]
- assert_equal [check_histogram_distribution $res 0.05 0.15] true
+ assert_lessthan [chi_square_value $res] 40
# Use positive count (both PATH 3 and PATH 4).
foreach size {8 2} {
@@ -547,7 +548,8 @@ start_server {
lappend allkey $ele
}
}
- assert_equal [check_histogram_distribution $allkey 0.05 0.15] true
+ # df = 9; a chi-square value of 40 corresponds to ~0.00001 probability
+ assert_lessthan [chi_square_value $allkey] 40
}
}
}
diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl
index f8de0741d..edc5af6bd 100644
--- a/tests/unit/type/stream-cgroups.tcl
+++ b/tests/unit/type/stream-cgroups.tcl
@@ -373,7 +373,7 @@ start_server {
after 200
set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 1]
assert_equal [llength $reply] 2
- assert_equal [lindex $reply 0] $id1
+ assert_equal [lindex $reply 0] "0-0"
assert_equal [llength [lindex $reply 1]] 1
assert_equal [llength [lindex $reply 1 0]] 2
assert_equal [llength [lindex $reply 1 0 1]] 2
@@ -392,7 +392,7 @@ start_server {
set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 2]
# id1 is self-claimed here but not id2 ('count' was set to 2)
assert_equal [llength $reply] 2
- assert_equal [lindex $reply 0] $id2
+ assert_equal [lindex $reply 0] $id3
assert_equal [llength [lindex $reply 1]] 2
assert_equal [llength [lindex $reply 1 0]] 2
assert_equal [llength [lindex $reply 1 0 1]] 2
@@ -438,22 +438,22 @@ start_server {
set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 2]
assert_equal [llength $reply] 2
set cursor [lindex $reply 0]
- assert_equal $cursor $id2
+ assert_equal $cursor $id3
assert_equal [llength [lindex $reply 1]] 2
assert_equal [llength [lindex $reply 1 0 1]] 2
assert_equal [lindex $reply 1 0 1] {a 1}
# Claim 2 more entries
- set reply [r XAUTOCLAIM mystream mygroup consumer2 10 ($cursor COUNT 2]
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 2]
assert_equal [llength $reply] 2
set cursor [lindex $reply 0]
- assert_equal $cursor $id4
+ assert_equal $cursor $id5
assert_equal [llength [lindex $reply 1]] 2
assert_equal [llength [lindex $reply 1 0 1]] 2
assert_equal [lindex $reply 1 0 1] {c 3}
# Claim last entry
- set reply [r XAUTOCLAIM mystream mygroup consumer2 10 ($cursor COUNT 2]
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 1]
assert_equal [llength $reply] 2
set cursor [lindex $reply 0]
assert_equal $cursor {0-0}
@@ -462,6 +462,10 @@ start_server {
assert_equal [lindex $reply 1 0 1] {e 5}
}
+ test {XAUTOCLAIM COUNT must be > 0} {
+ assert_error "ERR COUNT must be > 0" {r XAUTOCLAIM key group consumer 1 1 COUNT 0}
+ }
+
test {XINFO FULL output} {
r del x
r XADD x 100 a 1
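The reworked assertions above track a change in what XAUTOCLAIM reports as its first reply element: it now behaves as the position from which the next call should resume scanning (which is why the exclusive '(' prefix on the cursor argument is dropped), and it becomes 0-0 once the pending entries list has been scanned to the end. A usage sketch of that reading, inferred from the test rather than from a specification:

    set cursor "-"
    while 1 {
        set reply   [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 2]
        set cursor  [lindex $reply 0]   ;# resume point for the next call
        set claimed [lindex $reply 1]   ;# list of {id {field value ...}} entries
        if {$cursor eq "0-0"} break     ;# whole PEL scanned
    }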
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl
index 2456815f2..96647f778 100644
--- a/tests/unit/type/zset.tcl
+++ b/tests/unit/type/zset.tcl
@@ -41,11 +41,11 @@ start_server {tags {"zset"}} {
assert_error "*not*float*" {r zadd myzset nan abc}
}
- test "ZSET element can't be set to NaN with ZINCRBY" {
+ test "ZSET element can't be set to NaN with ZINCRBY - $encoding" {
assert_error "*not*float*" {r zadd myzset nan abc}
}
- test "ZADD with options syntax error with incomplete pair" {
+ test "ZADD with options syntax error with incomplete pair - $encoding" {
r del ztmp
catch {r zadd ztmp xx 10 x 20} err
set err
@@ -64,14 +64,14 @@ start_server {tags {"zset"}} {
assert {[r zcard ztmp] == 1}
}
- test "ZADD XX returns the number of elements actually added" {
+ test "ZADD XX returns the number of elements actually added - $encoding" {
r del ztmp
r zadd ztmp 10 x
set retval [r zadd ztmp 10 x 20 y 30 z]
assert {$retval == 2}
}
- test "ZADD XX updates existing elements score" {
+ test "ZADD XX updates existing elements score - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
r zadd ztmp xx 5 foo 11 x 21 y 40 zap
@@ -80,7 +80,7 @@ start_server {tags {"zset"}} {
assert {[r zscore ztmp y] == 21}
}
- test "ZADD GT updates existing elements when new scores are greater" {
+ test "ZADD GT updates existing elements when new scores are greater - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
assert {[r zadd ztmp gt ch 5 foo 11 x 21 y 29 z] == 3}
@@ -90,7 +90,7 @@ start_server {tags {"zset"}} {
assert {[r zscore ztmp z] == 30}
}
- test "ZADD LT updates existing elements when new scores are lower" {
+ test "ZADD LT updates existing elements when new scores are lower - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
assert {[r zadd ztmp lt ch 5 foo 11 x 21 y 29 z] == 2}
@@ -100,7 +100,7 @@ start_server {tags {"zset"}} {
assert {[r zscore ztmp z] == 29}
}
- test "ZADD GT XX updates existing elements when new scores are greater and skips new elements" {
+ test "ZADD GT XX updates existing elements when new scores are greater and skips new elements - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
assert {[r zadd ztmp gt xx ch 5 foo 11 x 21 y 29 z] == 2}
@@ -110,7 +110,7 @@ start_server {tags {"zset"}} {
assert {[r zscore ztmp z] == 30}
}
- test "ZADD LT XX updates existing elements when new scores are lower and skips new elements" {
+ test "ZADD LT XX updates existing elements when new scores are lower and skips new elements - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
assert {[r zadd ztmp lt xx ch 5 foo 11 x 21 y 29 z] == 1}
@@ -120,19 +120,19 @@ start_server {tags {"zset"}} {
assert {[r zscore ztmp z] == 29}
}
- test "ZADD XX and NX are not compatible" {
+ test "ZADD XX and NX are not compatible - $encoding" {
r del ztmp
catch {r zadd ztmp xx nx 10 x} err
set err
} {ERR*}
- test "ZADD NX with non existing key" {
+ test "ZADD NX with non existing key - $encoding" {
r del ztmp
r zadd ztmp nx 10 x 20 y 30 z
assert {[r zcard ztmp] == 3}
}
- test "ZADD NX only add new elements without updating old ones" {
+ test "ZADD NX only add new elements without updating old ones - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
assert {[r zadd ztmp nx 11 x 21 y 100 a 200 b] == 2}
@@ -142,73 +142,105 @@ start_server {tags {"zset"}} {
assert {[r zscore ztmp b] == 200}
}
- test "ZADD GT and NX are not compatible" {
+ test "ZADD GT and NX are not compatible - $encoding" {
r del ztmp
catch {r zadd ztmp gt nx 10 x} err
set err
} {ERR*}
- test "ZADD LT and NX are not compatible" {
+ test "ZADD LT and NX are not compatible - $encoding" {
r del ztmp
catch {r zadd ztmp lt nx 10 x} err
set err
} {ERR*}
- test "ZADD LT and GT are not compatible" {
+ test "ZADD LT and GT are not compatible - $encoding" {
r del ztmp
catch {r zadd ztmp lt gt 10 x} err
set err
} {ERR*}
- test "ZADD INCR works like ZINCRBY" {
+ test "ZADD INCR LT/GT replies with nill if score not updated - $encoding" {
+ r del ztmp
+ r zadd ztmp 28 x
+ assert {[r zadd ztmp lt incr 1 x] eq {}}
+ assert {[r zscore ztmp x] == 28}
+ assert {[r zadd ztmp gt incr -1 x] eq {}}
+ assert {[r zscore ztmp x] == 28}
+ }
+
+ test "ZADD INCR LT/GT with inf - $encoding" {
+ r del ztmp
+ r zadd ztmp +inf x -inf y
+
+ assert {[r zadd ztmp lt incr 1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+ assert {[r zadd ztmp gt incr -1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+ assert {[r zadd ztmp lt incr -1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+ assert {[r zadd ztmp gt incr 1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+
+ assert {[r zadd ztmp lt incr 1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ assert {[r zadd ztmp gt incr -1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ assert {[r zadd ztmp lt incr -1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ assert {[r zadd ztmp gt incr 1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ }
+
+ test "ZADD INCR works like ZINCRBY - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
r zadd ztmp INCR 15 x
assert {[r zscore ztmp x] == 25}
}
- test "ZADD INCR works with a single score-elemenet pair" {
+ test "ZADD INCR works with a single score-elemenet pair - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
catch {r zadd ztmp INCR 15 x 10 y} err
set err
} {ERR*}
- test "ZADD CH option changes return value to all changed elements" {
+ test "ZADD CH option changes return value to all changed elements - $encoding" {
r del ztmp
r zadd ztmp 10 x 20 y 30 z
assert {[r zadd ztmp 11 x 21 y 30 z] == 0}
assert {[r zadd ztmp ch 12 x 22 y 30 z] == 2}
}
- test "ZINCRBY calls leading to NaN result in error" {
+ test "ZINCRBY calls leading to NaN result in error - $encoding" {
r zincrby myzset +inf abc
assert_error "*NaN*" {r zincrby myzset -inf abc}
}
- test {ZADD - Variadic version base case} {
+ test {ZADD - Variadic version base case - $encoding} {
r del myzset
list [r zadd myzset 10 a 20 b 30 c] [r zrange myzset 0 -1 withscores]
} {3 {a 10 b 20 c 30}}
- test {ZADD - Return value is the number of actually added items} {
+ test {ZADD - Return value is the number of actually added items - $encoding} {
list [r zadd myzset 5 x 20 b 30 c] [r zrange myzset 0 -1 withscores]
} {1 {x 5 a 10 b 20 c 30}}
- test {ZADD - Variadic version does not add nothing on single parsing err} {
+ test {ZADD - Variadic version does not add anything on single parsing err - $encoding} {
r del myzset
catch {r zadd myzset 10 a 20 b 30.badscore c} e
assert_match {*ERR*not*float*} $e
r exists myzset
} {0}
- test {ZADD - Variadic version will raise error on missing arg} {
+ test {ZADD - Variadic version will raise error on missing arg - $encoding} {
r del myzset
catch {r zadd myzset 10 a 20 b 30 c 40} e
assert_match {*ERR*syntax*} $e
}
- test {ZINCRBY does not work variadic even if shares ZADD implementation} {
+ test {ZINCRBY does not work variadic even if shares ZADD implementation - $encoding} {
r del myzset
catch {r zincrby myzset 10 a 20 b 30 c} e
assert_match {*ERR*wrong*number*arg*} $e
@@ -221,7 +253,7 @@ start_server {tags {"zset"}} {
assert_equal 0 [r zcard zdoesntexist]
}
- test "ZREM removes key after last element is removed" {
+ test "ZREM removes key after last element is removed - $encoding" {
r del ztmp
r zadd ztmp 10 x
r zadd ztmp 20 y
@@ -233,7 +265,7 @@ start_server {tags {"zset"}} {
assert_equal 0 [r exists ztmp]
}
- test "ZREM variadic version" {
+ test "ZREM variadic version - $encoding" {
r del ztmp
r zadd ztmp 10 a 20 b 30 c
assert_equal 2 [r zrem ztmp x y a b k]
@@ -242,7 +274,7 @@ start_server {tags {"zset"}} {
r exists ztmp
} {0}
- test "ZREM variadic version -- remove elements after key deletion" {
+ test "ZREM variadic version -- remove elements after key deletion - $encoding" {
r del ztmp
r zadd ztmp 10 a 20 b 30 c
r zrem ztmp a b c d e f g
@@ -350,7 +382,7 @@ start_server {tags {"zset"}} {
assert_equal 6 [r zscore zset bar]
}
- test "ZINCRBY return value" {
+ test "ZINCRBY return value - $encoding" {
r del ztmp
set retval [r zincrby ztmp 1.0 x]
assert {$retval == 1.0}
@@ -360,7 +392,7 @@ start_server {tags {"zset"}} {
create_zset zset {-inf a 1 b 2 c 3 d 4 e 5 f +inf g}
}
- test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics" {
+ test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics - $encoding" {
create_default_zset
# inclusive range
@@ -412,13 +444,13 @@ start_server {tags {"zset"}} {
assert_equal {} [r zrangebyscore zset (2.4 (2.6]
}
- test "ZRANGEBYSCORE with WITHSCORES" {
+ test "ZRANGEBYSCORE with WITHSCORES - $encoding" {
create_default_zset
assert_equal {b 1 c 2 d 3} [r zrangebyscore zset 0 3 withscores]
assert_equal {d 3 c 2 b 1} [r zrevrangebyscore zset 3 0 withscores]
}
- test "ZRANGEBYSCORE with LIMIT" {
+ test "ZRANGEBYSCORE with LIMIT - $encoding" {
create_default_zset
assert_equal {b c} [r zrangebyscore zset 0 10 LIMIT 0 2]
assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 3]
@@ -430,14 +462,14 @@ start_server {tags {"zset"}} {
assert_equal {} [r zrevrangebyscore zset 10 0 LIMIT 20 10]
}
- test "ZRANGEBYSCORE with LIMIT and WITHSCORES" {
+ test "ZRANGEBYSCORE with LIMIT and WITHSCORES - $encoding" {
create_default_zset
assert_equal {e 4 f 5} [r zrangebyscore zset 2 5 LIMIT 2 3 WITHSCORES]
assert_equal {d 3 c 2} [r zrevrangebyscore zset 5 2 LIMIT 2 3 WITHSCORES]
assert_equal {} [r zrangebyscore zset 2 5 LIMIT 12 13 WITHSCORES]
}
- test "ZRANGEBYSCORE with non-value min or max" {
+ test "ZRANGEBYSCORE with non-value min or max - $encoding" {
assert_error "*not*float*" {r zrangebyscore fooz str 1}
assert_error "*not*float*" {r zrangebyscore fooz 1 str}
assert_error "*not*float*" {r zrangebyscore fooz 1 NaN}
@@ -449,7 +481,7 @@ start_server {tags {"zset"}} {
0 omega}
}
- test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics" {
+ test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics - $encoding" {
create_default_lex_zset
# inclusive range
@@ -478,7 +510,7 @@ start_server {tags {"zset"}} {
assert_equal {} [r zrevrangebylex zset (hill (omega]
}
- test "ZLEXCOUNT advanced" {
+ test "ZLEXCOUNT advanced - $encoding" {
create_default_lex_zset
assert_equal 9 [r zlexcount zset - +]
@@ -494,7 +526,7 @@ start_server {tags {"zset"}} {
assert_equal 1 [r zlexcount zset (maxstring +]
}
- test "ZRANGEBYSLEX with LIMIT" {
+ test "ZRANGEBYSLEX with LIMIT - $encoding" {
create_default_lex_zset
assert_equal {alpha bar} [r zrangebylex zset - \[cool LIMIT 0 2]
assert_equal {bar cool} [r zrangebylex zset - \[cool LIMIT 1 2]
@@ -507,7 +539,7 @@ start_server {tags {"zset"}} {
assert_equal {omega hill great foo} [r zrevrangebylex zset + \[d LIMIT 0 4]
}
- test "ZRANGEBYLEX with invalid lex range specifiers" {
+ test "ZRANGEBYLEX with invalid lex range specifiers - $encoding" {
assert_error "*not*string*" {r zrangebylex fooz foo bar}
assert_error "*not*string*" {r zrangebylex fooz \[foo bar}
assert_error "*not*string*" {r zrangebylex fooz foo \[bar}
@@ -515,7 +547,7 @@ start_server {tags {"zset"}} {
assert_error "*not*string*" {r zrangebylex fooz -x \[bar}
}
- test "ZREMRANGEBYSCORE basics" {
+ test "ZREMRANGEBYSCORE basics - $encoding" {
proc remrangebyscore {min max} {
create_zset zset {1 a 2 b 3 c 4 d 5 e}
assert_equal 1 [r exists zset]
@@ -571,13 +603,13 @@ start_server {tags {"zset"}} {
assert_equal 0 [r exists zset]
}
- test "ZREMRANGEBYSCORE with non-value min or max" {
+ test "ZREMRANGEBYSCORE with non-value min or max - $encoding" {
assert_error "*not*float*" {r zremrangebyscore fooz str 1}
assert_error "*not*float*" {r zremrangebyscore fooz 1 str}
assert_error "*not*float*" {r zremrangebyscore fooz 1 NaN}
}
- test "ZREMRANGEBYRANK basics" {
+ test "ZREMRANGEBYRANK basics - $encoding" {
proc remrangebyrank {min max} {
create_zset zset {1 a 2 b 3 c 4 d 5 e}
assert_equal 1 [r exists zset]
@@ -774,7 +806,7 @@ start_server {tags {"zset"}} {
assert_equal -inf [r zscore zsetinf3 key]
}
- test "$cmd with NaN weights $encoding" {
+ test "$cmd with NaN weights - $encoding" {
r del zsetinf1 zsetinf2
r zadd zsetinf1 1.0 key
@@ -833,7 +865,7 @@ start_server {tags {"zset"}} {
assert_equal {a 1 e 5} [r zrange zsete 0 -1 withscores]
}
- test "ZDIFF fuzzing" {
+ test "ZDIFF fuzzing - $encoding" {
for {set j 0} {$j < 100} {incr j} {
unset -nocomplain s
array set s {}
@@ -1655,8 +1687,9 @@ start_server {tags {"zset"}} {
assert_equal [llength $res] 2002
# Test random uniform distribution
+ # df = 9; a chi-square value of 40 corresponds to ~0.00001 probability
set res [r zrandmember myzset -1000]
- assert_equal [check_histogram_distribution $res 0.05 0.15] true
+ assert_lessthan [chi_square_value $res] 40
# 2) Check that all the elements actually belong to the original zset.
foreach {key val} $res {
@@ -1749,7 +1782,8 @@ start_server {tags {"zset"}} {
}
}
assert_equal $all_ele_return true
- assert_equal [check_histogram_distribution $allkey 0.05 0.15] true
+ # df = 9; a chi-square value of 40 corresponds to ~0.00001 probability
+ assert_lessthan [chi_square_value $allkey] 40
}
}
r config set zset-max-ziplist-value $original_max_value
diff --git a/utils/whatisdoing.sh b/utils/whatisdoing.sh
index e4059caed..68d7f7cca 100755
--- a/utils/whatisdoing.sh
+++ b/utils/whatisdoing.sh
@@ -4,7 +4,7 @@
# Software Watchdog, which provides a similar functionality but in
# a more reliable / easy to use way.
#
-# Check http://redis.io/topics/latency for more information.
+# Check https://redis.io/topics/latency for more information.
#!/bin/bash
nsamples=1