summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/ci.yml14
-rw-r--r--.github/workflows/codeql-analysis.yml4
-rw-r--r--.github/workflows/daily.yml336
-rw-r--r--.github/workflows/external.yml6
-rw-r--r--.github/workflows/spell-check.yml4
-rw-r--r--00-RELEASENOTES87
-rw-r--r--redis.conf14
-rwxr-xr-xruntest-moduleapi1
-rw-r--r--src/acl.c96
-rw-r--r--src/aof.c32
-rw-r--r--src/bitops.c24
-rw-r--r--src/blocked.c140
-rw-r--r--src/call_reply.c15
-rw-r--r--src/call_reply.h1
-rw-r--r--src/cli_common.c27
-rw-r--r--src/cli_common.h3
-rw-r--r--src/cluster.c318
-rw-r--r--src/cluster.h7
-rw-r--r--src/commands.c354
-rw-r--r--src/commands/client-pause.json4
-rw-r--r--src/commands/cluster-shards.json18
-rw-r--r--src/commands/cluster-slaves.json3
-rw-r--r--src/commands/cluster-slots.json7
-rw-r--r--src/commands/command-docs.json3
-rw-r--r--src/commands/command-getkeysandflags.json2
-rw-r--r--src/commands/command-info.json3
-rw-r--r--src/commands/command-list.json3
-rw-r--r--src/commands/command.json2
-rw-r--r--src/commands/flushall.json2
-rw-r--r--src/commands/flushdb.json2
-rw-r--r--src/commands/function-list.json3
-rw-r--r--src/commands/function-load.json16
-rw-r--r--src/commands/function-stats.json3
-rw-r--r--src/commands/latency-doctor.json5
-rw-r--r--src/commands/latency-graph.json5
-rw-r--r--src/commands/latency-histogram.json5
-rw-r--r--src/commands/latency-history.json5
-rw-r--r--src/commands/latency-latest.json5
-rw-r--r--src/commands/latency-reset.json4
-rw-r--r--src/commands/memory-doctor.json4
-rw-r--r--src/commands/memory-malloc-stats.json4
-rw-r--r--src/commands/memory-purge.json6
-rw-r--r--src/commands/memory-stats.json4
-rw-r--r--src/commands/migrate.json2
-rw-r--r--src/commands/module-list.json3
-rw-r--r--src/commands/module-loadex.json53
-rw-r--r--src/commands/monitor.json15
-rw-r--r--src/commands/msetnx.json4
-rw-r--r--src/commands/object-refcount.json3
-rw-r--r--src/commands/psync.json2
-rw-r--r--src/commands/pubsub-channels.json2
-rw-r--r--src/commands/pubsub-shardchannels.json42
-rw-r--r--src/commands/pubsub-shardnumsub.json44
-rw-r--r--src/commands/rename.json7
-rw-r--r--src/commands/replicaof.json2
-rw-r--r--src/commands/script-flush.json2
-rw-r--r--src/commands/sentinel-ckquorum.json6
-rw-r--r--src/commands/sentinel-config.json30
-rw-r--r--src/commands/sentinel-debug.json17
-rw-r--r--src/commands/sentinel-failover.json6
-rw-r--r--src/commands/sentinel-get-master-addr-by-name.json6
-rw-r--r--src/commands/sentinel-info-cache.json9
-rw-r--r--src/commands/sentinel-is-master-down-by-addr.json18
-rw-r--r--src/commands/sentinel-master.json6
-rw-r--r--src/commands/sentinel-monitor.json18
-rw-r--r--src/commands/sentinel-remove.json6
-rw-r--r--src/commands/sentinel-replicas.json6
-rw-r--r--src/commands/sentinel-reset.json6
-rw-r--r--src/commands/sentinel-sentinels.json6
-rw-r--r--src/commands/sentinel-set.json21
-rw-r--r--src/commands/sentinel-simulate-failure.json22
-rw-r--r--src/commands/shutdown.json2
-rw-r--r--src/commands/slaveof.json2
-rw-r--r--src/commands/sort.json2
-rw-r--r--src/commands/sort_ro.json2
-rw-r--r--src/commands/spublish.json86
-rw-r--r--src/commands/ssubscribe.json78
-rw-r--r--src/commands/subscribe.json7
-rw-r--r--src/commands/sunsubscribe.json80
-rw-r--r--src/config.c878
-rw-r--r--src/db.c59
-rw-r--r--src/debug.c31
-rw-r--r--src/defrag.c4
-rw-r--r--src/eval.c1
-rw-r--r--src/functions.c168
-rw-r--r--src/functions.h4
-rw-r--r--src/geohash.c2
-rw-r--r--src/help.h14
-rw-r--r--src/listpack.c2
-rw-r--r--src/localtime.c2
-rw-r--r--src/module.c775
-rw-r--r--src/modules/helloacl.c1
-rw-r--r--src/modules/helloblock.c1
-rw-r--r--src/modules/hellocluster.c1
-rw-r--r--src/modules/hellodict.c1
-rw-r--r--src/modules/hellohook.c1
-rw-r--r--src/modules/hellotimer.c1
-rw-r--r--src/networking.c113
-rw-r--r--src/object.c5
-rw-r--r--src/pqsort.c4
-rw-r--r--src/rax.c4
-rw-r--r--src/rdb.c108
-rw-r--r--src/rdb.h5
-rw-r--r--src/redis-benchmark.c5
-rw-r--r--src/redis-check-rdb.c9
-rw-r--r--src/redis-cli.c228
-rw-r--r--src/redismodule.h53
-rw-r--r--src/replication.c24
-rw-r--r--src/script.c49
-rw-r--r--src/sentinel.c26
-rw-r--r--src/server.c153
-rw-r--r--src/server.h88
-rw-r--r--src/sort.c18
-rw-r--r--src/t_zset.c9
-rw-r--r--src/util.c4
-rw-r--r--src/util.h15
-rw-r--r--src/version.h4
-rw-r--r--src/ziplist.c4
-rw-r--r--tests/cluster/cluster.tcl15
-rw-r--r--tests/cluster/tests/00-base.tcl2
-rw-r--r--tests/cluster/tests/11-manual-takeover.tcl24
-rw-r--r--tests/cluster/tests/12-replica-migration-2.tcl7
-rw-r--r--tests/cluster/tests/27-endpoints.tcl18
-rw-r--r--tests/cluster/tests/28-cluster-shards.tcl185
-rw-r--r--tests/integration/redis-cli.tcl30
-rw-r--r--tests/integration/replication-4.tcl2
-rw-r--r--tests/integration/replication.tcl12
-rw-r--r--tests/modules/Makefile4
-rw-r--r--tests/modules/aclcheck.c20
-rw-r--r--tests/modules/auth.c14
-rw-r--r--tests/modules/basics.c31
-rw-r--r--tests/modules/blockedclient.c2
-rw-r--r--tests/modules/blockonkeys.c13
-rw-r--r--tests/modules/eventloop.c1
-rw-r--r--tests/modules/hooks.c15
-rw-r--r--tests/modules/moduleconfigs.c142
-rw-r--r--tests/modules/moduleconfigstwo.c39
-rw-r--r--tests/sentinel/tests/03-runtime-reconf.tcl156
-rw-r--r--tests/sentinel/tests/07-down-conditions.tcl9
-rw-r--r--tests/sentinel/tests/08-hostname-conf.tcl2
-rw-r--r--tests/sentinel/tests/09-acl-support.tcl15
-rw-r--r--tests/sentinel/tests/includes/init-tests.tcl22
-rw-r--r--tests/sentinel/tests/includes/utils.tcl22
-rw-r--r--tests/support/redis.tcl4
-rw-r--r--tests/support/server.tcl8
-rw-r--r--tests/support/util.tcl4
-rw-r--r--tests/unit/acl-v2.tcl56
-rw-r--r--tests/unit/aofrw.tcl4
-rw-r--r--tests/unit/bitops.tcl6
-rw-r--r--tests/unit/client-eviction.tcl34
-rw-r--r--tests/unit/cluster.tcl147
-rw-r--r--tests/unit/functions.tcl348
-rw-r--r--tests/unit/introspection.tcl1
-rw-r--r--tests/unit/memefficiency.tcl10
-rw-r--r--tests/unit/moduleapi/aclcheck.tcl5
-rw-r--r--tests/unit/moduleapi/auth.tcl16
-rw-r--r--tests/unit/moduleapi/blockedclient.tcl8
-rw-r--r--tests/unit/moduleapi/blockonkeys.tcl32
-rw-r--r--tests/unit/moduleapi/cluster.tcl14
-rw-r--r--tests/unit/moduleapi/hooks.tcl6
-rw-r--r--tests/unit/moduleapi/moduleconfigs.tcl234
-rw-r--r--tests/unit/other.tcl3
-rw-r--r--tests/unit/pause.tcl8
-rw-r--r--tests/unit/replybufsize.tcl8
-rw-r--r--tests/unit/scripting.tcl35
-rw-r--r--tests/unit/shutdown.tcl36
-rw-r--r--tests/unit/type/stream-cgroups.tcl107
-rw-r--r--tests/unit/type/zset.tcl8
-rw-r--r--utils/create-cluster/.gitignore1
-rw-r--r--utils/hashtable/README13
-rw-r--r--utils/hashtable/rehashing.c143
171 files changed, 5495 insertions, 1704 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 073cbe2a2..2b406f74b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,7 +7,7 @@ jobs:
test-ubuntu-latest:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
# Fail build if there are warnings
# build with TLS just for compilation coverage
@@ -22,7 +22,7 @@ jobs:
test-sanitizer-address:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
run: make SANITIZER=address REDIS_CFLAGS='-Werror'
- name: testprep
@@ -36,7 +36,7 @@ jobs:
runs-on: ubuntu-latest
container: debian:oldoldstable
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
run: |
apt-get update && apt-get install -y build-essential
@@ -45,14 +45,14 @@ jobs:
build-macos-latest:
runs-on: macos-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
run: make REDIS_CFLAGS='-Werror'
build-32bit:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
run: |
sudo apt-get update && sudo apt-get install libc6-dev-i386
@@ -61,7 +61,7 @@ jobs:
build-libc-malloc:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
run: make REDIS_CFLAGS='-Werror' MALLOC=libc
@@ -69,7 +69,7 @@ jobs:
runs-on: ubuntu-latest
container: centos:7
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: make
run: |
yum -y install gcc make
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 603a05fdd..ad798886c 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -4,7 +4,7 @@ on:
push:
pull_request:
schedule:
- # run weekly new vulnerability was added to the the database
+ # run weekly new vulnerability was added to the database
- cron: '0 0 * * 0'
jobs:
@@ -20,7 +20,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml
index da266fdce..f524d6523 100644
--- a/.github/workflows/daily.yml
+++ b/.github/workflows/daily.yml
@@ -11,7 +11,7 @@ on:
inputs:
skipjobs:
description: 'jobs to skip (delete the ones you wanna keep, do not leave empty)'
- default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,ubuntu,centos,malloc'
+ default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,centos,malloc'
skiptests:
description: 'tests to skip (delete the ones you wanna keep, do not leave empty)'
default: 'redis,modules,sentinel,cluster,unittest'
@@ -34,8 +34,8 @@ jobs:
test-ubuntu-jemalloc:
runs-on: ubuntu-latest
if: |
- github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'ubuntu')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'ubuntu')
timeout-minutes: 14400
steps:
- name: prep
@@ -44,7 +44,7 @@ jobs:
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipping: ${{github.event.inputs.skipjobs}} and ${{github.event.inputs.skiptests}}"
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -71,8 +71,8 @@ jobs:
test-ubuntu-libc-malloc:
runs-on: ubuntu-latest
if: |
- github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'malloc')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 14400
steps:
- name: prep
@@ -80,7 +80,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -104,8 +104,8 @@ jobs:
test-ubuntu-no-malloc-usable-size:
runs-on: ubuntu-latest
if: |
- github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'malloc')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'malloc')
timeout-minutes: 14400
steps:
- name: prep
@@ -113,7 +113,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -137,8 +137,8 @@ jobs:
test-ubuntu-32bit:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, '32bit')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, '32bit')
timeout-minutes: 14400
steps:
- name: prep
@@ -146,7 +146,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -177,8 +177,8 @@ jobs:
test-ubuntu-tls:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'tls')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'tls')
timeout-minutes: 14400
steps:
- name: prep
@@ -186,7 +186,47 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: make
+ run: |
+ make BUILD_TLS=yes REDIS_CFLAGS='-Werror'
+ - name: testprep
+ run: |
+ sudo apt-get install tcl8.6 tclx tcl-tls
+ ./utils/gen-test-certs.sh
+ - name: test
+ if: true && !contains(github.event.inputs.skiptests, 'redis')
+ run: |
+ ./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
+ - name: module api test
+ if: true && !contains(github.event.inputs.skiptests, 'modules')
+ run: |
+ ./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
+ - name: sentinel tests
+ if: true && !contains(github.event.inputs.skiptests, 'sentinel')
+ run: |
+ ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
+ - name: cluster tests
+ if: true && !contains(github.event.inputs.skiptests, 'cluster')
+ run: |
+ ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
+
+ test-ubuntu-tls-no-tls:
+ runs-on: ubuntu-latest
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'tls')
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -200,29 +240,25 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: |
- ./runtest --accurate --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
- ./runtest-moduleapi --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
- ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
- ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
./runtest-cluster ${{github.event.inputs.cluster_test_args}}
test-ubuntu-io-threads:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'iothreads')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'iothreads')
timeout-minutes: 14400
steps:
- name: prep
@@ -230,7 +266,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -246,11 +282,11 @@ jobs:
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes ${{github.event.inputs.cluster_test_args}}
- test-valgrind:
+ test-valgrind-test:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'valgrind')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'redis')
timeout-minutes: 14400
steps:
- name: prep
@@ -258,7 +294,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -271,6 +307,29 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
+
+ test-valgrind-misc:
+ runs-on: ubuntu-latest
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: make
+ run: make valgrind REDIS_CFLAGS='-Werror -DREDIS_TEST'
+ - name: testprep
+ run: |
+ sudo apt-get update
+ sudo apt-get install tcl8.6 tclx valgrind -y
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
@@ -280,11 +339,11 @@ jobs:
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
- test-valgrind-no-malloc-usable-size:
+ test-valgrind-no-malloc-usable-size-test:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'valgrind')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'redis')
timeout-minutes: 14400
steps:
- name: prep
@@ -292,12 +351,12 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
- run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE" REDIS_CFLAGS='-Werror'
+ run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DREDIS_TEST" REDIS_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get update
@@ -305,15 +364,43 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
+
+ test-valgrind-no-malloc-usable-size-misc:
+ runs-on: ubuntu-latest
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest'))
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: make
+ run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DREDIS_TEST" REDIS_CFLAGS='-Werror'
+ - name: testprep
+ run: |
+ sudo apt-get update
+ sudo apt-get install tcl8.6 tclx valgrind -y
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
+ - name: unittest
+ if: true && !contains(github.event.inputs.skiptests, 'unittest')
+ run: |
+ valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
+ if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-sanitizer-address:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'sanitizer')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 14400
strategy:
matrix:
@@ -326,7 +413,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -355,8 +442,8 @@ jobs:
test-sanitizer-undefined:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'sanitizer')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'sanitizer')
timeout-minutes: 14400
strategy:
matrix:
@@ -369,7 +456,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -398,8 +485,8 @@ jobs:
test-centos7-jemalloc:
runs-on: ubuntu-latest
if: |
- github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis') && !contains(github.event.inputs.skipjobs, 'centos')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'centos')
container: centos:7
timeout-minutes: 14400
steps:
@@ -408,7 +495,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -434,8 +521,8 @@ jobs:
test-centos7-tls:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'tls')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'tls')
container: centos:7
timeout-minutes: 14400
steps:
@@ -444,7 +531,50 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: make
+ run: |
+ yum -y install centos-release-scl epel-release
+ yum -y install devtoolset-7 openssl-devel openssl
+ scl enable devtoolset-7 "make BUILD_TLS=yes REDIS_CFLAGS='-Werror'"
+ - name: testprep
+ run: |
+ yum -y install tcl tcltls tclx
+ ./utils/gen-test-certs.sh
+ - name: test
+ if: true && !contains(github.event.inputs.skiptests, 'redis')
+ run: |
+ ./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
+ - name: module api test
+ if: true && !contains(github.event.inputs.skiptests, 'modules')
+ run: |
+ ./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
+ - name: sentinel tests
+ if: true && !contains(github.event.inputs.skiptests, 'sentinel')
+ run: |
+ ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
+ - name: cluster tests
+ if: true && !contains(github.event.inputs.skiptests, 'cluster')
+ run: |
+ ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
+
+ test-centos7-tls-no-tls:
+ runs-on: ubuntu-latest
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'tls')
+ container: centos:7
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -460,29 +590,25 @@ jobs:
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: |
- ./runtest --accurate --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
- ./runtest-moduleapi --verbose --tls --dump-logs ${{github.event.inputs.test_args}}
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
- ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}}
./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: |
- ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}}
./runtest-cluster ${{github.event.inputs.cluster_test_args}}
test-macos-latest:
runs-on: macos-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'macos')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'macos') && !(contains(github.event.inputs.skiptests, 'redis') && contains(github.event.inputs.skiptests, 'modules'))
timeout-minutes: 14400
steps:
- name: prep
@@ -490,7 +616,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -502,9 +628,47 @@ jobs:
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --no-latency --dump-logs ${{github.event.inputs.test_args}}
+
+ test-macos-latest-sentinel:
+ runs-on: macos-latest
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'sentinel')
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: make
+ run: make REDIS_CFLAGS='-Werror'
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
+
+ test-macos-latest-cluster:
+ runs-on: macos-latest
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'cluster')
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: make
+ run: make REDIS_CFLAGS='-Werror'
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
@@ -512,8 +676,8 @@ jobs:
test-freebsd:
runs-on: macos-10.15
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'freebsd')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'freebsd') && !(contains(github.event.inputs.skiptests, 'redis') && contains(github.event.inputs.skiptests, 'modules'))
timeout-minutes: 14400
steps:
- name: prep
@@ -521,7 +685,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -536,14 +700,66 @@ jobs:
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq redis ; then ./runtest --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq modules ; then MAKE=gmake ./runtest-moduleapi --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;
+
+ test-freebsd-sentinel:
+ runs-on: macos-10.15
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'sentinel')
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: test
+ uses: vmactions/freebsd-vm@v0.1.6
+ with:
+ usesh: true
+ sync: rsync
+ copyback: false
+ prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
+ run: >
+ gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq sentinel ; then ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;
+
+ test-freebsd-cluster:
+ runs-on: macos-10.15
+ if: |
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'cluster')
+ timeout-minutes: 14400
+ steps:
+ - name: prep
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
+ echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ repository: ${{ env.GITHUB_REPOSITORY }}
+ ref: ${{ env.GITHUB_HEAD_REF }}
+ - name: test
+ uses: vmactions/freebsd-vm@v0.1.6
+ with:
+ usesh: true
+ sync: rsync
+ copyback: false
+ prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
+ run: >
+ gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq cluster ; then ./runtest-cluster ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;
test-alpine-jemalloc:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'alpine')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'alpine')
container: alpine:latest
steps:
- name: prep
@@ -551,7 +767,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
@@ -577,8 +793,8 @@ jobs:
test-alpine-libc-malloc:
runs-on: ubuntu-latest
if: |
- (github.event_name == 'workflow_dispatch' ||
- (github.event_name == 'schedule' && github.repository == 'redis/redis')) && !contains(github.event.inputs.skipjobs, 'alpine')
+ (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
+ !contains(github.event.inputs.skipjobs, 'alpine')
container: alpine:latest
steps:
- name: prep
@@ -586,7 +802,7 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
diff --git a/.github/workflows/external.yml b/.github/workflows/external.yml
index b8671f43a..fc03af06a 100644
--- a/.github/workflows/external.yml
+++ b/.github/workflows/external.yml
@@ -12,7 +12,7 @@ jobs:
if: github.event_name != 'schedule' || github.repository == 'redis/redis'
timeout-minutes: 14400
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Build
run: make REDIS_CFLAGS=-Werror
- name: Start redis-server
@@ -36,7 +36,7 @@ jobs:
if: github.event_name != 'schedule' || github.repository == 'redis/redis'
timeout-minutes: 14400
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Build
run: make REDIS_CFLAGS=-Werror
- name: Start redis-server
@@ -63,7 +63,7 @@ jobs:
if: github.event_name != 'schedule' || github.repository == 'redis/redis'
timeout-minutes: 14400
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Build
run: make REDIS_CFLAGS=-Werror
- name: Start redis-server
diff --git a/.github/workflows/spell-check.yml b/.github/workflows/spell-check.yml
index ac9cf2532..533607412 100644
--- a/.github/workflows/spell-check.yml
+++ b/.github/workflows/spell-check.yml
@@ -16,10 +16,10 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: pip cache
- uses: actions/cache@v2
+ uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
diff --git a/00-RELEASENOTES b/00-RELEASENOTES
index 2caa0492d..eb24e0baf 100644
--- a/00-RELEASENOTES
+++ b/00-RELEASENOTES
@@ -1,5 +1,87 @@
================================================================================
-Redis 7.0 RC1 Released Mon Feb 28 12:00:00 IST 2022
+Redis 7.0 RC3 Released Tue Apr 5 12:00:00 IST 2022
+================================================================================
+
+Upgrade urgency LOW: This is another Release Candidate of Redis 7.0.
+
+New Features
+============
+
+New administrative and introspection commands and command arguments
+-------------------------------------------------------------------
+
+* CLUSTER SHARDS command deprecates CLUSTER SLOTS (#10293)
+
+
+Potentially Breaking Changes
+============================
+
+* CONFIG GET response returned in a non-deterministic order.
+ It's possible that a client was relying on configs order (#10323)
+* SORT / SORT_RO commands reject keys access patterns in GET and BY if ACL
+ doesn't grant the command full keyspace access (#10340)
+* FUNCTION LOAD command introduced in 7.0-RC1 was stripped of the ENGINE, and
+ NAME arguments which are now part of the script itself. The DESCRIPTION
+ argument was completely removed (#10500)
+* Set disable-thp config to be immutable (#10409)
+
+
+Performance and resource utilization improvements
+=================================================
+
+* Optimize performance and memory usage on replicas (#10413)
+* A faster and more robust code of zslRandomLevel using RAND_MAX (#5539)
+
+
+Changes in CLI tools
+====================
+
+* redis-cli: Use exit code 1 on error (#10468)
+* redis-cli: Do DNS lookup before sending CLUSTER MEET (#10436)
+* redis-benchmark: Fix --cluster with IPv6. (#10393)
+* redis-cli: Better --json Unicode support and --quoted-json (#10286)
+
+
+INFO fields and introspection changes
+=====================================
+
+* MEMORY STATS: Show cluster.links memory usage (#10302)
+
+
+Module API changes
+==================
+
+* APIs for exposing module configs to config file and CONFIG command (#10285)
+* Add an event notifying about configuration changes (#10311)
+* Add API for redacting command arguments from SLOWLOG and MONITOR (#10425)
+* RM_Call: new flags for script mode compatibility, no writes, and error replies (#10372)
+
+
+Bug Fixes
+=========
+
+* Sentinel: Fix no reconnect after auth-pass is changed (#10400)
+* Cluster: Fix race condition: Turn into replica on SETSLOT (#10489, #10381)
+* XREADGROUP: Unblock client when the stream key is deleted (#10306)
+
+
+Fixes for issue in previous release candidates of Redis 7.0
+-----------------------------------------------------------
+
+* ACL DRYRUN does not validate the verified command args. (#10405)
+* ACL DRYRUN returns the tested common permission error (#10359)
+* Incorrect parsing of hostname information from nodes.conf (#10435)
+* BITSET and BITFIELD SET should propagate even if just length changed (#10459)
+* SHUTDOWN, Fix a possible crash when the shutdown was aborted (#10440)
+* Script should not allow may-replicate commands when client pause write (#10364)
+* Optimization tracking memory usage from i/o threads. (#10401)
+* Initialize help when using redis-cli help or redis-cli ? (#10382)
+* Dismiss COW of client output buffer now that it's dynamic (#10371)
+* Fix memory corruption when EVAL fails before being processed (#10519)
+
+
+================================================================================
+Redis 7.0 RC2 Released Mon Feb 28 12:00:00 IST 2022
================================================================================
Upgrade urgency LOW: This is another Release Candidate of Redis 7.0.
@@ -439,6 +521,7 @@ A special thank you for the amount of work put into this release by:
- Viktor Söderqvist
- Wang Yuan
- Harkrishn Patro
+- Nick Chun
- Ozan Tezcan
- Wen Hui
- Huang Zhw
@@ -449,8 +532,10 @@ A special thank you for the amount of work put into this release by:
- Itamar Haber
- Zhao Zhao
- Itay Perry
+- Moti Cohen
- Ning Sun
- zhugezy
+- Ran Shidlansik
- menwen
- Andy Pan
diff --git a/redis.conf b/redis.conf
index d018c8824..34b755718 100644
--- a/redis.conf
+++ b/redis.conf
@@ -126,7 +126,7 @@ protected-mode yes
#
# no - Block for any connection (remain immutable)
# yes - Allow for any connection (no protection)
-# local - Allow only for local local connections. Ones originating from the
+# local - Allow only for local connections. Ones originating from the
# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets.
#
# enable-protected-configs no
@@ -627,7 +627,7 @@ repl-diskless-sync-max-replicas 0
#
# In many cases the disk is slower than the network, and storing and loading
# the RDB file may increase replication time (and even increase the master's
-# Copy on Write memory and salve buffers).
+# Copy on Write memory and replica buffers).
# However, parsing the RDB file directly from the socket may mean that we have
# to flush the contents of the current database before the full rdb was
# received. For this reason we have the following options:
@@ -1224,7 +1224,7 @@ replica-lazy-flush no
lazyfree-lazy-user-del no
-# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
+# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
# commands. When neither flag is passed, this directive will be used to determine
# if the data should be deleted asynchronously.
@@ -1287,7 +1287,7 @@ lazyfree-lazy-user-flush no
# attempt to have background child processes killed before all others, and
# replicas killed before masters.
#
-# Redis supports three options:
+# Redis supports these options:
#
# no: Don't make changes to oom-score-adj (default).
# yes: Alias to "relative" see below.
@@ -1640,7 +1640,7 @@ aof-timestamp-enabled no
# cluster-replica-no-failover no
# This option, when set to yes, allows nodes to serve read traffic while the
-# the cluster is in a down state, as long as it believes it owns the slots.
+# cluster is in a down state, as long as it believes it owns the slots.
#
# This is useful for two cases. The first case is for when an application
# doesn't require consistency of data during node failures or network partitions.
@@ -1958,7 +1958,7 @@ activerehashing yes
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
-# replica -> replica clients
+# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
@@ -2164,7 +2164,7 @@ rdb-save-incremental-fsync yes
# defragmentation process. If you are not sure about what they mean it is
# a good idea to leave the defaults untouched.
-# Enabled active defragmentation
+# Active defragmentation is disabled by default
# activedefrag no
# Minimum amount of fragmentation waste to start active defrag
diff --git a/runtest-moduleapi b/runtest-moduleapi
index a3aab1f7a..38c0d5434 100755
--- a/runtest-moduleapi
+++ b/runtest-moduleapi
@@ -20,6 +20,7 @@ $TCLSH tests/test_helper.tcl \
--single unit/moduleapi/fork \
--single unit/moduleapi/testrdb \
--single unit/moduleapi/infotest \
+--single unit/moduleapi/moduleconfigs \
--single unit/moduleapi/infra \
--single unit/moduleapi/propagate \
--single unit/moduleapi/hooks \
diff --git a/src/acl.c b/src/acl.c
index 7399ded74..0054402b3 100644
--- a/src/acl.c
+++ b/src/acl.c
@@ -120,10 +120,7 @@ typedef struct {
* understand if the command can be executed. */
uint64_t allowed_commands[USER_COMMAND_BITS_COUNT/64];
/* allowed_firstargs is used by ACL rules to block access to a command unless a
- * specific argv[1] is given (or argv[2] in case it is applied on a sub-command).
- * For example, a user can use the rule "-select +select|0" to block all
- * SELECT commands, except "SELECT 0".
- * And for a sub-command: "+config -config|set +config|set|loglevel"
+ * specific argv[1] is given.
*
* For each command ID (corresponding to the command bit set in allowed_commands),
* This array points to an array of SDS strings, terminated by a NULL pointer,
@@ -1531,6 +1528,37 @@ static int ACLSelectorCheckKey(aclSelector *selector, const char *key, int keyle
return ACL_DENIED_KEY;
}
+/* Checks if the provided selector has access specified in flags
+ * to all keys in the keyspace. For example, CMD_KEY_READ access requires either
+ * '%R~*', '~*', or allkeys to be granted to the selector. Returns 1 if all
+ * the access flags are satisfied with this selector or 0 otherwise.
+ */
+static int ACLSelectorHasUnrestrictedKeyAccess(aclSelector *selector, int flags) {
+ /* The selector can access any key */
+ if (selector->flags & SELECTOR_FLAG_ALLKEYS) return 1;
+
+ listIter li;
+ listNode *ln;
+ listRewind(selector->patterns,&li);
+
+ int access_flags = 0;
+ if (flags & CMD_KEY_ACCESS) access_flags |= ACL_READ_PERMISSION;
+ if (flags & CMD_KEY_INSERT) access_flags |= ACL_WRITE_PERMISSION;
+ if (flags & CMD_KEY_DELETE) access_flags |= ACL_WRITE_PERMISSION;
+ if (flags & CMD_KEY_UPDATE) access_flags |= ACL_WRITE_PERMISSION;
+
+    /* Check if any pattern with the required access flags grants unrestricted ('*') access. */
+ while((ln = listNext(&li))) {
+ keyPattern *pattern = listNodeValue(ln);
+ if ((pattern->flags & access_flags) != access_flags)
+ continue;
+ if (!strcmp(pattern->pattern,"*")) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
/* Checks a channel against a provided list of channels. The is_pattern
* argument should only be used when subscribing (not when publishing)
* and controls whether the input channel is evaluated as a channel pattern
@@ -1675,6 +1703,39 @@ int ACLUserCheckKeyPerm(user *u, const char *key, int keylen, int flags) {
return ACL_DENIED_KEY;
}
+/* Checks if the user can execute the given command with the added restriction
+ * it must also have the access specified in flags to all keys in the keyspace.
+ * For example, CMD_KEY_READ access requires either '%R~*', '~*', or allkeys to be
+ * granted in addition to the access required by the command. Returns 1
+ * if the user has access or 0 otherwise.
+ */
+int ACLUserCheckCmdWithUnrestrictedKeyAccess(user *u, struct redisCommand *cmd, robj **argv, int argc, int flags) {
+ listIter li;
+ listNode *ln;
+ int local_idxptr;
+
+ /* If there is no associated user, the connection can run anything. */
+ if (u == NULL) return 1;
+
+ /* For multiple selectors, we cache the key result in between selector
+ * calls to prevent duplicate lookups. */
+ aclKeyResultCache cache;
+ initACLKeyResultCache(&cache);
+
+ /* Check each selector sequentially */
+ listRewind(u->selectors,&li);
+ while((ln = listNext(&li))) {
+ aclSelector *s = (aclSelector *) listNodeValue(ln);
+ int acl_retval = ACLSelectorCheckCmd(s, cmd, argv, argc, &local_idxptr, &cache);
+ if (acl_retval == ACL_OK && ACLSelectorHasUnrestrictedKeyAccess(s, flags)) {
+ cleanupACLKeyResultCache(&cache);
+ return 1;
+ }
+ }
+ cleanupACLKeyResultCache(&cache);
+ return 0;
+}
+
/* Check if the channel can be accessed by the client according to
* the ACLs associated with the specified user.
*
@@ -2411,6 +2472,22 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username
}
}
+const char* getAclErrorMessage(int acl_res) {
+ /* Notice that a variant of this code also exists on aclCommand so
+ * it also needs to be updated when changed. */
+ switch (acl_res) {
+ case ACL_DENIED_CMD:
+ return "can't run this command or subcommand";
+ case ACL_DENIED_KEY:
+ return "can't access at least one of the keys mentioned in the command arguments";
+ case ACL_DENIED_CHANNEL:
+ return "can't publish to the channel mentioned in the command";
+ default:
+ return "lacking the permissions for the command";
+ }
+ serverPanic("Reached deadcode on getAclErrorMessage");
+}
+
/* =============================================================================
* ACL related commands
* ==========================================================================*/
@@ -2793,13 +2870,22 @@ setuser_cleanup:
return;
}
+ if ((cmd->arity > 0 && cmd->arity != c->argc-3) ||
+ (c->argc-3 < -cmd->arity))
+ {
+ addReplyErrorFormat(c,"wrong number of arguments for '%s' command", cmd->fullname);
+ return;
+ }
+
int idx;
int result = ACLCheckAllUserCommandPerm(u, cmd, c->argv + 3, c->argc - 3, &idx);
+ /* Notice that a variant of this code also exists on getAclErrorMessage so
+ * it also needs to be updated when changed. */
if (result != ACL_OK) {
sds err = sdsempty();
if (result == ACL_DENIED_CMD) {
err = sdscatfmt(err, "This user has no permissions to run "
- "the '%s' command", c->cmd->fullname);
+ "the '%s' command", cmd->fullname);
} else if (result == ACL_DENIED_KEY) {
err = sdscatfmt(err, "This user has no permissions to access "
"the '%s' key", c->argv[idx + 3]->ptr);
diff --git a/src/aof.c b/src/aof.c
index 9d4587781..39d452390 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -813,10 +813,10 @@ int openNewIncrAofForAppend(void) {
* AOFs has not reached the limit threshold.
* */
#define AOF_REWRITE_LIMITE_THRESHOLD 3
-#define AOF_REWRITE_LIMITE_NAX_MINUTES 60 /* 1 hour */
+#define AOF_REWRITE_LIMITE_MAX_MINUTES 60 /* 1 hour */
int aofRewriteLimited(void) {
int limit = 0;
- static int limit_deley_minutes = 0;
+ static int limit_delay_minutes = 0;
static time_t next_rewrite_time = 0;
unsigned long incr_aof_num = listLength(server.aof_manifest->incr_aof_list);
@@ -824,25 +824,25 @@ int aofRewriteLimited(void) {
if (server.unixtime < next_rewrite_time) {
limit = 1;
} else {
- if (limit_deley_minutes == 0) {
+ if (limit_delay_minutes == 0) {
limit = 1;
- limit_deley_minutes = 1;
+ limit_delay_minutes = 1;
} else {
- limit_deley_minutes *= 2;
+ limit_delay_minutes *= 2;
}
- if (limit_deley_minutes > AOF_REWRITE_LIMITE_NAX_MINUTES) {
- limit_deley_minutes = AOF_REWRITE_LIMITE_NAX_MINUTES;
+ if (limit_delay_minutes > AOF_REWRITE_LIMITE_MAX_MINUTES) {
+ limit_delay_minutes = AOF_REWRITE_LIMITE_MAX_MINUTES;
}
- next_rewrite_time = server.unixtime + limit_deley_minutes * 60;
+ next_rewrite_time = server.unixtime + limit_delay_minutes * 60;
serverLog(LL_WARNING,
"Background AOF rewrite has repeatedly failed %ld times and triggered the limit, will retry in %d minutes",
- incr_aof_num, limit_deley_minutes);
+ incr_aof_num, limit_delay_minutes);
}
} else {
- limit_deley_minutes = 0;
+ limit_delay_minutes = 0;
next_rewrite_time = 0;
}
@@ -2142,19 +2142,9 @@ static int rewriteFunctions(rio *aof) {
dictEntry *entry = NULL;
while ((entry = dictNext(iter))) {
functionLibInfo *li = dictGetVal(entry);
- if (li->desc) {
- if (rioWrite(aof, "*7\r\n", 4) == 0) goto werr;
- } else {
- if (rioWrite(aof, "*5\r\n", 4) == 0) goto werr;
- }
+ if (rioWrite(aof, "*3\r\n", 4) == 0) goto werr;
char function_load[] = "$8\r\nFUNCTION\r\n$4\r\nLOAD\r\n";
if (rioWrite(aof, function_load, sizeof(function_load) - 1) == 0) goto werr;
- if (rioWriteBulkString(aof, li->ei->name, sdslen(li->ei->name)) == 0) goto werr;
- if (rioWriteBulkString(aof, li->name, sdslen(li->name)) == 0) goto werr;
- if (li->desc) {
- if (rioWriteBulkString(aof, "description", 11) == 0) goto werr;
- if (rioWriteBulkString(aof, li->desc, sdslen(li->desc)) == 0) goto werr;
- }
if (rioWriteBulkString(aof, li->code, sdslen(li->code)) == 0) goto werr;
}
dictReleaseIterator(iter);
diff --git a/src/bitops.c b/src/bitops.c
index 14bcc2371..8a6dee44d 100644
--- a/src/bitops.c
+++ b/src/bitops.c
@@ -478,19 +478,21 @@ int getBitfieldTypeFromArgument(client *c, robj *o, int *sign, int *bits) {
* so that the 'maxbit' bit can be addressed. The object is finally
* returned. Otherwise if the key holds a wrong type NULL is returned and
* an error is sent to the client. */
-robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *created) {
+robj *lookupStringForBitCommand(client *c, uint64_t maxbit, int *dirty) {
size_t byte = maxbit >> 3;
robj *o = lookupKeyWrite(c->db,c->argv[1]);
if (checkType(c,o,OBJ_STRING)) return NULL;
+ if (dirty) *dirty = 0;
if (o == NULL) {
- if (created) *created = 1;
o = createObject(OBJ_STRING,sdsnewlen(NULL, byte+1));
dbAdd(c->db,c->argv[1],o);
+ if (dirty) *dirty = 1;
} else {
- if (created) *created = 0;
o = dbUnshareStringValue(c->db,c->argv[1],o);
+ size_t oldlen = sdslen(o->ptr);
o->ptr = sdsgrowzero(o->ptr,byte+1);
+ if (dirty && oldlen != sdslen(o->ptr)) *dirty = 1;
}
return o;
}
@@ -547,8 +549,8 @@ void setbitCommand(client *c) {
return;
}
- int created;
- if ((o = lookupStringForBitCommand(c,bitoffset,&created)) == NULL) return;
+ int dirty;
+ if ((o = lookupStringForBitCommand(c,bitoffset,&dirty)) == NULL) return;
/* Get current values */
byte = bitoffset >> 3;
@@ -556,10 +558,10 @@ void setbitCommand(client *c) {
bit = 7 - (bitoffset & 0x7);
bitval = byteval & (1 << bit);
- /* Either it is newly created, or the bit changes before and after.
+ /* Either it is newly created, changed length, or the bit changes before and after.
* Note that the bitval here is actually a decimal number.
* So we need to use `!!` to convert it to 0 or 1 for comparison. */
- if (created || (!!bitval != on)) {
+ if (dirty || (!!bitval != on)) {
/* Update byte with new bit value. */
byteval &= ~(1 << bit);
byteval |= ((on & 0x1) << bit);
@@ -1028,7 +1030,7 @@ struct bitfieldOp {
void bitfieldGeneric(client *c, int flags) {
robj *o;
uint64_t bitoffset;
- int j, numops = 0, changes = 0, created = 0;
+ int j, numops = 0, changes = 0, dirty = 0;
struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
int readonly = 1;
@@ -1122,7 +1124,7 @@ void bitfieldGeneric(client *c, int flags) {
/* Lookup by making room up to the farthest bit reached by
* this operation. */
if ((o = lookupStringForBitCommand(c,
- highest_write_offset,&created)) == NULL) {
+ highest_write_offset,&dirty)) == NULL) {
zfree(ops);
return;
}
@@ -1172,7 +1174,7 @@ void bitfieldGeneric(client *c, int flags) {
setSignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
- if (created || (oldval != newval))
+ if (dirty || (oldval != newval))
changes++;
} else {
addReplyNull(c);
@@ -1204,7 +1206,7 @@ void bitfieldGeneric(client *c, int flags) {
setUnsignedBitfield(o->ptr,thisop->offset,
thisop->bits,newval);
- if (created || (oldval != newval))
+ if (dirty || (oldval != newval))
changes++;
} else {
addReplyNull(c);
diff --git a/src/blocked.c b/src/blocked.c
index aa298cffb..65b584213 100644
--- a/src/blocked.c
+++ b/src/blocked.c
@@ -141,12 +141,7 @@ void processUnblockedClients(void) {
* the code is conceptually more correct this way. */
if (!(c->flags & CLIENT_BLOCKED)) {
/* If we have a queued command, execute it now. */
- if (processPendingCommandsAndResetClient(c) == C_OK) {
- /* Now process client if it has more data in it's buffer. */
- if (c->querybuf && sdslen(c->querybuf) > 0) {
- if (processInputBuffer(c) == C_ERR) c = NULL;
- }
- } else {
+ if (processPendingCommandAndInputBuffer(c) == C_ERR) {
c = NULL;
}
}
@@ -204,7 +199,7 @@ void unblockClient(client *c) {
* we do not do it immediately after the command returns (when the
* client got blocked) in order to be still able to access the argument
* vector from module callbacks and updateStatsOnUnblock. */
- if (c->btype != BLOCKED_POSTPONE) {
+ if (c->btype != BLOCKED_POSTPONE && c->btype != BLOCKED_SHUTDOWN) {
freeClientOriginalArgv(c);
resetClient(c);
}
@@ -288,25 +283,24 @@ void disconnectAllBlockedClients(void) {
* when there may be clients blocked on a list key, and there may be new
* data to fetch (the key is ready). */
void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
+ /* Optimization: If no clients are in type BLOCKED_LIST,
+ * we can skip this loop. */
+ if (!server.blocked_clients_by_type[BLOCKED_LIST]) return;
+
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
- int numclients = listLength(clients);
- int deleted = 0;
-
- while(numclients--) {
- listNode *clientnode = listFirst(clients);
- client *receiver = clientnode->value;
+ listNode *ln;
+ listIter li;
+ listRewind(clients,&li);
- if (receiver->btype != BLOCKED_LIST) {
- /* Put at the tail, so that at the next call
- * we'll not run into it again. */
- listRotateHeadToTail(clients);
- continue;
- }
+ while((ln = listNext(&li))) {
+ client *receiver = listNodeValue(ln);
+ if (receiver->btype != BLOCKED_LIST) continue;
+ int deleted = 0;
robj *dstkey = receiver->bpop.target;
int wherefrom = receiver->bpop.blockpos.wherefrom;
int whereto = receiver->bpop.blockpos.whereto;
@@ -342,25 +336,24 @@ void serveClientsBlockedOnListKey(robj *o, readyList *rl) {
* when there may be clients blocked on a sorted set key, and there may be new
* data to fetch (the key is ready). */
void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
+ /* Optimization: If no clients are in type BLOCKED_ZSET,
+ * we can skip this loop. */
+ if (!server.blocked_clients_by_type[BLOCKED_ZSET]) return;
+
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
- int numclients = listLength(clients);
- int deleted = 0;
-
- while (numclients--) {
- listNode *clientnode = listFirst(clients);
- client *receiver = clientnode->value;
+ listNode *ln;
+ listIter li;
+ listRewind(clients,&li);
- if (receiver->btype != BLOCKED_ZSET) {
- /* Put at the tail, so that at the next call
- * we'll not run into it again. */
- listRotateHeadToTail(clients);
- continue;
- }
+ while((ln = listNext(&li))) {
+ client *receiver = listNodeValue(ln);
+ if (receiver->btype != BLOCKED_ZSET) continue;
+ int deleted = 0;
long llen = zsetLength(o);
long count = receiver->bpop.count;
int where = receiver->bpop.blockpos.wherefrom;
@@ -407,6 +400,10 @@ void serveClientsBlockedOnSortedSetKey(robj *o, readyList *rl) {
* when there may be clients blocked on a stream key, and there may be new
* data to fetch (the key is ready). */
void serveClientsBlockedOnStreamKey(robj *o, readyList *rl) {
+ /* Optimization: If no clients are in type BLOCKED_STREAM,
+ * we can skip this loop. */
+ if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
+
dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
stream *s = o->ptr;
@@ -520,30 +517,21 @@ unblock_receiver:
* see if the key is really able to serve the client, and in that case,
* unblock it. */
void serveClientsBlockedOnKeyByModule(readyList *rl) {
- dictEntry *de;
-
/* Optimization: If no clients are in type BLOCKED_MODULE,
* we can skip this loop. */
if (!server.blocked_clients_by_type[BLOCKED_MODULE]) return;
/* We serve clients in the same order they blocked for
* this key, from the first blocked to the last. */
- de = dictFind(rl->db->blocking_keys,rl->key);
+ dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
if (de) {
list *clients = dictGetVal(de);
- int numclients = listLength(clients);
-
- while(numclients--) {
- listNode *clientnode = listFirst(clients);
- client *receiver = clientnode->value;
-
- /* Put at the tail, so that at the next call
- * we'll not run into it again: clients here may not be
- * ready to be served, so they'll remain in the list
- * sometimes. We want also be able to skip clients that are
- * not blocked for the MODULE type safely. */
- listRotateHeadToTail(clients);
+ listNode *ln;
+ listIter li;
+ listRewind(clients,&li);
+ while((ln = listNext(&li))) {
+ client *receiver = listNodeValue(ln);
if (receiver->btype != BLOCKED_MODULE) continue;
/* Note that if *this* client cannot be served by this key,
@@ -566,6 +554,49 @@ void serveClientsBlockedOnKeyByModule(readyList *rl) {
}
}
+/* Helper function for handleClientsBlockedOnKeys(). This function is called
+ * when there may be clients blocked, via XREADGROUP, on an existing stream which
+ * was deleted. We need to unblock the clients in that case.
+ * The idea is that a client that is blocked via XREADGROUP is different from
+ * any other blocking type in the sense that it depends on the existence of both
+ * the key and the group. Even if the key is deleted and then revived with XADD
+ * it won't help any clients blocked on XREADGROUP because the group no longer
+ * exist, so they would fail with -NOGROUP anyway.
+ * The conclusion is that it's better to unblock these client (with error) upon
+ * the deletion of the key, rather than waiting for the first XADD. */
+void unblockDeletedStreamReadgroupClients(readyList *rl) {
+ /* Optimization: If no clients are in type BLOCKED_STREAM,
+ * we can skip this loop. */
+ if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
+
+ /* We serve clients in the same order they blocked for
+ * this key, from the first blocked to the last. */
+ dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
+ if (de) {
+ list *clients = dictGetVal(de);
+ listNode *ln;
+ listIter li;
+ listRewind(clients,&li);
+
+ while((ln = listNext(&li))) {
+ client *receiver = listNodeValue(ln);
+ if (receiver->btype != BLOCKED_STREAM || !receiver->bpop.xread_group)
+ continue;
+
+ long long prev_error_replies = server.stat_total_error_replies;
+ client *old_client = server.current_client;
+ server.current_client = receiver;
+ monotime replyTimer;
+ elapsedStart(&replyTimer);
+ addReplyError(receiver, "-UNBLOCKED the stream key no longer exists");
+ updateStatsOnUnblock(receiver, 0, elapsedUs(replyTimer), server.stat_total_error_replies != prev_error_replies);
+ unblockClient(receiver);
+ afterCommand(receiver);
+ server.current_client = old_client;
+ }
+ }
+}
+
/* This function should be called by Redis every time a single command,
* a MULTI/EXEC block, or a Lua script, terminated its execution after
* being called by a client. It handles serving clients blocked in
@@ -624,17 +655,27 @@ void handleClientsBlockedOnKeys(void) {
/* Serve clients blocked on the key. */
robj *o = lookupKeyReadWithFlags(rl->db, rl->key, LOOKUP_NONOTIFY | LOOKUP_NOSTATS);
if (o != NULL) {
- if (o->type == OBJ_LIST)
+ int objtype = o->type;
+ if (objtype == OBJ_LIST)
serveClientsBlockedOnListKey(o,rl);
- else if (o->type == OBJ_ZSET)
+ else if (objtype == OBJ_ZSET)
serveClientsBlockedOnSortedSetKey(o,rl);
- else if (o->type == OBJ_STREAM)
+ else if (objtype == OBJ_STREAM)
serveClientsBlockedOnStreamKey(o,rl);
/* We want to serve clients blocked on module keys
* regardless of the object type: we don't know what the
* module is trying to accomplish right now. */
serveClientsBlockedOnKeyByModule(rl);
+ /* If we have XREADGROUP clients blocked on this key, and
+ * the key is not a stream, it must mean that the key was
+ * overwritten by either SET or something like
+ * (MULTI, DEL key, SADD key e, EXEC).
+ * In this case we need to unblock all these clients. */
+ if (objtype != OBJ_STREAM)
+ unblockDeletedStreamReadgroupClients(rl);
} else {
+ /* Unblock all XREADGROUP clients of this deleted key */
+ unblockDeletedStreamReadgroupClients(rl);
/* Edge case: If lookupKeyReadWithFlags decides to expire the key we have to
* take care of the propagation here, because afterCommand wasn't called */
if (server.also_propagate.numops > 0)
@@ -823,4 +864,3 @@ void signalKeyAsReady(redisDb *db, robj *key, int type) {
incrRefCount(key);
serverAssert(dictAdd(db->ready_keys,key,NULL) == DICT_OK);
}
-
diff --git a/src/call_reply.c b/src/call_reply.c
index 3694db55e..759cd792a 100644
--- a/src/call_reply.c
+++ b/src/call_reply.c
@@ -525,3 +525,18 @@ CallReply *callReplyCreate(sds reply, list *deferred_error_list, void *private_d
res->deferred_error_list = deferred_error_list;
return res;
}
+
+/* Create a new CallReply struct from the reply blob representing an error message.
+ * Automatically creates a deferred_error_list and sets a copy of the reply in it.
+ * Refer to callReplyCreate for a detailed explanation. */
+CallReply *callReplyCreateError(sds reply, void *private_data) {
+ sds err_buff = reply;
+ if (err_buff[0] != '-') {
+ err_buff = sdscatfmt(sdsempty(), "-ERR %S\r\n", reply);
+ sdsfree(reply);
+ }
+ list *deferred_error_list = listCreate();
+ listSetFreeMethod(deferred_error_list, (void (*)(void*))sdsfree);
+ listAddNodeTail(deferred_error_list, sdsnew(err_buff));
+ return callReplyCreate(err_buff, deferred_error_list, private_data);
+}
diff --git a/src/call_reply.h b/src/call_reply.h
index ff98f7f5a..ff1c4ba3f 100644
--- a/src/call_reply.h
+++ b/src/call_reply.h
@@ -35,6 +35,7 @@
typedef struct CallReply CallReply;
CallReply *callReplyCreate(sds reply, list *deferred_error_list, void *private_data);
+CallReply *callReplyCreateError(sds reply, void *private_data);
int callReplyType(CallReply *rep);
const char *callReplyGetString(CallReply *rep, size_t *len);
long long callReplyGetLongLong(CallReply *rep);
diff --git a/src/cli_common.c b/src/cli_common.c
index 7064a096b..33069017b 100644
--- a/src/cli_common.c
+++ b/src/cli_common.c
@@ -299,7 +299,7 @@ static sds percentDecode(const char *pe, size_t len) {
}
/* Parse a URI and extract the server connection information.
- * URI scheme is based on the the provisional specification[1] excluding support
+ * URI scheme is based on the provisional specification[1] excluding support
* for query parameters. Valid URIs are:
* scheme: "redis://"
* authority: [[<username> ":"] <password> "@"] [<hostname> [":" <port>]]
@@ -371,3 +371,28 @@ void freeCliConnInfo(cliConnInfo connInfo){
if (connInfo.auth) sdsfree(connInfo.auth);
if (connInfo.user) sdsfree(connInfo.user);
}
+
+/*
+ * Escape a Unicode string for JSON output (--json), following RFC 7159:
+ * https://datatracker.ietf.org/doc/html/rfc7159#section-7
+*/
+sds escapeJsonString(sds s, const char *p, size_t len) {
+ s = sdscatlen(s,"\"",1);
+ while(len--) {
+ switch(*p) {
+ case '\\':
+ case '"':
+ s = sdscatprintf(s,"\\%c",*p);
+ break;
+ case '\n': s = sdscatlen(s,"\\n",2); break;
+ case '\f': s = sdscatlen(s,"\\f",2); break;
+ case '\r': s = sdscatlen(s,"\\r",2); break;
+ case '\t': s = sdscatlen(s,"\\t",2); break;
+ case '\b': s = sdscatlen(s,"\\b",2); break;
+ default:
+ s = sdscatprintf(s,(*p >= 0 && *p <= 0x1f) ? "\\u%04x" : "%c",*p);
+ }
+ p++;
+ }
+ return sdscatlen(s,"\"",1);
+}
diff --git a/src/cli_common.h b/src/cli_common.h
index 1cb76c6b9..c5c4c11aa 100644
--- a/src/cli_common.h
+++ b/src/cli_common.h
@@ -48,4 +48,7 @@ sds unquoteCString(char *str);
void parseRedisUri(const char *uri, const char* tool_name, cliConnInfo *connInfo, int *tls_flag);
void freeCliConnInfo(cliConnInfo connInfo);
+
+sds escapeJsonString(sds s, const char *p, size_t len);
+
#endif /* __CLICOMMON_H */
diff --git a/src/cluster.c b/src/cluster.c
index 32335bbf9..701871b36 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -56,7 +56,6 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request);
void clusterUpdateState(void);
int clusterNodeGetSlotBit(clusterNode *n, int slot);
sds clusterGenNodesDescription(int filter, int use_pport);
-clusterNode *clusterLookupNode(const char *name);
list *clusterGetNodesServingMySlots(clusterNode *node);
int clusterNodeAddSlave(clusterNode *master, clusterNode *slave);
int clusterAddSlot(clusterNode *n, int slot);
@@ -74,6 +73,8 @@ void clusterCloseAllSlots(void);
void clusterSetNodeAsMaster(clusterNode *n);
void clusterDelNode(clusterNode *delnode);
sds representClusterNodeFlags(sds ci, uint16_t flags);
+sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count);
+void clusterFreeNodesSlotsInfo(clusterNode *n);
uint64_t clusterGetMaxEpoch(void);
int clusterBumpConfigEpochWithoutConsensus(void);
void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8_t type, const unsigned char *payload, uint32_t len);
@@ -210,7 +211,11 @@ int clusterLoadConfig(char *filename) {
}
/* Create this node if it does not exist */
- n = clusterLookupNode(argv[0]);
+ if (verifyClusterNodeId(argv[0], sdslen(argv[0])) == C_ERR) {
+ sdsfreesplitres(argv, argc);
+ goto fmterr;
+ }
+ n = clusterLookupNode(argv[0], sdslen(argv[0]));
if (!n) {
n = createClusterNode(argv[0],0);
clusterAddNode(n);
@@ -218,6 +223,17 @@ int clusterLoadConfig(char *filename) {
/* Format for the node address information:
* ip:port[@cport][,hostname] */
+ /* Hostname is an optional argument that defines the endpoint
+ * that can be reported to clients instead of IP. */
+ char *hostname = strchr(argv[1], ',');
+ if (hostname) {
+ *hostname = '\0';
+ hostname++;
+ n->hostname = sdscpy(n->hostname, hostname);
+ } else if (sdslen(n->hostname) != 0) {
+ sdsclear(n->hostname);
+ }
+
/* Address and port */
if ((p = strrchr(argv[1],':')) == NULL) {
sdsfreesplitres(argv,argc);
@@ -237,17 +253,6 @@ int clusterLoadConfig(char *filename) {
* base port. */
n->cport = busp ? atoi(busp) : n->port + CLUSTER_PORT_INCR;
- /* Hostname is an optional argument that defines the endpoint
- * that can be reported to clients instead of IP. */
- char *hostname = strchr(p, ',');
- if (hostname) {
- *hostname = '\0';
- hostname++;
- n->hostname = sdscpy(n->hostname, hostname);
- } else if (sdslen(n->hostname) != 0) {
- sdsclear(n->hostname);
- }
-
/* The plaintext port for client in a TLS cluster (n->pport) is not
* stored in nodes.conf. It is received later over the bus protocol. */
@@ -286,7 +291,11 @@ int clusterLoadConfig(char *filename) {
/* Get master if any. Set the master and populate master's
* slave list. */
if (argv[3][0] != '-') {
- master = clusterLookupNode(argv[3]);
+ if (verifyClusterNodeId(argv[3], sdslen(argv[3])) == C_ERR) {
+ sdsfreesplitres(argv, argc);
+ goto fmterr;
+ }
+ master = clusterLookupNode(argv[3], sdslen(argv[3]));
if (!master) {
master = createClusterNode(argv[3],0);
clusterAddNode(master);
@@ -322,7 +331,14 @@ int clusterLoadConfig(char *filename) {
goto fmterr;
}
p += 3;
- cn = clusterLookupNode(p);
+
+ char *pr = strchr(p, ']');
+ size_t node_len = pr - p;
+ if (pr == NULL || verifyClusterNodeId(p, node_len) == C_ERR) {
+ sdsfreesplitres(argv, argc);
+ goto fmterr;
+ }
+ cn = clusterLookupNode(p, CLUSTER_NAMELEN);
if (!cn) {
cn = createClusterNode(p,0);
clusterAddNode(cn);
@@ -796,7 +812,7 @@ void setClusterNodeToInboundClusterLink(clusterNode *node, clusterLink *link) {
* we would always process the disconnection of the existing inbound link before
* accepting a new existing inbound link. Therefore, it's possible to have more than
* one inbound link from the same node at the same time. */
- serverLog(LL_DEBUG, "Replacing inbound link fd %d from node %s with fd %d",
+ serverLog(LL_DEBUG, "Replacing inbound link fd %d from node %.40s with fd %d",
node->inbound_link->conn->fd, node->name, link->conn->fd);
}
node->inbound_link = link;
@@ -942,7 +958,9 @@ clusterNode *createClusterNode(char *nodename, int flags) {
node->configEpoch = 0;
node->flags = flags;
memset(node->slots,0,sizeof(node->slots));
- node->slots_info = NULL;
+ node->slot_info_pairs = NULL;
+ node->slot_info_pairs_count = 0;
+ node->slot_info_pairs_alloc = 0;
node->numslots = 0;
node->numslaves = 0;
node->slaves = NULL;
@@ -1178,12 +1196,23 @@ void clusterDelNode(clusterNode *delnode) {
freeClusterNode(delnode);
}
-/* Node lookup by name */
-clusterNode *clusterLookupNode(const char *name) {
- sds s = sdsnewlen(name, CLUSTER_NAMELEN);
- dictEntry *de;
+/* Cluster node sanity check. Returns C_OK if the node id
+ * is valid and C_ERR otherwise. */
+int verifyClusterNodeId(const char *name, int length) {
+ if (length != CLUSTER_NAMELEN) return C_ERR;
+ for (int i = 0; i < length; i++) {
+ if (name[i] >= 'a' && name[i] <= 'z') continue;
+ if (name[i] >= '0' && name[i] <= '9') continue;
+ return C_ERR;
+ }
+ return C_OK;
+}
- de = dictFind(server.cluster->nodes,s);
+/* Node lookup by name */
+clusterNode *clusterLookupNode(const char *name, int length) {
+ if (verifyClusterNodeId(name, length) != C_OK) return NULL;
+ sds s = sdsnewlen(name, length);
+ dictEntry *de = dictFind(server.cluster->nodes, s);
sdsfree(s);
if (de == NULL) return NULL;
return dictGetVal(de);
@@ -1599,7 +1628,7 @@ int clusterStartHandshake(char *ip, int port, int cport) {
void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
uint16_t count = ntohs(hdr->count);
clusterMsgDataGossip *g = (clusterMsgDataGossip*) hdr->data.ping.gossip;
- clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender);
+ clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender, CLUSTER_NAMELEN);
while(count--) {
uint16_t flags = ntohs(g->flags);
@@ -1618,7 +1647,7 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
}
/* Update our state accordingly to the gossip sections */
- node = clusterLookupNode(g->nodename);
+ node = clusterLookupNode(g->nodename, CLUSTER_NAMELEN);
if (node) {
/* We already know this node.
Handle failure reports, only when the sender is a master. */
@@ -1895,6 +1924,17 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_UPDATE_STATE|
CLUSTER_TODO_FSYNC_CONFIG);
+ } else if (myself->slaveof && myself->slaveof->slaveof) {
+ /* Safeguard against sub-replicas. A replica's master can turn itself
+ * into a replica if its last slot is removed. If no other node takes
+ * over the slot, there is nothing else to trigger replica migration. */
+ serverLog(LL_WARNING,
+ "I'm a sub-replica! Reconfiguring myself as a replica of grandmaster %.40s",
+ myself->slaveof->slaveof->name);
+ clusterSetMaster(myself->slaveof->slaveof);
+ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
+ CLUSTER_TODO_UPDATE_STATE|
+ CLUSTER_TODO_FSYNC_CONFIG);
} else if (dirty_slots_count) {
/* If we are here, we received an update message which removed
* ownership for certain slots we still have keys about, but still
@@ -1970,7 +2010,7 @@ int writeHostnamePingExt(clusterMsgPingExt **cursor) {
/* We previously validated the extensions, so this function just needs to
* handle the extensions. */
void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) {
- clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender);
+ clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender, CLUSTER_NAMELEN);
char *ext_hostname = NULL;
uint16_t extensions = ntohs(hdr->extensions);
/* Loop through all the extensions and process them */
@@ -2003,7 +2043,7 @@ static clusterNode *getNodeFromLinkAndMsg(clusterLink *link, clusterMsg *hdr) {
sender = link->node;
} else {
/* Otherwise, fetch sender based on the message */
- sender = clusterLookupNode(hdr->sender);
+ sender = clusterLookupNode(hdr->sender, CLUSTER_NAMELEN);
/* We know the sender node but haven't associate it with the link. This must
* be an inbound link because only for inbound links we didn't know which node
* to associate when they were created. */
@@ -2213,7 +2253,7 @@ int clusterProcessPacket(clusterLink *link) {
if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG ||
type == CLUSTERMSG_TYPE_MEET)
{
- serverLog(LL_DEBUG,"%s packet received: %s",
+ serverLog(LL_DEBUG,"%s packet received: %.40s",
clusterGetMessageTypeString(type),
link->node ? link->node->name : "NULL");
if (!link->inbound) {
@@ -2314,7 +2354,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterSetNodeAsMaster(sender);
} else {
/* Node is a slave. */
- clusterNode *master = clusterLookupNode(hdr->slaveof);
+ clusterNode *master = clusterLookupNode(hdr->slaveof, CLUSTER_NAMELEN);
if (nodeIsMaster(sender)) {
/* Master turned into a slave! Reconfigure the node. */
@@ -2429,7 +2469,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterNode *failing;
if (sender) {
- failing = clusterLookupNode(hdr->data.fail.about.nodename);
+ failing = clusterLookupNode(hdr->data.fail.about.nodename, CLUSTER_NAMELEN);
if (failing &&
!(failing->flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_MYSELF)))
{
@@ -2517,7 +2557,7 @@ int clusterProcessPacket(clusterLink *link) {
ntohu64(hdr->data.update.nodecfg.configEpoch);
if (!sender) return 1; /* We don't know the sender. */
- n = clusterLookupNode(hdr->data.update.nodecfg.nodename);
+ n = clusterLookupNode(hdr->data.update.nodecfg.nodename, CLUSTER_NAMELEN);
if (!n) return 1; /* We don't know the reported node. */
if (n->configEpoch >= reportedConfigEpoch) return 1; /* Nothing new. */
@@ -3148,7 +3188,7 @@ int clusterSendModuleMessageToTarget(const char *target, uint64_t module_id, uin
clusterNode *node = NULL;
if (target != NULL) {
- node = clusterLookupNode(target);
+ node = clusterLookupNode(target, strlen(target));
if (node == NULL || node->link == NULL) return C_ERR;
}
@@ -4561,6 +4601,22 @@ sds representClusterNodeFlags(sds ci, uint16_t flags) {
return ci;
}
+/* Concatenate the slot ownership information to the given SDS string 'ci'.
+ * If the slot ownership is in a contiguous block, it's represented as start-end pair,
+ * else each slot is added separately. */
+sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count) {
+ for (int i = 0; i< slot_info_pairs_count; i+=2) {
+ unsigned long start = slot_info_pairs[i];
+ unsigned long end = slot_info_pairs[i+1];
+ if (start == end) {
+ ci = sdscatfmt(ci, " %i", start);
+ } else {
+ ci = sdscatfmt(ci, " %i-%i", start, end);
+ }
+ }
+ return ci;
+}
+
/* Generate a csv-alike representation of the specified cluster node.
* See clusterGenNodesDescription() top comment for more information.
*
@@ -4609,8 +4665,8 @@ sds clusterGenNodeDescription(clusterNode *node, int use_pport) {
/* Slots served by this instance. If we already have slots info,
* append it directly, otherwise, generate slots only if it has. */
- if (node->slots_info) {
- ci = sdscatsds(ci, node->slots_info);
+ if (node->slot_info_pairs) {
+ ci = representSlotInfo(ci, node->slot_info_pairs, node->slot_info_pairs_count);
} else if (node->numslots > 0) {
start = -1;
for (j = 0; j < CLUSTER_SLOTS; j++) {
@@ -4670,12 +4726,15 @@ void clusterGenNodesSlotsInfo(int filter) {
* or end of slot. */
if (i == CLUSTER_SLOTS || n != server.cluster->slots[i]) {
if (!(n->flags & filter)) {
- if (n->slots_info == NULL) n->slots_info = sdsempty();
- if (start == i-1) {
- n->slots_info = sdscatfmt(n->slots_info," %i",start);
- } else {
- n->slots_info = sdscatfmt(n->slots_info," %i-%i",start,i-1);
+ if (n->slot_info_pairs_count+2 > n->slot_info_pairs_alloc) {
+ if (n->slot_info_pairs_alloc == 0)
+ n->slot_info_pairs_alloc = 8;
+ else
+ n->slot_info_pairs_alloc *= 2;
+ n->slot_info_pairs = zrealloc(n->slot_info_pairs, n->slot_info_pairs_alloc * sizeof(uint16_t));
}
+ n->slot_info_pairs[n->slot_info_pairs_count++] = start;
+ n->slot_info_pairs[n->slot_info_pairs_count++] = i-1;
}
if (i == CLUSTER_SLOTS) break;
n = server.cluster->slots[i];
@@ -4684,6 +4743,13 @@ void clusterGenNodesSlotsInfo(int filter) {
}
}
+void clusterFreeNodesSlotsInfo(clusterNode *n) {
+ zfree(n->slot_info_pairs);
+ n->slot_info_pairs = NULL;
+ n->slot_info_pairs_count = 0;
+ n->slot_info_pairs_alloc = 0;
+}
+
/* Generate a csv-alike representation of the nodes we are aware of,
* including the "myself" node, and return an SDS string containing the
* representation (it is up to the caller to free it).
@@ -4718,10 +4784,7 @@ sds clusterGenNodesDescription(int filter, int use_pport) {
ci = sdscatlen(ci,"\n",1);
/* Release slots info. */
- if (node->slots_info) {
- sdsfree(node->slots_info);
- node->slots_info = NULL;
- }
+ clusterFreeNodesSlotsInfo(node);
}
dictReleaseIterator(di);
return ci;
@@ -4942,6 +5005,136 @@ void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, in
setDeferredArrayLen(c, nested_replylen, nested_elements);
}
+/* Add detailed information of a node to the output buffer of the given client. */
+void addNodeDetailsToShardReply(client *c, clusterNode *node) {
+ int reply_count = 0;
+ void *node_replylen = addReplyDeferredLen(c);
+ addReplyBulkCString(c, "id");
+ addReplyBulkCBuffer(c, node->name, CLUSTER_NAMELEN);
+ reply_count++;
+
+ /* We use server.tls_cluster as a proxy for whether or not
+ * the remote port is the tls port or not */
+ int plaintext_port = server.tls_cluster ? node->pport : node->port;
+ int tls_port = server.tls_cluster ? node->port : 0;
+ if (plaintext_port) {
+ addReplyBulkCString(c, "port");
+ addReplyLongLong(c, plaintext_port);
+ reply_count++;
+ }
+
+ if (tls_port) {
+ addReplyBulkCString(c, "tls-port");
+ addReplyLongLong(c, tls_port);
+ reply_count++;
+ }
+
+ addReplyBulkCString(c, "ip");
+ addReplyBulkCString(c, node->ip);
+ reply_count++;
+
+ addReplyBulkCString(c, "endpoint");
+ addReplyBulkCString(c, getPreferredEndpoint(node));
+ reply_count++;
+
+ if (node->hostname) {
+ addReplyBulkCString(c, "hostname");
+ addReplyBulkCString(c, node->hostname);
+ reply_count++;
+ }
+
+ long long node_offset;
+ if (node->flags & CLUSTER_NODE_MYSELF) {
+ node_offset = nodeIsSlave(node) ? replicationGetSlaveOffset() : server.master_repl_offset;
+ } else {
+ node_offset = node->repl_offset;
+ }
+
+ addReplyBulkCString(c, "role");
+ addReplyBulkCString(c, nodeIsSlave(node) ? "replica" : "master");
+ reply_count++;
+
+ addReplyBulkCString(c, "replication-offset");
+ addReplyLongLong(c, node_offset);
+ reply_count++;
+
+ addReplyBulkCString(c, "health");
+ const char *health_msg = NULL;
+ if (nodeFailed(node)) {
+ health_msg = "fail";
+ } else if (nodeIsSlave(node) && node_offset == 0) {
+ health_msg = "loading";
+ } else {
+ health_msg = "online";
+ }
+ addReplyBulkCString(c, health_msg);
+ reply_count++;
+
+ setDeferredMapLen(c, node_replylen, reply_count);
+}
+
+/* Add the shard reply of a single shard based off the given primary node. */
+void addShardReplyForClusterShards(client *c, clusterNode *node, uint16_t *slot_info_pairs, int slot_pairs_count) {
+ addReplyMapLen(c, 2);
+ addReplyBulkCString(c, "slots");
+ if (slot_info_pairs) {
+ serverAssert((slot_pairs_count % 2) == 0);
+ addReplyArrayLen(c, slot_pairs_count);
+ for (int i = 0; i < slot_pairs_count; i++)
+ addReplyBulkLongLong(c, (unsigned long)slot_info_pairs[i]);
+ } else {
+ /* If no slot info pair is provided, the node owns no slots */
+ addReplyArrayLen(c, 0);
+ }
+
+ addReplyBulkCString(c, "nodes");
+ list *nodes_for_slot = clusterGetNodesServingMySlots(node);
+ /* At least the provided node should be serving its slots */
+ serverAssert(nodes_for_slot);
+ addReplyArrayLen(c, listLength(nodes_for_slot));
+ if (listLength(nodes_for_slot) != 0) {
+ listIter li;
+ listNode *ln;
+ listRewind(nodes_for_slot, &li);
+ while ((ln = listNext(&li))) {
+ clusterNode *node = listNodeValue(ln);
+ addNodeDetailsToShardReply(c, node);
+ }
+ listRelease(nodes_for_slot);
+ }
+}
+
+/* Add to the output buffer of the given client, an array of slot (start, end)
+ * pair owned by the shard, also the primary and set of replica(s) along with
+ * information about each node. */
+void clusterReplyShards(client *c) {
+ void *shard_replylen = addReplyDeferredLen(c);
+ int shard_count = 0;
+ /* This call will add slot_info_pairs to all nodes */
+ clusterGenNodesSlotsInfo(0);
+ dictIterator *di = dictGetSafeIterator(server.cluster->nodes);
+ dictEntry *de;
+ /* Iterate over all the available nodes in the cluster, for each primary
+ * node, generate the cluster shards response. If the primary node
+ * doesn't own any slots, the cluster shards response contains the node-related
+ * information and an empty slots array. */
+ while((de = dictNext(di)) != NULL) {
+ clusterNode *n = dictGetVal(de);
+ if (nodeIsSlave(n)) {
+ /* You can force a replica to own slots, even though it'll get reverted,
+ * so freeing the slot pair here just in case. */
+ clusterFreeNodesSlotsInfo(n);
+ continue;
+ }
+ shard_count++;
+ /* n->slot_info_pairs is set to NULL when the node owns no slots. */
+ addShardReplyForClusterShards(c, n, n->slot_info_pairs, n->slot_info_pairs_count);
+ clusterFreeNodesSlotsInfo(n);
+ }
+ dictReleaseIterator(di);
+ setDeferredArrayLen(c, shard_replylen, shard_count);
+}
+
void clusterReplyMultiBulkSlots(client * c) {
/* Format: 1) 1) start slot
* 2) end slot
@@ -5035,6 +5228,8 @@ void clusterCommand(client *c) {
"SLOTS",
" Return information about slots range mappings. Each range is made of:",
" start, end, master and replicas IP addresses, ports and ids",
+"SHARDS",
+" Return information about slot range mappings and the nodes associated with them.",
"LINKS",
" Return information about all network links between this node and its peers.",
" Output format is an array where each array element is a map containing attributes of a link",
@@ -5084,6 +5279,9 @@ NULL
} else if (!strcasecmp(c->argv[1]->ptr,"slots") && c->argc == 2) {
/* CLUSTER SLOTS */
clusterReplyMultiBulkSlots(c);
+ } else if (!strcasecmp(c->argv[1]->ptr,"shards") && c->argc == 2) {
+ /* CLUSTER SHARDS */
+ clusterReplyShards(c);
} else if (!strcasecmp(c->argv[1]->ptr,"flushslots") && c->argc == 2) {
/* CLUSTER FLUSHSLOTS */
if (dictSize(server.db[0].dict) != 0) {
@@ -5181,7 +5379,8 @@ NULL
addReplyErrorFormat(c,"I'm not the owner of hash slot %u",slot);
return;
}
- if ((n = clusterLookupNode(c->argv[4]->ptr)) == NULL) {
+ n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
+ if (n == NULL) {
addReplyErrorFormat(c,"I don't know about node %s",
(char*)c->argv[4]->ptr);
return;
@@ -5197,7 +5396,8 @@ NULL
"I'm already the owner of hash slot %u",slot);
return;
}
- if ((n = clusterLookupNode(c->argv[4]->ptr)) == NULL) {
+ n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
+ if (n == NULL) {
addReplyErrorFormat(c,"I don't know about node %s",
(char*)c->argv[4]->ptr);
return;
@@ -5213,8 +5413,7 @@ NULL
server.cluster->migrating_slots_to[slot] = NULL;
} else if (!strcasecmp(c->argv[3]->ptr,"node") && c->argc == 5) {
/* CLUSTER SETSLOT <SLOT> NODE <NODE ID> */
- clusterNode *n = clusterLookupNode(c->argv[4]->ptr);
-
+ n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s",
(char*)c->argv[4]->ptr);
@@ -5241,9 +5440,26 @@ NULL
server.cluster->migrating_slots_to[slot])
server.cluster->migrating_slots_to[slot] = NULL;
+ int slot_was_mine = server.cluster->slots[slot] == myself;
clusterDelSlot(slot);
clusterAddSlot(n,slot);
+ /* If we are a master left without slots, we should turn into a
+ * replica of the new master. */
+ if (slot_was_mine &&
+ n != myself &&
+ myself->numslots == 0 &&
+ server.cluster_allow_replica_migration)
+ {
+ serverLog(LL_WARNING,
+ "Configuration change detected. Reconfiguring myself "
+ "as a replica of %.40s", n->name);
+ clusterSetMaster(n);
+ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG |
+ CLUSTER_TODO_UPDATE_STATE |
+ CLUSTER_TODO_FSYNC_CONFIG);
+ }
+
/* If this node was importing this slot, assigning the slot to
* itself also clears the importing status. */
if (n == myself &&
@@ -5409,8 +5625,7 @@ NULL
}
} else if (!strcasecmp(c->argv[1]->ptr,"forget") && c->argc == 3) {
/* CLUSTER FORGET <NODE ID> */
- clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
-
+ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr);
return;
@@ -5428,9 +5643,8 @@ NULL
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"replicate") && c->argc == 3) {
/* CLUSTER REPLICATE <NODE ID> */
- clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
-
/* Lookup the specified node in our table. */
+ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr);
return;
@@ -5466,7 +5680,7 @@ NULL
} else if ((!strcasecmp(c->argv[1]->ptr,"slaves") ||
!strcasecmp(c->argv[1]->ptr,"replicas")) && c->argc == 3) {
/* CLUSTER SLAVES <NODE ID> */
- clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
+ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
int j;
/* Lookup the specified node in our table. */
@@ -5493,7 +5707,7 @@ NULL
c->argc == 3)
{
/* CLUSTER COUNT-FAILURE-REPORTS <NODE ID> */
- clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
+ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr));
if (!n) {
addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr);
diff --git a/src/cluster.h b/src/cluster.h
index 314b747be..27e9e7770 100644
--- a/src/cluster.h
+++ b/src/cluster.h
@@ -118,7 +118,9 @@ typedef struct clusterNode {
int flags; /* CLUSTER_NODE_... */
uint64_t configEpoch; /* Last configEpoch observed for this node */
unsigned char slots[CLUSTER_SLOTS/8]; /* slots handled by this node */
- sds slots_info; /* Slots info represented by string. */
+ uint16_t *slot_info_pairs; /* Slots info represented as (start/end) pair (consecutive index). */
+ int slot_info_pairs_count; /* Used number of slots in slot_info_pairs */
+ int slot_info_pairs_alloc; /* Allocated number of slots in slot_info_pairs */
int numslots; /* Number of slots handled by this node */
int numslaves; /* Number of slave nodes, if this is a master */
struct clusterNode **slaves; /* pointers to slave nodes */
@@ -375,7 +377,8 @@ void clusterInit(void);
void clusterCron(void);
void clusterBeforeSleep(void);
clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask);
-clusterNode *clusterLookupNode(const char *name);
+int verifyClusterNodeId(const char *name, int length);
+clusterNode *clusterLookupNode(const char *name, int length);
int clusterRedirectBlockedClientIfNeeded(client *c);
void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_code);
void migrateCloseTimedoutSockets(void);
diff --git a/src/commands.c b/src/commands.c
index d3b2e45b3..efc159ad8 100644
--- a/src/commands.c
+++ b/src/commands.c
@@ -602,6 +602,17 @@ struct redisCommandArg CLUSTER_SETSLOT_Args[] = {
{0}
};
+/********** CLUSTER SHARDS ********************/
+
+/* CLUSTER SHARDS history */
+#define CLUSTER_SHARDS_History NULL
+
+/* CLUSTER SHARDS tips */
+const char *CLUSTER_SHARDS_tips[] = {
+"nondeterministic_output",
+NULL
+};
+
/********** CLUSTER SLAVES ********************/
/* CLUSTER SLAVES history */
@@ -624,7 +635,7 @@ struct redisCommandArg CLUSTER_SLAVES_Args[] = {
/* CLUSTER SLOTS history */
commandHistory CLUSTER_SLOTS_History[] = {
{"4.0.0","Added node IDs."},
-{"7.0.0","Added additional networking metadata and added support for hostnames and unknown endpoints."},
+{"7.0.0","Added additional networking metadata field."},
{0}
};
@@ -660,8 +671,9 @@ struct redisCommand CLUSTER_Subcommands[] = {
{"saveconfig","Forces the node to save cluster state on disk","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SAVECONFIG_History,CLUSTER_SAVECONFIG_tips,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0},
{"set-config-epoch","Set the configuration epoch in a new node","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SET_CONFIG_EPOCH_History,CLUSTER_SET_CONFIG_EPOCH_tips,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SET_CONFIG_EPOCH_Args},
{"setslot","Bind a hash slot to a specific node","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,CLUSTER_SETSLOT_tips,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SETSLOT_Args},
-{"slaves","List replica nodes of the specified master node","O(1)","3.0.0",CMD_DOC_NONE,"`CLUSTER REPLICAS`","5.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,CLUSTER_SLAVES_tips,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SLAVES_Args},
-{"slots","Get array of Cluster slot to node mappings","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,CLUSTER_SLOTS_tips,clusterCommand,2,CMD_STALE,0},
+{"shards","Get array of cluster slots to node mappings","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,CLUSTER_SHARDS_tips,clusterCommand,2,CMD_STALE,0},
+{"slaves","List replica nodes of the specified master node","O(1)","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,CLUSTER_SLAVES_tips,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,.args=CLUSTER_SLAVES_Args},
+{"slots","Get array of Cluster slot to node mappings","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER SHARDS`","7.0.0",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,CLUSTER_SLOTS_tips,clusterCommand,2,CMD_STALE,0},
{0}
};
@@ -871,7 +883,6 @@ struct redisCommandArg CLIENT_NO_EVICT_Args[] = {
/* CLIENT PAUSE history */
commandHistory CLIENT_PAUSE_History[] = {
-{"3.2.10","Client pause prevents client pause and key eviction as well."},
{"6.2.0","`CLIENT PAUSE WRITE` mode added along with the `mode` option."},
{0}
};
@@ -1312,7 +1323,7 @@ struct redisCommandArg MIGRATE_username_password_Subargs[] = {
/* MIGRATE argument table */
struct redisCommandArg MIGRATE_Args[] = {
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
-{"port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"key_or_empty_string",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=MIGRATE_key_or_empty_string_Subargs},
{"destination-db",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"timeout",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
@@ -1404,7 +1415,10 @@ struct redisCommandArg OBJECT_IDLETIME_Args[] = {
#define OBJECT_REFCOUNT_History NULL
/* OBJECT REFCOUNT tips */
-#define OBJECT_REFCOUNT_tips NULL
+const char *OBJECT_REFCOUNT_tips[] = {
+"nondeterministic_output",
+NULL
+};
/* OBJECT REFCOUNT argument table */
struct redisCommandArg OBJECT_REFCOUNT_Args[] = {
@@ -1549,10 +1563,7 @@ NULL
/********** RENAME ********************/
/* RENAME history */
-commandHistory RENAME_History[] = {
-{"3.2.0","The command no longer returns an error when source and destination names are the same."},
-{0}
-};
+#define RENAME_History NULL
/* RENAME tips */
#define RENAME_tips NULL
@@ -1658,7 +1669,7 @@ struct redisCommandArg SORT_Args[] = {
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
{"pattern",ARG_TYPE_PATTERN,1,"BY",NULL,NULL,CMD_ARG_OPTIONAL},
{"offset_count",ARG_TYPE_BLOCK,-1,"LIMIT",NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_offset_count_Subargs},
-{"pattern",ARG_TYPE_STRING,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
+{"pattern",ARG_TYPE_PATTERN,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
{"order",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_order_Subargs},
{"sorting",ARG_TYPE_PURE_TOKEN,-1,"ALPHA",NULL,NULL,CMD_ARG_OPTIONAL},
{"destination",ARG_TYPE_KEY,2,"STORE",NULL,NULL,CMD_ARG_OPTIONAL},
@@ -1692,7 +1703,7 @@ struct redisCommandArg SORT_RO_Args[] = {
{"key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE},
{"pattern",ARG_TYPE_PATTERN,1,"BY",NULL,NULL,CMD_ARG_OPTIONAL},
{"offset_count",ARG_TYPE_BLOCK,-1,"LIMIT",NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_RO_offset_count_Subargs},
-{"pattern",ARG_TYPE_STRING,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
+{"pattern",ARG_TYPE_PATTERN,1,"GET",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE|CMD_ARG_MULTIPLE_TOKEN},
{"order",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,.subargs=SORT_RO_order_Subargs},
{"sorting",ARG_TYPE_PURE_TOKEN,-1,"ALPHA",NULL,NULL,CMD_ARG_OPTIONAL},
{0}
@@ -3036,7 +3047,7 @@ struct redisCommandArg PUBLISH_Args[] = {
/* PUBSUB CHANNELS argument table */
struct redisCommandArg PUBSUB_CHANNELS_Args[] = {
-{"pattern",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
+{"pattern",ARG_TYPE_PATTERN,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
{0}
};
@@ -3080,7 +3091,7 @@ struct redisCommandArg PUBSUB_NUMSUB_Args[] = {
/* PUBSUB SHARDCHANNELS argument table */
struct redisCommandArg PUBSUB_SHARDCHANNELS_Args[] = {
-{"pattern",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
+{"pattern",ARG_TYPE_PATTERN,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL},
{0}
};
@@ -3163,10 +3174,7 @@ struct redisCommandArg SSUBSCRIBE_Args[] = {
/********** SUBSCRIBE ********************/
/* SUBSCRIBE history */
-commandHistory SUBSCRIBE_History[] = {
-{"6.2.0","`RESET` can be called to exit subscribed state."},
-{0}
-};
+#define SUBSCRIBE_History NULL
/* SUBSCRIBE tips */
#define SUBSCRIBE_tips NULL
@@ -3384,7 +3392,10 @@ NULL
#define FUNCTION_LIST_History NULL
/* FUNCTION LIST tips */
-#define FUNCTION_LIST_tips NULL
+const char *FUNCTION_LIST_tips[] = {
+"nondeterministic_output_order",
+NULL
+};
/* FUNCTION LIST argument table */
struct redisCommandArg FUNCTION_LIST_Args[] = {
@@ -3407,10 +3418,7 @@ NULL
/* FUNCTION LOAD argument table */
struct redisCommandArg FUNCTION_LOAD_Args[] = {
-{"engine-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
-{"library-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"replace",ARG_TYPE_PURE_TOKEN,-1,"REPLACE",NULL,NULL,CMD_ARG_OPTIONAL},
-{"library-description",ARG_TYPE_STRING,-1,"DESCRIPTION",NULL,NULL,CMD_ARG_OPTIONAL},
{"function-code",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
@@ -3449,8 +3457,9 @@ struct redisCommandArg FUNCTION_RESTORE_Args[] = {
/* FUNCTION STATS tips */
const char *FUNCTION_STATS_tips[] = {
+"nondeterministic_output",
"request_policy:all_shards",
-"response_policy:one_succeeded",
+"response_policy:special",
NULL
};
@@ -3462,7 +3471,7 @@ struct redisCommand FUNCTION_Subcommands[] = {
{"help","Show helpful text about the different subcommands","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_HELP_History,FUNCTION_HELP_tips,functionHelpCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_SCRIPTING},
{"kill","Kill the function currently in execution.","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_KILL_History,FUNCTION_KILL_tips,functionKillCommand,2,CMD_NOSCRIPT|CMD_ALLOW_BUSY,ACL_CATEGORY_SCRIPTING},
{"list","List information about all the functions","O(N) where N is the number of functions","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_LIST_History,FUNCTION_LIST_tips,functionListCommand,-2,CMD_NOSCRIPT,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_LIST_Args},
-{"load","Create a function with the given arguments (name, code, description)","O(1) (considering compilation time is redundant)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_LOAD_History,FUNCTION_LOAD_tips,functionLoadCommand,-5,CMD_NOSCRIPT|CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_LOAD_Args},
+{"load","Create a function with the given arguments (name, code, description)","O(1) (considering compilation time is redundant)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_LOAD_History,FUNCTION_LOAD_tips,functionLoadCommand,-3,CMD_NOSCRIPT|CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_LOAD_Args},
{"restore","Restore all the functions on the given payload","O(N) where N is the number of functions on the payload","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_RESTORE_History,FUNCTION_RESTORE_tips,functionRestoreCommand,-3,CMD_NOSCRIPT|CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_SCRIPTING,.args=FUNCTION_RESTORE_Args},
{"stats","Return information about the function currently running (name, description, duration)","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SCRIPTING,FUNCTION_STATS_History,FUNCTION_STATS_tips,functionStatsCommand,2,CMD_NOSCRIPT|CMD_ALLOW_BUSY,ACL_CATEGORY_SCRIPTING},
{0}
@@ -3520,7 +3529,7 @@ struct redisCommandArg SCRIPT_EXISTS_Args[] = {
/* SCRIPT FLUSH history */
commandHistory SCRIPT_FLUSH_History[] = {
-{"6.2.0","Added the `ASYNC` and `SYNC` flushing mode modifiers, as well as the **lazyfree-lazy-user-flush** configuration directive."},
+{"6.2.0","Added the `ASYNC` and `SYNC` flushing mode modifiers."},
{0}
};
@@ -3609,6 +3618,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL CKQUORUM tips */
#define SENTINEL_CKQUORUM_tips NULL
+/* SENTINEL CKQUORUM argument table */
+struct redisCommandArg SENTINEL_CKQUORUM_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL CONFIG ********************/
/* SENTINEL CONFIG history */
@@ -3617,6 +3632,26 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL CONFIG tips */
#define SENTINEL_CONFIG_tips NULL
+/* SENTINEL CONFIG set_or_get set_param_value argument table */
+struct redisCommandArg SENTINEL_CONFIG_set_or_get_set_param_value_Subargs[] = {
+{"parameter",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
+/* SENTINEL CONFIG set_or_get argument table */
+struct redisCommandArg SENTINEL_CONFIG_set_or_get_Subargs[] = {
+{"set_param_value",ARG_TYPE_BLOCK,-1,"SET",NULL,NULL,CMD_ARG_MULTIPLE,.subargs=SENTINEL_CONFIG_set_or_get_set_param_value_Subargs},
+{"parameter",ARG_TYPE_STRING,-1,"GET",NULL,NULL,CMD_ARG_MULTIPLE},
+{0}
+};
+
+/* SENTINEL CONFIG argument table */
+struct redisCommandArg SENTINEL_CONFIG_Args[] = {
+{"set_or_get",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,.subargs=SENTINEL_CONFIG_set_or_get_Subargs},
+{0}
+};
+
/********** SENTINEL DEBUG ********************/
/* SENTINEL DEBUG history */
@@ -3625,6 +3660,19 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL DEBUG tips */
#define SENTINEL_DEBUG_tips NULL
+/* SENTINEL DEBUG parameter_value argument table */
+struct redisCommandArg SENTINEL_DEBUG_parameter_value_Subargs[] = {
+{"parameter",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
+/* SENTINEL DEBUG argument table */
+struct redisCommandArg SENTINEL_DEBUG_Args[] = {
+{"parameter_value",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,.subargs=SENTINEL_DEBUG_parameter_value_Subargs},
+{0}
+};
+
/********** SENTINEL FAILOVER ********************/
/* SENTINEL FAILOVER history */
@@ -3633,6 +3681,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL FAILOVER tips */
#define SENTINEL_FAILOVER_tips NULL
+/* SENTINEL FAILOVER argument table */
+struct redisCommandArg SENTINEL_FAILOVER_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL FLUSHCONFIG ********************/
/* SENTINEL FLUSHCONFIG history */
@@ -3649,6 +3703,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL GET_MASTER_ADDR_BY_NAME tips */
#define SENTINEL_GET_MASTER_ADDR_BY_NAME_tips NULL
+/* SENTINEL GET_MASTER_ADDR_BY_NAME argument table */
+struct redisCommandArg SENTINEL_GET_MASTER_ADDR_BY_NAME_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL HELP ********************/
/* SENTINEL HELP history */
@@ -3665,6 +3725,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL INFO_CACHE tips */
#define SENTINEL_INFO_CACHE_tips NULL
+/* SENTINEL INFO_CACHE argument table */
+struct redisCommandArg SENTINEL_INFO_CACHE_Args[] = {
+{"nodename",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE},
+{0}
+};
+
/********** SENTINEL IS_MASTER_DOWN_BY_ADDR ********************/
/* SENTINEL IS_MASTER_DOWN_BY_ADDR history */
@@ -3673,6 +3739,15 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL IS_MASTER_DOWN_BY_ADDR tips */
#define SENTINEL_IS_MASTER_DOWN_BY_ADDR_tips NULL
+/* SENTINEL IS_MASTER_DOWN_BY_ADDR argument table */
+struct redisCommandArg SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args[] = {
+{"ip",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"current-epoch",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"runid",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL MASTER ********************/
/* SENTINEL MASTER history */
@@ -3681,6 +3756,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL MASTER tips */
#define SENTINEL_MASTER_tips NULL
+/* SENTINEL MASTER argument table */
+struct redisCommandArg SENTINEL_MASTER_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL MASTERS ********************/
/* SENTINEL MASTERS history */
@@ -3697,6 +3778,15 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL MONITOR tips */
#define SENTINEL_MONITOR_tips NULL
+/* SENTINEL MONITOR argument table */
+struct redisCommandArg SENTINEL_MONITOR_Args[] = {
+{"name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"ip",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"quorum",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL MYID ********************/
/* SENTINEL MYID history */
@@ -3721,6 +3811,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL REMOVE tips */
#define SENTINEL_REMOVE_tips NULL
+/* SENTINEL REMOVE argument table */
+struct redisCommandArg SENTINEL_REMOVE_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL REPLICAS ********************/
/* SENTINEL REPLICAS history */
@@ -3729,6 +3825,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL REPLICAS tips */
#define SENTINEL_REPLICAS_tips NULL
+/* SENTINEL REPLICAS argument table */
+struct redisCommandArg SENTINEL_REPLICAS_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL RESET ********************/
/* SENTINEL RESET history */
@@ -3737,6 +3839,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL RESET tips */
#define SENTINEL_RESET_tips NULL
+/* SENTINEL RESET argument table */
+struct redisCommandArg SENTINEL_RESET_Args[] = {
+{"pattern",ARG_TYPE_PATTERN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL SENTINELS ********************/
/* SENTINEL SENTINELS history */
@@ -3745,6 +3853,12 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL SENTINELS tips */
#define SENTINEL_SENTINELS_tips NULL
+/* SENTINEL SENTINELS argument table */
+struct redisCommandArg SENTINEL_SENTINELS_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
/********** SENTINEL SET ********************/
/* SENTINEL SET history */
@@ -3753,6 +3867,20 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL SET tips */
#define SENTINEL_SET_tips NULL
+/* SENTINEL SET option_value argument table */
+struct redisCommandArg SENTINEL_SET_option_value_Subargs[] = {
+{"option",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
+/* SENTINEL SET argument table */
+struct redisCommandArg SENTINEL_SET_Args[] = {
+{"master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"option_value",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,.subargs=SENTINEL_SET_option_value_Subargs},
+{0}
+};
+
/********** SENTINEL SIMULATE_FAILURE ********************/
/* SENTINEL SIMULATE_FAILURE history */
@@ -3761,28 +3889,42 @@ struct redisCommand SCRIPT_Subcommands[] = {
/* SENTINEL SIMULATE_FAILURE tips */
#define SENTINEL_SIMULATE_FAILURE_tips NULL
+/* SENTINEL SIMULATE_FAILURE mode argument table */
+struct redisCommandArg SENTINEL_SIMULATE_FAILURE_mode_Subargs[] = {
+{"crash-after-election",ARG_TYPE_PURE_TOKEN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"crash-after-promotion",ARG_TYPE_PURE_TOKEN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"help",ARG_TYPE_PURE_TOKEN,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
+/* SENTINEL SIMULATE_FAILURE argument table */
+struct redisCommandArg SENTINEL_SIMULATE_FAILURE_Args[] = {
+{"mode",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=SENTINEL_SIMULATE_FAILURE_mode_Subargs},
+{0}
+};
+
/* SENTINEL command table */
struct redisCommand SENTINEL_Subcommands[] = {
-{"ckquorum","Check for a Sentinel quorum",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CKQUORUM_History,SENTINEL_CKQUORUM_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"config","Configure Sentinel","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CONFIG_History,SENTINEL_CONFIG_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"debug","List or update the current configurable parameters","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,SENTINEL_DEBUG_tips,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"failover","Force a failover",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_FAILOVER_History,SENTINEL_FAILOVER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
+{"ckquorum","Check for a Sentinel quorum",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CKQUORUM_History,SENTINEL_CKQUORUM_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_CKQUORUM_Args},
+{"config","Configure Sentinel","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_CONFIG_History,SENTINEL_CONFIG_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_CONFIG_Args},
+{"debug","List or update the current configurable parameters","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,SENTINEL_DEBUG_tips,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_DEBUG_Args},
+{"failover","Force a failover",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_FAILOVER_History,SENTINEL_FAILOVER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_FAILOVER_Args},
{"flushconfig","Rewrite configuration file","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_FLUSHCONFIG_History,SENTINEL_FLUSHCONFIG_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"get-master-addr-by-name","Get port and address of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,SENTINEL_GET_MASTER_ADDR_BY_NAME_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
+{"get-master-addr-by-name","Get port and address of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,SENTINEL_GET_MASTER_ADDR_BY_NAME_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_GET_MASTER_ADDR_BY_NAME_Args},
{"help","Show helpful text about the different subcommands","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_HELP_History,SENTINEL_HELP_tips,sentinelCommand,2,CMD_LOADING|CMD_STALE|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"info-cache","Get cached INFO from the instances in the deployment","O(N) where N is the number of instances","3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_INFO_CACHE_History,SENTINEL_INFO_CACHE_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"is-master-down-by-addr","Check if a master is down","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,SENTINEL_IS_MASTER_DOWN_BY_ADDR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"master","Shows the state of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,SENTINEL_MASTER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
+{"info-cache","Get cached INFO from the instances in the deployment","O(N) where N is the number of instances","3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_INFO_CACHE_History,SENTINEL_INFO_CACHE_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_INFO_CACHE_Args},
+{"is-master-down-by-addr","Check if a master is down","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,SENTINEL_IS_MASTER_DOWN_BY_ADDR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args},
+{"master","Shows the state of a master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,SENTINEL_MASTER_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_MASTER_Args},
{"masters","List the monitored masters","O(N) where N is the number of masters","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MASTERS_History,SENTINEL_MASTERS_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"monitor","Start monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MONITOR_History,SENTINEL_MONITOR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
+{"monitor","Start monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MONITOR_History,SENTINEL_MONITOR_tips,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_MONITOR_Args},
{"myid","Get the Sentinel instance ID","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_MYID_History,SENTINEL_MYID_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
{"pending-scripts","Get information about pending scripts",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_PENDING_SCRIPTS_History,SENTINEL_PENDING_SCRIPTS_tips,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"remove","Stop monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REMOVE_History,SENTINEL_REMOVE_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"replicas","List the monitored replicas","O(N) where N is the number of replicas","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REPLICAS_History,SENTINEL_REPLICAS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"reset","Reset masters by name pattern","O(N) where N is the number of monitored masters","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,SENTINEL_RESET_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"sentinels","List the Sentinel instances","O(N) where N is the number of Sentinels","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SENTINELS_History,SENTINEL_SENTINELS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"set","Change the configuration of a monitored master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SET_History,SENTINEL_SET_tips,sentinelCommand,-5,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
-{"simulate-failure","Simulate failover scenarios",NULL,"3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SIMULATE_FAILURE_History,SENTINEL_SIMULATE_FAILURE_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0},
+{"remove","Stop monitoring","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REMOVE_History,SENTINEL_REMOVE_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_REMOVE_Args},
+{"replicas","List the monitored replicas","O(N) where N is the number of replicas","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_REPLICAS_History,SENTINEL_REPLICAS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_REPLICAS_Args},
+{"reset","Reset masters by name pattern","O(N) where N is the number of monitored masters","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,SENTINEL_RESET_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_RESET_Args},
+{"sentinels","List the Sentinel instances","O(N) where N is the number of Sentinels","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SENTINELS_History,SENTINEL_SENTINELS_tips,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_SENTINELS_Args},
+{"set","Change the configuration of a monitored master","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SET_History,SENTINEL_SET_tips,sentinelCommand,-5,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_SET_Args},
+{"simulate-failure","Simulate failover scenarios",NULL,"3.2.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SENTINEL,SENTINEL_SIMULATE_FAILURE_History,SENTINEL_SIMULATE_FAILURE_tips,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,.args=SENTINEL_SIMULATE_FAILURE_Args},
{0}
};
@@ -4023,7 +4165,10 @@ struct redisCommandArg BGSAVE_Args[] = {
#define COMMAND_DOCS_History NULL
/* COMMAND DOCS tips */
-#define COMMAND_DOCS_tips NULL
+const char *COMMAND_DOCS_tips[] = {
+"nondeterministic_output_order",
+NULL
+};
/* COMMAND DOCS argument table */
struct redisCommandArg COMMAND_DOCS_Args[] = {
@@ -4064,7 +4209,10 @@ commandHistory COMMAND_INFO_History[] = {
};
/* COMMAND INFO tips */
-#define COMMAND_INFO_tips NULL
+const char *COMMAND_INFO_tips[] = {
+"nondeterministic_output_order",
+NULL
+};
/* COMMAND INFO argument table */
struct redisCommandArg COMMAND_INFO_Args[] = {
@@ -4078,7 +4226,10 @@ struct redisCommandArg COMMAND_INFO_Args[] = {
#define COMMAND_LIST_History NULL
/* COMMAND LIST tips */
-#define COMMAND_LIST_tips NULL
+const char *COMMAND_LIST_tips[] = {
+"nondeterministic_output_order",
+NULL
+};
/* COMMAND LIST filterby argument table */
struct redisCommandArg COMMAND_LIST_filterby_Subargs[] = {
@@ -4099,7 +4250,7 @@ struct redisCommand COMMAND_Subcommands[] = {
{"count","Get total number of Redis commands","O(1)","2.8.13",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_COUNT_History,COMMAND_COUNT_tips,commandCountCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"docs","Get array of specific Redis command documentation","O(N) where N is the number of commands to look up","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_DOCS_History,COMMAND_DOCS_tips,commandDocsCommand,-2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,.args=COMMAND_DOCS_Args},
{"getkeys","Extract keys given a full Redis command","O(N) where N is the number of arguments to the command","2.8.13",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_GETKEYS_History,COMMAND_GETKEYS_tips,commandGetKeysCommand,-4,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
-{"getkeysandflags","Extract keys given a full Redis command","O(N) where N is the number of arguments to the command","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_GETKEYSANDFLAGS_History,COMMAND_GETKEYSANDFLAGS_tips,commandGetKeysAndFlagsCommand,-4,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
+{"getkeysandflags","Extract keys and access flags given a full Redis command","O(N) where N is the number of arguments to the command","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_GETKEYSANDFLAGS_History,COMMAND_GETKEYSANDFLAGS_tips,commandGetKeysAndFlagsCommand,-4,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"help","Show helpful text about the different subcommands","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_HELP_History,COMMAND_HELP_tips,commandHelpCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION},
{"info","Get array of specific Redis command details, or all when no argument is given.","O(N) where N is the number of commands to look up","2.8.13",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_INFO_History,COMMAND_INFO_tips,commandInfoCommand,-2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,.args=COMMAND_INFO_Args},
{"list","Get an array of Redis command names","O(N) where N is the total number of Redis commands","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,COMMAND_LIST_History,COMMAND_LIST_tips,commandListCommand,-2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,.args=COMMAND_LIST_Args},
@@ -4113,7 +4264,7 @@ struct redisCommand COMMAND_Subcommands[] = {
/* COMMAND tips */
const char *COMMAND_tips[] = {
-"nondeterministic_output",
+"nondeterministic_output_order",
NULL
};
@@ -4259,7 +4410,7 @@ struct redisCommandArg FAILOVER_Args[] = {
/* FLUSHALL history */
commandHistory FLUSHALL_History[] = {
{"4.0.0","Added the `ASYNC` flushing mode modifier."},
-{"6.2.0","Added the `SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive."},
+{"6.2.0","Added the `SYNC` flushing mode modifier."},
{0}
};
@@ -4288,7 +4439,7 @@ struct redisCommandArg FLUSHALL_Args[] = {
/* FLUSHDB history */
commandHistory FLUSHDB_History[] = {
{"4.0.0","Added the `ASYNC` flushing mode modifier."},
-{"6.2.0","Added the `SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive."},
+{"6.2.0","Added the `SYNC` flushing mode modifier."},
{0}
};
@@ -4351,7 +4502,12 @@ NULL
#define LATENCY_DOCTOR_History NULL
/* LATENCY DOCTOR tips */
-#define LATENCY_DOCTOR_tips NULL
+const char *LATENCY_DOCTOR_tips[] = {
+"nondeterministic_output",
+"request_policy:all_nodes",
+"response_policy:special",
+NULL
+};
/********** LATENCY GRAPH ********************/
@@ -4359,7 +4515,12 @@ NULL
#define LATENCY_GRAPH_History NULL
/* LATENCY GRAPH tips */
-#define LATENCY_GRAPH_tips NULL
+const char *LATENCY_GRAPH_tips[] = {
+"nondeterministic_output",
+"request_policy:all_nodes",
+"response_policy:special",
+NULL
+};
/* LATENCY GRAPH argument table */
struct redisCommandArg LATENCY_GRAPH_Args[] = {
@@ -4381,7 +4542,12 @@ struct redisCommandArg LATENCY_GRAPH_Args[] = {
#define LATENCY_HISTOGRAM_History NULL
/* LATENCY HISTOGRAM tips */
-#define LATENCY_HISTOGRAM_tips NULL
+const char *LATENCY_HISTOGRAM_tips[] = {
+"nondeterministic_output",
+"request_policy:all_nodes",
+"response_policy:special",
+NULL
+};
/* LATENCY HISTOGRAM argument table */
struct redisCommandArg LATENCY_HISTOGRAM_Args[] = {
@@ -4395,7 +4561,12 @@ struct redisCommandArg LATENCY_HISTOGRAM_Args[] = {
#define LATENCY_HISTORY_History NULL
/* LATENCY HISTORY tips */
-#define LATENCY_HISTORY_tips NULL
+const char *LATENCY_HISTORY_tips[] = {
+"nondeterministic_output",
+"request_policy:all_nodes",
+"response_policy:special",
+NULL
+};
/* LATENCY HISTORY argument table */
struct redisCommandArg LATENCY_HISTORY_Args[] = {
@@ -4409,7 +4580,12 @@ struct redisCommandArg LATENCY_HISTORY_Args[] = {
#define LATENCY_LATEST_History NULL
/* LATENCY LATEST tips */
-#define LATENCY_LATEST_tips NULL
+const char *LATENCY_LATEST_tips[] = {
+"nondeterministic_output",
+"request_policy:all_nodes",
+"response_policy:special",
+NULL
+};
/********** LATENCY RESET ********************/
@@ -4417,7 +4593,11 @@ struct redisCommandArg LATENCY_HISTORY_Args[] = {
#define LATENCY_RESET_History NULL
/* LATENCY RESET tips */
-#define LATENCY_RESET_tips NULL
+const char *LATENCY_RESET_tips[] = {
+"request_policy:all_nodes",
+"response_policy:all_succeeded",
+NULL
+};
/* LATENCY RESET argument table */
struct redisCommandArg LATENCY_RESET_Args[] = {
@@ -4467,6 +4647,8 @@ struct redisCommandArg LOLWUT_Args[] = {
/* MEMORY DOCTOR tips */
const char *MEMORY_DOCTOR_tips[] = {
"nondeterministic_output",
+"request_policy:all_shards",
+"response_policy:special",
NULL
};
@@ -4486,6 +4668,8 @@ NULL
/* MEMORY MALLOC_STATS tips */
const char *MEMORY_MALLOC_STATS_tips[] = {
"nondeterministic_output",
+"request_policy:all_shards",
+"response_policy:special",
NULL
};
@@ -4495,7 +4679,11 @@ NULL
#define MEMORY_PURGE_History NULL
/* MEMORY PURGE tips */
-#define MEMORY_PURGE_tips NULL
+const char *MEMORY_PURGE_tips[] = {
+"request_policy:all_shards",
+"response_policy:all_succeeded",
+NULL
+};
/********** MEMORY STATS ********************/
@@ -4505,6 +4693,8 @@ NULL
/* MEMORY STATS tips */
const char *MEMORY_STATS_tips[] = {
"nondeterministic_output",
+"request_policy:all_shards",
+"response_policy:special",
NULL
};
@@ -4556,7 +4746,10 @@ struct redisCommand MEMORY_Subcommands[] = {
#define MODULE_LIST_History NULL
/* MODULE LIST tips */
-#define MODULE_LIST_tips NULL
+const char *MODULE_LIST_tips[] = {
+"nondeterministic_output_order",
+NULL
+};
/********** MODULE LOAD ********************/
@@ -4573,6 +4766,35 @@ struct redisCommandArg MODULE_LOAD_Args[] = {
{0}
};
+/********** MODULE LOADEX ********************/
+
+/* MODULE LOADEX history */
+#define MODULE_LOADEX_History NULL
+
+/* MODULE LOADEX tips */
+#define MODULE_LOADEX_tips NULL
+
+/* MODULE LOADEX configs argument table */
+struct redisCommandArg MODULE_LOADEX_configs_Subargs[] = {
+{"name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"value",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
+/* MODULE LOADEX args argument table */
+struct redisCommandArg MODULE_LOADEX_args_Subargs[] = {
+{"arg",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{0}
+};
+
+/* MODULE LOADEX argument table */
+struct redisCommandArg MODULE_LOADEX_Args[] = {
+{"path",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"configs",ARG_TYPE_BLOCK,-1,"CONFIG",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=MODULE_LOADEX_configs_Subargs},
+{"args",ARG_TYPE_BLOCK,-1,"ARGS",NULL,NULL,CMD_ARG_OPTIONAL|CMD_ARG_MULTIPLE,.subargs=MODULE_LOADEX_args_Subargs},
+{0}
+};
+
/********** MODULE UNLOAD ********************/
/* MODULE UNLOAD history */
@@ -4592,6 +4814,7 @@ struct redisCommand MODULE_Subcommands[] = {
{"help","Show helpful text about the different subcommands","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_HELP_History,MODULE_HELP_tips,moduleCommand,2,CMD_LOADING|CMD_STALE,0},
{"list","List all modules loaded by the server","O(N) where N is the number of loaded modules.","4.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_LIST_History,MODULE_LIST_tips,moduleCommand,2,CMD_ADMIN|CMD_NOSCRIPT,0},
{"load","Load a module","O(1)","4.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_LOAD_History,MODULE_LOAD_tips,moduleCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_PROTECTED,0,.args=MODULE_LOAD_Args},
+{"loadex","Load a module with extended parameters","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_LOADEX_History,MODULE_LOADEX_tips,moduleCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_PROTECTED,0,.args=MODULE_LOADEX_Args},
{"unload","Unload a module","O(1)","4.0.0",CMD_DOC_NONE,NULL,NULL,COMMAND_GROUP_SERVER,MODULE_UNLOAD_History,MODULE_UNLOAD_tips,moduleCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_PROTECTED,0,.args=MODULE_UNLOAD_Args},
{0}
};
@@ -4607,12 +4830,7 @@ struct redisCommand MODULE_Subcommands[] = {
/********** MONITOR ********************/
/* MONITOR history */
-commandHistory MONITOR_History[] = {
-{"6.0.0","`AUTH` excluded from the command's output."},
-{"6.2.0","`RESET` can be called to exit monitor mode."},
-{"6.2.4","`AUTH`, `HELLO`, `EVAL`, `EVAL_RO`, `EVALSHA` and `EVALSHA_RO` included in the command's output."},
-{0}
-};
+#define MONITOR_History NULL
/* MONITOR tips */
#define MONITOR_tips NULL
@@ -4627,7 +4845,7 @@ commandHistory MONITOR_History[] = {
/* PSYNC argument table */
struct redisCommandArg PSYNC_Args[] = {
-{"replicationid",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"replicationid",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{"offset",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
@@ -4651,7 +4869,7 @@ struct redisCommandArg PSYNC_Args[] = {
/* REPLICAOF argument table */
struct redisCommandArg REPLICAOF_Args[] = {
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
-{"port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
@@ -4683,7 +4901,7 @@ struct redisCommandArg REPLICAOF_Args[] = {
/* SHUTDOWN history */
commandHistory SHUTDOWN_History[] = {
-{"7.0.0","Added the `NOW`, `FORCE` and `ABORT` modifiers. Introduced waiting for lagging replicas before exiting."},
+{"7.0.0","Added the `NOW`, `FORCE` and `ABORT` modifiers."},
{0}
};
@@ -4717,7 +4935,7 @@ struct redisCommandArg SHUTDOWN_Args[] = {
/* SLAVEOF argument table */
struct redisCommandArg SLAVEOF_Args[] = {
{"host",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
-{"port",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE},
+{"port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE},
{0}
};
@@ -6652,7 +6870,11 @@ struct redisCommandArg MSET_Args[] = {
#define MSETNX_History NULL
/* MSETNX tips */
-#define MSETNX_tips NULL
+const char *MSETNX_tips[] = {
+"request_policy:multi_shard",
+"response_policy:agg_min",
+NULL
+};
/* MSETNX key_value argument table */
struct redisCommandArg MSETNX_key_value_Subargs[] = {
diff --git a/src/commands/client-pause.json b/src/commands/client-pause.json
index abb26fea2..3a1d9be83 100644
--- a/src/commands/client-pause.json
+++ b/src/commands/client-pause.json
@@ -9,10 +9,6 @@
"function": "clientCommand",
"history": [
[
- "3.2.10",
- "Client pause prevents client pause and key eviction as well."
- ],
- [
"6.2.0",
"`CLIENT PAUSE WRITE` mode added along with the `mode` option."
]
diff --git a/src/commands/cluster-shards.json b/src/commands/cluster-shards.json
new file mode 100644
index 000000000..925ce8bf3
--- /dev/null
+++ b/src/commands/cluster-shards.json
@@ -0,0 +1,18 @@
+{
+ "SHARDS": {
+ "summary": "Get array of cluster slots to node mappings",
+ "complexity": "O(N) where N is the total number of cluster nodes",
+ "group": "cluster",
+ "since": "7.0.0",
+ "arity": 2,
+ "container": "CLUSTER",
+ "function": "clusterCommand",
+ "history": [],
+ "command_flags": [
+ "STALE"
+ ],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT"
+ ]
+ }
+}
diff --git a/src/commands/cluster-slaves.json b/src/commands/cluster-slaves.json
index 8f83dce66..0ea77a876 100644
--- a/src/commands/cluster-slaves.json
+++ b/src/commands/cluster-slaves.json
@@ -9,6 +9,9 @@
"function": "clusterCommand",
"deprecated_since": "5.0.0",
"replaced_by": "`CLUSTER REPLICAS`",
+ "doc_flags": [
+ "DEPRECATED"
+ ],
"command_flags": [
"ADMIN",
"STALE"
diff --git a/src/commands/cluster-slots.json b/src/commands/cluster-slots.json
index fda842466..e8782420e 100644
--- a/src/commands/cluster-slots.json
+++ b/src/commands/cluster-slots.json
@@ -7,6 +7,11 @@
"arity": 2,
"container": "CLUSTER",
"function": "clusterCommand",
+ "deprecated_since": "7.0.0",
+ "replaced_by": "`CLUSTER SHARDS`",
+ "doc_flags": [
+ "DEPRECATED"
+ ],
"history": [
[
"4.0.0",
@@ -14,7 +19,7 @@
],
[
"7.0.0",
- "Added additional networking metadata and added support for hostnames and unknown endpoints."
+ "Added additional networking metadata field."
]
],
"command_flags": [
diff --git a/src/commands/command-docs.json b/src/commands/command-docs.json
index 9d2e20566..68a32f8a1 100644
--- a/src/commands/command-docs.json
+++ b/src/commands/command-docs.json
@@ -14,6 +14,9 @@
"acl_categories": [
"CONNECTION"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT_ORDER"
+ ],
"arguments": [
{
"name": "command-name",
diff --git a/src/commands/command-getkeysandflags.json b/src/commands/command-getkeysandflags.json
index 1ac8e990d..44b3ddcb1 100644
--- a/src/commands/command-getkeysandflags.json
+++ b/src/commands/command-getkeysandflags.json
@@ -1,6 +1,6 @@
{
"GETKEYSANDFLAGS": {
- "summary": "Extract keys given a full Redis command",
+ "summary": "Extract keys and access flags given a full Redis command",
"complexity": "O(N) where N is the number of arguments to the command",
"group": "server",
"since": "7.0.0",
diff --git a/src/commands/command-info.json b/src/commands/command-info.json
index 9291f8912..40d60a3ec 100644
--- a/src/commands/command-info.json
+++ b/src/commands/command-info.json
@@ -20,6 +20,9 @@
"acl_categories": [
"CONNECTION"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT_ORDER"
+ ],
"arguments": [
{
"name": "command-name",
diff --git a/src/commands/command-list.json b/src/commands/command-list.json
index ca7cb71ed..49e9558ca 100644
--- a/src/commands/command-list.json
+++ b/src/commands/command-list.json
@@ -14,6 +14,9 @@
"acl_categories": [
"CONNECTION"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT_ORDER"
+ ],
"arguments": [
{
"name": "filterby",
diff --git a/src/commands/command.json b/src/commands/command.json
index 83458e30c..0bed04040 100644
--- a/src/commands/command.json
+++ b/src/commands/command.json
@@ -15,7 +15,7 @@
"CONNECTION"
],
"command_tips": [
- "NONDETERMINISTIC_OUTPUT"
+ "NONDETERMINISTIC_OUTPUT_ORDER"
]
}
}
diff --git a/src/commands/flushall.json b/src/commands/flushall.json
index a85f35925..ef6a1f883 100644
--- a/src/commands/flushall.json
+++ b/src/commands/flushall.json
@@ -13,7 +13,7 @@
],
[
"6.2.0",
- "Added the `SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive."
+ "Added the `SYNC` flushing mode modifier."
]
],
"command_flags": [
diff --git a/src/commands/flushdb.json b/src/commands/flushdb.json
index 40f8912ba..408ab326b 100644
--- a/src/commands/flushdb.json
+++ b/src/commands/flushdb.json
@@ -13,7 +13,7 @@
],
[
"6.2.0",
- "Added the `SYNC` flushing mode modifier and the **lazyfree-lazy-user-flush** configuration directive."
+ "Added the `SYNC` flushing mode modifier."
]
],
"command_flags": [
diff --git a/src/commands/function-list.json b/src/commands/function-list.json
index 601299345..6513b80cb 100644
--- a/src/commands/function-list.json
+++ b/src/commands/function-list.json
@@ -10,6 +10,9 @@
"command_flags": [
"NOSCRIPT"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT_ORDER"
+ ],
"acl_categories": [
"SCRIPTING"
],
diff --git a/src/commands/function-load.json b/src/commands/function-load.json
index 0a363e328..d04721279 100644
--- a/src/commands/function-load.json
+++ b/src/commands/function-load.json
@@ -4,7 +4,7 @@
"complexity": "O(1) (considering compilation time is redundant)",
"group": "scripting",
"since": "7.0.0",
- "arity": -5,
+ "arity": -3,
"container": "FUNCTION",
"function": "functionLoadCommand",
"command_flags": [
@@ -21,26 +21,12 @@
],
"arguments": [
{
- "name": "engine-name",
- "type": "string"
- },
- {
- "name": "library-name",
- "type": "string"
- },
- {
"name": "replace",
"type": "pure-token",
"token": "REPLACE",
"optional": true
},
{
- "name": "library-description",
- "type": "string",
- "token": "DESCRIPTION",
- "optional": true
- },
- {
"name": "function-code",
"type": "string"
}
diff --git a/src/commands/function-stats.json b/src/commands/function-stats.json
index fed0e8d0a..0d055b65b 100644
--- a/src/commands/function-stats.json
+++ b/src/commands/function-stats.json
@@ -15,8 +15,9 @@
"SCRIPTING"
],
"command_tips": [
+ "NONDETERMINISTIC_OUTPUT",
"REQUEST_POLICY:ALL_SHARDS",
- "RESPONSE_POLICY:ONE_SUCCEEDED"
+ "RESPONSE_POLICY:SPECIAL"
]
}
}
diff --git a/src/commands/latency-doctor.json b/src/commands/latency-doctor.json
index 2686c0d58..129b32358 100644
--- a/src/commands/latency-doctor.json
+++ b/src/commands/latency-doctor.json
@@ -12,6 +12,11 @@
"NOSCRIPT",
"LOADING",
"STALE"
+ ],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_NODES",
+ "RESPONSE_POLICY:SPECIAL"
]
}
}
diff --git a/src/commands/latency-graph.json b/src/commands/latency-graph.json
index 0c8fe69c7..0644c1cb0 100644
--- a/src/commands/latency-graph.json
+++ b/src/commands/latency-graph.json
@@ -13,6 +13,11 @@
"LOADING",
"STALE"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_NODES",
+ "RESPONSE_POLICY:SPECIAL"
+ ],
"arguments": [
{
"name": "event",
diff --git a/src/commands/latency-histogram.json b/src/commands/latency-histogram.json
index 6ffecd80d..dc14d47f8 100644
--- a/src/commands/latency-histogram.json
+++ b/src/commands/latency-histogram.json
@@ -13,6 +13,11 @@
"LOADING",
"STALE"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_NODES",
+ "RESPONSE_POLICY:SPECIAL"
+ ],
"arguments": [
{
"name": "COMMAND",
diff --git a/src/commands/latency-history.json b/src/commands/latency-history.json
index 126e0d9f4..6d9267064 100644
--- a/src/commands/latency-history.json
+++ b/src/commands/latency-history.json
@@ -13,6 +13,11 @@
"LOADING",
"STALE"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_NODES",
+ "RESPONSE_POLICY:SPECIAL"
+ ],
"arguments": [
{
"name": "event",
diff --git a/src/commands/latency-latest.json b/src/commands/latency-latest.json
index f9a349e58..f513689c5 100644
--- a/src/commands/latency-latest.json
+++ b/src/commands/latency-latest.json
@@ -12,6 +12,11 @@
"NOSCRIPT",
"LOADING",
"STALE"
+ ],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_NODES",
+ "RESPONSE_POLICY:SPECIAL"
]
}
}
diff --git a/src/commands/latency-reset.json b/src/commands/latency-reset.json
index cdd488caf..30295cc05 100644
--- a/src/commands/latency-reset.json
+++ b/src/commands/latency-reset.json
@@ -13,6 +13,10 @@
"LOADING",
"STALE"
],
+ "command_tips": [
+ "REQUEST_POLICY:ALL_NODES",
+ "RESPONSE_POLICY:ALL_SUCCEEDED"
+ ],
"arguments": [
{
"name": "event",
diff --git a/src/commands/memory-doctor.json b/src/commands/memory-doctor.json
index 590778a7c..b6691dfa0 100644
--- a/src/commands/memory-doctor.json
+++ b/src/commands/memory-doctor.json
@@ -8,7 +8,9 @@
"container": "MEMORY",
"function": "memoryCommand",
"command_tips": [
- "NONDETERMINISTIC_OUTPUT"
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_SHARDS",
+ "RESPONSE_POLICY:SPECIAL"
]
}
}
diff --git a/src/commands/memory-malloc-stats.json b/src/commands/memory-malloc-stats.json
index 69265c6e7..5106781fe 100644
--- a/src/commands/memory-malloc-stats.json
+++ b/src/commands/memory-malloc-stats.json
@@ -8,7 +8,9 @@
"container": "MEMORY",
"function": "memoryCommand",
"command_tips": [
- "NONDETERMINISTIC_OUTPUT"
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_SHARDS",
+ "RESPONSE_POLICY:SPECIAL"
]
}
}
diff --git a/src/commands/memory-purge.json b/src/commands/memory-purge.json
index 7cac7389d..b862534d1 100644
--- a/src/commands/memory-purge.json
+++ b/src/commands/memory-purge.json
@@ -6,6 +6,10 @@
"since": "4.0.0",
"arity": 2,
"container": "MEMORY",
- "function": "memoryCommand"
+ "function": "memoryCommand",
+ "command_tips": [
+ "REQUEST_POLICY:ALL_SHARDS",
+ "RESPONSE_POLICY:ALL_SUCCEEDED"
+ ]
}
}
diff --git a/src/commands/memory-stats.json b/src/commands/memory-stats.json
index aaef41d61..76e6baa3e 100644
--- a/src/commands/memory-stats.json
+++ b/src/commands/memory-stats.json
@@ -8,7 +8,9 @@
"container": "MEMORY",
"function": "memoryCommand",
"command_tips": [
- "NONDETERMINISTIC_OUTPUT"
+ "NONDETERMINISTIC_OUTPUT",
+ "REQUEST_POLICY:ALL_SHARDS",
+ "RESPONSE_POLICY:SPECIAL"
]
}
}
diff --git a/src/commands/migrate.json b/src/commands/migrate.json
index 5a633e0dc..d07fe4b15 100644
--- a/src/commands/migrate.json
+++ b/src/commands/migrate.json
@@ -84,7 +84,7 @@
},
{
"name": "port",
- "type": "string"
+ "type": "integer"
},
{
"name": "key_or_empty_string",
diff --git a/src/commands/module-list.json b/src/commands/module-list.json
index 56a7cd3f5..ed6e7d19b 100644
--- a/src/commands/module-list.json
+++ b/src/commands/module-list.json
@@ -10,6 +10,9 @@
"command_flags": [
"ADMIN",
"NOSCRIPT"
+ ],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT_ORDER"
]
}
}
diff --git a/src/commands/module-loadex.json b/src/commands/module-loadex.json
new file mode 100644
index 000000000..e772cbfe4
--- /dev/null
+++ b/src/commands/module-loadex.json
@@ -0,0 +1,53 @@
+{
+ "LOADEX": {
+ "summary": "Load a module with extended parameters",
+ "complexity": "O(1)",
+ "group": "server",
+ "since": "7.0.0",
+ "arity": -3,
+ "container": "MODULE",
+ "function": "moduleCommand",
+ "command_flags": [
+ "NO_ASYNC_LOADING",
+ "ADMIN",
+ "NOSCRIPT",
+ "PROTECTED"
+ ],
+ "arguments": [
+ {
+ "name": "path",
+ "type": "string"
+ },
+ {
+ "name": "configs",
+ "token": "CONFIG",
+ "type": "block",
+ "multiple": true,
+ "optional": true,
+ "arguments": [
+ {
+ "name": "name",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "args",
+ "token": "ARGS",
+ "type": "block",
+ "multiple": true,
+ "optional": true,
+ "arguments": [
+ {
+ "name": "arg",
+ "type": "string"
+ }
+ ]
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/src/commands/monitor.json b/src/commands/monitor.json
index 5c9f0629c..a305c4fce 100644
--- a/src/commands/monitor.json
+++ b/src/commands/monitor.json
@@ -5,20 +5,7 @@
"since": "1.0.0",
"arity": 1,
"function": "monitorCommand",
- "history": [
- [
- "6.0.0",
- "`AUTH` excluded from the command's output."
- ],
- [
- "6.2.0",
- "`RESET` can be called to exit monitor mode."
- ],
- [
- "6.2.4",
- "`AUTH`, `HELLO`, `EVAL`, `EVAL_RO`, `EVALSHA` and `EVALSHA_RO` included in the command's output."
- ]
- ],
+ "history": [],
"command_flags": [
"ADMIN",
"NOSCRIPT",
diff --git a/src/commands/msetnx.json b/src/commands/msetnx.json
index 7984598a2..544ac6450 100644
--- a/src/commands/msetnx.json
+++ b/src/commands/msetnx.json
@@ -13,6 +13,10 @@
"acl_categories": [
"STRING"
],
+ "command_tips": [
+ "REQUEST_POLICY:MULTI_SHARD",
+ "RESPONSE_POLICY:AGG_MIN"
+ ],
"key_specs": [
{
"flags": [
diff --git a/src/commands/object-refcount.json b/src/commands/object-refcount.json
index 273dd0759..0f36f5092 100644
--- a/src/commands/object-refcount.json
+++ b/src/commands/object-refcount.json
@@ -13,6 +13,9 @@
"acl_categories": [
"KEYSPACE"
],
+ "command_tips": [
+ "NONDETERMINISTIC_OUTPUT"
+ ],
"key_specs": [
{
"flags": [
diff --git a/src/commands/psync.json b/src/commands/psync.json
index 1e0425220..91175a198 100644
--- a/src/commands/psync.json
+++ b/src/commands/psync.json
@@ -14,7 +14,7 @@
"arguments": [
{
"name": "replicationid",
- "type": "integer"
+ "type": "string"
},
{
"name": "offset",
diff --git a/src/commands/pubsub-channels.json b/src/commands/pubsub-channels.json
index 2b4e765d1..0522504b1 100644
--- a/src/commands/pubsub-channels.json
+++ b/src/commands/pubsub-channels.json
@@ -15,7 +15,7 @@
"arguments": [
{
"name": "pattern",
- "type": "string",
+ "type": "pattern",
"optional": true
}
]
diff --git a/src/commands/pubsub-shardchannels.json b/src/commands/pubsub-shardchannels.json
index 450cd8dcd..3bffa221f 100644
--- a/src/commands/pubsub-shardchannels.json
+++ b/src/commands/pubsub-shardchannels.json
@@ -1,23 +1,23 @@
{
- "SHARDCHANNELS": {
- "summary": "List active shard channels",
- "complexity": "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short channels).",
- "group": "pubsub",
- "since": "7.0.0",
- "arity": -2,
- "container": "PUBSUB",
- "function": "pubsubCommand",
- "command_flags": [
- "PUBSUB",
- "LOADING",
- "STALE"
- ],
- "arguments": [
- {
- "name": "pattern",
- "type": "string",
- "optional": true
- }
- ]
- }
+ "SHARDCHANNELS": {
+ "summary": "List active shard channels",
+ "complexity": "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short channels).",
+ "group": "pubsub",
+ "since": "7.0.0",
+ "arity": -2,
+ "container": "PUBSUB",
+ "function": "pubsubCommand",
+ "command_flags": [
+ "PUBSUB",
+ "LOADING",
+ "STALE"
+ ],
+ "arguments": [
+ {
+ "name": "pattern",
+ "type": "pattern",
+ "optional": true
+ }
+ ]
+ }
}
diff --git a/src/commands/pubsub-shardnumsub.json b/src/commands/pubsub-shardnumsub.json
index 55f5101ac..8c51549b0 100644
--- a/src/commands/pubsub-shardnumsub.json
+++ b/src/commands/pubsub-shardnumsub.json
@@ -1,24 +1,24 @@
{
- "SHARDNUMSUB": {
- "summary": "Get the count of subscribers for shard channels",
- "complexity": "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested channels",
- "group": "pubsub",
- "since": "7.0.0",
- "arity": -2,
- "container": "PUBSUB",
- "function": "pubsubCommand",
- "command_flags": [
- "PUBSUB",
- "LOADING",
- "STALE"
- ],
- "arguments": [
- {
- "name": "channel",
- "type": "string",
- "optional": true,
- "multiple": true
- }
- ]
- }
+ "SHARDNUMSUB": {
+ "summary": "Get the count of subscribers for shard channels",
+ "complexity": "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested channels",
+ "group": "pubsub",
+ "since": "7.0.0",
+ "arity": -2,
+ "container": "PUBSUB",
+ "function": "pubsubCommand",
+ "command_flags": [
+ "PUBSUB",
+ "LOADING",
+ "STALE"
+ ],
+ "arguments": [
+ {
+ "name": "channel",
+ "type": "string",
+ "optional": true,
+ "multiple": true
+ }
+ ]
+ }
}
diff --git a/src/commands/rename.json b/src/commands/rename.json
index 92d53979a..1196abaf4 100644
--- a/src/commands/rename.json
+++ b/src/commands/rename.json
@@ -6,12 +6,7 @@
"since": "1.0.0",
"arity": 3,
"function": "renameCommand",
- "history": [
- [
- "3.2.0",
- "The command no longer returns an error when source and destination names are the same."
- ]
- ],
+ "history": [],
"command_flags": [
"WRITE"
],
diff --git a/src/commands/replicaof.json b/src/commands/replicaof.json
index 805b81e4c..6299ea3ff 100644
--- a/src/commands/replicaof.json
+++ b/src/commands/replicaof.json
@@ -19,7 +19,7 @@
},
{
"name": "port",
- "type": "string"
+ "type": "integer"
}
]
}
diff --git a/src/commands/script-flush.json b/src/commands/script-flush.json
index 3503d1cad..b246bdef5 100644
--- a/src/commands/script-flush.json
+++ b/src/commands/script-flush.json
@@ -10,7 +10,7 @@
"history": [
[
"6.2.0",
- "Added the `ASYNC` and `SYNC` flushing mode modifiers, as well as the **lazyfree-lazy-user-flush** configuration directive."
+ "Added the `ASYNC` and `SYNC` flushing mode modifiers."
]
],
"command_flags": [
diff --git a/src/commands/sentinel-ckquorum.json b/src/commands/sentinel-ckquorum.json
index b49e2abc6..6180614cc 100644
--- a/src/commands/sentinel-ckquorum.json
+++ b/src/commands/sentinel-ckquorum.json
@@ -10,6 +10,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-config.json b/src/commands/sentinel-config.json
index 02a1f3730..74bcdbd50 100644
--- a/src/commands/sentinel-config.json
+++ b/src/commands/sentinel-config.json
@@ -11,6 +11,36 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name":"set_or_get",
+ "type":"oneof",
+ "arguments":[
+ {
+ "name":"set_param_value",
+ "token":"SET",
+ "type":"block",
+ "multiple":true,
+ "arguments":[
+ {
+ "name":"parameter",
+ "type":"string"
+ },
+ {
+ "name":"value",
+ "type":"string"
+ }
+ ]
+ },
+ {
+ "token":"GET",
+ "multiple":true,
+ "name":"parameter",
+ "type":"string"
+ }
+ ]
+ }
]
}
}
diff --git a/src/commands/sentinel-debug.json b/src/commands/sentinel-debug.json
index 44c6bec9b..b3335409d 100644
--- a/src/commands/sentinel-debug.json
+++ b/src/commands/sentinel-debug.json
@@ -11,6 +11,23 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "parameter_value",
+ "type": "block",
+ "multiple": true,
+ "arguments": [
+ {
+ "name": "parameter",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "type": "string"
+ }
+ ]
+ }
]
}
}
diff --git a/src/commands/sentinel-failover.json b/src/commands/sentinel-failover.json
index 346528e88..f6640168a 100644
--- a/src/commands/sentinel-failover.json
+++ b/src/commands/sentinel-failover.json
@@ -10,6 +10,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-get-master-addr-by-name.json b/src/commands/sentinel-get-master-addr-by-name.json
index 7489649c3..e0fde851c 100644
--- a/src/commands/sentinel-get-master-addr-by-name.json
+++ b/src/commands/sentinel-get-master-addr-by-name.json
@@ -11,6 +11,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-info-cache.json b/src/commands/sentinel-info-cache.json
index db63ae25e..5c7855663 100644
--- a/src/commands/sentinel-info-cache.json
+++ b/src/commands/sentinel-info-cache.json
@@ -4,13 +4,20 @@
"complexity": "O(N) where N is the number of instances",
"group": "sentinel",
"since": "3.2.0",
- "arity": 3,
+ "arity": -3,
"container": "SENTINEL",
"function": "sentinelCommand",
"command_flags": [
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "nodename",
+ "type": "string",
+ "multiple": true
+ }
]
}
}
diff --git a/src/commands/sentinel-is-master-down-by-addr.json b/src/commands/sentinel-is-master-down-by-addr.json
index af5ce9ad5..456ad183a 100644
--- a/src/commands/sentinel-is-master-down-by-addr.json
+++ b/src/commands/sentinel-is-master-down-by-addr.json
@@ -11,6 +11,24 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "ip",
+ "type": "string"
+ },
+ {
+ "name": "port",
+ "type": "integer"
+ },
+ {
+ "name": "current-epoch",
+ "type": "integer"
+ },
+ {
+ "name": "runid",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-master.json b/src/commands/sentinel-master.json
index 6935332d3..ec10f43fd 100644
--- a/src/commands/sentinel-master.json
+++ b/src/commands/sentinel-master.json
@@ -11,6 +11,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-monitor.json b/src/commands/sentinel-monitor.json
index 7b115ab4e..2c01df900 100644
--- a/src/commands/sentinel-monitor.json
+++ b/src/commands/sentinel-monitor.json
@@ -11,6 +11,24 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "name",
+ "type": "string"
+ },
+ {
+ "name": "ip",
+ "type": "string"
+ },
+ {
+ "name": "port",
+ "type": "integer"
+ },
+ {
+ "name": "quorum",
+ "type": "integer"
+ }
]
}
}
diff --git a/src/commands/sentinel-remove.json b/src/commands/sentinel-remove.json
index 0c499ab97..2e655e7f4 100644
--- a/src/commands/sentinel-remove.json
+++ b/src/commands/sentinel-remove.json
@@ -11,6 +11,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-replicas.json b/src/commands/sentinel-replicas.json
index f0a987346..dc175a7ec 100644
--- a/src/commands/sentinel-replicas.json
+++ b/src/commands/sentinel-replicas.json
@@ -11,6 +11,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-reset.json b/src/commands/sentinel-reset.json
index dc9fadd3a..9c60c6be7 100644
--- a/src/commands/sentinel-reset.json
+++ b/src/commands/sentinel-reset.json
@@ -11,6 +11,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "pattern",
+ "type": "pattern"
+ }
]
}
}
diff --git a/src/commands/sentinel-sentinels.json b/src/commands/sentinel-sentinels.json
index c61435278..01319ce83 100644
--- a/src/commands/sentinel-sentinels.json
+++ b/src/commands/sentinel-sentinels.json
@@ -11,6 +11,12 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ }
]
}
}
diff --git a/src/commands/sentinel-set.json b/src/commands/sentinel-set.json
index 9b962bd22..afe036065 100644
--- a/src/commands/sentinel-set.json
+++ b/src/commands/sentinel-set.json
@@ -11,6 +11,27 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "master-name",
+ "type": "string"
+ },
+ {
+ "name": "option_value",
+ "type": "block",
+ "multiple": true,
+ "arguments": [
+ {
+ "name": "option",
+ "type": "string"
+ },
+ {
+ "name": "value",
+ "type": "string"
+ }
+ ]
+ }
]
}
}
diff --git a/src/commands/sentinel-simulate-failure.json b/src/commands/sentinel-simulate-failure.json
index 85423a2f5..4912a8b70 100644
--- a/src/commands/sentinel-simulate-failure.json
+++ b/src/commands/sentinel-simulate-failure.json
@@ -10,6 +10,28 @@
"ADMIN",
"SENTINEL",
"ONLY_SENTINEL"
+ ],
+ "arguments": [
+ {
+ "name": "mode",
+ "type": "oneof",
+ "optional":true,
+ "multiple":true,
+ "arguments": [
+ {
+ "name": "crash-after-election",
+ "type": "pure-token"
+ },
+ {
+ "name": "crash-after-promotion",
+ "type": "pure-token"
+ },
+ {
+ "name": "help",
+ "type": "pure-token"
+ }
+ ]
+ }
]
}
}
diff --git a/src/commands/shutdown.json b/src/commands/shutdown.json
index 605cc2b16..63da3ca03 100644
--- a/src/commands/shutdown.json
+++ b/src/commands/shutdown.json
@@ -9,7 +9,7 @@
"history": [
[
"7.0.0",
- "Added the `NOW`, `FORCE` and `ABORT` modifiers. Introduced waiting for lagging replicas before exiting."
+ "Added the `NOW`, `FORCE` and `ABORT` modifiers."
]
],
"command_flags": [
diff --git a/src/commands/slaveof.json b/src/commands/slaveof.json
index 271eb2d1b..70560f1b6 100644
--- a/src/commands/slaveof.json
+++ b/src/commands/slaveof.json
@@ -24,7 +24,7 @@
},
{
"name": "port",
- "type": "string"
+ "type": "integer"
}
]
}
diff --git a/src/commands/sort.json b/src/commands/sort.json
index 01ac2745d..3f077e0e5 100644
--- a/src/commands/sort.json
+++ b/src/commands/sort.json
@@ -96,7 +96,7 @@
"token": "GET",
"name": "pattern",
"key_spec_index": 1,
- "type": "string",
+ "type": "pattern",
"optional": true,
"multiple": true,
"multiple_token": true
diff --git a/src/commands/sort_ro.json b/src/commands/sort_ro.json
index 11336c52c..83b48d1b3 100644
--- a/src/commands/sort_ro.json
+++ b/src/commands/sort_ro.json
@@ -82,7 +82,7 @@
"token": "GET",
"name": "pattern",
"key_spec_index": 1,
- "type": "string",
+ "type": "pattern",
"optional": true,
"multiple": true,
"multiple_token": true
diff --git a/src/commands/spublish.json b/src/commands/spublish.json
index 2cbcdc19a..816ff0ad3 100644
--- a/src/commands/spublish.json
+++ b/src/commands/spublish.json
@@ -1,46 +1,46 @@
{
- "SPUBLISH": {
- "summary": "Post a message to a shard channel",
- "complexity": "O(N) where N is the number of clients subscribed to the receiving shard channel.",
- "group": "pubsub",
- "since": "7.0.0",
- "arity": 3,
- "function": "spublishCommand",
- "command_flags": [
- "PUBSUB",
- "LOADING",
- "STALE",
- "FAST",
- "MAY_REPLICATE"
- ],
- "arguments": [
- {
- "name": "channel",
- "type": "string"
- },
- {
- "name": "message",
- "type": "string"
- }
- ],
- "key_specs": [
- {
- "flags": [
- "NOT_KEY"
+ "SPUBLISH": {
+ "summary": "Post a message to a shard channel",
+ "complexity": "O(N) where N is the number of clients subscribed to the receiving shard channel.",
+ "group": "pubsub",
+ "since": "7.0.0",
+ "arity": 3,
+ "function": "spublishCommand",
+ "command_flags": [
+ "PUBSUB",
+ "LOADING",
+ "STALE",
+ "FAST",
+ "MAY_REPLICATE"
],
- "begin_search": {
- "index": {
- "pos": 1
- }
- },
- "find_keys": {
- "range": {
- "lastkey": 0,
- "step": 1,
- "limit": 0
- }
- }
- }
- ]
- }
+ "arguments": [
+ {
+ "name": "channel",
+ "type": "string"
+ },
+ {
+ "name": "message",
+ "type": "string"
+ }
+ ],
+ "key_specs": [
+ {
+ "flags": [
+ "NOT_KEY"
+ ],
+ "begin_search": {
+ "index": {
+ "pos": 1
+ }
+ },
+ "find_keys": {
+ "range": {
+ "lastkey": 0,
+ "step": 1,
+ "limit": 0
+ }
+ }
+ }
+ ]
+ }
}
diff --git a/src/commands/ssubscribe.json b/src/commands/ssubscribe.json
index eb570ea53..c49f801a6 100644
--- a/src/commands/ssubscribe.json
+++ b/src/commands/ssubscribe.json
@@ -1,42 +1,42 @@
{
- "SSUBSCRIBE": {
- "summary": "Listen for messages published to the given shard channels",
- "complexity": "O(N) where N is the number of shard channels to subscribe to.",
- "group": "pubsub",
- "since": "7.0.0",
- "arity": -2,
- "function": "ssubscribeCommand",
- "command_flags": [
- "PUBSUB",
- "NOSCRIPT",
- "LOADING",
- "STALE"
- ],
- "arguments": [
- {
- "name": "channel",
- "type": "string",
- "multiple": true
- }
- ],
- "key_specs": [
- {
- "flags": [
- "NOT_KEY"
+ "SSUBSCRIBE": {
+ "summary": "Listen for messages published to the given shard channels",
+ "complexity": "O(N) where N is the number of shard channels to subscribe to.",
+ "group": "pubsub",
+ "since": "7.0.0",
+ "arity": -2,
+ "function": "ssubscribeCommand",
+ "command_flags": [
+ "PUBSUB",
+ "NOSCRIPT",
+ "LOADING",
+ "STALE"
],
- "begin_search": {
- "index": {
- "pos": 1
- }
- },
- "find_keys": {
- "range": {
- "lastkey": -1,
- "step": 1,
- "limit": 0
- }
- }
- }
- ]
- }
+ "arguments": [
+ {
+ "name": "channel",
+ "type": "string",
+ "multiple": true
+ }
+ ],
+ "key_specs": [
+ {
+ "flags": [
+ "NOT_KEY"
+ ],
+ "begin_search": {
+ "index": {
+ "pos": 1
+ }
+ },
+ "find_keys": {
+ "range": {
+ "lastkey": -1,
+ "step": 1,
+ "limit": 0
+ }
+ }
+ }
+ ]
+ }
}
diff --git a/src/commands/subscribe.json b/src/commands/subscribe.json
index b0f00d9e6..fa6ac076a 100644
--- a/src/commands/subscribe.json
+++ b/src/commands/subscribe.json
@@ -6,12 +6,7 @@
"since": "2.0.0",
"arity": -2,
"function": "subscribeCommand",
- "history": [
- [
- "6.2.0",
- "`RESET` can be called to exit subscribed state."
- ]
- ],
+ "history": [],
"command_flags": [
"PUBSUB",
"NOSCRIPT",
diff --git a/src/commands/sunsubscribe.json b/src/commands/sunsubscribe.json
index 481415490..5b428237b 100644
--- a/src/commands/sunsubscribe.json
+++ b/src/commands/sunsubscribe.json
@@ -1,43 +1,43 @@
{
- "SUNSUBSCRIBE": {
- "summary": "Stop listening for messages posted to the given shard channels",
- "complexity": "O(N) where N is the number of clients already subscribed to a channel.",
- "group": "pubsub",
- "since": "7.0.0",
- "arity": -1,
- "function": "sunsubscribeCommand",
- "command_flags": [
- "PUBSUB",
- "NOSCRIPT",
- "LOADING",
- "STALE"
- ],
- "arguments": [
- {
- "name": "channel",
- "type": "string",
- "optional": true,
- "multiple": true
- }
- ],
- "key_specs": [
- {
- "flags": [
- "NOT_KEY"
+ "SUNSUBSCRIBE": {
+ "summary": "Stop listening for messages posted to the given shard channels",
+ "complexity": "O(N) where N is the number of clients already subscribed to a channel.",
+ "group": "pubsub",
+ "since": "7.0.0",
+ "arity": -1,
+ "function": "sunsubscribeCommand",
+ "command_flags": [
+ "PUBSUB",
+ "NOSCRIPT",
+ "LOADING",
+ "STALE"
],
- "begin_search": {
- "index": {
- "pos": 1
- }
- },
- "find_keys": {
- "range": {
- "lastkey": -1,
- "step": 1,
- "limit": 0
- }
- }
- }
- ]
- }
+ "arguments": [
+ {
+ "name": "channel",
+ "type": "string",
+ "optional": true,
+ "multiple": true
+ }
+ ],
+ "key_specs": [
+ {
+ "flags": [
+ "NOT_KEY"
+ ],
+ "begin_search": {
+ "index": {
+ "pos": 1
+ }
+ },
+ "find_keys": {
+ "range": {
+ "lastkey": -1,
+ "step": 1,
+ "limit": 0
+ }
+ }
+ }
+ ]
+ }
}
diff --git a/src/config.c b/src/config.c
index a52e00f66..3b5d4d349 100644
--- a/src/config.c
+++ b/src/config.c
@@ -40,11 +40,6 @@
* Config file name-value maps.
*----------------------------------------------------------------------------*/
-typedef struct configEnum {
- const char *name;
- const int val;
-} configEnum;
-
typedef struct deprecatedConfig {
const char *name;
const int argc_min;
@@ -168,7 +163,7 @@ int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT] = { 0, 200, 800 };
* rewrite. */
typedef struct boolConfigData {
int *config; /* The pointer to the server config this value is stored in */
- const int default_value; /* The default value of the config on rewrite */
+ int default_value; /* The default value of the config on rewrite */
int (*is_valid_fn)(int val, const char **err); /* Optional function to check validity of new value (generic doc above) */
} boolConfigData;
@@ -182,7 +177,7 @@ typedef struct stringConfigData {
typedef struct sdsConfigData {
sds *config; /* Pointer to the server config this value is stored in. */
- const char *default_value; /* Default value of the config on rewrite. */
+ char *default_value; /* Default value of the config on rewrite. */
int (*is_valid_fn)(sds val, const char **err); /* Optional function to check validity of new value (generic doc above) */
int convert_empty_to_null; /* Boolean indicating if empty SDS strings should
be stored as a NULL value. */
@@ -191,7 +186,7 @@ typedef struct sdsConfigData {
typedef struct enumConfigData {
int *config; /* The pointer to the server config this value is stored in */
configEnum *enum_value; /* The underlying enum type this data represents */
- const int default_value; /* The default value of the config on rewrite */
+ int default_value; /* The default value of the config on rewrite */
int (*is_valid_fn)(int val, const char **err); /* Optional function to check validity of new value (generic doc above) */
} enumConfigData;
@@ -208,11 +203,6 @@ typedef enum numericType {
NUMERIC_TYPE_TIME_T,
} numericType;
-#define INTEGER_CONFIG 0 /* No flags means a simple integer configuration */
-#define MEMORY_CONFIG (1<<0) /* Indicates if this value can be loaded as a memory value */
-#define PERCENT_CONFIG (1<<1) /* Indicates if this value can be loaded as a percent (and stored as a negative int) */
-#define OCTAL_CONFIG (1<<2) /* This value uses octal representation */
-
typedef struct numericConfigData {
union {
int *i;
@@ -230,7 +220,7 @@ typedef struct numericConfigData {
numericType numeric_type; /* An enum indicating the type of this value */
long long lower_bound; /* The lower bound of this numeric value */
long long upper_bound; /* The upper bound of this numeric value */
- const long long default_value; /* The default value of the config on rewrite */
+ long long default_value; /* The default value of the config on rewrite */
int (*is_valid_fn)(long long val, const char **err); /* Optional function to check validity of new value (generic doc above) */
} numericConfigData;
@@ -242,43 +232,44 @@ typedef union typeData {
numericConfigData numeric;
} typeData;
+typedef struct standardConfig standardConfig;
+
typedef int (*apply_fn)(const char **err);
typedef struct typeInterface {
/* Called on server start, to init the server with default value */
- void (*init)(typeData data);
+ void (*init)(standardConfig *config);
/* Called on server startup and CONFIG SET, returns 1 on success,
* 2 meaning no actual change done, 0 on error and can set a verbose err
* string */
- int (*set)(typeData data, sds *argv, int argc, const char **err);
+ int (*set)(standardConfig *config, sds *argv, int argc, const char **err);
/* Optional: called after `set()` to apply the config change. Used only in
* the context of CONFIG SET. Returns 1 on success, 0 on failure.
* Optionally set err to a static error string. */
apply_fn apply;
/* Called on CONFIG GET, returns sds to be used in reply */
- sds (*get)(typeData data);
+ sds (*get)(standardConfig *config);
/* Called on CONFIG REWRITE, required to rewrite the config state */
- void (*rewrite)(typeData data, const char *name, struct rewriteConfigState *state);
+ void (*rewrite)(standardConfig *config, const char *name, struct rewriteConfigState *state);
} typeInterface;
-typedef struct standardConfig {
+struct standardConfig {
const char *name; /* The user visible name of this config */
const char *alias; /* An alias that can also be used for this config */
- const unsigned int flags; /* Flags for this specific config */
+ unsigned int flags; /* Flags for this specific config */
typeInterface interface; /* The function pointers that define the type interface */
typeData data; /* The type specific data exposed used by the interface */
-} standardConfig;
+ configType type; /* The type of config this is. */
+ void *privdata; /* privdata for this config, for module configs this is a ModuleConfig struct */
+};
-#define MODIFIABLE_CONFIG 0 /* This is the implied default for a standard
- * config, which is mutable. */
-#define IMMUTABLE_CONFIG (1ULL<<0) /* Can this value only be set at startup? */
-#define SENSITIVE_CONFIG (1ULL<<1) /* Does this value contain sensitive information */
-#define DEBUG_CONFIG (1ULL<<2) /* Values that are useful for debugging. */
-#define MULTI_ARG_CONFIG (1ULL<<3) /* This config receives multiple arguments. */
-#define HIDDEN_CONFIG (1ULL<<4) /* This config is hidden in `config get <pattern>` (used for tests/debugging) */
-#define PROTECTED_CONFIG (1ULL<<5) /* Becomes immutable if enable-protected-configs is enabled. */
-#define DENY_LOADING_CONFIG (1ULL<<6) /* This config is forbidden during loading. */
+dict *configs = NULL; /* Runtime config values */
-standardConfig configs[];
+/* Lookup a config by the provided sds string name, or return NULL
+ * if the config does not exist */
+static standardConfig *lookupConfig(sds name) {
+ dictEntry *de = dictFind(configs, name);
+ return de ? dictGetVal(de) : NULL;
+}
/*-----------------------------------------------------------------------------
* Enum access functions
@@ -407,12 +398,6 @@ static int updateClientOutputBufferLimit(sds *args, int arg_len, const char **er
return 1;
}
-void initConfigValues() {
- for (standardConfig *config = configs; config->name != NULL; config++) {
- if (config->interface.init) config->interface.init(config->data);
- }
-}
-
/* Note this is here to support detecting we're running a config set from
* within conf file parsing. This is only needed to support the deprecated
* abnormal aggregate `save T C` functionality. Remove in the future. */
@@ -458,29 +443,23 @@ void loadServerConfigFromString(char *config) {
sdstolower(argv[0]);
/* Iterate the configs that are standard */
- int match = 0;
- for (standardConfig *config = configs; config->name != NULL; config++) {
- if ((!strcasecmp(argv[0],config->name) ||
- (config->alias && !strcasecmp(argv[0],config->alias))))
- {
- /* For normal single arg configs enforce we have a single argument.
- * Note that MULTI_ARG_CONFIGs need to validate arg count on their own */
- if (!(config->flags & MULTI_ARG_CONFIG) && argc != 2) {
- err = "wrong number of arguments";
- goto loaderr;
- }
- /* Set config using all arguments that follows */
- if (!config->interface.set(config->data, &argv[1], argc-1, &err)) {
- goto loaderr;
- }
-
- match = 1;
- break;
+ standardConfig *config = lookupConfig(argv[0]);
+ if (config) {
+ /* For normal single arg configs enforce we have a single argument.
+ * Note that MULTI_ARG_CONFIGs need to validate arg count on their own */
+ if (!(config->flags & MULTI_ARG_CONFIG) && argc != 2) {
+ err = "wrong number of arguments";
+ goto loaderr;
+ }
+ /* Set config using all arguments that follows */
+ if (!config->interface.set(config, &argv[1], argc-1, &err)) {
+ goto loaderr;
}
- }
- /* If there's no matching above, we try matching them with deprecated configs */
- if (!match) {
+ sdsfreesplitres(argv,argc);
+ continue;
+ } else {
+ int match = 0;
for (deprecatedConfig *config = deprecated_configs; config->name != NULL; config++) {
if (!strcasecmp(argv[0], config->name) &&
config->argc_min <= argc &&
@@ -490,11 +469,10 @@ void loadServerConfigFromString(char *config) {
break;
}
}
- }
-
- if (match) {
- sdsfreesplitres(argv,argc);
- continue;
+ if (match) {
+ sdsfreesplitres(argv,argc);
+ continue;
+ }
}
/* Execute config directives */
@@ -535,6 +513,13 @@ void loadServerConfigFromString(char *config) {
}
} else if (!strcasecmp(argv[0],"loadmodule") && argc >= 2) {
queueLoadModule(argv[1],&argv[2],argc-2);
+ } else if (strchr(argv[0], '.')) {
+ if (argc != 2) {
+ err = "Module config specified without value";
+ goto loaderr;
+ }
+ sds name = sdsdup(argv[0]);
+ if (!dictReplace(server.module_configs_queue, name, sdsdup(argv[1]))) sdsfree(name);
} else if (!strcasecmp(argv[0],"sentinel")) {
/* argc == 1 is handled by main() as we need to enter the sentinel
* mode ASAP. */
@@ -597,9 +582,10 @@ loaderr:
* Both filename and options can be NULL, in such a case are considered
* empty. This way loadServerConfig can be used to just load a file or
* just load a string. */
+#define CONFIG_READ_LEN 1024
void loadServerConfig(char *filename, char config_from_stdin, char *options) {
sds config = sdsempty();
- char buf[CONFIG_MAX_LINE+1];
+ char buf[CONFIG_READ_LEN+1];
FILE *fp;
glob_t globbuf;
@@ -631,7 +617,7 @@ void loadServerConfig(char *filename, char config_from_stdin, char *options) {
globbuf.gl_pathv[i], strerror(errno));
exit(1);
}
- while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL)
+ while(fgets(buf,CONFIG_READ_LEN+1,fp) != NULL)
config = sdscat(config,buf);
fclose(fp);
}
@@ -647,7 +633,7 @@ void loadServerConfig(char *filename, char config_from_stdin, char *options) {
filename, strerror(errno));
exit(1);
}
- while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL)
+ while(fgets(buf,CONFIG_READ_LEN+1,fp) != NULL)
config = sdscat(config,buf);
fclose(fp);
}
@@ -657,7 +643,7 @@ void loadServerConfig(char *filename, char config_from_stdin, char *options) {
if (config_from_stdin) {
serverLog(LL_WARNING,"Reading config from stdin");
fp = stdin;
- while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL)
+ while(fgets(buf,CONFIG_READ_LEN+1,fp) != NULL)
config = sdscat(config,buf);
}
@@ -682,12 +668,45 @@ static int performInterfaceSet(standardConfig *config, sds value, const char **e
}
/* Set the config */
- res = config->interface.set(config->data, argv, argc, errstr);
+ res = config->interface.set(config, argv, argc, errstr);
if (config->flags & MULTI_ARG_CONFIG) sdsfreesplitres(argv, argc);
return res;
}
-static void restoreBackupConfig(standardConfig **set_configs, sds *old_values, int count, apply_fn *apply_fns) {
+/* Find the config by name and attempt to set it to value. */
+int performModuleConfigSetFromName(sds name, sds value, const char **err) {
+ standardConfig *config = lookupConfig(name);
+ if (!config || !(config->flags & MODULE_CONFIG)) {
+ *err = "Config name not found";
+ return 0;
+ }
+ return performInterfaceSet(config, value, err);
+}
+
+/* Find config by name and attempt to set it to its default value. */
+int performModuleConfigSetDefaultFromName(sds name, const char **err) {
+ standardConfig *config = lookupConfig(name);
+ serverAssert(config);
+ if (!(config->flags & MODULE_CONFIG)) {
+ *err = "Config name not found";
+ return 0;
+ }
+ switch (config->type) {
+ case BOOL_CONFIG:
+ return setModuleBoolConfig(config->privdata, config->data.yesno.default_value, err);
+ case SDS_CONFIG:
+ return setModuleStringConfig(config->privdata, config->data.sds.default_value, err);
+ case NUMERIC_CONFIG:
+ return setModuleNumericConfig(config->privdata, config->data.numeric.default_value, err);
+ case ENUM_CONFIG:
+ return setModuleEnumConfig(config->privdata, config->data.enumd.default_value, err);
+ default:
+ serverPanic("Config type of module config is not allowed.");
+ }
+ return 0;
+}
+
+static void restoreBackupConfig(standardConfig **set_configs, sds *old_values, int count, apply_fn *apply_fns, list *module_configs) {
int i;
const char *errstr = "unknown error";
/* Set all backup values */
@@ -703,6 +722,10 @@ static void restoreBackupConfig(standardConfig **set_configs, sds *old_values, i
serverLog(LL_WARNING, "Failed applying restored failed CONFIG SET command: %s", errstr);
}
}
+ if (module_configs) {
+ if (!moduleConfigApplyConfig(module_configs, &errstr, NULL))
+ serverLog(LL_WARNING, "Failed applying restored failed CONFIG SET command: %s", errstr);
+ }
}
/*-----------------------------------------------------------------------------
@@ -714,6 +737,8 @@ void configSetCommand(client *c) {
const char *invalid_arg_name = NULL;
const char *err_arg_name = NULL;
standardConfig **set_configs; /* TODO: make this a dict for better performance */
+ list *module_configs_apply;
+ const char **config_names;
sds *new_values;
sds *old_values = NULL;
apply_fn *apply_fns; /* TODO: make this a set for better performance */
@@ -728,7 +753,9 @@ void configSetCommand(client *c) {
}
config_count = (c->argc - 2) / 2;
+ module_configs_apply = listCreate();
set_configs = zcalloc(sizeof(standardConfig*)*config_count);
+ config_names = zcalloc(sizeof(char*)*config_count);
new_values = zmalloc(sizeof(sds*)*config_count);
old_values = zcalloc(sizeof(sds*)*config_count);
apply_fns = zcalloc(sizeof(apply_fn)*config_count);
@@ -736,73 +763,76 @@ void configSetCommand(client *c) {
/* Find all relevant configs */
for (i = 0; i < config_count; i++) {
- for (standardConfig *config = configs; config->name != NULL; config++) {
- if ((!strcasecmp(c->argv[2+i*2]->ptr,config->name) ||
- (config->alias && !strcasecmp(c->argv[2]->ptr,config->alias)))) {
-
- /* Note: it's important we run over ALL passed configs and check if we need to call `redactClientCommandArgument()`.
- * This is in order to avoid anyone using this command for a log/slowlog/monitor/etc. displaying sensitive info.
- * So even if we encounter an error we still continue running over the remaining arguments. */
- if (config->flags & SENSITIVE_CONFIG) {
- redactClientCommandArgument(c,2+i*2+1);
- }
+ standardConfig *config = lookupConfig(c->argv[2+i*2]->ptr);
+ /* Fail if we couldn't find this config */
+ if (!config) {
+ if (!invalid_args) {
+ invalid_arg_name = c->argv[2+i*2]->ptr;
+ invalid_args = 1;
+ }
+ continue;
+ }
- if (!invalid_args) {
- if (config->flags & IMMUTABLE_CONFIG ||
- (config->flags & PROTECTED_CONFIG && !allowProtectedAction(server.enable_protected_configs, c)))
- {
- /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
- errstr = (config->flags & IMMUTABLE_CONFIG) ? "can't set immutable config" : "can't set protected config";
- err_arg_name = c->argv[2+i*2]->ptr;
- invalid_args = 1;
- }
+ /* Note: it's important we run over ALL passed configs and check if we need to call `redactClientCommandArgument()`.
+ * This is in order to avoid anyone using this command for a log/slowlog/monitor/etc. displaying sensitive info.
+ * So even if we encounter an error we still continue running over the remaining arguments. */
+ if (config->flags & SENSITIVE_CONFIG) {
+ redactClientCommandArgument(c,2+i*2+1);
+ }
- if (server.loading && config->flags & DENY_LOADING_CONFIG) {
- /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
- deny_loading_error = 1;
- invalid_args = 1;
- }
+ /* We continue to make sure we redact all the configs */
+ if (invalid_args) continue;
- /* If this config appears twice then fail */
- for (j = 0; j < i; j++) {
- if (set_configs[j] == config) {
- /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
- errstr = "duplicate parameter";
- err_arg_name = c->argv[2+i*2]->ptr;
- invalid_args = 1;
- break;
- }
- }
- set_configs[i] = config;
- new_values[i] = c->argv[2+i*2+1]->ptr;
- }
- break;
- }
+ if (config->flags & IMMUTABLE_CONFIG ||
+ (config->flags & PROTECTED_CONFIG && !allowProtectedAction(server.enable_protected_configs, c)))
+ {
+ /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
+ errstr = (config->flags & IMMUTABLE_CONFIG) ? "can't set immutable config" : "can't set protected config";
+ err_arg_name = c->argv[2+i*2]->ptr;
+ invalid_args = 1;
+ continue;
}
- /* Fail if we couldn't find this config */
- /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
- if (!invalid_args && !set_configs[i]) {
- invalid_arg_name = c->argv[2+i*2]->ptr;
+
+ if (server.loading && config->flags & DENY_LOADING_CONFIG) {
+ /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
+ deny_loading_error = 1;
invalid_args = 1;
+ continue;
+ }
+
+ /* If this config appears twice then fail */
+ for (j = 0; j < i; j++) {
+ if (set_configs[j] == config) {
+ /* Note: we don't abort the loop since we still want to handle redacting sensitive configs (above) */
+ errstr = "duplicate parameter";
+ err_arg_name = c->argv[2+i*2]->ptr;
+ invalid_args = 1;
+ break;
+ }
}
+ set_configs[i] = config;
+ config_names[i] = config->name;
+ new_values[i] = c->argv[2+i*2+1]->ptr;
}
if (invalid_args) goto err;
/* Backup old values before setting new ones */
for (i = 0; i < config_count; i++)
- old_values[i] = set_configs[i]->interface.get(set_configs[i]->data);
+ old_values[i] = set_configs[i]->interface.get(set_configs[i]);
/* Set all new values (don't apply yet) */
for (i = 0; i < config_count; i++) {
int res = performInterfaceSet(set_configs[i], new_values[i], &errstr);
if (!res) {
- restoreBackupConfig(set_configs, old_values, i+1, NULL);
+ restoreBackupConfig(set_configs, old_values, i+1, NULL, NULL);
err_arg_name = set_configs[i]->name;
goto err;
} else if (res == 1) {
/* A new value was set, if this config has an apply function then store it for execution later */
- if (set_configs[i]->interface.apply) {
+ if (set_configs[i]->flags & MODULE_CONFIG) {
+ addModuleConfigApply(module_configs_apply, set_configs[i]->privdata);
+ } else if (set_configs[i]->interface.apply) {
/* Check if this apply function is already stored */
int exists = 0;
for (j = 0; apply_fns[j] != NULL && j <= i; j++) {
@@ -824,11 +854,20 @@ void configSetCommand(client *c) {
for (i = 0; i < config_count && apply_fns[i] != NULL; i++) {
if (!apply_fns[i](&errstr)) {
serverLog(LL_WARNING, "Failed applying new configuration. Possibly related to new %s setting. Restoring previous settings.", set_configs[config_map_fns[i]]->name);
- restoreBackupConfig(set_configs, old_values, config_count, apply_fns);
+ restoreBackupConfig(set_configs, old_values, config_count, apply_fns, NULL);
err_arg_name = set_configs[config_map_fns[i]]->name;
goto err;
}
}
+ /* Apply all module configs that were set. */
+ if (!moduleConfigApplyConfig(module_configs_apply, &errstr, &err_arg_name)) {
+ serverLogRaw(LL_WARNING, "Failed applying new module configuration. Restoring previous settings.");
+ restoreBackupConfig(set_configs, old_values, config_count, apply_fns, module_configs_apply);
+ goto err;
+ }
+
+ RedisModuleConfigChangeV1 cc = {.num_changes = config_count, .config_names = config_names};
+ moduleFireServerEvent(REDISMODULE_EVENT_CONFIG, REDISMODULE_SUBEVENT_CONFIG_CHANGE, &cc);
addReply(c,shared.ok);
goto end;
@@ -845,12 +884,14 @@ err:
}
end:
zfree(set_configs);
+ zfree(config_names);
zfree(new_values);
for (i = 0; i < config_count; i++)
sdsfree(old_values[i]);
zfree(old_values);
zfree(apply_fns);
zfree(config_map_fns);
+ listRelease(module_configs_apply);
}
/*-----------------------------------------------------------------------------
@@ -858,38 +899,51 @@ end:
*----------------------------------------------------------------------------*/
void configGetCommand(client *c) {
- void *replylen = addReplyDeferredLen(c);
- int matches = 0;
int i;
+ dictEntry *de;
+ dictIterator *di;
+ /* Create a dictionary to store the matched configs */
+ dict *matches = dictCreate(&externalStringType);
+ for (i = 0; i < c->argc - 2; i++) {
+ robj *o = c->argv[2+i];
+ sds name = o->ptr;
+
+ /* If the string doesn't contain glob patterns, just directly
+ * look up the key in the dictionary. */
+ if (!strpbrk(name, "[*?")) {
+ if (dictFind(matches, name)) continue;
+ standardConfig *config = lookupConfig(name);
+
+ if (config) {
+ dictAdd(matches, name, config);
+ }
+ continue;
+ }
- for (standardConfig *config = configs; config->name != NULL; config++) {
- int matched_conf = 0;
- int matched_alias = 0;
- for (i = 0; i < c->argc - 2 && (!matched_conf || !matched_alias); i++) {
- robj *o = c->argv[2+i];
- char *pattern = o->ptr;
-
+ /* Otherwise, do a match against all items in the dictionary. */
+ di = dictGetIterator(configs);
+
+ while ((de = dictNext(di)) != NULL) {
+ standardConfig *config = dictGetVal(de);
/* Note that hidden configs require an exact match (not a pattern) */
- if (!matched_conf &&
- (((config->flags & HIDDEN_CONFIG) && !strcasecmp(pattern, config->name)) ||
- (!(config->flags & HIDDEN_CONFIG) && stringmatch(pattern, config->name, 1)))) {
- addReplyBulkCString(c, config->name);
- addReplyBulkSds(c, config->interface.get(config->data));
- matches++;
- matched_conf = 1;
- }
- if (!matched_alias && config->alias &&
- (((config->flags & HIDDEN_CONFIG) && !strcasecmp(pattern, config->alias)) ||
- (!(config->flags & HIDDEN_CONFIG) && stringmatch(pattern, config->alias, 1)))) {
- addReplyBulkCString(c, config->alias);
- addReplyBulkSds(c, config->interface.get(config->data));
- matches++;
- matched_alias = 1;
+ if (config->flags & HIDDEN_CONFIG) continue;
+ if (dictFind(matches, config->name)) continue;
+ if (stringmatch(name, de->key, 1)) {
+ dictAdd(matches, de->key, config);
}
}
+ dictReleaseIterator(di);
}
-
- setDeferredMapLen(c,replylen,matches);
+
+ di = dictGetIterator(matches);
+ addReplyMapLen(c, dictSize(matches));
+ while ((de = dictNext(di)) != NULL) {
+ standardConfig *config = (standardConfig *) dictGetVal(de);
+ addReplyBulkCString(c, de->key);
+ addReplyBulkSds(c, config->interface.get(config));
+ }
+ dictReleaseIterator(di);
+ dictRelease(matches);
}
/*-----------------------------------------------------------------------------
@@ -996,17 +1050,32 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) {
FILE *fp = fopen(path,"r");
if (fp == NULL && errno != ENOENT) return NULL;
- char buf[CONFIG_MAX_LINE+1];
+ struct redis_stat sb;
+ if (fp && redis_fstat(fileno(fp),&sb) == -1) return NULL;
+
int linenum = -1;
struct rewriteConfigState *state = rewriteConfigCreateState();
- if (fp == NULL) return state;
+ if (fp == NULL || sb.st_size == 0) return state;
- /* Read the old file line by line, populate the state. */
- while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL) {
+ /* Load the file content */
+ sds config = sdsnewlen(SDS_NOINIT,sb.st_size);
+ if (fread(config,1,sb.st_size,fp) == 0) {
+ sdsfree(config);
+ rewriteConfigReleaseState(state);
+ fclose(fp);
+ return NULL;
+ }
+
+ int i, totlines;
+ sds *lines = sdssplitlen(config,sdslen(config),"\n",1,&totlines);
+
+ /* Read the old content line by line, populate the state. */
+ for (i = 0; i < totlines; i++) {
int argc;
sds *argv;
- sds line = sdstrim(sdsnew(buf),"\r\n\t ");
+ sds line = sdstrim(lines[i],"\r\n\t ");
+ lines[i] = NULL;
linenum++; /* Zero based, so we init at -1 */
@@ -1020,12 +1089,13 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) {
/* Not a comment, split into arguments. */
argv = sdssplitargs(line,&argc);
- if (argv == NULL) {
+ if (argv == NULL || (!server.sentinel_mode && !lookupConfig(argv[0]))) {
/* Apparently the line is unparsable for some reason, for
- * instance it may have unbalanced quotes. Load it as a
- * comment. */
+ * instance it may have unbalanced quotes, or may contain a
+ * config that doesn't exist anymore. Load it as a comment. */
sds aux = sdsnew("# ??? ");
aux = sdscatsds(aux,line);
+ if (argv) sdsfreesplitres(argv, argc);
sdsfree(line);
rewriteConfigAppendLine(state,aux);
continue;
@@ -1062,6 +1132,8 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) {
sdsfreesplitres(argv,argc);
}
fclose(fp);
+ sdsfreesplitres(lines,totlines);
+ sdsfree(config);
return state;
}
@@ -1235,8 +1307,8 @@ void rewriteConfigEnumOption(struct rewriteConfigState *state, const char *optio
}
/* Rewrite the save option. */
-void rewriteConfigSaveOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigSaveOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
int j;
sds line;
@@ -1296,8 +1368,8 @@ void rewriteConfigUserOption(struct rewriteConfigState *state) {
}
/* Rewrite the dir option, always using absolute paths.*/
-void rewriteConfigDirOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigDirOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
char cwd[1024];
if (getcwd(cwd,sizeof(cwd)) == NULL) {
@@ -1308,8 +1380,8 @@ void rewriteConfigDirOption(typeData data, const char *name, struct rewriteConfi
}
/* Rewrite the slaveof option. */
-void rewriteConfigReplicaOfOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigReplicaOfOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
sds line;
/* If this is a master, we want all the slaveof config options
@@ -1325,8 +1397,8 @@ void rewriteConfigReplicaOfOption(typeData data, const char *name, struct rewrit
}
/* Rewrite the notify-keyspace-events option. */
-void rewriteConfigNotifyKeyspaceEventsOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigNotifyKeyspaceEventsOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
int force = server.notify_keyspace_events != 0;
sds line, flags;
@@ -1339,8 +1411,8 @@ void rewriteConfigNotifyKeyspaceEventsOption(typeData data, const char *name, st
}
/* Rewrite the client-output-buffer-limit option. */
-void rewriteConfigClientOutputBufferLimitOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigClientOutputBufferLimitOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
int j;
for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) {
int force = (server.client_obuf_limits[j].hard_limit_bytes !=
@@ -1367,8 +1439,8 @@ void rewriteConfigClientOutputBufferLimitOption(typeData data, const char *name,
}
/* Rewrite the oom-score-adj-values option. */
-void rewriteConfigOOMScoreAdjValuesOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigOOMScoreAdjValuesOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
int force = 0;
int j;
sds line;
@@ -1387,8 +1459,8 @@ void rewriteConfigOOMScoreAdjValuesOption(typeData data, const char *name, struc
}
/* Rewrite the bind option. */
-void rewriteConfigBindOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigBindOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
int force = 1;
sds line, addresses;
int is_default = 0;
@@ -1508,10 +1580,14 @@ sds getConfigDebugInfo() {
/* Iterate the configs and "rewrite" the ones that have
* the debug flag. */
- for (standardConfig *config = configs; config->name != NULL; config++) {
+ dictIterator *di = dictGetIterator(configs);
+ dictEntry *de;
+ while ((de = dictNext(di)) != NULL) {
+ standardConfig *config = dictGetVal(de);
if (!(config->flags & DEBUG_CONFIG)) continue;
- config->interface.rewrite(config->data, config->name, state);
+ config->interface.rewrite(config, config->name, state);
}
+ dictReleaseIterator(di);
sds info = rewriteConfigGetContentFromState(state);
rewriteConfigReleaseState(state);
return info;
@@ -1599,9 +1675,15 @@ int rewriteConfig(char *path, int force_write) {
* the rewrite state. */
/* Iterate the configs that are standard */
- for (standardConfig *config = configs; config->name != NULL; config++) {
- if (config->interface.rewrite) config->interface.rewrite(config->data, config->name, state);
+ dictIterator *di = dictGetIterator(configs);
+ dictEntry *de;
+ while ((de = dictNext(di)) != NULL) {
+ standardConfig *config = dictGetVal(de);
+ /* Only rewrite the primary names */
+ if (config->flags & ALIAS_CONFIG) continue;
+ if (config->interface.rewrite) config->interface.rewrite(config, de->key, state);
}
+ dictReleaseIterator(di);
rewriteConfigUserOption(state);
rewriteConfigLoadmoduleOption(state);
@@ -1656,38 +1738,46 @@ static char loadbuf[LOADBUF_SIZE];
*/
/* Bool Configs */
-static void boolConfigInit(typeData data) {
- *data.yesno.config = data.yesno.default_value;
+static void boolConfigInit(standardConfig *config) {
+ *config->data.yesno.config = config->data.yesno.default_value;
}
-static int boolConfigSet(typeData data, sds *argv, int argc, const char **err) {
+static int boolConfigSet(standardConfig *config, sds *argv, int argc, const char **err) {
UNUSED(argc);
int yn = yesnotoi(argv[0]);
if (yn == -1) {
*err = "argument must be 'yes' or 'no'";
return 0;
}
- if (data.yesno.is_valid_fn && !data.yesno.is_valid_fn(yn, err))
+ if (config->data.yesno.is_valid_fn && !config->data.yesno.is_valid_fn(yn, err))
return 0;
- int prev = *(data.yesno.config);
+ int prev = config->flags & MODULE_CONFIG ? getModuleBoolConfig(config->privdata) : *(config->data.yesno.config);
if (prev != yn) {
- *(data.yesno.config) = yn;
+ if (config->flags & MODULE_CONFIG) {
+ return setModuleBoolConfig(config->privdata, yn, err);
+ }
+ *(config->data.yesno.config) = yn;
return 1;
}
return 2;
}
-static sds boolConfigGet(typeData data) {
- return sdsnew(*data.yesno.config ? "yes" : "no");
+static sds boolConfigGet(standardConfig *config) {
+ if (config->flags & MODULE_CONFIG) {
+ return sdsnew(getModuleBoolConfig(config->privdata) ? "yes" : "no");
+ }
+ return sdsnew(*config->data.yesno.config ? "yes" : "no");
}
-static void boolConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
- rewriteConfigYesNoOption(state, name,*(data.yesno.config), data.yesno.default_value);
+static void boolConfigRewrite(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ int val = config->flags & MODULE_CONFIG ? getModuleBoolConfig(config->privdata) : *(config->data.yesno.config);
+ rewriteConfigYesNoOption(state, name, val, config->data.yesno.default_value);
}
#define createBoolConfig(name, alias, flags, config_addr, default, is_valid, apply) { \
embedCommonConfig(name, alias, flags) \
embedConfigInterface(boolConfigInit, boolConfigSet, boolConfigGet, boolConfigRewrite, apply) \
+ .type = BOOL_CONFIG, \
.data.yesno = { \
.config = &(config_addr), \
.default_value = (default), \
@@ -1696,61 +1786,69 @@ static void boolConfigRewrite(typeData data, const char *name, struct rewriteCon
}
/* String Configs */
-static void stringConfigInit(typeData data) {
- *data.string.config = (data.string.convert_empty_to_null && !data.string.default_value) ? NULL : zstrdup(data.string.default_value);
+static void stringConfigInit(standardConfig *config) {
+ *config->data.string.config = (config->data.string.convert_empty_to_null && !config->data.string.default_value) ? NULL : zstrdup(config->data.string.default_value);
}
-static int stringConfigSet(typeData data, sds *argv, int argc, const char **err) {
+static int stringConfigSet(standardConfig *config, sds *argv, int argc, const char **err) {
UNUSED(argc);
- if (data.string.is_valid_fn && !data.string.is_valid_fn(argv[0], err))
+ if (config->data.string.is_valid_fn && !config->data.string.is_valid_fn(argv[0], err))
return 0;
- char *prev = *data.string.config;
- char *new = (data.string.convert_empty_to_null && !argv[0][0]) ? NULL : argv[0];
+ char *prev = *config->data.string.config;
+ char *new = (config->data.string.convert_empty_to_null && !argv[0][0]) ? NULL : argv[0];
if (new != prev && (new == NULL || prev == NULL || strcmp(prev, new))) {
- *data.string.config = new != NULL ? zstrdup(new) : NULL;
+ *config->data.string.config = new != NULL ? zstrdup(new) : NULL;
zfree(prev);
return 1;
}
return 2;
}
-static sds stringConfigGet(typeData data) {
- return sdsnew(*data.string.config ? *data.string.config : "");
+static sds stringConfigGet(standardConfig *config) {
+ return sdsnew(*config->data.string.config ? *config->data.string.config : "");
}
-static void stringConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
- rewriteConfigStringOption(state, name,*(data.string.config), data.string.default_value);
+static void stringConfigRewrite(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ rewriteConfigStringOption(state, name,*(config->data.string.config), config->data.string.default_value);
}
/* SDS Configs */
-static void sdsConfigInit(typeData data) {
- *data.sds.config = (data.sds.convert_empty_to_null && !data.sds.default_value) ? NULL: sdsnew(data.sds.default_value);
+static void sdsConfigInit(standardConfig *config) {
+ *config->data.sds.config = (config->data.sds.convert_empty_to_null && !config->data.sds.default_value) ? NULL : sdsnew(config->data.sds.default_value);
}
-static int sdsConfigSet(typeData data, sds *argv, int argc, const char **err) {
+static int sdsConfigSet(standardConfig *config, sds *argv, int argc, const char **err) {
UNUSED(argc);
- if (data.sds.is_valid_fn && !data.sds.is_valid_fn(argv[0], err))
+ if (config->data.sds.is_valid_fn && !config->data.sds.is_valid_fn(argv[0], err))
return 0;
- sds prev = *data.sds.config;
- sds new = (data.string.convert_empty_to_null && (sdslen(argv[0]) == 0)) ? NULL : argv[0];
+ sds prev = config->flags & MODULE_CONFIG ? getModuleStringConfig(config->privdata) : *config->data.sds.config;
+ sds new = (config->data.string.convert_empty_to_null && (sdslen(argv[0]) == 0)) ? NULL : argv[0];
if (new != prev && (new == NULL || prev == NULL || sdscmp(prev, new))) {
- *data.sds.config = new != NULL ? sdsdup(new) : NULL;
sdsfree(prev);
+ if (config->flags & MODULE_CONFIG) {
+ return setModuleStringConfig(config->privdata, new, err);
+ }
+ *config->data.sds.config = new != NULL ? sdsdup(new) : NULL;
return 1;
}
+ if (config->flags & MODULE_CONFIG && prev) sdsfree(prev);
return 2;
}
-static sds sdsConfigGet(typeData data) {
- if (*data.sds.config) {
- return sdsdup(*data.sds.config);
+static sds sdsConfigGet(standardConfig *config) {
+ sds val = config->flags & MODULE_CONFIG ? getModuleStringConfig(config->privdata) : *config->data.sds.config;
+ if (val) {
+ if (config->flags & MODULE_CONFIG) return val;
+ return sdsdup(val);
} else {
return sdsnew("");
}
}
-static void sdsConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
- rewriteConfigSdsOption(state, name, *(data.sds.config), data.sds.default_value);
+static void sdsConfigRewrite(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ sds val = config->flags & MODULE_CONFIG ? getModuleStringConfig(config->privdata) : *config->data.sds.config;
+ rewriteConfigSdsOption(state, name, val, config->data.sds.default_value);
+ if (val) sdsfree(val);
}
@@ -1760,6 +1858,7 @@ static void sdsConfigRewrite(typeData data, const char *name, struct rewriteConf
#define createStringConfig(name, alias, flags, empty_to_null, config_addr, default, is_valid, apply) { \
embedCommonConfig(name, alias, flags) \
embedConfigInterface(stringConfigInit, stringConfigSet, stringConfigGet, stringConfigRewrite, apply) \
+ .type = STRING_CONFIG, \
.data.string = { \
.config = &(config_addr), \
.default_value = (default), \
@@ -1771,6 +1870,7 @@ static void sdsConfigRewrite(typeData data, const char *name, struct rewriteConf
#define createSDSConfig(name, alias, flags, empty_to_null, config_addr, default, is_valid, apply) { \
embedCommonConfig(name, alias, flags) \
embedConfigInterface(sdsConfigInit, sdsConfigSet, sdsConfigGet, sdsConfigRewrite, apply) \
+ .type = SDS_CONFIG, \
.data.sds = { \
.config = &(config_addr), \
.default_value = (default), \
@@ -1780,16 +1880,16 @@ static void sdsConfigRewrite(typeData data, const char *name, struct rewriteConf
}
/* Enum configs */
-static void enumConfigInit(typeData data) {
- *data.enumd.config = data.enumd.default_value;
+static void enumConfigInit(standardConfig *config) {
+ *config->data.enumd.config = config->data.enumd.default_value;
}
-static int enumConfigSet(typeData data, sds *argv, int argc, const char **err) {
+static int enumConfigSet(standardConfig *config, sds *argv, int argc, const char **err) {
UNUSED(argc);
- int enumval = configEnumGetValue(data.enumd.enum_value, argv[0]);
+ int enumval = configEnumGetValue(config->data.enumd.enum_value, argv[0]);
if (enumval == INT_MIN) {
sds enumerr = sdsnew("argument must be one of the following: ");
- configEnum *enumNode = data.enumd.enum_value;
+ configEnum *enumNode = config->data.enumd.enum_value;
while(enumNode->name != NULL) {
enumerr = sdscatlen(enumerr, enumNode->name,
strlen(enumNode->name));
@@ -1805,27 +1905,32 @@ static int enumConfigSet(typeData data, sds *argv, int argc, const char **err) {
*err = loadbuf;
return 0;
}
- if (data.enumd.is_valid_fn && !data.enumd.is_valid_fn(enumval, err))
+ if (config->data.enumd.is_valid_fn && !config->data.enumd.is_valid_fn(enumval, err))
return 0;
- int prev = *(data.enumd.config);
+ int prev = config->flags & MODULE_CONFIG ? getModuleEnumConfig(config->privdata) : *(config->data.enumd.config);
if (prev != enumval) {
- *(data.enumd.config) = enumval;
+ if (config->flags & MODULE_CONFIG)
+ return setModuleEnumConfig(config->privdata, enumval, err);
+ *(config->data.enumd.config) = enumval;
return 1;
}
return 2;
}
-static sds enumConfigGet(typeData data) {
- return sdsnew(configEnumGetNameOrUnknown(data.enumd.enum_value,*data.enumd.config));
+static sds enumConfigGet(standardConfig *config) {
+ int val = config->flags & MODULE_CONFIG ? getModuleEnumConfig(config->privdata) : *(config->data.enumd.config);
+ return sdsnew(configEnumGetNameOrUnknown(config->data.enumd.enum_value,val));
}
-static void enumConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
- rewriteConfigEnumOption(state, name,*(data.enumd.config), data.enumd.enum_value, data.enumd.default_value);
+static void enumConfigRewrite(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ int val = config->flags & MODULE_CONFIG ? getModuleEnumConfig(config->privdata) : *(config->data.enumd.config);
+ rewriteConfigEnumOption(state, name, val, config->data.enumd.enum_value, config->data.enumd.default_value);
}
#define createEnumConfig(name, alias, flags, enum, config_addr, default, is_valid, apply) { \
embedCommonConfig(name, alias, flags) \
embedConfigInterface(enumConfigInit, enumConfigSet, enumConfigGet, enumConfigRewrite, apply) \
+ .type = ENUM_CONFIG, \
.data.enumd = { \
.config = &(config_addr), \
.default_value = (default), \
@@ -1836,69 +1941,74 @@ static void enumConfigRewrite(typeData data, const char *name, struct rewriteCon
/* Gets a 'long long val' and sets it into the union, using a macro to get
* compile time type check. */
-#define SET_NUMERIC_TYPE(val) \
- if (data.numeric.numeric_type == NUMERIC_TYPE_INT) { \
- *(data.numeric.config.i) = (int) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_UINT) { \
- *(data.numeric.config.ui) = (unsigned int) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG) { \
- *(data.numeric.config.l) = (long) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG) { \
- *(data.numeric.config.ul) = (unsigned long) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG_LONG) { \
- *(data.numeric.config.ll) = (long long) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG) { \
- *(data.numeric.config.ull) = (unsigned long long) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) { \
- *(data.numeric.config.st) = (size_t) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_SSIZE_T) { \
- *(data.numeric.config.sst) = (ssize_t) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_OFF_T) { \
- *(data.numeric.config.ot) = (off_t) val; \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_TIME_T) { \
- *(data.numeric.config.tt) = (time_t) val; \
+int setNumericType(standardConfig *config, long long val, const char **err) {
+ if (config->data.numeric.numeric_type == NUMERIC_TYPE_INT) {
+ *(config->data.numeric.config.i) = (int) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_UINT) {
+ *(config->data.numeric.config.ui) = (unsigned int) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_LONG) {
+ *(config->data.numeric.config.l) = (long) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_ULONG) {
+ *(config->data.numeric.config.ul) = (unsigned long) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_LONG_LONG) {
+ if (config->flags & MODULE_CONFIG)
+ return setModuleNumericConfig(config->privdata, val, err);
+ else *(config->data.numeric.config.ll) = (long long) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG) {
+ *(config->data.numeric.config.ull) = (unsigned long long) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) {
+ *(config->data.numeric.config.st) = (size_t) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_SSIZE_T) {
+ *(config->data.numeric.config.sst) = (ssize_t) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_OFF_T) {
+ *(config->data.numeric.config.ot) = (off_t) val;
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_TIME_T) {
+ *(config->data.numeric.config.tt) = (time_t) val;
}
+ return 1;
+}
/* Gets a 'long long val' and sets it with the value from the union, using a
* macro to get compile time type check. */
#define GET_NUMERIC_TYPE(val) \
- if (data.numeric.numeric_type == NUMERIC_TYPE_INT) { \
- val = *(data.numeric.config.i); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_UINT) { \
- val = *(data.numeric.config.ui); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG) { \
- val = *(data.numeric.config.l); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG) { \
- val = *(data.numeric.config.ul); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG_LONG) { \
- val = *(data.numeric.config.ll); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG) { \
- val = *(data.numeric.config.ull); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) { \
- val = *(data.numeric.config.st); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_SSIZE_T) { \
- val = *(data.numeric.config.sst); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_OFF_T) { \
- val = *(data.numeric.config.ot); \
- } else if (data.numeric.numeric_type == NUMERIC_TYPE_TIME_T) { \
- val = *(data.numeric.config.tt); \
+ if (config->data.numeric.numeric_type == NUMERIC_TYPE_INT) { \
+ val = *(config->data.numeric.config.i); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_UINT) { \
+ val = *(config->data.numeric.config.ui); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_LONG) { \
+ val = *(config->data.numeric.config.l); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_ULONG) { \
+ val = *(config->data.numeric.config.ul); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_LONG_LONG) { \
+ if (config->flags & MODULE_CONFIG) val = getModuleNumericConfig(config->privdata); \
+ else val = *(config->data.numeric.config.ll); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG) { \
+ val = *(config->data.numeric.config.ull); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) { \
+ val = *(config->data.numeric.config.st); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_SSIZE_T) { \
+ val = *(config->data.numeric.config.sst); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_OFF_T) { \
+ val = *(config->data.numeric.config.ot); \
+ } else if (config->data.numeric.numeric_type == NUMERIC_TYPE_TIME_T) { \
+ val = *(config->data.numeric.config.tt); \
}
/* Numeric configs */
-static void numericConfigInit(typeData data) {
- SET_NUMERIC_TYPE(data.numeric.default_value)
+static void numericConfigInit(standardConfig *config) {
+ setNumericType(config, config->data.numeric.default_value, NULL);
}
-static int numericBoundaryCheck(typeData data, long long ll, const char **err) {
- if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG ||
- data.numeric.numeric_type == NUMERIC_TYPE_UINT ||
- data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) {
+static int numericBoundaryCheck(standardConfig *config, long long ll, const char **err) {
+ if (config->data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG ||
+ config->data.numeric.numeric_type == NUMERIC_TYPE_UINT ||
+ config->data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) {
/* Boundary check for unsigned types */
unsigned long long ull = ll;
- unsigned long long upper_bound = data.numeric.upper_bound;
- unsigned long long lower_bound = data.numeric.lower_bound;
+ unsigned long long upper_bound = config->data.numeric.upper_bound;
+ unsigned long long lower_bound = config->data.numeric.lower_bound;
if (ull > upper_bound || ull < lower_bound) {
- if (data.numeric.flags & OCTAL_CONFIG) {
+ if (config->data.numeric.flags & OCTAL_CONFIG) {
snprintf(loadbuf, LOADBUF_SIZE,
"argument must be between %llo and %llo inclusive",
lower_bound,
@@ -1914,21 +2024,21 @@ static int numericBoundaryCheck(typeData data, long long ll, const char **err) {
}
} else {
/* Boundary check for percentages */
- if (data.numeric.flags & PERCENT_CONFIG && ll < 0) {
- if (ll < data.numeric.lower_bound) {
+ if (config->data.numeric.flags & PERCENT_CONFIG && ll < 0) {
+ if (ll < config->data.numeric.lower_bound) {
snprintf(loadbuf, LOADBUF_SIZE,
"percentage argument must be less or equal to %lld",
- -data.numeric.lower_bound);
+ -config->data.numeric.lower_bound);
*err = loadbuf;
return 0;
}
}
/* Boundary check for signed types */
- else if (ll > data.numeric.upper_bound || ll < data.numeric.lower_bound) {
+ else if (ll > config->data.numeric.upper_bound || ll < config->data.numeric.lower_bound) {
snprintf(loadbuf, LOADBUF_SIZE,
"argument must be between %lld and %lld inclusive",
- data.numeric.lower_bound,
- data.numeric.upper_bound);
+ config->data.numeric.lower_bound,
+ config->data.numeric.upper_bound);
*err = loadbuf;
return 0;
}
@@ -1936,9 +2046,9 @@ static int numericBoundaryCheck(typeData data, long long ll, const char **err) {
return 1;
}
-static int numericParseString(typeData data, sds value, const char **err, long long *res) {
+static int numericParseString(standardConfig *config, sds value, const char **err, long long *res) {
/* First try to parse as memory */
- if (data.numeric.flags & MEMORY_CONFIG) {
+ if (config->data.numeric.flags & MEMORY_CONFIG) {
int memerr;
*res = memtoull(value, &memerr);
if (!memerr)
@@ -1946,7 +2056,7 @@ static int numericParseString(typeData data, sds value, const char **err, long l
}
/* Attempt to parse as percent */
- if (data.numeric.flags & PERCENT_CONFIG &&
+ if (config->data.numeric.flags & PERCENT_CONFIG &&
sdslen(value) > 1 && value[sdslen(value)-1] == '%' &&
string2ll(value, sdslen(value)-1, res) &&
*res >= 0) {
@@ -1956,7 +2066,7 @@ static int numericParseString(typeData data, sds value, const char **err, long l
}
/* Attempt to parse as an octal number */
- if (data.numeric.flags & OCTAL_CONFIG) {
+ if (config->data.numeric.flags & OCTAL_CONFIG) {
char *endptr;
errno = 0;
*res = strtoll(value, &endptr, 8);
@@ -1965,58 +2075,57 @@ static int numericParseString(typeData data, sds value, const char **err, long l
}
/* Attempt a simple number (no special flags set) */
- if (!data.numeric.flags && string2ll(value, sdslen(value), res))
+ if (!config->data.numeric.flags && string2ll(value, sdslen(value), res))
return 1;
/* Select appropriate error string */
- if (data.numeric.flags & MEMORY_CONFIG &&
- data.numeric.flags & PERCENT_CONFIG)
+ if (config->data.numeric.flags & MEMORY_CONFIG &&
+ config->data.numeric.flags & PERCENT_CONFIG)
*err = "argument must be a memory or percent value" ;
- else if (data.numeric.flags & MEMORY_CONFIG)
+ else if (config->data.numeric.flags & MEMORY_CONFIG)
*err = "argument must be a memory value";
- else if (data.numeric.flags & OCTAL_CONFIG)
+ else if (config->data.numeric.flags & OCTAL_CONFIG)
*err = "argument couldn't be parsed as an octal number";
else
*err = "argument couldn't be parsed into an integer";
return 0;
}
-static int numericConfigSet(typeData data, sds *argv, int argc, const char **err) {
+static int numericConfigSet(standardConfig *config, sds *argv, int argc, const char **err) {
UNUSED(argc);
long long ll, prev = 0;
- if (!numericParseString(data, argv[0], err, &ll))
+ if (!numericParseString(config, argv[0], err, &ll))
return 0;
- if (!numericBoundaryCheck(data, ll, err))
+ if (!numericBoundaryCheck(config, ll, err))
return 0;
- if (data.numeric.is_valid_fn && !data.numeric.is_valid_fn(ll, err))
+ if (config->data.numeric.is_valid_fn && !config->data.numeric.is_valid_fn(ll, err))
return 0;
GET_NUMERIC_TYPE(prev)
if (prev != ll) {
- SET_NUMERIC_TYPE(ll)
- return 1;
+ return setNumericType(config, ll, err);
}
return 2;
}
-static sds numericConfigGet(typeData data) {
+static sds numericConfigGet(standardConfig *config) {
char buf[128];
long long value = 0;
GET_NUMERIC_TYPE(value)
- if (data.numeric.flags & PERCENT_CONFIG && value < 0) {
+ if (config->data.numeric.flags & PERCENT_CONFIG && value < 0) {
int len = ll2string(buf, sizeof(buf), -value);
buf[len] = '%';
buf[len+1] = '\0';
}
- else if (data.numeric.flags & MEMORY_CONFIG) {
+ else if (config->data.numeric.flags & MEMORY_CONFIG) {
ull2string(buf, sizeof(buf), value);
- } else if (data.numeric.flags & OCTAL_CONFIG) {
+ } else if (config->data.numeric.flags & OCTAL_CONFIG) {
snprintf(buf, sizeof(buf), "%llo", value);
} else {
ll2string(buf, sizeof(buf), value);
@@ -2024,25 +2133,26 @@ static sds numericConfigGet(typeData data) {
return sdsnew(buf);
}
-static void numericConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
+static void numericConfigRewrite(standardConfig *config, const char *name, struct rewriteConfigState *state) {
long long value = 0;
GET_NUMERIC_TYPE(value)
- if (data.numeric.flags & PERCENT_CONFIG && value < 0) {
- rewriteConfigPercentOption(state, name, -value, data.numeric.default_value);
- } else if (data.numeric.flags & MEMORY_CONFIG) {
- rewriteConfigBytesOption(state, name, value, data.numeric.default_value);
- } else if (data.numeric.flags & OCTAL_CONFIG) {
- rewriteConfigOctalOption(state, name, value, data.numeric.default_value);
+ if (config->data.numeric.flags & PERCENT_CONFIG && value < 0) {
+ rewriteConfigPercentOption(state, name, -value, config->data.numeric.default_value);
+ } else if (config->data.numeric.flags & MEMORY_CONFIG) {
+ rewriteConfigBytesOption(state, name, value, config->data.numeric.default_value);
+ } else if (config->data.numeric.flags & OCTAL_CONFIG) {
+ rewriteConfigOctalOption(state, name, value, config->data.numeric.default_value);
} else {
- rewriteConfigNumericalOption(state, name, value, data.numeric.default_value);
+ rewriteConfigNumericalOption(state, name, value, config->data.numeric.default_value);
}
}
#define embedCommonNumericalConfig(name, alias, _flags, lower, upper, config_addr, default, num_conf_flags, is_valid, apply) { \
embedCommonConfig(name, alias, _flags) \
embedConfigInterface(numericConfigInit, numericConfigSet, numericConfigGet, numericConfigRewrite, apply) \
+ .type = NUMERIC_CONFIG, \
.data.numeric = { \
.lower_bound = (lower), \
.upper_bound = (upper), \
@@ -2121,6 +2231,7 @@ static void numericConfigRewrite(typeData data, const char *name, struct rewrite
}
#define createSpecialConfig(name, alias, modifiable, setfn, getfn, rewritefn, applyfn) { \
+ .type = SPECIAL_CONFIG, \
embedCommonConfig(name, alias, modifiable) \
embedConfigInterface(NULL, setfn, getfn, rewritefn, applyfn) \
}
@@ -2397,8 +2508,8 @@ static int applyTLSPort(const char **err) {
#endif /* USE_OPENSSL */
-static int setConfigDirOption(typeData data, sds *argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigDirOption(standardConfig *config, sds *argv, int argc, const char **err) {
+ UNUSED(config);
if (argc != 1) {
*err = "wrong number of arguments";
return 0;
@@ -2410,8 +2521,8 @@ static int setConfigDirOption(typeData data, sds *argv, int argc, const char **e
return 1;
}
-static sds getConfigDirOption(typeData data) {
- UNUSED(data);
+static sds getConfigDirOption(standardConfig *config) {
+ UNUSED(config);
char buf[1024];
if (getcwd(buf,sizeof(buf)) == NULL)
@@ -2420,8 +2531,8 @@ static sds getConfigDirOption(typeData data) {
return sdsnew(buf);
}
-static int setConfigSaveOption(typeData data, sds *argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigSaveOption(standardConfig *config, sds *argv, int argc, const char **err) {
+ UNUSED(config);
int j;
/* Special case: treat single arg "" as zero args indicating empty save configuration */
@@ -2473,8 +2584,8 @@ static int setConfigSaveOption(typeData data, sds *argv, int argc, const char **
return 1;
}
-static sds getConfigSaveOption(typeData data) {
- UNUSED(data);
+static sds getConfigSaveOption(standardConfig *config) {
+ UNUSED(config);
sds buf = sdsempty();
int j;
@@ -2489,13 +2600,13 @@ static sds getConfigSaveOption(typeData data) {
return buf;
}
-static int setConfigClientOutputBufferLimitOption(typeData data, sds *argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigClientOutputBufferLimitOption(standardConfig *config, sds *argv, int argc, const char **err) {
+ UNUSED(config);
return updateClientOutputBufferLimit(argv, argc, err);
}
-static sds getConfigClientOutputBufferLimitOption(typeData data) {
- UNUSED(data);
+static sds getConfigClientOutputBufferLimitOption(standardConfig *config) {
+ UNUSED(config);
sds buf = sdsempty();
int j;
for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) {
@@ -2513,11 +2624,11 @@ static sds getConfigClientOutputBufferLimitOption(typeData data) {
/* Parse an array of CONFIG_OOM_COUNT sds strings, validate and populate
* server.oom_score_adj_values if valid.
*/
-static int setConfigOOMScoreAdjValuesOption(typeData data, sds *argv, int argc, const char **err) {
+static int setConfigOOMScoreAdjValuesOption(standardConfig *config, sds *argv, int argc, const char **err) {
int i;
int values[CONFIG_OOM_COUNT];
int change = 0;
- UNUSED(data);
+ UNUSED(config);
if (argc != CONFIG_OOM_COUNT) {
*err = "wrong number of arguments";
@@ -2558,8 +2669,8 @@ static int setConfigOOMScoreAdjValuesOption(typeData data, sds *argv, int argc,
return change ? 1 : 2;
}
-static sds getConfigOOMScoreAdjValuesOption(typeData data) {
- UNUSED(data);
+static sds getConfigOOMScoreAdjValuesOption(standardConfig *config) {
+ UNUSED(config);
sds buf = sdsempty();
int j;
@@ -2572,8 +2683,8 @@ static sds getConfigOOMScoreAdjValuesOption(typeData data) {
return buf;
}
-static int setConfigNotifyKeyspaceEventsOption(typeData data, sds *argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigNotifyKeyspaceEventsOption(standardConfig *config, sds *argv, int argc, const char **err) {
+ UNUSED(config);
if (argc != 1) {
*err = "wrong number of arguments";
return 0;
@@ -2587,13 +2698,13 @@ static int setConfigNotifyKeyspaceEventsOption(typeData data, sds *argv, int arg
return 1;
}
-static sds getConfigNotifyKeyspaceEventsOption(typeData data) {
- UNUSED(data);
+static sds getConfigNotifyKeyspaceEventsOption(standardConfig *config) {
+ UNUSED(config);
return keyspaceEventsFlagsToString(server.notify_keyspace_events);
}
-static int setConfigBindOption(typeData data, sds* argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigBindOption(standardConfig *config, sds* argv, int argc, const char **err) {
+ UNUSED(config);
int j;
if (argc > CONFIG_BINDADDR_MAX) {
@@ -2615,8 +2726,8 @@ static int setConfigBindOption(typeData data, sds* argv, int argc, const char **
return 1;
}
-static int setConfigReplicaOfOption(typeData data, sds* argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigReplicaOfOption(standardConfig *config, sds* argv, int argc, const char **err) {
+ UNUSED(config);
if (argc != 2) {
*err = "wrong number of arguments";
@@ -2639,13 +2750,13 @@ static int setConfigReplicaOfOption(typeData data, sds* argv, int argc, const ch
return 1;
}
-static sds getConfigBindOption(typeData data) {
- UNUSED(data);
+static sds getConfigBindOption(standardConfig *config) {
+ UNUSED(config);
return sdsjoin(server.bindaddr,server.bindaddr_count," ");
}
-static sds getConfigReplicaOfOption(typeData data) {
- UNUSED(data);
+static sds getConfigReplicaOfOption(standardConfig *config) {
+ UNUSED(config);
char buf[256];
if (server.masterhost)
snprintf(buf,sizeof(buf),"%s %d",
@@ -2661,8 +2772,8 @@ int allowProtectedAction(int config, client *c) {
}
-static int setConfigLatencyTrackingInfoPercentilesOutputOption(typeData data, sds *argv, int argc, const char **err) {
- UNUSED(data);
+static int setConfigLatencyTrackingInfoPercentilesOutputOption(standardConfig *config, sds *argv, int argc, const char **err) {
+ UNUSED(config);
zfree(server.latency_tracking_info_percentiles);
server.latency_tracking_info_percentiles = NULL;
server.latency_tracking_info_percentiles_len = argc;
@@ -2694,8 +2805,8 @@ configerr:
return 0;
}
-static sds getConfigLatencyTrackingInfoPercentilesOutputOption(typeData data) {
- UNUSED(data);
+static sds getConfigLatencyTrackingInfoPercentilesOutputOption(standardConfig *config) {
+ UNUSED(config);
sds buf = sdsempty();
for (int j = 0; j < server.latency_tracking_info_percentiles_len; j++) {
char fbuf[128];
@@ -2709,8 +2820,8 @@ static sds getConfigLatencyTrackingInfoPercentilesOutputOption(typeData data) {
}
/* Rewrite the latency-tracking-info-percentiles option. */
-void rewriteConfigLatencyTrackingInfoPercentilesOutputOption(typeData data, const char *name, struct rewriteConfigState *state) {
- UNUSED(data);
+void rewriteConfigLatencyTrackingInfoPercentilesOutputOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
+ UNUSED(config);
sds line = sdsnew(name);
/* Rewrite latency-tracking-info-percentiles parameters,
* or an empty 'latency-tracking-info-percentiles ""' line to avoid the
@@ -2729,7 +2840,7 @@ void rewriteConfigLatencyTrackingInfoPercentilesOutputOption(typeData data, cons
rewriteConfigRewriteLine(state,name,line,1);
}
-standardConfig configs[] = {
+standardConfig static_configs[] = {
/* Bool configs */
createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL),
createBoolConfig("daemonize", NULL, IMMUTABLE_CONFIG, server.daemonize, 0, NULL, NULL),
@@ -2771,7 +2882,7 @@ standardConfig configs[] = {
createBoolConfig("crash-log-enabled", NULL, MODIFIABLE_CONFIG, server.crashlog_enabled, 1, NULL, updateSighandlerEnabled),
createBoolConfig("crash-memcheck-enabled", NULL, MODIFIABLE_CONFIG, server.memcheck_enabled, 1, NULL, NULL),
createBoolConfig("use-exit-on-panic", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.use_exit_on_panic, 0, NULL, NULL),
- createBoolConfig("disable-thp", NULL, MODIFIABLE_CONFIG, server.disable_thp, 1, NULL, NULL),
+ createBoolConfig("disable-thp", NULL, IMMUTABLE_CONFIG, server.disable_thp, 1, NULL, NULL),
createBoolConfig("cluster-allow-replica-migration", NULL, MODIFIABLE_CONFIG, server.cluster_allow_replica_migration, 1, NULL, NULL),
createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, server.replica_announced, 1, NULL, NULL),
createBoolConfig("latency-tracking", NULL, MODIFIABLE_CONFIG, server.latency_tracking_enabled, 1, NULL, NULL),
@@ -2930,10 +3041,105 @@ standardConfig configs[] = {
createSpecialConfig("replicaof", "slaveof", IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, setConfigReplicaOfOption, getConfigReplicaOfOption, rewriteConfigReplicaOfOption, NULL),
createSpecialConfig("latency-tracking-info-percentiles", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigLatencyTrackingInfoPercentilesOutputOption, getConfigLatencyTrackingInfoPercentilesOutputOption, rewriteConfigLatencyTrackingInfoPercentilesOutputOption, NULL),
- /* NULL Terminator */
+ /* NULL Terminator, this is dropped when we convert to the runtime array. */
{NULL}
};
+/* Create a new config by copying the passed in config. Returns 1 on success
+ * or 0 when there was already a config with the same name. */
+int registerConfigValue(const char *name, const standardConfig *config, int alias) {
+ standardConfig *new = zmalloc(sizeof(standardConfig));
+ memcpy(new, config, sizeof(standardConfig));
+ if (alias) {
+ new->flags |= ALIAS_CONFIG;
+ new->name = config->alias;
+ new->alias = config->name;
+ }
+
+ return dictAdd(configs, sdsnew(name), new) == DICT_OK;
+}
+
+/* Initialize configs to their default values and create and populate the
+ * runtime configuration dictionary. */
+void initConfigValues() {
+ configs = dictCreate(&sdsHashDictType);
+ dictExpand(configs, sizeof(static_configs) / sizeof(standardConfig));
+ for (standardConfig *config = static_configs; config->name != NULL; config++) {
+ if (config->interface.init) config->interface.init(config);
+ /* Add the primary config to the dictionary. */
+ int ret = registerConfigValue(config->name, config, 0);
+ serverAssert(ret);
+
+        /* Aliases are the same as their primary counterparts, but they
+ * also have a flag indicating they are the alias. */
+ if (config->alias) {
+ int ret = registerConfigValue(config->alias, config, ALIAS_CONFIG);
+ serverAssert(ret);
+ }
+ }
+}
+
+/* Remove a config by name from the configs dict. */
+void removeConfig(sds name) {
+ standardConfig *config = lookupConfig(name);
+ if (!config) return;
+ if (config->flags & MODULE_CONFIG) {
+ sdsfree((sds) config->name);
+ if (config->type == ENUM_CONFIG) {
+ configEnum *enumNode = config->data.enumd.enum_value;
+ while(enumNode->name != NULL) {
+ zfree(enumNode->name);
+ enumNode++;
+ }
+ zfree(config->data.enumd.enum_value);
+ } else if (config->type == SDS_CONFIG) {
+ if (config->data.sds.default_value) sdsfree((sds)config->data.sds.default_value);
+ }
+ }
+ dictDelete(configs, name);
+}
+
+/*-----------------------------------------------------------------------------
+ * Module Config
+ *----------------------------------------------------------------------------*/
+
+/* Create a bool/string/enum/numeric standardConfig for a module config in the configs dictionary */
+void addModuleBoolConfig(const char *module_name, const char *name, int flags, void *privdata, int default_val) {
+ sds config_name = sdscatfmt(sdsempty(), "%s.%s", module_name, name);
+ int config_dummy_address;
+ standardConfig module_config = createBoolConfig(config_name, NULL, flags | MODULE_CONFIG, config_dummy_address, default_val, NULL, NULL);
+ module_config.data.yesno.config = NULL;
+ module_config.privdata = privdata;
+ registerConfigValue(config_name, &module_config, 0);
+}
+
+void addModuleStringConfig(const char *module_name, const char *name, int flags, void *privdata, sds default_val) {
+ sds config_name = sdscatfmt(sdsempty(), "%s.%s", module_name, name);
+ sds config_dummy_address;
+ standardConfig module_config = createSDSConfig(config_name, NULL, flags | MODULE_CONFIG, 0, config_dummy_address, default_val, NULL, NULL);
+ module_config.data.sds.config = NULL;
+ module_config.privdata = privdata;
+ registerConfigValue(config_name, &module_config, 0);
+}
+
+void addModuleEnumConfig(const char *module_name, const char *name, int flags, void *privdata, int default_val, configEnum *enum_vals) {
+ sds config_name = sdscatfmt(sdsempty(), "%s.%s", module_name, name);
+ int config_dummy_address;
+ standardConfig module_config = createEnumConfig(config_name, NULL, flags | MODULE_CONFIG, enum_vals, config_dummy_address, default_val, NULL, NULL);
+ module_config.data.enumd.config = NULL;
+ module_config.privdata = privdata;
+ registerConfigValue(config_name, &module_config, 0);
+}
+
+void addModuleNumericConfig(const char *module_name, const char *name, int flags, void *privdata, long long default_val, int conf_flags, long long lower, long long upper) {
+ sds config_name = sdscatfmt(sdsempty(), "%s.%s", module_name, name);
+ long long config_dummy_address;
+ standardConfig module_config = createLongLongConfig(config_name, NULL, flags | MODULE_CONFIG, lower, upper, config_dummy_address, default_val, conf_flags, NULL, NULL);
+ module_config.data.numeric.config.ll = NULL;
+ module_config.privdata = privdata;
+ registerConfigValue(config_name, &module_config, 0);
+}
+
/*-----------------------------------------------------------------------------
* CONFIG HELP
*----------------------------------------------------------------------------*/
@@ -2975,8 +3181,10 @@ void configRewriteCommand(client *c) {
return;
}
if (rewriteConfig(server.configfile, 0) == -1) {
- serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno));
- addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno));
+ /* Save errno in case it is clobbered by the logging call below. */
+ int err = errno;
+ serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(err));
+ addReplyErrorFormat(c,"Rewriting config file: %s", strerror(err));
} else {
serverLog(LL_WARNING,"CONFIG REWRITE executed with success.");
addReply(c,shared.ok);
diff --git a/src/db.c b/src/db.c
index d28349664..d4da756f1 100644
--- a/src/db.c
+++ b/src/db.c
@@ -218,9 +218,12 @@ void dbOverwrite(redisDb *db, robj *key, robj *val) {
val->lru = old->lru;
}
/* Although the key is not really deleted from the database, we regard
- overwrite as two steps of unlink+add, so we still need to call the unlink
- callback of the module. */
+ * overwrite as two steps of unlink+add, so we still need to call the unlink
+ * callback of the module. */
moduleNotifyKeyUnlink(key,old,db->id);
+ /* We want to try to unblock any client using a blocking XREADGROUP */
+ if (old->type == OBJ_STREAM)
+ signalKeyAsReady(db,key,old->type);
dictSetVal(db->dict, de, val);
if (server.lazyfree_lazy_server_del) {
@@ -311,6 +314,9 @@ static int dbGenericDelete(redisDb *db, robj *key, int async) {
robj *val = dictGetVal(de);
/* Tells the module that the key has been unlinked from the database. */
moduleNotifyKeyUnlink(key,val,db->id);
+ /* We want to try to unblock any client using a blocking XREADGROUP */
+ if (val->type == OBJ_STREAM)
+ signalKeyAsReady(db,key,val->type);
if (async) {
freeObjAsync(key, val, db->id);
dictSetVal(db->dict, de, NULL);
@@ -551,6 +557,7 @@ void signalFlushedDb(int dbid, int async) {
}
for (int j = startdb; j <= enddb; j++) {
+ scanDatabaseForDeletedStreams(&server.db[j], NULL);
touchAllWatchedKeysInDb(&server.db[j], NULL);
}
@@ -1311,7 +1318,7 @@ void copyCommand(client *c) {
* one or more blocked clients for B[LR]POP or other blocking commands
* and signal the keys as ready if they are of the right type. See the comment
* where the function is used for more info. */
-void scanDatabaseForReadyLists(redisDb *db) {
+void scanDatabaseForReadyKeys(redisDb *db) {
dictEntry *de;
dictIterator *di = dictGetSafeIterator(db->blocking_keys);
while((de = dictNext(di)) != NULL) {
@@ -1325,6 +1332,39 @@ void scanDatabaseForReadyLists(redisDb *db) {
dictReleaseIterator(di);
}
+/* Since we are unblocking XREADGROUP clients in the event the
+ * key was deleted/overwritten we must do the same in case the
+ * database was flushed/swapped. */
+void scanDatabaseForDeletedStreams(redisDb *emptied, redisDb *replaced_with) {
+ /* Optimization: If no clients are in type BLOCKED_STREAM,
+ * we can skip this loop. */
+ if (!server.blocked_clients_by_type[BLOCKED_STREAM]) return;
+
+ dictEntry *de;
+ dictIterator *di = dictGetSafeIterator(emptied->blocking_keys);
+ while((de = dictNext(di)) != NULL) {
+ robj *key = dictGetKey(de);
+ int was_stream = 0, is_stream = 0;
+
+ dictEntry *kde = dictFind(emptied->dict, key->ptr);
+ if (kde) {
+ robj *value = dictGetVal(kde);
+ was_stream = value->type == OBJ_STREAM;
+ }
+ if (replaced_with) {
+ dictEntry *kde = dictFind(replaced_with->dict, key->ptr);
+ if (kde) {
+ robj *value = dictGetVal(kde);
+ is_stream = value->type == OBJ_STREAM;
+ }
+ }
+ /* We want to try to unblock any client using a blocking XREADGROUP */
+ if (was_stream && !is_stream)
+ signalKeyAsReady(emptied, key, OBJ_STREAM);
+ }
+ dictReleaseIterator(di);
+}
+
/* Swap two databases at runtime so that all clients will magically see
* the new database even if already connected. Note that the client
* structure c->db points to a given DB, so we need to be smarter and
@@ -1345,6 +1385,10 @@ int dbSwapDatabases(int id1, int id2) {
touchAllWatchedKeysInDb(db1, db2);
touchAllWatchedKeysInDb(db2, db1);
+ /* Try to unblock any XREADGROUP clients if the key no longer exists. */
+ scanDatabaseForDeletedStreams(db1, db2);
+ scanDatabaseForDeletedStreams(db2, db1);
+
/* Swap hash tables. Note that we don't swap blocking_keys,
* ready_keys and watched_keys, since we want clients to
* remain in the same DB they were. */
@@ -1367,8 +1411,8 @@ int dbSwapDatabases(int id1, int id2) {
* in dbAdd() when a list is created. So here we need to rescan
* the list of clients blocked on lists and signal lists as ready
* if needed. */
- scanDatabaseForReadyLists(db1);
- scanDatabaseForReadyLists(db2);
+ scanDatabaseForReadyKeys(db1);
+ scanDatabaseForReadyKeys(db2);
return C_OK;
}
@@ -1391,6 +1435,9 @@ void swapMainDbWithTempDb(redisDb *tempDb) {
* client watching keys. */
touchAllWatchedKeysInDb(activedb, newdb);
+ /* Try to unblock any XREADGROUP clients if the key no longer exists. */
+ scanDatabaseForDeletedStreams(activedb, newdb);
+
/* Swap hash tables. Note that we don't swap blocking_keys,
* ready_keys and watched_keys, since clients
* remain in the same DB they were. */
@@ -1413,7 +1460,7 @@ void swapMainDbWithTempDb(redisDb *tempDb) {
* in dbAdd() when a list is created. So here we need to rescan
* the list of clients blocked on lists and signal lists as ready
* if needed. */
- scanDatabaseForReadyLists(activedb);
+ scanDatabaseForReadyKeys(activedb);
}
trackingInvalidateKeysOnFlush(1);
diff --git a/src/debug.c b/src/debug.c
index b95daaa36..4f0e37777 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -482,10 +482,12 @@ void debugCommand(client *c) {
" Show low level client eviction pools info (maxmemory-clients).",
"PAUSE-CRON <0|1>",
" Stop periodic cron job processing.",
-"REPLYBUFFER-PEAK-RESET-TIME <NEVER||RESET|time>",
+"REPLYBUFFER PEAK-RESET-TIME <NEVER|RESET|time>",
" Sets the time (in milliseconds) to wait between client reply buffer peak resets.",
" In case NEVER is provided the last observed peak will never be reset",
" In case RESET is provided the peak reset time will be restored to the default value",
+"REPLYBUFFER RESIZING <0|1>",
+" Enable or disable the reply buffer resize cron job",
NULL
};
addReplyHelp(c, help);
@@ -793,6 +795,10 @@ NULL
* also have a normal reply type after the attribute. */
addReplyBulkCString(c,"Some real reply following the attribute");
} else if (!strcasecmp(name,"push")) {
+ if (c->resp < 3) {
+ addReplyError(c,"RESP2 is not supported by this command");
+ return;
+ }
addReplyPushLen(c,2);
addReplyBulkCString(c,"server-cpu-usage");
addReplyLongLong(c,42);
@@ -962,14 +968,21 @@ NULL
{
server.pause_cron = atoi(c->argv[2]->ptr);
addReply(c,shared.ok);
- } else if (!strcasecmp(c->argv[1]->ptr,"replybuffer-peak-reset-time") && c->argc == 3 ) {
- if (!strcasecmp(c->argv[2]->ptr, "never")) {
- server.reply_buffer_peak_reset_time = -1;
- } else if(!strcasecmp(c->argv[2]->ptr, "reset")) {
- server.reply_buffer_peak_reset_time = REPLY_BUFFER_DEFAULT_PEAK_RESET_TIME;
+ } else if (!strcasecmp(c->argv[1]->ptr,"replybuffer") && c->argc == 4 ) {
+ if(!strcasecmp(c->argv[2]->ptr, "peak-reset-time")) {
+ if (!strcasecmp(c->argv[3]->ptr, "never")) {
+ server.reply_buffer_peak_reset_time = -1;
+ } else if(!strcasecmp(c->argv[3]->ptr, "reset")) {
+ server.reply_buffer_peak_reset_time = REPLY_BUFFER_DEFAULT_PEAK_RESET_TIME;
+ } else {
+ if (getLongFromObjectOrReply(c, c->argv[3], &server.reply_buffer_peak_reset_time, NULL) != C_OK)
+ return;
+ }
+ } else if(!strcasecmp(c->argv[2]->ptr,"resizing")) {
+ server.reply_buffer_resizing_enabled = atoi(c->argv[3]->ptr);
} else {
- if (getLongFromObjectOrReply(c, c->argv[2], &server.reply_buffer_peak_reset_time, NULL) != C_OK)
- return;
+ addReplySubcommandSyntaxError(c);
+ return;
}
addReply(c, shared.ok);
} else {
@@ -1943,7 +1956,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
serverLog(LL_WARNING,
"Accessing address: %p", (void*)info->si_addr);
}
- if (info->si_code <= SI_USER && info->si_pid != -1) {
+ if (info->si_code == SI_USER && info->si_pid != -1) {
serverLog(LL_WARNING, "Killed by PID: %ld, UID: %d", (long) info->si_pid, info->si_uid);
}
diff --git a/src/defrag.c b/src/defrag.c
index d4983c6d5..a756f26b0 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -407,7 +407,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) {
* new pointer. Additionally, we try to defrag the dictEntry in that dict.
* Oldkey mey be a dead pointer and should not be accessed (we get a
* pre-calculated hash value). Newkey may be null if the key pointer wasn't
- * moved. Return value is the the dictEntry if found, or NULL if not found.
+ * moved. Return value is the dictEntry if found, or NULL if not found.
* NOTE: this is very ugly code, but it let's us avoid the complication of
* doing a scan on another dict. */
dictEntry* replaceSatelliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged) {
@@ -1196,7 +1196,7 @@ void activeDefragCycle(void) {
cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db);
/* Once in 16 scan iterations, 512 pointer reallocations. or 64 keys
- * (if we have a lot of pointers in one hash bucket or rehasing),
+ * (if we have a lot of pointers in one hash bucket or rehashing),
* check if we reached the time limit.
* But regardless, don't start a new db in this loop, this is because after
* the last db we call defragOtherGlobals, which must be done in one cycle */
diff --git a/src/eval.c b/src/eval.c
index 1a9437a09..22bcbdb73 100644
--- a/src/eval.c
+++ b/src/eval.c
@@ -508,6 +508,7 @@ void evalGenericCommand(client *c, int evalsha) {
scriptRunCtx rctx;
if (scriptPrepareForRun(&rctx, lctx.lua_client, c, lua_cur_script, l->flags, ro) != C_OK) {
+ lua_pop(lua,2); /* Remove the function and error handler. */
return;
}
rctx.flags |= SCRIPT_EVAL_MODE; /* mark the current run as EVAL (as opposed to FCALL) so we'll
diff --git a/src/functions.c b/src/functions.c
index 739d178aa..d327d3358 100644
--- a/src/functions.c
+++ b/src/functions.c
@@ -57,6 +57,12 @@ struct functionsLibCtx {
dict *engines_stats; /* Per engine statistics */
};
+typedef struct functionsLibMataData {
+ sds engine;
+ sds name;
+ sds code;
+} functionsLibMataData;
+
dictType engineDictType = {
dictSdsCaseHash, /* hash function */
dictSdsDup, /* key dup */
@@ -124,7 +130,6 @@ static size_t functionMallocSize(functionInfo *fi) {
static size_t libraryMallocSize(functionLibInfo *li) {
return zmalloc_size(li) + sdsZmallocSize(li->name)
- + (li->desc ? sdsZmallocSize(li->desc) : 0)
+ sdsZmallocSize(li->code);
}
@@ -157,7 +162,6 @@ static void engineLibraryFree(functionLibInfo* li) {
dictRelease(li->functions);
sdsfree(li->name);
sdsfree(li->code);
- if (li->desc) sdsfree(li->desc);
zfree(li);
}
@@ -265,14 +269,13 @@ int functionLibCreateFunction(sds name, void *function, functionLibInfo *li, sds
return C_OK;
}
-static functionLibInfo* engineLibraryCreate(sds name, engineInfo *ei, sds desc, sds code) {
+static functionLibInfo* engineLibraryCreate(sds name, engineInfo *ei, sds code) {
functionLibInfo *li = zmalloc(sizeof(*li));
*li = (functionLibInfo) {
.name = sdsdup(name),
.functions = dictCreate(&libraryFunctionDictType),
.ei = ei,
.code = sdsdup(code),
- .desc = desc ? sdsdup(desc) : NULL,
};
return li;
}
@@ -540,17 +543,11 @@ void functionListCommand(client *c) {
}
}
++reply_len;
- addReplyMapLen(c, with_code? 5 : 4);
+ addReplyMapLen(c, with_code? 4 : 3);
addReplyBulkCString(c, "library_name");
addReplyBulkCBuffer(c, li->name, sdslen(li->name));
addReplyBulkCString(c, "engine");
addReplyBulkCBuffer(c, li->ei->name, sdslen(li->ei->name));
- addReplyBulkCString(c, "description");
- if (li->desc) {
- addReplyBulkCBuffer(c, li->desc, sdslen(li->desc));
- } else {
- addReplyNull(c);
- }
addReplyBulkCString(c, "functions");
addReplyArrayLen(c, dictSize(li->functions));
@@ -745,11 +742,11 @@ void functionRestoreCommand(client *c) {
err = sdsnew("can not read data type");
goto load_error;
}
- if (type != RDB_OPCODE_FUNCTION) {
+ if (type != RDB_OPCODE_FUNCTION && type != RDB_OPCODE_FUNCTION2) {
err = sdsnew("given type is not a function");
goto load_error;
}
- if (rdbFunctionLoad(&payload, rdbver, functions_lib_ctx, RDBFLAGS_NONE, &err) != C_OK) {
+ if (rdbFunctionLoad(&payload, rdbver, functions_lib_ctx, type, RDBFLAGS_NONE, &err) != C_OK) {
if (!err) {
err = sdsnew("failed loading the given functions payload");
}
@@ -868,36 +865,111 @@ static int functionsVerifyName(sds name) {
return C_OK;
}
-/* Compile and save the given library, return C_OK on success and C_ERR on failure.
- * In case on failure the err out param is set with relevant error message */
-int functionsCreateWithLibraryCtx(sds lib_name,sds engine_name, sds desc, sds code,
- int replace, sds* err, functionsLibCtx *lib_ctx) {
+int functionExtractLibMetaData(sds payload, functionsLibMataData *md, sds *err) {
+ sds name = NULL;
+ sds desc = NULL;
+ sds engine = NULL;
+ sds code = NULL;
+ if (strncmp(payload, "#!", 2) != 0) {
+ *err = sdsnew("Missing library metadata");
+ return C_ERR;
+ }
+ char *shebang_end = strchr(payload, '\n');
+ if (shebang_end == NULL) {
+ *err = sdsnew("Invalid library metadata");
+ return C_ERR;
+ }
+ size_t shebang_len = shebang_end - payload;
+ sds shebang = sdsnewlen(payload, shebang_len);
+ int numparts;
+ sds *parts = sdssplitargs(shebang, &numparts);
+ sdsfree(shebang);
+ if (!parts || numparts == 0) {
+ *err = sdsnew("Invalid library metadata");
+ sdsfreesplitres(parts, numparts);
+ return C_ERR;
+ }
+ engine = sdsdup(parts[0]);
+ sdsrange(engine, 2, -1);
+ for (int i = 1 ; i < numparts ; ++i) {
+ sds part = parts[i];
+ if (strncasecmp(part, "name=", 5) == 0) {
+ if (name) {
+ *err = sdscatfmt(sdsempty(), "Invalid metadata value, name argument was given multiple times");
+ goto error;
+ }
+ name = sdsdup(part);
+ sdsrange(name, 5, -1);
+ continue;
+ }
+ *err = sdscatfmt(sdsempty(), "Invalid metadata value given: %s", part);
+ goto error;
+ }
+
+ if (!name) {
+ *err = sdsnew("Library name was not given");
+ goto error;
+ }
+
+ sdsfreesplitres(parts, numparts);
+
+ md->name = name;
+ md->code = sdsnewlen(shebang_end, sdslen(payload) - shebang_len);
+ md->engine = engine;
+
+ return C_OK;
+
+error:
+ if (name) sdsfree(name);
+ if (desc) sdsfree(desc);
+ if (engine) sdsfree(engine);
+ if (code) sdsfree(code);
+ sdsfreesplitres(parts, numparts);
+ return C_ERR;
+}
+
+void functionFreeLibMetaData(functionsLibMataData *md) {
+ if (md->code) sdsfree(md->code);
+ if (md->name) sdsfree(md->name);
+ if (md->engine) sdsfree(md->engine);
+}
+
+/* Compile and save the given library, return the loaded library name on success
+ * and NULL on failure. In case of failure the err out param is set with a relevant error message */
+sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx) {
dictIterator *iter = NULL;
dictEntry *entry = NULL;
- if (functionsVerifyName(lib_name)) {
+ functionLibInfo *new_li = NULL;
+ functionLibInfo *old_li = NULL;
+ functionsLibMataData md = {0};
+ if (functionExtractLibMetaData(code, &md, err) != C_OK) {
+ return NULL;
+ }
+
+ if (functionsVerifyName(md.name)) {
*err = sdsnew("Library names can only contain letters and numbers and must be at least one character long");
- return C_ERR;
+ goto error;
}
- engineInfo *ei = dictFetchValue(engines, engine_name);
+ engineInfo *ei = dictFetchValue(engines, md.engine);
if (!ei) {
- *err = sdsnew("Engine not found");
- return C_ERR;
+ *err = sdscatfmt(sdsempty(), "Engine '%S' not found", md.engine);
+ goto error;
}
engine *engine = ei->engine;
- functionLibInfo *old_li = dictFetchValue(lib_ctx->libraries, lib_name);
+ old_li = dictFetchValue(lib_ctx->libraries, md.name);
if (old_li && !replace) {
- *err = sdsnew("Library already exists");
- return C_ERR;
+ *err = sdscatfmt(sdsempty(), "Library '%S' already exists", md.name);
+ goto error;
}
if (old_li) {
libraryUnlink(lib_ctx, old_li);
}
- functionLibInfo *new_li = engineLibraryCreate(lib_name, ei, desc, code);
- if (engine->create(engine->engine_ctx, new_li, code, err) != C_OK) {
+ new_li = engineLibraryCreate(md.name, ei, code);
+ if (engine->create(engine->engine_ctx, new_li, md.code, err) != C_OK) {
goto error;
}
@@ -925,48 +997,34 @@ int functionsCreateWithLibraryCtx(sds lib_name,sds engine_name, sds desc, sds co
engineLibraryFree(old_li);
}
- return C_OK;
+ sds loaded_lib_name = md.name;
+ md.name = NULL;
+ functionFreeLibMetaData(&md);
+
+ return loaded_lib_name;
error:
if (iter) dictReleaseIterator(iter);
- engineLibraryFree(new_li);
- if (old_li) {
- libraryLink(lib_ctx, old_li);
- }
- return C_ERR;
+ if (new_li) engineLibraryFree(new_li);
+ if (old_li) libraryLink(lib_ctx, old_li);
+ functionFreeLibMetaData(&md);
+ return NULL;
}
/*
- * FUNCTION LOAD <ENGINE NAME> <LIBRARY NAME>
- * [REPLACE] [DESC <LIBRARY DESCRIPTION>] <LIBRARY CODE>
- *
- * ENGINE NAME - name of the engine to use the run the library
- * LIBRARY NAME - name of the library
+ * FUNCTION LOAD [REPLACE] <LIBRARY CODE>
* REPLACE - optional, replace existing library
- * DESCRIPTION - optional, library description
* LIBRARY CODE - library code to pass to the engine
*/
void functionLoadCommand(client *c) {
- robj *engine_name = c->argv[2];
- robj *library_name = c->argv[3];
-
int replace = 0;
- int argc_pos = 4;
- sds desc = NULL;
+ int argc_pos = 2;
while (argc_pos < c->argc - 1) {
robj *next_arg = c->argv[argc_pos++];
if (!strcasecmp(next_arg->ptr, "replace")) {
replace = 1;
continue;
}
- if (!strcasecmp(next_arg->ptr, "description")) {
- if (argc_pos >= c->argc) {
- addReplyError(c, "Bad function description");
- return;
- }
- desc = c->argv[argc_pos++]->ptr;
- continue;
- }
addReplyErrorFormat(c, "Unknown option given: %s", (char*)next_arg->ptr);
return;
}
@@ -978,8 +1036,8 @@ void functionLoadCommand(client *c) {
robj *code = c->argv[argc_pos];
sds err = NULL;
- if (functionsCreateWithLibraryCtx(library_name->ptr, engine_name->ptr,
- desc, code->ptr, replace, &err, curr_functions_lib_ctx) != C_OK)
+ sds library_name = NULL;
+ if (!(library_name = functionsCreateWithLibraryCtx(code->ptr, replace, &err, curr_functions_lib_ctx)))
{
addReplyErrorSds(c, err);
return;
@@ -987,7 +1045,7 @@ void functionLoadCommand(client *c) {
/* Indicate that the command changed the data so it will be replicated and
* counted as a data change (for persistence configuration) */
server.dirty++;
- addReply(c, shared.ok);
+ addReplyBulkSds(c, library_name);
}
/* Return memory usage of all the engines combine */
diff --git a/src/functions.h b/src/functions.h
index fb2b74de9..40716dbc7 100644
--- a/src/functions.h
+++ b/src/functions.h
@@ -106,12 +106,10 @@ struct functionLibInfo {
dict *functions; /* Functions dictionary */
engineInfo *ei; /* Pointer to the function engine */
sds code; /* Library code */
- sds desc; /* Library description */
};
int functionsRegisterEngine(const char *engine_name, engine *engine_ctx);
-int functionsCreateWithLibraryCtx(sds lib_name, sds engine_name, sds desc, sds code,
- int replace, sds* err, functionsLibCtx *lib_ctx);
+sds functionsCreateWithLibraryCtx(sds code, int replace, sds* err, functionsLibCtx *lib_ctx);
unsigned long functionsMemory();
unsigned long functionsMemoryOverhead();
unsigned long functionsNum();
diff --git a/src/geohash.c b/src/geohash.c
index de9620b7a..2cbcf2875 100644
--- a/src/geohash.c
+++ b/src/geohash.c
@@ -46,7 +46,7 @@
/* Interleave lower bits of x and y, so the bits of x
* are in the even positions and bits from y in the odd;
- * x and y must initially be less than 2**32 (65536).
+ * x and y must initially be less than 2**32 (4294967296).
* From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN
*/
static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) {
diff --git a/src/help.h b/src/help.h
index edeb9b26e..e25ca3fa3 100644
--- a/src/help.h
+++ b/src/help.h
@@ -399,6 +399,11 @@ struct commandHelp {
"Bind a hash slot to a specific node",
12,
"3.0.0" },
+ { "CLUSTER SHARDS",
+ "",
+ "Get array of cluster slots to node mappings",
+ 12,
+ "7.0.0" },
{ "CLUSTER SLAVES",
"node-id",
"List replica nodes of the specified master node",
@@ -431,7 +436,7 @@ struct commandHelp {
"2.8.13" },
{ "COMMAND GETKEYSANDFLAGS",
"",
- "Extract keys given a full Redis command",
+ "Extract keys and access flags given a full Redis command",
9,
"7.0.0" },
{ "COMMAND HELP",
@@ -630,7 +635,7 @@ struct commandHelp {
10,
"7.0.0" },
{ "FUNCTION LOAD",
- "engine-name library-name [REPLACE] [DESCRIPTION library-description] function-code",
+ "[REPLACE] function-code",
"Create a function with the given arguments (name, code, description)",
10,
"7.0.0" },
@@ -1019,6 +1024,11 @@ struct commandHelp {
"Load a module",
9,
"4.0.0" },
+ { "MODULE LOADEX",
+ "path [CONFIG name value [name value ...]] [ARGS arg [arg ...]]",
+ "Load a module with extended parameters",
+ 9,
+ "7.0.0" },
{ "MODULE UNLOAD",
"name",
"Unload a module",
diff --git a/src/listpack.c b/src/listpack.c
index 450976e2c..e651e4960 100644
--- a/src/listpack.c
+++ b/src/listpack.c
@@ -958,7 +958,7 @@ unsigned char *lpPrependInteger(unsigned char *lp, long long lval) {
return lpInsertInteger(lp, lval, p, LP_BEFORE, NULL);
}
-/* Append the specified element 'ele' of length 'len' at the end of the
+/* Append the specified element 'ele' of length 'size' at the end of the
* listpack. It is implemented in terms of lpInsert(), so the return value is
* the same as lpInsert(). */
unsigned char *lpAppend(unsigned char *lp, unsigned char *ele, uint32_t size) {
diff --git a/src/localtime.c b/src/localtime.c
index e2ac81f98..1cefdfa88 100644
--- a/src/localtime.c
+++ b/src/localtime.c
@@ -108,7 +108,7 @@ void nolocks_localtime(struct tm *tmp, time_t t, time_t tz, int dst) {
int main(void) {
/* Obtain timezone and daylight info. */
- tzset(); /* Now 'timezome' global is populated. */
+ tzset(); /* Now 'timezone' global is populated. */
time_t t = time(NULL);
struct tm *aux = localtime(&t);
int daylight_active = aux->tm_isdst;
diff --git a/src/module.c b/src/module.c
index 7130139a6..3fc6a5499 100644
--- a/src/module.c
+++ b/src/module.c
@@ -352,6 +352,9 @@ typedef struct RedisModuleServerInfoData {
#define REDISMODULE_ARGV_RESP_3 (1<<3)
#define REDISMODULE_ARGV_RESP_AUTO (1<<4)
#define REDISMODULE_ARGV_CHECK_ACL (1<<5)
+#define REDISMODULE_ARGV_SCRIPT_MODE (1<<6)
+#define REDISMODULE_ARGV_NO_WRITES (1<<7)
+#define REDISMODULE_ARGV_CALL_REPLIES_AS_ERRORS (1<<8)
/* Determine whether Redis should signalModifiedKey implicitly.
* In case 'ctx' has no 'module' member (and therefore no module->options),
@@ -393,6 +396,40 @@ typedef struct RedisModuleKeyOptCtx {
as `copy2`, 'from_dbid' and 'to_dbid' are both valid. */
} RedisModuleKeyOptCtx;
+/* Data structures related to redis module configurations */
+/* The function signatures for module config get callbacks. These are identical to the ones exposed in redismodule.h. */
+typedef RedisModuleString * (*RedisModuleConfigGetStringFunc)(const char *name, void *privdata);
+typedef long long (*RedisModuleConfigGetNumericFunc)(const char *name, void *privdata);
+typedef int (*RedisModuleConfigGetBoolFunc)(const char *name, void *privdata);
+typedef int (*RedisModuleConfigGetEnumFunc)(const char *name, void *privdata);
+/* The function signatures for module config set callbacks. These are identical to the ones exposed in redismodule.h. */
+typedef int (*RedisModuleConfigSetStringFunc)(const char *name, RedisModuleString *val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigSetNumericFunc)(const char *name, long long val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigSetBoolFunc)(const char *name, int val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigSetEnumFunc)(const char *name, int val, void *privdata, RedisModuleString **err);
+/* Apply signature, identical to redismodule.h */
+typedef int (*RedisModuleConfigApplyFunc)(RedisModuleCtx *ctx, void *privdata, RedisModuleString **err);
+
+/* Struct representing a module config. These are stored in a list in the module struct */
+struct ModuleConfig {
+ sds name; /* Name of config without the module name appended to the front */
+ void *privdata; /* Optional data passed into the module config callbacks */
+ union get_fn { /* The get callback specified by the module */
+ RedisModuleConfigGetStringFunc get_string;
+ RedisModuleConfigGetNumericFunc get_numeric;
+ RedisModuleConfigGetBoolFunc get_bool;
+ RedisModuleConfigGetEnumFunc get_enum;
+ } get_fn;
+ union set_fn { /* The set callback specified by the module */
+ RedisModuleConfigSetStringFunc set_string;
+ RedisModuleConfigSetNumericFunc set_numeric;
+ RedisModuleConfigSetBoolFunc set_bool;
+ RedisModuleConfigSetEnumFunc set_enum;
+ } set_fn;
+ RedisModuleConfigApplyFunc apply_fn;
+ RedisModule *module;
+};
+
/* --------------------------------------------------------------------------
* Prototypes
* -------------------------------------------------------------------------- */
@@ -596,7 +633,10 @@ static void moduleFreeKeyIterator(RedisModuleKey *key) {
serverAssert(key->iter != NULL);
switch (key->value->type) {
case OBJ_LIST: listTypeReleaseIterator(key->iter); break;
- case OBJ_STREAM: zfree(key->iter); break;
+ case OBJ_STREAM:
+ streamIteratorStop(key->iter);
+ zfree(key->iter);
+ break;
default: serverAssert(0); /* No key->iter for other types. */
}
key->iter = NULL;
@@ -1935,6 +1975,16 @@ int moduleIsModuleCommand(void *module_handle, struct redisCommand *cmd) {
* ## Module information and time measurement
* -------------------------------------------------------------------------- */
+int moduleListConfigMatch(void *config, void *name) {
+ return strcasecmp(((ModuleConfig *) config)->name, (char *) name) == 0;
+}
+
+void moduleListFree(void *config) {
+ ModuleConfig *module_config = (ModuleConfig *) config;
+ sdsfree(module_config->name);
+ zfree(config);
+}
+
void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int apiver) {
/* Called by RM_Init() to setup the `ctx->module` structure.
*
@@ -1951,7 +2001,11 @@ void RM_SetModuleAttribs(RedisModuleCtx *ctx, const char *name, int ver, int api
module->usedby = listCreate();
module->using = listCreate();
module->filters = listCreate();
+ module->module_configs = listCreate();
+ listSetMatchMethod(module->module_configs, moduleListConfigMatch);
+ listSetFreeMethod(module->module_configs, moduleListFree);
module->in_call = 0;
+ module->configs_initialized = 0;
module->in_hook = 0;
module->options = 0;
module->info_cb = 0;
@@ -2250,7 +2304,7 @@ RedisModuleString *RM_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll
* The returned string must be released with RedisModule_FreeString() or by
* enabling automatic memory management. */
RedisModuleString *RM_CreateStringFromDouble(RedisModuleCtx *ctx, double d) {
- char buf[128];
+ char buf[MAX_D2STRING_CHARS];
size_t len = d2string(buf,sizeof(buf),d);
return RM_CreateString(ctx,buf,len);
}
@@ -2884,8 +2938,11 @@ void RM_ReplySetSetLength(RedisModuleCtx *ctx, long len) {
}
/* Very similar to RedisModule_ReplySetMapLength
- * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. */
+ * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3.
+ *
+ * Must not be called if RM_ReplyWithAttribute returned an error. */
void RM_ReplySetAttributeLength(RedisModuleCtx *ctx, long len) {
+ if (ctx->client->resp == 2) return;
moduleReplySetCollectionLength(ctx, len, COLLECTION_REPLY_ATTRIBUTE);
}
@@ -5103,6 +5160,7 @@ int RM_StreamIteratorStop(RedisModuleKey *key) {
errno = EBADF;
return REDISMODULE_ERR;
}
+ streamIteratorStop(key->iter);
zfree(key->iter);
key->iter = NULL;
return REDISMODULE_OK;
@@ -5544,6 +5602,12 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int
if (flags) (*flags) |= REDISMODULE_ARGV_RESP_AUTO;
} else if (*p == 'C') {
if (flags) (*flags) |= REDISMODULE_ARGV_CHECK_ACL;
+ } else if (*p == 'S') {
+ if (flags) (*flags) |= REDISMODULE_ARGV_SCRIPT_MODE;
+ } else if (*p == 'W') {
+ if (flags) (*flags) |= REDISMODULE_ARGV_NO_WRITES;
+ } else if (*p == 'E') {
+ if (flags) (*flags) |= REDISMODULE_ARGV_CALL_REPLIES_AS_ERRORS;
} else {
goto fmterr;
}
@@ -5583,6 +5647,17 @@ fmterr:
* same as the client attached to the given RedisModuleCtx. This will
* probably used when you want to pass the reply directly to the client.
* * `C` -- Check if command can be executed according to ACL rules.
+ * * 'S' -- Run the command in script mode, this means that it will raise
+ * an error if a command which is not allowed inside a script
+ * (flagged with the `deny-script` flag) is invoked (like SHUTDOWN).
+ * In addition, on script mode, write commands are not allowed if there are
+ * not enough good replicas (as configured with `min-replicas-to-write`)
+ * or when the server is unable to persist to the disk.
+ * * 'W' -- Do not allow to run any write command (flagged with the `write` flag).
+ * * 'E' -- Return error as RedisModuleCallReply. If there is an error before
+ * invoking the command, the error is returned using errno mechanism.
+ * This flag allows to get the error also as an error CallReply with
+ * relevant error message.
* * **...**: The actual arguments to the Redis command.
*
* On success a RedisModuleCallReply object is returned, otherwise
@@ -5597,6 +5672,8 @@ fmterr:
* * ENETDOWN: operation in Cluster instance when cluster is down.
* * ENOTSUP: No ACL user for the specified module context
* * EACCES: Command cannot be executed, according to ACL rules
+ * * ENOSPC: Write command is not allowed
+ * * ESPIPE: Command not allowed on script mode
*
* Example code fragment:
*
@@ -5616,11 +5693,13 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
va_list ap;
RedisModuleCallReply *reply = NULL;
int replicate = 0; /* Replicate this command? */
+ int error_as_call_replies = 0; /* return errors as RedisModuleCallReply object */
/* Handle arguments. */
va_start(ap, fmt);
argv = moduleCreateArgvFromUserFormat(cmdname,fmt,&argc,&argv_len,&flags,ap);
replicate = flags & REDISMODULE_ARGV_REPLICATE;
+ error_as_call_replies = flags & REDISMODULE_ARGV_CALL_REPLIES_AS_ERRORS;
va_end(ap);
c = moduleAllocTempClient();
@@ -5643,6 +5722,10 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
/* We handle the above format error only when the client is setup so that
* we can free it normally. */
if (argv == NULL) {
+ /* We do not return a call reply here; this is an error that should only
+ * be caught by the module indicating wrong fmt was given, the module should
+ * handle this error and decide how to continue. It is not an error that
+ * should be propagated to the user. */
errno = EBADF;
goto cleanup;
}
@@ -5656,6 +5739,11 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
cmd = lookupCommand(c->argv,c->argc);
if (!cmd) {
errno = ENOENT;
+ if (error_as_call_replies) {
+ sds msg = sdscatfmt(sdsempty(),"Unknown Redis "
+ "command '%S'.",c->argv[0]->ptr);
+ reply = callReplyCreateError(msg, ctx);
+ }
goto cleanup;
}
c->cmd = c->lastcmd = c->realcmd = cmd;
@@ -5663,9 +5751,66 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
/* Basic arity checks. */
if ((cmd->arity > 0 && cmd->arity != argc) || (argc < -cmd->arity)) {
errno = EINVAL;
+ if (error_as_call_replies) {
+ sds msg = sdscatfmt(sdsempty(), "Wrong number of "
+ "args calling Redis command '%S'.", c->cmd->fullname);
+ reply = callReplyCreateError(msg, ctx);
+ }
goto cleanup;
}
+ if (flags & REDISMODULE_ARGV_SCRIPT_MODE) {
+ /* Basically on script mode we want to only allow commands that can
+ * be executed on scripts (CMD_NOSCRIPT is not set on the command flags) */
+ if (cmd->flags & CMD_NOSCRIPT) {
+ errno = ESPIPE;
+ if (error_as_call_replies) {
+ sds msg = sdscatfmt(sdsempty(), "command '%S' is not allowed on script mode", c->cmd->fullname);
+ reply = callReplyCreateError(msg, ctx);
+ }
+ goto cleanup;
+ }
+ }
+
+ if (cmd->flags & CMD_WRITE) {
+ if (flags & REDISMODULE_ARGV_NO_WRITES) {
+ errno = ENOSPC;
+ if (error_as_call_replies) {
+ sds msg = sdscatfmt(sdsempty(), "Write command '%S' was "
+ "called while write is not allowed.", c->cmd->fullname);
+ reply = callReplyCreateError(msg, ctx);
+ }
+ goto cleanup;
+ }
+
+ if (flags & REDISMODULE_ARGV_SCRIPT_MODE) {
+ /* on script mode, if a command is a write command,
+ * We will not run it if we encounter disk error
+ * or we do not have enough replicas */
+
+ if (!checkGoodReplicasStatus()) {
+ errno = ENOSPC;
+ if (error_as_call_replies) {
+ sds msg = sdsdup(shared.noreplicaserr->ptr);
+ reply = callReplyCreateError(msg, ctx);
+ }
+ goto cleanup;
+ }
+
+ int deny_write_type = writeCommandsDeniedByDiskError();
+
+ if (deny_write_type != DISK_ERROR_TYPE_NONE) {
+ errno = ENOSPC;
+ if (error_as_call_replies) {
+ sds msg = writeCommandsGetDiskErrorMessage(deny_write_type);
+ reply = callReplyCreateError(msg, ctx);
+ }
+ goto cleanup;
+ }
+
+ }
+ }
+
/* Check if the user can run this command according to the current
* ACLs. */
if (flags & REDISMODULE_ARGV_CHECK_ACL) {
@@ -5674,12 +5819,20 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
if (ctx->client->user == NULL) {
errno = ENOTSUP;
+ if (error_as_call_replies) {
+ sds msg = sdsnew("acl verification failed, context is not attached to a client.");
+ reply = callReplyCreateError(msg, ctx);
+ }
goto cleanup;
}
acl_retval = ACLCheckAllUserCommandPerm(ctx->client->user,c->cmd,c->argv,c->argc,&acl_errpos);
if (acl_retval != ACL_OK) {
sds object = (acl_retval == ACL_DENIED_CMD) ? sdsdup(c->cmd->fullname) : sdsdup(c->argv[acl_errpos]->ptr);
addACLLogEntry(ctx->client, acl_retval, ACL_LOG_CTX_MODULE, -1, ctx->client->user->name, object);
+ if (error_as_call_replies) {
+ sds msg = sdscatfmt(sdsempty(), "acl verification failed, %s.", getAclErrorMessage(acl_retval));
+ reply = callReplyCreateError(msg, ctx);
+ }
errno = EACCES;
goto cleanup;
}
@@ -5696,13 +5849,26 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,&error_code) !=
server.cluster->myself)
{
+ sds msg = NULL;
if (error_code == CLUSTER_REDIR_DOWN_RO_STATE) {
+ if (error_as_call_replies) {
+ msg = sdscatfmt(sdsempty(), "Can not execute a write command '%S' while the cluster is down and readonly", c->cmd->fullname);
+ }
errno = EROFS;
} else if (error_code == CLUSTER_REDIR_DOWN_STATE) {
+ if (error_as_call_replies) {
+ msg = sdscatfmt(sdsempty(), "Can not execute a command '%S' while the cluster is down", c->cmd->fullname);
+ }
errno = ENETDOWN;
} else {
+ if (error_as_call_replies) {
+ msg = sdsnew("Attempted to access a non local key in a cluster node");
+ }
errno = EPERM;
}
+ if (msg) {
+ reply = callReplyCreateError(msg, ctx);
+ }
goto cleanup;
}
}
@@ -5744,9 +5910,9 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
}
reply = callReplyCreate(proto, c->deferred_reply_errors, ctx);
c->deferred_reply_errors = NULL; /* now the responsibility of the reply object. */
- autoMemoryAdd(ctx,REDISMODULE_AM_REPLY,reply);
cleanup:
+ if (reply) autoMemoryAdd(ctx,REDISMODULE_AM_REPLY,reply);
if (ctx->module) ctx->module->in_call--;
moduleReleaseTempClient(c);
return reply;
@@ -7785,8 +7951,9 @@ size_t RM_GetClusterSize(void) {
}
/* Populate the specified info for the node having as ID the specified 'id',
- * then returns REDISMODULE_OK. Otherwise if the node ID does not exist from
- * the POV of this local node, REDISMODULE_ERR is returned.
+ * then returns REDISMODULE_OK. Otherwise if the format of node ID is invalid
+ * or the node ID does not exist from the POV of this local node, REDISMODULE_ERR
+ * is returned.
*
* The arguments `ip`, `master_id`, `port` and `flags` can be NULL in case we don't
* need to populate back certain info. If an `ip` and `master_id` (only populated
@@ -7806,7 +7973,7 @@ size_t RM_GetClusterSize(void) {
int RM_GetClusterNodeInfo(RedisModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags) {
UNUSED(ctx);
- clusterNode *node = clusterLookupNode(id);
+ clusterNode *node = clusterLookupNode(id, strlen(id));
if (node == NULL ||
node->flags & (CLUSTER_NODE_NOADDR|CLUSTER_NODE_HANDSHAKE))
{
@@ -8607,6 +8774,24 @@ int RM_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, uint64_t client_id) {
return REDISMODULE_OK;
}
+/* Redact the client command argument specified at the given position. Redacted arguments
+ * are obfuscated in user facing commands such as SLOWLOG or MONITOR, as well as
+ * never being written to server logs. This command may be called multiple times on the
+ * same position.
+ *
+ * Note that the command name, position 0, can not be redacted.
+ *
+ * Returns REDISMODULE_OK if the argument was redacted and REDISMODULE_ERR if there
+ * was an invalid parameter passed in or the position is outside the client
+ * argument range. */
+int RM_RedactClientCommandArgument(RedisModuleCtx *ctx, int pos) {
+ if (!ctx || !ctx->client || pos <= 0 || ctx->client->argc <= pos) {
+ return REDISMODULE_ERR;
+ }
+ redactClientCommandArgument(ctx->client, pos);
+ return REDISMODULE_OK;
+}
+
/* Return the X.509 client-side certificate used by the client to authenticate
* this connection.
*
@@ -9972,6 +10157,7 @@ static uint64_t moduleEventVersions[] = {
-1, /* REDISMODULE_EVENT_FORK_CHILD */
-1, /* REDISMODULE_EVENT_REPL_ASYNC_LOAD */
-1, /* REDISMODULE_EVENT_EVENTLOOP */
+ -1, /* REDISMODULE_EVENT_CONFIG */
};
/* Register to be notified, via a callback, when the specified server event
@@ -10192,7 +10378,7 @@ static uint64_t moduleEventVersions[] = {
* are now triggered when repl-diskless-load is set to swapdb.
*
* Called when repl-diskless-load config is set to swapdb,
- * And redis needs to backup the the current database for the
+ * And redis needs to backup the current database for the
* possibility to be restored later. A module with global data and
* maybe with aux_load and aux_save callbacks may need to use this
* notification to backup / restore / discard its globals.
@@ -10232,6 +10418,20 @@ static uint64_t moduleEventVersions[] = {
* * `REDISMODULE_SUBEVENT_EVENTLOOP_BEFORE_SLEEP`
* * `REDISMODULE_SUBEVENT_EVENTLOOP_AFTER_SLEEP`
*
+ * * RedisModule_Event_Config
+ *
+ * Called when a configuration event happens
+ * The following sub events are available:
+ *
+ * * `REDISMODULE_SUBEVENT_CONFIG_CHANGE`
+ *
+ * The data pointer can be casted to a RedisModuleConfigChange
+ * structure with the following fields:
+ *
+ * const char **config_names; // An array of C string pointers containing the
+ * // name of each modified configuration item
+ * uint32_t num_changes; // The number of elements in the config_names array
+ *
* The function returns REDISMODULE_OK if the module was successfully subscribed
* for the specified event. If the API is called from a wrong context or unsupported event
* is given then REDISMODULE_ERR is returned. */
@@ -10309,6 +10509,8 @@ int RM_IsSubEventSupported(RedisModuleEvent event, int64_t subevent) {
return subevent < _REDISMODULE_SUBEVENT_FORK_CHILD_NEXT;
case REDISMODULE_EVENT_EVENTLOOP:
return subevent < _REDISMODULE_SUBEVENT_EVENTLOOP_NEXT;
+ case REDISMODULE_EVENT_CONFIG:
+ return subevent < _REDISMODULE_SUBEVENT_CONFIG_NEXT;
default:
break;
}
@@ -10385,6 +10587,8 @@ void moduleFireServerEvent(uint64_t eid, int subid, void *data) {
moduledata = data;
} else if (eid == REDISMODULE_EVENT_SWAPDB) {
moduledata = data;
+ } else if (eid == REDISMODULE_EVENT_CONFIG) {
+ moduledata = data;
}
el->module->in_hook++;
@@ -10528,9 +10732,21 @@ void moduleRegisterCoreAPI(void);
void moduleInitModulesSystemLast(void) {
}
+
/* Dict type for server.module_configs_queue: maps sds config names to sds
 * values with case-insensitive keys; both key and value are freed on release. */
dictType sdsKeyValueHashDictType = {
    dictSdsCaseHash,            /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCaseCompare,      /* key compare */
    dictSdsDestructor,          /* key destructor */
    dictSdsDestructor,          /* val destructor */
    NULL                        /* allow to expand */
};
+
void moduleInitModulesSystem(void) {
moduleUnblockedClients = listCreate();
server.loadmodule_queue = listCreate();
+ server.module_configs_queue = dictCreate(&sdsKeyValueHashDictType);
modules = dictCreate(&modulesDictType);
/* Set up the keyspace notification subscriber list and static client */
@@ -10603,6 +10819,20 @@ void moduleLoadQueueEntryFree(struct moduleLoadQueueEntry *loadmod) {
zfree(loadmod);
}
+/* Remove Module Configs from standardConfig array in config.c */
+void moduleRemoveConfigs(RedisModule *module) {
+ listIter li;
+ listNode *ln;
+ listRewind(module->module_configs, &li);
+ while ((ln = listNext(&li))) {
+ ModuleConfig *config = listNodeValue(ln);
+ sds module_name = sdsnew(module->name);
+ sds full_name = sdscat(sdscat(module_name, "."), config->name); /* ModuleName.ModuleConfig */
+ removeConfig(full_name);
+ sdsfree(full_name);
+ }
+}
+
/* Load all the modules in the server.loadmodule_queue list, which is
* populated by `loadmodule` directives in the configuration file.
* We can't load modules directly when processing the configuration file
@@ -10619,7 +10849,7 @@ void moduleLoadFromQueue(void) {
listRewind(server.loadmodule_queue,&li);
while((ln = listNext(&li))) {
struct moduleLoadQueueEntry *loadmod = ln->value;
- if (moduleLoad(loadmod->path,(void **)loadmod->argv,loadmod->argc)
+ if (moduleLoad(loadmod->path,(void **)loadmod->argv,loadmod->argc, 0)
== C_ERR)
{
serverLog(LL_WARNING,
@@ -10630,6 +10860,10 @@ void moduleLoadFromQueue(void) {
moduleLoadQueueEntryFree(loadmod);
listDelNode(server.loadmodule_queue, ln);
}
+ if (dictSize(server.module_configs_queue)) {
+ serverLog(LL_WARNING, "Module Configuration detected without loadmodule directive or no ApplyConfig call: aborting");
+ exit(1);
+ }
}
void moduleFreeModuleStructure(struct RedisModule *module) {
@@ -10637,6 +10871,7 @@ void moduleFreeModuleStructure(struct RedisModule *module) {
listRelease(module->filters);
listRelease(module->usedby);
listRelease(module->using);
+ listRelease(module->module_configs);
sdsfree(module->name);
moduleLoadQueueEntryFree(module->loadmod);
zfree(module);
@@ -10717,15 +10952,56 @@ void moduleUnregisterCommands(struct RedisModule *module) {
dictReleaseIterator(di);
}
+/* We parse argv to add sds "NAME VALUE" pairs to the server.module_configs_queue list of configs.
+ * We also increment the module_argv pointer to just after ARGS if there are args, otherwise
+ * we set it to NULL */
+int parseLoadexArguments(RedisModuleString ***module_argv, int *module_argc) {
+ int args_specified = 0;
+ RedisModuleString **argv = *module_argv;
+ int argc = *module_argc;
+ for (int i = 0; i < argc; i++) {
+ char *arg_val = argv[i]->ptr;
+ if (!strcasecmp(arg_val, "CONFIG")) {
+ if (i + 2 >= argc) {
+ serverLog(LL_NOTICE, "CONFIG specified without name value pair");
+ return REDISMODULE_ERR;
+ }
+ sds name = sdsdup(argv[i + 1]->ptr);
+ sds value = sdsdup(argv[i + 2]->ptr);
+ if (!dictReplace(server.module_configs_queue, name, value)) sdsfree(name);
+ i += 2;
+ } else if (!strcasecmp(arg_val, "ARGS")) {
+ args_specified = 1;
+ i++;
+ if (i >= argc) {
+ *module_argv = NULL;
+ *module_argc = 0;
+ } else {
+ *module_argv = argv + i;
+ *module_argc = argc - i;
+ }
+ break;
+ } else {
+ serverLog(LL_NOTICE, "Syntax Error from arguments to loadex around %s.", arg_val);
+ return REDISMODULE_ERR;
+ }
+ }
+ if (!args_specified) {
+ *module_argv = NULL;
+ *module_argc = 0;
+ }
+ return REDISMODULE_OK;
+}
+
/* Load a module and initialize it. On success C_OK is returned, otherwise
* C_ERR is returned. */
-int moduleLoad(const char *path, void **module_argv, int module_argc) {
+int moduleLoad(const char *path, void **module_argv, int module_argc, int is_loadex) {
int (*onload)(void *, void **, int);
void *handle;
struct stat st;
- if (stat(path, &st) == 0)
- { // this check is best effort
+ if (stat(path, &st) == 0) {
+ /* This check is best effort */
if (!(st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
serverLog(LL_WARNING, "Module %s failed to load: It does not have execute permissions.", path);
return C_ERR;
@@ -10749,16 +11025,17 @@ int moduleLoad(const char *path, void **module_argv, int module_argc) {
moduleCreateContext(&ctx, NULL, REDISMODULE_CTX_TEMP_CLIENT); /* We pass NULL since we don't have a module yet. */
selectDb(ctx.client, 0);
if (onload((void*)&ctx,module_argv,module_argc) == REDISMODULE_ERR) {
+ serverLog(LL_WARNING,
+ "Module %s initialization failed. Module not loaded",path);
if (ctx.module) {
moduleUnregisterCommands(ctx.module);
moduleUnregisterSharedAPI(ctx.module);
moduleUnregisterUsedAPI(ctx.module);
+ moduleRemoveConfigs(ctx.module);
moduleFreeModuleStructure(ctx.module);
}
moduleFreeContext(&ctx);
dlclose(handle);
- serverLog(LL_WARNING,
- "Module %s initialization failed. Module not loaded",path);
return C_ERR;
}
@@ -10776,15 +11053,30 @@ int moduleLoad(const char *path, void **module_argv, int module_argc) {
}
serverLog(LL_NOTICE,"Module '%s' loaded from %s",ctx.module->name,path);
+
+ if (listLength(ctx.module->module_configs) && !ctx.module->configs_initialized) {
+ serverLogRaw(LL_WARNING, "Module Configurations were not set, likely a missing LoadConfigs call. Unloading the module.");
+ moduleUnload(ctx.module->name);
+ moduleFreeContext(&ctx);
+ return C_ERR;
+ }
+
+ if (is_loadex && dictSize(server.module_configs_queue)) {
+ serverLogRaw(LL_WARNING, "Loadex configurations were not applied, likely due to invalid arguments. Unloading the module.");
+ moduleUnload(ctx.module->name);
+ moduleFreeContext(&ctx);
+ return C_ERR;
+ }
+
/* Fire the loaded modules event. */
moduleFireServerEvent(REDISMODULE_EVENT_MODULE_CHANGE,
REDISMODULE_SUBEVENT_MODULE_LOADED,
ctx.module);
+
moduleFreeContext(&ctx);
return C_OK;
}
-
/* Unload the module registered with the specified name. On success
* C_OK is returned, otherwise C_ERR is returned and errno is set
* to the following values depending on the type of error:
@@ -10836,6 +11128,7 @@ int moduleUnload(sds name) {
moduleUnregisterSharedAPI(module);
moduleUnregisterUsedAPI(module);
moduleUnregisterFilters(module);
+ moduleRemoveConfigs(module);
/* Remove any notification subscribers this module might have */
moduleUnsubscribeNotifications(module);
@@ -10964,10 +11257,433 @@ sds genModulesInfoString(sds info) {
return info;
}
+/* --------------------------------------------------------------------------
+ * Module Configurations API internals
+ * -------------------------------------------------------------------------- */
+
+/* Check if the configuration name is already registered */
+int isModuleConfigNameRegistered(RedisModule *module, sds name) {
+ listNode *match = listSearchKey(module->module_configs, (void *) name);
+ return match != NULL;
+}
+
+/* Assert that the flags passed into the RM_RegisterConfig Suite are valid */
+int moduleVerifyConfigFlags(unsigned int flags, configType type) {
+ if ((flags & ~(REDISMODULE_CONFIG_DEFAULT
+ | REDISMODULE_CONFIG_IMMUTABLE
+ | REDISMODULE_CONFIG_SENSITIVE
+ | REDISMODULE_CONFIG_HIDDEN
+ | REDISMODULE_CONFIG_PROTECTED
+ | REDISMODULE_CONFIG_DENY_LOADING
+ | REDISMODULE_CONFIG_MEMORY))) {
+ serverLogRaw(LL_WARNING, "Invalid flag(s) for configuration");
+ return REDISMODULE_ERR;
+ }
+ if (type != NUMERIC_CONFIG && flags & REDISMODULE_CONFIG_MEMORY) {
+ serverLogRaw(LL_WARNING, "Numeric flag provided for non-numeric configuration.");
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+int moduleVerifyConfigName(sds name) {
+ if (sdslen(name) == 0) {
+ serverLogRaw(LL_WARNING, "Module config names cannot be an empty string.");
+ return REDISMODULE_ERR;
+ }
+ for (size_t i = 0 ; i < sdslen(name) ; ++i) {
+ char curr_char = name[i];
+ if ((curr_char >= 'a' && curr_char <= 'z') ||
+ (curr_char >= 'A' && curr_char <= 'Z') ||
+ (curr_char >= '0' && curr_char <= '9') ||
+ (curr_char == '_') || (curr_char == '-'))
+ {
+ continue;
+ }
+ serverLog(LL_WARNING, "Invalid character %c in Module Config name %s.", curr_char, name);
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+/* This is a series of set functions for each type that act as dispatchers for
+ * config.c to call module set callbacks. */
+#define CONFIG_ERR_SIZE 256
+static char configerr[CONFIG_ERR_SIZE];
+static void propagateErrorString(RedisModuleString *err_in, const char **err) {
+ if (err_in) {
+ strncpy(configerr, err_in->ptr, CONFIG_ERR_SIZE);
+ configerr[CONFIG_ERR_SIZE - 1] = '\0';
+ decrRefCount(err_in);
+ *err = configerr;
+ }
+}
+
+int setModuleBoolConfig(ModuleConfig *config, int val, const char **err) {
+ RedisModuleString *error = NULL;
+ int return_code = config->set_fn.set_bool(config->name, val, config->privdata, &error);
+ propagateErrorString(error, err);
+ return return_code == REDISMODULE_OK ? 1 : 0;
+}
+
+int setModuleStringConfig(ModuleConfig *config, sds strval, const char **err) {
+ RedisModuleString *error = NULL;
+ RedisModuleString *new = createStringObject(strval, sdslen(strval));
+ int return_code = config->set_fn.set_string(config->name, new, config->privdata, &error);
+ propagateErrorString(error, err);
+ decrRefCount(new);
+ return return_code == REDISMODULE_OK ? 1 : 0;
+}
+
+int setModuleEnumConfig(ModuleConfig *config, int val, const char **err) {
+ RedisModuleString *error = NULL;
+ int return_code = config->set_fn.set_enum(config->name, val, config->privdata, &error);
+ propagateErrorString(error, err);
+ return return_code == REDISMODULE_OK ? 1 : 0;
+}
+
+int setModuleNumericConfig(ModuleConfig *config, long long val, const char **err) {
+ RedisModuleString *error = NULL;
+ int return_code = config->set_fn.set_numeric(config->name, val, config->privdata, &error);
+ propagateErrorString(error, err);
+ return return_code == REDISMODULE_OK ? 1 : 0;
+}
+
+/* This is a series of get functions for each type that act as dispatchers for
+ * config.c to call module set callbacks. */
+int getModuleBoolConfig(ModuleConfig *module_config) {
+ return module_config->get_fn.get_bool(module_config->name, module_config->privdata);
+}
+
+sds getModuleStringConfig(ModuleConfig *module_config) {
+ RedisModuleString *val = module_config->get_fn.get_string(module_config->name, module_config->privdata);
+ return val ? sdsdup(val->ptr) : NULL;
+}
+
+int getModuleEnumConfig(ModuleConfig *module_config) {
+ return module_config->get_fn.get_enum(module_config->name, module_config->privdata);
+}
+
+long long getModuleNumericConfig(ModuleConfig *module_config) {
+ return module_config->get_fn.get_numeric(module_config->name, module_config->privdata);
+}
+
+/* This function takes a module and a list of configs stored as sds NAME VALUE pairs.
+ * It attempts to call set on each of these configs. */
+int loadModuleConfigs(RedisModule *module) {
+ listIter li;
+ listNode *ln;
+ const char *err = NULL;
+ listRewind(module->module_configs, &li);
+ while ((ln = listNext(&li))) {
+ ModuleConfig *module_config = listNodeValue(ln);
+ sds config_name = sdscatfmt(sdsempty(), "%s.%s", module->name, module_config->name);
+ dictEntry *config_argument = dictFind(server.module_configs_queue, config_name);
+ if (config_argument) {
+ if (!performModuleConfigSetFromName(dictGetKey(config_argument), dictGetVal(config_argument), &err)) {
+ serverLog(LL_WARNING, "Issue during loading of configuration %s : %s", (sds) dictGetKey(config_argument), err);
+ sdsfree(config_name);
+ dictEmpty(server.module_configs_queue, NULL);
+ return REDISMODULE_ERR;
+ }
+ } else {
+ if (!performModuleConfigSetDefaultFromName(config_name, &err)) {
+ serverLog(LL_WARNING, "Issue attempting to set default value of configuration %s : %s", module_config->name, err);
+ sdsfree(config_name);
+ dictEmpty(server.module_configs_queue, NULL);
+ return REDISMODULE_ERR;
+ }
+ }
+ dictDelete(server.module_configs_queue, config_name);
+ sdsfree(config_name);
+ }
+ module->configs_initialized = 1;
+ return REDISMODULE_OK;
+}
+
+/* Add module_config to the list if the apply and privdata do not match one already in it. */
+void addModuleConfigApply(list *module_configs, ModuleConfig *module_config) {
+ if (!module_config->apply_fn) return;
+ listIter li;
+ listNode *ln;
+ ModuleConfig *pending_apply;
+ listRewind(module_configs, &li);
+ while ((ln = listNext(&li))) {
+ pending_apply = listNodeValue(ln);
+ if (pending_apply->apply_fn == module_config->apply_fn && pending_apply->privdata == module_config->privdata) {
+ return;
+ }
+ }
+ listAddNodeTail(module_configs, module_config);
+}
+
+/* Call apply on all module configs specified in set, if an apply function was specified at registration time. */
+int moduleConfigApplyConfig(list *module_configs, const char **err, const char **err_arg_name) {
+ if (!listLength(module_configs)) return 1;
+ listIter li;
+ listNode *ln;
+ ModuleConfig *module_config;
+ RedisModuleString *error = NULL;
+ RedisModuleCtx ctx;
+
+ listRewind(module_configs, &li);
+ while ((ln = listNext(&li))) {
+ module_config = listNodeValue(ln);
+ moduleCreateContext(&ctx, module_config->module, REDISMODULE_CTX_NONE);
+ if (module_config->apply_fn(&ctx, module_config->privdata, &error)) {
+ if (err_arg_name) *err_arg_name = module_config->name;
+ propagateErrorString(error, err);
+ moduleFreeContext(&ctx);
+ return 0;
+ }
+ moduleFreeContext(&ctx);
+ }
+ return 1;
+}
+
+/* --------------------------------------------------------------------------
+ * ## Module Configurations API
+ * -------------------------------------------------------------------------- */
+
+/* Create a module config object. */
+ModuleConfig *createModuleConfig(sds name, RedisModuleConfigApplyFunc apply_fn, void *privdata, RedisModule *module) {
+ ModuleConfig *new_config = zmalloc(sizeof(ModuleConfig));
+ new_config->name = sdsdup(name);
+ new_config->apply_fn = apply_fn;
+ new_config->privdata = privdata;
+ new_config->module = module;
+ return new_config;
+}
+
+int moduleConfigValidityCheck(RedisModule *module, sds name, unsigned int flags, configType type) {
+ if (moduleVerifyConfigFlags(flags, type) || moduleVerifyConfigName(name)) {
+ errno = EINVAL;
+ return REDISMODULE_ERR;
+ }
+ if (isModuleConfigNameRegistered(module, name)) {
+ serverLog(LL_WARNING, "Configuration by the name: %s already registered", name);
+ errno = EALREADY;
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+unsigned int maskModuleConfigFlags(unsigned int flags) {
+ unsigned int new_flags = 0;
+ if (flags & REDISMODULE_CONFIG_DEFAULT) new_flags |= MODIFIABLE_CONFIG;
+ if (flags & REDISMODULE_CONFIG_IMMUTABLE) new_flags |= IMMUTABLE_CONFIG;
+ if (flags & REDISMODULE_CONFIG_HIDDEN) new_flags |= HIDDEN_CONFIG;
+ if (flags & REDISMODULE_CONFIG_PROTECTED) new_flags |= PROTECTED_CONFIG;
+ if (flags & REDISMODULE_CONFIG_DENY_LOADING) new_flags |= DENY_LOADING_CONFIG;
+ return new_flags;
+}
+
+unsigned int maskModuleNumericConfigFlags(unsigned int flags) {
+ unsigned int new_flags = 0;
+ if (flags & REDISMODULE_CONFIG_MEMORY) new_flags |= MEMORY_CONFIG;
+ return new_flags;
+}
+
+/* Create a string config that Redis users can interact with via the Redis config file,
+ * `CONFIG SET`, `CONFIG GET`, and `CONFIG REWRITE` commands.
+ *
+ * The actual config value is owned by the module, and the `getfn`, `setfn` and optional
+ * `applyfn` callbacks that are provided to Redis in order to access or manipulate the
+ * value. The `getfn` callback retrieves the value from the module, while the `setfn`
+ * callback provides a value to be stored into the module config.
+ * The optional `applyfn` callback is called after a `CONFIG SET` command modified one or
+ * more configs using the `setfn` callback and can be used to atomically apply a config
+ * after several configs were changed together.
+ * If there are multiple configs with `applyfn` callbacks set by a single `CONFIG SET`
+ * command, they will be deduplicated if their `applyfn` function and `privdata` pointers
+ * are identical, and the callback will only be run once.
+ * Both the `setfn` and `applyfn` can return an error if the provided value is invalid or
+ * cannot be used.
+ * The config also declares a type for the value that is validated by Redis and
+ * provided to the module. The config system provides the following types:
+ *
+ * * Redis String: Binary safe string data.
+ * * Enum: One of a finite number of string tokens, provided during registration.
+ * * Numeric: 64 bit signed integer, which also supports min and max values.
+ * * Bool: Yes or no value.
+ *
+ * The `setfn` callback is expected to return REDISMODULE_OK when the value is successfully
+ * applied. It can also return REDISMODULE_ERR if the value can't be applied, and the
+ * *err pointer can be set with a RedisModuleString error message to provide to the client.
+ * This RedisModuleString will be freed by redis after returning from the set callback.
+ *
+ * All configs are registered with a name, a type, a default value, private data that is made
+ * available in the callbacks, as well as several flags that modify the behavior of the config.
+ * The name must only contain alphanumeric characters or dashes. The supported flags are:
+ *
+ * * REDISMODULE_CONFIG_DEFAULT: The default flags for a config. This creates a config that can be modified after startup.
+ * * REDISMODULE_CONFIG_IMMUTABLE: This config can only be provided loading time.
+ * * REDISMODULE_CONFIG_SENSITIVE: The value stored in this config is redacted from all logging.
+ * * REDISMODULE_CONFIG_HIDDEN: The name is hidden from `CONFIG GET` with pattern matching.
+ * * REDISMODULE_CONFIG_PROTECTED: This config will be only be modifiable based off the value of enable-protected-configs.
+ * * REDISMODULE_CONFIG_DENY_LOADING: This config is not modifiable while the server is loading data.
+ * * REDISMODULE_CONFIG_MEMORY: For numeric configs, this config will convert data unit notations into their byte equivalent.
+ *
+ * Default values are used on startup to set the value if it is not provided via the config file
+ * or command line. Default values are also used to compare to on a config rewrite.
+ *
+ * Notes:
+ *
+ * 1. On string config sets that the string passed to the set callback will be freed after execution and the module must retain it.
+ * 2. On string config gets the string will not be consumed and will be valid after execution.
+ *
+ * Example implementation:
+ *
+ * RedisModuleString *strval;
+ * int adjustable = 1;
+ * RedisModuleString *getStringConfigCommand(const char *name, void *privdata) {
+ * return strval;
+ * }
+ *
+ * int setStringConfigCommand(const char *name, RedisModuleString *new, void *privdata, RedisModuleString **err) {
+ * if (adjustable) {
+ * RedisModule_Free(strval);
+ * RedisModule_RetainString(NULL, new);
+ * strval = new;
+ * return REDISMODULE_OK;
+ * }
+ * *err = RedisModule_CreateString(NULL, "Not adjustable.", 15);
+ * return REDISMODULE_ERR;
+ * }
+ * ...
+ * RedisModule_RegisterStringConfig(ctx, "string", NULL, REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL);
+ *
+ * If the registration fails, REDISMODULE_ERR is returned and one of the following
+ * errno is set:
+ * * EINVAL: The provided flags are invalid for the registration or the name of the config contains invalid characters.
+ * * EALREADY: The provided configuration name is already used. */
+int RM_RegisterStringConfig(RedisModuleCtx *ctx, const char *name, const char *default_val, unsigned int flags, RedisModuleConfigGetStringFunc getfn, RedisModuleConfigSetStringFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) {
+ RedisModule *module = ctx->module;
+ sds config_name = sdsnew(name);
+ if (moduleConfigValidityCheck(module, config_name, flags, NUMERIC_CONFIG)) {
+ sdsfree(config_name);
+ return REDISMODULE_ERR;
+ }
+ ModuleConfig *new_config = createModuleConfig(config_name, applyfn, privdata, module);
+ sdsfree(config_name);
+ new_config->get_fn.get_string = getfn;
+ new_config->set_fn.set_string = setfn;
+ listAddNodeTail(module->module_configs, new_config);
+ flags = maskModuleConfigFlags(flags);
+ addModuleStringConfig(module->name, name, flags, new_config, default_val ? sdsnew(default_val) : NULL);
+ return REDISMODULE_OK;
+}
+
+/* Create a bool config that server clients can interact with via the
+ * `CONFIG SET`, `CONFIG GET`, and `CONFIG REWRITE` commands. See
+ * RedisModule_RegisterStringConfig for detailed information about configs. */
+int RM_RegisterBoolConfig(RedisModuleCtx *ctx, const char *name, int default_val, unsigned int flags, RedisModuleConfigGetBoolFunc getfn, RedisModuleConfigSetBoolFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) {
+ RedisModule *module = ctx->module;
+ sds config_name = sdsnew(name);
+ if (moduleConfigValidityCheck(module, config_name, flags, BOOL_CONFIG)) {
+ sdsfree(config_name);
+ return REDISMODULE_ERR;
+ }
+ ModuleConfig *new_config = createModuleConfig(config_name, applyfn, privdata, module);
+ sdsfree(config_name);
+ new_config->get_fn.get_bool = getfn;
+ new_config->set_fn.set_bool = setfn;
+ listAddNodeTail(module->module_configs, new_config);
+ flags = maskModuleConfigFlags(flags);
+ addModuleBoolConfig(module->name, name, flags, new_config, default_val);
+ return REDISMODULE_OK;
+}
+
+/*
+ * Create an enum config that server clients can interact with via the
+ * `CONFIG SET`, `CONFIG GET`, and `CONFIG REWRITE` commands.
+ * Enum configs are a set of string tokens to corresponding integer values, where
+ * the string value is exposed to Redis clients but the value passed Redis and the
+ * module is the integer value. These values are defined in enum_values, an array
+ * of null-terminated c strings, and int_vals, an array of enum values who has an
+ * index partner in enum_values.
+ * Example Implementation:
+ * const char *enum_vals[3] = {"first", "second", "third"};
+ * const int int_vals[3] = {0, 2, 4};
+ * int enum_val = 0;
+ *
+ * int getEnumConfigCommand(const char *name, void *privdata) {
+ * return enum_val;
+ * }
+ *
+ * int setEnumConfigCommand(const char *name, int val, void *privdata, const char **err) {
+ * enum_val = val;
+ * return REDISMODULE_OK;
+ * }
+ * ...
+ * RedisModule_RegisterEnumConfig(ctx, "enum", 0, REDISMODULE_CONFIG_DEFAULT, enum_vals, int_vals, 3, getEnumConfigCommand, setEnumConfigCommand, NULL, NULL);
+ *
+ * See RedisModule_RegisterStringConfig for detailed general information about configs. */
+int RM_RegisterEnumConfig(RedisModuleCtx *ctx, const char *name, int default_val, unsigned int flags, const char **enum_values, const int *int_values, int num_enum_vals, RedisModuleConfigGetEnumFunc getfn, RedisModuleConfigSetEnumFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) {
+ RedisModule *module = ctx->module;
+ sds config_name = sdsnew(name);
+ if (moduleConfigValidityCheck(module, config_name, flags, ENUM_CONFIG)) {
+ sdsfree(config_name);
+ return REDISMODULE_ERR;
+ }
+ ModuleConfig *new_config = createModuleConfig(config_name, applyfn, privdata, module);
+ sdsfree(config_name);
+ new_config->get_fn.get_enum = getfn;
+ new_config->set_fn.set_enum = setfn;
+ configEnum *enum_vals = zmalloc((num_enum_vals + 1) * sizeof(configEnum));
+ for (int i = 0; i < num_enum_vals; i++) {
+ enum_vals[i].name = zstrdup(enum_values[i]);
+ enum_vals[i].val = int_values[i];
+ }
+ enum_vals[num_enum_vals].name = NULL;
+ enum_vals[num_enum_vals].val = 0;
+ listAddNodeTail(module->module_configs, new_config);
+ flags = maskModuleConfigFlags(flags);
+ addModuleEnumConfig(module->name, name, flags, new_config, default_val, enum_vals);
+ return REDISMODULE_OK;
+}
+
+/*
+ * Create an integer config that server clients can interact with via the
+ * `CONFIG SET`, `CONFIG GET`, and `CONFIG REWRITE` commands. See
+ * RedisModule_RegisterStringConfig for detailed information about configs. */
+int RM_RegisterNumericConfig(RedisModuleCtx *ctx, const char *name, long long default_val, unsigned int flags, long long min, long long max, RedisModuleConfigGetNumericFunc getfn, RedisModuleConfigSetNumericFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) {
+ RedisModule *module = ctx->module;
+ sds config_name = sdsnew(name);
+ if (moduleConfigValidityCheck(module, config_name, flags, NUMERIC_CONFIG)) {
+ sdsfree(config_name);
+ return REDISMODULE_ERR;
+ }
+ ModuleConfig *new_config = createModuleConfig(config_name, applyfn, privdata, module);
+ sdsfree(config_name);
+ new_config->get_fn.get_numeric = getfn;
+ new_config->set_fn.set_numeric = setfn;
+ listAddNodeTail(module->module_configs, new_config);
+ unsigned int numeric_flags = maskModuleNumericConfigFlags(flags);
+ flags = maskModuleConfigFlags(flags);
+ addModuleNumericConfig(module->name, name, flags, new_config, default_val, numeric_flags, min, max);
+ return REDISMODULE_OK;
+}
+
+/* Applies all pending configurations on the module load. This should be called
+ * after all of the configurations have been registered for the module inside of RedisModule_OnLoad.
+ * This API needs to be called when configurations are provided in either `MODULE LOADEX`
+ * or provided as startup arguments. */
+int RM_LoadConfigs(RedisModuleCtx *ctx) {
+ if (!ctx || !ctx->module) {
+ return REDISMODULE_ERR;
+ }
+ RedisModule *module = ctx->module;
+ /* Load configs from conf file or arguments from loadex */
+ if (loadModuleConfigs(module)) return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
+
/* Redis MODULE command.
*
* MODULE LIST
* MODULE LOAD <path> [args...]
+ * MODULE LOADEX <path> [[CONFIG NAME VALUE] [CONFIG NAME VALUE]] [ARGS ...]
* MODULE UNLOAD <name>
*/
void moduleCommand(client *c) {
@@ -10979,6 +11695,8 @@ void moduleCommand(client *c) {
" Return a list of loaded modules.",
"LOAD <path> [<arg> ...]",
" Load a module library from <path>, passing to it any optional arguments.",
+"LOADEX <path> [[CONFIG NAME VALUE] [CONFIG NAME VALUE]] [ARGS ...]",
+" Load a module library from <path>, while passing it module configurations and optional arguments.",
"UNLOAD <name>",
" Unload a module.",
NULL
@@ -10993,11 +11711,30 @@ NULL
argv = &c->argv[3];
}
- if (moduleLoad(c->argv[2]->ptr,(void **)argv,argc) == C_OK)
+ if (moduleLoad(c->argv[2]->ptr,(void **)argv,argc, 0) == C_OK)
addReply(c,shared.ok);
else
addReplyError(c,
"Error loading the extension. Please check the server logs.");
+ } else if (!strcasecmp(subcmd,"loadex") && c->argc >= 3) {
+ robj **argv = NULL;
+ int argc = 0;
+
+ if (c->argc > 3) {
+ argc = c->argc - 3;
+ argv = &c->argv[3];
+ }
+ /* If this is a loadex command we want to populate server.module_configs_queue with
+ * sds NAME VALUE pairs. We also want to increment argv to just after ARGS, if supplied. */
+ if (parseLoadexArguments((RedisModuleString ***) &argv, &argc) == REDISMODULE_OK &&
+ moduleLoad(c->argv[2]->ptr, (void **)argv, argc, 1) == C_OK)
+ addReply(c,shared.ok);
+ else {
+ dictEmpty(server.module_configs_queue, NULL);
+ addReplyError(c,
+ "Error loading the extension. Please check the server logs.");
+ }
+
} else if (!strcasecmp(subcmd,"unload") && c->argc == 3) {
if (moduleUnload(c->argv[2]->ptr) == C_OK)
addReply(c,shared.ok);
@@ -11785,6 +12522,7 @@ void moduleRegisterCoreAPI(void) {
REGISTER_API(IsSubEventSupported);
REGISTER_API(GetServerVersion);
REGISTER_API(GetClientCertificate);
+ REGISTER_API(RedactClientCommandArgument);
REGISTER_API(GetCommandKeys);
REGISTER_API(GetCommandKeysWithFlags);
REGISTER_API(GetCurrentCommandName);
@@ -11799,4 +12537,9 @@ void moduleRegisterCoreAPI(void) {
REGISTER_API(EventLoopDel);
REGISTER_API(EventLoopAddOneShot);
REGISTER_API(Yield);
+ REGISTER_API(RegisterBoolConfig);
+ REGISTER_API(RegisterNumericConfig);
+ REGISTER_API(RegisterStringConfig);
+ REGISTER_API(RegisterEnumConfig);
+ REGISTER_API(LoadConfigs);
}
diff --git a/src/modules/helloacl.c b/src/modules/helloacl.c
index 6766c0a58..53f3a440c 100644
--- a/src/modules/helloacl.c
+++ b/src/modules/helloacl.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "../redismodule.h"
#include <pthread.h>
#include <unistd.h>
diff --git a/src/modules/helloblock.c b/src/modules/helloblock.c
index afdfeece4..dc3d74975 100644
--- a/src/modules/helloblock.c
+++ b/src/modules/helloblock.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "../redismodule.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/src/modules/hellocluster.c b/src/modules/hellocluster.c
index a6508f837..bc145c2b2 100644
--- a/src/modules/hellocluster.c
+++ b/src/modules/hellocluster.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "../redismodule.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c
index 2a836c491..12b6e91d2 100644
--- a/src/modules/hellodict.c
+++ b/src/modules/hellodict.c
@@ -33,7 +33,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "../redismodule.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/src/modules/hellohook.c b/src/modules/hellohook.c
index b451bd975..2859a8b26 100644
--- a/src/modules/hellohook.c
+++ b/src/modules/hellohook.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "../redismodule.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c
index f6700df26..67e1e6714 100644
--- a/src/modules/hellotimer.c
+++ b/src/modules/hellotimer.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "../redismodule.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/src/networking.c b/src/networking.c
index b05d02b1b..767d871d8 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -147,7 +147,6 @@ client *createClient(connection *conn) {
c->ref_block_pos = 0;
c->qb_pos = 0;
c->querybuf = sdsempty();
- c->pending_querybuf = sdsempty();
c->querybuf_peak = 0;
c->reqtype = 0;
c->argc = 0;
@@ -167,6 +166,7 @@ client *createClient(connection *conn) {
c->repl_start_cmd_stream_on_ack = 0;
c->reploff = 0;
c->read_reploff = 0;
+ c->repl_applied = 0;
c->repl_ack_off = 0;
c->repl_ack_time = 0;
c->repl_last_partial_write = 0;
@@ -201,7 +201,7 @@ client *createClient(connection *conn) {
c->pending_read_list_node = NULL;
c->client_tracking_redirection = 0;
c->client_tracking_prefixes = NULL;
- c->last_memory_usage = c->last_memory_usage_on_bucket_update = 0;
+ c->last_memory_usage = 0;
c->last_memory_type = CLIENT_TYPE_NORMAL;
c->auth_callback = NULL;
c->auth_callback_privdata = NULL;
@@ -1568,7 +1568,6 @@ void freeClient(client *c) {
/* Free the query buffer */
sdsfree(c->querybuf);
- sdsfree(c->pending_querybuf);
c->querybuf = NULL;
/* Deallocate structures used to block on blocking ops. */
@@ -1940,7 +1939,7 @@ int writeToClient(client *c, int handler_installed) {
if (!clientHasPendingReplies(c)) {
c->sentlen = 0;
/* Note that writeToClient() is called in a threaded way, but
- * adDeleteFileEvent() is not thread safe: however writeToClient()
+ * aeDeleteFileEvent() is not thread safe: however writeToClient()
* is always called with handler_installed set to 0 from threads
* so we are fine. */
if (handler_installed) {
@@ -1954,7 +1953,11 @@ int writeToClient(client *c, int handler_installed) {
return C_ERR;
}
}
- updateClientMemUsage(c);
+ /* Update client's memory usage after writing.
+ * Since this isn't thread safe we do this conditionally. In case of threaded writes this is done in
+ * handleClientsWithPendingWritesUsingThreads(). */
+ if (io_threads_op == IO_THREADS_OP_IDLE)
+ updateClientMemUsage(c);
return C_OK;
}
@@ -2128,7 +2131,7 @@ int processInlineBuffer(client *c) {
* we got some desynchronization in the protocol, for example
* because of a PSYNC gone bad.
*
- * However the is an exception: masters may send us just a newline
+ * However there is an exception: masters may send us just a newline
* to keep the connection active. */
if (querylen != 0 && c->flags & CLIENT_MASTER) {
sdsfreesplitres(argv,argc);
@@ -2292,8 +2295,12 @@ int processMultibulkBuffer(client *c) {
}
c->qb_pos = newline-c->querybuf+2;
- if (ll >= PROTO_MBULK_BIG_ARG) {
- /* If we are going to read a large object from network
+ if (!(c->flags & CLIENT_MASTER) && ll >= PROTO_MBULK_BIG_ARG) {
+ /* When the client is not a master client (because master
+ * client's querybuf can only be trimmed after data applied
+ * and sent to replicas).
+ *
+ * If we are going to read a large object from network
* try to make it likely that it will start at c->querybuf
* boundary so that we can optimize object creation
* avoiding a large copy of data.
@@ -2324,10 +2331,11 @@ int processMultibulkBuffer(client *c) {
c->argv = zrealloc(c->argv, sizeof(robj*)*c->argv_len);
}
- /* Optimization: if the buffer contains JUST our bulk element
+ /* Optimization: if a non-master client's buffer contains JUST our bulk element
* instead of creating a new object by *copying* the sds we
* just use the current sds string. */
- if (c->qb_pos == 0 &&
+ if (!(c->flags & CLIENT_MASTER) &&
+ c->qb_pos == 0 &&
c->bulklen >= PROTO_MBULK_BIG_ARG &&
sdslen(c->querybuf) == (size_t)(c->bulklen+2))
{
@@ -2388,8 +2396,8 @@ void commandProcessed(client *c) {
if (c->flags & CLIENT_MASTER) {
long long applied = c->reploff - prev_offset;
if (applied) {
- replicationFeedStreamFromMasterStream(c->pending_querybuf,applied);
- sdsrange(c->pending_querybuf,applied,-1);
+ replicationFeedStreamFromMasterStream(c->querybuf+c->repl_applied,applied);
+ c->repl_applied += applied;
}
}
}
@@ -2432,13 +2440,22 @@ int processCommandAndResetClient(client *c) {
/* This function will execute any fully parsed commands pending on
* the client. Returns C_ERR if the client is no longer valid after executing
* the command, and C_OK for all other cases. */
-int processPendingCommandsAndResetClient(client *c) {
+int processPendingCommandAndInputBuffer(client *c) {
if (c->flags & CLIENT_PENDING_COMMAND) {
c->flags &= ~CLIENT_PENDING_COMMAND;
if (processCommandAndResetClient(c) == C_ERR) {
return C_ERR;
}
}
+
+    /* Now process the client if it has more data in its buffer.
+     *
+     * Note: when a master client steps into this function,
+     * it can always satisfy this condition, because its querybuf
+     * contains data not applied. */
+ if (c->querybuf && sdslen(c->querybuf) > 0) {
+ return processInputBuffer(c);
+ }
return C_OK;
}
@@ -2510,8 +2527,26 @@ int processInputBuffer(client *c) {
}
}
- /* Trim to pos */
- if (c->qb_pos) {
+ if (c->flags & CLIENT_MASTER) {
+ /* If the client is a master, trim the querybuf to repl_applied,
+         * since the master client is very special: its querybuf is not only
+         * used to parse commands, but also proxied to sub-replicas.
+ *
+ * Here are some scenarios we cannot trim to qb_pos:
+ * 1. we don't receive complete command from master
+         * 2. master client blocked because of client pause
+ * 3. io threads operate read, master client flagged with CLIENT_PENDING_COMMAND
+ *
+ * In these scenarios, qb_pos points to the part of the current command
+ * or the beginning of next command, and the current command is not applied yet,
+ * so the repl_applied is not equal to qb_pos. */
+ if (c->repl_applied) {
+ sdsrange(c->querybuf,c->repl_applied,-1);
+ c->qb_pos -= c->repl_applied;
+ c->repl_applied = 0;
+ }
+ } else if (c->qb_pos) {
+ /* Trim to pos */
sdsrange(c->querybuf,c->qb_pos,-1);
c->qb_pos = 0;
}
@@ -2519,7 +2554,8 @@ int processInputBuffer(client *c) {
/* Update client memory usage after processing the query buffer, this is
* important in case the query buffer is big and wasn't drained during
* the above loop (because of partially sent big commands). */
- updateClientMemUsage(c);
+ if (io_threads_op == IO_THREADS_OP_IDLE)
+ updateClientMemUsage(c);
return C_OK;
}
@@ -2546,16 +2582,22 @@ void readQueryFromClient(connection *conn) {
if (c->reqtype == PROTO_REQ_MULTIBULK && c->multibulklen && c->bulklen != -1
&& c->bulklen >= PROTO_MBULK_BIG_ARG)
{
- ssize_t remaining = (size_t)(c->bulklen+2)-sdslen(c->querybuf);
+ ssize_t remaining = (size_t)(c->bulklen+2)-(sdslen(c->querybuf)-c->qb_pos);
big_arg = 1;
/* Note that the 'remaining' variable may be zero in some edge case,
* for example once we resume a blocked client after CLIENT PAUSE. */
if (remaining > 0) readlen = remaining;
+
+        /* A master client needs to expand the readlen when it meets a BIG_ARG (see #9100),
+         * but doesn't need to align to the next arg; we can read more data. */
+ if (c->flags & CLIENT_MASTER && readlen < PROTO_IOBUF_LEN)
+ readlen = PROTO_IOBUF_LEN;
}
qblen = sdslen(c->querybuf);
- if (big_arg || sdsalloc(c->querybuf) < PROTO_IOBUF_LEN) {
+ if (!(c->flags & CLIENT_MASTER) && // master client's querybuf can grow greedy.
+ (big_arg || sdsalloc(c->querybuf) < PROTO_IOBUF_LEN)) {
/* When reading a BIG_ARG we won't be reading more than that one arg
* into the query buffer, so we don't need to pre-allocate more than we
* need, so using the non-greedy growing. For an initial allocation of
@@ -2585,12 +2627,6 @@ void readQueryFromClient(connection *conn) {
}
freeClientAsync(c);
goto done;
- } else if (c->flags & CLIENT_MASTER) {
- /* Append the query buffer to the pending (not applied) buffer
- * of the master. We'll use this buffer later in order to have a
- * copy of the string applied by the last command executed. */
- c->pending_querybuf = sdscatlen(c->pending_querybuf,
- c->querybuf+qblen,nread);
}
sdsIncrLen(c->querybuf,nread);
@@ -2868,8 +2904,6 @@ void clientCommand(client *c) {
" Control the replies sent to the current connection.",
"SETNAME <name>",
" Assign the name <name> to the current connection.",
-"GETNAME",
-" Get the name of the current connection.",
"UNBLOCK <clientid> [TIMEOUT|ERROR]",
" Unblock the specified blocked client.",
"TRACKING (ON|OFF) [REDIRECT <id>] [BCAST] [PREFIX <prefix> [...]]",
@@ -3075,6 +3109,10 @@ NULL
if (getLongLongFromObjectOrReply(c,c->argv[2],&id,NULL)
!= C_OK) return;
struct client *target = lookupClientByID(id);
+ /* Note that we never try to unblock a client blocked on a module command, which
+ * doesn't have a timeout callback (even in the case of UNBLOCK ERROR).
+ * The reason is that we assume that if a command doesn't expect to be timedout,
+ * it also doesn't expect to be unblocked by CLIENT UNBLOCK */
if (target && target->flags & CLIENT_BLOCKED && moduleBlockedClientMayTimeout(target)) {
if (unblock_error)
addReplyError(target,
@@ -3464,7 +3502,11 @@ static void retainOriginalCommandVector(client *c) {
* original_argv array. */
void redactClientCommandArgument(client *c, int argc) {
retainOriginalCommandVector(c);
- decrRefCount(c->argv[argc]);
+ if (c->original_argv[argc] == shared.redacted) {
+ /* This argument has already been redacted */
+ return;
+ }
+ decrRefCount(c->original_argv[argc]);
c->original_argv[argc] = shared.redacted;
}
@@ -4165,8 +4207,8 @@ int handleClientsWithPendingWritesUsingThreads(void) {
while((ln = listNext(&li))) {
client *c = listNodeValue(ln);
- /* Update the client in the mem usage buckets after we're done processing it in the io-threads */
- updateClientMemUsageBucket(c);
+ /* Update the client in the mem usage after we're done processing it in the io-threads */
+ updateClientMemUsage(c);
/* Install the write handler if there are pending writes in some
* of the clients. */
@@ -4274,17 +4316,10 @@ int handleClientsWithPendingReadsUsingThreads(void) {
continue;
}
- /* Once io-threads are idle we can update the client in the mem usage buckets */
- updateClientMemUsageBucket(c);
-
- if (processPendingCommandsAndResetClient(c) == C_ERR) {
- /* If the client is no longer valid, we avoid
- * processing the client later. So we just go
- * to the next. */
- continue;
- }
+ /* Once io-threads are idle we can update the client in the mem usage */
+ updateClientMemUsage(c);
- if (processInputBuffer(c) == C_ERR) {
+ if (processPendingCommandAndInputBuffer(c) == C_ERR) {
/* If the client is no longer valid, we avoid
* processing the client later. So we just go
* to the next. */
diff --git a/src/object.c b/src/object.c
index 089547167..a60a27e90 100644
--- a/src/object.c
+++ b/src/object.c
@@ -1532,7 +1532,7 @@ NULL
} else if (!strcasecmp(c->argv[1]->ptr,"stats") && c->argc == 2) {
struct redisMemOverhead *mh = getMemoryOverheadData();
- addReplyMapLen(c,26+mh->num_dbs);
+ addReplyMapLen(c,27+mh->num_dbs);
addReplyBulkCString(c,"peak.allocated");
addReplyLongLong(c,mh->peak_allocated);
@@ -1552,6 +1552,9 @@ NULL
addReplyBulkCString(c,"clients.normal");
addReplyLongLong(c,mh->clients_normal);
+ addReplyBulkCString(c,"cluster.links");
+ addReplyLongLong(c,mh->cluster_links);
+
addReplyBulkCString(c,"aof.buffer");
addReplyLongLong(c,mh->aof_buffer);
diff --git a/src/pqsort.c b/src/pqsort.c
index 508c09f92..fab54e026 100644
--- a/src/pqsort.c
+++ b/src/pqsort.c
@@ -38,7 +38,7 @@
*/
#include <sys/types.h>
-
+#include <stdint.h>
#include <errno.h>
#include <stdlib.h>
@@ -62,7 +62,7 @@ static inline void swapfunc (char *, char *, size_t, int);
} while (--i > 0); \
}
-#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+#define SWAPINIT(a, es) swaptype = (uintptr_t)a % sizeof(long) || \
es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
static inline void
diff --git a/src/rax.c b/src/rax.c
index 3e6829c6e..dd89ad929 100644
--- a/src/rax.c
+++ b/src/rax.c
@@ -905,9 +905,9 @@ int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) {
return raxGenericInsert(rax,s,len,data,old,1);
}
-/* Non overwriting insert function: this if an element with the same key
+/* Non overwriting insert function: if an element with the same key
* exists, the value is not updated and the function returns 0.
- * This is a just a wrapper for raxGenericInsert(). */
+ * This is just a wrapper for raxGenericInsert(). */
int raxTryInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) {
return raxGenericInsert(rax,s,len,data,old,0);
}
diff --git a/src/rdb.c b/src/rdb.c
index d5f853dd8..0283630f7 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1242,24 +1242,9 @@ ssize_t rdbSaveFunctions(rio *rdb) {
ssize_t written = 0;
ssize_t ret;
while ((entry = dictNext(iter))) {
- if ((ret = rdbSaveType(rdb, RDB_OPCODE_FUNCTION)) < 0) goto werr;
+ if ((ret = rdbSaveType(rdb, RDB_OPCODE_FUNCTION2)) < 0) goto werr;
written += ret;
functionLibInfo *li = dictGetVal(entry);
- if ((ret = rdbSaveRawString(rdb, (unsigned char *) li->name, sdslen(li->name))) < 0) goto werr;
- written += ret;
- if ((ret = rdbSaveRawString(rdb, (unsigned char *) li->ei->name, sdslen(li->ei->name))) < 0) goto werr;
- written += ret;
- if (li->desc) {
- /* desc exists */
- if ((ret = rdbSaveLen(rdb, 1)) < 0) goto werr;
- written += ret;
- if ((ret = rdbSaveRawString(rdb, (unsigned char *) li->desc, sdslen(li->desc))) < 0) goto werr;
- written += ret;
- } else {
- /* desc not exists */
- if ((ret = rdbSaveLen(rdb, 0)) < 0) goto werr;
- written += ret;
- }
if ((ret = rdbSaveRawString(rdb, (unsigned char *) li->code, sdslen(li->code))) < 0) goto werr;
written += ret;
}
@@ -2811,56 +2796,79 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
*
* The lib_ctx argument is also optional. If NULL is given, only verify rdb
* structure with out performing the actual functions loading. */
-int rdbFunctionLoad(rio *rdb, int ver, functionsLibCtx* lib_ctx, int rdbflags, sds *err) {
+int rdbFunctionLoad(rio *rdb, int ver, functionsLibCtx* lib_ctx, int type, int rdbflags, sds *err) {
UNUSED(ver);
- sds name = NULL;
- sds engine_name = NULL;
- sds desc = NULL;
- sds blob = NULL;
- uint64_t has_desc;
sds error = NULL;
+ sds final_payload = NULL;
int res = C_ERR;
- if (!(name = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
- error = sdsnew("Failed loading library name");
- goto error;
- }
+ if (type == RDB_OPCODE_FUNCTION) {
+ /* RDB that was generated on versions 7.0 rc1 and 7.0 rc2 has another
+ * an old format that contains the library name, engine and description.
+ * To support this format we must read those values. */
+ sds name = NULL;
+ sds engine_name = NULL;
+ sds desc = NULL;
+ sds blob = NULL;
+ uint64_t has_desc;
+
+ if (!(name = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
+ error = sdsnew("Failed loading library name");
+ goto cleanup;
+ }
- if (!(engine_name = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
- error = sdsnew("Failed loading engine name");
- goto error;
- }
+ if (!(engine_name = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
+ error = sdsnew("Failed loading engine name");
+ goto cleanup;
+ }
- if ((has_desc = rdbLoadLen(rdb, NULL)) == RDB_LENERR) {
- error = sdsnew("Failed loading library description indicator");
- goto error;
- }
+ if ((has_desc = rdbLoadLen(rdb, NULL)) == RDB_LENERR) {
+ error = sdsnew("Failed loading library description indicator");
+ goto cleanup;
+ }
- if (has_desc && !(desc = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
- error = sdsnew("Failed loading library description");
- goto error;
- }
+ if (has_desc && !(desc = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
+ error = sdsnew("Failed loading library description");
+ goto cleanup;
+ }
- if (!(blob = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
- error = sdsnew("Failed loading library blob");
- goto error;
+ if (!(blob = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
+ error = sdsnew("Failed loading library blob");
+ goto cleanup;
+ }
+ /* Translate old format (versions 7.0 rc1 and 7.0 rc2) to new format.
+ * The new format has the library name and engine inside the script payload.
+ * Add those parameters to the original script payload (ignore the description if exists). */
+ final_payload = sdscatfmt(sdsempty(), "#!%s name=%s\n%s", engine_name, name, blob);
+cleanup:
+ if (name) sdsfree(name);
+ if (engine_name) sdsfree(engine_name);
+ if (desc) sdsfree(desc);
+ if (blob) sdsfree(blob);
+ if (error) goto done;
+ } else if (type == RDB_OPCODE_FUNCTION2) {
+ if (!(final_payload = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL))) {
+ error = sdsnew("Failed loading library payload");
+ goto done;
+ }
+ } else {
+ serverPanic("Bad function type was given to rdbFunctionLoad");
}
if (lib_ctx) {
- if (functionsCreateWithLibraryCtx(name, engine_name, desc, blob, rdbflags & RDBFLAGS_ALLOW_DUP, &error, lib_ctx) != C_OK) {
+ sds library_name = NULL;
+ if (!(library_name = functionsCreateWithLibraryCtx(final_payload, rdbflags & RDBFLAGS_ALLOW_DUP, &error, lib_ctx))) {
if (!error) {
error = sdsnew("Failed creating the library");
}
- goto error;
+ goto done;
}
+ sdsfree(library_name);
}
res = C_OK;
-error:
- if (name) sdsfree(name);
- if (engine_name) sdsfree(engine_name);
- if (desc) sdsfree(desc);
- if (blob) sdsfree(blob);
+done:
+ if (final_payload) sdsfree(final_payload);
if (error) {
if (err) {
*err = error;
@@ -3091,9 +3099,9 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin
decrRefCount(aux);
continue; /* Read next opcode. */
}
- } else if (type == RDB_OPCODE_FUNCTION) {
+ } else if (type == RDB_OPCODE_FUNCTION || type == RDB_OPCODE_FUNCTION2) {
sds err = NULL;
- if (rdbFunctionLoad(rdb, rdbver, rdb_loading_ctx->functions_lib_ctx, rdbflags, &err) != C_OK) {
+ if (rdbFunctionLoad(rdb, rdbver, rdb_loading_ctx->functions_lib_ctx, type, rdbflags, &err) != C_OK) {
serverLog(LL_WARNING,"Failed loading library, %s", err);
sdsfree(err);
goto eoferr;
diff --git a/src/rdb.h b/src/rdb.h
index 0d298c40d..4f057a252 100644
--- a/src/rdb.h
+++ b/src/rdb.h
@@ -101,7 +101,8 @@
#define rdbIsObjectType(t) ((t >= 0 && t <= 7) || (t >= 9 && t <= 19))
/* Special RDB opcodes (saved/loaded with rdbSaveType/rdbLoadType). */
-#define RDB_OPCODE_FUNCTION 246 /* engine data */
+#define RDB_OPCODE_FUNCTION2 245 /* function library data */
+#define RDB_OPCODE_FUNCTION 246 /* old function library data for 7.0 rc1 and rc2 */
#define RDB_OPCODE_MODULE_AUX 247 /* Module auxiliary data. */
#define RDB_OPCODE_IDLE 248 /* LRU idle time. */
#define RDB_OPCODE_FREQ 249 /* LFU frequency. */
@@ -170,7 +171,7 @@ int rdbSaveBinaryFloatValue(rio *rdb, float val);
int rdbLoadBinaryFloatValue(rio *rdb, float *val);
int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi);
int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadingCtx *rdb_loading_ctx);
-int rdbFunctionLoad(rio *rdb, int ver, functionsLibCtx* lib_ctx, int rdbflags, sds *err);
+int rdbFunctionLoad(rio *rdb, int ver, functionsLibCtx* lib_ctx, int type, int rdbflags, sds *err);
int rdbSaveRio(int req, rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi);
ssize_t rdbSaveFunctions(rio *rdb);
rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi);
diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c
index 64893ac37..9b183559d 100644
--- a/src/redis-benchmark.c
+++ b/src/redis-benchmark.c
@@ -1118,6 +1118,9 @@ static clusterNode **addClusterNode(clusterNode *node) {
return config.cluster_nodes;
}
+/* TODO: This should be refactored to use CLUSTER SLOTS, the migrating/importing
+ * information is anyway not used.
+ */
static int fetchClusterConfiguration() {
int success = 1;
redisContext *ctx = NULL;
@@ -1179,7 +1182,7 @@ static int fetchClusterConfiguration() {
clusterNode *node = NULL;
char *ip = NULL;
int port = 0;
- char *paddr = strchr(addr, ':');
+ char *paddr = strrchr(addr, ':');
if (paddr != NULL) {
*paddr = '\0';
ip = addr;
diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c
index 7fc798e45..accdc35b0 100644
--- a/src/redis-check-rdb.c
+++ b/src/redis-check-rdb.c
@@ -63,6 +63,7 @@ struct {
#define RDB_CHECK_DOING_READ_LEN 6
#define RDB_CHECK_DOING_READ_AUX 7
#define RDB_CHECK_DOING_READ_MODULE_AUX 8
+#define RDB_CHECK_DOING_READ_FUNCTIONS 9
char *rdb_check_doing_string[] = {
"start",
@@ -73,7 +74,8 @@ char *rdb_check_doing_string[] = {
"check-sum",
"read-len",
"read-aux",
- "read-module-aux"
+ "read-module-aux",
+ "read-functions"
};
char *rdb_type_string[] = {
@@ -303,9 +305,10 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) {
robj *o = rdbLoadCheckModuleValue(&rdb,name);
decrRefCount(o);
continue; /* Read type again. */
- } else if (type == RDB_OPCODE_FUNCTION) {
+ } else if (type == RDB_OPCODE_FUNCTION || type == RDB_OPCODE_FUNCTION2) {
sds err = NULL;
- if (rdbFunctionLoad(&rdb, rdbver, NULL, 0, &err) != C_OK) {
+ rdbstate.doing = RDB_CHECK_DOING_READ_FUNCTIONS;
+ if (rdbFunctionLoad(&rdb, rdbver, NULL, type, 0, &err) != C_OK) {
rdbCheckError("Failed loading library, %s", err);
sdsfree(err);
goto err;
diff --git a/src/redis-cli.c b/src/redis-cli.c
index bbbe6d6ec..1cd1ad85c 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -70,6 +70,7 @@
#define OUTPUT_RAW 1
#define OUTPUT_CSV 2
#define OUTPUT_JSON 3
+#define OUTPUT_QUOTED_JSON 4
#define REDIS_CLI_KEEPALIVE_INTERVAL 15 /* seconds */
#define REDIS_CLI_DEFAULT_PIPE_TIMEOUT 30 /* seconds */
#define REDIS_CLI_HISTFILE_ENV "REDISCLI_HISTFILE"
@@ -155,6 +156,9 @@
#define CC_FORCE (1<<0) /* Re-connect if already connected. */
#define CC_QUIET (1<<1) /* Don't log connecting errors. */
+/* DNS lookup */
+#define NET_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46 */
+
/* --latency-dist palettes. */
int spectrum_palette_color_size = 19;
int spectrum_palette_color[] = {0,233,234,235,237,239,241,243,245,247,144,143,142,184,226,214,208,202,196};
@@ -281,7 +285,7 @@ static void usage(int err);
static void slaveMode(void);
char *redisGitSHA1(void);
char *redisGitDirty(void);
-static int cliConnect(int force);
+static int cliConnect(int flags);
static char *getInfoField(char *info, char *field);
static long getLongInfoField(char *info, char *field);
@@ -799,7 +803,12 @@ static void cliInitHelp(void) {
redisReply *commandTable;
dict *groups;
- if (cliConnect(CC_QUIET) == REDIS_ERR) return;
+ if (cliConnect(CC_QUIET) == REDIS_ERR) {
+ /* Can not connect to the server, but we still want to provide
+ * help, generate it only from the old help.h data instead. */
+ cliOldInitHelp();
+ return;
+ }
commandTable = redisCommand(context, "COMMAND DOCS");
if (commandTable == NULL || commandTable->type == REDIS_REPLY_ERROR) {
/* New COMMAND DOCS subcommand not supported - generate help from old help.h data instead. */
@@ -809,7 +818,7 @@ static void cliInitHelp(void) {
return;
};
if (commandTable->type != REDIS_REPLY_MAP && commandTable->type != REDIS_REPLY_ARRAY) return;
-
+
/* Scan the array reported by COMMAND DOCS and fill in the entries */
helpEntriesLen = cliCountCommands(commandTable);
helpEntries = zmalloc(sizeof(helpEntry)*helpEntriesLen);
@@ -869,6 +878,12 @@ static void cliOutputHelp(int argc, char **argv) {
group = argv[0]+1;
}
+ if (helpEntries == NULL) {
+ /* Initialize the help using the results of the COMMAND command.
+ * In case we are using redis-cli help XXX, we need to init it. */
+ cliInitHelp();
+ }
+
assert(argc > 0);
for (i = 0; i < helpEntriesLen; i++) {
entry = &helpEntries[i];
@@ -1486,16 +1501,39 @@ static sds cliFormatReplyCSV(redisReply *r) {
return out;
}
-static sds cliFormatReplyJson(sds out, redisReply *r) {
+/* Append specified buffer to out and return it, using required JSON output
+ * mode. */
+static sds jsonStringOutput(sds out, const char *p, int len, int mode) {
+ if (mode == OUTPUT_JSON) {
+ return escapeJsonString(out, p, len);
+ } else if (mode == OUTPUT_QUOTED_JSON) {
+ /* Need to double-quote backslashes */
+ sds tmp = sdscatrepr(sdsempty(), p, len);
+ int tmplen = sdslen(tmp);
+ char *n = tmp;
+ while (tmplen--) {
+ if (*n == '\\') out = sdscatlen(out, "\\\\", 2);
+ else out = sdscatlen(out, n, 1);
+ n++;
+ }
+
+ sdsfree(tmp);
+ return out;
+ } else {
+ assert(0);
+ }
+}
+
+static sds cliFormatReplyJson(sds out, redisReply *r, int mode) {
unsigned int i;
switch (r->type) {
case REDIS_REPLY_ERROR:
out = sdscat(out,"error:");
- out = sdscatrepr(out,r->str,strlen(r->str));
+ out = jsonStringOutput(out,r->str,strlen(r->str),mode);
break;
case REDIS_REPLY_STATUS:
- out = sdscatrepr(out,r->str,r->len);
+ out = jsonStringOutput(out,r->str,r->len,mode);
break;
case REDIS_REPLY_INTEGER:
out = sdscatprintf(out,"%lld",r->integer);
@@ -1505,7 +1543,7 @@ static sds cliFormatReplyJson(sds out, redisReply *r) {
break;
case REDIS_REPLY_STRING:
case REDIS_REPLY_VERB:
- out = sdscatrepr(out,r->str,r->len);
+ out = jsonStringOutput(out,r->str,r->len,mode);
break;
case REDIS_REPLY_NIL:
out = sdscat(out,"null");
@@ -1518,7 +1556,7 @@ static sds cliFormatReplyJson(sds out, redisReply *r) {
case REDIS_REPLY_PUSH:
out = sdscat(out,"[");
for (i = 0; i < r->elements; i++ ) {
- out = cliFormatReplyJson(out, r->element[i]);
+ out = cliFormatReplyJson(out,r->element[i],mode);
if (i != r->elements-1) out = sdscat(out,",");
}
out = sdscat(out,"]");
@@ -1527,20 +1565,25 @@ static sds cliFormatReplyJson(sds out, redisReply *r) {
out = sdscat(out,"{");
for (i = 0; i < r->elements; i += 2) {
redisReply *key = r->element[i];
- if (key->type == REDIS_REPLY_STATUS ||
+ if (key->type == REDIS_REPLY_ERROR ||
+ key->type == REDIS_REPLY_STATUS ||
key->type == REDIS_REPLY_STRING ||
- key->type == REDIS_REPLY_VERB) {
- out = cliFormatReplyJson(out, key);
+ key->type == REDIS_REPLY_VERB)
+ {
+ out = cliFormatReplyJson(out,key,mode);
} else {
- /* According to JSON spec, JSON map keys must be strings, */
- /* and in RESP3, they can be other types. */
- sds tmp = cliFormatReplyJson(sdsempty(), key);
- out = sdscatrepr(out,tmp,sdslen(tmp));
- sdsfree(tmp);
+                /* According to the JSON spec, JSON map keys must be strings,
+                 * and in RESP3, they can be other types.
+                 * The first call (cliFormatReplyJson) converts a non-string type to a string;
+                 * the second (escapeJsonString) escapes the converted string. */
+ sds keystr = cliFormatReplyJson(sdsempty(),key,mode);
+ if (keystr[0] == '"') out = sdscatsds(out,keystr);
+ else out = sdscatfmt(out,"\"%S\"",keystr);
+ sdsfree(keystr);
}
out = sdscat(out,":");
- out = cliFormatReplyJson(out, r->element[i+1]);
+ out = cliFormatReplyJson(out,r->element[i+1],mode);
if (i != r->elements-2) out = sdscat(out,",");
}
out = sdscat(out,"}");
@@ -1566,8 +1609,8 @@ static sds cliFormatReply(redisReply *reply, int mode, int verbatim) {
} else if (mode == OUTPUT_CSV) {
out = cliFormatReplyCSV(reply);
out = sdscatlen(out, "\n", 1);
- } else if (mode == OUTPUT_JSON) {
- out = cliFormatReplyJson(sdsempty(), reply);
+ } else if (mode == OUTPUT_JSON || mode == OUTPUT_QUOTED_JSON) {
+ out = cliFormatReplyJson(sdsempty(), reply, mode);
out = sdscatlen(out, "\n", 1);
} else {
fprintf(stderr, "Error: Unknown output encoding %d\n", mode);
@@ -1684,12 +1727,6 @@ static int cliSendCommand(int argc, char **argv, long repeat) {
size_t *argvlen;
int j, output_raw;
- if (!config.eval_ldb && /* In debugging mode, let's pass "help" to Redis. */
- (!strcasecmp(command,"help") || !strcasecmp(command,"?"))) {
- cliOutputHelp(--argc, ++argv);
- return REDIS_OK;
- }
-
if (context == NULL) return REDIS_ERR;
output_raw = 0;
@@ -1953,11 +1990,17 @@ static int parseOptions(int argc, char **argv) {
} else if (!strcmp(argv[i],"--csv")) {
config.output = OUTPUT_CSV;
} else if (!strcmp(argv[i],"--json")) {
- /* Not overwrite explicit value by -3*/
+ /* Not overwrite explicit value by -3 */
if (config.resp3 == 0) {
config.resp3 = 2;
}
config.output = OUTPUT_JSON;
+ } else if (!strcmp(argv[i],"--quoted-json")) {
+ /* Not overwrite explicit value by -3*/
+ if (config.resp3 == 0) {
+ config.resp3 = 2;
+ }
+ config.output = OUTPUT_QUOTED_JSON;
} else if (!strcmp(argv[i],"--latency")) {
config.latency_mode = 1;
} else if (!strcmp(argv[i],"--latency-dist")) {
@@ -2289,6 +2332,7 @@ static void usage(int err) {
" --quoted-input Force input to be handled as quoted strings.\n"
" --csv Output in CSV format.\n"
" --json Output in JSON format (default RESP3, use -2 if you want to use with RESP2).\n"
+" --quoted-json Same as --json, but produce ASCII-safe quoted strings, not Unicode.\n"
" --show-pushes <yn> Whether to print RESP3 PUSH messages. Enabled by default when\n"
" STDOUT is a tty but can be overridden with --show-pushes no.\n"
" --stat Print rolling stats about server: mem, clients, ...\n",version);
@@ -2384,6 +2428,17 @@ static int confirmWithYes(char *msg, int ignore_force) {
}
static int issueCommandRepeat(int argc, char **argv, long repeat) {
+ /* In Lua debugging mode, we want to pass the "help" to Redis to get
+ * its own HELP message, rather than handling it in the CLI, see ldbRepl.
+ *
+ * For the normal Redis HELP, we can process it without a connection. */
+ if (!config.eval_ldb &&
+ (!strcasecmp(argv[0],"help") || !strcasecmp(argv[0],"?")))
+ {
+ cliOutputHelp(--argc, ++argv);
+ return REDIS_OK;
+ }
+
while (1) {
if (config.cluster_reissue_command || context == NULL ||
context->err == REDIS_ERR_IO || context->err == REDIS_ERR_EOF)
@@ -2403,6 +2458,8 @@ static int issueCommandRepeat(int argc, char **argv, long repeat) {
}
if (cliSendCommand(argc,argv,repeat) != REDIS_OK) {
cliPrintContextError();
+ redisFree(context);
+ context = NULL;
return REDIS_ERR;
}
@@ -2540,8 +2597,13 @@ static void repl(void) {
int argc;
sds *argv;
- /* Initialize the help using the results of the COMMAND command. */
- cliInitHelp();
+ /* There is no need to initialize redis HELP when we are in lua debugger mode.
+ * It has its own HELP and commands (COMMAND or COMMAND DOCS will fail and get nothing).
+ * We will initialize the redis HELP after the Lua debugging session ends. */
+ if (!config.eval_ldb) {
+ /* Initialize the help using the results of the COMMAND command. */
+ cliInitHelp();
+ }
config.interactive = 1;
linenoiseSetMultiLine(1);
@@ -2643,6 +2705,7 @@ static void repl(void) {
printf("\n(Lua debugging session ended%s)\n\n",
config.eval_ldb_sync ? "" :
" -- dataset changes rolled back");
+ cliInitHelp();
}
elapsed = mstime()-start_time;
@@ -2695,7 +2758,7 @@ static int noninteractive(int argc, char **argv) {
retval = issueCommand(argc, sds_args);
sdsfreesplitres(sds_args, argc);
- return retval;
+ return retval == REDIS_OK ? 0 : 1;
}
/*------------------------------------------------------------------------------
@@ -2782,7 +2845,7 @@ static int evalMode(int argc, char **argv) {
break; /* Return to the caller. */
}
}
- return retval;
+ return retval == REDIS_OK ? 0 : 1;
}
/*------------------------------------------------------------------------------
@@ -3915,7 +3978,10 @@ static int clusterManagerSetSlot(clusterManagerNode *node1,
slot, status,
(char *) node2->name);
if (err != NULL) *err = NULL;
- if (!reply) return 0;
+ if (!reply) {
+ if (err) *err = zstrdup("CLUSTER SETSLOT failed to run");
+ return 0;
+ }
int success = 1;
if (reply->type == REDIS_REPLY_ERROR) {
success = 0;
@@ -4365,33 +4431,41 @@ static int clusterManagerMoveSlot(clusterManagerNode *source,
pipeline, print_dots, err);
if (!(opts & CLUSTER_MANAGER_OPT_QUIET)) printf("\n");
if (!success) return 0;
- /* Set the new node as the owner of the slot in all the known nodes. */
if (!option_cold) {
+ /* Set the new node as the owner of the slot in all the known nodes.
+ *
+ * We inform the target node first. It will propagate the information to
+ * the rest of the cluster.
+ *
+ * If we inform any other node first, it can happen that the target node
+ * crashes before it is set as the new owner and then the slot is left
+ * without an owner which results in redirect loops. See issue #7116. */
+ success = clusterManagerSetSlot(target, target, slot, "node", err);
+ if (!success) return 0;
+
+ /* Inform the source node. If the source node has just lost its last
+ * slot and the target node has already informed the source node, the
+ * source node has turned itself into a replica. This is not an error in
+ * this scenario so we ignore it. See issue #9223. */
+ success = clusterManagerSetSlot(source, target, slot, "node", err);
+ const char *acceptable = "ERR Please use SETSLOT only with masters.";
+ if (!success && err && !strncmp(*err, acceptable, strlen(acceptable))) {
+ zfree(*err);
+ *err = NULL;
+ } else if (!success && err) {
+ return 0;
+ }
+
+ /* We also inform the other nodes to avoid redirects in case the target
+ * node is slow to propagate the change to the entire cluster. */
listIter li;
listNode *ln;
listRewind(cluster_manager.nodes, &li);
while ((ln = listNext(&li)) != NULL) {
clusterManagerNode *n = ln->value;
+ if (n == target || n == source) continue; /* already done */
if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue;
- redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER "
- "SETSLOT %d %s %s",
- slot, "node",
- target->name);
- success = (r != NULL);
- if (!success) {
- if (err) *err = zstrdup("CLUSTER SETSLOT failed to run");
- return 0;
- }
- if (r->type == REDIS_REPLY_ERROR) {
- success = 0;
- if (err != NULL) {
- *err = zmalloc((r->len + 1) * sizeof(char));
- strcpy(*err, r->str);
- } else {
- CLUSTER_MANAGER_PRINT_REPLY_ERROR(n, r->str);
- }
- }
- freeReplyObject(r);
+ success = clusterManagerSetSlot(n, target, slot, "node", err);
if (!success) return 0;
}
}
@@ -6235,16 +6309,26 @@ assign_replicas:
clusterManagerLogInfo(">>> Sending CLUSTER MEET messages to join "
"the cluster\n");
clusterManagerNode *first = NULL;
+ char first_ip[NET_IP_STR_LEN]; /* first->ip may be a hostname */
listRewind(cluster_manager.nodes, &li);
while ((ln = listNext(&li)) != NULL) {
clusterManagerNode *node = ln->value;
if (first == NULL) {
first = node;
+ /* Although hiredis supports connecting to a hostname, CLUSTER
+ * MEET requires an IP address, so we do a DNS lookup here. */
+ if (anetResolve(NULL, first->ip, first_ip, sizeof(first_ip), ANET_NONE)
+ == ANET_ERR)
+ {
+ fprintf(stderr, "Invalid IP address or hostname specified: %s\n", first->ip);
+ success = 0;
+ goto cleanup;
+ }
continue;
}
redisReply *reply = NULL;
reply = CLUSTER_MANAGER_COMMAND(node, "cluster meet %s %d",
- first->ip, first->port);
+ first_ip, first->port);
int is_err = 0;
if (reply != NULL) {
if ((is_err = reply->type == REDIS_REPLY_ERROR))
@@ -6416,8 +6500,15 @@ static int clusterManagerCommandAddNode(int argc, char **argv) {
// Send CLUSTER MEET command to the new node
clusterManagerLogInfo(">>> Send CLUSTER MEET to node %s:%d to make it "
"join the cluster.\n", ip, port);
+ /* CLUSTER MEET requires an IP address, so we do a DNS lookup here. */
+ char first_ip[NET_IP_STR_LEN];
+ if (anetResolve(NULL, first->ip, first_ip, sizeof(first_ip), ANET_NONE) == ANET_ERR) {
+ fprintf(stderr, "Invalid IP address or hostname specified: %s\n", first->ip);
+ success = 0;
+ goto cleanup;
+ }
reply = CLUSTER_MANAGER_COMMAND(new_node, "CLUSTER MEET %s %d",
- first->ip, first->port);
+ first_ip, first->port);
if (!(success = clusterManagerCheckRedisReply(new_node, reply, NULL)))
goto cleanup;
@@ -7252,14 +7343,14 @@ static int clusterManagerCommandHelp(int argc, char **argv) {
int commands_count = sizeof(clusterManagerCommands) /
sizeof(clusterManagerCommandDef);
int i = 0, j;
- fprintf(stderr, "Cluster Manager Commands:\n");
+ fprintf(stdout, "Cluster Manager Commands:\n");
int padding = 15;
for (; i < commands_count; i++) {
clusterManagerCommandDef *def = &(clusterManagerCommands[i]);
int namelen = strlen(def->name), padlen = padding - namelen;
- fprintf(stderr, " %s", def->name);
- for (j = 0; j < padlen; j++) fprintf(stderr, " ");
- fprintf(stderr, "%s\n", (def->args ? def->args : ""));
+ fprintf(stdout, " %s", def->name);
+ for (j = 0; j < padlen; j++) fprintf(stdout, " ");
+ fprintf(stdout, "%s\n", (def->args ? def->args : ""));
if (def->options != NULL) {
int optslen = strlen(def->options);
char *p = def->options, *eos = p + optslen;
@@ -7269,18 +7360,18 @@ static int clusterManagerCommandHelp(int argc, char **argv) {
char buf[255];
memcpy(buf, p, deflen);
buf[deflen] = '\0';
- for (j = 0; j < padding; j++) fprintf(stderr, " ");
- fprintf(stderr, " --cluster-%s\n", buf);
+ for (j = 0; j < padding; j++) fprintf(stdout, " ");
+ fprintf(stdout, " --cluster-%s\n", buf);
p = comma + 1;
if (p >= eos) break;
}
if (p < eos) {
- for (j = 0; j < padding; j++) fprintf(stderr, " ");
- fprintf(stderr, " --cluster-%s\n", p);
+ for (j = 0; j < padding; j++) fprintf(stdout, " ");
+ fprintf(stdout, " --cluster-%s\n", p);
}
}
}
- fprintf(stderr, "\nFor check, fix, reshard, del-node, set-timeout, "
+ fprintf(stdout, "\nFor check, fix, reshard, del-node, set-timeout, "
"info, rebalance, call, import, backup you "
"can specify the host and port of any working node in "
"the cluster.\n");
@@ -7288,16 +7379,16 @@ static int clusterManagerCommandHelp(int argc, char **argv) {
int options_count = sizeof(clusterManagerOptions) /
sizeof(clusterManagerOptionDef);
i = 0;
- fprintf(stderr, "\nCluster Manager Options:\n");
+ fprintf(stdout, "\nCluster Manager Options:\n");
for (; i < options_count; i++) {
clusterManagerOptionDef *def = &(clusterManagerOptions[i]);
int namelen = strlen(def->name), padlen = padding - namelen;
- fprintf(stderr, " %s", def->name);
- for (j = 0; j < padlen; j++) fprintf(stderr, " ");
- fprintf(stderr, "%s\n", def->desc);
+ fprintf(stdout, " %s", def->name);
+ for (j = 0; j < padlen; j++) fprintf(stdout, " ");
+ fprintf(stdout, "%s\n", def->desc);
}
- fprintf(stderr, "\n");
+ fprintf(stdout, "\n");
return 0;
}
@@ -8969,10 +9060,11 @@ int main(int argc, char **argv) {
}
/* Otherwise, we have some arguments to execute */
- if (cliConnect(0) != REDIS_OK) exit(1);
if (config.eval) {
+ if (cliConnect(0) != REDIS_OK) exit(1);
return evalMode(argc,argv);
} else {
+ cliConnect(CC_QUIET);
return noninteractive(argc,argv);
}
}
diff --git a/src/redismodule.h b/src/redismodule.h
index 79ce2c697..f27c06b0e 100644
--- a/src/redismodule.h
+++ b/src/redismodule.h
@@ -80,6 +80,15 @@
#define REDISMODULE_HASH_EXISTS (1<<3)
#define REDISMODULE_HASH_COUNT_ALL (1<<4)
+#define REDISMODULE_CONFIG_DEFAULT 0 /* This is the default for a module config. */
+#define REDISMODULE_CONFIG_IMMUTABLE (1ULL<<0) /* Can this value only be set at startup? */
+#define REDISMODULE_CONFIG_SENSITIVE (1ULL<<1) /* Does this value contain sensitive information */
+#define REDISMODULE_CONFIG_HIDDEN (1ULL<<4) /* This config is hidden in `config get <pattern>` (used for tests/debugging) */
+#define REDISMODULE_CONFIG_PROTECTED (1ULL<<5) /* Becomes immutable if enable-protected-configs is enabled. */
+#define REDISMODULE_CONFIG_DENY_LOADING (1ULL<<6) /* This config is forbidden during loading. */
+
+#define REDISMODULE_CONFIG_MEMORY (1ULL<<7) /* Indicates if this value can be set as a memory value */
+
/* StreamID type. */
typedef struct RedisModuleStreamID {
uint64_t ms;
@@ -429,7 +438,8 @@ typedef void (*RedisModuleEventLoopOneShotFunc)(void *user_data);
#define REDISMODULE_EVENT_FORK_CHILD 13
#define REDISMODULE_EVENT_REPL_ASYNC_LOAD 14
#define REDISMODULE_EVENT_EVENTLOOP 15
-#define _REDISMODULE_EVENT_NEXT 16 /* Next event flag, should be updated if a new event added. */
+#define REDISMODULE_EVENT_CONFIG 16
+#define _REDISMODULE_EVENT_NEXT 17 /* Next event flag, should be updated if a new event added. */
typedef struct RedisModuleEvent {
uint64_t id; /* REDISMODULE_EVENT_... defines. */
@@ -532,7 +542,11 @@ static const RedisModuleEvent
RedisModuleEvent_EventLoop = {
REDISMODULE_EVENT_EVENTLOOP,
1
-};
+ },
+ RedisModuleEvent_Config = {
+ REDISMODULE_EVENT_CONFIG,
+ 1
+ };
/* Those are values that are used for the 'subevent' callback argument. */
#define REDISMODULE_SUBEVENT_PERSISTENCE_RDB_START 0
@@ -574,6 +588,9 @@ static const RedisModuleEvent
#define REDISMODULE_SUBEVENT_MODULE_UNLOADED 1
#define _REDISMODULE_SUBEVENT_MODULE_NEXT 2
+#define REDISMODULE_SUBEVENT_CONFIG_CHANGE 0
+#define _REDISMODULE_SUBEVENT_CONFIG_NEXT 1
+
#define REDISMODULE_SUBEVENT_LOADING_PROGRESS_RDB 0
#define REDISMODULE_SUBEVENT_LOADING_PROGRESS_AOF 1
#define _REDISMODULE_SUBEVENT_LOADING_PROGRESS_NEXT 2
@@ -674,6 +691,17 @@ typedef struct RedisModuleModuleChange {
#define RedisModuleModuleChange RedisModuleModuleChangeV1
+#define REDISMODULE_CONFIGCHANGE_VERSION 1
+typedef struct RedisModuleConfigChange {
+ uint64_t version; /* Not used since this structure is never passed
+ from the module to the core right now. Here
+ for future compatibility. */
+ uint32_t num_changes; /* how many redis config options were changed */
+ const char **config_names; /* the config names that were changed */
+} RedisModuleConfigChangeV1;
+
+#define RedisModuleConfigChange RedisModuleConfigChangeV1
+
#define REDISMODULE_CRON_LOOP_VERSION 1
typedef struct RedisModuleCronLoopInfo {
uint64_t version; /* Not used since this structure is never passed
@@ -788,6 +816,15 @@ typedef void (*RedisModuleScanCB)(RedisModuleCtx *ctx, RedisModuleString *keynam
typedef void (*RedisModuleScanKeyCB)(RedisModuleKey *key, RedisModuleString *field, RedisModuleString *value, void *privdata);
typedef void (*RedisModuleUserChangedFunc) (uint64_t client_id, void *privdata);
typedef int (*RedisModuleDefragFunc)(RedisModuleDefragCtx *ctx);
+typedef RedisModuleString * (*RedisModuleConfigGetStringFunc)(const char *name, void *privdata);
+typedef long long (*RedisModuleConfigGetNumericFunc)(const char *name, void *privdata);
+typedef int (*RedisModuleConfigGetBoolFunc)(const char *name, void *privdata);
+typedef int (*RedisModuleConfigGetEnumFunc)(const char *name, void *privdata);
+typedef int (*RedisModuleConfigSetStringFunc)(const char *name, RedisModuleString *val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigSetNumericFunc)(const char *name, long long val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigSetBoolFunc)(const char *name, int val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigSetEnumFunc)(const char *name, int val, void *privdata, RedisModuleString **err);
+typedef int (*RedisModuleConfigApplyFunc)(RedisModuleCtx *ctx, void *privdata, RedisModuleString **err);
typedef struct RedisModuleTypeMethods {
uint64_t version;
@@ -1124,6 +1161,7 @@ REDISMODULE_API void (*RedisModule_ACLAddLogEntry)(RedisModuleCtx *ctx, RedisMod
REDISMODULE_API int (*RedisModule_AuthenticateClientWithACLUser)(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_AuthenticateClientWithUser)(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_DeauthenticateAndCloseClient)(RedisModuleCtx *ctx, uint64_t client_id) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_RedactClientCommandArgument)(RedisModuleCtx *ctx, int pos) REDISMODULE_ATTR;
REDISMODULE_API RedisModuleString * (*RedisModule_GetClientCertificate)(RedisModuleCtx *ctx, uint64_t id) REDISMODULE_ATTR;
REDISMODULE_API int *(*RedisModule_GetCommandKeys)(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, int *num_keys) REDISMODULE_ATTR;
REDISMODULE_API int *(*RedisModule_GetCommandKeysWithFlags)(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, int *num_keys, int **out_flags) REDISMODULE_ATTR;
@@ -1139,6 +1177,11 @@ REDISMODULE_API const RedisModuleString * (*RedisModule_GetKeyNameFromDefragCtx)
REDISMODULE_API int (*RedisModule_EventLoopAdd)(int fd, int mask, RedisModuleEventLoopFunc func, void *user_data) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_EventLoopDel)(int fd, int mask) REDISMODULE_ATTR;
REDISMODULE_API int (*RedisModule_EventLoopAddOneShot)(RedisModuleEventLoopOneShotFunc func, void *user_data) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_RegisterBoolConfig)(RedisModuleCtx *ctx, char *name, int default_val, unsigned int flags, RedisModuleConfigGetBoolFunc getfn, RedisModuleConfigSetBoolFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_RegisterNumericConfig)(RedisModuleCtx *ctx, const char *name, long long default_val, unsigned int flags, long long min, long long max, RedisModuleConfigGetNumericFunc getfn, RedisModuleConfigSetNumericFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_RegisterStringConfig)(RedisModuleCtx *ctx, const char *name, const char *default_val, unsigned int flags, RedisModuleConfigGetStringFunc getfn, RedisModuleConfigSetStringFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_RegisterEnumConfig)(RedisModuleCtx *ctx, const char *name, int default_val, unsigned int flags, const char **enum_values, const int *int_values, int num_enum_vals, RedisModuleConfigGetEnumFunc getfn, RedisModuleConfigSetEnumFunc setfn, RedisModuleConfigApplyFunc applyfn, void *privdata) REDISMODULE_ATTR;
+REDISMODULE_API int (*RedisModule_LoadConfigs)(RedisModuleCtx *ctx) REDISMODULE_ATTR;
#define RedisModule_IsAOFClient(id) ((id) == UINT64_MAX)
@@ -1447,6 +1490,7 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
REDISMODULE_GET_API(DeauthenticateAndCloseClient);
REDISMODULE_GET_API(AuthenticateClientWithACLUser);
REDISMODULE_GET_API(AuthenticateClientWithUser);
+ REDISMODULE_GET_API(RedactClientCommandArgument);
REDISMODULE_GET_API(GetClientCertificate);
REDISMODULE_GET_API(GetCommandKeys);
REDISMODULE_GET_API(GetCommandKeysWithFlags);
@@ -1462,6 +1506,11 @@ static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int
REDISMODULE_GET_API(EventLoopAdd);
REDISMODULE_GET_API(EventLoopDel);
REDISMODULE_GET_API(EventLoopAddOneShot);
+ REDISMODULE_GET_API(RegisterBoolConfig);
+ REDISMODULE_GET_API(RegisterNumericConfig);
+ REDISMODULE_GET_API(RegisterStringConfig);
+ REDISMODULE_GET_API(RegisterEnumConfig);
+ REDISMODULE_GET_API(LoadConfigs);
if (RedisModule_IsModuleNameBusy && RedisModule_IsModuleNameBusy(name)) return REDISMODULE_ERR;
RedisModule_SetModuleAttribs(ctx,name,ver,apiver);
diff --git a/src/replication.c b/src/replication.c
index 9c2d110b0..e9a754ab4 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -543,7 +543,8 @@ void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) {
}
void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc) {
- if (!(listLength(server.monitors) && !server.loading)) return;
+ /* Fast path to return if the monitors list is empty or the server is in loading. */
+ if (monitors == NULL || listLength(monitors) == 0 || server.loading) return;
listNode *ln;
listIter li;
int j;
@@ -1528,15 +1529,7 @@ void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData,
}
}
-/* This function is called at the end of every background saving,
- * or when the replication RDB transfer strategy is modified from
- * disk to socket or the other way around.
- *
- * The goal of this function is to handle slaves waiting for a successful
- * background saving in order to perform non-blocking synchronization, and
- * to schedule a new BGSAVE if there are slaves that attached while a
- * BGSAVE was in progress, but it was not a good one for replication (no
- * other slave was accumulating differences).
+/* This function is called at the end of every background saving.
*
* The argument bgsaveerr is C_OK if the background saving succeeded
* otherwise C_ERR is passed to the function.
@@ -3204,7 +3197,8 @@ void replicationCacheMaster(client *c) {
* offsets, including pending transactions, already populated arguments,
* pending outputs to the master. */
sdsclear(server.master->querybuf);
- sdsclear(server.master->pending_querybuf);
+ server.master->qb_pos = 0;
+ server.master->repl_applied = 0;
server.master->read_reploff = server.master->reploff;
if (c->flags & CLIENT_MULTI) discardTransaction(c);
listEmpty(c->reply);
@@ -3342,6 +3336,14 @@ void refreshGoodSlavesCount(void) {
server.repl_good_slaves_count = good;
}
+/* Return true if the status of good replicas is OK, otherwise false. */
+int checkGoodReplicasStatus(void) {
+ return server.masterhost || /* not a primary status should be OK */
+ !server.repl_min_slaves_max_lag || /* Min slave max lag not configured */
+ !server.repl_min_slaves_to_write || /* Min slave to write not configured */
+ server.repl_good_slaves_count >= server.repl_min_slaves_to_write; /* check if we have enough slaves */
+}
+
/* ----------------------- SYNCHRONOUS REPLICATION --------------------------
* Redis synchronous replication design can be summarized in points:
*
diff --git a/src/script.c b/src/script.c
index d78d9fd6b..990248c45 100644
--- a/src/script.c
+++ b/src/script.c
@@ -312,25 +312,7 @@ static int scriptVerifyACL(client *c, sds *err) {
int acl_retval = ACLCheckAllPerm(c, &acl_errpos);
if (acl_retval != ACL_OK) {
addACLLogEntry(c,acl_retval,ACL_LOG_CTX_LUA,acl_errpos,NULL,NULL);
- switch (acl_retval) {
- case ACL_DENIED_CMD:
- *err = sdsnew("The user executing the script can't run this "
- "command or subcommand");
- break;
- case ACL_DENIED_KEY:
- *err = sdsnew("The user executing the script can't access "
- "at least one of the keys mentioned in the "
- "command arguments");
- break;
- case ACL_DENIED_CHANNEL:
- *err = sdsnew("The user executing the script can't publish "
- "to the channel mentioned in the command");
- break;
- default:
- *err = sdsnew("The user executing the script is lacking the "
- "permissions for the command");
- break;
- }
+ *err = sdscatfmt(sdsempty(), "The user executing the script %s", getAclErrorMessage(acl_retval));
return C_ERR;
}
return C_OK;
@@ -360,14 +342,7 @@ static int scriptVerifyWriteCommandAllow(scriptRunCtx *run_ctx, char **err) {
}
if (deny_write_type != DISK_ERROR_TYPE_NONE) {
- if (deny_write_type == DISK_ERROR_TYPE_RDB) {
- *err = sdsdup(shared.bgsaveerr->ptr);
- } else {
- *err = sdsempty();
- *err = sdscatfmt(*err,
- "-MISCONF Errors writing to the AOF file: %s\r\n",
- strerror(server.aof_last_write_errno));
- }
+ *err = writeCommandsGetDiskErrorMessage(deny_write_type);
return C_ERR;
}
@@ -375,11 +350,7 @@ static int scriptVerifyWriteCommandAllow(scriptRunCtx *run_ctx, char **err) {
* user configured the min-slaves-to-write option. Note this only reachable
* for Eval scripts that didn't declare flags, see the other check in
* scriptPrepareForRun */
- if (server.masterhost == NULL &&
- server.repl_min_slaves_max_lag &&
- server.repl_min_slaves_to_write &&
- server.repl_good_slaves_count < server.repl_min_slaves_to_write)
- {
+ if (!checkGoodReplicasStatus()) {
*err = sdsdup(shared.noreplicaserr->ptr);
return C_ERR;
}
@@ -387,6 +358,16 @@ static int scriptVerifyWriteCommandAllow(scriptRunCtx *run_ctx, char **err) {
return C_OK;
}
+static int scriptVerifyMayReplicate(scriptRunCtx *run_ctx, char **err) {
+ if (run_ctx->c->cmd->flags & CMD_MAY_REPLICATE &&
+ server.client_pause_type == CLIENT_PAUSE_WRITE) {
+ *err = sdsnew("May-replicate commands are not allowed when client pause write.");
+ return C_ERR;
+ }
+
+ return C_OK;
+}
+
static int scriptVerifyOOM(scriptRunCtx *run_ctx, char **err) {
if (run_ctx->flags & SCRIPT_ALLOW_OOM) {
/* Allow running any command even if OOM reached */
@@ -528,6 +509,10 @@ void scriptCall(scriptRunCtx *run_ctx, robj* *argv, int argc, sds *err) {
goto error;
}
+ if (scriptVerifyMayReplicate(run_ctx, err) != C_OK) {
+ goto error;
+ }
+
if (scriptVerifyOOM(run_ctx, err) != C_OK) {
goto error;
}
diff --git a/src/sentinel.c b/src/sentinel.c
index eb37e5ede..3ad8f902b 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -391,7 +391,6 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd
sentinelRedisInstance *sentinelGetMasterByName(char *name);
char *sentinelGetSubjectiveLeader(sentinelRedisInstance *master);
char *sentinelGetObjectiveLeader(sentinelRedisInstance *master);
-int yesnotoi(char *s);
void instanceLinkConnectionError(const redisAsyncContext *c);
const char *sentinelRedisInstanceTypeStr(sentinelRedisInstance *ri);
void sentinelAbortFailover(sentinelRedisInstance *ri);
@@ -1134,6 +1133,27 @@ int sentinelTryConnectionSharing(sentinelRedisInstance *ri) {
return C_ERR;
}
+/* Disconnect the relevant master and its replicas. */
+void dropInstanceConnections(sentinelRedisInstance *ri) {
+ serverAssert(ri->flags & SRI_MASTER);
+
+ /* Disconnect with the master. */
+ instanceLinkCloseConnection(ri->link, ri->link->cc);
+ instanceLinkCloseConnection(ri->link, ri->link->pc);
+
+ /* Disconnect with all replicas. */
+ dictIterator *di;
+ dictEntry *de;
+ sentinelRedisInstance *repl_ri;
+ di = dictGetIterator(ri->slaves);
+ while ((de = dictNext(di)) != NULL) {
+ repl_ri = dictGetVal(de);
+ instanceLinkCloseConnection(repl_ri->link, repl_ri->link->cc);
+ instanceLinkCloseConnection(repl_ri->link, repl_ri->link->pc);
+ }
+ dictReleaseIterator(di);
+}
+
/* Drop all connections to other sentinels. Returns the number of connections
* dropped.*/
int sentinelDropConnections(void) {
@@ -4063,7 +4083,7 @@ NULL
dictReleaseIterator(di);
if (masters_local != sentinel.masters) dictRelease(masters_local);
} else if (!strcasecmp(c->argv[1]->ptr,"simulate-failure")) {
- /* SENTINEL SIMULATE-FAILURE <flag> <flag> ... <flag> */
+ /* SENTINEL SIMULATE-FAILURE [CRASH-AFTER-ELECTION] [CRASH-AFTER-PROMOTION] [HELP] */
int j;
sentinel.simfailure_flags = SENTINEL_SIMFAILURE_NONE;
@@ -4297,6 +4317,7 @@ void sentinelSetCommand(client *c) {
char *value = c->argv[++j]->ptr;
sdsfree(ri->auth_pass);
ri->auth_pass = strlen(value) ? sdsnew(value) : NULL;
+ dropInstanceConnections(ri);
changes++;
redacted = 1;
} else if (!strcasecmp(option,"auth-user") && moreargs > 0) {
@@ -4304,6 +4325,7 @@ void sentinelSetCommand(client *c) {
char *value = c->argv[++j]->ptr;
sdsfree(ri->auth_user);
ri->auth_user = strlen(value) ? sdsnew(value) : NULL;
+ dropInstanceConnections(ri);
changes++;
} else if (!strcasecmp(option,"quorum") && moreargs > 0) {
/* quorum <count> */
diff --git a/src/server.c b/src/server.c
index 00c279837..84f21fed3 100644
--- a/src/server.c
+++ b/src/server.c
@@ -526,6 +526,30 @@ dictType stringSetDictType = {
NULL /* allow to expand */
};
+/* Dict for case-insensitive search using null-terminated C strings.
+ * The key and value do not have a destructor. */
+dictType externalStringType = {
+ distCStrCaseHash, /* hash function */
+ NULL, /* key dup */
+ NULL, /* val dup */
+ distCStrKeyCaseCompare, /* key compare */
+ NULL, /* key destructor */
+ NULL, /* val destructor */
+ NULL /* allow to expand */
+};
+
+/* Dict for case-insensitive search using sds objects with a zmalloc
+ * allocated object as the value. */
+dictType sdsHashDictType = {
+ dictSdsCaseHash, /* hash function */
+ NULL, /* key dup */
+ NULL, /* val dup */
+ dictSdsKeyCaseCompare, /* key compare */
+ dictSdsDestructor, /* key destructor */
+ dictVanillaFree, /* val destructor */
+ NULL /* allow to expand */
+};
+
int htNeedsResize(dict *dict) {
long long size, used;
@@ -686,23 +710,6 @@ int clientsCronResizeQueryBuffer(client *c) {
* which ever is bigger. */
if (c->bulklen != -1 && (size_t)c->bulklen > c->querybuf_peak)
c->querybuf_peak = c->bulklen;
-
- /* Clients representing masters also use a "pending query buffer" that
- * is the yet not applied part of the stream we are reading. Such buffer
- * also needs resizing from time to time, otherwise after a very large
- * transfer (a huge value or a big MIGRATE operation) it will keep using
- * a lot of memory. */
- if (c->flags & CLIENT_MASTER) {
- /* There are two conditions to resize the pending query buffer:
- * 1) Pending Query buffer is > LIMIT_PENDING_QUERYBUF.
- * 2) Used length is smaller than pending_querybuf_size/2 */
- size_t pending_querybuf_size = sdsAllocSize(c->pending_querybuf);
- if(pending_querybuf_size > LIMIT_PENDING_QUERYBUF &&
- sdslen(c->pending_querybuf) < (pending_querybuf_size/2))
- {
- c->pending_querybuf = sdsRemoveFreeSpace(c->pending_querybuf);
- }
- }
return 0;
}
@@ -720,6 +727,10 @@ int clientsCronResizeOutputBuffer(client *c, mstime_t now_ms) {
const size_t buffer_target_shrink_size = c->buf_usable_size/2;
const size_t buffer_target_expand_size = c->buf_usable_size*2;
+ /* in case the resizing is disabled return immediately */
+ if(!server.reply_buffer_resizing_enabled)
+ return 0;
+
if (buffer_target_shrink_size >= PROTO_REPLY_MIN_BYTES &&
c->buf_peak < buffer_target_shrink_size )
{
@@ -786,7 +797,7 @@ int clientsCronTrackExpansiveClients(client *c, int time_idx) {
* client's memory usage doubles it's moved up to the next bucket, if it's
* halved we move it down a bucket.
* For more details see CLIENT_MEM_USAGE_BUCKETS documentation in server.h. */
-clientMemUsageBucket *getMemUsageBucket(size_t mem) {
+static inline clientMemUsageBucket *getMemUsageBucket(size_t mem) {
int size_in_bits = 8*(int)sizeof(mem);
int clz = mem > 0 ? __builtin_clzl(mem) : size_in_bits;
int bucket_idx = size_in_bits - clz;
@@ -803,46 +814,34 @@ clientMemUsageBucket *getMemUsageBucket(size_t mem) {
* and also from the clientsCron. We call it from the cron so we have updated
* stats for non CLIENT_TYPE_NORMAL/PUBSUB clients and in case a configuration
* change requires us to evict a non-active client.
+ *
+ * This also adds the client to the correct memory usage bucket. Each bucket contains
+ * all clients with roughly the same amount of memory. This way we group
+ * together clients consuming about the same amount of memory and can quickly
+ * free them in case we reach maxmemory-clients (client eviction).
*/
int updateClientMemUsage(client *c) {
+ serverAssert(io_threads_op == IO_THREADS_OP_IDLE);
size_t mem = getClientMemoryUsage(c, NULL);
int type = getClientType(c);
/* Remove the old value of the memory used by the client from the old
* category, and add it back. */
- atomicDecr(server.stat_clients_type_memory[c->last_memory_type], c->last_memory_usage);
- atomicIncr(server.stat_clients_type_memory[type], mem);
-
- /* Remember what we added and where, to remove it next time. */
- c->last_memory_usage = mem;
- c->last_memory_type = type;
-
- /* Update client mem usage bucket only when we're not in the context of an
- * IO thread. See updateClientMemUsageBucket() for details. */
- if (io_threads_op == IO_THREADS_OP_IDLE)
- updateClientMemUsageBucket(c);
-
- return 0;
-}
+ if (type != c->last_memory_type) {
+ server.stat_clients_type_memory[c->last_memory_type] -= c->last_memory_usage;
+ server.stat_clients_type_memory[type] += mem;
+ c->last_memory_type = type;
+ } else {
+ server.stat_clients_type_memory[type] += mem - c->last_memory_usage;
+ }
-/* Adds the client to the correct memory usage bucket. Each bucket contains
- * all clients with roughly the same amount of memory. This way we group
- * together clients consuming about the same amount of memory and can quickly
- * free them in case we reach maxmemory-clients (client eviction).
- * Note that in case of io-threads enabled we have to call this function only
- * after the fan-in phase (when no io-threads are working) because the bucket
- * lists are global. The io-threads themselves track per-client memory usage in
- * updateClientMemUsage(). Here we update the clients to each bucket when all
- * io-threads are done (both for read and write io-threading). */
-void updateClientMemUsageBucket(client *c) {
- serverAssert(io_threads_op == IO_THREADS_OP_IDLE);
int allow_eviction =
- (c->last_memory_type == CLIENT_TYPE_NORMAL || c->last_memory_type == CLIENT_TYPE_PUBSUB) &&
+ (type == CLIENT_TYPE_NORMAL || type == CLIENT_TYPE_PUBSUB) &&
!(c->flags & CLIENT_NO_EVICT);
/* Update the client in the mem usage buckets */
if (c->mem_usage_bucket) {
- c->mem_usage_bucket->mem_usage_sum -= c->last_memory_usage_on_bucket_update;
+ c->mem_usage_bucket->mem_usage_sum -= c->last_memory_usage;
/* If this client can't be evicted then remove it from the mem usage
* buckets */
if (!allow_eviction) {
@@ -852,8 +851,8 @@ void updateClientMemUsageBucket(client *c) {
}
}
if (allow_eviction) {
- clientMemUsageBucket *bucket = getMemUsageBucket(c->last_memory_usage);
- bucket->mem_usage_sum += c->last_memory_usage;
+ clientMemUsageBucket *bucket = getMemUsageBucket(mem);
+ bucket->mem_usage_sum += mem;
if (bucket != c->mem_usage_bucket) {
if (c->mem_usage_bucket)
listDelNode(c->mem_usage_bucket->clients,
@@ -864,7 +863,10 @@ void updateClientMemUsageBucket(client *c) {
}
}
- c->last_memory_usage_on_bucket_update = c->last_memory_usage;
+ /* Remember what we added, to remove it next time. */
+ c->last_memory_usage = mem;
+
+ return 0;
}
/* Return the max samples in the memory usage of clients tracked by
@@ -1410,7 +1412,7 @@ void blockingOperationEnds() {
}
}
-/* This function fill in the role of serverCron during RDB or AOF loading, and
+/* This function fills in the role of serverCron during RDB or AOF loading, and
* also during blocked scripts.
* It attempts to do its duties at a similar rate as the configured server.hz,
* and updates cronloops variable so that similarly to serverCron, the
@@ -2405,6 +2407,7 @@ void initServer(void) {
server.thp_enabled = 0;
server.cluster_drop_packet_filter = -1;
server.reply_buffer_peak_reset_time = REPLY_BUFFER_DEFAULT_PEAK_RESET_TIME;
+ server.reply_buffer_resizing_enabled = 1;
resetReplicationBuffer();
if ((server.tls_port || server.tls_replication || server.tls_cluster)
@@ -3406,6 +3409,16 @@ void rejectCommand(client *c, robj *reply) {
}
}
+void rejectCommandSds(client *c, sds s) {
+ if (c->cmd && c->cmd->proc == execCommand) {
+ execCommandAbort(c, s);
+ sdsfree(s);
+ } else {
+ /* The following frees 's'. */
+ addReplyErrorSds(c, s);
+ }
+}
+
void rejectCommandFormat(client *c, const char *fmt, ...) {
if (c->cmd) c->cmd->rejected_calls++;
flagTransaction(c);
@@ -3416,13 +3429,7 @@ void rejectCommandFormat(client *c, const char *fmt, ...) {
/* Make sure there are no newlines in the string, otherwise invalid protocol
* is emitted (The args come from the user, they may contain any character). */
sdsmapchars(s, "\r\n", " ", 2);
- if (c->cmd && c->cmd->proc == execCommand) {
- execCommandAbort(c, s);
- sdsfree(s);
- } else {
- /* The following frees 's'. */
- addReplyErrorSds(c, s);
- }
+ rejectCommandSds(c, s);
}
/* This is called after a command in call, we can do some maintenance job in it. */
@@ -3705,23 +3712,14 @@ int processCommand(client *c) {
server.masterhost == NULL &&
(is_write_command ||c->cmd->proc == pingCommand))
{
- if (deny_write_type == DISK_ERROR_TYPE_RDB)
- rejectCommand(c, shared.bgsaveerr);
- else
- rejectCommandFormat(c,
- "-MISCONF Errors writing to the AOF file: %s",
- strerror(server.aof_last_write_errno));
+ sds err = writeCommandsGetDiskErrorMessage(deny_write_type);
+ rejectCommandSds(c, err);
return C_OK;
}
/* Don't accept write commands if there are not enough good slaves and
* user configured the min-slaves-to-write option. */
- if (server.masterhost == NULL &&
- server.repl_min_slaves_to_write &&
- server.repl_min_slaves_max_lag &&
- is_write_command &&
- server.repl_good_slaves_count < server.repl_min_slaves_to_write)
- {
+ if (is_write_command && !checkGoodReplicasStatus()) {
rejectCommand(c, shared.noreplicaserr);
return C_OK;
}
@@ -4146,6 +4144,18 @@ int writeCommandsDeniedByDiskError(void) {
return DISK_ERROR_TYPE_NONE;
}
+sds writeCommandsGetDiskErrorMessage(int error_code) {
+ sds ret = NULL;
+ if (error_code == DISK_ERROR_TYPE_RDB) {
+ ret = sdsdup(shared.bgsaveerr->ptr);
+ } else {
+ ret = sdscatfmt(sdsempty(),
+ "-MISCONF Errors writing to the AOF file: %s",
+ strerror(server.aof_last_write_errno));
+ }
+ return ret;
+}
+
/* The PING command. It works in a different way if the client is in
* in Pub/Sub mode. */
void pingCommand(client *c) {
@@ -6388,9 +6398,9 @@ void dismissMemory(void* ptr, size_t size_hint) {
/* Dismiss big chunks of memory inside a client structure, see dismissMemory() */
void dismissClientMemory(client *c) {
- /* Dismiss client query buffer. */
+ /* Dismiss client query buffer and static reply buffer. */
+ dismissMemory(c->buf, c->buf_usable_size);
dismissSds(c->querybuf);
- dismissSds(c->pending_querybuf);
/* Dismiss argv array only if we estimate it contains a big buffer. */
if (c->argc && c->argv_len_sum/c->argc >= server.page_size) {
for (int i = 0; i < c->argc; i++) {
@@ -6414,9 +6424,6 @@ void dismissClientMemory(client *c) {
if (bulk) dismissMemory(bulk, bulk->size);
}
}
-
- /* The client struct has a big static reply buffer in it. */
- dismissMemory(c, 0);
}
/* In the child process, we don't need some buffers anymore, and these are
diff --git a/src/server.h b/src/server.h
index 4da7a010f..edfea2226 100644
--- a/src/server.h
+++ b/src/server.h
@@ -103,7 +103,6 @@ typedef long long ustime_t; /* microsecond time type. */
#define CONFIG_MIN_HZ 1
#define CONFIG_MAX_HZ 500
#define MAX_CLIENTS_PER_CLOCK_TICK 200 /* HZ is adapted based on that. */
-#define CONFIG_MAX_LINE 1024
#define CRON_DBS_PER_CALL 16
#define NET_MAX_WRITES_PER_EVENT (1024*64)
#define PROTO_SHARED_SELECT_CMDS 10
@@ -165,11 +164,8 @@ typedef long long ustime_t; /* microsecond time type. */
#define PROTO_MBULK_BIG_ARG (1024*32)
#define PROTO_RESIZE_THRESHOLD (1024*32) /* Threshold for determining whether to resize query buffer */
#define PROTO_REPLY_MIN_BYTES (1024) /* the lower limit on reply buffer size */
-#define LONG_STR_SIZE 21 /* Bytes needed for long -> str + '\0' */
#define REDIS_AUTOSYNC_BYTES (1024*1024*4) /* Sync file every 4MB. */
-#define LIMIT_PENDING_QUERYBUF (4*1024*1024) /* 4mb */
-
#define REPLY_BUFFER_DEFAULT_PEAK_RESET_TIME 5000 /* 5 seconds */
/* When configuring the server eventloop, we setup it so that the total number
@@ -764,6 +760,8 @@ struct RedisModule {
list *usedby; /* List of modules using APIs from this one. */
list *using; /* List of modules we use some APIs of. */
list *filters; /* List of filters the module has registered. */
+ list *module_configs; /* List of configurations the module has registered */
+ int configs_initialized; /* Have the module configurations been initialized? */
int in_call; /* RM_Call() nesting level */
int in_hook; /* Hooks callback nesting level for this module (0 or 1). */
int options; /* Module options and capabilities. */
@@ -1084,10 +1082,6 @@ typedef struct client {
robj *name; /* As set by CLIENT SETNAME. */
sds querybuf; /* Buffer we use to accumulate client queries. */
size_t qb_pos; /* The position we have read in querybuf. */
- sds pending_querybuf; /* If this client is flagged as master, this buffer
- represents the yet not applied portion of the
- replication stream that we are receiving from
- the master. */
size_t querybuf_peak; /* Recent (100ms or more) peak of querybuf size. */
int argc; /* Num of arguments of current command. */
robj **argv; /* Arguments of current command. */
@@ -1124,6 +1118,7 @@ typedef struct client {
sds replpreamble; /* Replication DB preamble. */
long long read_reploff; /* Read replication offset if this is a master. */
long long reploff; /* Applied replication offset if this is a master. */
+ long long repl_applied; /* Applied replication data count in querybuf, if this is a replica. */
long long repl_ack_off; /* Replication ack offset, if this is a slave. */
long long repl_ack_time;/* Replication ack time, if this is a slave. */
long long repl_last_partial_write; /* The last time the server did a partial write from the RDB child pipe to this replica */
@@ -1173,7 +1168,6 @@ typedef struct client {
size_t last_memory_usage;
int last_memory_type;
- size_t last_memory_usage_on_bucket_update;
listNode *mem_usage_bucket_node;
clientMemUsageBucket *mem_usage_bucket;
@@ -1482,6 +1476,7 @@ struct redisServer {
dict *moduleapi; /* Exported core APIs dictionary for modules. */
dict *sharedapi; /* Like moduleapi but containing the APIs that
modules share with each other. */
+ dict *module_configs_queue; /* Dict that stores module configurations from .conf file until after modules are loaded during startup or arguments to loadex. */
list *loadmodule_queue; /* List of modules to load at startup. */
int module_pipe[2]; /* Pipe used to awake the event loop by module threads. */
pid_t child_pid; /* PID of current child */
@@ -1584,8 +1579,8 @@ struct redisServer {
size_t stat_aof_cow_bytes; /* Copy on write bytes during AOF rewrite. */
size_t stat_module_cow_bytes; /* Copy on write bytes during module fork. */
double stat_module_progress; /* Module save progress. */
- redisAtomic size_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */
- size_t stat_cluster_links_memory;/* Mem usage by cluster links */
+ size_t stat_clients_type_memory[CLIENT_TYPE_COUNT];/* Mem usage by type */
+ size_t stat_cluster_links_memory; /* Mem usage by cluster links */
long long stat_unexpected_error_replies; /* Number of unexpected (aof-loading, replica to master, etc.) error replies */
long long stat_total_error_replies; /* Total number of issued error replies ( command + rejected errors ) */
long long stat_dump_payload_sanitizations; /* Number deep dump payloads integrity validations. */
@@ -1697,7 +1692,7 @@ struct redisServer {
int rdb_pipe_numconns; /* target of diskless rdb fork child. */
int rdb_pipe_numconns_writing; /* Number of rdb conns with pending writes. */
char *rdb_pipe_buff; /* In diskless replication, this buffer holds data */
- int rdb_pipe_bufflen; /* that was read from the the rdb pipe. */
+ int rdb_pipe_bufflen; /* that was read from the rdb pipe. */
int rdb_key_save_delay; /* Delay in microseconds between keys while
* writing the RDB. (for testings). negative
* value means fractions of microseconds (on average). */
@@ -1851,7 +1846,7 @@ struct redisServer {
int cluster_slave_no_failover; /* Prevent slave from starting a failover
if the master is in failure state. */
char *cluster_announce_ip; /* IP address to announce on cluster bus. */
- char *cluster_announce_hostname; /* IP address to announce on cluster bus. */
+ char *cluster_announce_hostname; /* hostname to announce on cluster bus. */
int cluster_preferred_endpoint_type; /* Use the announced hostname when available. */
int cluster_announce_port; /* base port to announce on cluster bus. */
int cluster_announce_tls_port; /* TLS port to announce on cluster bus. */
@@ -1914,6 +1909,7 @@ struct redisServer {
int cluster_allow_pubsubshard_when_down; /* Is pubsubshard allowed when the cluster
is down, doesn't affect pubsub global. */
long reply_buffer_peak_reset_time; /* The amount of time (in milliseconds) to wait between reply buffer peak resets */
+ int reply_buffer_resizing_enabled; /* Is reply buffer resizing enabled (1 by default) */
};
#define MAX_KEYS_BUFFER 256
@@ -2326,6 +2322,8 @@ extern dictType dbDictType;
extern double R_Zero, R_PosInf, R_NegInf, R_Nan;
extern dictType hashDictType;
extern dictType stringSetDictType;
+extern dictType externalStringType;
+extern dictType sdsHashDictType;
extern dictType dbExpiresDictType;
extern dictType modulesDictType;
extern dictType sdsReplyDictType;
@@ -2343,7 +2341,8 @@ int populateArgsStructure(struct redisCommandArg *args);
void moduleInitModulesSystem(void);
void moduleInitModulesSystemLast(void);
void modulesCron(void);
-int moduleLoad(const char *path, void **argv, int argc);
+int moduleLoad(const char *path, void **argv, int argc, int is_loadex);
+int moduleUnload(sds name);
void moduleLoadFromQueue(void);
int moduleGetCommandKeysViaAPI(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result);
int moduleGetCommandChannelsViaAPI(struct redisCommand *cmd, robj **argv, int argc, getKeysResult *result);
@@ -2649,6 +2648,7 @@ void resizeReplicationBacklog();
void replicationSetMaster(char *ip, int port);
void replicationUnsetMaster(void);
void refreshGoodSlavesCount(void);
+int checkGoodReplicasStatus(void);
void processClientsWaitingReplicas(void);
void unblockClientWaitingReplicas(client *c);
int replicationCountAcksByOffset(long long offset);
@@ -2689,6 +2689,7 @@ int allPersistenceDisabled(void);
#define DISK_ERROR_TYPE_RDB 2 /* Don't accept writes: RDB errors. */
#define DISK_ERROR_TYPE_NONE 0 /* No problems, we can accept writes. */
int writeCommandsDeniedByDiskError(void);
+sds writeCommandsGetDiskErrorMessage(int);
/* RDB persistence */
#include "rdb.h"
@@ -2757,6 +2758,7 @@ user *ACLGetUserByName(const char *name, size_t namelen);
int ACLUserCheckKeyPerm(user *u, const char *key, int keylen, int flags);
int ACLUserCheckChannelPerm(user *u, sds channel, int literal);
int ACLCheckAllUserCommandPerm(user *u, struct redisCommand *cmd, robj **argv, int argc, int *idxptr);
+int ACLUserCheckCmdWithUnrestrictedKeyAccess(user *u, struct redisCommand *cmd, robj **argv, int argc, int flags);
int ACLCheckAllPerm(client *c, int *idxptr);
int ACLSetUser(user *u, const char *op, ssize_t oplen);
uint64_t ACLGetCommandCategoryFlagByName(const char *name);
@@ -2769,6 +2771,7 @@ void addReplyCommandCategories(client *c, struct redisCommand *cmd);
user *ACLCreateUnlinkedUser();
void ACLFreeUserAndKillClients(user *u);
void addACLLogEntry(client *c, int reason, int context, int argpos, sds username, sds object);
+const char* getAclErrorMessage(int acl_res);
void ACLUpdateDefaultUserPassword(sds password);
/* Sorted sets data type */
@@ -2844,7 +2847,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev
size_t freeMemoryGetNotCountedMemory();
int overMaxmemoryAfterAlloc(size_t moremem);
int processCommand(client *c);
-int processPendingCommandsAndResetClient(client *c);
+int processPendingCommandAndInputBuffer(client *c);
void setupSignalHandlers(void);
void removeSignalHandlers(void);
int createSocketAcceptHandler(socketFds *sfd, aeFileProc *accept_handler);
@@ -2971,6 +2974,40 @@ int keyspaceEventsStringToFlags(char *classes);
sds keyspaceEventsFlagsToString(int flags);
/* Configuration */
+/* Configuration Flags */
+#define MODIFIABLE_CONFIG 0 /* This is the implied default for a standard
+ * config, which is mutable. */
+#define IMMUTABLE_CONFIG (1ULL<<0) /* Can this value only be set at startup? */
+#define SENSITIVE_CONFIG (1ULL<<1) /* Does this value contain sensitive information */
+#define DEBUG_CONFIG (1ULL<<2) /* Values that are useful for debugging. */
+#define MULTI_ARG_CONFIG (1ULL<<3) /* This config receives multiple arguments. */
+#define HIDDEN_CONFIG (1ULL<<4) /* This config is hidden in `config get <pattern>` (used for tests/debugging) */
+#define PROTECTED_CONFIG (1ULL<<5) /* Becomes immutable if enable-protected-configs is enabled. */
+#define DENY_LOADING_CONFIG (1ULL<<6) /* This config is forbidden during loading. */
+#define ALIAS_CONFIG (1ULL<<7) /* For configs with multiple names, this flag is set on the alias. */
+#define MODULE_CONFIG (1ULL<<8) /* This config is a module config */
+
+#define INTEGER_CONFIG 0 /* No flags means a simple integer configuration */
+#define MEMORY_CONFIG (1<<0) /* Indicates if this value can be loaded as a memory value */
+#define PERCENT_CONFIG (1<<1) /* Indicates if this value can be loaded as a percent (and stored as a negative int) */
+#define OCTAL_CONFIG (1<<2) /* This value uses octal representation */
+
+/* Enum Configs contain an array of configEnum objects that match a string with an integer. */
+typedef struct configEnum {
+ char *name;
+ int val;
+} configEnum;
+
+/* Type of configuration. */
+typedef enum {
+ BOOL_CONFIG,
+ NUMERIC_CONFIG,
+ STRING_CONFIG,
+ SDS_CONFIG,
+ ENUM_CONFIG,
+ SPECIAL_CONFIG,
+} configType;
+
void loadServerConfig(char *filename, char config_from_stdin, char *options);
void appendServerSaveParams(time_t seconds, int changes);
void resetServerSaveParams(void);
@@ -2979,9 +3016,29 @@ void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *opti
void rewriteConfigMarkAsProcessed(struct rewriteConfigState *state, const char *option);
int rewriteConfig(char *path, int force_write);
void initConfigValues();
+void removeConfig(sds name);
sds getConfigDebugInfo();
int allowProtectedAction(int config, client *c);
+/* Module Configuration */
+typedef struct ModuleConfig ModuleConfig;
+int performModuleConfigSetFromName(sds name, sds value, const char **err);
+int performModuleConfigSetDefaultFromName(sds name, const char **err);
+void addModuleBoolConfig(const char *module_name, const char *name, int flags, void *privdata, int default_val);
+void addModuleStringConfig(const char *module_name, const char *name, int flags, void *privdata, sds default_val);
+void addModuleEnumConfig(const char *module_name, const char *name, int flags, void *privdata, int default_val, configEnum *enum_vals);
+void addModuleNumericConfig(const char *module_name, const char *name, int flags, void *privdata, long long default_val, int conf_flags, long long lower, long long upper);
+void addModuleConfigApply(list *module_configs, ModuleConfig *module_config);
+int moduleConfigApplyConfig(list *module_configs, const char **err, const char **err_arg_name);
+int getModuleBoolConfig(ModuleConfig *module_config);
+int setModuleBoolConfig(ModuleConfig *config, int val, const char **err);
+sds getModuleStringConfig(ModuleConfig *module_config);
+int setModuleStringConfig(ModuleConfig *config, sds strval, const char **err);
+int getModuleEnumConfig(ModuleConfig *module_config);
+int setModuleEnumConfig(ModuleConfig *config, int val, const char **err);
+long long getModuleNumericConfig(ModuleConfig *module_config);
+int setModuleNumericConfig(ModuleConfig *config, long long val, const char **err);
+
/* db.c -- Keyspace access API */
int removeExpire(redisDb *db, robj *key);
void deleteExpiredKeyAndPropagate(redisDb *db, robj *keyobj);
@@ -3128,6 +3185,7 @@ void handleClientsBlockedOnKeys(void);
void signalKeyAsReady(redisDb *db, robj *key, int type);
void blockForKeys(client *c, int btype, robj **keys, int numkeys, long count, mstime_t timeout, robj *target, struct blockPos *blockpos, streamID *ids);
void updateStatsOnUnblock(client *c, long blocked_us, long reply_us, int had_errors);
+void scanDatabaseForDeletedStreams(redisDb *emptied, redisDb *replaced_with);
/* timeout.c -- Blocked clients timeout and connections timeout. */
void addClientToTimeoutTable(client *c);
diff --git a/src/sort.c b/src/sort.c
index 153d6ba79..62e7ad701 100644
--- a/src/sort.c
+++ b/src/sort.c
@@ -197,13 +197,15 @@ void sortCommandGeneric(client *c, int readonly) {
int syntax_error = 0;
robj *sortval, *sortby = NULL, *storekey = NULL;
redisSortObject *vector; /* Resulting vector to sort */
-
+ int user_has_full_key_access = 0; /* ACL - used in order to verify 'get' and 'by' options can be used */
/* Create a list of operations to perform for every sorted element.
* Operations can be GET */
operations = listCreate();
listSetFreeMethod(operations,zfree);
j = 2; /* options start at argv[2] */
+ user_has_full_key_access = ACLUserCheckCmdWithUnrestrictedKeyAccess(c->user, c->cmd, c->argv, c->argc, CMD_KEY_ACCESS);
+
/* The SORT command has an SQL-alike syntax, parse it */
while(j < c->argc) {
int leftargs = c->argc-j-1;
@@ -233,13 +235,20 @@ void sortCommandGeneric(client *c, int readonly) {
if (strchr(c->argv[j+1]->ptr,'*') == NULL) {
dontsort = 1;
} else {
- /* If BY is specified with a real patter, we can't accept
+ /* If BY is specified with a real pattern, we can't accept
* it in cluster mode. */
if (server.cluster_enabled) {
addReplyError(c,"BY option of SORT denied in Cluster mode.");
syntax_error++;
break;
}
+ /* If BY is specified with a real pattern, we can't accept
+ * it if no full ACL key access is applied for this command. */
+ if (!user_has_full_key_access) {
+ addReplyError(c,"BY option of SORT denied due to insufficient ACL permissions.");
+ syntax_error++;
+ break;
+ }
}
j++;
} else if (!strcasecmp(c->argv[j]->ptr,"get") && leftargs >= 1) {
@@ -248,6 +257,11 @@ void sortCommandGeneric(client *c, int readonly) {
syntax_error++;
break;
}
+ if (!user_has_full_key_access) {
+ addReplyError(c,"GET option of SORT denied due to insufficient ACL permissions.");
+ syntax_error++;
+ break;
+ }
listAddNodeTail(operations,createSortOperation(
SORT_OP_GET,c->argv[j+1]));
getop++;
diff --git a/src/t_zset.c b/src/t_zset.c
index bc947c965..77ca7c83a 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -120,8 +120,9 @@ void zslFree(zskiplist *zsl) {
* (both inclusive), with a powerlaw-alike distribution where higher
* levels are less likely to be returned. */
int zslRandomLevel(void) {
+ static const int threshold = ZSKIPLIST_P*RAND_MAX;
int level = 1;
- while ((random()&0xFFFF) < (ZSKIPLIST_P * 0xFFFF))
+ while (random() < threshold)
level += 1;
return (level<ZSKIPLIST_MAXLEVEL) ? level : ZSKIPLIST_MAXLEVEL;
}
@@ -720,8 +721,8 @@ zskiplistNode *zslLastInLexRange(zskiplist *zsl, zlexrangespec *range) {
double zzlStrtod(unsigned char *vstr, unsigned int vlen) {
char buf[128];
- if (vlen > sizeof(buf))
- vlen = sizeof(buf);
+ if (vlen > sizeof(buf) - 1)
+ vlen = sizeof(buf) - 1;
memcpy(buf,vstr,vlen);
buf[vlen] = '\0';
return strtod(buf,NULL);
@@ -1026,7 +1027,7 @@ unsigned char *zzlDelete(unsigned char *zl, unsigned char *eptr) {
unsigned char *zzlInsertAt(unsigned char *zl, unsigned char *eptr, sds ele, double score) {
unsigned char *sptr;
- char scorebuf[128];
+ char scorebuf[MAX_D2STRING_CHARS];
int scorelen;
scorelen = d2string(scorebuf,sizeof(scorebuf),score);
diff --git a/src/util.c b/src/util.c
index 75086db42..45591d9f2 100644
--- a/src/util.c
+++ b/src/util.c
@@ -405,8 +405,8 @@ int string2ll(const char *s, size_t slen, long long *value) {
int negative = 0;
unsigned long long v;
- /* A zero length string is not a valid number. */
- if (plen == slen)
+ /* A string of zero length or excessive length is not a valid number. */
+ if (plen == slen || slen >= LONG_STR_SIZE)
return 0;
/* Special case: first and only digit is 0. */
diff --git a/src/util.h b/src/util.h
index 7dce8ff69..5ea71fecd 100644
--- a/src/util.h
+++ b/src/util.h
@@ -34,10 +34,22 @@
#include "sds.h"
/* The maximum number of characters needed to represent a long double
- * as a string (long double has a huge range).
+ * as a string (long double has a huge range of some 4952 chars, see LDBL_MAX).
* This should be the size of the buffer given to ld2string */
#define MAX_LONG_DOUBLE_CHARS 5*1024
+/* The maximum number of characters needed to represent a double
+ * as a string (double has a huge range of some 328 chars, see DBL_MAX).
+ * This should be the size of the buffer for sprintf with %f */
+#define MAX_DOUBLE_CHARS 400
+
+/* The maximum number of characters needed to for d2string call.
+ * Since it uses %g and not %f, some 40 chars should be enough. */
+#define MAX_D2STRING_CHARS 128
+
+/* Bytes needed for long -> str + '\0' */
+#define LONG_STR_SIZE 21
+
/* long double to string conversion options */
typedef enum {
LD_STR_AUTO, /* %.17Lg */
@@ -63,6 +75,7 @@ int string2d(const char *s, size_t slen, double *dp);
int trimDoubleString(char *buf, size_t len);
int d2string(char *buf, size_t len, double value);
int ld2string(char *buf, size_t len, long double value, ld2string_mode mode);
+int yesnotoi(char *s);
sds getAbsolutePath(char *filename);
long getTimeZone(void);
int pathIsBaseName(char *path);
diff --git a/src/version.h b/src/version.h
index 0f750bad3..b9dc9258e 100644
--- a/src/version.h
+++ b/src/version.h
@@ -1,2 +1,2 @@
-#define REDIS_VERSION "6.9.241"
-#define REDIS_VERSION_NUM 0x000609f1
+#define REDIS_VERSION "6.9.242"
+#define REDIS_VERSION_NUM 0x000609f2
diff --git a/src/ziplist.c b/src/ziplist.c
index 8a65f3931..fa73cbf6f 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -608,7 +608,7 @@ int64_t zipLoadInteger(unsigned char *p, unsigned char encoding) {
}
/* Fills a struct with all information about an entry.
- * This function is the "unsafe" alternative to the one blow.
+ * This function is the "unsafe" alternative to the one below.
* Generally, all function that return a pointer to an element in the ziplist
* will assert that this element is valid, so it can be freely used.
* Generally functions such ziplistGet assume the input pointer is already
@@ -803,7 +803,7 @@ unsigned char *__ziplistCascadeUpdate(unsigned char *zl, unsigned char *p) {
/* Update tail offset after loop. */
if (tail == zl + prevoffset) {
- /* When the the last entry we need to update is also the tail, update tail offset
+ /* When the last entry we need to update is also the tail, update tail offset
* unless this is the only entry that was updated (so the tail offset didn't change). */
if (extra - delta != 0) {
ZIPLIST_TAIL_OFFSET(zl) =
diff --git a/tests/cluster/cluster.tcl b/tests/cluster/cluster.tcl
index 531e90d6c..9c669e128 100644
--- a/tests/cluster/cluster.tcl
+++ b/tests/cluster/cluster.tcl
@@ -203,6 +203,21 @@ proc wait_for_cluster_propagation {} {
}
}
+# Check if cluster's view of hostnames is consistent
+proc are_hostnames_propagated {match_string} {
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ set cfg [R $j cluster slots]
+ foreach node $cfg {
+ for {set i 2} {$i < [llength $node]} {incr i} {
+ if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
+ return 0
+ }
+ }
+ }
+ }
+ return 1
+}
+
# Returns a parsed CLUSTER LINKS output of the instance identified
# by the given `id` as a list of dictionaries, with each dictionary
# corresponds to a link.
diff --git a/tests/cluster/tests/00-base.tcl b/tests/cluster/tests/00-base.tcl
index 656128e53..12d8244a8 100644
--- a/tests/cluster/tests/00-base.tcl
+++ b/tests/cluster/tests/00-base.tcl
@@ -64,7 +64,7 @@ test "It is possible to write and read from the cluster" {
}
test "Function no-cluster flag" {
- R 1 function load lua test {
+ R 1 function load {#!lua name=test
redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-cluster'}}
}
catch {R 1 fcall f1 0} e
diff --git a/tests/cluster/tests/11-manual-takeover.tcl b/tests/cluster/tests/11-manual-takeover.tcl
index f567c6962..78a0f858b 100644
--- a/tests/cluster/tests/11-manual-takeover.tcl
+++ b/tests/cluster/tests/11-manual-takeover.tcl
@@ -14,20 +14,32 @@ test "Cluster is writable" {
cluster_write_test 0
}
+# For this test, disable replica failover until
+# all of the primaries are confirmed killed. Otherwise
+# there might be enough time to elect a replica.
+set replica_ids { 5 6 7 }
+foreach id $replica_ids {
+ R $id config set cluster-replica-no-failover yes
+}
+
test "Killing majority of master nodes" {
kill_instance redis 0
kill_instance redis 1
kill_instance redis 2
}
+foreach id $replica_ids {
+ R $id config set cluster-replica-no-failover no
+}
+
test "Cluster should eventually be down" {
assert_cluster_state fail
}
test "Use takeover to bring slaves back" {
- R 5 cluster failover takeover
- R 6 cluster failover takeover
- R 7 cluster failover takeover
+ foreach id $replica_ids {
+ R $id cluster failover takeover
+ }
}
test "Cluster should eventually be up again" {
@@ -39,9 +51,9 @@ test "Cluster is writable" {
}
test "Instance #5, #6, #7 are now masters" {
- assert {[RI 5 role] eq {master}}
- assert {[RI 6 role] eq {master}}
- assert {[RI 7 role] eq {master}}
+ foreach id $replica_ids {
+ assert {[RI $id role] eq {master}}
+ }
}
test "Restarting the previously killed master nodes" {
diff --git a/tests/cluster/tests/12-replica-migration-2.tcl b/tests/cluster/tests/12-replica-migration-2.tcl
index f0493e57e..ed680061c 100644
--- a/tests/cluster/tests/12-replica-migration-2.tcl
+++ b/tests/cluster/tests/12-replica-migration-2.tcl
@@ -45,11 +45,12 @@ test "Resharding all the master #0 slots away from it" {
}
-test "Master #0 should lose its replicas" {
+test "Master #0 who lost all slots should turn into a replica without replicas" {
wait_for_condition 1000 50 {
- [llength [lindex [R 0 role] 2]] == 0
+ [RI 0 role] == "slave" && [RI 0 connected_slaves] == 0
} else {
- fail "Master #0 still has replicas"
+ puts [R 0 info replication]
+ fail "Master #0 didn't turn itself into a replica"
}
}
diff --git a/tests/cluster/tests/27-endpoints.tcl b/tests/cluster/tests/27-endpoints.tcl
index 4010b92ed..32e3e794d 100644
--- a/tests/cluster/tests/27-endpoints.tcl
+++ b/tests/cluster/tests/27-endpoints.tcl
@@ -1,20 +1,5 @@
source "../tests/includes/init-tests.tcl"
-# Check if cluster's view of hostnames is consistent
-proc are_hostnames_propagated {match_string} {
- for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
- set cfg [R $j cluster slots]
- foreach node $cfg {
- for {set i 2} {$i < [llength $node]} {incr i} {
- if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
- return 0
- }
- }
- }
- }
- return 1
-}
-
# Isolate a node from the cluster and give it a new nodeid
proc isolate_node {id} {
set node_id [R $id CLUSTER MYID]
@@ -212,6 +197,9 @@ test "Verify the nodes configured with prefer hostname only show hostname for ne
test "Test restart will keep hostname information" {
# Set a new hostname, reboot and make sure it sticks
R 0 config set cluster-announce-hostname "restart-1.com"
+ # Store the hostname in the config
+ R 0 config rewrite
+ kill_instance redis 0
restart_instance redis 0
set slot_result [R 0 CLUSTER SLOTS]
assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "restart-1.com"
diff --git a/tests/cluster/tests/28-cluster-shards.tcl b/tests/cluster/tests/28-cluster-shards.tcl
new file mode 100644
index 000000000..fe794f2b7
--- /dev/null
+++ b/tests/cluster/tests/28-cluster-shards.tcl
@@ -0,0 +1,185 @@
+source "../tests/includes/init-tests.tcl"
+
+# Initial slot distribution.
+set ::slot0 [list 0 1000 1002 5459 5461 5461 10926 10926]
+set ::slot1 [list 5460 5460 5462 10922 10925 10925]
+set ::slot2 [list 10923 10924 10927 16383]
+set ::slot3 [list 1001 1001]
+
+proc cluster_create_with_split_slots {masters replicas} {
+ for {set j 0} {$j < $masters} {incr j} {
+ R $j cluster ADDSLOTSRANGE {*}[set ::slot${j}]
+ }
+ if {$replicas} {
+ cluster_allocate_slaves $masters $replicas
+ }
+ set ::cluster_master_nodes $masters
+ set ::cluster_replica_nodes $replicas
+}
+
+# Get the node info with the specific node_id from the
+# given reference node. Valid type options are "node" and "shard"
+proc get_node_info_from_shard {id reference {type node}} {
+ set shards_response [R $reference CLUSTER SHARDS]
+ foreach shard_response $shards_response {
+ set nodes [dict get $shard_response nodes]
+ foreach node $nodes {
+ if {[dict get $node id] eq $id} {
+ if {$type eq "node"} {
+ return $node
+ } elseif {$type eq "shard"} {
+ return $shard_response
+ } else {
+ return {}
+ }
+ }
+ }
+ }
+ # No shard found, return nothing
+ return {}
+}
+
+test "Create a 8 nodes cluster with 4 shards" {
+ cluster_create_with_split_slots 4 4
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+test "Set cluster hostnames and verify they are propagated" {
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ R $j config set cluster-announce-hostname "host-$j.com"
+ }
+
+ # Wait for everyone to agree about the state
+ wait_for_cluster_propagation
+}
+
+test "Verify information about the shards" {
+ set ids {}
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ lappend ids [R $j CLUSTER MYID]
+ }
+ set slots [list $::slot0 $::slot1 $::slot2 $::slot3 $::slot0 $::slot1 $::slot2 $::slot3]
+
+ # Verify on each node (primary/replica), the response of the `CLUSTER SLOTS` command is consistent.
+ for {set ref 0} {$ref < $::cluster_master_nodes + $::cluster_replica_nodes} {incr ref} {
+ for {set i 0} {$i < $::cluster_master_nodes + $::cluster_replica_nodes} {incr i} {
+ assert_equal [lindex $slots $i] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "shard"] slots]
+ assert_equal "host-$i.com" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] hostname]
+ assert_equal "127.0.0.1" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] ip]
+ # Default value of 'cluster-preferred-endpoint-type' is ip.
+ assert_equal "127.0.0.1" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] endpoint]
+
+ if {$::tls} {
+ assert_equal [get_instance_attrib redis $i plaintext-port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
+ assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] tls-port]
+ } else {
+ assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
+ }
+
+ if {$i < 4} {
+ assert_equal "master" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
+ assert_equal "online" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] health]
+ } else {
+ assert_equal "replica" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
+ # Replica could be in online or loading
+ }
+ }
+ }
+}
+
+test "Verify no slot shard" {
+ # Node 8 has no slots assigned
+ set node_8_id [R 8 CLUSTER MYID]
+ assert_equal {} [dict get [get_node_info_from_shard $node_8_id 8 "shard"] slots]
+ assert_equal {} [dict get [get_node_info_from_shard $node_8_id 0 "shard"] slots]
+}
+
+set node_0_id [R 0 CLUSTER MYID]
+
+test "Kill a node and tell the replica to immediately takeover" {
+ kill_instance redis 0
+ R 4 cluster failover force
+}
+
+# Primary 0 node should report as fail; wait until the new primary acknowledges it.
+test "Verify health as fail for killed node" {
+ wait_for_condition 50 100 {
+ "fail" eq [dict get [get_node_info_from_shard $node_0_id 4 "node"] "health"]
+ } else {
+ fail "New primary never detected the node failed"
+ }
+}
+
+set primary_id 4
+set replica_id 0
+
+test "Restarting primary node" {
+ restart_instance redis $replica_id
+}
+
+test "Instance #0 gets converted into a replica" {
+ wait_for_condition 1000 50 {
+ [RI $replica_id role] eq {slave}
+ } else {
+ fail "Old primary was not converted into replica"
+ }
+}
+
+test "Test the replica reports a loading state while it's loading" {
+ # Sanity check: CLUSTER SHARDS should report the replica online before we proceed
+ set replica_cluster_id [R $replica_id CLUSTER MYID]
+ wait_for_condition 50 1000 {
+ [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health] eq "online"
+ } else {
+ fail "Replica never transitioned to online"
+ }
+
+ # Set 1 MB of data, so there is something to load on full sync
+ R $primary_id debug populate 1000 key 1000
+
+ # Kill replica client for primary and load new data to the primary
+ R $primary_id config set repl-backlog-size 100
+
+ # Set the key load delay so that it will take at least
+ # 2 seconds to fully load the data.
+ R $replica_id config set key-load-delay 4000
+
+ # Trigger event loop processing every 1024 bytes, this trigger
+ # allows us to send and receive cluster messages, so we are setting
+ # it low so that the cluster messages are sent more frequently.
+ R $replica_id config set loading-process-events-interval-bytes 1024
+
+ R $primary_id multi
+ R $primary_id client kill type replica
+ # Write enough data to overflow the shrunken replication backlog and force a full sync
+ set num 100
+ set value [string repeat A 1024]
+ for {set j 0} {$j < $num} {incr j} {
+ # Use hashtag valid for shard #0
+ set key "{ch3}$j"
+ R $primary_id set $key $value
+ }
+ R $primary_id exec
+
+ # The replica should reconnect and start a full sync; it will gossip about its health to the primary.
+ wait_for_condition 50 1000 {
+ "loading" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
+ } else {
+ fail "Replica never transitioned to loading"
+ }
+
+ # Speed up the key loading and verify everything resumes
+ R $replica_id config set key-load-delay 0
+
+ wait_for_condition 50 1000 {
+ "online" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
+ } else {
+ fail "Replica never transitioned to online"
+ }
+
+ # Final sanity, the replica agrees it is online.
+ assert_equal "online" [dict get [get_node_info_from_shard $replica_cluster_id $replica_id "node"] health]
+} \ No newline at end of file
diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl
index 88b189ac0..e159fb17d 100644
--- a/tests/integration/redis-cli.tcl
+++ b/tests/integration/redis-cli.tcl
@@ -228,6 +228,30 @@ start_server {tags {"cli"}} {
file delete $tmpfile
}
+ test_tty_cli "Escape character in JSON mode" {
+ # reverse solidus
+ r hset solidus \/ \/
+ assert_equal \/ \/ [run_cli hgetall solidus]
+ set escaped_reverse_solidus \"\\"
+ assert_equal $escaped_reverse_solidus $escaped_reverse_solidus [run_cli --json hgetall \/]
+ # non-printable (0xF0 in ISO-8859-1; in UTF-8 it would be 0xC3 0xB0)
+ set eth "\u00f0\u0065"
+ r hset eth test $eth
+ assert_equal \"\\xf0e\" [run_cli hget eth test]
+ assert_equal \"\u00f0e\" [run_cli --json hget eth test]
+ assert_equal \"\\\\xf0e\" [run_cli --quoted-json hget eth test]
+ # control characters
+ r hset control test "Hello\x00\x01\x02\x03World"
+ assert_equal \"Hello\\u0000\\u0001\\u0002\\u0003World" [run_cli --json hget control test]
+ # non-string keys
+ r hset numkey 1 One
+ assert_equal \{\"1\":\"One\"\} [run_cli --json hgetall numkey]
+ # non-string, non-printable keys
+ r hset npkey "K\u0000\u0001ey" "V\u0000\u0001alue"
+ assert_equal \{\"K\\u0000\\u0001ey\":\"V\\u0000\\u0001alue\"\} [run_cli --json hgetall npkey]
+ assert_equal \{\"K\\\\x00\\\\x01ey\":\"V\\\\x00\\\\x01alue\"\} [run_cli --quoted-json hgetall npkey]
+ }
+
test_nontty_cli "Status reply" {
assert_equal "OK" [run_cli set key bar]
assert_equal "bar" [r get key]
@@ -322,7 +346,7 @@ if {!$::tls} { ;# fake_redis_node doesn't support TLS
set dir [lindex [r config get dir] 1]
assert_equal "OK" [r debug populate 100000 key 1000]
- assert_equal "OK" [r function load lua lib1 "redis.register_function('func1', function() return 123 end)"]
+ assert_equal "lib1" [r function load "#!lua name=lib1\nredis.register_function('func1', function() return 123 end)"]
if {$functions_only} {
set args "--functions-rdb $dir/cli.rdb"
} else {
@@ -335,10 +359,10 @@ if {!$::tls} { ;# fake_redis_node doesn't support TLS
file rename "$dir/cli.rdb" "$dir/dump.rdb"
assert_equal "OK" [r set should-not-exist 1]
- assert_equal "OK" [r function load lua should_not_exist_func "redis.register_function('should_not_exist_func', function() return 456 end)"]
+ assert_equal "should_not_exist_func" [r function load "#!lua name=should_not_exist_func\nredis.register_function('should_not_exist_func', function() return 456 end)"]
assert_equal "OK" [r debug reload nosave]
assert_equal {} [r get should-not-exist]
- assert_equal {{library_name lib1 engine LUA description {} functions {{name func1 description {} flags {}}}}} [r function list]
+ assert_equal {{library_name lib1 engine LUA functions {{name func1 description {} flags {}}}}} [r function list]
if {$functions_only} {
assert_equal 0 [r dbsize]
} else {
diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl
index b8c50308a..281d5a8eb 100644
--- a/tests/integration/replication-4.tcl
+++ b/tests/integration/replication-4.tcl
@@ -47,7 +47,7 @@ start_server {tags {"repl external:skip"}} {
set slave [srv 0 client]
# Load some functions to be used later
- $master FUNCTION load lua test replace {
+ $master FUNCTION load replace {#!lua name=test
redis.register_function{function_name='f_default_flags', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={}}
redis.register_function{function_name='f_no_writes', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={'no-writes'}}
}
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
index 05f62d5e8..44915be1b 100644
--- a/tests/integration/replication.tcl
+++ b/tests/integration/replication.tcl
@@ -523,10 +523,14 @@ foreach testType {Successful Aborted} {
$replica set mykey myvalue
# Set a function value on replica to check status during loading, on failure and after swapping db
- $replica function load LUA test {redis.register_function('test', function() return 'hello1' end)}
+ $replica function load {#!lua name=test
+ redis.register_function('test', function() return 'hello1' end)
+ }
# Set a function value on master to check it reaches the replica when replication ends
- $master function load LUA test {redis.register_function('test', function() return 'hello2' end)}
+ $master function load {#!lua name=test
+ redis.register_function('test', function() return 'hello2' end)
+ }
# Force the replica to try another full sync (this time it will have matching master replid)
$master multi
@@ -659,7 +663,9 @@ test {diskless loading short read} {
set start [clock clicks -milliseconds]
# Set a function value to check short read handling on functions
- r function load LUA test {redis.register_function('test', function() return 'hello1' end)}
+ r function load {#!lua name=test
+ redis.register_function('test', function() return 'hello1' end)
+ }
for {set k 0} {$k < 3} {incr k} {
for {set i 0} {$i < 10} {incr i} {
diff --git a/tests/modules/Makefile b/tests/modules/Makefile
index ce842f3af..1b7159c89 100644
--- a/tests/modules/Makefile
+++ b/tests/modules/Makefile
@@ -54,7 +54,9 @@ TEST_MODULES = \
subcommands.so \
reply.so \
cmdintrospection.so \
- eventloop.so
+ eventloop.so \
+ moduleconfigs.so \
+ moduleconfigstwo.so
.PHONY: all
diff --git a/tests/modules/aclcheck.c b/tests/modules/aclcheck.c
index 0e9c9af29..8a9d468a6 100644
--- a/tests/modules/aclcheck.c
+++ b/tests/modules/aclcheck.c
@@ -136,6 +136,22 @@ int rm_call_aclcheck_cmd_module_user(RedisModuleCtx *ctx, RedisModuleString **ar
return res;
}
+int rm_call_aclcheck_with_errors(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "vEC", argv + 2, argc - 2);
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ return REDISMODULE_OK;
+}
+
/* A wrap for RM_Call that pass the 'C' flag to do ACL check on the command. */
int rm_call_aclcheck(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
REDISMODULE_NOT_USED(argv);
@@ -190,5 +206,9 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
"write",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"aclcheck.rm_call_with_errors", rm_call_aclcheck_with_errors,
+ "write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
return REDISMODULE_OK;
}
diff --git a/tests/modules/auth.c b/tests/modules/auth.c
index 040a447ec..612320dbc 100644
--- a/tests/modules/auth.c
+++ b/tests/modules/auth.c
@@ -54,6 +54,16 @@ int Auth_AuthRealUser(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
return RedisModule_ReplyWithLongLong(ctx, (uint64_t) client_id);
}
+/* This command redacts every other argument and returns OK */
+int Auth_RedactedAPI(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ for(int i = argc - 1; i > 0; i -= 2) {
+ int result = RedisModule_RedactClientCommandArgument(ctx, i);
+ RedisModule_Assert(result == REDISMODULE_OK);
+ }
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
int Auth_ChangeCount(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
@@ -87,6 +97,10 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
Auth_ChangeCount,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"auth.redact",
+ Auth_RedactedAPI,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
return REDISMODULE_OK;
}
diff --git a/tests/modules/basics.c b/tests/modules/basics.c
index 4d639d682..ecd1b8852 100644
--- a/tests/modules/basics.c
+++ b/tests/modules/basics.c
@@ -718,6 +718,25 @@ end:
/* Return 1 if the reply matches the specified string, otherwise log errors
* in the server log and return 0. */
+int TestAssertErrorReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, char *str, size_t len) {
+ RedisModuleString *mystr, *expected;
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ERROR) {
+ return 0;
+ }
+
+ mystr = RedisModule_CreateStringFromCallReply(reply);
+ expected = RedisModule_CreateString(ctx,str,len);
+ if (RedisModule_StringCompare(mystr,expected) != 0) {
+ const char *mystr_ptr = RedisModule_StringPtrLen(mystr,NULL);
+ const char *expected_ptr = RedisModule_StringPtrLen(expected,NULL);
+ RedisModule_Log(ctx,"warning",
+ "Unexpected Error reply reply '%s' (instead of '%s')",
+ mystr_ptr, expected_ptr);
+ return 0;
+ }
+ return 1;
+}
+
int TestAssertStringReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, char *str, size_t len) {
RedisModuleString *mystr, *expected;
@@ -846,6 +865,18 @@ int TestBasics(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
if (!TestAssertStringReply(ctx,RedisModule_CallReplyArrayElement(reply, 0),"test",4)) goto fail;
if (!TestAssertStringReply(ctx,RedisModule_CallReplyArrayElement(reply, 1),"1234",4)) goto fail;
+ T("foo", "E");
+ if (!TestAssertErrorReply(ctx,reply,"ERR Unknown Redis command 'foo'.",32)) goto fail;
+
+ T("set", "Ec", "x");
+ if (!TestAssertErrorReply(ctx,reply,"ERR Wrong number of args calling Redis command 'set'.",53)) goto fail;
+
+ T("shutdown", "SE");
+ if (!TestAssertErrorReply(ctx,reply,"ERR command 'shutdown' is not allowed on script mode",52)) goto fail;
+
+ T("set", "WEcc", "x", "1");
+ if (!TestAssertErrorReply(ctx,reply,"ERR Write command 'set' was called while write is not allowed.",62)) goto fail;
+
RedisModule_ReplyWithSimpleString(ctx,"ALL TESTS PASSED");
return REDISMODULE_OK;
diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c
index a2d7c6d00..9c2598a34 100644
--- a/tests/modules/blockedclient.c
+++ b/tests/modules/blockedclient.c
@@ -195,7 +195,7 @@ int do_rm_call(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
- RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "v", argv + 2, argc - 2);
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "Ev", argv + 2, argc - 2);
if(!rep){
RedisModule_ReplyWithError(ctx, "NULL reply returned");
}else{
diff --git a/tests/modules/blockonkeys.c b/tests/modules/blockonkeys.c
index 4568b9fa8..1aa576489 100644
--- a/tests/modules/blockonkeys.c
+++ b/tests/modules/blockonkeys.c
@@ -135,22 +135,29 @@ int bpop_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int arg
return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
}
-/* FSL.BPOP <key> <timeout> - Block clients until list has two or more elements.
+/* FSL.BPOP <key> <timeout> [NO_TO_CB]- Block clients until list has two or more elements.
* When that happens, unblock client and pop the last two elements (from the right). */
int fsl_bpop(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
- if (argc != 3)
+ if (argc < 3)
return RedisModule_WrongArity(ctx);
long long timeout;
if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK || timeout < 0)
return RedisModule_ReplyWithError(ctx,"ERR invalid timeout");
+ int to_cb = 1;
+ if (argc == 4) {
+ if (strcasecmp("NO_TO_CB", RedisModule_StringPtrLen(argv[3], NULL)))
+ return RedisModule_ReplyWithError(ctx,"ERR invalid argument");
+ to_cb = 0;
+ }
+
fsl_t *fsl;
if (!get_fsl(ctx, argv[1], REDISMODULE_READ, 0, &fsl, 1))
return REDISMODULE_OK;
if (!fsl) {
- RedisModule_BlockClientOnKeys(ctx, bpop_reply_callback, bpop_timeout_callback,
+ RedisModule_BlockClientOnKeys(ctx, bpop_reply_callback, to_cb ? bpop_timeout_callback : NULL,
NULL, timeout, &argv[1], 1, NULL);
} else {
RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
diff --git a/tests/modules/eventloop.c b/tests/modules/eventloop.c
index 50d3bc052..c0cfdf04f 100644
--- a/tests/modules/eventloop.c
+++ b/tests/modules/eventloop.c
@@ -11,7 +11,6 @@
* 4- test.oneshot : Test for oneshot API
*/
-#define REDISMODULE_EXPERIMENTAL_API
#include "redismodule.h"
#include <stdlib.h>
#include <unistd.h>
diff --git a/tests/modules/hooks.c b/tests/modules/hooks.c
index af4681cf9..94d902d22 100644
--- a/tests/modules/hooks.c
+++ b/tests/modules/hooks.c
@@ -267,6 +267,18 @@ void swapDbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void
LogNumericEvent(ctx, "swapdb-second", ei->dbnum_second);
}
+void configChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ if (sub != REDISMODULE_SUBEVENT_CONFIG_CHANGE) {
+ return;
+ }
+
+ RedisModuleConfigChangeV1 *ei = data;
+ LogNumericEvent(ctx, "config-change-count", ei->num_changes);
+ LogStringEvent(ctx, "config-change-first", ei->config_names[0]);
+}
+
/* This function must be present on each Redis module. It is used in order to
* register the commands into the Redis server. */
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
@@ -317,6 +329,9 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
RedisModule_SubscribeToServerEvent(ctx,
RedisModuleEvent_SwapDB, swapDbCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_Config, configChangeCallback);
+
event_log = RedisModule_CreateDict(ctx);
if (RedisModule_CreateCommand(ctx,"hooks.event_count", cmdEventCount,"",0,0,0) == REDISMODULE_ERR)
diff --git a/tests/modules/moduleconfigs.c b/tests/modules/moduleconfigs.c
new file mode 100644
index 000000000..a9e434a7b
--- /dev/null
+++ b/tests/modules/moduleconfigs.c
@@ -0,0 +1,142 @@
+#include "redismodule.h"
+#include <strings.h>
+int mutable_bool_val;
+int immutable_bool_val;
+long long longval;
+long long memval;
+RedisModuleString *strval = NULL;
+int enumval;
+
+/* Series of get and set callbacks for each type of config, these rely on the privdata ptr
+ * to point to the config, and they register the configs as such. Note that one could also just
+ * use names if they wanted, and store anything in privdata. */
+int getBoolConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ return (*(int *)privdata);
+}
+
+int setBoolConfigCommand(const char *name, int new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ *(int *)privdata = new;
+ return REDISMODULE_OK;
+}
+
+long long getNumericConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ return (*(long long *) privdata);
+}
+
+int setNumericConfigCommand(const char *name, long long new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ *(long long *)privdata = new;
+ return REDISMODULE_OK;
+}
+
+RedisModuleString *getStringConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(privdata);
+ return strval;
+}
+int setStringConfigCommand(const char *name, RedisModuleString *new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ REDISMODULE_NOT_USED(privdata);
+ size_t len;
+ if (!strcasecmp(RedisModule_StringPtrLen(new, &len), "rejectisfreed")) {
+ *err = RedisModule_CreateString(NULL, "Cannot set string to 'rejectisfreed'", 36);
+ return REDISMODULE_ERR;
+ }
+ if (strval) RedisModule_FreeString(NULL, strval);
+ RedisModule_RetainString(NULL, new);
+ strval = new;
+ return REDISMODULE_OK;
+}
+
+int getEnumConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(privdata);
+ return enumval;
+}
+
+int setEnumConfigCommand(const char *name, int val, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ REDISMODULE_NOT_USED(privdata);
+ enumval = val;
+ return REDISMODULE_OK;
+}
+
+int boolApplyFunc(RedisModuleCtx *ctx, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(privdata);
+ if (mutable_bool_val && immutable_bool_val) {
+ *err = RedisModule_CreateString(NULL, "Bool configs cannot both be yes.", 32);
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+int longlongApplyFunc(RedisModuleCtx *ctx, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(privdata);
+ if (longval == memval) {
+ *err = RedisModule_CreateString(NULL, "These configs cannot equal each other.", 38);
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "moduleconfigs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_RegisterBoolConfig(ctx, "mutable_bool", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &mutable_bool_val) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ /* Immutable config here. */
+ if (RedisModule_RegisterBoolConfig(ctx, "immutable_bool", 0, REDISMODULE_CONFIG_IMMUTABLE, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &immutable_bool_val) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_RegisterStringConfig(ctx, "string", "secret password", REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ /* On the stack to make sure we're copying them. */
+ const char *enum_vals[3] = {"one", "two", "three"};
+ const int int_vals[3] = {0, 2, 4};
+
+ if (RedisModule_RegisterEnumConfig(ctx, "enum", 0, REDISMODULE_CONFIG_DEFAULT, enum_vals, int_vals, 3, getEnumConfigCommand, setEnumConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ /* Memory config here. */
+ if (RedisModule_RegisterNumericConfig(ctx, "memory_numeric", 1024, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_MEMORY, 0, 3000000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &memval) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_RegisterNumericConfig(ctx, "numeric", -1, REDISMODULE_CONFIG_DEFAULT, -5, 2000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &longval) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ size_t len;
+ if (argc && !strcasecmp(RedisModule_StringPtrLen(argv[0], &len), "noload")) {
+ return REDISMODULE_OK;
+ } else if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
+ if (strval) {
+ RedisModule_FreeString(ctx, strval);
+ strval = NULL;
+ }
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ REDISMODULE_NOT_USED(ctx);
+ if (strval) {
+ RedisModule_FreeString(ctx, strval);
+ strval = NULL;
+ }
+ return REDISMODULE_OK;
+} \ No newline at end of file
diff --git a/tests/modules/moduleconfigstwo.c b/tests/modules/moduleconfigstwo.c
new file mode 100644
index 000000000..c0e8f9136
--- /dev/null
+++ b/tests/modules/moduleconfigstwo.c
@@ -0,0 +1,39 @@
+#include "redismodule.h"
+#include <strings.h>
+
+/* Second module configs module, for testing.
+ * Need to make sure that multiple modules with configs don't interfere with each other */
+int bool_config;
+
+int getBoolConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(privdata);
+ if (!strcasecmp(name, "test")) {
+ return bool_config;
+ }
+ return 0;
+}
+
+int setBoolConfigCommand(const char *name, int new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(privdata);
+ REDISMODULE_NOT_USED(err);
+ if (!strcasecmp(name, "test")) {
+ bool_config = new;
+ return REDISMODULE_OK;
+ }
+ return REDISMODULE_ERR;
+}
+
+/* No arguments are expected */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "configs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_RegisterBoolConfig(ctx, "test", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, NULL, &argc) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+} \ No newline at end of file
diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl
index 3e930646a..71525fc7c 100644
--- a/tests/sentinel/tests/03-runtime-reconf.tcl
+++ b/tests/sentinel/tests/03-runtime-reconf.tcl
@@ -2,6 +2,162 @@
source "../tests/includes/init-tests.tcl"
set num_sentinels [llength $::sentinel_instances]
+set ::user "testuser"
+set ::password "secret"
+
+proc server_set_password {} {
+ foreach_redis_id id {
+ assert_equal {OK} [R $id CONFIG SET requirepass $::password]
+ assert_equal {OK} [R $id AUTH $::password]
+ assert_equal {OK} [R $id CONFIG SET masterauth $::password]
+ }
+}
+
+proc server_reset_password {} {
+ foreach_redis_id id {
+ assert_equal {OK} [R $id CONFIG SET requirepass ""]
+ assert_equal {OK} [R $id CONFIG SET masterauth ""]
+ }
+}
+
+proc server_set_acl {id} {
+ assert_equal {OK} [R $id ACL SETUSER $::user on >$::password allchannels +@all]
+ assert_equal {OK} [R $id ACL SETUSER default off]
+
+ R $id CLIENT KILL USER default SKIPME no
+ assert_equal {OK} [R $id AUTH $::user $::password]
+ assert_equal {OK} [R $id CONFIG SET masteruser $::user]
+ assert_equal {OK} [R $id CONFIG SET masterauth $::password]
+}
+
+proc server_reset_acl {id} {
+ assert_equal {OK} [R $id ACL SETUSER default on]
+ assert_equal {1} [R $id ACL DELUSER $::user]
+
+ assert_equal {OK} [R $id CONFIG SET masteruser ""]
+ assert_equal {OK} [R $id CONFIG SET masterauth ""]
+}
+
+proc verify_sentinel_connect_replicas {id} {
+ foreach replica [S $id SENTINEL REPLICAS mymaster] {
+ if {[string match "*disconnected*" [dict get $replica flags]]} {
+ return 0
+ }
+ }
+ return 1
+}
+
+proc wait_for_sentinels_connect_servers { {is_connect 1} } {
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [string match "*disconnected*" [dict get [S $id SENTINEL MASTER mymaster] flags]] != $is_connect
+ } else {
+ fail "At least some sentinel can't connect to master"
+ }
+
+ wait_for_condition 1000 50 {
+ [verify_sentinel_connect_replicas $id] == $is_connect
+ } else {
+ fail "At least some sentinel can't connect to replica"
+ }
+ }
+}
+
+test "Sentinels (re)connection following SENTINEL SET mymaster auth-pass" {
+ # 3 types of sentinels to test:
+ # (re)started while master changed pwd. Manage to connect only after setting pwd
+ set sent2re 0
+ # (up)dated in advance with master new password
+ set sent2up 1
+ # (un)touched. Yet manage to maintain (old) connection
+ set sent2un 2
+
+ wait_for_sentinels_connect_servers
+ kill_instance sentinel $sent2re
+ server_set_password
+ assert_equal {OK} [S $sent2up SENTINEL SET mymaster auth-pass $::password]
+ restart_instance sentinel $sent2re
+
+ # Verify sentinel that restarted failed to connect master
+ if {![string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]]} {
+ fail "Expected to be disconnected from master due to wrong password"
+ }
+
+ # Update restarted sentinel with master password
+ assert_equal {OK} [S $sent2re SENTINEL SET mymaster auth-pass $::password]
+
+ # All sentinels expected to connect successfully
+ wait_for_sentinels_connect_servers
+
+ # remove requirepass and verify sentinels manage to connect servers
+ server_reset_password
+ wait_for_sentinels_connect_servers
+ # Sanity check
+ verify_sentinel_auto_discovery
+}
+
+test "Sentinels (re)connection following master ACL change" {
+ # Three types of sentinels to test during ACL change:
+ # 1. (re)started Sentinel. Manage to connect only after setting new pwd
+ # 2. (up)dated Sentinel, get just before ACL change the new password
+ # 3. (un)touched Sentinel that kept old connection with master and didn't
+ # set new ACL password won't persist ACL pwd change (unlike legacy auth-pass)
+ set sent2re 0
+ set sent2up 1
+ set sent2un 2
+
+ wait_for_sentinels_connect_servers
+ # kill sentinel 'sent2re' and restart it after ACL change
+ kill_instance sentinel $sent2re
+
+ # Update sentinel 'sent2up' with new user and pwd
+ assert_equal {OK} [S $sent2up SENTINEL SET mymaster auth-user $::user]
+ assert_equal {OK} [S $sent2up SENTINEL SET mymaster auth-pass $::password]
+
+ foreach_redis_id id {
+ server_set_acl $id
+ }
+
+ restart_instance sentinel $sent2re
+
+ # Verify sentinel that restarted failed to reconnect master
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]] != 0
+ } else {
+ fail "Expected: Sentinel to be disconnected from master due to wrong password"
+ }
+
+ # Verify sentinel with updated password managed to connect (wait for sentinelTimer to reconnect)
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2up SENTINEL MASTER mymaster] flags]] == 0
+ } else {
+ fail "Expected: Sentinel to be connected to master"
+ }
+
+ # Verify sentinel untouched gets failed to connect master
+ if {![string match "*disconnected*" [dict get [S $sent2un SENTINEL MASTER mymaster] flags]]} {
+ fail "Expected: Sentinel to be disconnected from master due to wrong password"
+ }
+
+ # Now update all sentinels with new password
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id SENTINEL SET mymaster auth-user $::user]
+ assert_equal {OK} [S $id SENTINEL SET mymaster auth-pass $::password]
+ }
+
+ # All sentinels expected to connect successfully
+ wait_for_sentinels_connect_servers
+
+ # remove requirepass and verify sentinels manage to connect servers
+ foreach_redis_id id {
+ server_reset_acl $id
+ }
+
+ wait_for_sentinels_connect_servers
+ # Sanity check
+ verify_sentinel_auto_discovery
+}
+
test "Set parameters in normal case" {
set info [S 0 SENTINEL master mymaster]
diff --git a/tests/sentinel/tests/07-down-conditions.tcl b/tests/sentinel/tests/07-down-conditions.tcl
index e7e33a29a..bb24d6dff 100644
--- a/tests/sentinel/tests/07-down-conditions.tcl
+++ b/tests/sentinel/tests/07-down-conditions.tcl
@@ -24,7 +24,6 @@ proc ensure_master_up {} {
}
}
-
proc ensure_master_down {} {
S $::alive_sentinel sentinel debug info-period 1000
S $::alive_sentinel sentinel debug ping-period 100
@@ -45,10 +44,14 @@ test "Crash the majority of Sentinels to prevent failovers for this unit" {
}
test "SDOWN is triggered by non-responding but not crashed instance" {
- lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port
ensure_master_up
- exec ../../../src/redis-cli -h $host -p $port {*}[rediscli_tls_config "../../../tests"] debug sleep 3 > /dev/null &
+ set master_addr [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $master_addr 1]]
+
+ set pid [get_instance_attrib redis $master_id pid]
+ exec kill -SIGSTOP $pid
ensure_master_down
+ exec kill -SIGCONT $pid
ensure_master_up
}
diff --git a/tests/sentinel/tests/08-hostname-conf.tcl b/tests/sentinel/tests/08-hostname-conf.tcl
index be6e42cb0..263b06fca 100644
--- a/tests/sentinel/tests/08-hostname-conf.tcl
+++ b/tests/sentinel/tests/08-hostname-conf.tcl
@@ -1,3 +1,5 @@
+source "../tests/includes/utils.tcl"
+
proc set_redis_announce_ip {addr} {
foreach_redis_id id {
R $id config set replica-announce-ip $addr
diff --git a/tests/sentinel/tests/09-acl-support.tcl b/tests/sentinel/tests/09-acl-support.tcl
index 8c967f0bc..a754dacf5 100644
--- a/tests/sentinel/tests/09-acl-support.tcl
+++ b/tests/sentinel/tests/09-acl-support.tcl
@@ -31,16 +31,23 @@ test "(post-init) Set up ACL configuration" {
test "SENTINEL CONFIG SET handles on-the-fly credentials reconfiguration" {
# Make sure we're starting with a broken state...
- after 5000
- catch {S 1 SENTINEL CKQUORUM mymaster} err
- assert_match {*NOQUORUM*} $err
+ wait_for_condition 200 50 {
+ [catch {S 1 SENTINEL CKQUORUM mymaster}] == 1
+ } else {
+ fail "Expected: Sentinel to be disconnected from master due to wrong password"
+ }
+ assert_error "*NOQUORUM*" {S 1 SENTINEL CKQUORUM mymaster}
foreach_sentinel_id id {
assert_equal {OK} [S $id SENTINEL CONFIG SET sentinel-user $::user]
assert_equal {OK} [S $id SENTINEL CONFIG SET sentinel-pass $::password]
}
- after 5000
+ wait_for_condition 200 50 {
+ [catch {S 1 SENTINEL CKQUORUM mymaster}] == 0
+ } else {
+ fail "Expected: Sentinel to be connected to master after setting password"
+ }
assert_match {*OK*} [S 1 SENTINEL CKQUORUM mymaster]
}
diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl
index b5baa256f..fe9a61815 100644
--- a/tests/sentinel/tests/includes/init-tests.tcl
+++ b/tests/sentinel/tests/includes/init-tests.tcl
@@ -1,16 +1,5 @@
# Initialization tests -- most units will start including this.
-
-proc restart_killed_instances {} {
- foreach type {redis sentinel} {
- foreach_${type}_id id {
- if {[get_instance_attrib $type $id pid] == -1} {
- puts -nonewline "$type/$id "
- flush stdout
- restart_instance $type $id
- }
- }
- }
-}
+source "../tests/includes/utils.tcl"
test "(init) Restart killed instances" {
restart_killed_instances
@@ -59,14 +48,7 @@ test "(init) Sentinels can talk with the master" {
}
test "(init) Sentinels are able to auto-discover other sentinels" {
- set sentinels [llength $::sentinel_instances]
- foreach_sentinel_id id {
- wait_for_condition 1000 50 {
- [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1)
- } else {
- fail "At least some sentinel can't detect some other sentinel"
- }
- }
+ verify_sentinel_auto_discovery
}
test "(init) Sentinels are able to auto-discover slaves" {
diff --git a/tests/sentinel/tests/includes/utils.tcl b/tests/sentinel/tests/includes/utils.tcl
new file mode 100644
index 000000000..adfd91c09
--- /dev/null
+++ b/tests/sentinel/tests/includes/utils.tcl
@@ -0,0 +1,22 @@
+proc restart_killed_instances {} {
+ foreach type {redis sentinel} {
+ foreach_${type}_id id {
+ if {[get_instance_attrib $type $id pid] == -1} {
+ puts -nonewline "$type/$id "
+ flush stdout
+ restart_instance $type $id
+ }
+ }
+ }
+}
+
+proc verify_sentinel_auto_discovery {} {
+ set sentinels [llength $::sentinel_instances]
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1)
+ } else {
+ fail "At least some sentinel can't detect some other sentinel"
+ }
+ }
+}
diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl
index 5743be5f4..edcc1fb48 100644
--- a/tests/support/redis.tcl
+++ b/tests/support/redis.tcl
@@ -188,6 +188,10 @@ proc ::redis::__method__readraw {id fd val} {
set ::redis::readraw($id) $val
}
+proc ::redis::__method__readingraw {id fd} {
+ return $::redis::readraw($id)
+}
+
proc ::redis::__method__attributes {id fd} {
set _ $::redis::attributes($id)
}
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
index b06bd73ba..9d0c4510d 100644
--- a/tests/support/server.tcl
+++ b/tests/support/server.tcl
@@ -684,6 +684,14 @@ proc start_server {options {code undefined}} {
}
}
+# Start multiple servers with the same options, run code, then stop them.
+proc start_multiple_servers {num options code} {
+ for {set i 0} {$i < $num} {incr i} {
+ set code [list start_server $options $code]
+ }
+ uplevel 1 $code
+}
+
proc restart_server {level wait_ready rotate_logs {reconnect 1} {shutdown sigterm}} {
set srv [lindex $::servers end+$level]
if {$shutdown ne {sigterm}} {
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
index 46c9654c8..4ad96ab10 100644
--- a/tests/support/util.tcl
+++ b/tests/support/util.tcl
@@ -122,7 +122,7 @@ proc wait_replica_online r {
wait_for_condition 50 100 {
[string match "*slave0:*,state=online*" [$r info replication]]
} else {
- fail "replica didn't sync in time"
+ fail "replica didn't come online in time"
}
}
@@ -130,7 +130,7 @@ proc wait_for_ofs_sync {r1 r2} {
wait_for_condition 50 100 {
[status $r1 master_repl_offset] eq [status $r2 master_repl_offset]
} else {
- fail "replica didn't sync in time"
+ fail "replica offset didn't match in time"
}
}
diff --git a/tests/unit/acl-v2.tcl b/tests/unit/acl-v2.tcl
index 72ea44c3a..12eb5a3be 100644
--- a/tests/unit/acl-v2.tcl
+++ b/tests/unit/acl-v2.tcl
@@ -305,13 +305,19 @@ start_server {tags {"acl external:skip"}} {
assert_equal "ERR Command 'not-a-command' not found" $e
}
+ test {Test various commands for command permissions} {
+ r ACL setuser command-test -@all
+ assert_equal "This user has no permissions to run the 'set' command" [r ACL DRYRUN command-test set somekey somevalue]
+ assert_equal "This user has no permissions to run the 'get' command" [r ACL DRYRUN command-test get somekey]
+ }
+
test {Test various odd commands for key permissions} {
r ACL setuser command-test +@all %R~read* %W~write* %RW~rw*
# Test migrate, which is marked with incomplete keys
- assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever rw]
- assert_equal "This user has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever read]
- assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever write]
+ assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever rw 0 500]
+ assert_equal "This user has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever read 0 500]
+ assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever write 0 500]
assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 KEYS rw]
assert_equal "This user has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 KEYS read]
assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 KEYS write]
@@ -428,6 +434,50 @@ start_server {tags {"acl external:skip"}} {
assert_equal "This user has no permissions to access the 'otherchannel' channel" [r ACL DRYRUN test-channels ssubscribe otherchannel foo]
}
+ test {Test sort with ACL permissions} {
+ r set v1 1
+ r lpush mylist 1
+
+ r ACL setuser test-sort-acl on nopass (+sort ~mylist)
+ $r2 auth test-sort-acl nopass
+
+ catch {$r2 sort mylist by v*} e
+ assert_equal "ERR BY option of SORT denied due to insufficient ACL permissions." $e
+ catch {$r2 sort mylist get v*} e
+ assert_equal "ERR GET option of SORT denied due to insufficient ACL permissions." $e
+
+ r ACL setuser test-sort-acl (+sort ~mylist ~v*)
+ catch {$r2 sort mylist by v*} e
+ assert_equal "ERR BY option of SORT denied due to insufficient ACL permissions." $e
+ catch {$r2 sort mylist get v*} e
+ assert_equal "ERR GET option of SORT denied due to insufficient ACL permissions." $e
+
+ r ACL setuser test-sort-acl (+sort ~mylist %W~*)
+ catch {$r2 sort mylist by v*} e
+ assert_equal "ERR BY option of SORT denied due to insufficient ACL permissions." $e
+ catch {$r2 sort mylist get v*} e
+ assert_equal "ERR GET option of SORT denied due to insufficient ACL permissions." $e
+
+ r ACL setuser test-sort-acl (+sort ~mylist %R~*)
+ assert_equal "1" [$r2 sort mylist by v*]
+
+ # cleanup
+ r ACL deluser test-sort-acl
+ r del v1 mylist
+ }
+
+ test {Test DRYRUN with wrong number of arguments} {
+ r ACL setuser test-dry-run +@all ~v*
+
+ assert_equal "OK" [r ACL DRYRUN test-dry-run SET v v]
+
+ catch {r ACL DRYRUN test-dry-run SET v} e
+ assert_equal "ERR wrong number of arguments for 'set' command" $e
+
+ catch {r ACL DRYRUN test-dry-run SET} e
+ assert_equal "ERR wrong number of arguments for 'set' command" $e
+ }
+
$r2 close
}
diff --git a/tests/unit/aofrw.tcl b/tests/unit/aofrw.tcl
index ac861a653..45db1939f 100644
--- a/tests/unit/aofrw.tcl
+++ b/tests/unit/aofrw.tcl
@@ -185,7 +185,7 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}}
test "AOF rewrite functions" {
r flushall
- r FUNCTION LOAD LUA test DESCRIPTION {desc} {
+ r FUNCTION LOAD {#!lua name=test
redis.register_function('test', function() return 1 end)
}
r bgrewriteaof
@@ -194,7 +194,7 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}}
r debug loadaof
assert_equal [r fcall test 0] 1
r FUNCTION LIST
- } {{library_name test engine LUA description desc functions {{name test description {} flags {}}}}}
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
test {BGREWRITEAOF is delayed if BGSAVE is in progress} {
r flushall
diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl
index 89431c81f..ec08a060d 100644
--- a/tests/unit/bitops.tcl
+++ b/tests/unit/bitops.tcl
@@ -433,6 +433,12 @@ start_server {tags {"bitops"}} {
r bitfield foo3{t} incrby i5 0 1
set dirty5 [s rdb_changes_since_last_save]
assert {$dirty5 == $dirty4 + 2}
+
+ # Change length only
+ r setbit foo{t} 90 0
+ r bitfield foo2{t} set i5 90 0
+ set dirty6 [s rdb_changes_since_last_save]
+ assert {$dirty6 == $dirty5 + 2}
}
test {BITPOS bit=1 fuzzy testing using SETBIT} {
diff --git a/tests/unit/client-eviction.tcl b/tests/unit/client-eviction.tcl
index 949ac8f3d..469828473 100644
--- a/tests/unit/client-eviction.tcl
+++ b/tests/unit/client-eviction.tcl
@@ -395,13 +395,14 @@ start_server {} {
test "evict clients only until below limit" {
set client_count 10
set client_mem [mb 1]
- r debug replybuffer-peak-reset-time never
+ r debug replybuffer resizing 0
r config set maxmemory-clients 0
r client setname control
r client no-evict on
# Make multiple clients consume together roughly 1mb less than maxmemory_clients
set total_client_mem 0
+ set max_client_mem 0
set rrs {}
for {set j 0} {$j < $client_count} {incr j} {
set rr [redis_client]
@@ -414,20 +415,27 @@ start_server {} {
} else {
fail "Failed to fill qbuf for test"
}
- incr total_client_mem [client_field client$j tot-mem]
+ # In theory all these clients should use the same amount of memory (~1mb). But in practice
+ # some allocators (libc) can return different allocation sizes for the same malloc argument causing
+ # some clients to use slightly more memory than others. We find the largest client and make sure
+ # all clients are roughly the same size (+-1%). Then we can safely set the client eviction limit and
+ # expect consistent results in the test.
+ set cmem [client_field client$j tot-mem]
+ if {$max_client_mem > 0} {
+ set size_ratio [expr $max_client_mem.0/$cmem.0]
+ assert_range $size_ratio 0.99 1.01
+ }
+ if {$cmem > $max_client_mem} {
+ set max_client_mem $cmem
+ }
}
- set client_actual_mem [expr $total_client_mem / $client_count]
-
- # Make sure client_acutal_mem is more or equal to what we intended
- assert {$client_actual_mem >= $client_mem}
-
# Make sure all clients are still connected
set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]]
assert {$connected_clients == $client_count}
# Set maxmemory-clients to accommodate half our clients (taking into account the control client)
- set maxmemory_clients [expr ($client_actual_mem * $client_count) / 2 + [client_field control tot-mem]]
+ set maxmemory_clients [expr ($max_client_mem * $client_count) / 2 + [client_field control tot-mem]]
r config set maxmemory-clients $maxmemory_clients
# Make sure total used memory is below maxmemory_clients
@@ -438,8 +446,8 @@ start_server {} {
set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]]
assert {$connected_clients == [expr $client_count / 2]}
- # Restore the peak reset time to default
- r debug replybuffer-peak-reset-time reset
+ # Restore the reply buffer resize to default
+ r debug replybuffer resizing 1
foreach rr $rrs {$rr close}
} {} {needs:debug}
@@ -454,7 +462,7 @@ start_server {} {
r client setname control
r client no-evict on
r config set maxmemory-clients 0
- r debug replybuffer-peak-reset-time never
+ r debug replybuffer resizing 0
# Run over all sizes and create some clients using up that size
set total_client_mem 0
@@ -505,8 +513,8 @@ start_server {} {
}
}
- # Restore the peak reset time to default
- r debug replybuffer-peak-reset-time reset
+ # Restore the reply buffer resize to default
+ r debug replybuffer resizing 1
foreach rr $rrs {$rr close}
} {} {needs:debug}
diff --git a/tests/unit/cluster.tcl b/tests/unit/cluster.tcl
index 99925688c..9d49a2dee 100644
--- a/tests/unit/cluster.tcl
+++ b/tests/unit/cluster.tcl
@@ -19,6 +19,7 @@ proc csi {args} {
}
# make sure the test infra won't use SELECT
+set old_singledb $::singledb
set ::singledb 1
# cluster creation is complicated with TLS, and the current tests don't really need that coverage
@@ -26,14 +27,13 @@ tags {tls:skip external:skip cluster} {
# start three servers
set base_conf [list cluster-enabled yes cluster-node-timeout 1]
-start_server [list overrides $base_conf] {
-start_server [list overrides $base_conf] {
-start_server [list overrides $base_conf] {
+start_multiple_servers 3 [list overrides $base_conf] {
set node1 [srv 0 client]
set node2 [srv -1 client]
set node3 [srv -2 client]
set node3_pid [srv -2 pid]
+ set node3_rd [redis_deferring_client -2]
test {Create 3 node cluster} {
exec src/redis-cli --cluster-yes --cluster create \
@@ -52,7 +52,6 @@ start_server [list overrides $base_conf] {
test "Run blocking command on cluster node3" {
# key9184688 is mapped to slot 10923 (first slot of node 3)
- set node3_rd [redis_deferring_client -2]
$node3_rd brpop key9184688 0
$node3_rd flush
@@ -90,10 +89,11 @@ start_server [list overrides $base_conf] {
}
}
+ set node1_rd [redis_deferring_client 0]
+
test "Sanity test push cmd after resharding" {
assert_error {*MOVED*} {$node3 lpush key9184688 v1}
- set node1_rd [redis_deferring_client 0]
$node1_rd brpop key9184688 0
$node1_rd flush
@@ -109,13 +109,11 @@ start_server [list overrides $base_conf] {
assert_equal {key9184688 v2} [$node1_rd read]
}
- $node1_rd close
$node3_rd close
test "Run blocking command again on cluster node1" {
$node1 del key9184688
# key9184688 is mapped to slot 10923 which has been moved to node1
- set node1_rd [redis_deferring_client 0]
$node1_rd brpop key9184688 0
$node1_rd flush
@@ -149,18 +147,11 @@ start_server [list overrides $base_conf] {
exec kill -SIGCONT $node3_pid
$node1_rd close
-# stop three servers
-}
-}
-}
+} ;# stop servers
# Test redis-cli -- cluster create, add-node, call.
# Test that functions are propagated on add-node
-start_server [list overrides $base_conf] {
-start_server [list overrides $base_conf] {
-start_server [list overrides $base_conf] {
-start_server [list overrides $base_conf] {
-start_server [list overrides $base_conf] {
+start_multiple_servers 5 [list overrides $base_conf] {
set node4_rd [redis_client -3]
set node5_rd [redis_client -4]
@@ -182,7 +173,9 @@ start_server [list overrides $base_conf] {
# upload a function to all the cluster
exec src/redis-cli --cluster-yes --cluster call 127.0.0.1:[srv 0 port] \
- FUNCTION LOAD LUA TEST {redis.register_function('test', function() return 'hello' end)}
+ FUNCTION LOAD {#!lua name=TEST
+ redis.register_function('test', function() return 'hello' end)
+ }
# adding node to the cluster
exec src/redis-cli --cluster-yes --cluster add-node \
@@ -199,13 +192,15 @@ start_server [list overrides $base_conf] {
}
# make sure 'test' function was added to the new node
- assert_equal {{library_name TEST engine LUA description {} functions {{name test description {} flags {}}}}} [$node4_rd FUNCTION LIST]
+ assert_equal {{library_name TEST engine LUA functions {{name test description {} flags {}}}}} [$node4_rd FUNCTION LIST]
# add function to node 5
- assert_equal {OK} [$node5_rd FUNCTION LOAD LUA TEST {redis.register_function('test', function() return 'hello' end)}]
+ assert_equal {TEST} [$node5_rd FUNCTION LOAD {#!lua name=TEST
+ redis.register_function('test', function() return 'hello' end)
+ }]
# make sure functions was added to node 5
- assert_equal {{library_name TEST engine LUA description {} functions {{name test description {} flags {}}}}} [$node5_rd FUNCTION LIST]
+ assert_equal {{library_name TEST engine LUA functions {{name test description {} flags {}}}}} [$node5_rd FUNCTION LIST]
# adding node 5 to the cluster should failed because it already contains the 'test' function
catch {
@@ -215,11 +210,111 @@ start_server [list overrides $base_conf] {
} e
assert_match {*node already contains functions*} $e
}
-# stop 5 servers
-}
-}
-}
-}
+} ;# stop servers
+
+# Test redis-cli --cluster create, add-node.
+# Test that one slot can be migrated to and then away from the new node.
+test {Migrate the last slot away from a node using redis-cli} {
+ start_multiple_servers 4 [list overrides $base_conf] {
+
+ # Create a cluster of 3 nodes
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [csi 0 cluster_state] eq {ok} &&
+ [csi -1 cluster_state] eq {ok} &&
+ [csi -2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Insert some data
+ assert_equal OK [exec src/redis-cli -c -p [srv 0 port] SET foo bar]
+ set slot [exec src/redis-cli -c -p [srv 0 port] CLUSTER KEYSLOT foo]
+
+ # Add new node to the cluster
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_condition 1000 50 {
+ [csi 0 cluster_state] eq {ok} &&
+ [csi -1 cluster_state] eq {ok} &&
+ [csi -2 cluster_state] eq {ok} &&
+ [csi -3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ set newnode_r [redis_client -3]
+ set newnode_id [$newnode_r CLUSTER MYID]
+
+ # Find out which node has the key "foo" by asking the new node for a
+ # redirect.
+ catch { $newnode_r get foo } e
+ assert_match "MOVED $slot *" $e
+ lassign [split [lindex $e 2] :] owner_host owner_port
+ set owner_r [redis $owner_host $owner_port 0 $::tls]
+ set owner_id [$owner_r CLUSTER MYID]
+
+ # Move slot to new node using plain Redis commands
+ assert_equal OK [$newnode_r CLUSTER SETSLOT $slot IMPORTING $owner_id]
+ assert_equal OK [$owner_r CLUSTER SETSLOT $slot MIGRATING $newnode_id]
+ assert_equal {foo} [$owner_r CLUSTER GETKEYSINSLOT $slot 10]
+ assert_equal OK [$owner_r MIGRATE 127.0.0.1 [srv -3 port] "" 0 5000 KEYS foo]
+ assert_equal OK [$newnode_r CLUSTER SETSLOT $slot NODE $newnode_id]
+ assert_equal OK [$owner_r CLUSTER SETSLOT $slot NODE $newnode_id]
+
+ # Using --cluster check make sure we won't get `Not all slots are covered by nodes`.
+ # Wait for the cluster to become stable make sure the cluster is up during MIGRATE.
+ wait_for_condition 1000 50 {
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv 0 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -1 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -2 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -3 port]}] == 0 &&
+ [csi 0 cluster_state] eq {ok} &&
+ [csi -1 cluster_state] eq {ok} &&
+ [csi -2 cluster_state] eq {ok} &&
+ [csi -3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Move the only slot back to original node using redis-cli
+ exec src/redis-cli --cluster reshard 127.0.0.1:[srv -3 port] \
+ --cluster-from $newnode_id \
+ --cluster-to $owner_id \
+ --cluster-slots 1 \
+ --cluster-yes
+
+ # The empty node will become a replica of the new owner before the
+ # `MOVED` check, so let's wait for the cluster to become stable.
+ wait_for_condition 1000 50 {
+ [csi 0 cluster_state] eq {ok} &&
+ [csi -1 cluster_state] eq {ok} &&
+ [csi -2 cluster_state] eq {ok} &&
+ [csi -3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Check that the key foo has been migrated back to the original owner.
+ catch { $newnode_r get foo } e
+ assert_equal "MOVED $slot $owner_host:$owner_port" $e
+
+ # Check that the empty node has turned itself into a replica of the new
+ # owner and that the new owner knows that.
+ wait_for_condition 1000 50 {
+ [string match "*slave*" [$owner_r CLUSTER REPLICAS $owner_id]]
+ } else {
+ fail "Empty node didn't turn itself into a replica."
+ }
+ }
}
-} ;# tags \ No newline at end of file
+} ;# tags
+
+set ::singledb $old_singledb
diff --git a/tests/unit/functions.tcl b/tests/unit/functions.tcl
index 65e99e9e7..4c08261ed 100644
--- a/tests/unit/functions.tcl
+++ b/tests/unit/functions.tcl
@@ -1,61 +1,61 @@
proc get_function_code {args} {
- return [format "redis.register_function('%s', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0] [lindex $args 1]]
+ return [format "#!%s name=%s\nredis.register_function('%s', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0] [lindex $args 1] [lindex $args 2] [lindex $args 3]]
}
proc get_no_writes_function_code {args} {
- return [format "redis.register_function{function_name='%s', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0] [lindex $args 1]]
+ return [format "#!%s name=%s\nredis.register_function{function_name='%s', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0] [lindex $args 1] [lindex $args 2] [lindex $args 3]]
}
start_server {tags {"scripting"}} {
test {FUNCTION - Basic usage} {
- r function load LUA test [get_function_code test {return 'hello'}]
+ r function load [get_function_code LUA test test {return 'hello'}]
r fcall test 0
} {hello}
test {FUNCTION - Load with unknown argument} {
catch {
- r function load LUA test foo bar [get_function_code test {return 'hello'}]
+ r function load foo bar [get_function_code LUA test test {return 'hello'}]
} e
set _ $e
} {*Unknown option given*}
test {FUNCTION - Create an already exiting library raise error} {
catch {
- r function load LUA test [get_function_code test {return 'hello1'}]
+ r function load [get_function_code LUA test test {return 'hello1'}]
} e
set _ $e
} {*already exists*}
test {FUNCTION - Create an already exiting library raise error (case insensitive)} {
catch {
- r function load LUA TEST [get_function_code test {return 'hello1'}]
+ r function load [get_function_code LUA test test {return 'hello1'}]
} e
set _ $e
} {*already exists*}
test {FUNCTION - Create a library with wrong name format} {
catch {
- r function load LUA {bad\0foramat} [get_function_code test {return 'hello1'}]
+ r function load [get_function_code LUA {bad\0foramat} test {return 'hello1'}]
} e
set _ $e
} {*Library names can only contain letters and numbers*}
test {FUNCTION - Create library with unexisting engine} {
catch {
- r function load bad_engine test [get_function_code test {return 'hello1'}]
+ r function load [get_function_code bad_engine test test {return 'hello1'}]
} e
set _ $e
- } {*Engine not found*}
+ } {*Engine 'bad_engine' not found*}
test {FUNCTION - Test uncompiled script} {
catch {
- r function load LUA test1 {bad script}
+ r function load replace [get_function_code LUA test test {bad script}]
} e
set _ $e
} {*Error compiling function*}
test {FUNCTION - test replace argument} {
- r function load LUA test REPLACE [get_function_code test {return 'hello1'}]
+ r function load REPLACE [get_function_code LUA test test {return 'hello1'}]
r fcall test 0
} {hello1}
@@ -76,12 +76,8 @@ start_server {tags {"scripting"}} {
set _ $e
} {*Function not found*}
- test {FUNCTION - test description argument} {
- r function load LUA test DESCRIPTION {some description} [get_function_code test {return 'hello'}]
- r function list
- } {{library_name test engine LUA description {some description} functions {{name test description {} flags {}}}}}
-
test {FUNCTION - test fcall bad arguments} {
+ r function load [get_function_code LUA test test {return 'hello'}]
catch {
r fcall test bad_arg
} e
@@ -133,14 +129,14 @@ start_server {tags {"scripting"}} {
assert_match "*Error trying to load the RDB*" $e
r debug reload noflush merge
r function list
- } {{library_name test engine LUA description {some description} functions {{name test description {} flags {}}}}} {needs:debug}
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}} {needs:debug}
test {FUNCTION - test debug reload with nosave and noflush} {
r function delete test
r set x 1
- r function load LUA test1 DESCRIPTION {some description} [get_function_code test1 {return 'hello'}]
+ r function load [get_function_code LUA test1 test1 {return 'hello'}]
r debug reload
- r function load LUA test2 DESCRIPTION {some description} [get_function_code test2 {return 'hello'}]
+ r function load [get_function_code LUA test2 test2 {return 'hello'}]
r debug reload nosave noflush merge
assert_equal [r fcall test1 0] {hello}
assert_equal [r fcall test2 0] {hello}
@@ -148,21 +144,21 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test flushall and flushdb do not clean functions} {
r function flush
- r function load lua test REPLACE [get_function_code test {return redis.call('set', 'x', '1')}]
+ r function load REPLACE [get_function_code lua test test {return redis.call('set', 'x', '1')}]
r flushall
r flushdb
r function list
- } {{library_name test engine LUA description {} functions {{name test description {} flags {}}}}}
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
test {FUNCTION - test function dump and restore} {
r function flush
- r function load lua test description {some description} [get_function_code test {return 'hello'}]
+ r function load [get_function_code lua test test {return 'hello'}]
set e [r function dump]
r function delete test
assert_match {} [r function list]
r function restore $e
r function list
- } {{library_name test engine LUA description {some description} functions {{name test description {} flags {}}}}}
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
test {FUNCTION - test function dump and restore with flush argument} {
set e [r function dump]
@@ -170,17 +166,17 @@ start_server {tags {"scripting"}} {
assert_match {} [r function list]
r function restore $e FLUSH
r function list
- } {{library_name test engine LUA description {some description} functions {{name test description {} flags {}}}}}
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
test {FUNCTION - test function dump and restore with append argument} {
set e [r function dump]
r function flush
assert_match {} [r function list]
- r function load lua test [get_function_code test {return 'hello1'}]
+ r function load [get_function_code lua test test {return 'hello1'}]
catch {r function restore $e APPEND} err
assert_match {*already exists*} $err
r function flush
- r function load lua test1 [get_function_code test1 {return 'hello1'}]
+ r function load [get_function_code lua test1 test1 {return 'hello1'}]
r function restore $e APPEND
assert_match {hello} [r fcall test 0]
assert_match {hello1} [r fcall test1 0]
@@ -188,11 +184,11 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test function dump and restore with replace argument} {
r function flush
- r function load LUA test DESCRIPTION {some description} [get_function_code test {return 'hello'}]
+ r function load [get_function_code LUA test test {return 'hello'}]
set e [r function dump]
r function flush
assert_match {} [r function list]
- r function load lua test [get_function_code test {return 'hello1'}]
+ r function load [get_function_code lua test test {return 'hello1'}]
assert_match {hello1} [r fcall test 0]
r function restore $e REPLACE
assert_match {hello} [r fcall test 0]
@@ -200,11 +196,11 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test function restore with bad payload do not drop existing functions} {
r function flush
- r function load LUA test DESCRIPTION {some description} [get_function_code test {return 'hello'}]
+ r function load [get_function_code LUA test test {return 'hello'}]
catch {r function restore bad_payload} e
assert_match {*payload version or checksum are wrong*} $e
r function list
- } {{library_name test engine LUA description {some description} functions {{name test description {} flags {}}}}}
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
test {FUNCTION - test function restore with wrong number of arguments} {
catch {r function restore arg1 args2 arg3} e
@@ -212,19 +208,19 @@ start_server {tags {"scripting"}} {
} {*Unknown subcommand or wrong number of arguments for 'restore'. Try FUNCTION HELP.}
test {FUNCTION - test fcall_ro with write command} {
- r function load lua test REPLACE [get_no_writes_function_code test {return redis.call('set', 'x', '1')}]
+ r function load REPLACE [get_no_writes_function_code lua test test {return redis.call('set', 'x', '1')}]
catch { r fcall_ro test 0 } e
set _ $e
} {*Write commands are not allowed from read-only scripts*}
test {FUNCTION - test fcall_ro with read only commands} {
- r function load lua test REPLACE [get_no_writes_function_code test {return redis.call('get', 'x')}]
+ r function load REPLACE [get_no_writes_function_code lua test test {return redis.call('get', 'x')}]
r set x 1
r fcall_ro test 0
} {1}
test {FUNCTION - test keys and argv} {
- r function load lua test REPLACE [get_function_code test {return redis.call('set', KEYS[1], ARGV[1])}]
+ r function load REPLACE [get_function_code lua test test {return redis.call('set', KEYS[1], ARGV[1])}]
r fcall test 1 x foo
r get x
} {foo}
@@ -240,7 +236,7 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test function kill} {
set rd [redis_deferring_client]
r config set busy-reply-threshold 10
- r function load lua test REPLACE [get_function_code test {local a = 1 while true do a = a + 1 end}]
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
$rd fcall test 0
after 200
catch {r ping} e
@@ -254,7 +250,7 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test script kill not working on function} {
set rd [redis_deferring_client]
r config set busy-reply-threshold 10
- r function load lua test REPLACE [get_function_code test {local a = 1 while true do a = a + 1 end}]
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
$rd fcall test 0
after 200
catch {r ping} e
@@ -281,18 +277,18 @@ start_server {tags {"scripting"}} {
}
test {FUNCTION - test function flush} {
- r function load lua test REPLACE [get_function_code test {local a = 1 while true do a = a + 1 end}]
- assert_match {{library_name test engine LUA description {} functions {{name test description {} flags {}}}}} [r function list]
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
r function flush
assert_match {} [r function list]
- r function load lua test REPLACE [get_function_code test {local a = 1 while true do a = a + 1 end}]
- assert_match {{library_name test engine LUA description {} functions {{name test description {} flags {}}}}} [r function list]
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
r function flush async
assert_match {} [r function list]
- r function load lua test REPLACE [get_function_code test {local a = 1 while true do a = a + 1 end}]
- assert_match {{library_name test engine LUA description {} functions {{name test description {} flags {}}}}} [r function list]
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
r function flush sync
assert_match {} [r function list]
}
@@ -310,7 +306,7 @@ start_server {tags {"scripting repl external:skip"}} {
start_server {} {
test "Connect a replica to the master instance" {
r -1 slaveof [srv 0 host] [srv 0 port]
- wait_for_condition 50 100 {
+ wait_for_condition 150 100 {
[s -1 role] eq {slave} &&
[string match {*master_link_status:up*} [r -1 info replication]]
} else {
@@ -319,9 +315,9 @@ start_server {tags {"scripting repl external:skip"}} {
}
test {FUNCTION - creation is replicated to replica} {
- r function load LUA test DESCRIPTION {some description} [get_no_writes_function_code test {return 'hello'}]
- wait_for_condition 50 100 {
- [r -1 function list] eq {{library_name test engine LUA description {some description} functions {{name test description {} flags no-writes}}}}
+ r function load [get_no_writes_function_code LUA test test {return 'hello'}]
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {{library_name test engine LUA functions {{name test description {} flags no-writes}}}}
} else {
fail "Failed waiting for function to replicate to replica"
}
@@ -335,7 +331,7 @@ start_server {tags {"scripting repl external:skip"}} {
set e [r function dump]
r function delete test
- wait_for_condition 50 100 {
+ wait_for_condition 150 100 {
[r -1 function list] eq {}
} else {
fail "Failed waiting for function to replicate to replica"
@@ -343,8 +339,8 @@ start_server {tags {"scripting repl external:skip"}} {
assert_equal [r function restore $e] {OK}
- wait_for_condition 50 100 {
- [r -1 function list] eq {{library_name test engine LUA description {some description} functions {{name test description {} flags no-writes}}}}
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {{library_name test engine LUA functions {{name test description {} flags no-writes}}}}
} else {
fail "Failed waiting for function to replicate to replica"
}
@@ -352,7 +348,7 @@ start_server {tags {"scripting repl external:skip"}} {
test {FUNCTION - delete is replicated to replica} {
r function delete test
- wait_for_condition 50 100 {
+ wait_for_condition 150 100 {
[r -1 function list] eq {}
} else {
fail "Failed waiting for function to replicate to replica"
@@ -360,14 +356,14 @@ start_server {tags {"scripting repl external:skip"}} {
}
test {FUNCTION - flush is replicated to replica} {
- r function load LUA test DESCRIPTION {some description} [get_function_code test {return 'hello'}]
- wait_for_condition 50 100 {
- [r -1 function list] eq {{library_name test engine LUA description {some description} functions {{name test description {} flags {}}}}}
+ r function load [get_function_code LUA test test {return 'hello'}]
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {{library_name test engine LUA functions {{name test description {} flags {}}}}}
} else {
fail "Failed waiting for function to replicate to replica"
}
r function flush
- wait_for_condition 50 100 {
+ wait_for_condition 150 100 {
[r -1 function list] eq {}
} else {
fail "Failed waiting for function to replicate to replica"
@@ -378,11 +374,11 @@ start_server {tags {"scripting repl external:skip"}} {
r -1 slaveof no one
# creating a function after disconnect to make sure function
# is replicated on rdb phase
- r function load LUA test DESCRIPTION {some description} [get_no_writes_function_code test {return 'hello'}]
+ r function load [get_no_writes_function_code LUA test test {return 'hello'}]
# reconnect the replica
r -1 slaveof [srv 0 host] [srv 0 port]
- wait_for_condition 50 100 {
+ wait_for_condition 150 100 {
[s -1 role] eq {slave} &&
[string match {*master_link_status:up*} [r -1 info replication]]
} else {
@@ -396,11 +392,11 @@ start_server {tags {"scripting repl external:skip"}} {
test "FUNCTION - test replication to replica on rdb phase info command" {
r -1 function list
- } {{library_name test engine LUA description {some description} functions {{name test description {} flags no-writes}}}}
+ } {{library_name test engine LUA functions {{name test description {} flags no-writes}}}}
test "FUNCTION - create on read only replica" {
catch {
- r -1 function load LUA test DESCRIPTION {some description} [get_function_code test {return 'hello'}]
+ r -1 function load [get_function_code LUA test test {return 'hello'}]
} e
set _ $e
} {*can't write against a read only replica*}
@@ -413,10 +409,10 @@ start_server {tags {"scripting repl external:skip"}} {
} {*can't write against a read only replica*}
test "FUNCTION - function effect is replicated to replica" {
- r function load LUA test REPLACE [get_function_code test {return redis.call('set', 'x', '1')}]
+ r function load REPLACE [get_function_code LUA test test {return redis.call('set', 'x', '1')}]
r fcall test 0
assert {[r get x] eq {1}}
- wait_for_condition 50 100 {
+ wait_for_condition 150 100 {
[r -1 get x] eq {1}
} else {
fail "Failed waiting function effect to be replicated to replica"
@@ -436,12 +432,12 @@ test {FUNCTION can processes create, delete and flush commands in AOF when doing
start_server {} {
r config set appendonly yes
waitForBgrewriteaof r
- r FUNCTION LOAD lua test "redis.register_function('test', function() return 'hello' end)"
+ r FUNCTION LOAD "#!lua name=test\nredis.register_function('test', function() return 'hello' end)"
r config set slave-read-only yes
r slaveof 127.0.0.1 0
r debug loadaof
r slaveof no one
- assert_equal [r function list] {{library_name test engine LUA description {} functions {{name test description {} flags {}}}}}
+ assert_equal [r function list] {{library_name test engine LUA functions {{name test description {} flags {}}}}}
r FUNCTION DELETE test
@@ -450,7 +446,7 @@ test {FUNCTION can processes create, delete and flush commands in AOF when doing
r slaveof no one
assert_equal [r function list] {}
- r FUNCTION LOAD lua test "redis.register_function('test', function() return 'hello' end)"
+ r FUNCTION LOAD "#!lua name=test\nredis.register_function('test', function() return 'hello' end)"
r FUNCTION FLUSH
r slaveof 127.0.0.1 0
@@ -462,7 +458,7 @@ test {FUNCTION can processes create, delete and flush commands in AOF when doing
start_server {tags {"scripting"}} {
test {LIBRARIES - test shared function can access default globals} {
- r function load LUA lib1 {
+ r function load {#!lua name=lib1
local function ping()
return redis.call('ping')
end
@@ -477,7 +473,7 @@ start_server {tags {"scripting"}} {
} {PONG}
test {LIBRARIES - usage and code sharing} {
- r function load LUA lib1 REPLACE {
+ r function load REPLACE {#!lua name=lib1
local function add1(a)
return a + 1
end
@@ -497,11 +493,11 @@ start_server {tags {"scripting"}} {
assert_equal [r fcall f1 0] {2}
assert_equal [r fcall f2 0] {3}
r function list
- } {{library_name lib1 engine LUA description {} functions {*}}}
+ } {{library_name lib1 engine LUA functions {*}}}
test {LIBRARIES - test registration failure revert the entire load} {
catch {
- r function load LUA lib1 replace {
+ r function load replace {#!lua name=lib1
local function add1(a)
return a + 2
end
@@ -524,7 +520,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration function name collision} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function(
'f1',
function(keys, args)
@@ -540,7 +536,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration function name collision on same library} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function(
'f1',
function(keys, args)
@@ -560,7 +556,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration with no argument} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function()
}
} e
@@ -569,7 +565,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration with only name} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function('f1')
}
} e
@@ -578,7 +574,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration with to many arguments} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function('f1', function() return 1 end, {}, 'description', 'extra arg')
}
} e
@@ -587,7 +583,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration with no string name} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function(nil, function() return 1 end)
}
} e
@@ -596,7 +592,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration with wrong name format} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function('test\0test', function() return 1 end)
}
} e
@@ -605,7 +601,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - test registration with empty name} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
redis.register_function('', function() return 1 end)
}
} e
@@ -614,7 +610,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - math.random from function load} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
return math.random()
}
} e
@@ -623,7 +619,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - redis.call from function load} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
return redis.call('ping')
}
} e
@@ -632,7 +628,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - redis.call from function load} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
return redis.setresp(3)
}
} e
@@ -641,7 +637,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - redis.set_repl from function load} {
catch {
- r function load LUA lib2 replace {
+ r function load replace {#!lua name=lib2
return redis.set_repl(redis.REPL_NONE)
}
} e
@@ -657,7 +653,7 @@ start_server {tags {"scripting"}} {
# have another level of protection on the C
# code itself and we want to test it and verify
# that it works properly.
- r function load LUA lib1 replace {
+ r function load replace {#!lua name=lib1
local lib = redis
lib.register_function('f1', function ()
lib.redis = redis
@@ -675,22 +671,34 @@ start_server {tags {"scripting"}} {
}
assert_equal {OK} [r fcall f1 0]
- catch {[r function load LUA lib2 {redis.math.random()}]} e
+ catch {[r function load {#!lua name=lib2
+ redis.math.random()
+ }]} e
assert_match {*can only be called inside a script invocation*} $e
- catch {[r function load LUA lib2 {redis.math.randomseed()}]} e
+ catch {[r function load {#!lua name=lib2
+ redis.math.randomseed()
+ }]} e
assert_match {*can only be called inside a script invocation*} $e
- catch {[r function load LUA lib2 {redis.redis.call('ping')}]} e
+ catch {[r function load {#!lua name=lib2
+ redis.redis.call('ping')
+ }]} e
assert_match {*can only be called inside a script invocation*} $e
- catch {[r function load LUA lib2 {redis.redis.pcall('ping')}]} e
+ catch {[r function load {#!lua name=lib2
+ redis.redis.pcall('ping')
+ }]} e
assert_match {*can only be called inside a script invocation*} $e
- catch {[r function load LUA lib2 {redis.redis.setresp(3)}]} e
+ catch {[r function load {#!lua name=lib2
+ redis.redis.setresp(3)
+ }]} e
assert_match {*can only be called inside a script invocation*} $e
- catch {[r function load LUA lib2 {redis.redis.set_repl(redis.redis.REPL_NONE)}]} e
+ catch {[r function load {#!lua name=lib2
+ redis.redis.set_repl(redis.redis.REPL_NONE)
+ }]} e
assert_match {*can only be called inside a script invocation*} $e
catch {[r fcall f2 0]} e
@@ -703,7 +711,7 @@ start_server {tags {"scripting"}} {
} {}
test {LIBRARIES - register function inside a function} {
- r function load LUA lib {
+ r function load {#!lua name=lib
redis.register_function(
'f1',
function(keys, args)
@@ -724,7 +732,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - register library with no functions} {
r function flush
catch {
- r function load LUA lib {
+ r function load {#!lua name=lib
return 1
}
} e
@@ -733,7 +741,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - load timeout} {
catch {
- r function load LUA lib {
+ r function load {#!lua name=lib
local a = 1
while 1 do a = a + 1 end
}
@@ -743,7 +751,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - verify global protection on the load run} {
catch {
- r function load LUA lib {
+ r function load {#!lua name=lib
a = 1
}
} e
@@ -751,7 +759,7 @@ start_server {tags {"scripting"}} {
} {*attempted to create global variable 'a'*}
test {LIBRARIES - named arguments} {
- r function load LUA lib {
+ r function load {#!lua name=lib
redis.register_function{
function_name='f1',
callback=function()
@@ -761,11 +769,11 @@ start_server {tags {"scripting"}} {
}
}
r function list
- } {{library_name lib engine LUA description {} functions {{name f1 description {some desc} flags {}}}}}
+ } {{library_name lib engine LUA functions {{name f1 description {some desc} flags {}}}}}
test {LIBRARIES - named arguments, bad function name} {
catch {
- r function load LUA lib replace {
+ r function load replace {#!lua name=lib
redis.register_function{
function_name=function() return 1 end,
callback=function()
@@ -780,7 +788,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - named arguments, bad callback type} {
catch {
- r function load LUA lib replace {
+ r function load replace {#!lua name=lib
redis.register_function{
function_name='f1',
callback='bad',
@@ -793,7 +801,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - named arguments, bad description} {
catch {
- r function load LUA lib replace {
+ r function load replace {#!lua name=lib
redis.register_function{
function_name='f1',
callback=function()
@@ -808,7 +816,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - named arguments, unknown argument} {
catch {
- r function load LUA lib replace {
+ r function load replace {#!lua name=lib
redis.register_function{
function_name='f1',
callback=function()
@@ -824,7 +832,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - named arguments, missing function name} {
catch {
- r function load LUA lib replace {
+ r function load replace {#!lua name=lib
redis.register_function{
callback=function()
return 'hello'
@@ -838,7 +846,7 @@ start_server {tags {"scripting"}} {
test {LIBRARIES - named arguments, missing callback} {
catch {
- r function load LUA lib replace {
+ r function load replace {#!lua name=lib
redis.register_function{
function_name='f1',
description='desc'
@@ -850,7 +858,7 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test function restore with function name collision} {
r function flush
- r function load lua lib1 {
+ r function load {#!lua name=lib1
local function add1(a)
return a + 1
end
@@ -877,7 +885,7 @@ start_server {tags {"scripting"}} {
r function flush
# load a library with different name but with the same function name
- r function load lua lib1 {
+ r function load {#!lua name=lib1
redis.register_function(
'f6',
function(keys, args)
@@ -885,7 +893,7 @@ start_server {tags {"scripting"}} {
end
)
}
- r function load lua lib2 {
+ r function load {#!lua name=lib2
local function add1(a)
return a + 1
end
@@ -926,14 +934,18 @@ start_server {tags {"scripting"}} {
test {FUNCTION - test function list with code} {
r function flush
- r function load lua library1 {redis.register_function('f6', function(keys, args) return 7 end)}
+ r function load {#!lua name=library1
+ redis.register_function('f6', function(keys, args) return 7 end)
+ }
r function list withcode
- } {{library_name library1 engine LUA description {} functions {{name f6 description {} flags {}}} library_code {redis.register_function('f6', function(keys, args) return 7 end)}}}
+ } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}} library_code {*redis.register_function('f6', function(keys, args) return 7 end)*}}}
test {FUNCTION - test function list with pattern} {
- r function load lua lib1 {redis.register_function('f7', function(keys, args) return 7 end)}
+ r function load {#!lua name=lib1
+ redis.register_function('f7', function(keys, args) return 7 end)
+ }
r function list libraryname library*
- } {{library_name library1 engine LUA description {} functions {{name f6 description {} flags {}}}}}
+ } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}}}}
test {FUNCTION - test function list wrong argument} {
catch {r function list bad_argument} e
@@ -957,12 +969,16 @@ start_server {tags {"scripting"}} {
test {FUNCTION - verify OOM on function load and function restore} {
r function flush
- r function load lua test replace {redis.register_function('f1', function() return 1 end)}
+ r function load replace {#!lua name=test
+ redis.register_function('f1', function() return 1 end)
+ }
set payload [r function dump]
r config set maxmemory 1
r function flush
- catch {r function load lua test replace {redis.register_function('f1', function() return 1 end)}} e
+ catch {r function load replace {#!lua name=test
+ redis.register_function('f1', function() return 1 end)
+ }} e
assert_match {*command not allowed when used memory*} $e
r function flush
@@ -973,11 +989,13 @@ start_server {tags {"scripting"}} {
}
test {FUNCTION - verify allow-omm allows running any command} {
- r FUNCTION load lua f1 replace { redis.register_function{
- function_name='f1',
- callback=function() return redis.call('set', 'x', '1') end,
- flags={'allow-oom'}
- }}
+ r FUNCTION load replace {#!lua name=f1
+ redis.register_function{
+ function_name='f1',
+ callback=function() return redis.call('set', 'x', '1') end,
+ flags={'allow-oom'}
+ }
+ }
r config set maxmemory 1
@@ -990,53 +1008,65 @@ start_server {tags {"scripting"}} {
start_server {tags {"scripting"}} {
test {FUNCTION - wrong flags type named arguments} {
- catch {r function load lua test replace {redis.register_function{
- function_name = 'f1',
- callback = function() return 1 end,
- flags = 'bad flags type'
- }}} e
+ catch {r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return 1 end,
+ flags = 'bad flags type'
+ }
+ }} e
set _ $e
} {*flags argument to redis.register_function must be a table representing function flags*}
test {FUNCTION - wrong flag type} {
- catch {r function load lua test replace {redis.register_function{
- function_name = 'f1',
- callback = function() return 1 end,
- flags = {function() return 1 end}
- }}} e
+ catch {r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return 1 end,
+ flags = {function() return 1 end}
+ }
+ }} e
set _ $e
} {*unknown flag given*}
test {FUNCTION - unknown flag} {
- catch {r function load lua test replace {redis.register_function{
- function_name = 'f1',
- callback = function() return 1 end,
- flags = {'unknown'}
- }}} e
+ catch {r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return 1 end,
+ flags = {'unknown'}
+ }
+ }} e
set _ $e
} {*unknown flag given*}
test {FUNCTION - write script on fcall_ro} {
- r function load lua test replace {redis.register_function{
- function_name = 'f1',
- callback = function() return redis.call('set', 'x', 1) end
- }}
+ r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return redis.call('set', 'x', 1) end
+ }
+ }
catch {r fcall_ro f1 0} e
set _ $e
} {*Can not execute a script with write flag using \*_ro command*}
test {FUNCTION - write script with no-writes flag} {
- r function load lua test replace {redis.register_function{
- function_name = 'f1',
- callback = function() return redis.call('set', 'x', 1) end,
- flags = {'no-writes'}
- }}
+ r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return redis.call('set', 'x', 1) end,
+ flags = {'no-writes'}
+ }
+ }
catch {r fcall f1 0} e
set _ $e
} {*Write commands are not allowed from read-only scripts*}
test {FUNCTION - deny oom} {
- r FUNCTION load lua test replace { redis.register_function('f1', function() return redis.call('set', 'x', '1') end) }
+ r FUNCTION load replace {#!lua name=test
+ redis.register_function('f1', function() return redis.call('set', 'x', '1') end)
+ }
r config set maxmemory 1
@@ -1047,7 +1077,9 @@ start_server {tags {"scripting"}} {
}
test {FUNCTION - deny oom on no-writes function} {
- r FUNCTION load lua test replace {redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}}}
+ r FUNCTION load replace {#!lua name=test
+ redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}}
+ }
r config set maxmemory 1
@@ -1061,7 +1093,7 @@ start_server {tags {"scripting"}} {
}
test {FUNCTION - allow stale} {
- r FUNCTION load lua test replace {
+ r FUNCTION load replace {#!lua name=test
redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}}
redis.register_function{function_name='f2', callback=function() return 'hello' end, flags={'allow-stale', 'no-writes'}}
redis.register_function{function_name='f3', callback=function() return redis.call('get', 'x') end, flags={'allow-stale', 'no-writes'}}
@@ -1087,7 +1119,7 @@ start_server {tags {"scripting"}} {
} {} {external:skip}
test {FUNCTION - redis version api} {
- r FUNCTION load lua test replace {
+ r FUNCTION load replace {#!lua name=test
local version = redis.REDIS_VERSION_NUM
redis.register_function{function_name='get_version_v1', callback=function()
@@ -1106,12 +1138,12 @@ start_server {tags {"scripting"}} {
test {FUNCTION - function stats} {
r FUNCTION FLUSH
- r FUNCTION load lua test1 {
+ r FUNCTION load {#!lua name=test1
redis.register_function('f1', function() return 1 end)
redis.register_function('f2', function() return 1 end)
}
- r FUNCTION load lua test2 {
+ r FUNCTION load {#!lua name=test2
redis.register_function('f3', function() return 1 end)
}
@@ -1132,4 +1164,38 @@ start_server {tags {"scripting"}} {
r function flush
r function stats
} {running_script {} engines {LUA {libraries_count 0 functions_count 0}}}
+
+ test {FUNCTION - function test empty engine} {
+ catch {r function load replace {#! name=test
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Engine '' not found}
+
+ test {FUNCTION - function test unknown metadata value} {
+ catch {r function load replace {#!lua name=test foo=bar
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Invalid metadata value given: foo=bar}
+
+ test {FUNCTION - function test no name} {
+ catch {r function load replace {#!lua
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Library name was not given}
+
+ test {FUNCTION - function test multiple names} {
+ catch {r function load replace {#!lua name=foo name=bar
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Invalid metadata value, name argument was given multiple times}
+
+ test {FUNCTION - function test name with quotes} {
+ r function load replace {#!lua name="foo"
+ redis.register_function('foo', function() return 1 end)
+ }
+ } {foo}
}
diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl
index 59c96b5ad..c530a31d3 100644
--- a/tests/unit/introspection.tcl
+++ b/tests/unit/introspection.tcl
@@ -184,6 +184,7 @@ start_server {tags {"introspection"}} {
always-show-logo
syslog-enabled
cluster-enabled
+ disable-thp
aclfile
unixsocket
pidfile
diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl
index e6663ce06..cef4b8fdf 100644
--- a/tests/unit/memefficiency.tcl
+++ b/tests/unit/memefficiency.tcl
@@ -82,7 +82,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
fail "defrag didn't stop."
}
- # Test the the fragmentation is lower.
+ # Test the fragmentation is lower.
after 120 ;# serverCron only updates the info once in 100ms
set frag [s allocator_frag_ratio]
set max_latency 0
@@ -226,7 +226,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
fail "defrag didn't stop."
}
- # test the the fragmentation is lower
+ # test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms
if {$::verbose} {
puts "used [s allocator_allocated]"
@@ -336,7 +336,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
fail "defrag didn't stop."
}
- # test the the fragmentation is lower
+ # test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms
set frag [s allocator_frag_ratio]
set max_latency 0
@@ -433,7 +433,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
fail "defrag didn't stop."
}
- # test the the fragmentation is lower
+ # test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms
set misses [s active_defrag_misses]
set hits [s active_defrag_hits]
@@ -553,7 +553,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
fail "defrag didn't stop."
}
- # test the the fragmentation is lower
+ # test the fragmentation is lower
after 120 ;# serverCron only updates the info once in 100ms
set misses [s active_defrag_misses]
set hits [s active_defrag_hits]
diff --git a/tests/unit/moduleapi/aclcheck.tcl b/tests/unit/moduleapi/aclcheck.tcl
index 953f4bf05..5adf65371 100644
--- a/tests/unit/moduleapi/aclcheck.tcl
+++ b/tests/unit/moduleapi/aclcheck.tcl
@@ -62,10 +62,13 @@ start_server {tags {"modules acl"}} {
# rm call check for key permission (y: only WRITE)
assert_equal [r aclcheck.rm_call set y 5] OK
assert_error {*NOPERM*} {r aclcheck.rm_call set y 5 get}
+ assert_error {ERR acl verification failed, can't access at least one of the keys mentioned in the command arguments.} {r aclcheck.rm_call_with_errors set y 5 get}
# rm call check for key permission (z: only READ)
assert_error {*NOPERM*} {r aclcheck.rm_call set z 5}
+ assert_error {ERR acl verification failed, can't access at least one of the keys mentioned in the command arguments.} {r aclcheck.rm_call_with_errors set z 5}
assert_error {*NOPERM*} {r aclcheck.rm_call set z 6 get}
+ assert_error {ERR acl verification failed, can't access at least one of the keys mentioned in the command arguments.} {r aclcheck.rm_call_with_errors set z 6 get}
# verify that new log entry added
set entry [lindex [r ACL LOG] 0]
@@ -77,6 +80,8 @@ start_server {tags {"modules acl"}} {
r acl setuser default -set
catch {r aclcheck.rm_call set x 5} e
assert_match {*NOPERM*} $e
+ catch {r aclcheck.rm_call_with_errors set x 5} e
+ assert_match {ERR acl verification failed, can't run this command or subcommand.} $e
# verify that new log entry added
set entry [lindex [r ACL LOG] 0]
diff --git a/tests/unit/moduleapi/auth.tcl b/tests/unit/moduleapi/auth.tcl
index 6d8c3bd6a..c7c2def77 100644
--- a/tests/unit/moduleapi/auth.tcl
+++ b/tests/unit/moduleapi/auth.tcl
@@ -68,6 +68,22 @@ start_server {tags {"modules"}} {
assert_equal [r acl whoami] "default"
}
+ test {modules can redact arguments} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ r auth.redact 1 2 3 4
+ r auth.redact 1 2 3
+ r config set slowlog-log-slower-than -1
+ set slowlog_resp [r slowlog get]
+
+ # There will be 3 records, slowlog reset and the
+ # two auth redact calls.
+ assert_equal 3 [llength $slowlog_resp]
+ assert_equal {slowlog reset} [lindex [lindex $slowlog_resp 2] 3]
+ assert_equal {auth.redact 1 (redacted) 3 (redacted)} [lindex [lindex $slowlog_resp 1] 3]
+ assert_equal {auth.redact (redacted) 2 (redacted)} [lindex [lindex $slowlog_resp 0] 3]
+ }
+
test "Unload the module - testacl" {
assert_equal {OK} [r module unload testacl]
}
diff --git a/tests/unit/moduleapi/blockedclient.tcl b/tests/unit/moduleapi/blockedclient.tcl
index ea2d6f5a4..de3cf5946 100644
--- a/tests/unit/moduleapi/blockedclient.tcl
+++ b/tests/unit/moduleapi/blockedclient.tcl
@@ -184,17 +184,17 @@ start_server {tags {"modules"}} {
r config resetstat
# simple module command that replies with string error
- assert_error "NULL reply returned" {r do_rm_call hgetalllll}
- assert_equal [errorrstat NULL r] {count=1}
+ assert_error "ERR Unknown Redis command 'hgetalllll'." {r do_rm_call hgetalllll}
+ assert_equal [errorrstat ERR r] {count=1}
# module command that replies with string error from bg thread
assert_error "NULL reply returned" {r do_bg_rm_call hgetalllll}
- assert_equal [errorrstat NULL r] {count=2}
+ assert_equal [errorrstat NULL r] {count=1}
# module command that returns an arity error
r do_rm_call set x x
assert_error "ERR wrong number of arguments for 'do_rm_call' command" {r do_rm_call}
- assert_equal [errorrstat ERR r] {count=1}
+ assert_equal [errorrstat ERR r] {count=2}
# RM_Call that propagates an error
assert_error "WRONGTYPE*" {r do_rm_call hgetall x}
diff --git a/tests/unit/moduleapi/blockonkeys.tcl b/tests/unit/moduleapi/blockonkeys.tcl
index 75191b3c7..094bcc0c0 100644
--- a/tests/unit/moduleapi/blockonkeys.tcl
+++ b/tests/unit/moduleapi/blockonkeys.tcl
@@ -168,6 +168,38 @@ start_server {tags {"modules"}} {
assert_error "*unblocked*" {$rd read}
}
+ test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK TIMEOUT} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpop k 0 NO_TO_CB
+ ;# wait until clients are actually blocked
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Clients are not blocked"
+ }
+ assert_equal [r client unblock $cid timeout] {0}
+ $rd close
+ }
+
+ test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK ERROR} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpop k 0 NO_TO_CB
+ ;# wait until clients are actually blocked
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Clients are not blocked"
+ }
+ assert_equal [r client unblock $cid error] {0}
+ $rd close
+ }
+
test {Module client re-blocked on keys after woke up on wrong type} {
r del k
set rd [redis_deferring_client]
diff --git a/tests/unit/moduleapi/cluster.tcl b/tests/unit/moduleapi/cluster.tcl
index b2d2df899..f1238992d 100644
--- a/tests/unit/moduleapi/cluster.tcl
+++ b/tests/unit/moduleapi/cluster.tcl
@@ -20,8 +20,10 @@ proc csi {args} {
set testmodule [file normalize tests/modules/blockonkeys.so]
set testmodule_nokey [file normalize tests/modules/blockonbackground.so]
+set testmodule_blockedclient [file normalize tests/modules/blockedclient.so]
# make sure the test infra won't use SELECT
+set old_singledb $::singledb
set ::singledb 1
# cluster creation is complicated with TLS, and the current tests don't really need that coverage
@@ -43,6 +45,10 @@ start_server [list overrides $base_conf] {
$node2 module load $testmodule_nokey
$node3 module load $testmodule_nokey
+ $node1 module load $testmodule_blockedclient
+ $node2 module load $testmodule_blockedclient
+ $node3 module load $testmodule_blockedclient
+
test {Create 3 node cluster} {
exec src/redis-cli --cluster-yes --cluster create \
127.0.0.1:[srv 0 port] \
@@ -193,6 +199,10 @@ start_server [list overrides $base_conf] {
assert_equal [s -1 blocked_clients] {0}
}
+ test "Verify command RM_Call is rejected when cluster is down" {
+ assert_error "ERR Can not execute a command 'set' while the cluster is down" {$node1 do_rm_call set x 1}
+ }
+
exec kill -SIGCONT $node3_pid
$node1_rd close
$node2_rd close
@@ -202,4 +212,6 @@ start_server [list overrides $base_conf] {
}
}
-} ;# tags \ No newline at end of file
+} ;# tags
+
+set ::singledb $old_singledb
diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl
index cb36c9f71..814f31bc0 100644
--- a/tests/unit/moduleapi/hooks.tcl
+++ b/tests/unit/moduleapi/hooks.tcl
@@ -150,13 +150,17 @@ tags "modules" {
r swapdb 0 10
assert_equal [r hooks.event_last swapdb-first] 0
assert_equal [r hooks.event_last swapdb-second] 10
+ }
+ test {Test configchange hooks} {
+ r config set rdbcompression no
+ assert_equal [r hooks.event_last config-change-count] 1
+ assert_equal [r hooks.event_last config-change-first] rdbcompression
}
# look into the log file of the server that just exited
test {Test shutdown hook} {
assert_equal [string match {*module-event-shutdown*} [exec tail -5 < $replica_stdout]] 1
}
-
}
}
diff --git a/tests/unit/moduleapi/moduleconfigs.tcl b/tests/unit/moduleapi/moduleconfigs.tcl
new file mode 100644
index 000000000..01aa1e88e
--- /dev/null
+++ b/tests/unit/moduleapi/moduleconfigs.tcl
@@ -0,0 +1,234 @@
+set testmodule [file normalize tests/modules/moduleconfigs.so]
+set testmoduletwo [file normalize tests/modules/moduleconfigstwo.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+ test {Config get commands work} {
+ # Make sure config get module config works
+ assert_equal [lindex [lindex [r module list] 0] 1] moduleconfigs
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ }
+
+ test {Config set commands work} {
+ # Make sure that config sets work during runtime
+ r config set moduleconfigs.mutable_bool no
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ r config set moduleconfigs.memory_numeric 1mb
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1048576"
+ r config set moduleconfigs.string wafflewednesdays
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string wafflewednesdays"
+ set not_embstr [string repeat A 50]
+ r config set moduleconfigs.string $not_embstr
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string $not_embstr"
+ r config set moduleconfigs.string \x73\x75\x70\x65\x72\x20\x00\x73\x65\x63\x72\x65\x74\x20\x70\x61\x73\x73\x77\x6f\x72\x64
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {super \0secret password}"
+ r config set moduleconfigs.enum two
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ r config set moduleconfigs.numeric -2
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -2"
+ }
+
+ test {Immutable flag works properly and rejected strings dont leak} {
+ # Configs flagged immutable should not allow sets
+ catch {[r config set moduleconfigs.immutable_bool yes]} e
+ assert_match {*can't set immutable config*} $e
+ catch {[r config set moduleconfigs.string rejectisfreed]} e
+ assert_match {*Cannot set string to 'rejectisfreed'*} $e
+ }
+
+ test {Numeric limits work properly} {
+ # Configs over/under the limit shouldn't be allowed, and memory configs should only take memory values
+ catch {[r config set moduleconfigs.memory_numeric 200gb]} e
+ assert_match {*argument must be between*} $e
+ catch {[r config set moduleconfigs.memory_numeric -5]} e
+ assert_match {*argument must be a memory value*} $e
+ catch {[r config set moduleconfigs.numeric -10]} e
+ assert_match {*argument must be between*} $e
+ }
+
+ test {Enums only able to be set to passed in values} {
+ # Module authors specify what values are valid for enums, check that only those values are ok on a set
+ catch {[r config set moduleconfigs.enum four]} e
+ assert_match {*argument must be one of the following*} $e
+ }
+
+ test {Unload removes module configs} {
+ r module unload moduleconfigs
+ assert_equal [r config get moduleconfigs.*] ""
+ r module load $testmodule
+ # these should have reverted back to their module specified values
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ r module unload moduleconfigs
+ }
+
+ test {test loadex functionality} {
+ r module loadex $testmodule CONFIG moduleconfigs.mutable_bool no CONFIG moduleconfigs.immutable_bool yes CONFIG moduleconfigs.memory_numeric 2mb CONFIG moduleconfigs.string tclortickle
+ assert_equal [lindex [lindex [r module list] 0] 1] moduleconfigs
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool yes"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 2097152"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string tclortickle"
+ # Configs that were not changed should still be their module specified value
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ }
+
+ test {apply function works} {
+ catch {[r config set moduleconfigs.mutable_bool yes]} e
+ assert_match {*Bool configs*} $e
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ catch {[r config set moduleconfigs.memory_numeric 1000 moduleconfigs.numeric 1000]} e
+ assert_match {*cannot equal*} $e
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 2097152"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ r module unload moduleconfigs
+ }
+
+ test {test double config argument to loadex} {
+ r module loadex $testmodule CONFIG moduleconfigs.mutable_bool yes CONFIG moduleconfigs.mutable_bool no
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ r module unload moduleconfigs
+ }
+
+ test {missing loadconfigs call} {
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.string "cool" ARGS noload]} e
+ assert_match {*ERR*} $e
+ }
+
+ test {test loadex rejects bad configs} {
+ # Bad config 200gb is over the limit
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.memory_numeric 200gb ARGS]} e
+ assert_match {*ERR*} $e
+ # We should completely remove all configs on a failed load
+ assert_equal [r config get moduleconfigs.*] ""
+ # No value for config, should error out
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.mutable_bool CONFIG moduleconfigs.enum two ARGS]} e
+ assert_match {*ERR*} $e
+ assert_equal [r config get moduleconfigs.*] ""
+ # Asan will catch this if this string is not freed
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.string rejectisfreed]}
+ assert_match {*ERR*} $e
+ assert_equal [r config get moduleconfigs.*] ""
+ # test we can't set random configs
+ catch {[r module loadex $testmodule CONFIG maxclients 333]}
+ assert_match {*ERR*} $e
+ assert_equal [r config get moduleconfigs.*] ""
+ assert_not_equal [r config get maxclients] "maxclients 333"
+ # test we can't set other module's configs
+ r module load $testmoduletwo
+ catch {[r module loadex $testmodule CONFIG configs.test no]}
+ assert_match {*ERR*} $e
+ assert_equal [r config get configs.test] "configs.test yes"
+ r module unload configs
+ }
+
+ test {test config rewrite with dynamic load} {
+ #translates to: super \0secret password
+ r module loadex $testmodule CONFIG moduleconfigs.string \x73\x75\x70\x65\x72\x20\x00\x73\x65\x63\x72\x65\x74\x20\x70\x61\x73\x73\x77\x6f\x72\x64 ARGS
+ assert_equal [lindex [lindex [r module list] 0] 1] moduleconfigs
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {super \0secret password}"
+ r config set moduleconfigs.mutable_bool yes
+ r config set moduleconfigs.memory_numeric 750
+ r config set moduleconfigs.enum two
+ r config rewrite
+ restart_server 0 true false
+ # Ensure configs we rewrote are present and that the conf file is readable
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 750"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {super \0secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ r module unload moduleconfigs
+ }
+
+ test {test multiple modules with configs} {
+ r module load $testmodule
+ r module loadex $testmoduletwo CONFIG configs.test yes
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ assert_equal [r config get configs.test] "configs.test yes"
+ r config set moduleconfigs.mutable_bool no
+ r config set moduleconfigs.string nice
+ r config set moduleconfigs.enum two
+ r config set configs.test no
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string nice"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get configs.test] "configs.test no"
+ r config rewrite
+ # test we can load from conf file with multiple different modules.
+ restart_server 0 true false
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string nice"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get configs.test] "configs.test no"
+ r module unload moduleconfigs
+ r module unload configs
+ }
+
+ test {test 1.module load 2.config rewrite 3.module unload 4.config rewrite works} {
+ # Configs need to be removed from the old config file in this case.
+ r module loadex $testmodule CONFIG moduleconfigs.memory_numeric 500 ARGS
+ assert_equal [lindex [lindex [r module list] 0] 1] moduleconfigs
+ r config rewrite
+ r module unload moduleconfigs
+ r config rewrite
+ restart_server 0 true false
+ # Ensure configs we rewrote are no longer present
+ assert_equal [r config get moduleconfigs.*] ""
+ }
+ test {startup moduleconfigs} {
+ # No loadmodule directive
+ set nomodload [start_server [list overrides [list moduleconfigs.string "hello"]]]
+ wait_for_condition 100 50 {
+ ! [is_alive $nomodload]
+ } else {
+ fail "startup should've failed with no load and module configs supplied"
+ }
+ set stdout [dict get $nomodload stdout]
+ assert_equal [count_message_lines $stdout "Module Configuration detected without loadmodule directive or no ApplyConfig call: aborting"] 1
+
+ # Bad config value
+ set badconfig [start_server [list overrides [list loadmodule "$testmodule" moduleconfigs.string "rejectisfreed"]]]
+ wait_for_condition 100 50 {
+ ! [is_alive $badconfig]
+ } else {
+ fail "startup with bad moduleconfigs should've failed"
+ }
+ set stdout [dict get $badconfig stdout]
+ assert_equal [count_message_lines $stdout "Issue during loading of configuration moduleconfigs.string : Cannot set string to 'rejectisfreed'"] 1
+
+ set noload [start_server [list overrides [list loadmodule "$testmodule noload" moduleconfigs.string "hello"]]]
+ wait_for_condition 100 50 {
+ ! [is_alive $noload]
+ } else {
+ fail "startup with moduleconfigs and no loadconfigs call should've failed"
+ }
+ set stdout [dict get $noload stdout]
+ assert_equal [count_message_lines $stdout "Module Configurations were not set, likely a missing LoadConfigs call. Unloading the module."] 1
+
+ start_server [list overrides [list loadmodule "$testmodule" moduleconfigs.string "bootedup" moduleconfigs.enum two]] {
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string bootedup"
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ }
+ }
+}
+
diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl
index f4d540fcf..258ef2f6e 100644
--- a/tests/unit/other.tcl
+++ b/tests/unit/other.tcl
@@ -332,7 +332,8 @@ start_server {tags {"other external:skip"}} {
# Hash table should not rehash
assert_no_match "*table size: 8192*" [r debug HTSTATS 9]
exec kill -9 [get_child_pid 0]
- after 200
+ waitForBgsave r
+ after 200 ;# waiting for serverCron
# Hash table should rehash since there is no child process,
# size is power of two and over 4098, so it is 8192
diff --git a/tests/unit/pause.tcl b/tests/unit/pause.tcl
index 99fc7214d..f7ade2a10 100644
--- a/tests/unit/pause.tcl
+++ b/tests/unit/pause.tcl
@@ -86,6 +86,14 @@ start_server {tags {"pause network"}} {
$rd close
}
+ test "Test may-replicate commands are rejected in ro script by pause RO" {
+ r client PAUSE 60000 WRITE
+ assert_error {ERR May-replicate commands are not allowed when client pause write*} {
+ r EVAL_RO "return redis.call('publish','ch','msg')" 0
+ }
+ r client unpause
+ }
+
test "Test multiple clients can be queued up and unblocked" {
r client PAUSE 60000 WRITE
set clients [list [redis_deferring_client] [redis_deferring_client] [redis_deferring_client]]
diff --git a/tests/unit/replybufsize.tcl b/tests/unit/replybufsize.tcl
index 9377a8fd3..933189eb3 100644
--- a/tests/unit/replybufsize.tcl
+++ b/tests/unit/replybufsize.tcl
@@ -3,7 +3,7 @@ proc get_reply_buffer_size {cname} {
set clients [split [string trim [r client list]] "\r\n"]
set c [lsearch -inline $clients *name=$cname*]
if {![regexp rbs=(\[a-zA-Z0-9-\]+) $c - rbufsize]} {
- error "field rbus not found in $c"
+ error "field rbs not found in $c"
}
return $rbufsize
}
@@ -12,7 +12,7 @@ start_server {tags {"replybufsize"}} {
test {verify reply buffer limits} {
# In order to reduce test time we can set the peak reset time very low
- r debug replybuffer-peak-reset-time 100
+ r debug replybuffer peak-reset-time 100
# Create a simple idle test client
variable tc [redis_client]
@@ -29,7 +29,7 @@ start_server {tags {"replybufsize"}} {
r set bigval [string repeat x 32768]
# In order to reduce test time we can set the peak reset time very low
- r debug replybuffer-peak-reset-time never
+ r debug replybuffer peak-reset-time never
wait_for_condition 10 100 {
[$tc get bigval ; get_reply_buffer_size test_client] >= 16384 && [get_reply_buffer_size test_client] < 32768
@@ -39,7 +39,7 @@ start_server {tags {"replybufsize"}} {
}
# Restore the peak reset time to default
- r debug replybuffer-peak-reset-time reset
+ r debug replybuffer peak-reset-time reset
$tc close
} {0} {needs:debug}
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
index 6c40844c3..d9729b7bd 100644
--- a/tests/unit/scripting.tcl
+++ b/tests/unit/scripting.tcl
@@ -15,17 +15,25 @@ if {$is_eval == 1} {
}
} else {
proc run_script {args} {
- r function load LUA test replace [format "redis.register_function('test', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0]]
+ r function load replace [format "#!lua name=test\nredis.register_function('test', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0]]
+ if {[r readingraw] eq 1} {
+ # read name
+ assert_equal {test} [r read]
+ }
r fcall test {*}[lrange $args 1 end]
}
proc run_script_ro {args} {
- r function load LUA test replace [format "redis.register_function{function_name='test', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0]]
+ r function load replace [format "#!lua name=test\nredis.register_function{function_name='test', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0]]
+ if {[r readingraw] eq 1} {
+ # read name
+ assert_equal {test} [r read]
+ }
r fcall_ro test {*}[lrange $args 1 end]
}
proc run_script_on_connection {args} {
set rd [lindex $args 0]
- $rd function load LUA test replace [format "redis.register_function('test', function(KEYS, ARGV)\n %s \nend)" [lindex $args 1]]
- # read the ok reply of function create
+ $rd function load replace [format "#!lua name=test\nredis.register_function('test', function(KEYS, ARGV)\n %s \nend)" [lindex $args 1]]
+ # read name
$rd read
$rd fcall test {*}[lrange $args 2 end]
}
@@ -784,7 +792,7 @@ start_server {tags {"scripting"}} {
set buf "*3\r\n\$4\r\neval\r\n\$33\r\nwhile 1 do redis.call('ping') end\r\n\$1\r\n0\r\n"
append buf "*1\r\n\$4\r\nping\r\n"
} else {
- set buf "*6\r\n\$8\r\nfunction\r\n\$4\r\nload\r\n\$3\r\nlua\r\n\$4\r\ntest\r\n\$7\r\nreplace\r\n\$81\r\nredis.register_function('test', function() while 1 do redis.call('ping') end end)\r\n"
+ set buf "*4\r\n\$8\r\nfunction\r\n\$4\r\nload\r\n\$7\r\nreplace\r\n\$97\r\n#!lua name=test\nredis.register_function('test', function() while 1 do redis.call('ping') end end)\r\n"
append buf "*3\r\n\$5\r\nfcall\r\n\$4\r\ntest\r\n\$1\r\n0\r\n"
append buf "*1\r\n\$4\r\nping\r\n"
}
@@ -808,8 +816,8 @@ start_server {tags {"scripting"}} {
assert_equal [r ping] "PONG"
if {$is_eval == 0} {
- # read the ok reply of function create
- assert_match {OK} [$rd read]
+ # read the function name
+ assert_match {test} [$rd read]
}
catch {$rd read} res
@@ -1399,6 +1407,19 @@ start_server {tags {"scripting"}} {
r config set replica-serve-stale-data yes
set _ {}
} {} {external:skip}
+
+ test "reject script do not cause a Lua stack leak" {
+ r config set maxmemory 1
+ for {set i 0} {$i < 50} {incr i} {
+ assert_error {OOM allow-oom flag is not set on the script, can not run it when used memory > 'maxmemory'} {r eval {#!lua
+ return 1
+ } 0}
+ }
+ r config set maxmemory 0
+ assert_equal [r eval {#!lua
+ return 1
+ } 0] 1
+ }
}
# Additional eval only tests
diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl
index 5c618d285..d0a8ffb6d 100644
--- a/tests/unit/shutdown.tcl
+++ b/tests/unit/shutdown.tcl
@@ -66,3 +66,39 @@ start_server {tags {"shutdown external:skip"}} {
}
}
}
+
+start_server {tags {"shutdown external:skip"}} {
+ set pid [s process_id]
+ set dump_rdb [file join [lindex [r config get dir] 1] dump.rdb]
+
+ test {RDB save will be failed in shutdown} {
+ for {set i 0} {$i < 20} {incr i} {
+ r set $i $i
+ }
+
+ # create a folder called 'dump.rdb' to trigger temp-rdb rename failure
+ # and it will cause rdb save to fail eventually.
+ if {[file exists $dump_rdb]} {
+ exec rm -f $dump_rdb
+ }
+ exec mkdir -p $dump_rdb
+ }
+ test {SHUTDOWN will abort if rdb save failed on signal} {
+ # trigger a shutdown which will save an rdb
+ exec kill -SIGINT $pid
+ wait_for_log_messages 0 {"*Error trying to save the DB, can't exit*"} 0 100 10
+ }
+ test {SHUTDOWN will abort if rdb save failed on shutdown command} {
+ catch {[r shutdown]} err
+ assert_match {*Errors trying to SHUTDOWN*} $err
+ # make sure the server is still alive
+ assert_equal [r ping] {PONG}
+ }
+ test {SHUTDOWN can proceed if shutdown command was with nosave} {
+ catch {[r shutdown nosave]}
+ wait_for_log_messages 0 {"*ready to exit, bye bye*"} 0 100 10
+ }
+ test {Clean up rdb same named folder} {
+ exec rm -r $dump_rdb
+ }
+}
diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl
index 27cbc686e..d9bb4e760 100644
--- a/tests/unit/type/stream-cgroups.tcl
+++ b/tests/unit/type/stream-cgroups.tcl
@@ -205,6 +205,113 @@ start_server {
$rd close
}
+ test {Blocking XREADGROUP: key deleted} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ r DEL mystream
+ assert_error "*no longer exists*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: key type changed with SET} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ r SET mystream val1
+ assert_error "*no longer exists*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: key type changed with transaction} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ r MULTI
+ r DEL mystream
+ r SADD mystream e1
+ r EXEC
+ assert_error "*no longer exists*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: flushed DB} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ r FLUSHALL
+ assert_error "*no longer exists*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: swapped DB, key doesn't exist} {
+ r SELECT 4
+ r FLUSHDB
+ r SELECT 9
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd SELECT 9
+ $rd read
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ r SWAPDB 4 9
+ assert_error "*no longer exists*" {$rd read}
+ $rd close
+ } {0} {external:skip}
+
+ test {Blocking XREADGROUP: swapped DB, key is not a stream} {
+ r SELECT 4
+ r FLUSHDB
+ r LPUSH mystream e1
+ r SELECT 9
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd SELECT 9
+ $rd read
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ r SWAPDB 4 9
+ assert_error "*no longer exists*" {$rd read}
+ $rd close
+ } {0} {external:skip}
+
+ test {Blocking XREAD: key deleted} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 0 STREAMS mystream "$"
+ r DEL mystream
+
+ r XADD mystream 667 f v
+ set res [$rd read]
+ assert_equal [lindex $res 0 1 0] {667-0 {f v}}
+ $rd close
+ }
+
+ test {Blocking XREAD: key type changed with SET} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 0 STREAMS mystream "$"
+ r SET mystream val1
+
+ r DEL mystream
+ r XADD mystream 667 f v
+ set res [$rd read]
+ assert_equal [lindex $res 0 1 0] {667-0 {f v}}
+ $rd close
+ }
+
test {Blocking XREADGROUP for stream that ran dry (issue #5299)} {
set rd [redis_deferring_client]
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl
index 10945674e..3ccfa61ab 100644
--- a/tests/unit/type/zset.tcl
+++ b/tests/unit/type/zset.tcl
@@ -2399,4 +2399,12 @@ start_server {tags {"zset"}} {
r config set zset-max-ziplist-value $original_max_value
}
+ test {zset score double range} {
+ set dblmax 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.00000000000000000
+ r del zz
+ r zadd zz $dblmax dblmax
+ assert_encoding listpack zz
+ r zscore zz dblmax
+ } {1.7976931348623157e+308}
+
}
diff --git a/utils/create-cluster/.gitignore b/utils/create-cluster/.gitignore
index 2988ee919..a34b639a1 100644
--- a/utils/create-cluster/.gitignore
+++ b/utils/create-cluster/.gitignore
@@ -3,3 +3,4 @@ config.sh
*.aof
*.conf
*.log
+appendonlydir-*
diff --git a/utils/hashtable/README b/utils/hashtable/README
deleted file mode 100644
index 87ffc2f08..000000000
--- a/utils/hashtable/README
+++ /dev/null
@@ -1,13 +0,0 @@
-Hash table implementation related utilities.
-
-rehashing.c
----
-
-Visually show buckets in the two hash tables between rehashings. Also stress
-test getRandomKeys() implementation, that may actually disappear from
-Redis soon, However the visualization code is reusable in new bugs
-investigation.
-
-Compile with:
-
- cc -I ../../src/ rehashing.c ../../src/zmalloc.c ../../src/dict.c -o rehashing_test
diff --git a/utils/hashtable/rehashing.c b/utils/hashtable/rehashing.c
deleted file mode 100644
index 3c0acb84c..000000000
--- a/utils/hashtable/rehashing.c
+++ /dev/null
@@ -1,143 +0,0 @@
-#include "redis.h"
-#include "dict.h"
-
-void _redisAssert(char *x, char *y, int l) {
- printf("ASSERT: %s %s %d\n",x,y,l);
- exit(1);
-}
-
-unsigned int dictKeyHash(const void *keyp) {
- unsigned long key = (unsigned long)keyp;
- key = dictGenHashFunction(&key,sizeof(key));
- key += ~(key << 15);
- key ^= (key >> 10);
- key += (key << 3);
- key ^= (key >> 6);
- key += ~(key << 11);
- key ^= (key >> 16);
- return key;
-}
-
-int dictKeyCompare(void *privdata, const void *key1, const void *key2) {
- unsigned long k1 = (unsigned long)key1;
- unsigned long k2 = (unsigned long)key2;
- return k1 == k2;
-}
-
-dictType dictTypeTest = {
- dictKeyHash, /* hash function */
- NULL, /* key dup */
- NULL, /* val dup */
- dictKeyCompare, /* key compare */
- NULL, /* key destructor */
- NULL, /* val destructor */
- NULL /* allow to expand */
-};
-
-void showBuckets(dictht ht) {
- if (ht.table == NULL) {
- printf("NULL\n");
- } else {
- int j;
- for (j = 0; j < ht.size; j++) {
- printf("%c", ht.table[j] ? '1' : '0');
- }
- printf("\n");
- }
-}
-
-void show(dict *d) {
- int j;
- if (d->rehashidx != -1) {
- printf("rhidx: ");
- for (j = 0; j < d->rehashidx; j++)
- printf(".");
- printf("|\n");
- }
- printf("ht[0]: ");
- showBuckets(d->ht[0]);
- printf("ht[1]: ");
- showBuckets(d->ht[1]);
- printf("\n");
-}
-
-int sortPointers(const void *a, const void *b) {
- unsigned long la, lb;
-
- la = (long) (*((dictEntry**)a));
- lb = (long) (*((dictEntry**)b));
- return la-lb;
-}
-
-void stressGetKeys(dict *d, int times, int *perfect_run, int *approx_run) {
- int j;
-
- dictEntry **des = zmalloc(sizeof(dictEntry*)*dictSize(d));
- for (j = 0; j < times; j++) {
- int requested = rand() % (dictSize(d)+1);
- int returned = dictGetSomeKeys(d, des, requested);
- int dup = 0;
-
- qsort(des,returned,sizeof(dictEntry*),sortPointers);
- if (returned > 1) {
- int i;
- for (i = 0; i < returned-1; i++) {
- if (des[i] == des[i+1]) dup++;
- }
- }
-
- if (requested == returned && dup == 0) {
- (*perfect_run)++;
- } else {
- (*approx_run)++;
- printf("Requested, returned, duplicated: %d %d %d\n",
- requested, returned, dup);
- }
- }
- zfree(des);
-}
-
-#define MAX1 120
-#define MAX2 1000
-int main(void) {
- dict *d = dictCreate(&dictTypeTest,NULL);
- unsigned long i;
- srand(time(NULL));
-
- for (i = 0; i < MAX1; i++) {
- dictAdd(d,(void*)i,NULL);
- show(d);
- }
- printf("Size: %d\n", (int)dictSize(d));
-
- for (i = 0; i < MAX1; i++) {
- dictDelete(d,(void*)i);
- dictResize(d);
- show(d);
- }
- dictRelease(d);
-
- d = dictCreate(&dictTypeTest,NULL);
-
- printf("Stress testing dictGetSomeKeys\n");
- int perfect_run = 0, approx_run = 0;
-
- for (i = 0; i < MAX2; i++) {
- dictAdd(d,(void*)i,NULL);
- stressGetKeys(d,100,&perfect_run,&approx_run);
- }
-
- for (i = 0; i < MAX2; i++) {
- dictDelete(d,(void*)i);
- dictResize(d);
- stressGetKeys(d,100,&perfect_run,&approx_run);
- }
-
- printf("dictGetSomeKey, %d perfect runs, %d approximated runs\n",
- perfect_run, approx_run);
-
- dictRelease(d);
-
- printf("TEST PASSED!\n");
- return 0;
-}