author    antirez <antirez@gmail.com>    2011-07-12 12:39:16 +0200
committer antirez <antirez@gmail.com>    2011-07-12 12:39:16 +0200
commit    0681c5ad844cefefbe62f30df6587c0cbec3272e (patch)
tree      35ab1b7e8863dba2b728d2bae871042309206413
parent    15bc1cc1bcd4174e7116b5429f1f31f792d13d2e (diff)
parent    34a8b51768a2579010e128b05e001bf1d8f99995 (diff)
download  redis-0681c5ad844cefefbe62f30df6587c0cbec3272e.tar.gz
master branch merged into scripting.
-rw-r--r--  Makefile                              |   1
-rw-r--r--  deps/hiredis/hiredis.c                |   6
-rw-r--r--  redis.conf                            |  24
-rw-r--r--  src/Makefile                          |  61
-rw-r--r--  src/aof.c                             |   2
-rw-r--r--  src/config.c                          |  20
-rw-r--r--  src/db.c                              |  23
-rw-r--r--  src/debug.c                           |   6
-rw-r--r--  src/multi.c                           |  11
-rw-r--r--  src/networking.c                      |   4
-rw-r--r--  src/redis.c                           |  86
-rw-r--r--  src/redis.h                           |  29
-rw-r--r--  src/slowlog.c                         | 115
-rw-r--r--  src/slowlog.h                         |  15
-rw-r--r--  src/t_list.c                          |   3
-rw-r--r--  src/valgrind.sup                      |  12
-rw-r--r--  src/zmalloc.c                         |  10
-rw-r--r--  src/zmalloc.h                         |   3
-rw-r--r--  tests/integration/aof.tcl             |  27
-rw-r--r--  tests/integration/replication-2.tcl   |  27
-rw-r--r--  tests/integration/replication-3.tcl   |  31
-rw-r--r--  tests/integration/replication.tcl     |  39
-rw-r--r--  tests/support/server.tcl              |  11
-rw-r--r--  tests/support/test.tcl                | 102
-rw-r--r--  tests/support/tmpfile.tcl             |   8
-rw-r--r--  tests/test_helper.tcl                 | 323
-rw-r--r--  tests/unit/expire.tcl                 |   2
-rw-r--r--  tests/unit/other.tcl                  |  36
-rw-r--r--  tests/unit/slowlog.tcl                |  41
-rw-r--r--  tests/unit/type/list-2.tcl            |  44
-rw-r--r--  tests/unit/type/list-3.tcl            |  70
-rw-r--r--  tests/unit/type/list-common.tcl       |   5
-rw-r--r--  tests/unit/type/list.tcl              | 112
-rw-r--r--  tests/unit/type/zset.tcl              |   2
34 files changed, 866 insertions, 445 deletions
diff --git a/Makefile b/Makefile
index 20c4f86ef..691843d49 100644
--- a/Makefile
+++ b/Makefile
@@ -14,6 +14,7 @@ clean:
cd deps/linenoise && $(MAKE) $@
cd deps/jemalloc && $(MAKE) distclean
cd deps/lua && $(MAKE) $@
+ -(cd deps/jemalloc && $(MAKE) distclean)
$(TARGETS):
cd src && $(MAKE) $@
diff --git a/deps/hiredis/hiredis.c b/deps/hiredis/hiredis.c
index f2135bace..b27c63b83 100644
--- a/deps/hiredis/hiredis.c
+++ b/deps/hiredis/hiredis.c
@@ -50,7 +50,7 @@ typedef struct redisReader {
size_t pos; /* buffer cursor */
size_t len; /* buffer length */
- redisReadTask rstack[3]; /* stack of read tasks */
+ redisReadTask rstack[9]; /* stack of read tasks */
int ridx; /* index of stack */
void *privdata; /* user-settable arbitrary field */
} redisReader;
@@ -347,9 +347,9 @@ static int processMultiBulkItem(redisReader *r) {
int root = 0;
/* Set error for nested multi bulks with depth > 1 */
- if (r->ridx == 2) {
+ if (r->ridx == 8) {
redisSetReplyReaderError(r,sdscatprintf(sdsempty(),
- "No support for nested multi bulk replies with depth > 1"));
+ "No support for nested multi bulk replies with depth > 7"));
return -1;
}
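
The hunks above enlarge hiredis' fixed stack of read tasks from 3 to 9 slots: every "*<count>" header in a reply occupies one slot while it is being parsed, so the old reader rejected replies nested more than one level deep, while the new one accepts up to seven levels (deeply nested replies appear, for example, when EVAL returns nested Lua tables). A purely illustrative RESP fragment, not hiredis code, assuming the standard protocol encoding:

    /* Illustration only: a reply whose arrays nest two levels below the
     * root. Each "*<n>" header pushes one read task, so the old rstack[3]
     * reader would reject the innermost array, while rstack[9] allows
     * nesting down to depth 7 before reporting an error. */
    const char *nested_reply =
        "*1\r\n"            /* root array, 1 element        (depth 0) */
        "*2\r\n"            /*   nested array, 2 elements   (depth 1) */
        ":1\r\n"            /*     integer 1                          */
        "*1\r\n"            /*     nested array, 1 element  (depth 2) */
        "$3\r\nfoo\r\n";    /*       bulk string "foo"                */
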
diff --git a/redis.conf b/redis.conf
index 098c28da9..9e9eac5f8 100644
--- a/redis.conf
+++ b/redis.conf
@@ -319,6 +319,30 @@ auto-aof-rewrite-min-size 64mb
# your server forever. Set it to 0 or a negative value for unlimited execution.
lua-time-limit 60000
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# the execution time, in microseconds, that a command must exceed in order
+# to get logged, and the other parameter is the maximum length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 1024
+
############################### ADVANCED CONFIG ###############################
# Hashes are encoded in a special way (much more memory efficient) when they
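
The two new directives boil down to one comparison per executed command. A minimal sketch of that rule (hypothetical helper, not Redis source; the real check lives in slowlogPushEntryIfNeeded() later in this commit):

    /* Sketch only: should a command that took 'duration_us' microseconds
     * be recorded, given the configured threshold? A negative threshold
     * disables the slow log entirely, zero logs every command. */
    int should_log(long long duration_us, long long slower_than_us) {
        if (slower_than_us < 0) return 0;        /* slow log disabled */
        return duration_us >= slower_than_us;    /* 0 logs everything */
    }

With the default of 10000, only commands needing at least 10 milliseconds of pure execution time (I/O excluded) are recorded, and slowlog-max-len 1024 caps how many entries are kept.
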
diff --git a/src/Makefile b/src/Makefile
index 7e4b829d6..84cb1cda2 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -61,7 +61,7 @@ QUIET_CC = @printf ' %b %b\n' $(CCCOLOR)CC$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR
QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR);
endif
-OBJ = adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endian.o scripting.o
+OBJ = adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endian.o slowlog.o scripting.o
BENCHOBJ = ae.o anet.o redis-benchmark.o sds.o adlist.o zmalloc.o
CLIOBJ = anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o
CHECKDUMPOBJ = redis-check-dump.o lzf_c.o lzf_d.o
@@ -86,38 +86,37 @@ ae_kqueue.o: ae_kqueue.c
ae_select.o: ae_select.c
anet.o: anet.c fmacros.h anet.h
aof.o: aof.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
cluster.o: cluster.c redis.h fmacros.h config.h ae.h sds.h dict.h \
- adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ slowlog.h
config.o: config.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
crc16.o: crc16.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
db.o: db.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
debug.o: debug.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h sha1.h
-dict.o: dict.c fmacros.h dict.h zmalloc.h
-diskstore.o: diskstore.c redis.h fmacros.h config.h ae.h sds.h dict.h \
- adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h \
sha1.h
-dscache.o: dscache.c redis.h fmacros.h config.h ae.h sds.h dict.h \
- adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+dict.o: dict.c fmacros.h dict.h zmalloc.h
endian.o: endian.c
intset.o: intset.c intset.h zmalloc.h endian.h
lzf_c.o: lzf_c.c lzfP.h
lzf_d.o: lzf_d.c lzfP.h
multi.o: multi.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
networking.o: networking.c redis.h fmacros.h config.h ae.h sds.h dict.h \
- adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ slowlog.h
object.o: object.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
pqsort.o: pqsort.c
pubsub.o: pubsub.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
rdb.o: rdb.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h lzf.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h \
+ lzf.h
redis-benchmark.o: redis-benchmark.c fmacros.h ae.h \
../deps/hiredis/hiredis.h sds.h adlist.h zmalloc.h
redis-check-aof.o: redis-check-aof.c fmacros.h config.h
@@ -125,27 +124,33 @@ redis-check-dump.o: redis-check-dump.c lzf.h
redis-cli.o: redis-cli.c fmacros.h version.h ../deps/hiredis/hiredis.h \
sds.h zmalloc.h ../deps/linenoise/linenoise.h help.h
redis.o: redis.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h \
asciilogo.h
release.o: release.c release.h
replication.o: replication.c redis.h fmacros.h config.h ae.h sds.h dict.h \
- adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ slowlog.h
sds.o: sds.c sds.h zmalloc.h
sha1.o: sha1.c sha1.h config.h
+slowlog.o: slowlog.c redis.h fmacros.h config.h ae.h sds.h dict.h \
+ adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ slowlog.h
sort.o: sort.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h pqsort.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h \
+ pqsort.h
syncio.o: syncio.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
t_hash.o: t_hash.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
t_list.o: t_list.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
t_set.o: t_set.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
t_string.o: t_string.c redis.h fmacros.h config.h ae.h sds.h dict.h \
- adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ adlist.h zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h \
+ slowlog.h
t_zset.o: t_zset.c redis.h fmacros.h config.h ae.h sds.h dict.h adlist.h \
- zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h
+ zmalloc.h anet.h zipmap.h ziplist.h intset.h version.h util.h slowlog.h
util.o: util.c fmacros.h util.h
ziplist.o: ziplist.c zmalloc.h util.h ziplist.h endian.h
zipmap.o: zipmap.c zmalloc.h endian.h
@@ -197,8 +202,8 @@ clean:
dep:
$(CC) -MM *.c -I ../deps/hiredis -I ../deps/linenoise
-test: redis-server
- @(cd ..; (which tclsh >/dev/null && tclsh tests/test_helper.tcl --tags "${TAGS}" --file "${FILE}") || echo "You need to install Tcl in order to run tests.")
+test: redis-server redis-check-aof
+ @(cd ..; (which tclsh8.5 >/dev/null && tclsh8.5 tests/test_helper.tcl --tags "${TAGS}") || echo "You need to install Tcl (tclsh8.5) in order to run tests.")
bench:
./redis-benchmark
diff --git a/src/aof.c b/src/aof.c
index ac6b97915..b43f99f5e 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -287,6 +287,8 @@ int loadAppendOnlyFile(char *filename) {
/* The fake client should not have a reply */
redisAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0);
+ /* The fake client should never get blocked */
+ redisAssert((fakeClient->flags & REDIS_BLOCKED) == 0);
/* Clean up. Command code may have changed argv/argc so we use the
* argv/argc of the client instead of the local variables. */
diff --git a/src/config.c b/src/config.c
index 88a00d382..5442e0366 100644
--- a/src/config.c
+++ b/src/config.c
@@ -298,6 +298,12 @@ void loadServerConfig(char *filename) {
server.cluster.configfile = zstrdup(argv[1]);
} else if (!strcasecmp(argv[0],"lua-time-limit") && argc == 2) {
server.lua_time_limit = strtoll(argv[1],NULL,10);
+ } else if (!strcasecmp(argv[0],"slowlog-log-slower-than") &&
+ argc == 2)
+ {
+ server.slowlog_log_slower_than = strtoll(argv[1],NULL,10);
+ } else if (!strcasecmp(argv[0],"slowlog-max-len") && argc == 2) {
+ server.slowlog_max_len = strtoll(argv[1],NULL,10);
} else {
err = "Bad directive or wrong number of arguments"; goto loaderr;
}
@@ -471,6 +477,12 @@ void configSetCommand(redisClient *c) {
} else if (!strcasecmp(c->argv[2]->ptr,"lua-time-limit")) {
if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt;
server.lua_time_limit = ll;
+ } else if (!strcasecmp(c->argv[2]->ptr,"slowlog-log-slower-than")) {
+ if (getLongLongFromObject(o,&ll) == REDIS_ERR) goto badfmt;
+ server.slowlog_log_slower_than = ll;
+ } else if (!strcasecmp(c->argv[2]->ptr,"slowlog-max-len")) {
+ if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt;
+ server.slowlog_max_len = (unsigned)ll;
} else {
addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s",
(char*)c->argv[2]->ptr);
@@ -645,6 +657,14 @@ void configGetCommand(redisClient *c) {
if (stringmatch(pattern,"lua-time-limit",0)) {
addReplyBulkCString(c,"lua-time-limit");
addReplyBulkLongLong(c,server.lua_time_limit);
+ if (stringmatch(pattern,"slowlog-log-slower-than",0)) {
+ addReplyBulkCString(c,"slowlog-log-slower-than");
+ addReplyBulkLongLong(c,server.slowlog_log_slower_than);
+ matches++;
+ }
+ if (stringmatch(pattern,"slowlog-max-len",0)) {
+ addReplyBulkCString(c,"slowlog-max-len");
+ addReplyBulkLongLong(c,server.slowlog_max_len);
matches++;
}
setDeferredMultiBulkLength(c,replylen,matches*2);
diff --git a/src/db.c b/src/db.c
index a02f30438..26af2f21a 100644
--- a/src/db.c
+++ b/src/db.c
@@ -476,6 +476,9 @@ int expireIfNeeded(redisDb *db, robj *key) {
if (when < 0) return 0; /* No expire for this key */
+ /* Don't expire anything while loading. It will be done later. */
+ if (server.loading) return 0;
+
/* If we are running in the context of a slave, return ASAP:
* the slave key expiration is controlled by the master that will
* send us synthesized DEL operations for expired keys.
@@ -513,10 +516,24 @@ void expireGenericCommand(redisClient *c, robj *key, robj *param, long offset) {
addReply(c,shared.czero);
return;
}
- if (seconds <= 0) {
- if (dbDelete(c->db,key)) server.dirty++;
- addReply(c, shared.cone);
+ /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past
+ * should never be executed as a DEL when loading the AOF or in the context
+ * of a slave instance.
+ *
+ * Instead we take the other branch of the IF statement setting an expire
+ * (possibly in the past) and wait for an explicit DEL from the master. */
+ if (seconds <= 0 && !server.loading && !server.masterhost) {
+ robj *aux;
+
+ redisAssert(dbDelete(c->db,key));
+ server.dirty++;
+
+ /* Replicate/AOF this as an explicit DEL. */
+ aux = createStringObject("DEL",3);
+ rewriteClientCommandVector(c,2,aux,key);
+ decrRefCount(aux);
signalModifiedKey(c->db,key);
+ addReply(c, shared.cone);
return;
} else {
time_t when = time(NULL)+seconds;
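
The comment in this hunk encodes a replication rule worth spelling out: only a master that is not loading data may turn an EXPIRE/EXPIREAT with a non-positive TTL into an immediate deletion, and it then rewrites the command vector so that slaves and the AOF see an explicit DEL; a loading instance or a slave simply stores the already-expired timestamp and waits for that DEL to arrive. A hypothetical condensation of the predicate, for reference only:

    /* Sketch only: mirrors the condition used above. The command is
     * executed as a DEL only when the TTL is non-positive AND we are
     * neither loading an AOF/RDB nor attached to a master. */
    int expire_becomes_del(long long seconds, int loading, int has_master) {
        return seconds <= 0 && !loading && !has_master;
    }
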
diff --git a/src/debug.c b/src/debug.c
index 511512dde..966e0eb40 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -281,6 +281,12 @@ void debugCommand(redisClient *c) {
d = sdscatprintf(d, "%02x",digest[j]);
addReplyStatus(c,d);
sdsfree(d);
+ } else if (!strcasecmp(c->argv[1]->ptr,"sleep") && c->argc == 3) {
+ double dtime = strtod(c->argv[2]->ptr,NULL);
+ long long utime = dtime*1000000;
+
+ usleep(utime);
+ addReply(c,shared.ok);
} else {
addReplyError(c,
"Syntax error, try DEBUG [SEGFAULT|OBJECT <key>|SWAPIN <key>|SWAPOUT <key>|RELOAD]");
diff --git a/src/multi.c b/src/multi.c
index ba3a0cd6c..f0b8fc856 100644
--- a/src/multi.c
+++ b/src/multi.c
@@ -24,14 +24,14 @@ void freeClientMultiState(redisClient *c) {
}
/* Add a new command into the MULTI commands queue */
-void queueMultiCommand(redisClient *c, struct redisCommand *cmd) {
+void queueMultiCommand(redisClient *c) {
multiCmd *mc;
int j;
c->mstate.commands = zrealloc(c->mstate.commands,
sizeof(multiCmd)*(c->mstate.count+1));
mc = c->mstate.commands+c->mstate.count;
- mc->cmd = cmd;
+ mc->cmd = c->cmd;
mc->argc = c->argc;
mc->argv = zmalloc(sizeof(robj*)*c->argc);
memcpy(mc->argv,c->argv,sizeof(robj*)*c->argc);
@@ -78,6 +78,7 @@ void execCommand(redisClient *c) {
int j;
robj **orig_argv;
int orig_argc;
+ struct redisCommand *orig_cmd;
if (!(c->flags & REDIS_MULTI)) {
addReplyError(c,"EXEC without MULTI");
@@ -105,18 +106,22 @@ void execCommand(redisClient *c) {
unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */
orig_argv = c->argv;
orig_argc = c->argc;
+ orig_cmd = c->cmd;
addReplyMultiBulkLen(c,c->mstate.count);
for (j = 0; j < c->mstate.count; j++) {
c->argc = c->mstate.commands[j].argc;
c->argv = c->mstate.commands[j].argv;
- call(c,c->mstate.commands[j].cmd);
+ c->cmd = c->mstate.commands[j].cmd;
+ call(c);
/* Commands may alter argc/argv, restore mstate. */
c->mstate.commands[j].argc = c->argc;
c->mstate.commands[j].argv = c->argv;
+ c->mstate.commands[j].cmd = c->cmd;
}
c->argv = orig_argv;
c->argc = orig_argc;
+ c->cmd = orig_cmd;
freeClientMultiState(c);
initClientMultiState(c);
c->flags &= ~(REDIS_MULTI|REDIS_DIRTY_CAS);
diff --git a/src/networking.c b/src/networking.c
index b95ef946a..a4eee4643 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -36,6 +36,7 @@ redisClient *createClient(int fd) {
c->reqtype = 0;
c->argc = 0;
c->argv = NULL;
+ c->cmd = NULL;
c->multibulklen = 0;
c->bulklen = -1;
c->sentlen = 0;
@@ -461,6 +462,7 @@ static void freeClientArgv(redisClient *c) {
for (j = 0; j < c->argc; j++)
decrRefCount(c->argv[j]);
c->argc = 0;
+ c->cmd = NULL;
}
void freeClient(redisClient *c) {
@@ -961,5 +963,7 @@ void rewriteClientCommandVector(redisClient *c, int argc, ...) {
/* Replace argv and argc with our new versions. */
c->argv = argv;
c->argc = argc;
+ c->cmd = lookupCommand(c->argv[0]->ptr);
+ redisAssert(c->cmd != NULL);
va_end(ap);
}
diff --git a/src/redis.c b/src/redis.c
index f4e3f6239..7e9c6fd5c 100644
--- a/src/redis.c
+++ b/src/redis.c
@@ -28,6 +28,7 @@
*/
#include "redis.h"
+#include "slowlog.h"
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
@@ -194,7 +195,8 @@ struct redisCommand redisCommandTable[] = {
{"object",objectCommand,-2,0,NULL,0,0,0,0,0},
{"client",clientCommand,-2,0,NULL,0,0,0,0,0},
{"eval",evalCommand,-3,REDIS_CMD_DENYOOM,zunionInterGetKeys,0,0,0,0,0},
- {"evalsha",evalShaCommand,-3,REDIS_CMD_DENYOOM,zunionInterGetKeys,0,0,0,0,0}
+ {"evalsha",evalShaCommand,-3,REDIS_CMD_DENYOOM,zunionInterGetKeys,0,0,0,0,0},
+ {"slowlog",slowlogCommand,-2,0,NULL,0,0,0,0,0}
};
/*============================ Utility functions ============================ */
@@ -871,6 +873,10 @@ void initServerConfig() {
populateCommandTable();
server.delCommand = lookupCommandByCString("del");
server.multiCommand = lookupCommandByCString("multi");
+
+ /* Slow log */
+ server.slowlog_log_slower_than = REDIS_SLOWLOG_LOG_SLOWER_THAN;
+ server.slowlog_max_len = REDIS_SLOWLOG_MAX_LEN;
}
void initServer() {
@@ -958,6 +964,7 @@ void initServer() {
if (server.cluster_enabled) clusterInit();
scriptingInit();
+ slowlogInit();
srand(time(NULL)^getpid());
}
@@ -1004,18 +1011,20 @@ struct redisCommand *lookupCommandByCString(char *s) {
}
/* Call() is the core of Redis execution of a command */
-void call(redisClient *c, struct redisCommand *cmd) {
- long long dirty, start = ustime();
+void call(redisClient *c) {
+ long long dirty, start = ustime(), duration;
dirty = server.dirty;
- cmd->proc(c);
+ c->cmd->proc(c);
dirty = server.dirty-dirty;
- cmd->microseconds += ustime()-start;
- cmd->calls++;
+ duration = ustime()-start;
+ c->cmd->microseconds += duration;
+ slowlogPushEntryIfNeeded(c->argv,c->argc,duration);
+ c->cmd->calls++;
if (server.appendonly && dirty)
- feedAppendOnlyFile(cmd,c->db->id,c->argv,c->argc);
- if ((dirty || cmd->flags & REDIS_CMD_FORCE_REPLICATION) &&
+ feedAppendOnlyFile(c->cmd,c->db->id,c->argv,c->argc);
+ if ((dirty || c->cmd->flags & REDIS_CMD_FORCE_REPLICATION) &&
listLength(server.slaves))
replicationFeedSlaves(server.slaves,c->db->id,c->argv,c->argc);
if (listLength(server.monitors))
@@ -1032,8 +1041,6 @@ void call(redisClient *c, struct redisCommand *cmd) {
* and other operations can be performed by the caller. Otherwise
* if 0 is returned the client was destroyed (i.e. after QUIT). */
int processCommand(redisClient *c) {
- struct redisCommand *cmd;
-
/* The QUIT command is handled separately. Normal command procs will
* go through checking for replication and QUIT will cause trouble
* when FORCE_REPLICATION is enabled and would be implemented in
@@ -1045,28 +1052,29 @@ int processCommand(redisClient *c) {
}
/* Now lookup the command and check ASAP about trivial error conditions
- * such wrong arity, bad command name and so forth. */
- cmd = lookupCommand(c->argv[0]->ptr);
- if (!cmd) {
+ * such as wrong arity, bad command name and so forth. */
+ c->cmd = lookupCommand(c->argv[0]->ptr);
+ if (!c->cmd) {
addReplyErrorFormat(c,"unknown command '%s'",
(char*)c->argv[0]->ptr);
return REDIS_OK;
- } else if ((cmd->arity > 0 && cmd->arity != c->argc) ||
- (c->argc < -cmd->arity)) {
+ } else if ((c->cmd->arity > 0 && c->cmd->arity != c->argc) ||
+ (c->argc < -c->cmd->arity)) {
addReplyErrorFormat(c,"wrong number of arguments for '%s' command",
- cmd->name);
+ c->cmd->name);
return REDIS_OK;
}
/* Check if the user is authenticated */
- if (server.requirepass && !c->authenticated && cmd->proc != authCommand) {
+ if (server.requirepass && !c->authenticated && c->cmd->proc != authCommand)
+ {
addReplyError(c,"operation not permitted");
return REDIS_OK;
}
/* If cluster is enabled, redirect here */
if (server.cluster_enabled &&
- !(cmd->getkeys_proc == NULL && cmd->firstkey == 0)) {
+ !(c->cmd->getkeys_proc == NULL && c->cmd->firstkey == 0)) {
int hashslot;
if (server.cluster.state != REDIS_CLUSTER_OK) {
@@ -1074,7 +1082,7 @@ int processCommand(redisClient *c) {
return REDIS_OK;
} else {
int ask;
- clusterNode *n = getNodeByQuery(c,cmd,c->argv,c->argc,&hashslot,&ask);
+ clusterNode *n = getNodeByQuery(c,c->cmd,c->argv,c->argc,&hashslot,&ask);
if (n == NULL) {
addReplyError(c,"Multi keys request invalid in cluster");
return REDIS_OK;
@@ -1093,7 +1101,7 @@ int processCommand(redisClient *c) {
* keys in the dataset). If there are not the only thing we can do
* is returning an error. */
if (server.maxmemory) freeMemoryIfNeeded();
- if (server.maxmemory && (cmd->flags & REDIS_CMD_DENYOOM) &&
+ if (server.maxmemory && (c->cmd->flags & REDIS_CMD_DENYOOM) &&
zmalloc_used_memory() > server.maxmemory)
{
addReplyError(c,"command not allowed when used memory > 'maxmemory'");
@@ -1103,8 +1111,10 @@ int processCommand(redisClient *c) {
/* Only allow SUBSCRIBE and UNSUBSCRIBE in the context of Pub/Sub */
if ((dictSize(c->pubsub_channels) > 0 || listLength(c->pubsub_patterns) > 0)
&&
- cmd->proc != subscribeCommand && cmd->proc != unsubscribeCommand &&
- cmd->proc != psubscribeCommand && cmd->proc != punsubscribeCommand) {
+ c->cmd->proc != subscribeCommand &&
+ c->cmd->proc != unsubscribeCommand &&
+ c->cmd->proc != psubscribeCommand &&
+ c->cmd->proc != punsubscribeCommand) {
addReplyError(c,"only (P)SUBSCRIBE / (P)UNSUBSCRIBE / QUIT allowed in this context");
return REDIS_OK;
}
@@ -1113,7 +1123,7 @@ int processCommand(redisClient *c) {
* we are a slave with a broken link with master. */
if (server.masterhost && server.replstate != REDIS_REPL_CONNECTED &&
server.repl_serve_stale_data == 0 &&
- cmd->proc != infoCommand && cmd->proc != slaveofCommand)
+ c->cmd->proc != infoCommand && c->cmd->proc != slaveofCommand)
{
addReplyError(c,
"link with MASTER is down and slave-serve-stale-data is set to no");
@@ -1121,20 +1131,20 @@ int processCommand(redisClient *c) {
}
/* Loading DB? Return an error if the command is not INFO */
- if (server.loading && cmd->proc != infoCommand) {
+ if (server.loading && c->cmd->proc != infoCommand) {
addReply(c, shared.loadingerr);
return REDIS_OK;
}
/* Exec the command */
if (c->flags & REDIS_MULTI &&
- cmd->proc != execCommand && cmd->proc != discardCommand &&
- cmd->proc != multiCommand && cmd->proc != watchCommand)
+ c->cmd->proc != execCommand && c->cmd->proc != discardCommand &&
+ c->cmd->proc != multiCommand && c->cmd->proc != watchCommand)
{
- queueMultiCommand(c,cmd);
+ queueMultiCommand(c);
addReply(c,shared.queued);
} else {
- call(c,cmd);
+ call(c);
}
return REDIS_OK;
}
@@ -1305,22 +1315,6 @@ sds genRedisInfoString(char *section) {
);
}
- /* Allocation statistics */
- if (allsections || !strcasecmp(section,"allocstats")) {
- if (sections++) info = sdscat(info,"\r\n");
- info = sdscat(info, "# Allocstats\r\nallocation_stats:");
- for (j = 0; j <= ZMALLOC_MAX_ALLOC_STAT; j++) {
- size_t count = zmalloc_allocations_for_size(j);
- if (count) {
- if (info[sdslen(info)-1] != ':') info = sdscatlen(info,",",1);
- info = sdscatprintf(info,"%s%d=%zu",
- (j == ZMALLOC_MAX_ALLOC_STAT) ? ">=" : "",
- j,count);
- }
- }
- info = sdscat(info,"\r\n");
- }
-
/* Persistence */
if (allsections || defsections || !strcasecmp(section,"persistence")) {
if (sections++) info = sdscat(info,"\r\n");
@@ -1456,8 +1450,8 @@ sds genRedisInfoString(char *section) {
"# CPU\r\n"
"used_cpu_sys:%.2f\r\n"
"used_cpu_user:%.2f\r\n"
- "used_cpu_sys_childrens:%.2f\r\n"
- "used_cpu_user_childrens:%.2f\r\n",
+ "used_cpu_sys_children:%.2f\r\n"
+ "used_cpu_user_children:%.2f\r\n",
(float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000,
(float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000,
(float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000,
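
call() now times every command and reports the duration both to the per-command statistics and to slowlogPushEntryIfNeeded(). The pattern is sketched below under the assumption that ustime() is Redis' usual gettimeofday()-based microsecond clock:

    /* Sketch of the measurement performed in call() above (assumption:
     * ustime() returns the current time in microseconds). */
    #include <sys/time.h>

    static long long ustime(void) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return ((long long)tv.tv_sec) * 1000000 + tv.tv_usec;
    }

    /* Usage inside the dispatcher:
     *   long long start = ustime();
     *   c->cmd->proc(c);                              run the command
     *   long long duration = ustime() - start;        microseconds spent
     *   c->cmd->microseconds += duration;
     *   slowlogPushEntryIfNeeded(c->argv, c->argc, duration);
     */
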
diff --git a/src/redis.h b/src/redis.h
index 9775e2984..1d094c1d7 100644
--- a/src/redis.h
+++ b/src/redis.h
@@ -21,17 +21,17 @@
#include <netinet/in.h>
#include <lua.h>
-#include "ae.h" /* Event driven programming library */
-#include "sds.h" /* Dynamic safe strings */
-#include "dict.h" /* Hash tables */
-#include "adlist.h" /* Linked lists */
+#include "ae.h" /* Event driven programming library */
+#include "sds.h" /* Dynamic safe strings */
+#include "dict.h" /* Hash tables */
+#include "adlist.h" /* Linked lists */
#include "zmalloc.h" /* total memory usage aware version of malloc/free */
-#include "anet.h" /* Networking the easy way */
-#include "zipmap.h" /* Compact string -> string data structure */
+#include "anet.h" /* Networking the easy way */
+#include "zipmap.h" /* Compact string -> string data structure */
#include "ziplist.h" /* Compact list data structure */
-#include "intset.h" /* Compact integer set structure */
-#include "version.h"
-#include "util.h"
+#include "intset.h" /* Compact integer set structure */
+#include "version.h" /* Version macro */
+#include "util.h" /* Misc functions useful in many places */
/* Error codes */
#define REDIS_OK 0
@@ -53,6 +53,8 @@
#define REDIS_MAX_LOGMSG_LEN 1024 /* Default maximum length of syslog messages */
#define REDIS_AUTO_AOFREWRITE_PERC 100
#define REDIS_AUTO_AOFREWRITE_MIN_SIZE (1024*1024)
+#define REDIS_SLOWLOG_LOG_SLOWER_THAN 10000
+#define REDIS_SLOWLOG_MAX_LEN 64
/* Hash table parameters */
#define REDIS_HT_MINFILL 10 /* Minimal hash table fill 10% */
@@ -312,6 +314,7 @@ typedef struct redisClient {
sds querybuf;
int argc;
robj **argv;
+ struct redisCommand *cmd;
int reqtype;
int multibulklen; /* number of multi bulk arguments left to read */
long bulklen; /* length of bulk argument in multi bulk request */
@@ -530,6 +533,10 @@ struct redisServer {
long long stat_keyspace_misses; /* number of failed lookups of keys */
size_t stat_peak_memory; /* max used memory record */
long long stat_fork_time; /* time needed to perform latest fork() */
+ list *slowlog;
+ long long slowlog_entry_id;
+ long long slowlog_log_slower_than;
+ unsigned long slowlog_max_len;
/* Configuration */
int verbosity;
int maxidletime;
@@ -807,7 +814,7 @@ void popGenericCommand(redisClient *c, int where);
void unwatchAllKeys(redisClient *c);
void initClientMultiState(redisClient *c);
void freeClientMultiState(redisClient *c);
-void queueMultiCommand(redisClient *c, struct redisCommand *cmd);
+void queueMultiCommand(redisClient *c);
void touchWatchedKey(redisDb *db, robj *key);
void touchWatchedKeysOnFlush(int dbid);
@@ -918,7 +925,7 @@ int processCommand(redisClient *c);
void setupSignalHandlers(void);
struct redisCommand *lookupCommand(sds name);
struct redisCommand *lookupCommandByCString(char *s);
-void call(redisClient *c, struct redisCommand *cmd);
+void call(redisClient *c);
int prepareForShutdown();
void redisLog(int level, const char *fmt, ...);
void redisLogRaw(int level, const char *msg);
diff --git a/src/slowlog.c b/src/slowlog.c
new file mode 100644
index 000000000..cfd66dc63
--- /dev/null
+++ b/src/slowlog.c
@@ -0,0 +1,115 @@
+#include "redis.h"
+#include "slowlog.h"
+
+/* Slowlog implements a system that is able to remember the latest N
+ * queries that took more than M microseconds to execute.
+ *
+ * The execution time threshold that a query must exceed in order to be
+ * logged is set using the 'slowlog-log-slower-than' config directive,
+ * which is also readable and writable using the CONFIG SET/GET commands.
+ *
+ * The slow queries log is actually not "logged" in the Redis log file
+ * but is accessible thanks to the SLOWLOG command. */
+
+/* Create a new slowlog entry.
+ * Incrementing the ref count of all the objects retained is up to
+ * this function. */
+slowlogEntry *slowlogCreateEntry(robj **argv, int argc, long long duration) {
+ slowlogEntry *se = zmalloc(sizeof(*se));
+ int j;
+
+ se->argc = argc;
+ se->argv = zmalloc(sizeof(robj*)*argc);
+ for (j = 0; j < argc; j++) {
+ se->argv[j] = argv[j];
+ incrRefCount(argv[j]);
+ }
+ se->time = time(NULL);
+ se->duration = duration;
+ se->id = server.slowlog_entry_id++;
+ return se;
+}
+
+/* Free a slow log entry. The argument is void so that the prototype of this
+ * function matches the one of the 'free' method of adlist.c.
+ *
+ * This function will take care to release all the retained objects. */
+void slowlogFreeEntry(void *septr) {
+ slowlogEntry *se = septr;
+ int j;
+
+ for (j = 0; j < se->argc; j++)
+ decrRefCount(se->argv[j]);
+ zfree(se->argv);
+ zfree(se);
+}
+
+/* Initialize the slow log. This function should be called a single time
+ * at server startup. */
+void slowlogInit(void) {
+ server.slowlog = listCreate();
+ server.slowlog_entry_id = 0;
+ listSetFreeMethod(server.slowlog,slowlogFreeEntry);
+}
+
+/* Push a new entry into the slow log.
+ * This function will make sure to trim the slow log according to the
+ * configured max length. */
+void slowlogPushEntryIfNeeded(robj **argv, int argc, long long duration) {
+ if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */
+ if (duration >= server.slowlog_log_slower_than)
+ listAddNodeHead(server.slowlog,slowlogCreateEntry(argv,argc,duration));
+
+ /* Remove old entries if needed. */
+ while (listLength(server.slowlog) > server.slowlog_max_len)
+ listDelNode(server.slowlog,listLast(server.slowlog));
+}
+
+/* Remove all the entries from the current slow log. */
+void slowlogReset(void) {
+ while (listLength(server.slowlog) > 0)
+ listDelNode(server.slowlog,listLast(server.slowlog));
+}
+
+/* The SLOWLOG command. Implements all the subcommands needed to handle the
+ * Redis slow log. */
+void slowlogCommand(redisClient *c) {
+ if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"reset")) {
+ slowlogReset();
+ addReply(c,shared.ok);
+ } else if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"len")) {
+ addReplyLongLong(c,listLength(server.slowlog));
+ } else if ((c->argc == 2 || c->argc == 3) &&
+ !strcasecmp(c->argv[1]->ptr,"get"))
+ {
+ long count = 10, sent = 0;
+ listIter li;
+ void *totentries;
+ listNode *ln;
+ slowlogEntry *se;
+
+ if (c->argc == 3 &&
+ getLongFromObjectOrReply(c,c->argv[2],&count,NULL) != REDIS_OK)
+ return;
+
+ listRewind(server.slowlog,&li);
+ totentries = addDeferredMultiBulkLength(c);
+ while(count-- && (ln = listNext(&li))) {
+ int j;
+
+ se = ln->value;
+ addReplyMultiBulkLen(c,4);
+ addReplyLongLong(c,se->id);
+ addReplyLongLong(c,se->time);
+ addReplyLongLong(c,se->duration);
+ addReplyMultiBulkLen(c,se->argc);
+ for (j = 0; j < se->argc; j++)
+ addReplyBulk(c,se->argv[j]);
+ sent++;
+ }
+ setDeferredMultiBulkLength(c,totentries,sent);
+ } else {
+ addReplyError(c,
+ "Unknown SLOWLOG subcommand or wrong # of args. Try GET, RESET, LEN.");
+ }
+}
diff --git a/src/slowlog.h b/src/slowlog.h
new file mode 100644
index 000000000..bad770db4
--- /dev/null
+++ b/src/slowlog.h
@@ -0,0 +1,15 @@
+/* This structure defines an entry inside the slow log list */
+typedef struct slowlogEntry {
+ robj **argv;
+ int argc;
+ long long id; /* Unique entry identifier. */
+ long long duration; /* Time spent by the query, in microseconds. */
+ time_t time; /* Unix time at which the query was executed. */
+} slowlogEntry;
+
+/* Exported API */
+void slowlogInit(void);
+void slowlogPushEntryIfNeeded(robj **argv, int argc, long long duration);
+
+/* Exported commands */
+void slowlogCommand(redisClient *c);
diff --git a/src/t_list.c b/src/t_list.c
index d88277261..5427293f9 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -910,6 +910,7 @@ void blockingPopGenericCommand(redisClient *c, int where) {
if (listTypeLength(o) != 0) {
/* If the list contains elements fall back to the usual
* non-blocking POP operation */
+ struct redisCommand *orig_cmd;
robj *argv[2], **orig_argv;
int orig_argc;
@@ -917,6 +918,7 @@ void blockingPopGenericCommand(redisClient *c, int where) {
* popGenericCommand() as the command takes a single key. */
orig_argv = c->argv;
orig_argc = c->argc;
+ orig_cmd = c->cmd;
argv[1] = c->argv[j];
c->argv = argv;
c->argc = 2;
@@ -934,6 +936,7 @@ void blockingPopGenericCommand(redisClient *c, int where) {
/* Fix the client structure with the original stuff */
c->argv = orig_argv;
c->argc = orig_argc;
+ c->cmd = orig_cmd;
return;
}
diff --git a/src/valgrind.sup b/src/valgrind.sup
index 7ba757548..3024d63bc 100644
--- a/src/valgrind.sup
+++ b/src/valgrind.sup
@@ -3,3 +3,15 @@
Memcheck:Cond
fun:lzf_compress
}
+
+{
+ <lzf_unitialized_hash_table>
+ Memcheck:Value4
+ fun:lzf_compress
+}
+
+{
+ <lzf_unitialized_hash_table>
+ Memcheck:Value8
+ fun:lzf_compress
+}
diff --git a/src/zmalloc.c b/src/zmalloc.c
index 428951a46..5408c2faf 100644
--- a/src/zmalloc.c
+++ b/src/zmalloc.c
@@ -60,16 +60,13 @@
#define update_zmalloc_stat_alloc(__n,__size) do { \
size_t _n = (__n); \
- size_t _stat_slot = (__size < ZMALLOC_MAX_ALLOC_STAT) ? __size : ZMALLOC_MAX_ALLOC_STAT; \
if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
if (zmalloc_thread_safe) { \
pthread_mutex_lock(&used_memory_mutex); \
used_memory += _n; \
- zmalloc_allocations[_stat_slot]++; \
pthread_mutex_unlock(&used_memory_mutex); \
} else { \
used_memory += _n; \
- zmalloc_allocations[_stat_slot]++; \
} \
} while(0)
@@ -88,8 +85,6 @@
static size_t used_memory = 0;
static int zmalloc_thread_safe = 0;
pthread_mutex_t used_memory_mutex = PTHREAD_MUTEX_INITIALIZER;
-/* Note that malloc_allocations elements are initialized to zero by C */
-size_t zmalloc_allocations[ZMALLOC_MAX_ALLOC_STAT+1];
static void zmalloc_oom(size_t size) {
fprintf(stderr, "zmalloc: Out of memory trying to allocate %zu bytes\n",
@@ -190,11 +185,6 @@ size_t zmalloc_used_memory(void) {
return um;
}
-size_t zmalloc_allocations_for_size(size_t size) {
- if (size > ZMALLOC_MAX_ALLOC_STAT) return 0;
- return zmalloc_allocations[size];
-}
-
void zmalloc_enable_thread_safeness(void) {
zmalloc_thread_safe = 1;
}
diff --git a/src/zmalloc.h b/src/zmalloc.h
index 49792795f..7ee556a37 100644
--- a/src/zmalloc.h
+++ b/src/zmalloc.h
@@ -75,8 +75,5 @@ size_t zmalloc_used_memory(void);
void zmalloc_enable_thread_safeness(void);
float zmalloc_get_fragmentation_ratio(void);
size_t zmalloc_get_rss(void);
-size_t zmalloc_allocations_for_size(size_t size);
-
-#define ZMALLOC_MAX_ALLOC_STAT 256
#endif /* __ZMALLOC_H */
diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl
index 927969b62..a554f9ef1 100644
--- a/tests/integration/aof.tcl
+++ b/tests/integration/aof.tcl
@@ -32,6 +32,7 @@ tags {"aof"} {
start_server_aof [list dir $server_path] {
test "Unfinished MULTI: Server should not have been started" {
+ if {$::valgrind} {after 2000}
assert_equal 0 [is_alive $srv]
}
@@ -49,6 +50,7 @@ tags {"aof"} {
start_server_aof [list dir $server_path] {
test "Short read: Server should not have been started" {
+ if {$::valgrind} {after 2000}
assert_equal 0 [is_alive $srv]
}
@@ -101,4 +103,29 @@ tags {"aof"} {
assert_equal 1 [$client scard set]
}
}
+
+ ## Test that EXPIREAT is loaded correctly
+ create_aof {
+ append_to_aof [formatCommand rpush list foo]
+ append_to_aof [formatCommand expireat list 1000]
+ append_to_aof [formatCommand rpush list bar]
+ }
+
+ start_server_aof [list dir $server_path] {
+ test "AOF+EXPIRE: Server should have been started" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "AOF+EXPIRE: List should be empty" {
+ set client [redis [dict get $srv host] [dict get $srv port]]
+ assert_equal 0 [$client llen list]
+ }
+ }
+
+ start_server {overrides {appendonly {yes} appendfilename {appendonly.aof}}} {
+ test {Redis should not try to convert DEL into EXPIREAT for EXPIRE -1} {
+ r set x 10
+ r expire x -1
+ }
+ }
}
diff --git a/tests/integration/replication-2.tcl b/tests/integration/replication-2.tcl
new file mode 100644
index 000000000..5450bdd85
--- /dev/null
+++ b/tests/integration/replication-2.tcl
@@ -0,0 +1,27 @@
+start_server {tags {"repl"}} {
+ start_server {} {
+ test {First server should have role slave after SLAVEOF} {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ after 1000
+ s -1 role
+ } {slave}
+
+ test {MASTER and SLAVE dataset should be identical after complex ops} {
+ createComplexDataset r 10000
+ after 500
+ if {[r debug digest] ne [r -1 debug digest]} {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ puts "Master - Slave inconsistency"
+ puts "Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ assert_equal [r debug digest] [r -1 debug digest]
+ }
+ }
+}
diff --git a/tests/integration/replication-3.tcl b/tests/integration/replication-3.tcl
new file mode 100644
index 000000000..e660bf4e5
--- /dev/null
+++ b/tests/integration/replication-3.tcl
@@ -0,0 +1,31 @@
+start_server {tags {"repl"}} {
+ start_server {} {
+ test {First server should have role slave after SLAVEOF} {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ after 1000
+ s -1 role
+ } {slave}
+
+ if {$::accurate} {set numops 50000} else {set numops 5000}
+
+ test {MASTER and SLAVE consistency with expire} {
+ createComplexDataset r $numops useexpire
+ after 4000 ;# Make sure everything expired before taking the digest
+ r keys * ;# Force DEL synthesizing to slave
+ after 1000 ;# Wait another second. Now everything should be fine.
+ if {[r debug digest] ne [r -1 debug digest]} {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ puts "Master - Slave inconsistency"
+ puts "Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ assert_equal [r debug digest] [r -1 debug digest]
+ }
+ }
+}
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
index 227356b24..0a1cd409f 100644
--- a/tests/integration/replication.tcl
+++ b/tests/integration/replication.tcl
@@ -23,44 +23,6 @@ start_server {tags {"repl"}} {
after 1000
assert_equal [r debug digest] [r -1 debug digest]
}
-
- test {MASTER and SLAVE dataset should be identical after complex ops} {
- createComplexDataset r 10000
- after 500
- if {[r debug digest] ne [r -1 debug digest]} {
- set csv1 [csvdump r]
- set csv2 [csvdump {r -1}]
- set fd [open /tmp/repldump1.txt w]
- puts -nonewline $fd $csv1
- close $fd
- set fd [open /tmp/repldump2.txt w]
- puts -nonewline $fd $csv2
- close $fd
- puts "Master - Slave inconsistency"
- puts "Run diff -u against /tmp/repldump*.txt for more info"
- }
- assert_equal [r debug digest] [r -1 debug digest]
- }
-
- test {MASTER and SLAVE consistency with expire} {
- createComplexDataset r 50000 useexpire
- after 4000 ;# Make sure everything expired before taking the digest
- r keys * ;# Force DEL syntesizing to slave
- after 1000 ;# Wait another second. Now everything should be fine.
- if {[r debug digest] ne [r -1 debug digest]} {
- set csv1 [csvdump r]
- set csv2 [csvdump {r -1}]
- set fd [open /tmp/repldump1.txt w]
- puts -nonewline $fd $csv1
- close $fd
- set fd [open /tmp/repldump2.txt w]
- puts -nonewline $fd $csv2
- close $fd
- puts "Master - Slave inconsistency"
- puts "Run diff -u against /tmp/repldump*.txt for more info"
- }
- assert_equal [r debug digest] [r -1 debug digest]
- }
}
}
@@ -92,6 +54,7 @@ start_server {tags {"repl"}} {
test {SET on the master should immediately propagate} {
r -1 set mykey bar
+ if {$::valgrind} {after 2000}
r 0 get mykey
} {bar}
}
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
index c92754611..3fa1725f3 100644
--- a/tests/support/server.tcl
+++ b/tests/support/server.tcl
@@ -1,5 +1,6 @@
set ::global_overrides {}
set ::tags {}
+set ::valgrind_errors {}
proc error_and_quit {config_file error} {
puts "!!COULD NOT START REDIS-SERVER\n"
@@ -16,11 +17,9 @@ proc check_valgrind_errors stderr {
close $fd
if {![regexp -- {ERROR SUMMARY: 0 errors} $buf] ||
- ![regexp -- {definitely lost: 0 bytes} $buf]} {
- puts "*** VALGRIND ERRORS ***"
- puts $buf
- puts "--- press enter to continue ---"
- gets stdin
+ (![regexp -- {definitely lost: 0 bytes} $buf] &&
+ ![regexp -- {no leaks are possible} $buf])} {
+ send_data_packet $::test_server_fd err "Valgrind error: $buf\n"
}
}
@@ -182,7 +181,7 @@ proc start_server {options {code undefined}} {
# check that the server actually started
# ugly but tries to be as fast as possible...
- set retrynum 20
+ set retrynum 100
set serverisup 0
if {$::verbose} {
diff --git a/tests/support/test.tcl b/tests/support/test.tcl
index a6199f4bd..4e68905a5 100644
--- a/tests/support/test.tcl
+++ b/tests/support/test.tcl
@@ -49,60 +49,28 @@ proc color_term {} {
expr {[info exists ::env(TERM)] && [string match *xterm* $::env(TERM)]}
}
-# This is called before starting the test
-proc announce_test {s} {
+proc colorstr {color str} {
if {[color_term]} {
- puts -nonewline "$s\033\[0K"
- flush stdout
- set ::backward_count [string length $s]
- }
-}
-
-# This is called after the test finished
-proc colored_dot {tags passed} {
- if {[color_term]} {
- # Go backward and delete what announc_test function printed.
- puts -nonewline "\033\[${::backward_count}D\033\[0K\033\[J"
-
- # Print a coloured char, accordingly to test outcome and tags.
- if {[lsearch $tags list] != -1} {
- set colorcode {31}
- set ch L
- } elseif {[lsearch $tags hash] != -1} {
- set colorcode {32}
- set ch H
- } elseif {[lsearch $tags set] != -1} {
- set colorcode {33}
- set ch S
- } elseif {[lsearch $tags zset] != -1} {
- set colorcode {34}
- set ch Z
- } elseif {[lsearch $tags basic] != -1} {
- set colorcode {35}
- set ch B
- } elseif {[lsearch $tags scripting] != -1} {
- set colorcode {36}
- set ch X
- } else {
- set colorcode {37}
- set ch .
+ set b 0
+ if {[string range $color 0 4] eq {bold-}} {
+ set b 1
+ set color [string range $color 5 end]
+ }
+ switch $color {
+ red {set colorcode {31}}
+ green {set colorcode {32}}
+ yellow {set colorcode {33}}
+ blue {set colorcode {34}}
+ magenta {set colorcode {35}}
+ cyan {set colorcode {36}}
+ white {set colorcode {37}}
+ default {set colorcode {37}}
}
if {$colorcode ne {}} {
- if {$passed} {
- puts -nonewline "\033\[0;${colorcode};40m"
- } else {
- puts -nonewline "\033\[7;${colorcode};40m"
- }
- puts -nonewline $ch
- puts -nonewline "\033\[0m"
- flush stdout
+ return "\033\[$b;${colorcode};40m$str\033\[0m"
}
} else {
- if {$passed} {
- puts -nonewline .
- } else {
- puts -nonewline F
- }
+ return $str
}
}
@@ -130,16 +98,9 @@ proc test {name code {okpattern undefined}} {
incr ::num_tests
set details {}
- lappend details $::curfile
- lappend details $::tags
- lappend details $name
+ lappend details "$name in $::curfile"
- if {$::verbose} {
- puts -nonewline [format "#%03d %-68s " $::num_tests $name]
- flush stdout
- } else {
- announce_test $name
- }
+ send_data_packet $::test_server_fd testing $name
if {[catch {set retval [uplevel 1 $code]} error]} {
if {[string match "assertion:*" $error]} {
@@ -148,12 +109,7 @@ proc test {name code {okpattern undefined}} {
lappend ::tests_failed $details
incr ::num_failed
- if {$::verbose} {
- puts "FAILED"
- puts "$msg\n"
- } else {
- colored_dot $::tags 0
- }
+ send_data_packet $::test_server_fd err [join $details "\n"]
} else {
# Re-raise, let handler up the stack take care of this.
error $error $::errorInfo
@@ -161,33 +117,21 @@ proc test {name code {okpattern undefined}} {
} else {
if {$okpattern eq "undefined" || $okpattern eq $retval || [string match $okpattern $retval]} {
incr ::num_passed
- if {$::verbose} {
- puts "PASSED"
- } else {
- colored_dot $::tags 1
- }
+ send_data_packet $::test_server_fd ok $name
} else {
set msg "Expected '$okpattern' to equal or match '$retval'"
lappend details $msg
lappend ::tests_failed $details
incr ::num_failed
- if {$::verbose} {
- puts "FAILED"
- puts "$msg\n"
- } else {
- colored_dot $::tags 0
- }
+ send_data_packet $::test_server_fd err [join $details "\n"]
}
}
- flush stdout
if {$::traceleaks} {
set output [exec leaks redis-server]
if {![string match {*0 leaks*} $output]} {
- puts "--- Test \"$name\" leaked! ---"
- puts $output
- exit 1
+ send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output"
}
}
}
diff --git a/tests/support/tmpfile.tcl b/tests/support/tmpfile.tcl
index 287b09317..809f58730 100644
--- a/tests/support/tmpfile.tcl
+++ b/tests/support/tmpfile.tcl
@@ -4,13 +4,7 @@ file mkdir $::tmproot
# returns a dirname unique to this process to write to
proc tmpdir {basename} {
- if {$::diskstore} {
- # For diskstore we want to use the same dir again and again
- # otherwise everything is too slow.
- set dir [file join $::tmproot $basename.diskstore]
- } else {
- set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]]
- }
+ set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]]
file mkdir $dir
set _ $dir
}
diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl
index f505f26ae..559d02647 100644
--- a/tests/test_helper.tcl
+++ b/tests/test_helper.tcl
@@ -9,6 +9,33 @@ source tests/support/tmpfile.tcl
source tests/support/test.tcl
source tests/support/util.tcl
+set ::all_tests {
+ unit/printver
+ unit/auth
+ unit/protocol
+ unit/basic
+ unit/type/list
+ unit/type/list-2
+ unit/type/list-3
+ unit/type/set
+ unit/type/zset
+ unit/type/hash
+ unit/sort
+ unit/expire
+ unit/other
+ unit/cas
+ unit/quit
+ integration/replication
+ integration/replication-2
+ integration/replication-3
+ integration/aof
+ unit/pubsub
+ unit/slowlog
+ unit/scripting
+}
+# Index to the next test to run in the ::all_tests list.
+set ::next_test 0
+
set ::host 127.0.0.1
set ::port 16379
set ::traceleaks 0
@@ -19,12 +46,22 @@ set ::allowtags {}
set ::external 0; # If "1" this means, we are running against external instance
set ::file ""; # If set, runs only the tests in this comma separated list
set ::curfile ""; # Hold the filename of the current suite
-set ::diskstore 0; # Don't touch this by hand. The test itself will toggle it.
+set ::accurate 0; # If true runs fuzz tests with more iterations
+set ::force_failure 0
+
+# Set to 1 when we are running in client mode. The Redis test uses a
+# server-client model to run tests simultaneously. The server instance
+# runs the specified number of client instances that will actually run tests.
+# The server is responsible for showing the result to the user, and exits with
+# the appropriate exit code depending on the test outcome.
+set ::client 0
+set ::numclients 16
proc execute_tests name {
set path "tests/$name.tcl"
set ::curfile $path
source $path
+ send_data_packet $::test_server_fd done "$name"
}
# Setup a list to hold a stack of server configs. When calls to start_server
@@ -104,93 +141,191 @@ proc s {args} {
}
proc cleanup {} {
- puts "Cleanup: warning may take some time..."
+ puts -nonewline "Cleanup: may take some time... "
+ flush stdout
catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]}
catch {exec rm -rf {*}[glob tests/tmp/server.*]}
+ puts "OK"
}
-proc execute_everything {} {
- if 0 {
- # Use this when hacking on new tests.
- set ::verbose 1
- execute_tests "unit/first"
- return
- }
-
- execute_tests "unit/printver"
- execute_tests "unit/auth"
- execute_tests "unit/protocol"
- execute_tests "unit/basic"
- execute_tests "unit/type/list"
- execute_tests "unit/type/set"
- execute_tests "unit/type/zset"
- execute_tests "unit/type/hash"
- execute_tests "unit/sort"
- execute_tests "unit/expire"
- execute_tests "unit/other"
- execute_tests "unit/cas"
- execute_tests "unit/quit"
- execute_tests "integration/replication"
- execute_tests "integration/aof"
-# execute_tests "integration/redis-cli"
- execute_tests "unit/pubsub"
- execute_tests "unit/scripting"
-
- return; # No diskstore tests for now...
- # run tests with diskstore enabled
- puts "\nRunning diskstore tests... this is slow, press Ctrl+C if not interested.."
- set ::diskstore 1
- lappend ::denytags nodiskstore
- set ::global_overrides {diskstore-enabled yes}
- execute_tests "unit/protocol"
- execute_tests "unit/basic"
- execute_tests "unit/type/list"
- execute_tests "unit/type/set"
- execute_tests "unit/type/zset"
- execute_tests "unit/type/hash"
- execute_tests "unit/sort"
- execute_tests "unit/expire"
- execute_tests "unit/other"
- execute_tests "unit/cas"
-}
-
-proc main {} {
+proc test_server_main {} {
cleanup
+ # Open a listening socket, trying different ports in order to find a
+ # non-busy one.
+ set port 11111
+ while 1 {
+ puts "Starting test server at port $port"
+ if {[catch {socket -server accept_test_clients $port} e]} {
+ if {[string match {*address already in use*} $e]} {
+ if {$port == 20000} {
+ puts "Can't find an available TCP port for test server."
+ exit 1
+ } else {
+ incr port
+ }
+ } else {
+ puts "Fatal error starting test server: $e"
+ exit 1
+ }
+ } else {
+ break
+ }
+ }
+
+ # Start the client instances
+ set ::clients_pids {}
+ for {set j 0} {$j < $::numclients} {incr j} {
+ set p [exec tclsh8.5 [info script] {*}$::argv \
+ --client $port --port [expr {$::port+($j*10)}] &]
+ lappend ::clients_pids $p
+ }
- if {[string length $::file] > 0} {
- foreach {file} [split $::file ,] {
- execute_tests $file
+ # Setup global state for the test server
+ set ::idle_clients {}
+ set ::active_clients {}
+ array set ::clients_start_time {}
+ set ::clients_time_history {}
+ set ::failed_tests {}
+
+ # Enter the event loop to handle clients I/O
+ after 100 test_server_cron
+ vwait forever
+}
+
+# This function gets called 10 times per second. For now it does nothing, but
+# in the future it may be used to detect test clients that are taking too much
+# time to execute their task.
+proc test_server_cron {} {
+}
+
+proc accept_test_clients {fd addr port} {
+ fileevent $fd readable [list read_from_test_client $fd]
+}
+
+# This is the readable handler of our test server. Clients send us messages
+# in the form of a status code and additional data. Supported
+# status types are:
+#
+# ready: the client is ready to execute the command. Only sent at client
+# startup. The server will queue the client FD in the list of idle
+# clients.
+# testing: just used to signal that a given test started.
+# ok: a test was executed with success.
+# err: a test was executed with an error.
+# exception: there was a runtime exception while executing the test.
+# done: the specified test file was fully processed, this test client is
+# ready to accept a new task.
+proc read_from_test_client fd {
+ set bytes [gets $fd]
+ set payload [read $fd $bytes]
+ foreach {status data} $payload break
+ if {$status eq {ready}} {
+ puts "\[$status\]: $data"
+ signal_idle_client $fd
+ } elseif {$status eq {done}} {
+ set elapsed [expr {[clock seconds]-$::clients_start_time($fd)}]
+ puts "\[[colorstr yellow $status]\]: $data ($elapsed seconds)"
+ puts "+++ [expr {[llength $::active_clients]-1}] units still in execution."
+ lappend ::clients_time_history $elapsed $data
+ signal_idle_client $fd
+ } elseif {$status eq {ok}} {
+ puts "\[[colorstr green $status]\]: $data"
+ } elseif {$status eq {err}} {
+ set err "\[[colorstr red $status]\]: $data"
+ puts $err
+ lappend ::failed_tests $err
+ } elseif {$status eq {exception}} {
+ puts "\[[colorstr red $status]\]: $data"
+ foreach p $::clients_pids {
+ catch {exec kill -9 $p}
}
+ exit 1
+ } elseif {$status eq {testing}} {
+ # No op
} else {
- execute_everything
+ puts "\[$status\]: $data"
}
+}
- cleanup
- puts "\n[expr $::num_tests] tests, $::num_passed passed, $::num_failed failed\n"
- if {$::num_failed > 0} {
- set curheader ""
- puts "Failures:"
- foreach {test} $::tests_failed {
- set header [lindex $test 0]
- append header " ("
- append header [join [lindex $test 1] ","]
- append header ")"
-
- if {$curheader ne $header} {
- set curheader $header
- puts "\n$curheader:"
- }
-
- set name [lindex $test 2]
- set msg [lindex $test 3]
- puts "- $name: $msg"
+# A new client is idle. Remove it from the list of active clients and
+# if there are still test units to run, launch them.
+proc signal_idle_client fd {
+ # Remove this fd from the list of active clients.
+ set ::active_clients \
+ [lsearch -all -inline -not -exact $::active_clients $fd]
+ # New unit to process?
+ if {$::next_test != [llength $::all_tests]} {
+ puts [colorstr bold-white "Testing [lindex $::all_tests $::next_test]"]
+ set ::clients_start_time($fd) [clock seconds]
+ send_data_packet $fd run [lindex $::all_tests $::next_test]
+ lappend ::active_clients $fd
+ incr ::next_test
+ } else {
+ lappend ::idle_clients $fd
+ if {[llength $::active_clients] == 0} {
+ the_end
}
+ }
+}
- puts ""
+# The the_end function gets called when all the test units have been
+# executed, so the test run is finished.
+proc the_end {} {
+ # TODO: print the status, exit with the right exit code.
+ puts "\n The End\n"
+ puts "Execution time of different units:"
+ foreach {time name} $::clients_time_history {
+ puts " $time seconds - $name"
+ }
+ if {[llength $::failed_tests]} {
+ puts "\n[colorstr bold-red {!!! WARNING}] The following tests failed:\n"
+ foreach failed $::failed_tests {
+ puts "*** $failed"
+ }
+ cleanup
exit 1
+ } else {
+ puts "\n[colorstr bold-white {\o/}] [colorstr bold-green {All tests passed without errors!}]\n"
+ cleanup
+ exit 0
}
}
+# The client is not event driven (the test server is instead) as we just need
+# to read the command, execute it, and reply... all this in a loop.
+proc test_client_main server_port {
+ set ::test_server_fd [socket localhost $server_port]
+ send_data_packet $::test_server_fd ready [pid]
+ while 1 {
+ set bytes [gets $::test_server_fd]
+ set payload [read $::test_server_fd $bytes]
+ foreach {cmd data} $payload break
+ if {$cmd eq {run}} {
+ execute_tests $data
+ } else {
+ error "Unknown test client command: $cmd"
+ }
+ }
+}
+
+proc send_data_packet {fd status data} {
+ set payload [list $status $data]
+ puts $fd [string length $payload]
+ puts -nonewline $fd $payload
+ flush $fd
+}
+
+proc print_help_screen {} {
+ puts [join {
+ "--valgrind Run the test over valgrind."
+ "--accurate Run slow randomized tests for more iterations."
+ "--single <unit> Just execute the specified unit (see next option)."
+ "--list-tests List all the available test units."
+ "--force-failure Force the execution of a test that always fails."
+ "--help Print this help screen."
+ } "\n"]
+}
+
# parse arguments
for {set j 0} {$j < [llength $argv]} {incr j} {
set opt [lindex $argv $j]
@@ -206,9 +341,6 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
incr j
} elseif {$opt eq {--valgrind}} {
set ::valgrind 1
- } elseif {$opt eq {--file}} {
- set ::file $arg
- incr j
} elseif {$opt eq {--host}} {
set ::external 1
set ::host $arg
@@ -216,20 +348,47 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
} elseif {$opt eq {--port}} {
set ::port $arg
incr j
- } elseif {$opt eq {--verbose}} {
- set ::verbose 1
+ } elseif {$opt eq {--accurate}} {
+ set ::accurate 1
+ } elseif {$opt eq {--force-failure}} {
+ set ::force_failure 1
+ } elseif {$opt eq {--single}} {
+ set ::all_tests $arg
+ incr j
+ } elseif {$opt eq {--list-tests}} {
+ foreach t $::all_tests {
+ puts $t
+ }
+ exit 0
+ } elseif {$opt eq {--client}} {
+ set ::client 1
+ set ::test_server_port $arg
+ incr j
+ } elseif {$opt eq {--help}} {
+ print_help_screen
+ exit 0
} else {
puts "Wrong argument: $opt"
exit 1
}
}
-if {[catch { main } err]} {
- if {[string length $err] > 0} {
- # only display error when not generated by the test suite
- if {$err ne "exception"} {
- puts $::errorInfo
+if {$::client} {
+ if {[catch { test_client_main $::test_server_port } err]} {
+ set estr "Executing test client: $err.\n$::errorInfo"
+ if {[catch {send_data_packet $::test_server_fd exception $estr}]} {
+ puts $estr
}
exit 1
}
+} else {
+ if {[catch { test_server_main } err]} {
+ if {[string length $err] > 0} {
+ # only display error when not generated by the test suite
+ if {$err ne "exception"} {
+ puts $::errorInfo
+ }
+ exit 1
+ }
+ }
}
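The test server and its clients exchange length-prefixed packets: send_data_packet writes the payload length on a line of its own, then the payload itself, a two-element Tcl list of a status keyword (ready, run, testing, exception, ...) and its data. Below is a minimal sketch of the receiving side, mirroring the gets/read pair used in test_client_main; read_data_packet is a hypothetical helper name, not part of this patch.

proc read_data_packet fd {
    # First line carries the payload length in characters.
    set bytes [gets $fd]
    # Then exactly that many characters follow, with no trailing newline.
    set payload [read $fd $bytes]
    foreach {status data} $payload break
    return [list $status $data]
}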
diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl
index 6f16ed589..415a0f538 100644
--- a/tests/unit/expire.tcl
+++ b/tests/unit/expire.tcl
@@ -7,7 +7,7 @@ start_server {tags {"expire"}} {
set v4 [r ttl x]
r expire x 4
list $v1 $v2 $v3 $v4
- } {1 5 1 10}
+ } {1 [45] 1 10}
test {EXPIRE - It should be still possible to read 'x'} {
r get x
diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl
index 716d6897a..702c291f9 100644
--- a/tests/unit/other.tcl
+++ b/tests/unit/other.tcl
@@ -1,4 +1,11 @@
start_server {tags {"other"}} {
+ if {$::force_failure} {
+ # This is used just for test suite development purposes.
+ test {Failing test} {
+ format err
+ } {ok}
+ }
+
test {SAVE - make sure there are all the types as values} {
# Wait for a background saving in progress to terminate
waitForBgsave r
@@ -12,11 +19,12 @@ start_server {tags {"other"}} {
r save
} {OK}
- tags {slow nodiskstore} {
+ tags {slow} {
+ if {$::accurate} {set iterations 10000} else {set iterations 1000}
foreach fuzztype {binary alpha compr} {
test "FUZZ stresser with data model $fuzztype" {
set err 0
- for {set i 0} {$i < 10000} {incr i} {
+ for {set i 0} {$i < $iterations} {incr i} {
set fuzz [randstring 0 512 $fuzztype]
r set foo $fuzz
set got [r get foo]
@@ -46,11 +54,12 @@ start_server {tags {"other"}} {
set _ $err
} {*invalid*}
- tags {consistency nodiskstore} {
+ tags {consistency} {
if {![catch {package require sha1}]} {
+ if {$::accurate} {set numops 10000} else {set numops 1000}
test {Check consistency of different data types after a reload} {
r flushdb
- createComplexDataset r 10000
+ createComplexDataset r $numops
set dump [csvdump r]
set sha1 [r debug digest]
r debug reload
@@ -102,25 +111,19 @@ start_server {tags {"other"}} {
r flushdb
r set x 10
r expire x 1000
- if {$::diskstore} {
- r debug flushcache
- } else {
- r save
- r debug reload
- }
+ r save
+ r debug reload
set ttl [r ttl x]
set e1 [expr {$ttl > 900 && $ttl <= 1000}]
- if {!$::diskstore} {
- r bgrewriteaof
- waitForBgrewriteaof r
- r debug loadaof
- }
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
set ttl [r ttl x]
set e2 [expr {$ttl > 900 && $ttl <= 1000}]
list $e1 $e2
} {1 1}
- tags {protocol nodiskstore} {
+ tags {protocol} {
test {PIPELINING stresser (also a regression for the old epoll bug)} {
set fd2 [socket $::host $::port]
fconfigure $fd2 -encoding binary -translation binary
@@ -244,6 +247,7 @@ start_server {tags {"other"}} {
} {0 0}
test {Perform a final SAVE to leave a clean DB on disk} {
+ waitForBgsave r
r save
} {OK}
}
diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl
new file mode 100644
index 000000000..55a71e985
--- /dev/null
+++ b/tests/unit/slowlog.tcl
@@ -0,0 +1,41 @@
+start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
+ test {SLOWLOG - check that it starts with an empty log} {
+ r slowlog len
+ } {0}
+
+ test {SLOWLOG - only logs commands taking more time than specified} {
+ r config set slowlog-log-slower-than 100000
+ r ping
+ assert_equal [r slowlog len] 0
+ r debug sleep 0.2
+ assert_equal [r slowlog len] 1
+ }
+
+ test {SLOWLOG - max entries is correctly handled} {
+ r config set slowlog-log-slower-than 0
+ r config set slowlog-max-len 10
+ for {set i 0} {$i < 100} {incr i} {
+ r ping
+ }
+ r slowlog len
+ } {10}
+
+ test {SLOWLOG - GET optional argument to limit output len works} {
+ llength [r slowlog get 5]
+ } {5}
+
+ test {SLOWLOG - RESET subcommand works} {
+ r config set slowlog-log-slower-than 100000
+ r slowlog reset
+ r slowlog len
+ } {0}
+
+ test {SLOWLOG - logged entry sanity check} {
+ r debug sleep 0.2
+ set e [lindex [r slowlog get] 0]
+ assert_equal [llength $e] 4
+ assert_equal [lindex $e 0] 105
+ assert_equal [expr {[lindex $e 2] > 100000}] 1
+ assert_equal [lindex $e 3] {debug sleep 0.2}
+ }
+}
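The sanity check above relies on the layout of a slow log entry: a four-element list holding a unique id, the unix timestamp at which the command ran, its execution time in microseconds, and the command with its arguments. A small sketch under those assumptions follows; print_last_slow_entry is a hypothetical helper using the suite's r client, not part of this patch.

proc print_last_slow_entry {} {
    # Fetch only the most recent entry from the slow log.
    set e [lindex [r slowlog get 1] 0]
    if {$e eq {}} { return "slow log is empty" }
    foreach {id ts us cmd} $e break
    return "#$id at $ts: [expr {$us/1000.0}] ms - $cmd"
}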
diff --git a/tests/unit/type/list-2.tcl b/tests/unit/type/list-2.tcl
new file mode 100644
index 000000000..bf6a055eb
--- /dev/null
+++ b/tests/unit/type/list-2.tcl
@@ -0,0 +1,44 @@
+start_server {
+ tags {"list"}
+ overrides {
+ "list-max-ziplist-value" 16
+ "list-max-ziplist-entries" 256
+ }
+} {
+ source "tests/unit/type/list-common.tcl"
+
+ foreach {type large} [array get largevalue] {
+ tags {"slow"} {
+ test "LTRIM stress testing - $type" {
+ set mylist {}
+ set startlen 32
+ r del mylist
+
+ # Start with the large value to ensure the
+ # right encoding is used.
+ r rpush mylist $large
+ lappend mylist $large
+
+ for {set i 0} {$i < $startlen} {incr i} {
+ set str [randomInt 9223372036854775807]
+ r rpush mylist $str
+ lappend mylist $str
+ }
+
+ for {set i 0} {$i < 1000} {incr i} {
+ set min [expr {int(rand()*$startlen)}]
+ set max [expr {$min+int(rand()*$startlen)}]
+ set mylist [lrange $mylist $min $max]
+ r ltrim mylist $min $max
+ assert_equal $mylist [r lrange mylist 0 -1]
+
+ for {set j [r llen mylist]} {$j < $startlen} {incr j} {
+ set str [randomInt 9223372036854775807]
+ r rpush mylist $str
+ lappend mylist $str
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/type/list-3.tcl b/tests/unit/type/list-3.tcl
new file mode 100644
index 000000000..9410022fd
--- /dev/null
+++ b/tests/unit/type/list-3.tcl
@@ -0,0 +1,70 @@
+start_server {
+ tags {list ziplist}
+ overrides {
+ "list-max-ziplist-value" 200000
+ "list-max-ziplist-entries" 256
+ }
+} {
+ test {Explicit regression for a list bug} {
+ set mylist {49376042582 {BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyo<xSJ1oObDYd`ppZuW6P@fS278YaOx=s6lvdFlMbP0[SbkI^Kr\HBXtuFaA^mDx:yzS4a[skiiPWhT<nNfAf=aQVfclcuwDrfe;iVuKdNvB9kbfq>tK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}}
+ r del l
+ r rpush l [lindex $mylist 0]
+ r rpush l [lindex $mylist 1]
+ assert_equal [r lindex l 0] [lindex $mylist 0]
+ assert_equal [r lindex l 1] [lindex $mylist 1]
+ }
+
+ tags {slow} {
+ test {ziplist implementation: value encoding and backlink} {
+ if {$::accurate} {set iterations 100} else {set iterations 10}
+ for {set j 0} {$j < $iterations} {incr j} {
+ r del l
+ set l {}
+ for {set i 0} {$i < 200} {incr i} {
+ randpath {
+ set data [string repeat x [randomInt 100000]]
+ } {
+ set data [randomInt 65536]
+ } {
+ set data [randomInt 4294967296]
+ } {
+ set data [randomInt 18446744073709551616]
+ }
+ lappend l $data
+ r rpush l $data
+ }
+ assert_equal [llength $l] [r llen l]
+ # Traverse backward
+ for {set i 199} {$i >= 0} {incr i -1} {
+ if {[lindex $l $i] ne [r lindex l $i]} {
+ assert_equal [lindex $l $i] [r lindex l $i]
+ }
+ }
+ }
+ }
+
+ test {ziplist implementation: encoding stress testing} {
+ for {set j 0} {$j < 200} {incr j} {
+ r del l
+ set l {}
+ set len [randomInt 400]
+ for {set i 0} {$i < $len} {incr i} {
+ set rv [randomValue]
+ randpath {
+ lappend l $rv
+ r rpush l $rv
+ } {
+ set l [concat [list $rv] $l]
+ r lpush l $rv
+ }
+ }
+ assert_equal [llength $l] [r llen l]
+ for {set i 0} {$i < $len} {incr i} {
+ if {[lindex $l $i] ne [r lindex l $i]} {
+ assert_equal [lindex $l $i] [r lindex l $i]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/type/list-common.tcl b/tests/unit/type/list-common.tcl
new file mode 100644
index 000000000..ab45f0b31
--- /dev/null
+++ b/tests/unit/type/list-common.tcl
@@ -0,0 +1,5 @@
+# We need a value larger than list-max-ziplist-value to make sure
+# the list has the right encoding when it is swapped in again.
+array set largevalue {}
+set largevalue(ziplist) "hello"
+set largevalue(linkedlist) [string repeat "hello" 4]
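With list-max-ziplist-value set to 16 in the overrides used by these tests, the five-character "hello" stays within the ziplist limits while the repeated 20-character value exceeds them, so pushing each one should yield a differently encoded list. A sketch of how that could be verified with OBJECT ENCODING (key names are arbitrary; this check is not part of the patch):

r del zipkey linkkey
r rpush zipkey $largevalue(ziplist)        ;# 5 chars, stays a ziplist
r rpush linkkey $largevalue(linkedlist)    ;# 20 chars, converted to linkedlist
assert_equal {ziplist} [r object encoding zipkey]
assert_equal {linkedlist} [r object encoding linkkey]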
diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl
index 616abd21a..ff178db41 100644
--- a/tests/unit/type/list.tcl
+++ b/tests/unit/type/list.tcl
@@ -5,11 +5,7 @@ start_server {
"list-max-ziplist-entries" 256
}
} {
- # We need a value larger than list-max-ziplist-value to make sure
- # the list has the right encoding when it is swapped in again.
- array set largevalue {}
- set largevalue(ziplist) "hello"
- set largevalue(linkedlist) [string repeat "hello" 4]
+ source "tests/unit/type/list-common.tcl"
test {LPUSH, RPUSH, LLENGTH, LINDEX - ziplist} {
# first lpush then rpush
@@ -152,8 +148,11 @@ start_server {
test "BLPOP with variadic LPUSH" {
set rd [redis_deferring_client]
r del blist target
+ if {$::valgrind} {after 100}
$rd blpop blist 0
+ if {$::valgrind} {after 100}
assert_equal 2 [r lpush blist foo bar]
+ if {$::valgrind} {after 100}
assert_equal {blist foo} [$rd read]
assert_equal bar [lindex [r lrange blist 0 -1] 0]
}
@@ -671,38 +670,6 @@ start_server {
assert_equal {} [trim_list $type 0 -6]
}
- tags {"slow"} {
- test "LTRIM stress testing - $type" {
- set mylist {}
- set startlen 32
- r del mylist
-
- # Start with the large value to ensure the
- # right encoding is used.
- r rpush mylist $large
- lappend mylist $large
-
- for {set i 0} {$i < $startlen} {incr i} {
- set str [randomInt 9223372036854775807]
- r rpush mylist $str
- lappend mylist $str
- }
-
- for {set i 0} {$i < 1000} {incr i} {
- set min [expr {int(rand()*$startlen)}]
- set max [expr {$min+int(rand()*$startlen)}]
- set mylist [lrange $mylist $min $max]
- r ltrim mylist $min $max
- assert_equal $mylist [r lrange mylist 0 -1]
-
- for {set j [r llen mylist]} {$j < $startlen} {incr j} {
- set str [randomInt 9223372036854775807]
- r rpush mylist $str
- lappend mylist $str
- }
- }
- }
- }
}
foreach {type large} [array get largevalue] {
@@ -760,76 +727,5 @@ start_server {
assert_equal 1 [r lrem myotherlist 1 2]
assert_equal 3 [r llen myotherlist]
}
-
- }
-}
-
-start_server {
- tags {list ziplist}
- overrides {
- "list-max-ziplist-value" 200000
- "list-max-ziplist-entries" 256
- }
-} {
- test {Explicit regression for a list bug} {
- set mylist {49376042582 {BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyo<xSJ1oObDYd`ppZuW6P@fS278YaOx=s6lvdFlMbP0[SbkI^Kr\HBXtuFaA^mDx:yzS4a[skiiPWhT<nNfAf=aQVfclcuwDrfe;iVuKdNvB9kbfq>tK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}}
- r del l
- r rpush l [lindex $mylist 0]
- r rpush l [lindex $mylist 1]
- assert_equal [r lindex l 0] [lindex $mylist 0]
- assert_equal [r lindex l 1] [lindex $mylist 1]
- }
-
- tags {slow} {
- test {ziplist implementation: value encoding and backlink} {
- for {set j 0} {$j < 100} {incr j} {
- r del l
- set l {}
- for {set i 0} {$i < 200} {incr i} {
- randpath {
- set data [string repeat x [randomInt 100000]]
- } {
- set data [randomInt 65536]
- } {
- set data [randomInt 4294967296]
- } {
- set data [randomInt 18446744073709551616]
- }
- lappend l $data
- r rpush l $data
- }
- assert_equal [llength $l] [r llen l]
- # Traverse backward
- for {set i 199} {$i >= 0} {incr i -1} {
- if {[lindex $l $i] ne [r lindex l $i]} {
- assert_equal [lindex $l $i] [r lindex l $i]
- }
- }
- }
- }
-
- test {ziplist implementation: encoding stress testing} {
- for {set j 0} {$j < 200} {incr j} {
- r del l
- set l {}
- set len [randomInt 400]
- for {set i 0} {$i < $len} {incr i} {
- set rv [randomValue]
- randpath {
- lappend l $rv
- r rpush l $rv
- } {
- set l [concat [list $rv] $l]
- r lpush l $rv
- }
- }
- assert_equal [llength $l] [r llen l]
- for {set i 0} {$i < 200} {incr i} {
- if {[lindex $l $i] ne [r lindex l $i]} {
- assert_equal [lindex $l $i] [r lindex l $i]
- }
- }
- }
- }
}
}
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl
index 46d40f6fb..41f5f588f 100644
--- a/tests/unit/type/zset.tcl
+++ b/tests/unit/type/zset.tcl
@@ -527,7 +527,7 @@ start_server {tags {"zset"}} {
} elseif {$encoding == "skiplist"} {
r config set zset-max-ziplist-entries 0
r config set zset-max-ziplist-value 0
- set elements 1000
+ if {$::accurate} {set elements 1000} else {set elements 100}
} else {
puts "Unknown sorted set encoding"
exit