author    antirez <antirez@gmail.com>    2012-10-11 18:36:18 +0200
committer antirez <antirez@gmail.com>    2012-10-11 18:36:18 +0200
commit    c3ff470889c2bcf4d9521cf547d32760ef5bbc6c (patch)
tree      2bd8aea3974b2f0aea37091867e0aa12a1d1ee6b
parent    0e25c0ccf4d1b2c329366aecbe821c5b317be230 (diff)
parent    f4eb4b335264f2a88316a7fd8752d740f1a3cb63 (diff)
download  redis-c3ff470889c2bcf4d9521cf547d32760ef5bbc6c.tar.gz
Merge remote-tracking branch 'origin/2.6' into 2.6
-rw-r--r--  00-RELEASENOTES          |  12
-rw-r--r--  CONTRIBUTING             |  20
-rw-r--r--  src/ae.c                 |  20
-rw-r--r--  src/ae.h                 |   1
-rw-r--r--  src/config.c             |   7
-rw-r--r--  src/dict.c               |  68
-rw-r--r--  src/dict.h               |   2
-rw-r--r--  src/redis.c              |   5
-rw-r--r--  src/replication.c        |   2
-rw-r--r--  src/sort.c               | 108
-rw-r--r--  src/version.h            |   2
-rw-r--r--  tests/unit/sort.tcl      |  41
-rwxr-xr-x  utils/install_server.sh  |  25
-rw-r--r--  utils/redis.conf.tpl     | 495
14 files changed, 267 insertions(+), 541 deletions(-)
diff --git a/00-RELEASENOTES b/00-RELEASENOTES
index 12f1e3d18..02a3db3a0 100644
--- a/00-RELEASENOTES
+++ b/00-RELEASENOTES
@@ -14,6 +14,18 @@ HIGH: There is a critical bug that may affect a subset of users. Upgrade!
CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP.
--------------------------------------------------------------------------------
+---[ Redis 2.5.14 (2.6 Release Candidate 8) ]
+
+* [BUGFIX] Fixed compilation on FreeBSD.
+* [IMPROVED] SRANDMEMBER <count> that returns multiple random elements.
+* [IMPROVED] Sentinel backported to 2.6. It will be kept in sync with 2.8.
+* [IMPROVED] Helper function for scripting to return errors and status replies.
+* [IMPROVED] SORT BY nosort [ASC|DESC] uses the sorted set's own ordering.
+* [BUGFIX] Better resistance to system clock skew.
+* [IMPROVED] Warn the user when the configured maxmemory seems odd.
+* [BUGFIX] Hashing function is now murmurhash2 for security purposes.
+* [IMPROVED] Install script no longer uses a template but redis.conf itself.
+
---[ Redis 2.5.13 (2.6 Release Candidate 7) ]
UPGRADE URGENCY: HIGH
diff --git a/CONTRIBUTING b/CONTRIBUTING
index 623c5a6ec..1ecd13945 100644
--- a/CONTRIBUTING
+++ b/CONTRIBUTING
@@ -1,8 +1,22 @@
-1. Enter irc.freenode.org #redis and start talking with 'antirez' and/or 'pietern' to check if there is interest for such a feature and to understand the probability of it being merged. We'll try hard to keep Redis simple... so you'll likely encounter high resistance.
+# IMPORTANT: HOW TO USE REDIS GITHUB ISSUES
-2. Drop a message to the Redis Google Group with a proposal of semantics/API.
+* Github issues SHOULD ONLY BE USED to report bugs, and for DETAILED feature
+ requests. Everything else belongs to the Redis Google Group.
-3. If steps 1 and 2 are ok, use the following procedure to submit a patch:
+ PLEASE DO NOT POST GENERAL QUESTIONS that are not about bugs or suspected
+ bugs in the Github issues system. We'll be very happy to help you and
+ provide all the support you need in the Redis Google Group.
+
+ Redis Google Group address:
+
+ https://groups.google.com/forum/?fromgroups#!forum/redis-db
+
+# How to provide a patch for a new feature
+
+1. Drop a message to the Redis Google Group with a proposal of semantics/API.
+
+2. If in step 1 you get an acknowledgment from the project leaders, use the
+ following procedure to submit a patch:
a. Fork Redis on github ( http://help.github.com/fork-a-repo/ )
b. Create a topic branch (git checkout -b my_branch)
diff --git a/src/ae.c b/src/ae.c
index ba53b4568..d2faed326 100644
--- a/src/ae.c
+++ b/src/ae.c
@@ -37,6 +37,7 @@
#include <stdlib.h>
#include <poll.h>
#include <string.h>
+#include <time.h>
#include "ae.h"
#include "zmalloc.h"
@@ -67,6 +68,7 @@ aeEventLoop *aeCreateEventLoop(int setsize) {
eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize);
if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err;
eventLoop->setsize = setsize;
+ eventLoop->lastTime = time(NULL);
eventLoop->timeEventHead = NULL;
eventLoop->timeEventNextId = 0;
eventLoop->stop = 0;
@@ -236,6 +238,24 @@ static int processTimeEvents(aeEventLoop *eventLoop) {
int processed = 0;
aeTimeEvent *te;
long long maxId;
+ time_t now = time(NULL);
+
+ /* If the system clock is moved to the future, and then set back to the
+ * right value, time events may be delayed in a random way. Often this
+ * means that scheduled operations will not be performed soon enough.
+ *
+ * Here we try to detect system clock skews, and force all the time
+ * events to be processed ASAP when this happens: the idea is that
+ * processing events earlier is less dangerous than delaying them
+ * indefinitely, and practice suggests it is. */
+ if (now < eventLoop->lastTime) {
+ te = eventLoop->timeEventHead;
+ while(te) {
+ te->when_sec = 0;
+ te = te->next;
+ }
+ }
+ eventLoop->lastTime = now;
te = eventLoop->timeEventHead;
maxId = eventLoop->timeEventNextId-1;
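The skew-detection logic above is small enough to try in isolation. Below is a minimal standalone sketch of the same idea, using simplified, hypothetical timer structures rather than the real aeEventLoop / aeTimeEvent:

    #include <stdio.h>
    #include <time.h>

    typedef struct TimerEvent {
        time_t when_sec;               /* absolute second at which to fire */
        struct TimerEvent *next;
    } TimerEvent;

    typedef struct {
        time_t last_time;              /* last wall-clock second we observed */
        TimerEvent *head;
    } TimerLoop;

    /* If the wall clock moved backwards since the last call, make every
     * pending timer immediately due: firing early is considered safer than
     * delaying events for an unbounded amount of time. */
    static void detect_clock_skew(TimerLoop *loop) {
        time_t now = time(NULL);
        if (now < loop->last_time) {
            TimerEvent *te;
            for (te = loop->head; te != NULL; te = te->next)
                te->when_sec = 0;      /* 0 is always in the past */
        }
        loop->last_time = now;
    }

    int main(void) {
        TimerEvent te = { time(NULL) + 60, NULL };
        /* Simulate a clock that was ahead and then jumped back. */
        TimerLoop loop = { time(NULL) + 3600, &te };
        detect_clock_skew(&loop);
        printf("timer due at %ld (0 = fire ASAP)\n", (long)te.when_sec);
        return 0;
    }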
diff --git a/src/ae.h b/src/ae.h
index e1dccfc76..f52a075e5 100644
--- a/src/ae.h
+++ b/src/ae.h
@@ -88,6 +88,7 @@ typedef struct aeEventLoop {
int maxfd; /* highest file descriptor currently registered */
int setsize; /* max number of file descriptors tracked */
long long timeEventNextId;
+ time_t lastTime; /* Used to detect system clock skew */
aeFileEvent *events; /* Registered events */
aeFiredEvent *fired; /* Fired events */
aeTimeEvent *timeEventHead;
diff --git a/src/config.c b/src/config.c
index 768db1e9a..a36eb9a39 100644
--- a/src/config.c
+++ b/src/config.c
@@ -438,7 +438,12 @@ void configSetCommand(redisClient *c) {
if (getLongLongFromObject(o,&ll) == REDIS_ERR ||
ll < 0) goto badfmt;
server.maxmemory = ll;
- if (server.maxmemory) freeMemoryIfNeeded();
+ if (server.maxmemory) {
+ if (server.maxmemory < zmalloc_used_memory()) {
+ redisLog(REDIS_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. This will result in keys eviction and/or inability to accept new write commands depending on the maxmemory-policy.");
+ }
+ freeMemoryIfNeeded();
+ }
} else if (!strcasecmp(c->argv[2]->ptr,"maxmemory-policy")) {
if (!strcasecmp(o->ptr,"volatile-lru")) {
server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU;
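As a rough illustration of the check above: the warning fires only when the new limit is already below live usage, i.e. when eviction would start immediately. A minimal sketch, with zmalloc_used_memory() replaced by a hypothetical stub:

    #include <stdio.h>

    /* Hypothetical stand-in for zmalloc_used_memory(): pretend the server
     * currently uses 150MB. */
    static unsigned long long used_memory(void) { return 150ULL << 20; }

    static void set_maxmemory(unsigned long long new_limit) {
        if (new_limit != 0 && new_limit < used_memory())
            fprintf(stderr, "WARNING: new maxmemory (%llu) is below current "
                            "usage (%llu): eviction starts immediately.\n",
                    new_limit, used_memory());
        /* ...apply the limit, then free memory if needed... */
    }

    int main(void) {
        set_maxmemory(100ULL << 20);   /* 100MB < 150MB: triggers the warning */
        return 0;
    }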
diff --git a/src/dict.c b/src/dict.c
index 66bb983a8..ec58e8200 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -85,29 +85,73 @@ unsigned int dictIdentityHashFunction(unsigned int key)
return key;
}
-static int dict_hash_function_seed = 5381;
+static uint32_t dict_hash_function_seed = 5381;
-void dictSetHashFunctionSeed(unsigned int seed) {
+void dictSetHashFunctionSeed(uint32_t seed) {
dict_hash_function_seed = seed;
}
-unsigned int dictGetHashFunctionSeed(void) {
+uint32_t dictGetHashFunctionSeed(void) {
return dict_hash_function_seed;
}
-/* Generic hash function (a popular one from Bernstein).
- * I tested a few and this was the best. */
-unsigned int dictGenHashFunction(const unsigned char *buf, int len) {
- unsigned int hash = dict_hash_function_seed;
+/* MurmurHash2, by Austin Appleby
+ * Note - This code makes a few assumptions about how your machine behaves -
+ * 1. We can read a 4-byte value from any address without crashing
+ * 2. sizeof(int) == 4
+ *
+ * And it has a few limitations -
+ *
+ * 1. It will not work incrementally.
+ * 2. It will not produce the same results on little-endian and big-endian
+ * machines.
+ */
+unsigned int dictGenHashFunction(const void *key, int len) {
+ /* 'm' and 'r' are mixing constants generated offline.
+ They're not really 'magic', they just happen to work well. */
+ uint32_t seed = dict_hash_function_seed;
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
- while (len--)
- hash = ((hash << 5) + hash) + (*buf++); /* hash * 33 + c */
- return hash;
+ /* Initialize the hash to a 'random' value */
+ uint32_t h = seed ^ len;
+
+ /* Mix 4 bytes at a time into the hash */
+ const unsigned char *data = (const unsigned char *)key;
+
+ while(len >= 4) {
+ uint32_t k = *(uint32_t*)data;
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h *= m;
+ h ^= k;
+
+ data += 4;
+ len -= 4;
+ }
+
+ /* Handle the last few bytes of the input array */
+ switch(len) {
+ case 3: h ^= data[2] << 16;
+ case 2: h ^= data[1] << 8;
+ case 1: h ^= data[0]; h *= m;
+ };
+
+ /* Do a few final mixes of the hash to ensure the last few
+ * bytes are well-incorporated. */
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return (unsigned int)h;
}
-/* And a case insensitive version */
+/* And a case insensitive hash function (based on djb hash) */
unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len) {
- unsigned int hash = dict_hash_function_seed;
+ unsigned int hash = (unsigned int)dict_hash_function_seed;
while (len--)
hash = ((hash << 5) + hash) + (tolower(*buf++)); /* hash * 33 + c */
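For reference, the new hash can be exercised outside the Redis build. The sketch below re-derives the same MurmurHash2 core in standalone form (it is not the dict.c code itself); memcpy() replaces the *(uint32_t*) cast, which sidesteps the unaligned-read assumption listed in the comment while producing the same values. The seed dependence shown is what makes a randomized seed useful against hash-flooding attacks:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Standalone MurmurHash2, parameterized by seed. Results still differ
     * between little- and big-endian machines, as noted above. */
    static uint32_t murmur2(const void *key, int len, uint32_t seed) {
        const uint32_t m = 0x5bd1e995;
        const int r = 24;
        uint32_t h = seed ^ (uint32_t)len;
        const unsigned char *data = key;

        while (len >= 4) {
            uint32_t k;
            memcpy(&k, data, 4);       /* safe unaligned 4-byte load */
            k *= m; k ^= k >> r; k *= m;
            h *= m; h ^= k;
            data += 4; len -= 4;
        }
        switch (len) {
        case 3: h ^= (uint32_t)data[2] << 16; /* fall through */
        case 2: h ^= (uint32_t)data[1] << 8;  /* fall through */
        case 1: h ^= data[0]; h *= m;
        }
        h ^= h >> 13; h *= m; h ^= h >> 15;
        return h;
    }

    int main(void) {
        const char *key = "mykey";
        int len = (int)strlen(key);
        /* Same key, different seeds -> unrelated hash values, so an attacker
         * who does not know the seed cannot precompute colliding keys. */
        printf("seed 5381:       %u\n", murmur2(key, len, 5381));
        printf("seed 0xdeadbeef: %u\n", murmur2(key, len, 0xdeadbeefU));
        return 0;
    }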
diff --git a/src/dict.h b/src/dict.h
index 5f8569535..f480ae539 100644
--- a/src/dict.h
+++ b/src/dict.h
@@ -155,7 +155,7 @@ dictEntry *dictNext(dictIterator *iter);
void dictReleaseIterator(dictIterator *iter);
dictEntry *dictGetRandomKey(dict *d);
void dictPrintStats(dict *d);
-unsigned int dictGenHashFunction(const unsigned char *buf, int len);
+unsigned int dictGenHashFunction(const void *key, int len);
unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len);
void dictEmpty(dict *d);
void dictEnableResize(void);
diff --git a/src/redis.c b/src/redis.c
index 8dc6b9a92..f26bdab0c 100644
--- a/src/redis.c
+++ b/src/redis.c
@@ -2579,6 +2579,11 @@ int main(int argc, char **argv) {
redisLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket);
}
+ /* Warn the user about a suspicious maxmemory setting. */
+ if (server.maxmemory > 0 && server.maxmemory < 1024*1024) {
+ redisLog(REDIS_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?", server.maxmemory);
+ }
+
aeSetBeforeSleepProc(server.el,beforeSleep);
aeMain(server.el);
aeDeleteEventLoop(server.el);
diff --git a/src/replication.c b/src/replication.c
index 871ce95ae..c1e461916 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -721,7 +721,7 @@ void replicationCron(void) {
if (server.masterhost && server.repl_state == REDIS_REPL_TRANSFER &&
(time(NULL)-server.repl_transfer_lastio) > server.repl_timeout)
{
- redisLog(REDIS_WARNING,"Timeout receiving bulk data from MASTER...");
+ redisLog(REDIS_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value.");
replicationAbortSyncTransfer();
}
diff --git a/src/sort.c b/src/sort.c
index e5178cd0d..d18a52959 100644
--- a/src/sort.c
+++ b/src/sort.c
@@ -2,6 +2,8 @@
#include "pqsort.h" /* Partial qsort for SORT+LIMIT */
#include <math.h> /* isnan() */
+zskiplistNode* zslGetElementByRank(zskiplist *zsl, unsigned long rank);
+
redisSortOperation *createSortOperation(int type, robj *pattern) {
redisSortOperation *so = zmalloc(sizeof(*so));
so->type = type;
@@ -156,8 +158,9 @@ void sortCommand(redisClient *c) {
/* Lookup the key to sort. It must be of the right types */
sortval = lookupKeyRead(c->db,c->argv[1]);
- if (sortval && sortval->type != REDIS_SET && sortval->type != REDIS_LIST &&
- sortval->type != REDIS_ZSET)
+ if (sortval && sortval->type != REDIS_SET &&
+ sortval->type != REDIS_LIST &&
+ sortval->type != REDIS_ZSET)
{
addReply(c,shared.wrongtypeerr);
return;
@@ -167,7 +170,7 @@ void sortCommand(redisClient *c) {
* Operations can be GET/DEL/INCR/DECR */
operations = listCreate();
listSetFreeMethod(operations,zfree);
- j = 2;
+ j = 2; /* options start at argv[2] */
/* Now we need to protect sortval incrementing its count, in the future
* SORT may have options able to overwrite/delete keys during the sorting
@@ -213,13 +216,18 @@ void sortCommand(redisClient *c) {
j++;
}
- /* If we have STORE we need to force sorting for deterministic output
- * and replication. We use alpha sorting since this is guaranteed to
- * work with any input.
+ /* For the STORE option, or when SORT is called from a Lua script,
+ * we want to force a specific ordering even when no explicit ordering
+ * was requested (SORT BY nosort). This guarantees that replication / AOF
+ * is deterministic.
*
- * We also want determinism when SORT is called from Lua scripts, so
- * in this case we also force alpha sorting. */
- if ((storekey || c->flags & REDIS_LUA_CLIENT) && dontsort) {
+ * However, when 'dontsort' is true but the value to sort is a sorted
+ * set, we don't need to do anything: its ordering is already guaranteed
+ * in this special case. */
+ if ((storekey || c->flags & REDIS_LUA_CLIENT) &&
+ (dontsort && sortval->type != REDIS_ZSET))
+ {
+ /* Force ALPHA sorting */
dontsort = 0;
alpha = 1;
sortby = NULL;
@@ -229,13 +237,41 @@ void sortCommand(redisClient *c) {
if (sortval->type == REDIS_ZSET)
zsetConvert(sortval, REDIS_ENCODING_SKIPLIST);
- /* Load the sorting vector with all the objects to sort */
+ /* Obtain the length of the object to sort. */
switch(sortval->type) {
case REDIS_LIST: vectorlen = listTypeLength(sortval); break;
case REDIS_SET: vectorlen = setTypeSize(sortval); break;
case REDIS_ZSET: vectorlen = dictSize(((zset*)sortval->ptr)->dict); break;
default: vectorlen = 0; redisPanic("Bad SORT type"); /* Avoid GCC warning */
}
+
+ /* Perform LIMIT start,count sanity checking. */
+ start = (limit_start < 0) ? 0 : limit_start;
+ end = (limit_count < 0) ? vectorlen-1 : start+limit_count-1;
+ if (start >= vectorlen) {
+ start = vectorlen-1;
+ end = vectorlen-2;
+ }
+ if (end >= vectorlen) end = vectorlen-1;
+
+ /* Optimization:
+ *
+ * 1) if the object to sort is a sorted set.
+ * 2) There is nothing to sort as dontsort is true (BY <constant string>).
+ * 3) We have a LIMIT option that actually reduces the number of elements
+ * to fetch.
+ *
+ * In this case loading all the objects into the vector is a huge waste
+ * of resources. We just allocate a vector big enough for the selected
+ * range length, and make sure to load just that part into the vector. */
+ if (sortval->type == REDIS_ZSET &&
+ dontsort &&
+ (start != 0 || end != vectorlen-1))
+ {
+ vectorlen = end-start+1;
+ }
+
+ /* Load the sorting vector with all the objects to sort */
vector = zmalloc(sizeof(redisSortObject)*vectorlen);
j = 0;
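The LIMIT normalization moved above the vector load can be checked in isolation. A minimal standalone sketch of the same clamping rules (names are hypothetical, not the sortCommand locals); the two sample calls mirror the "limit -10 100" and "limit 5 10" cases exercised by the new tests further down:

    #include <stdio.h>

    /* Normalize LIMIT start,count against a vector of vectorlen elements:
     * negative start means "from the beginning", negative count means "to
     * the end", and an out-of-range window collapses to an empty range
     * (start > end). */
    static void clamp_limit(long vectorlen, long limit_start, long limit_count,
                            long *start, long *end) {
        *start = (limit_start < 0) ? 0 : limit_start;
        *end = (limit_count < 0) ? vectorlen - 1 : *start + limit_count - 1;
        if (*start >= vectorlen) {
            *start = vectorlen - 1;
            *end = vectorlen - 2;      /* empty: end < start */
        }
        if (*end >= vectorlen) *end = vectorlen - 1;
    }

    int main(void) {
        long s, e;
        clamp_limit(5, -10, 100, &s, &e);
        printf("LIMIT -10 100 -> %ld..%ld (whole vector)\n", s, e);
        clamp_limit(5, 5, 10, &s, &e);
        printf("LIMIT 5 10    -> %ld..%ld (empty)\n", s, e);
        return 0;
    }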
@@ -259,6 +295,48 @@ void sortCommand(redisClient *c) {
j++;
}
setTypeReleaseIterator(si);
+ } else if (sortval->type == REDIS_ZSET && dontsort) {
+ /* Special handling for a sorted set, if 'dontsort' is true.
+ * This makes sure we return elements in the sorted set's original
+ * ordering, according to the DESC / ASC options.
+ *
+ * Note that in this case we also handle LIMIT here in a direct
+ * way, just getting the required range, as an optimization. */
+
+ zset *zs = sortval->ptr;
+ zskiplist *zsl = zs->zsl;
+ zskiplistNode *ln;
+ robj *ele;
+ int rangelen = vectorlen;
+
+ /* Check if starting point is trivial, before doing log(N) lookup. */
+ if (desc) {
+ long zsetlen = dictSize(((zset*)sortval->ptr)->dict);
+
+ ln = zsl->tail;
+ if (start > 0)
+ ln = zslGetElementByRank(zsl,zsetlen-start);
+ } else {
+ ln = zsl->header->level[0].forward;
+ if (start > 0)
+ ln = zslGetElementByRank(zsl,start+1);
+ }
+
+ while(rangelen--) {
+ redisAssertWithInfo(c,sortval,ln != NULL);
+ ele = ln->obj;
+ vector[j].obj = ele;
+ vector[j].u.score = 0;
+ vector[j].u.cmpobj = NULL;
+ j++;
+ ln = desc ? ln->backward : ln->level[0].forward;
+ }
+ /* The code producing the output does not know that, in the case of
+ * a sorted set with 'dontsort' and LIMIT, we fetched just the requested
+ * range, already sorted, so we adjust "start" and "end" to make the
+ * window start at 0. */
+ end -= start;
+ start = 0;
} else if (sortval->type == REDIS_ZSET) {
dict *set = ((zset*)sortval->ptr)->dict;
dictIterator *di;
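Since zslGetElementByRank() uses 1-based ranks, the DESC branch above starts from rank zsetlen-start counted from the head, while the ASC branch starts from rank start+1. A self-contained sketch of just that arithmetic (first_rank() is a hypothetical helper, not Redis code):

    #include <stdio.h>

    /* Rank (1-based, counted from the skiplist head) of the first element
     * of the LIMIT window, for ascending and descending traversals. */
    static unsigned long first_rank(unsigned long zsetlen, unsigned long start,
                                    int desc) {
        return desc ? zsetlen - start : start + 1;
    }

    int main(void) {
        /* 5-element zset ordered by score: a c e b d (as in the tests below). */
        printf("ASC  LIMIT 1 2 starts at rank %lu ('c')\n", first_rank(5, 1, 0));
        printf("DESC LIMIT 1 2 starts at rank %lu ('b')\n", first_rank(5, 1, 1));
        return 0;
    }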
@@ -319,16 +397,6 @@ void sortCommand(redisClient *c) {
}
}
- /* We are ready to sort the vector... perform a bit of sanity check
- * on the LIMIT option too. We'll use a partial version of quicksort. */
- start = (limit_start < 0) ? 0 : limit_start;
- end = (limit_count < 0) ? vectorlen-1 : start+limit_count-1;
- if (start >= vectorlen) {
- start = vectorlen-1;
- end = vectorlen-2;
- }
- if (end >= vectorlen) end = vectorlen-1;
-
if (dontsort == 0) {
server.sort_desc = desc;
server.sort_alpha = alpha;
diff --git a/src/version.h b/src/version.h
index 6329f488a..9e21cb969 100644
--- a/src/version.h
+++ b/src/version.h
@@ -1 +1 @@
-#define REDIS_VERSION "2.5.13"
+#define REDIS_VERSION "2.5.14"
diff --git a/tests/unit/sort.tcl b/tests/unit/sort.tcl
index 5a181641c..6c5644a79 100644
--- a/tests/unit/sort.tcl
+++ b/tests/unit/sort.tcl
@@ -118,6 +118,47 @@ start_server {
r sort zset alpha desc
} {e d c b a}
+ test "SORT sorted set BY nosort should retain ordering" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ r multi
+ r sort zset by nosort asc
+ r sort zset by nosort desc
+ r exec
+ } {{a c e b d} {d b e c a}}
+
+ test "SORT sorted set BY nosort + LIMIT" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ assert_equal [r sort zset by nosort asc limit 0 1] {a}
+ assert_equal [r sort zset by nosort desc limit 0 1] {d}
+ assert_equal [r sort zset by nosort asc limit 0 2] {a c}
+ assert_equal [r sort zset by nosort desc limit 0 2] {d b}
+ assert_equal [r sort zset by nosort limit 5 10] {}
+ assert_equal [r sort zset by nosort limit -10 100] {a c e b d}
+ }
+
+ test "SORT sorted set BY nosort works as expected from scripts" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ r eval {
+ return {redis.call('sort','zset','by','nosort','asc'),
+ redis.call('sort','zset','by','nosort','desc')}
+ } 0
+ } {{a c e b d} {d b e c a}}
+
test "SORT sorted set: +inf and -inf handling" {
r del zset
r zadd zset -100 a
diff --git a/utils/install_server.sh b/utils/install_server.sh
index 70f0adfe3..789a97803 100755
--- a/utils/install_server.sh
+++ b/utils/install_server.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# Copyright 2011 Dvir Volk <dvirsk at gmail dot com>. All rights reserved.
#
@@ -48,6 +48,7 @@ if [ `whoami` != "root" ] ; then
exit 1
fi
+
#Read the redis port
read -p "Please select the redis port for this instance: [$_REDIS_PORT] " REDIS_PORT
if [ ! `echo $REDIS_PORT | egrep "^[0-9]+\$"` ] ; then
@@ -99,7 +100,7 @@ fi
#render the templates
TMP_FILE="/tmp/$REDIS_PORT.conf"
-TPL_FILE="./redis.conf.tpl"
+DEFAULT_CONFIG="../redis.conf"
INIT_TPL_FILE="./redis_init_script.tpl"
INIT_SCRIPT_DEST="/etc/init.d/redis_$REDIS_PORT"
PIDFILE="/var/run/redis_$REDIS_PORT.pid"
@@ -112,9 +113,19 @@ if [ ! "$CLI_EXEC" ] ; then
CLI_EXEC=`dirname $REDIS_EXECUTABLE`"/redis-cli"
fi
-#Generate config file from template
+#Generate config file from the default config file as template
+#changing only the stuff we're controlling from this script
echo "## Generated by install_server.sh ##" > $TMP_FILE
-cat $TPL_FILE | while read line; do eval "echo \"$line\"" >> $TMP_FILE; done
+
+SED_EXPR="s#^port [0-9]{4}\$#port ${REDIS_PORT}#;\
+s#^logfile .+\$#logfile ${REDIS_LOG_FILE}#;\
+s#^dir .+\$#dir ${REDIS_DATA_DIR}#;\
+s#^pidfile .+\$#pidfile ${PIDFILE}#;\
+s#^daemonize no\$#daemonize yes#;"
+echo $SED_EXPR
+sed -r "$SED_EXPR" $DEFAULT_CONFIG >> $TMP_FILE
+
+#cat $TPL_FILE | while read line; do eval "echo \"$line\"" >> $TMP_FILE; done
cp -f $TMP_FILE $REDIS_CONFIG_FILE || exit 1
#Generate sample script from template file
@@ -146,9 +157,9 @@ REDIS_CHKCONFIG_INFO=\
# Description: Redis daemon\n
### END INIT INFO\n\n"
-if [[ ! `which chkconfig` ]] ; then
+if [ ! `which chkconfig` ] ; then
#combine the header and the template (which is actually a static footer)
- echo -e $REDIS_INIT_HEADER > $TMP_FILE && cat $INIT_TPL_FILE >> $TMP_FILE || die "Could not write init script to $TMP_FILE"
+ echo $REDIS_INIT_HEADER > $TMP_FILE && cat $INIT_TPL_FILE >> $TMP_FILE || die "Could not write init script to $TMP_FILE"
else
#if we're a box with chkconfig on it we want to include info for chkconfig
echo -e $REDIS_INIT_HEADER $REDIS_CHKCONFIG_INFO > $TMP_FILE && cat $INIT_TPL_FILE >> $TMP_FILE || die "Could not write init script to $TMP_FILE"
@@ -160,7 +171,7 @@ echo "Copied $TMP_FILE => $INIT_SCRIPT_DEST"
#Install the service
echo "Installing service..."
-if [[ ! `which chkconfig` ]] ; then
+if [ ! `which chkconfig` ] ; then
#if we're not a chkconfig box assume we're able to use update-rc.d
update-rc.d redis_$REDIS_PORT defaults && echo "Success!"
else
diff --git a/utils/redis.conf.tpl b/utils/redis.conf.tpl
deleted file mode 100644
index 33d99a509..000000000
--- a/utils/redis.conf.tpl
+++ /dev/null
@@ -1,495 +0,0 @@
-# Redis configuration file example
-
-# Note on units: when memory size is needed, it is possible to specify
-# it in the usual form of 1k 5GB 4M and so forth:
-#
-# 1k => 1000 bytes
-# 1kb => 1024 bytes
-# 1m => 1000000 bytes
-# 1mb => 1024*1024 bytes
-# 1g => 1000000000 bytes
-# 1gb => 1024*1024*1024 bytes
-#
-# units are case insensitive so 1GB 1Gb 1gB are all the same.
-
-# By default Redis does not run as a daemon. Use 'yes' if you need it.
-# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
-daemonize yes
-
-# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
-# default. You can specify a custom pid file location here.
-pidfile $PIDFILE
-
-# Accept connections on the specified port, default is 6379.
-# If port 0 is specified Redis will not listen on a TCP socket.
-port $REDIS_PORT
-
-# If you want you can bind a single interface, if the bind option is not
-# specified all the interfaces will listen for incoming connections.
-#
-# bind 127.0.0.1
-
-# Specify the path for the unix socket that will be used to listen for
-# incoming connections. There is no default, so Redis will not listen
-# on a unix socket when not specified.
-#
-# unixsocket /tmp/redis.sock
-# unixsocketperm 755
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# Set server verbosity to 'debug'
-# it can be one of:
-# debug (a lot of information, useful for development/testing)
-# verbose (many rarely useful info, but not a mess like the debug level)
-# notice (moderately verbose, what you want in production probably)
-# warning (only very important / critical messages are logged)
-loglevel notice
-
-# Specify the log file name. Also 'stdout' can be used to force
-# Redis to log on the standard output. Note that if you use standard
-# output for logging but daemonize, logs will be sent to /dev/null
-logfile $REDIS_LOG_FILE
-
-# To enable logging to the system logger, just set 'syslog-enabled' to yes,
-# and optionally update the other syslog parameters to suit your needs.
-# syslog-enabled no
-
-# Specify the syslog identity.
-# syslog-ident redis
-
-# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
-# syslog-facility local0
-
-# Set the number of databases. The default database is DB 0, you can select
-# a different one on a per-connection basis using SELECT <dbid> where
-# dbid is a number between 0 and 'databases'-1
-databases 16
-
-################################ SNAPSHOTTING #################################
-#
-# Save the DB on disk:
-#
-# save <seconds> <changes>
-#
-# Will save the DB if both the given number of seconds and the given
-# number of write operations against the DB occurred.
-#
-# In the example below the behaviour will be to save:
-# after 900 sec (15 min) if at least 1 key changed
-# after 300 sec (5 min) if at least 10 keys changed
-# after 60 sec if at least 10000 keys changed
-#
-# Note: you can disable saving at all commenting all the "save" lines.
-#
-# It is also possible to remove all the previously configured save
-# points by adding a save directive with a single empty string argument
-# like in the following example:
-#
-# save ""
-
-save 900 1
-save 300 10
-save 60 10000
-
-# By default Redis will stop accepting writes if RDB snapshots are enabled
-# (at least one save point) and the latest background save failed.
-# This will make the user aware (in an hard way) that data is not persisting
-# on disk properly, otherwise chances are that no one will notice and some
-# distater will happen.
-#
-# If the background saving process will start working again Redis will
-# automatically allow writes again.
-#
-# However if you have setup your proper monitoring of the Redis server
-# and persistence, you may want to disable this feature so that Redis will
-# continue to work as usually even if there are problems with disk,
-# permissions, and so forth.
-stop-writes-on-bgsave-error yes
-
-# Compress string objects using LZF when dump .rdb databases?
-# For default that's set to 'yes' as it's almost always a win.
-# If you want to save some CPU in the saving child set it to 'no' but
-# the dataset will likely be bigger if you have compressible values or keys.
-rdbcompression yes
-
-# The filename where to dump the DB
-dbfilename dump.rdb
-
-# The working directory.
-#
-# The DB will be written inside this directory, with the filename specified
-# above using the 'dbfilename' configuration directive.
-#
-# Also the Append Only File will be created inside this directory.
-#
-# Note that you must specify a directory here, not a file name.
-dir $REDIS_DATA_DIR
-
-################################# REPLICATION #################################
-
-# Master-Slave replication. Use slaveof to make a Redis instance a copy of
-# another Redis server. Note that the configuration is local to the slave
-# so for example it is possible to configure the slave to save the DB with a
-# different interval, or to listen to another port, and so on.
-#
-# slaveof <masterip> <masterport>
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-# masterauth <master-password>
-
-# When a slave lost the connection with the master, or when the replication
-# is still in progress, the slave can act in two different ways:
-#
-# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of date data, or the
-# data set may just be empty if this is the first synchronization.
-#
-# 2) if slave-serve-stale data is set to 'no' the slave will reply with
-# an error "SYNC with master in progress" to all the kind of commands
-# but to INFO and SLAVEOF.
-#
-slave-serve-stale-data yes
-
-# Slaves send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_slave_period option. The default value is 10
-# seconds.
-#
-# repl-ping-slave-period 10
-
-# The following option sets a timeout for both Bulk transfer I/O timeout and
-# master data or ping response timeout. The default value is 60 seconds.
-#
-# It is important to make sure that this value is greater than the value
-# specified for repl-ping-slave-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the slave.
-#
-# repl-timeout 60
-
-################################## SECURITY ###################################
-
-# Require clients to issue AUTH <PASSWORD> before processing any other
-# commands. This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
-# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-# requirepass foobared
-
-# Command renaming.
-#
-# It is possible to change the name of dangerous commands in a shared
-# environment. For instance the CONFIG command may be renamed into something
-# of hard to guess so that it will be still available for internal-use
-# tools but not available for general clients.
-#
-# Example:
-#
-# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
-#
-# It is also possible to completely kill a command renaming it into
-# an empty string:
-#
-# rename-command CONFIG ""
-
-################################### LIMITS ####################################
-
-# Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the Redis server is not
-# able ot configure the process file limit to allow for the specified limit
-# the max number of allowed clients is set to the current file limit
-# minus 32 (as Redis reserves a few file descriptors for internal uses).
-#
-# Once the limit is reached Redis will close all the new connections sending
-# an error 'max number of clients reached'.
-#
-# maxclients 10000
-
-# Don't use more memory than the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys
-# accordingly to the eviction policy selected (see maxmemmory-policy).
-#
-# If Redis can't remove keys according to the policy, or if the policy is
-# set to 'noeviction', Redis will start to reply with errors to commands
-# that would use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to read-only commands like GET.
-#
-# This option is usually useful when using Redis as an LRU cache, or to set
-# an hard memory limit for an instance (using the 'noeviction' policy).
-#
-# WARNING: If you have slaves attached to an instance with maxmemory on,
-# the size of the output buffers needed to feed the slaves are subtracted
-# from the used memory count, so that network problems / resyncs will
-# not trigger a loop where keys are evicted, and in turn the output
-# buffer of slaves is full with DELs of keys evicted triggering the deletion
-# of more keys, and so forth until the database is completely emptied.
-#
-# In short... if you have slaves attached it is suggested that you set a lower
-# limit for maxmemory so that there is some free RAM on the system for slave
-# output buffers (but this is not needed if the policy is 'noeviction').
-#
-# maxmemory <bytes>
-
-# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached? You can select among five behavior:
-#
-# volatile-lru -> remove the key with an expire set using an LRU algorithm
-# allkeys-lru -> remove any key accordingly to the LRU algorithm
-# volatile-random -> remove a random key with an expire set
-# allkeys-random -> remove a random key, any key
-# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
-# noeviction -> don't expire at all, just return an error on write operations
-#
-# Note: with all the kind of policies, Redis will return an error on write
-# operations, when there are not suitable keys for eviction.
-#
-# At the date of writing this commands are: set setnx setex append
-# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
-# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
-# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
-# getset mset msetnx exec sort
-#
-# The default is:
-#
-# maxmemory-policy volatile-lru
-
-# LRU and minimal TTL algorithms are not precise algorithms but approximated
-# algorithms (in order to save memory), so you can select as well the sample
-# size to check. For instance for default Redis will check three keys and
-# pick the one that was used less recently, you can change the sample size
-# using the following configuration directive.
-#
-# maxmemory-samples 3
-
-############################## APPEND ONLY MODE ###############################
-
-# By default Redis asynchronously dumps the dataset on disk. If you can live
-# with the idea that the latest records will be lost if something like a crash
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
-#
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
-#
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
-
-appendonly no
-
-# The name of the append only file (default: "appendonly.aof")
-# appendfilename appendonly.aof
-
-# The fsync() call tells the Operating System to actually write data on disk
-# instead to wait for more data in the output buffer. Some OS will really flush
-# data on disk, some other OS will just try to do it ASAP.
-#
-# Redis supports three different modes:
-#
-# no: don't fsync, just let the OS flush the data when it wants. Faster.
-# always: fsync after every write to the append only log . Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
-#
-# The default is "everysec" that's usually the right compromise between
-# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will let the operating system flush the output buffer when
-# it wants, for better performances (but if you can live with the idea of
-# some data loss consider the default persistence mode that's snapshotting),
-# or on the contrary, use "always" that's very slow but a bit safer than
-# everysec.
-#
-# If unsure, use "everysec".
-
-# appendfsync always
-appendfsync everysec
-# appendfsync no
-
-# When the AOF fsync policy is set to always or everysec, and a background
-# saving process (a background save or AOF log background rewriting) is
-# performing a lot of I/O against the disk, in some Linux configurations
-# Redis may block too long on the fsync() call. Note that there is no fix for
-# this currently, as even performing fsync in a different thread will block
-# our synchronous write(2) call.
-#
-# In order to mitigate this problem it's possible to use the following option
-# that will prevent fsync() from being called in the main process while a
-# BGSAVE or BGREWRITEAOF is in progress.
-#
-# This means that while another child is saving the durability of Redis is
-# the same as "appendfsync none", that in practical terms means that it is
-# possible to lost up to 30 seconds of log in the worst scenario (with the
-# default Linux settings).
-#
-# If you have latency problems turn this to "yes". Otherwise leave it as
-# "no" that is the safest pick from the point of view of durability.
-no-appendfsync-on-rewrite no
-
-# Automatic rewrite of the append only file.
-# Redis is able to automatically rewrite the log file implicitly calling
-# BGREWRITEAOF when the AOF log size will growth by the specified percentage.
-#
-# This is how it works: Redis remembers the size of the AOF file after the
-# latest rewrite (or if no rewrite happened since the restart, the size of
-# the AOF at startup is used).
-#
-# This base size is compared to the current size. If the current size is
-# bigger than the specified percentage, the rewrite is triggered. Also
-# you need to specify a minimal size for the AOF file to be rewritten, this
-# is useful to avoid rewriting the AOF file even if the percentage increase
-# is reached but it is still pretty small.
-#
-# Specify a percentage of zero in order to disable the automatic AOF
-# rewrite feature.
-
-auto-aof-rewrite-percentage 100
-auto-aof-rewrite-min-size 64mb
-
-################################ LUA SCRIPTING ###############################
-
-# Max execution time of a Lua script in milliseconds.
-#
-# If the maximum execution time is reached Redis will log that a script is
-# still in execution after the maximum allowed time and will start to
-# reply to queries with an error.
-#
-# When a long running script exceed the maximum execution time only the
-# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
-# used to stop a script that did not yet called write commands. The second
-# is the only way to shut down the server in the case a write commands was
-# already issue by the script but the user don't want to wait for the natural
-# termination of the script.
-#
-# Set it to 0 or a negative value for unlimited execution without warnings.
-lua-time-limit 5000
-
-################################## SLOW LOG ###################################
-
-# The Redis Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Redis
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 1024
-
-############################### ADVANCED CONFIG ###############################
-
-# Hashes are encoded using a memory efficient data structure when they have a
-# small number of entries, and the biggest entry does not exceed a given
-# threshold. These thresholds can be configured using the following directives.
-hash-max-ziplist-entries 512
-hash-max-ziplist-value 64
-
-# Similarly to hashes, small lists are also encoded in a special way in order
-# to save a lot of space. The special representation is only used when
-# you are under the following limits:
-list-max-ziplist-entries 512
-list-max-ziplist-value 64
-
-# Sets have a special encoding in just one case: when a set is composed
-# of just strings that happens to be integers in radix 10 in the range
-# of 64 bit signed integers.
-# The following configuration setting sets the limit in the size of the
-# set in order to use this special memory saving encoding.
-set-max-intset-entries 512
-
-# Similarly to hashes and lists, sorted sets are also specially encoded in
-# order to save a lot of space. This encoding is only used when the length and
-# elements of a sorted set are below the following limits:
-zset-max-ziplist-entries 128
-zset-max-ziplist-value 64
-
-# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
-# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation Redis uses (see dict.c)
-# performs a lazy rehashing: the more operation you run into an hash table
-# that is rehashing, the more rehashing "steps" are performed, so if the
-# server is idle the rehashing is never complete and some more memory is used
-# by the hash table.
-#
-# The default is to use this millisecond 10 times every second in order to
-# active rehashing the main dictionaries, freeing memory when possible.
-#
-# If unsure:
-# use "activerehashing no" if you have hard latency requirements and it is
-# not a good thing in your environment that Redis can reply form time to time
-# to queries with 2 milliseconds delay.
-#
-# use "activerehashing yes" if you don't have such hard requirements but
-# want to free memory asap when possible.
-activerehashing yes
-
-# The client output buffer limits can be used to force disconnection of clients
-# that are not reading data from the server fast enough for some reason (a
-# common reason is that a Pub/Sub client can't consume messages as fast as the
-# publisher can produce them).
-#
-# The limit can be set differently for the three different classes of clients:
-#
-# normal -> normal clients
-# slave -> slave clients and MONITOR clients
-# pubsub -> clients subcribed to at least one pubsub channel or pattern
-#
-# The syntax of every client-output-buffer-limit directive is the following:
-#
-# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
-#
-# A client is immediately disconnected once the hard limit is reached, or if
-# the soft limit is reached and remains reached for the specified number of
-# seconds (continuously).
-# So for instance if the hard limit is 32 megabytes and the soft limit is
-# 16 megabytes / 10 seconds, the client will get disconnected immediately
-# if the size of the output buffers reach 32 megabytes, but will also get
-# disconnected if the client reaches 16 megabytes and continuously overcomes
-# the limit for 10 seconds.
-#
-# By default normal clients are not limited because they don't receive data
-# without asking (in a push way), but just after a request, so only
-# asynchronous clients may create a scenario where data is requested faster
-# than it can read.
-#
-# Instead there is a default limit for pubsub and slave clients, since
-# subscribers and slaves receive data in a push fashion.
-#
-# Both the hard or the soft limit can be disabled just setting it to zero.
-client-output-buffer-limit normal 0 0 0
-client-output-buffer-limit slave 256mb 64mb 60
-client-output-buffer-limit pubsub 32mb 8mb 60
-
-################################## INCLUDES ###################################
-
-# Include one or more other config files here. This is useful if you
-# have a standard template that goes to all Redis server but also need
-# to customize a few per-server settings. Include files can include
-# other files, so use this wisely.
-#
-# include /path/to/local.conf
-# include /path/to/other.conf