-rw-r--r--  00-RELEASENOTES            |  4
-rw-r--r--  redis.conf                 |  2
-rw-r--r--  sentinel.conf              |  2
-rw-r--r--  src/adlist.c               |  6
-rw-r--r--  src/ae.c                   |  6
-rw-r--r--  src/ae_evport.c            | 12
-rw-r--r--  src/anet.c                 |  4
-rw-r--r--  src/aof.c                  |  2
-rw-r--r--  src/bio.c                  |  2
-rw-r--r--  src/bitops.c               |  4
-rw-r--r--  src/cluster.c              | 12
-rw-r--r--  src/config.c               |  6
-rw-r--r--  src/crc16.c                |  2
-rw-r--r--  src/db.c                   |  6
-rw-r--r--  src/debug.c                |  4
-rw-r--r--  src/dict.c                 |  2
-rw-r--r--  src/lzfP.h                 |  8
-rwxr-xr-x  src/mkreleasehdr.sh        |  2
-rw-r--r--  src/multi.c                |  4
-rw-r--r--  src/networking.c           |  8
-rw-r--r--  src/object.c               |  2
-rw-r--r--  src/pubsub.c               |  2
-rw-r--r--  src/rdb.c                  | 10
-rw-r--r--  src/rdb.h                  |  2
-rw-r--r--  src/redis-check-dump.c     |  4
-rw-r--r--  src/redis-cli.c            |  6
-rwxr-xr-x  src/redis-trib.rb          | 10
-rw-r--r--  src/redis.c                | 28
-rw-r--r--  src/redis.h                | 28
-rw-r--r--  src/release.c              |  4
-rw-r--r--  src/replication.c          | 10
-rw-r--r--  src/scripting.c            | 12
-rw-r--r--  src/sds.c                  |  4
-rw-r--r--  src/sentinel.c             | 20
-rw-r--r--  src/sha1.c                 | 36
-rw-r--r--  src/sort.c                 |  6
-rw-r--r--  src/t_list.c               | 10
-rw-r--r--  src/t_set.c                |  4
-rw-r--r--  src/t_string.c             |  4
-rw-r--r--  src/t_zset.c               |  4
-rw-r--r--  src/zmalloc.c              |  2
-rw-r--r--  tests/unit/scripting.tcl   |  2
42 files changed, 154 insertions, 154 deletions
diff --git a/00-RELEASENOTES b/00-RELEASENOTES
index d91f0c29d..36317ca35 100644
--- a/00-RELEASENOTES
+++ b/00-RELEASENOTES
@@ -26,7 +26,7 @@ Also the following redis.conf and CONFIG GET / SET parameters changed name:
* hash-max-zipmap-entries, now replaced by hash-max-ziplist-entries
* hash-max-zipmap-value, now replaced by hash-max-ziplist-value
- * glueoutputbuf was no completely removed as it does not make sense
+ * glueoutputbuf was now completely removed as it does not make sense
---------
CHANGELOG
@@ -46,7 +46,7 @@ UPGRADE URGENCY: We suggest new users to start with 2.6.0, and old users to
in slaves.
* Milliseconds resolution expires, also added new commands with milliseconds
precision (PEXPIRE, PTTL, ...).
-* Clinets max output buffer soft and hard limits. You can specifiy different
+* Clients max output buffer soft and hard limits. You can specifiy different
limits for different classes of clients (normal,pubsub,slave).
* AOF is now able to rewrite aggregate data types using variadic commands,
often producing an AOF that is faster to save, load, and is smaller in size.
diff --git a/redis.conf b/redis.conf
index 01abdbbfa..848b0ab45 100644
--- a/redis.conf
+++ b/redis.conf
@@ -246,7 +246,7 @@ slave-priority 100
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
-# able ot configure the process file limit to allow for the specified limit
+# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
diff --git a/sentinel.conf b/sentinel.conf
index 94169ee8f..ac687b535 100644
--- a/sentinel.conf
+++ b/sentinel.conf
@@ -71,7 +71,7 @@ sentinel parallel-syncs mymaster 1
# Default is 15 minutes.
sentinel failover-timeout mymaster 900000
-# SCRIPTS EXECTION
+# SCRIPTS EXECUTION
#
# sentinel notification-script and sentinel reconfig-script are used in order
# to configure scripts that are called to notify the system administrator
diff --git a/src/adlist.c b/src/adlist.c
index e48957e3a..f075e1bda 100644
--- a/src/adlist.c
+++ b/src/adlist.c
@@ -97,7 +97,7 @@ list *listAddNodeHead(list *list, void *value)
return list;
}
-/* Add a new node to the list, to tail, contaning the specified 'value'
+/* Add a new node to the list, to tail, containing the specified 'value'
* pointer as value.
*
* On error, NULL is returned and no operation is performed (i.e. the
@@ -308,7 +308,7 @@ listNode *listSearchKey(list *list, void *key)
/* Return the element at the specified zero-based index
* where 0 is the head, 1 is the element next to head
* and so on. Negative integers are used in order to count
- * from the tail, -1 is the last element, -2 the penultimante
+ * from the tail, -1 is the last element, -2 the penultimate
* and so on. If the index is out of range NULL is returned. */
listNode *listIndex(list *list, long index) {
listNode *n;
@@ -330,7 +330,7 @@ void listRotate(list *list) {
if (listLength(list) <= 1) return;
- /* Detatch current tail */
+ /* Detach current tail */
list->tail = tail->prev;
list->tail->next = NULL;
/* Move it as head */
diff --git a/src/ae.c b/src/ae.c
index 90be4e28f..6ca9a5153 100644
--- a/src/ae.c
+++ b/src/ae.c
@@ -309,7 +309,7 @@ static int processTimeEvents(aeEventLoop *eventLoop) {
/* Process every pending time event, then every pending file event
* (that may be registered by time event callbacks just processed).
* Without special flags the function sleeps until some file event
- * fires, or when the next time event occurrs (if any).
+ * fires, or when the next time event occurs (if any).
*
* If flags is 0, the function does nothing and returns.
* if flags has AE_ALL_EVENTS set, all the kind of events are processed.
@@ -356,7 +356,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags)
if (tvp->tv_usec < 0) tvp->tv_usec = 0;
} else {
/* If we have to check for events but need to return
- * ASAP because of AE_DONT_WAIT we need to se the timeout
+ * ASAP because of AE_DONT_WAIT we need to set the timeout
* to zero */
if (flags & AE_DONT_WAIT) {
tv.tv_sec = tv.tv_usec = 0;
@@ -395,7 +395,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags)
return processed; /* return the number of processed file/time events */
}
-/* Wait for millseconds until the given file descriptor becomes
+/* Wait for milliseconds until the given file descriptor becomes
* writable/readable/exception */
int aeWait(int fd, int mask, long long milliseconds) {
struct pollfd pfd;
diff --git a/src/ae_evport.c b/src/ae_evport.c
index 0196dccf4..94413c132 100644
--- a/src/ae_evport.c
+++ b/src/ae_evport.c
@@ -50,15 +50,15 @@ static int evport_debug = 0;
* aeApiPoll, the corresponding file descriptors become dissociated from the
* port. This is necessary because poll events are level-triggered, so if the
* fd didn't become dissociated, it would immediately fire another event since
- * the underlying state hasn't changed yet. We must reassociate the file
+ * the underlying state hasn't changed yet. We must re-associate the file
* descriptor, but only after we know that our caller has actually read from it.
* The ae API does not tell us exactly when that happens, but we do know that
* it must happen by the time aeApiPoll is called again. Our solution is to
- * keep track of the last fds returned by aeApiPoll and reassociate them next
+ * keep track of the last fds returned by aeApiPoll and re-associate them next
* time aeApiPoll is invoked.
*
* To summarize, in this module, each fd association is EITHER (a) represented
- * only via the in-kernel assocation OR (b) represented by pending_fds and
+ * only via the in-kernel association OR (b) represented by pending_fds and
* pending_masks. (b) is only true for the last fds we returned from aeApiPoll,
* and only until we enter aeApiPoll again (at which point we restore the
* in-kernel association).
@@ -164,7 +164,7 @@ static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
* This fd was recently returned from aeApiPoll. It should be safe to
* assume that the consumer has processed that poll event, but we play
* it safer by simply updating pending_mask. The fd will be
- * reassociated as usual when aeApiPoll is called again.
+ * re-associated as usual when aeApiPoll is called again.
*/
if (evport_debug)
fprintf(stderr, "aeApiAddEvent: adding to pending fd %d\n", fd);
@@ -228,7 +228,7 @@ static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) {
* ENOMEM is a potentially transient condition, but the kernel won't
* generally return it unless things are really bad. EAGAIN indicates
* we've reached an resource limit, for which it doesn't make sense to
- * retry (counterintuitively). All other errors indicate a bug. In any
+ * retry (counter-intuitively). All other errors indicate a bug. In any
* of these cases, the best we can do is to abort.
*/
abort(); /* will not return */
@@ -243,7 +243,7 @@ static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
port_event_t event[MAX_EVENT_BATCHSZ];
/*
- * If we've returned fd events before, we must reassociate them with the
+ * If we've returned fd events before, we must re-associate them with the
* port now, before calling port_get(). See the block comment at the top of
* this file for an explanation of why.
*/
diff --git a/src/anet.c b/src/anet.c
index ae8e9a658..4da3e28db 100644
--- a/src/anet.c
+++ b/src/anet.c
@@ -61,7 +61,7 @@ int anetNonBlock(char *err, int fd)
{
int flags;
- /* Set the socket nonblocking.
+ /* Set the socket non-blocking.
* Note that fcntl(2) for F_GETFL and F_SETFL can't be
* interrupted by a signal. */
if ((flags = fcntl(fd, F_GETFL)) == -1) {
@@ -132,7 +132,7 @@ static int anetCreateSocket(char *err, int domain) {
return ANET_ERR;
}
- /* Make sure connection-intensive things like the redis benckmark
+ /* Make sure connection-intensive things like the redis benchmark
* will be able to close/open sockets a zillion of times */
if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) {
anetSetError(err, "setsockopt SO_REUSEADDR: %s", strerror(errno));
diff --git a/src/aof.c b/src/aof.c
index fe5c64972..7e1512ebd 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -385,7 +385,7 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a
sds buf = sdsempty();
robj *tmpargv[3];
- /* The DB this command was targetting is not the same as the last command
+ /* The DB this command was targeting is not the same as the last command
* we appendend. To issue a SELECT command is needed. */
if (dictid != server.aof_selected_db) {
char seldb[64];
diff --git a/src/bio.c b/src/bio.c
index 0fec24d0b..4bd5a17c6 100644
--- a/src/bio.c
+++ b/src/bio.c
@@ -74,7 +74,7 @@ static list *bio_jobs[REDIS_BIO_NUM_OPS];
static unsigned long long bio_pending[REDIS_BIO_NUM_OPS];
/* This structure represents a background Job. It is only used locally to this
- * file as the API deos not expose the internals at all. */
+ * file as the API does not expose the internals at all. */
struct bio_job {
time_t time; /* Time at which the job was created. */
/* Job specific arguments pointers. If we need to pass more than three
diff --git a/src/bitops.c b/src/bitops.c
index 3ef0a8f3d..75f3317a9 100644
--- a/src/bitops.c
+++ b/src/bitops.c
@@ -34,7 +34,7 @@
* Helpers and low level bit functions.
* -------------------------------------------------------------------------- */
-/* This helper function used by GETBIT / SETBIT parses the bit offset arguemnt
+/* This helper function used by GETBIT / SETBIT parses the bit offset argument
* making sure an error is returned if it is negative or if it overflows
* Redis 512 MB limit for the string value. */
static int getBitOffsetFromArgument(redisClient *c, robj *o, size_t *offset) {
@@ -189,7 +189,7 @@ void bitopCommand(redisClient *c) {
char *opname = c->argv[1]->ptr;
robj *o, *targetkey = c->argv[2];
long op, j, numkeys;
- robj **objects; /* Array of soruce objects. */
+ robj **objects; /* Array of source objects. */
unsigned char **src; /* Array of source strings pointers. */
long *len, maxlen = 0; /* Array of length of src strings, and max len. */
long minlen = 0; /* Min len among the input keys. */
diff --git a/src/cluster.c b/src/cluster.c
index cbcdf373d..24382b692 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -177,7 +177,7 @@ int clusterLoadConfig(char *filename) {
return REDIS_OK;
fmterr:
- redisLog(REDIS_WARNING,"Unrecovarable error: corrupted cluster config file.");
+ redisLog(REDIS_WARNING,"Unrecoverable error: corrupted cluster config file.");
fclose(fp);
exit(1);
}
@@ -985,7 +985,7 @@ void clusterCron(void) {
time_t min_ping_sent = 0;
clusterNode *min_ping_node = NULL;
- /* Check if we have disconnected nodes and reestablish the connection. */
+ /* Check if we have disconnected nodes and re-establish the connection. */
di = dictGetIterator(server.cluster.nodes);
while((de = dictNext(di)) != NULL) {
clusterNode *node = dictGetVal(de);
@@ -1069,7 +1069,7 @@ void clusterCron(void) {
clusterUpdateState();
}
} else {
- /* Timeout reached. Set the noad se possibly failing if it is
+ /* Timeout reached. Set the node as possibly failing if it is
* not already in this state. */
if (!(node->flags & (REDIS_NODE_PFAIL|REDIS_NODE_FAIL))) {
redisLog(REDIS_DEBUG,"*** NODE %.40s possibly failing",
@@ -1803,7 +1803,7 @@ try_again:
redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,c->argv[3]->ptr,sdslen(c->argv[3]->ptr)));
redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,ttl));
- /* Emit the payload argument, that is the serailized object using
+ /* Emit the payload argument, that is the serialized object using
* the DUMP format. */
createDumpPayload(&payload,o);
redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,payload.io.buffer.ptr,
@@ -1815,7 +1815,7 @@ try_again:
if (replace)
redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"REPLACE",7));
- /* Tranfer the query to the other node in 64K chunks. */
+ /* Transfer the query to the other node in 64K chunks. */
errno = 0;
{
sds buf = cmd.io.buffer.ptr;
@@ -1882,7 +1882,7 @@ socket_rd_err:
}
/* The ASKING command is required after a -ASK redirection.
- * The client should issue ASKING before to actualy send the command to
+ * The client should issue ASKING before to actually send the command to
* the target instance. See the Redis Cluster specification for more
* information. */
void askingCommand(redisClient *c) {
diff --git a/src/config.c b/src/config.c
index ec1786ae6..e5443f8ec 100644
--- a/src/config.c
+++ b/src/config.c
@@ -333,7 +333,7 @@ void loadServerConfigFromString(char *config) {
goto loaderr;
}
- /* If the target command name is the emtpy string we just
+ /* If the target command name is the empty string we just
* remove it from the command table. */
retval = dictDelete(server.commands, argv[1]);
redisAssert(retval == DICT_OK);
@@ -378,7 +378,7 @@ void loadServerConfigFromString(char *config) {
soft = memtoll(argv[3],NULL);
soft_seconds = atoi(argv[4]);
if (soft_seconds < 0) {
- err = "Negative number of seconds in soft limt is invalid";
+ err = "Negative number of seconds in soft limit is invalid";
goto loaderr;
}
server.client_obuf_limits[class].hard_limit_bytes = hard;
@@ -423,7 +423,7 @@ loaderr:
* in the 'options' string to the config file before loading.
*
* Both filename and options can be NULL, in such a case are considered
- * emtpy. This way loadServerConfig can be used to just load a file or
+ * empty. This way loadServerConfig can be used to just load a file or
* just load a string. */
void loadServerConfig(char *filename, char *options) {
sds config = sdsempty();
diff --git a/src/crc16.c b/src/crc16.c
index d3a232bad..398ad73fb 100644
--- a/src/crc16.c
+++ b/src/crc16.c
@@ -29,7 +29,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* CRC16 implementation acording to CCITT standards.
+/* CRC16 implementation according to CCITT standards.
*
* Note by @antirez: this is actually the XMODEM CRC 16 algorithm, using the
* following parameters:
diff --git a/src/db.c b/src/db.c
index a73d1fbf5..eda05c85f 100644
--- a/src/db.c
+++ b/src/db.c
@@ -44,7 +44,7 @@ robj *lookupKey(redisDb *db, robj *key) {
if (de) {
robj *val = dictGetVal(de);
- /* Update the access time for the aging algorithm.
+ /* Update the access time for the ageing algorithm.
* Don't do it if we have a saving child, as this will trigger
* a copy on write madness. */
if (server.rdb_child_pid == -1 && server.aof_child_pid == -1)
@@ -85,7 +85,7 @@ robj *lookupKeyWriteOrReply(redisClient *c, robj *key, robj *reply) {
}
/* Add the key to the DB. It's up to the caller to increment the reference
- * counte of the value if needed.
+ * counter of the value if needed.
*
* The program is aborted if the key already exists. */
void dbAdd(redisDb *db, robj *key, robj *val) {
@@ -549,7 +549,7 @@ int expireIfNeeded(redisDb *db, robj *key) {
* for *AT variants of the command, or the current time for relative expires).
*
* unit is either UNIT_SECONDS or UNIT_MILLISECONDS, and is only used for
- * the argv[2] parameter. The basetime is always specified in milliesconds. */
+ * the argv[2] parameter. The basetime is always specified in milliseconds. */
void expireGenericCommand(redisClient *c, long long basetime, int unit) {
dictEntry *de;
robj *key = c->argv[1], *param = c->argv[2];
diff --git a/src/debug.c b/src/debug.c
index 39ddb4327..897a71ace 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -44,7 +44,7 @@
/* ================================= Debugging ============================== */
/* Compute the sha1 of string at 's' with 'len' bytes long.
- * The SHA1 is then xored againt the string pointed by digest.
+ * The SHA1 is then xored against the string pointed by digest.
* Since xor is commutative, this operation is used in order to
* "add" digests relative to unordered elements.
*
@@ -69,7 +69,7 @@ void xorObjectDigest(unsigned char *digest, robj *o) {
}
/* This function instead of just computing the SHA1 and xoring it
- * against diget, also perform the digest of "digest" itself and
+ * against digest, also perform the digest of "digest" itself and
* replace the old value with the new one.
*
* So the final digest will be:
diff --git a/src/dict.c b/src/dict.c
index d47fc6ac1..2346f5bea 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -610,7 +610,7 @@ static int _dictExpandIfNeeded(dict *d)
/* Incremental rehashing already in progress. Return. */
if (dictIsRehashing(d)) return DICT_OK;
- /* If the hash table is empty expand it to the intial size. */
+ /* If the hash table is empty expand it to the initial size. */
if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);
/* If we reached the 1:1 ratio, and we are allowed to resize the hash
diff --git a/src/lzfP.h b/src/lzfP.h
index d533f1829..10d804e04 100644
--- a/src/lzfP.h
+++ b/src/lzfP.h
@@ -93,7 +93,7 @@
/*
* Avoid assigning values to errno variable? for some embedding purposes
- * (linux kernel for example), this is neccessary. NOTE: this breaks
+ * (linux kernel for example), this is necessary. NOTE: this breaks
* the documentation in lzf.h.
*/
#ifndef AVOID_ERRNO
@@ -101,7 +101,7 @@
#endif
/*
- * Wether to pass the LZF_STATE variable as argument, or allocate it
+ * Whether to pass the LZF_STATE variable as argument, or allocate it
* on the stack. For small-stack environments, define this to 1.
* NOTE: this breaks the prototype in lzf.h.
*/
@@ -110,11 +110,11 @@
#endif
/*
- * Wether to add extra checks for input validity in lzf_decompress
+ * Whether to add extra checks for input validity in lzf_decompress
* and return EINVAL if the input stream has been corrupted. This
* only shields against overflowing the input buffer and will not
* detect most corrupted streams.
- * This check is not normally noticable on modern hardware
+ * This check is not normally noticeable on modern hardware
* (<1% slowdown), but might slow down older cpus considerably.
*/
#ifndef CHECK_INPUT
diff --git a/src/mkreleasehdr.sh b/src/mkreleasehdr.sh
index d07cf6ae0..f3bbac015 100755
--- a/src/mkreleasehdr.sh
+++ b/src/mkreleasehdr.sh
@@ -4,7 +4,7 @@ GIT_DIRTY=`git diff 2> /dev/null | wc -l`
BUILD_ID=`uname -n`"-"`date +%s`
test -f release.h || touch release.h
(cat release.h | grep SHA1 | grep $GIT_SHA1) && \
-(cat release.h | grep DIRTY | grep $GIT_DIRTY) && exit 0 # Already uptodate
+(cat release.h | grep DIRTY | grep $GIT_DIRTY) && exit 0 # Already up-to-date
echo "#define REDIS_GIT_SHA1 \"$GIT_SHA1\"" > release.h
echo "#define REDIS_GIT_DIRTY \"$GIT_DIRTY\"" >> release.h
echo "#define REDIS_BUILD_ID \"$BUILD_ID\"" >> release.h
diff --git a/src/multi.c b/src/multi.c
index 064a40944..dfac15c34 100644
--- a/src/multi.c
+++ b/src/multi.c
@@ -102,7 +102,7 @@ void discardCommand(redisClient *c) {
}
/* Send a MULTI command to all the slaves and AOF file. Check the execCommand
- * implememntation for more information. */
+ * implementation for more information. */
void execCommandReplicateMulti(redisClient *c) {
robj *multistring = createStringObject("MULTI",5);
@@ -223,7 +223,7 @@ void watchForKey(redisClient *c, robj *key) {
incrRefCount(key);
}
listAddNodeTail(clients,c);
- /* Add the new key to the lits of keys watched by this client */
+ /* Add the new key to the list of keys watched by this client */
wk = zmalloc(sizeof(*wk));
wk->key = key;
wk->db = c->db;
diff --git a/src/networking.c b/src/networking.c
index d1a8d3b3f..f48d27c07 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -378,7 +378,7 @@ void *addDeferredMultiBulkLength(redisClient *c) {
return listLast(c->reply);
}
-/* Populate the length object and try glueing it to the next chunk. */
+/* Populate the length object and try gluing it to the next chunk. */
void setDeferredMultiBulkLength(redisClient *c, void *node, long length) {
listNode *ln = (listNode*)node;
robj *len, *next;
@@ -404,7 +404,7 @@ void setDeferredMultiBulkLength(redisClient *c, void *node, long length) {
asyncCloseClientOnOutputBufferLimitReached(c);
}
-/* Add a duble as a bulk reply */
+/* Add a double as a bulk reply */
void addReplyDouble(redisClient *c, double d) {
char dbuf[128], sbuf[128];
int dlen, slen;
@@ -526,7 +526,7 @@ static void acceptCommonHandler(int fd, int flags) {
}
/* If maxclient directive is set and this is one client more... close the
* connection. Note that we create the client instead to check before
- * for this condition, since now the socket is already set in nonblocking
+ * for this condition, since now the socket is already set in non-blocking
* mode and we can send an error for free using the Kernel I/O */
if (listLength(server.clients) > server.maxclients) {
char *err = "-ERR max number of clients reached\r\n";
@@ -941,7 +941,7 @@ int processMultibulkBuffer(redisClient *c) {
/* Not enough data (+2 == trailing \r\n) */
break;
} else {
- /* Optimization: if the buffer contanins JUST our bulk element
+ /* Optimization: if the buffer contains JUST our bulk element
* instead of creating a new object by *copying* the sds we
* just use the current sds string. */
if (pos == 0 &&
diff --git a/src/object.c b/src/object.c
index c2b89709d..00cf023b0 100644
--- a/src/object.c
+++ b/src/object.c
@@ -72,7 +72,7 @@ robj *createStringObjectFromLongDouble(long double value) {
int len;
/* We use 17 digits precision since with 128 bit floats that precision
- * after rouding is able to represent most small decimal numbers in a way
+ * after rounding is able to represent most small decimal numbers in a way
* that is "non surprising" for the user (that is, most small decimal
* numbers will be represented in a way that when converted back into
* a string are exactly the same as what the user typed.) */
diff --git a/src/pubsub.c b/src/pubsub.c
index 5f91334c3..510676242 100644
--- a/src/pubsub.c
+++ b/src/pubsub.c
@@ -117,7 +117,7 @@ int pubsubUnsubscribeChannel(redisClient *c, robj *channel, int notify) {
return retval;
}
-/* Subscribe a client to a pattern. Returns 1 if the operation succeeded, or 0 if the clinet was already subscribed to that pattern. */
+/* Subscribe a client to a pattern. Returns 1 if the operation succeeded, or 0 if the client was already subscribed to that pattern. */
int pubsubSubscribePattern(redisClient *c, robj *pattern) {
int retval = 0;
diff --git a/src/rdb.c b/src/rdb.c
index 5a08b6346..97492f4ec 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -265,7 +265,7 @@ err:
return NULL;
}
-/* Save a string objet as [len][data] on disk. If the object is a string
+/* Save a string object as [len][data] on disk. If the object is a string
* representation of an integer value we try to save it in a special form */
int rdbSaveRawString(rio *rdb, unsigned char *s, size_t len) {
int enclen;
@@ -321,7 +321,7 @@ int rdbSaveLongLongAsStringObject(rio *rdb, long long value) {
/* Like rdbSaveStringObjectRaw() but handle encoded objects */
int rdbSaveStringObject(rio *rdb, robj *obj) {
/* Avoid to decode the object, then encode it again, if the
- * object is alrady integer encoded. */
+ * object is already integer encoded. */
if (obj->encoding == REDIS_ENCODING_INT) {
return rdbSaveLongLongAsStringObject(rdb,(long)obj->ptr);
} else {
@@ -367,7 +367,7 @@ robj *rdbLoadEncodedStringObject(rio *rdb) {
}
/* Save a double value. Doubles are saved as strings prefixed by an unsigned
- * 8 bit integer specifing the length of the representation.
+ * 8 bit integer specifying the length of the representation.
* This 8 bit integer has special values in order to specify the following
* conditions:
* 253: not a number
@@ -606,7 +606,7 @@ off_t rdbSavedObjectLen(robj *o) {
/* Save a key-value pair, with expire time, type, key, value.
* On error -1 is returned.
- * On success if the key was actaully saved 1 is returned, otherwise 0
+ * On success if the key was actually saved 1 is returned, otherwise 0
* is returned (the key was already expired). */
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val,
long long expiretime, long long now)
@@ -1109,7 +1109,7 @@ int rdbLoad(char *filename) {
/* We read the time so we need to read the object type again. */
if ((type = rdbLoadType(&rdb)) == -1) goto eoferr;
/* the EXPIRETIME opcode specifies time in seconds, so convert
- * into milliesconds. */
+ * into milliseconds. */
expiretime *= 1000;
} else if (type == REDIS_RDB_OPCODE_EXPIRETIME_MS) {
/* Milliseconds precision expire times introduced with RDB
diff --git a/src/rdb.h b/src/rdb.h
index d0f2aad86..54ee4e514 100644
--- a/src/rdb.h
+++ b/src/rdb.h
@@ -51,7 +51,7 @@
* number specify the kind of object that follows.
* See the REDIS_RDB_ENC_* defines.
*
- * Lenghts up to 63 are stored using a single byte, most DB keys, and may
+ * Lengths up to 63 are stored using a single byte, most DB keys, and may
* values, will fit inside. */
#define REDIS_RDB_6BITLEN 0
#define REDIS_RDB_14BITLEN 1
diff --git a/src/redis-check-dump.c b/src/redis-check-dump.c
index 950655a02..d09527781 100644
--- a/src/redis-check-dump.c
+++ b/src/redis-check-dump.c
@@ -79,7 +79,7 @@
* number specify the kind of object that follows.
* See the REDIS_RDB_ENC_* defines.
*
- * Lenghts up to 63 are stored using a single byte, most DB keys, and may
+ * Lengths up to 63 are stored using a single byte, most DB keys, and may
* values, will fit inside. */
#define REDIS_RDB_6BITLEN 0
#define REDIS_RDB_14BITLEN 1
@@ -133,7 +133,7 @@ typedef struct {
char success;
} entry;
-/* Global vars that are actally used as constants. The following double
+/* Global vars that are actually used as constants. The following double
* values are used for double on-disk serialization, and are initialized
* at runtime to avoid strange compiler optimizations. */
static double R_Zero, R_PosInf, R_NegInf, R_Nan;
diff --git a/src/redis-cli.c b/src/redis-cli.c
index 1fb076fa5..318f1822a 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -308,7 +308,7 @@ static int cliSelect() {
return REDIS_ERR;
}
-/* Connect to the client. If force is not zero the connection is performed
+/* Connect to the server. If force is not zero the connection is performed
* even if there is already a connected socket. */
static int cliConnect(int force) {
if (context == NULL || force) {
@@ -976,7 +976,7 @@ static void slaveMode(void) {
char buf[1024];
fprintf(stderr,"SYNC with master, discarding %llu "
- "bytes of bulk tranfer...\n", payload);
+ "bytes of bulk transfer...\n", payload);
/* Discard the payload. */
while(payload) {
@@ -1141,7 +1141,7 @@ static void pipeMode(void) {
int j;
eof = 1;
- /* Everything transfered, so we queue a special
+ /* Everything transferred, so we queue a special
* ECHO command that we can match in the replies
* to make sure everything was read from the server. */
for (j = 0; j < 20; j++)
diff --git a/src/redis-trib.rb b/src/redis-trib.rb
index 1b83e19c2..58ad5a86f 100755
--- a/src/redis-trib.rb
+++ b/src/redis-trib.rb
@@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# TODO (temporary here, we'll move this into the Github issues once
-# redis-trib initial implementation is complted).
+# redis-trib initial implementation is completed).
#
# - Make sure that if the rehashing fails in the middle redis-trib will try
# to recover.
@@ -17,7 +17,7 @@
# 1) If there is a node that pretend to receive a slot, or to migrate a
# slot, but has no entries in that slot, fix it.
# 2) If there is a node having keys in slots that are not owned by it
-# fix this condiiton moving the entries in the same node.
+# fix this condition moving the entries in the same node.
# 3) Perform more possibly slow tests about the state of the cluster.
# 4) When aborted slot migration is detected, fix it.
@@ -168,12 +168,12 @@ class ClusterNode
# for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
slots = @info[:slots].keys.sort
- # As we want to aggregate adiacent slots we convert all the
+ # As we want to aggregate adjacent slots we convert all the
# slot integers into ranges (with just one element)
# So we have something like [1..1,2..2, ... and so forth.
slots.map!{|x| x..x}
- # Finally we group ranges with adiacent elements.
+ # Finally we group ranges with adjacent elements.
slots = slots.reduce([]) {|a,b|
if !a.empty? && b.first == (a[-1].last)+1
a[0..-2] + [(a[-1].first)..(b.last)]
@@ -313,7 +313,7 @@ class RedisTrib
def compute_reshard_table(sources,numslots)
moved = []
# Sort from bigger to smaller instance, for two reasons:
- # 1) If we take less slots than instanes it is better to start getting from
+ # 1) If we take less slots than instances it is better to start getting from
# the biggest instances.
# 2) We take one slot more from the first instance in the case of not perfect
# divisibility. Like we have 3 nodes and need to get 10 slots, we take
diff --git a/src/redis.c b/src/redis.c
index 4408fbd0e..b50c5bfc2 100644
--- a/src/redis.c
+++ b/src/redis.c
@@ -99,7 +99,7 @@ struct redisCommand *commandTable;
* m: may increase memory usage once called. Don't allow if out of memory.
* a: admin command, like SAVE or SHUTDOWN.
* p: Pub/Sub related command.
- * f: force replication of this command, regarless of server.dirty.
+ * f: force replication of this command, regardless of server.dirty.
* s: command not allowed in scripts.
* R: random command. Command is not deterministic, that is, the same command
* with the same arguments, with the same key space, may have different
@@ -290,7 +290,7 @@ void redisLogRaw(int level, const char *msg) {
if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg);
}
-/* Like redisLogRaw() but with printf-alike support. This is the funciton that
+/* Like redisLogRaw() but with printf-alike support. This is the function that
* is used across the code. The raw version is only used in order to dump
* the INFO output on crash. */
void redisLog(int level, const char *fmt, ...) {
@@ -365,7 +365,7 @@ void exitFromChild(int retcode) {
/*====================== Hash table type implementation ==================== */
-/* This is an hash table type that uses the SDS dynamic strings libary as
+/* This is an hash table type that uses the SDS dynamic strings library as
* keys and radis objects as values (objects can hold SDS strings,
* lists, sets). */
@@ -539,7 +539,7 @@ dictType commandTableDictType = {
NULL /* val destructor */
};
-/* Hash type hash table (note that small hashes are represented with zimpaps) */
+/* Hash type hash table (note that small hashes are represented with zipmaps) */
dictType hashDictType = {
dictEncObjHash, /* hash function */
NULL, /* key dup */
@@ -761,7 +761,7 @@ int clientsCronHandleTimeout(redisClient *c) {
/* The client query buffer is an sds.c string that can end with a lot of
* free space not used, this function reclaims space if needed.
*
- * The funciton always returns 0 as it never terminates the client. */
+ * The function always returns 0 as it never terminates the client. */
int clientsCronResizeQueryBuffer(redisClient *c) {
size_t querybuf_size = sdsAllocSize(c->querybuf);
time_t idletime = server.unixtime - c->lastinteraction;
@@ -819,11 +819,11 @@ void clientsCron(void) {
*
* - Active expired keys collection (it is also performed in a lazy way on
* lookup).
- * - Software watchdong.
+ * - Software watchdog.
* - Update some statistic.
* - Incremental rehashing of the DBs hash tables.
* - Triggering BGSAVE / AOF rewrite, and handling of terminated children.
- * - Clients timeout of differnet kinds.
+ * - Clients timeout of different kinds.
* - Replication reconnection.
* - Many more...
*
@@ -852,7 +852,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
/* We have just 22 bits per object for LRU information.
* So we use an (eventually wrapping) LRU clock with 10 seconds resolution.
- * 2^22 bits with 10 seconds resoluton is more or less 1.5 years.
+ * 2^22 bits with 10 seconds resolution is more or less 1.5 years.
*
* Note that even if this will wrap after 1.5 years it's not a problem,
* everything will still work but just some object will appear younger
@@ -890,7 +890,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
}
}
- /* We don't want to resize the hash tables while a bacground saving
+ /* We don't want to resize the hash tables while a background saving
* is in progress: the saving child is created using fork() that is
* implemented with a copy-on-write semantic in most modern systems, so
* if we resize the HT while there is the saving child at work actually
@@ -1215,7 +1215,7 @@ void initServerConfig() {
R_NegInf = -1.0/R_Zero;
R_Nan = R_Zero/R_Zero;
- /* Command table -- we intiialize it here as it is part of the
+ /* Command table -- we initialize it here as it is part of the
* initial configuration, since command names may be changed via
* redis.conf using the rename-command directive. */
server.commands = dictCreate(&commandTableDictType,NULL);
@@ -1526,7 +1526,7 @@ void call(redisClient *c, int flags) {
long long dirty, start = ustime(), duration;
/* Sent the command to clients in MONITOR mode, only if the commands are
- * not geneated from reading an AOF. */
+ * not generated from reading an AOF. */
if (listLength(server.monitors) &&
!server.loading &&
!(c->cmd->flags & REDIS_CMD_SKIP_MONITOR))
@@ -1588,8 +1588,8 @@ void call(redisClient *c, int flags) {
* server for a bulk read from the client.
*
* If 1 is returned the client is still alive and valid and
- * and other operations can be performed by the caller. Otherwise
- * if 0 is returned the client was destroied (i.e. after QUIT). */
+ * other operations can be performed by the caller. Otherwise
+ * if 0 is returned the client was destroyed (i.e. after QUIT). */
int processCommand(redisClient *c) {
/* The QUIT command is handled separately. Normal command procs will
* go through checking for replication and QUIT will cause trouble
@@ -1865,7 +1865,7 @@ void echoCommand(redisClient *c) {
void timeCommand(redisClient *c) {
struct timeval tv;
- /* gettimeofday() can only fail if &tv is a bad addresss so we
+ /* gettimeofday() can only fail if &tv is a bad address so we
* don't check for errors. */
gettimeofday(&tv,NULL);
addReplyMultiBulkLen(c,2);
diff --git a/src/redis.h b/src/redis.h
index b8ee62cb7..8c1604ee6 100644
--- a/src/redis.h
+++ b/src/redis.h
@@ -144,12 +144,12 @@
*
* 00|000000 => if the two MSB are 00 the len is the 6 bits of this byte
* 01|000000 00000000 => 01, the len is 14 byes, 6 bits + 8 bits of next byte
- * 10|000000 [32 bit integer] => if it's 01, a full 32 bit len will follow
+ * 10|000000 [32 bit integer] => if it's 10, a full 32 bit len will follow
* 11|000000 this means: specially encoded object will follow. The six bits
* number specify the kind of object that follows.
* See the REDIS_RDB_ENC_* defines.
*
- * Lenghts up to 63 are stored using a single byte, most DB keys, and may
+ * Lengths up to 63 are stored using a single byte, most DB keys, and may
* values, will fit inside. */
#define REDIS_RDB_6BITLEN 0
#define REDIS_RDB_14BITLEN 1
@@ -319,7 +319,7 @@ typedef struct redisObject {
void *ptr;
} robj;
-/* Macro used to initalize a Redis object allocated on the stack.
+/* Macro used to initialize a Redis object allocated on the stack.
* Note that this macro is taken near the structure definition to make sure
* we'll update it when the structure is changed, to avoid bugs like
* bug #85 introduced exactly in this way. */
@@ -376,7 +376,7 @@ typedef struct readyList {
robj *key;
} readyList;
-/* With multiplexing we need to take per-clinet state.
+/* With multiplexing we need to take per-client state.
* Clients are taken in a liked list. */
typedef struct redisClient {
int fd;
@@ -554,14 +554,14 @@ typedef struct {
/* Redis cluster messages header */
/* Note that the PING, PONG and MEET messages are actually the same exact
- * kind of packet. PONG is the reply to ping, in the extact format as a PING,
+ * kind of packet. PONG is the reply to ping, in the exact format as a PING,
* while MEET is a special PING that forces the receiver to add the sender
* as a node (if it is not already in the list). */
#define CLUSTERMSG_TYPE_PING 0 /* Ping */
#define CLUSTERMSG_TYPE_PONG 1 /* Pong (reply to Ping) */
#define CLUSTERMSG_TYPE_MEET 2 /* Meet "let's join" message */
#define CLUSTERMSG_TYPE_FAIL 3 /* Mark node xxx as failing */
-#define CLUSTERMSG_TYPE_PUBLISH 4 /* Pub/Sub Publish propatagion */
+#define CLUSTERMSG_TYPE_PUBLISH 4 /* Pub/Sub Publish propagation */
/* Initially we don't know our "name", but we'll find it once we connect
* to the first node, using the getsockname() function. Then we'll use this
@@ -645,7 +645,7 @@ struct redisServer {
mode_t unixsocketperm; /* UNIX socket permission */
int ipfd; /* TCP socket file descriptor */
int sofd; /* Unix socket file descriptor */
- int cfd; /* Cluster bus lisetning socket */
+ int cfd; /* Cluster bus listening socket */
list *clients; /* List of active clients */
list *clients_to_close; /* Clients to close asynchronously */
list *slaves, *monitors; /* List of slaves and MONITORs */
@@ -669,7 +669,7 @@ struct redisServer {
long long stat_keyspace_hits; /* Number of successful lookups of keys */
long long stat_keyspace_misses; /* Number of failed lookups of keys */
size_t stat_peak_memory; /* Max used memory record */
- long long stat_fork_time; /* Time needed to perform latets fork() */
+ long long stat_fork_time; /* Time needed to perform latest fork() */
long long stat_rejected_conn; /* Clients rejected because of maxclients */
list *slowlog; /* SLOWLOG list of commands */
long long slowlog_entry_id; /* SLOWLOG current entry ID */
@@ -718,7 +718,7 @@ struct redisServer {
char *rdb_filename; /* Name of RDB file */
int rdb_compression; /* Use compression in RDB? */
int rdb_checksum; /* Use RDB checksum? */
- time_t lastsave; /* Unix time of last save succeeede */
+ time_t lastsave; /* Unix time of last successful save */
time_t rdb_save_time_last; /* Time used by last RDB save run. */
time_t rdb_save_time_start; /* Current RDB save start time. */
int lastbgsave_status; /* REDIS_OK or REDIS_ERR */
@@ -753,7 +753,7 @@ struct redisServer {
/* Limits */
unsigned int maxclients; /* Max number of simultaneous clients */
unsigned long long maxmemory; /* Max number of memory bytes to use */
- int maxmemory_policy; /* Policy for key evition */
+ int maxmemory_policy; /* Policy for key eviction */
int maxmemory_samples; /* Pricision of random sampling */
/* Blocked clients */
unsigned int bpop_blocked_clients; /* Number of clients blocked by lists */
@@ -793,7 +793,7 @@ struct redisServer {
int lua_timedout; /* True if we reached the time limit for script
execution. */
int lua_kill; /* Kill the script if true. */
- /* Assert & bug reportign */
+ /* Assert & bug reporting */
char *assert_failed;
char *assert_file;
int assert_line;
@@ -812,14 +812,14 @@ struct redisCommand {
char *name;
redisCommandProc *proc;
int arity;
- char *sflags; /* Flags as string represenation, one char per flag. */
+ char *sflags; /* Flags as string representation, one char per flag. */
int flags; /* The actual flags, obtained from the 'sflags' field. */
/* Use a function to determine keys arguments in a command line.
* Used for Redis Cluster redirect. */
redisGetKeysProc *getkeys_proc;
/* What keys should be loaded in background when calling this command? */
int firstkey; /* The first argument that's a key (0 = no keys) */
- int lastkey; /* THe last argument that's a key */
+ int lastkey; /* The last argument that's a key */
int keystep; /* The step between first and last key */
long long microseconds, calls;
};
@@ -866,7 +866,7 @@ typedef struct {
dictIterator *di;
} setTypeIterator;
-/* Structure to hold hash iteration abstration. Note that iteration over
+/* Structure to hold hash iteration abstraction. Note that iteration over
* hashes involves both fields and values. Because it is possible that
* not both are required, store pointers in the iterator to avoid
* unnecessary memory allocation for fields/values. */
diff --git a/src/release.c b/src/release.c
index 34c3d813c..4c5e1f745 100644
--- a/src/release.c
+++ b/src/release.c
@@ -27,8 +27,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-/* Every time the Redis Git SHA1 or Dirty status changes only this file
- * small file is recompiled, as we access this information in all the other
+/* Every time the Redis Git SHA1 or Dirty status changes only this small
+ * file is recompiled, as we access this information in all the other
* files using this functions. */
#include <string.h>
diff --git a/src/replication.c b/src/replication.c
index 15fc71e7d..2f0cba701 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -52,7 +52,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_START) continue;
/* Feed slaves that are waiting for the initial SYNC (so these commands
- * are queued in the output buffer until the intial SYNC completes),
+ * are queued in the output buffer until the initial SYNC completes),
* or are already in sync with the master. */
if (slave->slaveseldb != dictid) {
robj *selectcmd;
@@ -115,7 +115,7 @@ void replicationFeedMonitors(redisClient *c, list *monitors, int dictid, robj **
}
void syncCommand(redisClient *c) {
- /* ignore SYNC if aleady slave or in monitor mode */
+ /* ignore SYNC if already slave or in monitor mode */
if (c->flags & REDIS_SLAVE) return;
/* Refuse SYNC requests if we are a slave but the link with our master
@@ -229,7 +229,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
if (slave->repldboff == 0) {
/* Write the bulk write count before to transfer the DB. In theory here
* we don't know how much room there is in the output buffer of the
- * socket, but in pratice SO_SNDLOWAT (the minimum count for output
+ * socket, but in practice SO_SNDLOWAT (the minimum count for output
* operations) will never be smaller than the few bytes we need. */
sds bulkcount;
@@ -272,7 +272,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
}
}
-/* This function is called at the end of every backgrond saving.
+/* This function is called at the end of every background saving.
* The argument bgsaveerr is REDIS_OK if the background saving succeeded
* otherwise REDIS_ERR is passed to the function.
*
@@ -451,7 +451,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) {
stopAppendOnly();
while (retry-- && startAppendOnly() == REDIS_ERR) {
- redisLog(REDIS_WARNING,"Failed enabling the AOF after successful master synchrnization! Trying it again in one second.");
+ redisLog(REDIS_WARNING,"Failed enabling the AOF after successful master synchronization! Trying it again in one second.");
sleep(1);
}
if (!retry) {
diff --git a/src/scripting.c b/src/scripting.c
index 46301eb11..6661f3748 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -48,7 +48,7 @@ void sha1hex(char *digest, char *script, size_t len);
/* Take a Redis reply in the Redis protocol format and convert it into a
* Lua type. Thanks to this function, and the introduction of not connected
- * clients, it is trvial to implement the redis() lua function.
+ * clients, it is trivial to implement the redis() lua function.
*
* Basically we take the arguments, execute the Redis command in the context
* of a non connected client, then take the generated reply and convert it
@@ -58,7 +58,7 @@ void sha1hex(char *digest, char *script, size_t len);
*
* Note: in this function we do not do any sanity check as the reply is
* generated by Redis directly. This allows us to go faster.
- * The reply string can be altered during the parsing as it is discared
+ * The reply string can be altered during the parsing as it is discarded
* after the conversion is completed.
*
* Errors are returned as a table with a single 'err' field set to the
@@ -597,7 +597,7 @@ void scriptingInit(void) {
lua_setglobal(lua,"math");
- /* Add a helper funciton that we use to sort the multi bulk output of non
+ /* Add a helper function that we use to sort the multi bulk output of non
* deterministic commands, when containing 'false' elements. */
{
char *compare_func = "function __redis__compare_helper(a,b)\n"
@@ -638,7 +638,7 @@ void scriptingReset(void) {
scriptingInit();
}
-/* Perform the SHA1 of the input string. We use this both for hasing script
+/* Perform the SHA1 of the input string. We use this both for hashing script
* bodies in order to obtain the Lua function name, and in the implementation
* of redis.sha1().
*
@@ -677,7 +677,7 @@ void luaReplyToRedisReply(redisClient *c, lua_State *lua) {
case LUA_TTABLE:
/* We need to check if it is an array, an error, or a status reply.
* Error are returned as a single element table with 'err' field.
- * Status replies are returned as single elment table with 'ok' field */
+ * Status replies are returned as single element table with 'ok' field */
lua_pushstring(lua,"err");
lua_gettable(lua,-2);
t = lua_type(lua,-1);
@@ -834,7 +834,7 @@ void evalGenericCommand(redisClient *c, int evalsha) {
if (lua_isnil(lua,1)) {
lua_pop(lua,1); /* remove the nil from the stack */
/* Function not defined... let's define it if we have the
- * body of the funciton. If this is an EVALSHA call we can just
+ * body of the function. If this is an EVALSHA call we can just
* return an error. */
if (evalsha) {
addReply(c, shared.noscripterr);
diff --git a/src/sds.c b/src/sds.c
index e8491acfa..85858a4f0 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -141,7 +141,7 @@ size_t sdsAllocSize(sds s) {
* right-trim the string.
*
* Using sdsIncrLen() and sdsMakeRoomFor() it is possible to mount the
- * following schema to cat bytes coming from the kerenl to the end of an
+ * following schema to cat bytes coming from the kernel to the end of an
* sds string new things without copying into an intermediate buffer:
*
* oldlen = sdslen(s);
@@ -596,7 +596,7 @@ void sdssplitargs_free(sds *argv, int argc) {
}
/* Modify the string substituting all the occurrences of the set of
- * characters specifed in the 'from' string to the corresponding character
+ * characters specified in the 'from' string to the corresponding character
* in the 'to' array.
*
* For instance: sdsmapchars(mystring, "ho", "01", 2)
diff --git a/src/sentinel.c b/src/sentinel.c
index d8a960713..8009e5ed9 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -969,9 +969,9 @@ const char *sentinelRedisInstanceTypeStr(sentinelRedisInstance *ri) {
* a master's Sentinels dictionary, we want to be very sure about not
* having duplicated instances for any reason. This is so important because
* we use those other sentinels in order to run our quorum protocol to
- * understand if it's time to proceeed with the fail over.
+ * understand if it's time to proceed with the fail over.
*
- * Making sure no duplication is possible we greately improve the robustness
+ * Making sure no duplication is possible we greatly improve the robustness
* of the quorum (otherwise we may end counting the same instance multiple
* times for some reason).
*
@@ -1238,7 +1238,7 @@ void sentinelKillLink(sentinelRedisInstance *ri, redisAsyncContext *c) {
* cleanup needed.
*
* Note: we don't free the hiredis context as hiredis will do it for us
- * for async conenctions. */
+ * for async connections. */
void sentinelDisconnectInstanceFromContext(const redisAsyncContext *c) {
sentinelRedisInstance *ri = c->data;
int pubsub;
@@ -1647,7 +1647,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd
/* Update the last activity in the pubsub channel. Note that since we
* receive our messages as well this timestamp can be used to detect
- * if the link is probably diconnected even if it seems otherwise. */
+ * if the link is probably disconnected even if it seems otherwise. */
ri->pc_last_activity = mstime();
/* Sanity check in the reply we expect, so that the code that follows
@@ -1939,7 +1939,7 @@ void addReplySentinelRedisInstance(redisClient *c, sentinelRedisInstance *ri) {
setDeferredMultiBulkLength(c,mbl,fields*2);
}
-/* Output a number of instances contanined inside a dictionary as
+/* Output a number of instances contained inside a dictionary as
* Redis protocol. */
void addReplyDictOfRedisInstances(redisClient *c, dict *instances) {
dictIterator *di;
@@ -2535,7 +2535,7 @@ void sentinelStartFailoverIfNeeded(sentinelRedisInstance *master) {
* 3) info_refresh more recent than SENTINEL_INFO_VALIDITY_TIME.
* 4) master_link_down_time no more than:
* (now - master->s_down_since_time) + (master->down_after_period * 10).
- * 5) Slave priority can't be zero, otherwise the slave is discareded.
+ * 5) Slave priority can't be zero, otherwise the slave is discarded.
*
* Among all the slaves matching the above conditions we select the slave
* with lower slave_priority. If priority is the same we select the slave
@@ -2611,10 +2611,10 @@ void sentinelFailoverWaitStart(sentinelRedisInstance *ri) {
/* If we in "wait start" but the master is no longer in ODOWN nor in
* SDOWN condition we abort the failover. This is important as it
* prevents a useless failover in a a notable case of netsplit, where
- * the senitnels are split from the redis instances. In this case
+ * the sentinels are split from the redis instances. In this case
* the failover will not start while there is the split because no
* good slave can be reached. However when the split is resolved, we
- * can go to waitstart if the slave is back rechable a few milliseconds
+ * can go to waitstart if the slave is back reachable a few milliseconds
* before the master is. In that case when the master is back online
* we cancel the failover. */
if ((ri->flags & (SRI_S_DOWN|SRI_O_DOWN|SRI_FORCE_FAILOVER)) == 0) {
@@ -3026,13 +3026,13 @@ void sentinelHandleDictOfRedisInstances(dict *instances) {
* following conditions happen:
*
* 1) The Sentiel process for some time is blocked, for every kind of
- * random reason: the load is huge, the computer was freezed for some time
+ * random reason: the load is huge, the computer was frozen for some time
* in I/O or alike, the process was stopped by a signal. Everything.
* 2) The system clock was altered significantly.
*
* Under both this conditions we'll see everything as timed out and failing
* without good reasons. Instead we enter the TILT mode and wait
- * for SENTIENL_TILT_PERIOD to elapse before starting to act again.
+ * for SENTINEL_TILT_PERIOD to elapse before starting to act again.
*
* During TILT time we still collect information, we just do not act. */
void sentinelCheckTiltCondition(void) {
diff --git a/src/sha1.c b/src/sha1.c
index 26a5565ee..59e6f461d 100644
--- a/src/sha1.c
+++ b/src/sha1.c
@@ -57,13 +57,13 @@ A million repetitions of "a"
void SHA1Transform(u_int32_t state[5], const unsigned char buffer[64])
{
-u_int32_t a, b, c, d, e;
-typedef union {
- unsigned char c[64];
- u_int32_t l[16];
-} CHAR64LONG16;
+ u_int32_t a, b, c, d, e;
+ typedef union {
+ unsigned char c[64];
+ u_int32_t l[16];
+ } CHAR64LONG16;
#ifdef SHA1HANDSOFF
-CHAR64LONG16 block[1]; /* use array to appear as a pointer */
+ CHAR64LONG16 block[1]; /* use array to appear as a pointer */
memcpy(block, buffer, 64);
#else
/* The following had better never be used because it causes the
@@ -71,7 +71,7 @@ CHAR64LONG16 block[1]; /* use array to appear as a pointer */
* And the result is written through. I threw a "const" in, hoping
* this will cause a diagnostic.
*/
-CHAR64LONG16* block = (const CHAR64LONG16*)buffer;
+ CHAR64LONG16* block = (const CHAR64LONG16*)buffer;
#endif
/* Copy context->state[] to working vars */
a = state[0];
@@ -132,12 +132,11 @@ void SHA1Init(SHA1_CTX* context)
void SHA1Update(SHA1_CTX* context, const unsigned char* data, u_int32_t len)
{
-u_int32_t i;
-u_int32_t j;
+ u_int32_t i, j;
j = context->count[0];
if ((context->count[0] += len << 3) < j)
- context->count[1]++;
+ context->count[1]++;
context->count[1] += (len>>29);
j = (j >> 3) & 63;
if ((j + len) > 63) {
@@ -157,9 +156,9 @@ u_int32_t j;
void SHA1Final(unsigned char digest[20], SHA1_CTX* context)
{
-unsigned i;
-unsigned char finalcount[8];
-unsigned char c;
+ unsigned i;
+ unsigned char finalcount[8];
+ unsigned char c;
#if 0 /* untested "improvement" by DHR */
/* Convert context->count to a sequence of bytes
@@ -170,12 +169,12 @@ unsigned char c;
unsigned char *fcp = &finalcount[8];
for (i = 0; i < 2; i++)
- {
- u_int32_t t = context->count[i];
- int j;
+ {
+ u_int32_t t = context->count[i];
+ int j;
- for (j = 0; j < 4; t >>= 8, j++)
- *--fcp = (unsigned char) t
+ for (j = 0; j < 4; t >>= 8, j++)
+ *--fcp = (unsigned char) t;
}
#else
for (i = 0; i < 8; i++) {
@@ -226,3 +225,4 @@ main(int argc, char **argv)
}
#endif
+
diff --git a/src/sort.c b/src/sort.c
index 39505b136..cd54072f3 100644
--- a/src/sort.c
+++ b/src/sort.c
@@ -45,7 +45,7 @@ redisSortOperation *createSortOperation(int type, robj *pattern) {
/* Return the value associated to the key with a name obtained using
* the following rules:
*
- * 1) The first occurence of '*' in 'pattern' is substituted with 'subst'.
+ * 1) The first occurrence of '*' in 'pattern' is substituted with 'subst'.
*
* 2) If 'pattern' matches the "->" string, everything on the left of
* the arrow is treated as the name of an hash field, and the part on the
@@ -147,7 +147,7 @@ int sortCompare(const void *s1, const void *s2) {
cmp = -1;
} else {
/* Objects have the same score, but we don't want the comparison
- * to be undefined, so we compare objects lexicographycally.
+ * to be undefined, so we compare objects lexicographically.
* This way the result of SORT is deterministic. */
cmp = compareStringObjects(so1->obj,so2->obj);
}
@@ -205,7 +205,7 @@ void sortCommand(redisClient *c) {
/* Now we need to protect sortval incrementing its count, in the future
* SORT may have options able to overwrite/delete keys during the sorting
- * and the sorted key itself may get destroied */
+ * and the sorted key itself may get destroyed */
if (sortval)
incrRefCount(sortval);
else
diff --git a/src/t_list.c b/src/t_list.c
index 50db6c536..16b5e1be5 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -45,10 +45,10 @@ void listTypeTryConversion(robj *subject, robj *value) {
listTypeConvert(subject,REDIS_ENCODING_LINKEDLIST);
}
-/* The function pushes an elmenet to the specified list object 'subject',
+/* The function pushes an element to the specified list object 'subject',
* at head or tail position as specified by 'where'.
*
- * There is no need for the caller to incremnet the refcount of 'value' as
+ * There is no need for the caller to increment the refcount of 'value' as
* the function takes care of it if needed. */
void listTypePush(robj *subject, robj *value, int where) {
/* Check if we need to convert the ziplist */
@@ -825,7 +825,7 @@ void unblockClientWaitingData(redisClient *c) {
/* If the specified key has clients blocked waiting for list pushes, this
* function will put the key reference into the server.ready_keys list.
* Note that db->ready_keys is an hash table that allows us to avoid putting
- * the same key agains and again in the list in case of multiple pushes
+ * the same key again and again in the list in case of multiple pushes
* made by a script or in the context of MULTI/EXEC.
*
* The list will be finally processed by handleClientsBlockedOnLists() */
@@ -858,7 +858,7 @@ void signalListAsReady(redisClient *c, robj *key) {
*
* 1) Provide the client with the 'value' element.
* 2) If the dstkey is not NULL (we are serving a BRPOPLPUSH) also push the
- * 'value' element on the destionation list (the LPUSH side of the command).
+ * 'value' element on the destination list (the LPUSH side of the command).
* 3) Propagate the resulting BRPOP, BLPOP and additional LPUSH if any into
* the AOF and replication channel.
*
@@ -868,7 +868,7 @@ void signalListAsReady(redisClient *c, robj *key) {
*
* The function returns REDIS_OK if we are able to serve the client, otherwise
* REDIS_ERR is returned to signal the caller that the list POP operation
- * should be undoed as the client was not served: This only happens for
+ * should be undone as the client was not served: This only happens for
* BRPOPLPUSH that fails to push the value to the destination key as it is
* of the wrong type. */
int serveClientBlockedOnList(redisClient *receiver, robj *key, robj *dstkey, redisDb *db, robj *value, int where)
diff --git a/src/t_set.c b/src/t_set.c
index 01ac92aa4..0909d0301 100644
--- a/src/t_set.c
+++ b/src/t_set.c
@@ -188,7 +188,7 @@ robj *setTypeNextObject(setTypeIterator *si) {
* The caller provides both pointers to be populated with the right
* object. The return value of the function is the object->encoding
* field of the object and is used by the caller to check if the
- * int64_t pointer or the redis object pointere was populated.
+ * int64_t pointer or the redis object pointer was populated.
*
* When an object is returned (the set was a real set) the ref count
* of the object is not incremented so this function can be considered
@@ -606,7 +606,7 @@ void sinterGenericCommand(redisClient *c, robj **setkeys, unsigned long setnum,
sets[j] = setobj;
}
/* Sort sets from the smallest to largest, this will improve our
- * algorithm's performace */
+ * algorithm's performance */
qsort(sets,setnum,sizeof(robj*),qsortCompareSetsByCardinality);
/* The first thing we should output is the total number of elements...
diff --git a/src/t_string.c b/src/t_string.c
index 00510c710..8ba915a58 100644
--- a/src/t_string.c
+++ b/src/t_string.c
@@ -43,7 +43,7 @@ static int checkStringLength(redisClient *c, long long size) {
}
void setGenericCommand(redisClient *c, int nx, robj *key, robj *val, robj *expire, int unit) {
- long long milliseconds = 0; /* initialized to avoid an harmness warning */
+ long long milliseconds = 0; /* initialized to avoid any harmness warning */
if (expire) {
if (getLongLongFromObjectOrReply(c, expire, &milliseconds, NULL) != REDIS_OK)
@@ -340,7 +340,7 @@ void incrbyfloatCommand(redisClient *c) {
addReplyBulk(c,new);
/* Always replicate INCRBYFLOAT as a SET command with the final value
- * in order to make sure that differences in float pricision or formatting
+ * in order to make sure that differences in float precision or formatting
* will not create differences in replicas or after an AOF restart. */
aux = createStringObject("SET",3);
rewriteClientCommandArgument(c,0,aux);
diff --git a/src/t_zset.c b/src/t_zset.c
index f5cdec3e4..0d92f681f 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -125,7 +125,7 @@ zskiplistNode *zslInsert(zskiplist *zsl, double score, robj *obj) {
}
/* we assume the key is not already inside, since we allow duplicated
* scores, and the re-insertion of score and redis object should never
- * happpen since the caller of zslInsert() should test in the hash table
+ * happen since the caller of zslInsert() should test in the hash table
* if the element is already inside or not. */
level = zslRandomLevel();
if (level > zsl->level) {
@@ -285,7 +285,7 @@ zskiplistNode *zslLastInRange(zskiplist *zsl, zrangespec range) {
}
/* Delete all the elements with score between min and max from the skiplist.
- * Min and mx are inclusive, so a score >= min || score <= max is deleted.
+ * Min and max are inclusive, so a score >= min || score <= max is deleted.
* Note that this function takes the reference to the hash table view of the
* sorted set, in order to remove the elements from the hash table too. */
unsigned long zslDeleteRangeByScore(zskiplist *zsl, zrangespec range, dict *dict) {
diff --git a/src/zmalloc.c b/src/zmalloc.c
index a9ba09435..210425828 100644
--- a/src/zmalloc.c
+++ b/src/zmalloc.c
@@ -250,7 +250,7 @@ void zmalloc_set_oom_handler(void (*oom_handler)(size_t)) {
*
* For this kind of "fast RSS reporting" usages use instead the
* function RedisEstimateRSS() that is a much faster (and less precise)
- * version of the funciton. */
+ * version of the function. */
#if defined(HAVE_PROC_STAT)
#include <unistd.h>
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
index 3dbfc902b..e42f87725 100644
--- a/tests/unit/scripting.tcl
+++ b/tests/unit/scripting.tcl
@@ -34,7 +34,7 @@ start_server {tags {"scripting"}} {
r eval {return {1,2,3,'ciao',{1,2}}} 0
} {1 2 3 ciao {1 2}}
- test {EVAL - Are the KEYS and ARGS arrays populated correctly?} {
+ test {EVAL - Are the KEYS and ARGV arrays populated correctly?} {
r eval {return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}} 2 a b c d
} {a b c d}