summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSalvatore Sanfilippo <antirez@gmail.com>2014-10-06 09:44:54 +0200
committerSalvatore Sanfilippo <antirez@gmail.com>2014-10-06 09:44:54 +0200
commit3c6f9ac37c849c82aebf5b45e895faa6cc80e7be (patch)
tree5c4440927f0494b3e616e7d47991e660732165f6
parent389ec305b3e984422b91eca112e4ce37728fba20 (diff)
parentbe0061635eee4b489d3bb0f756ddb38dcc65dedd (diff)
downloadredis-3c6f9ac37c849c82aebf5b45e895faa6cc80e7be.tar.gz
Merge pull request #1902 from mattsta/comment-fixes
ALL comment fixes
-rw-r--r--00-RELEASENOTES2
-rw-r--r--BUGS2
-rw-r--r--deps/hiredis/test.c2
-rw-r--r--deps/linenoise/.gitignore4
-rw-r--r--deps/linenoise/README.markdown16
-rw-r--r--deps/linenoise/example.c45
-rw-r--r--redis.conf63
-rw-r--r--sentinel.conf2
-rw-r--r--src/adlist.c2
-rw-r--r--src/aof.c26
-rw-r--r--src/cluster.c26
-rw-r--r--src/cluster.h6
-rw-r--r--src/config.c2
-rw-r--r--src/debug.c2
-rw-r--r--src/dict.c62
-rw-r--r--src/networking.c6
-rw-r--r--src/rdb.c2
-rw-r--r--src/redis.c4
-rw-r--r--src/redis.h2
-rw-r--r--src/replication.c12
-rw-r--r--src/scripting.c2
-rw-r--r--src/sds.c4
-rw-r--r--src/sentinel.c2
-rw-r--r--src/util.c2
-rw-r--r--src/ziplist.c9
-rw-r--r--tests/cluster/cluster.tcl2
-rw-r--r--tests/cluster/run.tcl2
-rw-r--r--tests/cluster/tests/03-failover-loop.tcl2
-rw-r--r--tests/cluster/tests/05-slave-selection.tcl2
-rw-r--r--tests/instances.tcl2
-rw-r--r--tests/integration/aof.tcl2
-rw-r--r--tests/integration/replication.tcl10
-rw-r--r--tests/sentinel/run.tcl2
-rw-r--r--tests/support/redis.tcl2
-rw-r--r--tests/support/server.tcl10
-rw-r--r--tests/support/test.tcl2
-rw-r--r--tests/test_helper.tcl4
-rw-r--r--tests/unit/auth.tcl2
-rw-r--r--tests/unit/basic.tcl8
-rw-r--r--tests/unit/bitops.tcl2
-rw-r--r--tests/unit/introspection.tcl2
-rw-r--r--tests/unit/maxmemory.tcl6
-rw-r--r--tests/unit/sort.tcl2
-rw-r--r--utils/lru/test-lru.rb4
44 files changed, 210 insertions, 165 deletions
diff --git a/00-RELEASENOTES b/00-RELEASENOTES
index 81ff184fe..ce472159e 100644
--- a/00-RELEASENOTES
+++ b/00-RELEASENOTES
@@ -5,7 +5,7 @@ There is no release notes for this branch, it gets forked into another branch
every time there is a partial feature freeze in order to eventually create
a new stable release.
-Usually "unstable" is stable enough for you to use it in development enviromnets
+Usually "unstable" is stable enough for you to use it in development environments
however you should never use it in production environments. It is possible
to download the latest stable release here:
diff --git a/BUGS b/BUGS
index 96d52bf8b..a8e936892 100644
--- a/BUGS
+++ b/BUGS
@@ -1 +1 @@
-Plese check https://github.com/antirez/redis/issues
+Please check https://github.com/antirez/redis/issues
diff --git a/deps/hiredis/test.c b/deps/hiredis/test.c
index 713cc06c5..2cc35a46f 100644
--- a/deps/hiredis/test.c
+++ b/deps/hiredis/test.c
@@ -51,7 +51,7 @@ static redisContext *select_database(redisContext *c) {
assert(reply != NULL);
freeReplyObject(reply);
- /* Make sure the DB is emtpy */
+ /* Make sure the DB is empty */
reply = redisCommand(c,"DBSIZE");
assert(reply != NULL);
if (reply->type == REDIS_REPLY_INTEGER && reply->integer == 0) {
diff --git a/deps/linenoise/.gitignore b/deps/linenoise/.gitignore
index 28f258a30..7ab7825f5 100644
--- a/deps/linenoise/.gitignore
+++ b/deps/linenoise/.gitignore
@@ -1 +1,3 @@
-linenoise_example*
+linenoise_example
+*.dSYM
+history.txt
diff --git a/deps/linenoise/README.markdown b/deps/linenoise/README.markdown
index 9612da47f..2d21dc4e2 100644
--- a/deps/linenoise/README.markdown
+++ b/deps/linenoise/README.markdown
@@ -1,8 +1,12 @@
# Linenoise
-A minimal, zero-config, BSD licensed, readline replacement.
+A minimal, zero-config, BSD licensed, readline replacement used in Redis,
+MongoDB, and Android.
-News: linenoise is now part of [Android](http://android.git.kernel.org/?p=platform/system/core.git;a=tree;f=liblinenoise;h=56450eaed7f783760e5e6a5993ef75cde2e29dea;hb=HEAD Android)!
+* Single and multi line editing mode with the usual key bindings implemented.
+* History handling.
+* Completion.
+* About 1,100 lines of BSD license source code.
## Can a line editing library be 20k lines of code?
@@ -10,7 +14,7 @@ Line editing with some support for history is a really important feature for com
So what usually happens is either:
- * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Readl world example of this problem: Tclsh).
+ * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Real world example of this problem: Tclsh).
* Smaller programs not using a configure script not supporting line editing at all (A problem we had with Redis-cli for instance).
The result is a pollution of binaries without line editing support.
@@ -23,13 +27,14 @@ Apparently almost every terminal you can happen to use today has some kind of su
Since it's so young I guess there are a few bugs, or the lib may not compile or work with some operating system, but it's a matter of a few weeks and eventually we'll get it right, and there will be no excuses for not shipping command line tools without built-in line editing support.
-The library is currently less than 400 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use both in free software and commercial software.
+The library is currently about 1100 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use both in free software and commercial software.
## Tested with...
* Linux text only console ($TERM = linux)
* Linux KDE terminal application ($TERM = xterm)
* Linux xterm ($TERM = xterm)
+ * Linux Buildroot ($TERM = vt100)
* Mac OS X iTerm ($TERM = xterm)
* Mac OS X default Terminal.app ($TERM = xterm)
* OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen)
@@ -40,6 +45,7 @@ Please test it everywhere you can and report back!
## Let's push this forward!
-Please fork it and add something interesting and send me a pull request. What's especially interesting are fixes, new key bindings, completion.
+Patches should be provided in the respect of linenoise sensibility for small
+easy to understand code.
Send feedbacks to antirez at gmail
diff --git a/deps/linenoise/example.c b/deps/linenoise/example.c
index ea0b515c1..a2f0936ed 100644
--- a/deps/linenoise/example.c
+++ b/deps/linenoise/example.c
@@ -1,5 +1,6 @@
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include "linenoise.h"
@@ -10,16 +11,52 @@ void completion(const char *buf, linenoiseCompletions *lc) {
}
}
-int main(void) {
+int main(int argc, char **argv) {
char *line;
+ char *prgname = argv[0];
+ /* Parse options, with --multiline we enable multi line editing. */
+ while(argc > 1) {
+ argc--;
+ argv++;
+ if (!strcmp(*argv,"--multiline")) {
+ linenoiseSetMultiLine(1);
+ printf("Multi-line mode enabled.\n");
+ } else if (!strcmp(*argv,"--keycodes")) {
+ linenoisePrintKeyCodes();
+ exit(0);
+ } else {
+ fprintf(stderr, "Usage: %s [--multiline] [--keycodes]\n", prgname);
+ exit(1);
+ }
+ }
+
+ /* Set the completion callback. This will be called every time the
+ * user uses the <tab> key. */
linenoiseSetCompletionCallback(completion);
+
+ /* Load history from file. The history file is just a plain text file
+ * where entries are separated by newlines. */
linenoiseHistoryLoad("history.txt"); /* Load the history at startup */
+
+ /* Now this is the main loop of the typical linenoise-based application.
+ * The call to linenoise() will block as long as the user types something
+ * and presses enter.
+ *
+ * The typed string is returned as a malloc() allocated string by
+ * linenoise, so the user needs to free() it. */
while((line = linenoise("hello> ")) != NULL) {
- if (line[0] != '\0') {
+ /* Do something with the string. */
+ if (line[0] != '\0' && line[0] != '/') {
printf("echo: '%s'\n", line);
- linenoiseHistoryAdd(line);
- linenoiseHistorySave("history.txt"); /* Save every new entry */
+ linenoiseHistoryAdd(line); /* Add to the history. */
+ linenoiseHistorySave("history.txt"); /* Save the history on disk. */
+ } else if (!strncmp(line,"/historylen",11)) {
+ /* The "/historylen" command will change the history len. */
+ int len = atoi(line+11);
+ linenoiseHistorySetMaxLen(len);
+ } else if (line[0] == '/') {
+ printf("Unrecognized command: %s\n", line);
}
free(line);
}
diff --git a/redis.conf b/redis.conf
index e03b3aa9a..0547cada2 100644
--- a/redis.conf
+++ b/redis.conf
@@ -15,7 +15,7 @@
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
-# have a standard template that goes to all Redis server but also need
+# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
@@ -131,7 +131,7 @@ databases 16
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
-# Note: you can disable saving at all commenting all the "save" lines.
+# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
@@ -180,9 +180,9 @@ dbfilename dump.rdb
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
-#
+#
# The Append Only File will also be created inside this directory.
-#
+#
# Note that you must specify a directory here, not a file name.
dir ./
@@ -279,7 +279,7 @@ repl-disable-tcp-nodelay no
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
-# The biggest the replication backlog, the longer the time the slave can be
+# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
@@ -318,7 +318,7 @@ slave-priority 100
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
-# This option does not GUARANTEES that N replicas will accept the write, but
+# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
@@ -340,7 +340,7 @@ slave-priority 100
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
-#
+#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
@@ -406,18 +406,18 @@ slave-priority 100
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
-#
+#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
-# allkeys-lru -> remove any key accordingly to the LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
-#
+#
# Note: with any of the above policies, Redis will return an error on write
-# operations, when there are not suitable keys for eviction.
+# operations, when there are no suitable keys for eviction.
#
-# At the date of writing this commands are: set setnx setex append
+# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
@@ -465,13 +465,13 @@ appendonly no
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
-# instead to wait for more data in the output buffer. Some OS will really flush
+# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
-# always: fsync after every write to the append only log . Slow, Safest.
+# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
@@ -506,7 +506,7 @@ appendfsync everysec
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
-#
+#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
@@ -515,7 +515,7 @@ no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
-#
+#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
@@ -564,11 +564,11 @@ aof-load-truncated yes
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
-# When a long running script exceed the maximum execution time only the
+# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
-# is the only way to shut down the server in the case a write commands was
-# already issue by the script but the user don't want to wait for the natural
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
@@ -585,12 +585,12 @@ lua-time-limit 5000
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
-# Make sure that instances running in the same system does not have
+# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf
-# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
# for it to be considered in failure state.
# Most other internal time limits are multiple of the node timeout.
#
@@ -684,7 +684,7 @@ lua-time-limit 5000
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
-#
+#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
@@ -725,7 +725,7 @@ latency-monitor-threshold 0
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
-#
+#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
@@ -749,8 +749,8 @@ latency-monitor-threshold 0
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
-# by zero or multiple characters. The empty string means that notifications
-# are disabled at all.
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
@@ -782,7 +782,7 @@ list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
-# of just strings that happens to be integers in radix 10 in the range
+# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
@@ -800,7 +800,7 @@ zset-max-ziplist-value 64
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
-#
+#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
@@ -815,13 +815,13 @@ hll-sparse-max-bytes 3000
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
-#
+#
# The default is to use this millisecond 10 times every second in order to
-# active rehashing the main dictionaries, freeing memory when possible.
+# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
-# not a good thing in your environment that Redis can reply form time to time
+# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
@@ -870,7 +870,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
-# tasks to perform accordingly to the specified "hz" value.
+# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
@@ -887,4 +887,3 @@ hz 10
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
-
diff --git a/sentinel.conf b/sentinel.conf
index 4b3b79242..39d1044e2 100644
--- a/sentinel.conf
+++ b/sentinel.conf
@@ -29,7 +29,7 @@ port 26379
# dir <working-directory>
# Every long running process should have a well-defined working directory.
# For Redis Sentinel to chdir to /tmp at startup is the simplest thing
-# for the process to don't interferer with administrative tasks such as
+# for the process to don't interfere with administrative tasks such as
# unmounting filesystems.
dir /tmp
diff --git a/src/adlist.c b/src/adlist.c
index b4dba420f..b4cc785be 100644
--- a/src/adlist.c
+++ b/src/adlist.c
@@ -71,7 +71,7 @@ void listRelease(list *list)
zfree(list);
}
-/* Add a new node to the list, to head, contaning the specified 'value'
+/* Add a new node to the list, to head, containing the specified 'value'
* pointer as value.
*
* On error, NULL is returned and no operation is performed (i.e. the
diff --git a/src/aof.c b/src/aof.c
index 81f5f1fcd..0af519bfa 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -74,7 +74,7 @@ void aofRewriteBufferReset(void) {
listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree);
}
-/* Return the current size of the AOF rerwite buffer. */
+/* Return the current size of the AOF rewrite buffer. */
unsigned long aofRewriteBufferSize(void) {
listNode *ln;
listIter li;
@@ -245,7 +245,7 @@ int startAppendOnly(void) {
redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error.");
return REDIS_ERR;
}
- /* We correctly switched on AOF, now wait for the rerwite to be complete
+ /* We correctly switched on AOF, now wait for the rewrite to be complete
* in order to append data on disk. */
server.aof_state = REDIS_AOF_WAIT_REWRITE;
return REDIS_OK;
@@ -286,7 +286,7 @@ void flushAppendOnlyFile(int force) {
* the write for a couple of seconds. */
if (sync_in_progress) {
if (server.aof_flush_postponed_start == 0) {
- /* No previous write postponinig, remember that we are
+ /* No previous write postponing, remember that we are
* postponing the flush and return. */
server.aof_flush_postponed_start = server.unixtime;
return;
@@ -337,7 +337,7 @@ void flushAppendOnlyFile(int force) {
last_write_error_log = server.unixtime;
}
- /* Lof the AOF write error and record the error code. */
+ /* Log the AOF write error and record the error code. */
if (nwritten == -1) {
if (can_log) {
redisLog(REDIS_WARNING,"Error writing to the AOF file: %s",
@@ -361,7 +361,7 @@ void flushAppendOnlyFile(int force) {
"ftruncate: %s", strerror(errno));
}
} else {
- /* If the ftrunacate() succeeded we can set nwritten to
+ /* If the ftruncate() succeeded we can set nwritten to
* -1 since there is no longer partial data into the AOF. */
nwritten = -1;
}
@@ -373,7 +373,7 @@ void flushAppendOnlyFile(int force) {
/* We can't recover when the fsync policy is ALWAYS since the
* reply for the client is already in the output buffers, and we
* have the contract with the user that on acknowledged write data
- * is synched on disk. */
+ * is synced on disk. */
redisLog(REDIS_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting...");
exit(1);
} else {
@@ -468,7 +468,7 @@ sds catAppendOnlyExpireAtCommand(sds buf, struct redisCommand *cmd, robj *key, r
long long when;
robj *argv[3];
- /* Make sure we can use strtol */
+ /* Make sure we can use strtoll */
seconds = getDecodedObject(seconds);
when = strtoll(seconds->ptr,NULL,10);
/* Convert argument into milliseconds for EXPIRE, SETEX, EXPIREAT */
@@ -499,7 +499,7 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a
robj *tmpargv[3];
/* The DB this command was targeting is not the same as the last command
- * we appendend. To issue a SELECT command is needed. */
+ * we appended. To issue a SELECT command is needed. */
if (dictid != server.aof_selected_db) {
char seldb[64];
@@ -593,7 +593,7 @@ void freeFakeClient(struct redisClient *c) {
zfree(c);
}
-/* Replay the append log file. On error REDIS_OK is returned. On non fatal
+/* Replay the append log file. On success REDIS_OK is returned. On non fatal
* error (the append only file is zero-length) REDIS_ERR is returned. On
* fatal error an error message is logged and the program exits. */
int loadAppendOnlyFile(char *filename) {
@@ -1000,7 +1000,7 @@ int rewriteHashObject(rio *r, robj *key, robj *o) {
* the difference accumulated from the parent into a buffer, that is
* concatenated at the end of the rewrite. */
ssize_t aofReadDiffFromParent(void) {
- char buf[65536]; /* Default pipe buffer size on most Linux sytems. */
+ char buf[65536]; /* Default pipe buffer size on most Linux systems. */
ssize_t nread, total = 0;
while ((nread =
@@ -1114,7 +1114,7 @@ int rewriteAppendOnlyFile(char *filename) {
/* Read again a few times to get more data from the parent.
* We can't read forever (the server may receive data from clients
- * fater than it is able to send data to the child), so we try to read
+ * faster than it is able to send data to the child), so we try to read
* some more data in a loop as soon as there is a good chance more data
* will come. If it looks like we are wasting time, we abort (this
* happens after 20 ms without new data). */
@@ -1250,7 +1250,7 @@ void aofClosePipes(void) {
}
/* ----------------------------------------------------------------------------
- * AOF backgorund rewrite
+ * AOF background rewrite
* ------------------------------------------------------------------------- */
/* This is how rewriting of the append only file in background works:
@@ -1392,7 +1392,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency);
redisLog(REDIS_NOTICE,
- "Redidual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024));
+ "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024));
/* The only remaining thing to do is to rename the temporary file to
* the configured file and switch the file descriptor used to do AOF
diff --git a/src/cluster.c b/src/cluster.c
index 821fe1734..149c9d937 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -124,7 +124,7 @@ int clusterLoadConfig(char *filename) {
return REDIS_ERR;
}
- /* Parse the file. Note that single liens of the cluster config file can
+ /* Parse the file. Note that single lines of the cluster config file can
* be really long as they include all the hash slots of the node.
* This means in the worst possible case, half of the Redis slots will be
* present in a single line, possibly in importing or migrating state, so
@@ -1133,7 +1133,7 @@ int clusterStartHandshake(char *ip, int port) {
/* Add the node with a random address (NULL as first argument to
* createClusterNode()). Everything will be fixed during the
- * handskake. */
+ * handshake. */
n = createClusterNode(NULL,REDIS_NODE_HANDSHAKE|REDIS_NODE_MEET);
memcpy(n->ip,norm_ip,sizeof(n->ip));
n->port = port;
@@ -1284,7 +1284,7 @@ void clusterSetNodeAsMaster(clusterNode *n) {
* node (see the function comments for more info).
*
* The 'sender' is the node for which we received a configuration update.
- * Sometimes it is not actaully the "Sender" of the information, like in the case
+ * Sometimes it is not actually the "Sender" of the information, like in the case
* we receive the info via an UPDATE packet. */
void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) {
int j;
@@ -1597,7 +1597,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_UPDATE_STATE);
}
- /* Free this node as we alrady have it. This will
+ /* Free this node as we already have it. This will
* cause the link to be freed as well. */
freeClusterNode(link->node);
return 0;
@@ -1794,7 +1794,7 @@ int clusterProcessPacket(clusterLink *link) {
}
} else {
redisLog(REDIS_NOTICE,
- "Ignoring FAIL message from unknonw node %.40s about %.40s",
+ "Ignoring FAIL message from unknown node %.40s about %.40s",
hdr->sender, hdr->data.fail.about.nodename);
}
} else if (type == CLUSTERMSG_TYPE_PUBLISH) {
@@ -1863,7 +1863,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_FSYNC_CONFIG);
- /* Check the bitmap of served slots and udpate our
+ /* Check the bitmap of served slots and update our
* config accordingly. */
clusterUpdateSlotsConfigWith(n,reportedConfigEpoch,
hdr->data.update.nodecfg.slots);
@@ -2761,7 +2761,7 @@ void clusterHandleManualFailover(void) {
/* Return ASAP if no manual failover is in progress. */
if (server.cluster->mf_end == 0) return;
- /* If mf_can_start is non-zero, the failover was alrady triggered so the
+ /* If mf_can_start is non-zero, the failover was already triggered so the
* next steps are performed by clusterHandleSlaveFailover(). */
if (server.cluster->mf_can_start) return;
@@ -3300,7 +3300,7 @@ int verifyClusterConfigWithData(void) {
* assigned to this slot. Fix this condition. */
update_config++;
- /* Case A: slot is unassigned. Take responsability for it. */
+ /* Case A: slot is unassigned. Take responsibility for it. */
if (server.cluster->slots[j] == NULL) {
redisLog(REDIS_WARNING, "I have keys for unassigned slot %d. "
"Taking responsibility for it.",j);
@@ -3613,7 +3613,7 @@ void clusterCommand(redisClient *c) {
int del = !strcasecmp(c->argv[1]->ptr,"delslots");
memset(slots,0,REDIS_CLUSTER_SLOTS);
- /* Check that all the arguments are parsable and that all the
+ /* Check that all the arguments are parseable and that all the
* slots are not already busy. */
for (j = 2; j < c->argc; j++) {
if ((slot = getSlotOrReply(c,c->argv[j])) == -1) {
@@ -4180,14 +4180,14 @@ void restoreCommand(redisClient *c) {
* This sockets are closed when the max number we cache is reached, and also
* in serverCron() when they are around for more than a few seconds. */
#define MIGRATE_SOCKET_CACHE_ITEMS 64 /* max num of items in the cache. */
-#define MIGRATE_SOCKET_CACHE_TTL 10 /* close cached socekts after 10 sec. */
+#define MIGRATE_SOCKET_CACHE_TTL 10 /* close cached sockets after 10 sec. */
typedef struct migrateCachedSocket {
int fd;
time_t last_use_time;
} migrateCachedSocket;
-/* Return a TCP scoket connected with the target instance, possibly returning
+/* Return a TCP socket connected with the target instance, possibly returning
* a cached one.
*
* This function is responsible of sending errors to the client if a
@@ -4196,7 +4196,7 @@ typedef struct migrateCachedSocket {
* attempt to free it after usage.
*
* If the caller detects an error while using the socket, migrateCloseSocket()
- * should be called so that the connection will be craeted from scratch
+ * should be called so that the connection will be created from scratch
* the next time. */
int migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) {
int fd;
@@ -4452,7 +4452,7 @@ void askingCommand(redisClient *c) {
addReply(c,shared.ok);
}
-/* The READONLY command is uesd by clients to enter the read-only mode.
+/* The READONLY command is used by clients to enter the read-only mode.
* In this mode slaves will not redirect clients as long as clients access
* with read-only commands to keys that are served by the slave's master. */
void readonlyCommand(redisClient *c) {
diff --git a/src/cluster.h b/src/cluster.h
index adad0645f..3287afe72 100644
--- a/src/cluster.h
+++ b/src/cluster.h
@@ -11,7 +11,7 @@
#define REDIS_CLUSTER_NAMELEN 40 /* sha1 hex length */
#define REDIS_CLUSTER_PORT_INCR 10000 /* Cluster port = baseport + PORT_INCR */
-/* The following defines are amunt of time, sometimes expressed as
+/* The following defines are amount of time, sometimes expressed as
* multiplicators of the node timeout value (when ending with MULT). */
#define REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT 15000
#define REDIS_CLUSTER_DEFAULT_SLAVE_VALIDITY 10 /* Slave max data age factor. */
@@ -51,7 +51,7 @@ typedef struct clusterLink {
#define REDIS_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */
#define REDIS_NODE_NOADDR 64 /* We don't know the address of this node */
#define REDIS_NODE_MEET 128 /* Send a MEET message to this node */
-#define REDIS_NODE_PROMOTED 256 /* Master was a slave propoted by failover */
+#define REDIS_NODE_PROMOTED 256 /* Master was a slave promoted by failover */
#define REDIS_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
#define nodeIsMaster(n) ((n)->flags & REDIS_NODE_MASTER)
@@ -117,7 +117,7 @@ typedef struct clusterState {
or zero if still not received. */
int mf_can_start; /* If non-zero signal that the manual failover
can start requesting masters vote. */
- /* The followign fields are uesd by masters to take state on elections. */
+ /* The following fields are used by masters to take state on elections. */
uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */
int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */
long long stats_bus_messages_sent; /* Num of msg sent via cluster bus. */
diff --git a/src/config.c b/src/config.c
index db0847935..43507000f 100644
--- a/src/config.c
+++ b/src/config.c
@@ -1493,7 +1493,7 @@ void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int
rewriteConfigRewriteLine(state,option,line,force);
}
-/* Rewrite the syslog-fability option. */
+/* Rewrite the syslog-facility option. */
void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) {
int value = server.syslog_facility, j;
int force = value != LOG_LOCAL0;
diff --git a/src/debug.c b/src/debug.c
index 641b10024..68878385f 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -852,7 +852,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
redisLog(REDIS_WARNING,
"\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n"
-" Please report the crash opening an issue on github:\n\n"
+" Please report the crash by opening an issue on github:\n\n"
" http://github.com/antirez/redis/issues\n\n"
" Suspect RAM error? Use redis-server --test-memory to verify it.\n\n"
);
diff --git a/src/dict.c b/src/dict.c
index 8eb3da34b..29d400099 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -709,72 +709,72 @@ static unsigned long rev(unsigned long v) {
/* dictScan() is used to iterate over the elements of a dictionary.
*
- * Iterating works in the following way:
+ * Iterating works the following way:
*
* 1) Initially you call the function using a cursor (v) value of 0.
* 2) The function performs one step of the iteration, and returns the
- * new cursor value that you must use in the next call.
+ * new cursor value you must use in the next call.
* 3) When the returned cursor is 0, the iteration is complete.
*
- * The function guarantees that all the elements that are present in the
- * dictionary from the start to the end of the iteration are returned.
- * However it is possible that some element is returned multiple time.
+ * The function guarantees all elements present in the
+ * dictionary get returned between the start and end of the iteration.
+ * However it is possible some elements get returned multiple times.
*
- * For every element returned, the callback 'fn' passed as argument is
- * called, with 'privdata' as first argument and the dictionar entry
+ * For every element returned, the callback argument 'fn' is
+ * called with 'privdata' as first argument and the dictionary entry
* 'de' as second argument.
*
* HOW IT WORKS.
*
- * The algorithm used in the iteration was designed by Pieter Noordhuis.
+ * The iteration algorithm was designed by Pieter Noordhuis.
* The main idea is to increment a cursor starting from the higher order
- * bits, that is, instead of incrementing the cursor normally, the bits
+ * bits. That is, instead of incrementing the cursor normally, the bits
* of the cursor are reversed, then the cursor is incremented, and finally
* the bits are reversed again.
*
- * This strategy is needed because the hash table may be resized from one
- * call to the other call of the same iteration.
+ * This strategy is needed because the hash table may be resized between
+ * iteration calls.
*
* dict.c hash tables are always power of two in size, and they
* use chaining, so the position of an element in a given table is given
- * always by computing the bitwise AND between Hash(key) and SIZE-1
+ * by computing the bitwise AND between Hash(key) and SIZE-1
* (where SIZE-1 is always the mask that is equivalent to taking the rest
* of the division between the Hash of the key and SIZE).
*
* For example if the current hash table size is 16, the mask is
- * (in binary) 1111. The position of a key in the hash table will be always
+ * (in binary) 1111. The position of a key in the hash table will always be
* the last four bits of the hash output, and so forth.
*
* WHAT HAPPENS IF THE TABLE CHANGES IN SIZE?
*
- * If the hash table grows, elements can go anyway in one multiple of
- * the old bucket: for example let's say that we already iterated with
- * a 4 bit cursor 1100, since the mask is 1111 (hash table size = 16).
+ * If the hash table grows, elements can go anywhere in one multiple of
+ * the old bucket: for example let's say we already iterated with
+ * a 4 bit cursor 1100 (the mask is 1111 because hash table size = 16).
*
- * If the hash table will be resized to 64 elements, and the new mask will
- * be 111111, the new buckets that you obtain substituting in ??1100
- * either 0 or 1, can be targeted only by keys that we already visited
+ * If the hash table will be resized to 64 elements, then the new mask will
+ * be 111111. The new buckets you obtain by substituting in ??1100
+ * with either 0 or 1 can be targeted only by keys we already visited
* when scanning the bucket 1100 in the smaller hash table.
*
* By iterating the higher bits first, because of the inverted counter, the
- * cursor does not need to restart if the table size gets bigger, and will
- * just continue iterating with cursors that don't have '1100' at the end,
- * nor any other combination of final 4 bits already explored.
+ * cursor does not need to restart if the table size gets bigger. It will
+ * continue iterating using cursors without '1100' at the end, and also
+ * without any other combination of the final 4 bits already explored.
*
* Similarly when the table size shrinks over time, for example going from
- * 16 to 8, If a combination of the lower three bits (the mask for size 8
- * is 111) was already completely explored, it will not be visited again
- * as we are sure that, we tried for example, both 0111 and 1111 (all the
+ * 16 to 8, if a combination of the lower three bits (the mask for size 8
+ * is 111) were already completely explored, it would not be visited again
+ * because we are sure we tried, for example, both 0111 and 1111 (all the
* variations of the higher bit) so we don't need to test it again.
*
* WAIT... YOU HAVE *TWO* TABLES DURING REHASHING!
*
- * Yes, this is true, but we always iterate the smaller one of the tables,
- * testing also all the expansions of the current cursor into the larger
- * table. So for example if the current cursor is 101 and we also have a
+ * Yes, this is true, but we always iterate the smaller table first, then
+ * we test all the expansions of the current cursor into the larger
+ * table. For example if the current cursor is 101 and we also have a
* larger table of size 16, we also test (0)101 and (1)101 inside the larger
* table. This reduces the problem back to having only one table, where
- * the larger one, if exists, is just an expansion of the smaller one.
+ * the larger one, if it exists, is just an expansion of the smaller one.
*
* LIMITATIONS
*
@@ -783,11 +783,11 @@ static unsigned long rev(unsigned long v) {
*
* The disadvantages resulting from this design are:
*
- * 1) It is possible that we return duplicated elements. However this is usually
+ * 1) It is possible we return elements more than once. However this is usually
* easy to deal with in the application level.
* 2) The iterator must return multiple elements per call, as it needs to always
* return all the keys chained in a given bucket, and all the expansions, so
- * we are sure we don't miss keys moving.
+ * we are sure we don't miss keys moving during rehashing.
* 3) The reverse cursor is somewhat hard to understand at first, but this
* comment is supposed to help.
*/
diff --git a/src/networking.c b/src/networking.c
index fb49b7964..c7b1c9ba7 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -1230,9 +1230,9 @@ void formatPeerId(char *peerid, size_t peerid_len, char *ip, int port) {
}
/* A Redis "Peer ID" is a colon separated ip:port pair.
- * For IPv4 it's in the form x.y.z.k:pork, example: "127.0.0.1:1234".
+ * For IPv4 it's in the form x.y.z.k:port, example: "127.0.0.1:1234".
* For IPv6 addresses we use [] around the IP part, like in "[::1]:1234".
- * For Unix socekts we use path:0, like in "/tmp/redis:0".
+ * For Unix sockets we use path:0, like in "/tmp/redis:0".
*
* A Peer ID always fits inside a buffer of REDIS_PEER_ID_LEN bytes, including
* the null term.
@@ -1259,7 +1259,7 @@ int genClientPeerId(redisClient *client, char *peerid, size_t peerid_len) {
}
/* This function returns the client peer id, by creating and caching it
- * if client->perrid is NULL, otherwise returning the cached value.
+ * if client->peerid is NULL, otherwise returning the cached value.
* The Peer ID never changes during the life of the client, however it
* is expensive to compute. */
char *getClientPeerId(redisClient *c) {
diff --git a/src/rdb.c b/src/rdb.c
index afaef2681..4d789bc2b 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -475,7 +475,7 @@ int rdbLoadObjectType(rio *rdb) {
return type;
}
-/* Save a Redis object. Returns -1 on error, 0 on success. */
+/* Save a Redis object. Returns -1 on error, number of bytes written on success. */
int rdbSaveObject(rio *rdb, robj *o) {
int n, nwritten = 0;
diff --git a/src/redis.c b/src/redis.c
index 5ce7d1d93..e7faa8859 100644
--- a/src/redis.c
+++ b/src/redis.c
@@ -407,7 +407,7 @@ void exitFromChild(int retcode) {
/*====================== Hash table type implementation ==================== */
/* This is a hash table type that uses the SDS dynamic strings library as
- * keys and radis objects as values (objects can hold SDS strings,
+ * keys and redis objects as values (objects can hold SDS strings,
* lists, sets). */
void dictVanillaFree(void *privdata, void *val)
@@ -1978,7 +1978,7 @@ void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc,
}
/* It is possible to call the function forceCommandPropagation() inside a
- * Redis command implementaiton in order to to force the propagation of a
+ * Redis command implementation in order to to force the propagation of a
* specific command execution into AOF / Replication. */
void forceCommandPropagation(redisClient *c, int flags) {
if (flags & REDIS_PROPAGATE_REPL) c->flags |= REDIS_FORCE_REPL;
diff --git a/src/redis.h b/src/redis.h
index 7ad03dc9b..a1ae0f2bc 100644
--- a/src/redis.h
+++ b/src/redis.h
@@ -499,7 +499,7 @@ typedef struct readyList {
} readyList;
/* With multiplexing we need to take per-client state.
- * Clients are taken in a liked list. */
+ * Clients are taken in a linked list. */
typedef struct redisClient {
uint64_t id; /* Client incremental unique ID. */
int fd;
diff --git a/src/replication.c b/src/replication.c
index ff0a0141a..16014c8a9 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -82,7 +82,7 @@ void resizeReplicationBacklog(long long newsize) {
server.repl_backlog = zmalloc(server.repl_backlog_size);
server.repl_backlog_histlen = 0;
server.repl_backlog_idx = 0;
- /* Next byte we have is... the next since the buffer is emtpy. */
+ /* Next byte we have is... the next since the buffer is empty. */
server.repl_backlog_off = server.master_repl_offset+1;
}
}
@@ -200,7 +200,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
/* We need to feed the buffer with the object as a bulk reply
* not just as a plain string, so create the $..CRLF payload len
- * ad add the final CRLF */
+ * and add the final CRLF */
aux[0] = '$';
len = ll2string(aux+1,sizeof(aux)-1,objlen);
aux[len+1] = '\r';
@@ -376,7 +376,7 @@ int masterTryPartialResynchronization(redisClient *c) {
listAddNodeTail(server.slaves,c);
/* We can't use the connection buffers since they are used to accumulate
* new commands at this stage. But we are sure the socket send buffer is
- * emtpy so this write will never fail actually. */
+ * empty so this write will never fail actually. */
buflen = snprintf(buf,sizeof(buf),"+CONTINUE\r\n");
if (write(c->fd,buf,buflen) != buflen) {
freeClientAsync(c);
@@ -408,7 +408,7 @@ need_full_resync:
return REDIS_ERR;
}
-/* SYNC ad PSYNC command implemenation. */
+/* SYNC and PSYNC command implementation. */
void syncCommand(redisClient *c) {
/* ignore SYNC if already slave or in monitor mode */
if (c->flags & REDIS_SLAVE) return;
@@ -1460,7 +1460,7 @@ void replicationDiscardCachedMaster(void) {
/* Turn the cached master into the current master, using the file descriptor
* passed as argument as the socket for the new master.
*
- * This funciton is called when successfully setup a partial resynchronization
+ * This function is called when successfully setup a partial resynchronization
* so the stream of data that we'll receive will start from where this
* master left. */
void replicationResurrectCachedMaster(int newfd) {
@@ -1750,7 +1750,7 @@ long long replicationGetSlaveOffset(void) {
/* --------------------------- REPLICATION CRON ---------------------------- */
-/* Replication cron funciton, called 1 time per second. */
+/* Replication cron function, called 1 time per second. */
void replicationCron(void) {
/* Non blocking connection timeout? */
if (server.masterhost &&
diff --git a/src/scripting.c b/src/scripting.c
index 5cd0b299c..77a98abdc 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -717,7 +717,7 @@ void scriptingInit(void) {
server.lua_client->flags |= REDIS_LUA_CLIENT;
}
- /* Lua beginners ofter don't use "local", this is likely to introduce
+ /* Lua beginners often don't use "local", this is likely to introduce
* subtle bugs in their code. To prevent problems we protect accesses
* to global variables. */
scriptingEnableGlobalsProtection(lua);
diff --git a/src/sds.c b/src/sds.c
index 95454e997..0ad925b4a 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -43,7 +43,7 @@
* The string is always null-termined (all the sds strings are, always) so
* even if you create an sds string with:
*
- * mystring = sdsnewlen("abc",3");
+ * mystring = sdsnewlen("abc",3);
*
* You can print the string with printf() as there is an implicit \0 at the
* end of the string. However the string is binary safe and can contain
@@ -109,7 +109,7 @@ void sdsupdatelen(sds s) {
sh->len = reallen;
}
-/* Modify an sds string on-place to make it empty (zero length).
+/* Modify an sds string in-place to make it empty (zero length).
* However all the existing buffer is not discarded but set as free space
* so that next append operations will not require allocations up to the
* number of bytes previously available. */
diff --git a/src/sentinel.c b/src/sentinel.c
index 06f53c128..8e78a2263 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -2106,7 +2106,7 @@ void sentinelPublishReplyCallback(redisAsyncContext *c, void *reply, void *privd
* or sent directly to this sentinel via the (fake) PUBLISH command of Sentinel.
*
* If the master name specified in the message is not known, the message is
- * discareded. */
+ * discarded. */
void sentinelProcessHelloMessage(char *hello, int hello_len) {
/* Format is composed of 8 tokens:
* 0=ip,1=port,2=runid,3=current_epoch,4=master_name,
diff --git a/src/util.c b/src/util.c
index 1b1798658..80242ff71 100644
--- a/src/util.c
+++ b/src/util.c
@@ -385,7 +385,7 @@ int string2l(const char *s, size_t slen, long *lval) {
}
/* Convert a double to a string representation. Returns the number of bytes
- * required. The representation should always be parsable by stdtod(3). */
+ * required. The representation should always be parsable by strtod(3). */
int d2string(char *buf, size_t len, double value) {
if (isnan(value)) {
len = snprintf(buf,len,"nan");
diff --git a/src/ziplist.c b/src/ziplist.c
index 4a0111105..64a22adfc 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -183,7 +183,7 @@ static unsigned int zipIntSize(unsigned char encoding) {
return 0;
}
-/* Encode the length 'l' writing it in 'p'. If p is NULL it just returns
+/* Encode the length 'rawlen' writing it in 'p'. If p is NULL it just returns
* the amount of bytes required to encode such a length. */
static unsigned int zipEncodeLength(unsigned char *p, unsigned char encoding, unsigned int rawlen) {
unsigned char len = 1, buf[5];
@@ -739,8 +739,8 @@ unsigned char *ziplistPrev(unsigned char *zl, unsigned char *p) {
}
}
-/* Get entry pointed to by 'p' and store in either 'e' or 'v' depending
- * on the encoding of the entry. 'e' is always set to NULL to be able
+/* Get entry pointed to by 'p' and store in either '*sstr' or 'sval' depending
+ * on the encoding of the entry. '*sstr' is always set to NULL to be able
* to find out whether the string pointer or the integer value was set.
* Return 0 if 'p' points to the end of the ziplist, 1 otherwise. */
unsigned int ziplistGet(unsigned char *p, unsigned char **sstr, unsigned int *slen, long long *sval) {
@@ -788,7 +788,8 @@ unsigned char *ziplistDeleteRange(unsigned char *zl, unsigned int index, unsigne
return (p == NULL) ? zl : __ziplistDelete(zl,p,num);
}
-/* Compare entry pointer to by 'p' with 'entry'. Return 1 if equal. */
+/* Compare the entry pointed to by 'p' with 'sstr' of length 'slen'. */
+/* Return 1 if equal. */
unsigned int ziplistCompare(unsigned char *p, unsigned char *sstr, unsigned int slen) {
zlentry entry;
unsigned char sencoding;
diff --git a/tests/cluster/cluster.tcl b/tests/cluster/cluster.tcl
index 55f979f2f..0647914dc 100644
--- a/tests/cluster/cluster.tcl
+++ b/tests/cluster/cluster.tcl
@@ -1,7 +1,7 @@
# Cluster-specific test functions.
#
# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
-# This softare is released under the BSD License. See the COPYING file for
+# This software is released under the BSD License. See the COPYING file for
# more information.
# Returns a parsed CLUSTER NODES output as a list of dictionaries.
diff --git a/tests/cluster/run.tcl b/tests/cluster/run.tcl
index 7af442ecc..69a160c4f 100644
--- a/tests/cluster/run.tcl
+++ b/tests/cluster/run.tcl
@@ -1,5 +1,5 @@
# Cluster test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
-# This softare is released under the BSD License. See the COPYING file for
+# This software is released under the BSD License. See the COPYING file for
# more information.
cd tests/cluster
diff --git a/tests/cluster/tests/03-failover-loop.tcl b/tests/cluster/tests/03-failover-loop.tcl
index 3a966732a..8e1bcd6fe 100644
--- a/tests/cluster/tests/03-failover-loop.tcl
+++ b/tests/cluster/tests/03-failover-loop.tcl
@@ -89,7 +89,7 @@ while {[incr iterations -1]} {
test "Restarting node #$tokill" {
restart_instance redis $tokill
}
-
+
test "Instance #$tokill is now a slave" {
wait_for_condition 1000 50 {
[RI $tokill role] eq {slave}
diff --git a/tests/cluster/tests/05-slave-selection.tcl b/tests/cluster/tests/05-slave-selection.tcl
index 4167d64be..6efedce5d 100644
--- a/tests/cluster/tests/05-slave-selection.tcl
+++ b/tests/cluster/tests/05-slave-selection.tcl
@@ -83,7 +83,7 @@ test "Cluster should eventually be up again" {
assert_cluster_state ok
}
-test "Node #10 should eventaully replicate node #5" {
+test "Node #10 should eventually replicate node #5" {
set port5 [get_instance_attrib redis 5 port]
wait_for_condition 1000 50 {
([lindex [R 10 role] 2] == $port5) &&
diff --git a/tests/instances.tcl b/tests/instances.tcl
index 84ebec1c2..426508f33 100644
--- a/tests/instances.tcl
+++ b/tests/instances.tcl
@@ -4,7 +4,7 @@
# instances.
#
# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
-# This softare is released under the BSD License. See the COPYING file for
+# This software is released under the BSD License. See the COPYING file for
# more information.
package require Tcl 8.5
diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl
index 4003550d1..7ea70943c 100644
--- a/tests/integration/aof.tcl
+++ b/tests/integration/aof.tcl
@@ -169,7 +169,7 @@ tags {"aof"} {
assert_equal 1 [is_alive $srv]
}
- test "Fixed AOF: Keyspace should contain values that were parsable" {
+ test "Fixed AOF: Keyspace should contain values that were parseable" {
set client [redis [dict get $srv host] [dict get $srv port]]
wait_for_condition 50 100 {
[catch {$client ping} e] == 0
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
index ae1977dc2..767349e56 100644
--- a/tests/integration/replication.tcl
+++ b/tests/integration/replication.tcl
@@ -35,17 +35,17 @@ start_server {tags {"repl"}} {
start_server {tags {"repl"}} {
r set mykey foo
-
+
start_server {} {
test {Second server should have role master at first} {
s role
} {master}
-
+
test {SLAVEOF should start with link status "down"} {
r slaveof [srv -1 host] [srv -1 port]
s master_link_status
} {down}
-
+
test {The role should immediately be changed to "slave"} {
s role
} {slave}
@@ -54,11 +54,11 @@ start_server {tags {"repl"}} {
test {Sync should have transferred keys from master} {
r get mykey
} {foo}
-
+
test {The link status should be up} {
s master_link_status
} {up}
-
+
test {SET on the master should immediately propagate} {
r -1 set mykey bar
diff --git a/tests/sentinel/run.tcl b/tests/sentinel/run.tcl
index 66198af94..f33029959 100644
--- a/tests/sentinel/run.tcl
+++ b/tests/sentinel/run.tcl
@@ -1,5 +1,5 @@
# Sentinel test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
-# This softare is released under the BSD License. See the COPYING file for
+# This software is released under the BSD License. See the COPYING file for
# more information.
cd tests/sentinel
diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl
index ad9cbe8ab..cd8ae3a34 100644
--- a/tests/support/redis.tcl
+++ b/tests/support/redis.tcl
@@ -18,7 +18,7 @@
# $r ping [list handlePong]
# }
# }
-#
+#
# set r [redis]
# $r blocking 0
# $r get fo [list handlePong]
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
index 9f92ce31e..0e2e2982a 100644
--- a/tests/support/server.tcl
+++ b/tests/support/server.tcl
@@ -178,10 +178,10 @@ proc start_server {options {code undefined}} {
dict set config $directive $arguments
}
}
-
+
# use a different directory every time a server is started
dict set config dir [tmpdir server]
-
+
# start every server on a different port
set ::port [find_available_port [expr {$::port+1}]]
dict set config port $::port
@@ -190,7 +190,7 @@ proc start_server {options {code undefined}} {
foreach {directive arguments} [concat $::global_overrides $overrides] {
dict set config $directive $arguments
}
-
+
# write new configuration to temporary file
set config_file [tmpfile redis.conf]
set fp [open $config_file w+]
@@ -208,7 +208,7 @@ proc start_server {options {code undefined}} {
} else {
exec src/redis-server $config_file > $stdout 2> $stderr &
}
-
+
# check that the server actually started
# ugly but tries to be as fast as possible...
if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
@@ -233,7 +233,7 @@ proc start_server {options {code undefined}} {
start_server_error $config_file $err
return
}
-
+
# find out the pid
while {![info exists pid]} {
regexp {PID:\s(\d+)} [exec cat $stdout] _ pid
diff --git a/tests/support/test.tcl b/tests/support/test.tcl
index bf2cb0e2f..7d390cc47 100644
--- a/tests/support/test.tcl
+++ b/tests/support/test.tcl
@@ -29,7 +29,7 @@ proc assert_error {pattern code} {
if {[catch {uplevel 1 $code} error]} {
assert_match $pattern $error
} else {
- error "assertion:Expected an error but nothing was catched"
+ error "assertion:Expected an error but nothing was caught"
}
}
diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl
index 78b979469..9d975cfb7 100644
--- a/tests/test_helper.tcl
+++ b/tests/test_helper.tcl
@@ -1,5 +1,5 @@
# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com
-# This softare is released under the BSD License. See the COPYING file for
+# This software is released under the BSD License. See the COPYING file for
# more information.
package require Tcl 8.5
@@ -306,7 +306,7 @@ proc signal_idle_client fd {
}
}
-# The the_end funciton gets called when all the test units were already
+# The the_end function gets called when all the test units were already
# executed, so the test finished.
proc the_end {} {
# TODO: print the status, exit with the right exit code.
diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl
index 15753e9e7..633cda95c 100644
--- a/tests/unit/auth.tcl
+++ b/tests/unit/auth.tcl
@@ -10,7 +10,7 @@ start_server {tags {"auth"} overrides {requirepass foobar}} {
catch {r auth wrong!} err
set _ $err
} {ERR*invalid password}
-
+
test {Arbitrary command gives an error when AUTH is required} {
catch {r set foo bar} err
set _ $err
diff --git a/tests/unit/basic.tcl b/tests/unit/basic.tcl
index 90a5b4cc3..b0b3b9bac 100644
--- a/tests/unit/basic.tcl
+++ b/tests/unit/basic.tcl
@@ -83,7 +83,7 @@ start_server {tags {"basic"}} {
for {set x 9999} {$x >= 0} {incr x -1} {
set val [r get $x]
if {$val ne $x} {
- set err "Eleemnt at position $x is $val instead of $x"
+ set err "Element at position $x is $val instead of $x"
break
}
}
@@ -323,7 +323,7 @@ start_server {tags {"basic"}} {
catch {r foobaredcommand} err
string match ERR* $err
} {1}
-
+
test {RENAME basic usage} {
r set mykey hello
r rename mykey mykey1
@@ -449,7 +449,7 @@ start_server {tags {"basic"}} {
r select 9
format $res
} {hello world foo bared}
-
+
test {MGET} {
r flushdb
r set foo BAR
@@ -505,7 +505,7 @@ start_server {tags {"basic"}} {
r set foo bar
list [r getset foo xyz] [r get foo]
} {bar xyz}
-
+
test {MSET base case} {
r mset x 10 y "foo bar" z "x x x x x x x\n\n\r\n"
r mget x y z
diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl
index 896310980..9751850ad 100644
--- a/tests/unit/bitops.tcl
+++ b/tests/unit/bitops.tcl
@@ -125,7 +125,7 @@ start_server {tags {"bitops"}} {
test {BITOP where dest and target are the same key} {
r set s "\xaa\x00\xff\x55"
r bitop not s s
- r get s
+ r get s
} "\x55\xff\x00\xaa"
test {BITOP AND|OR|XOR don't change the string with single input key} {
diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl
index 54742bb02..342bb939a 100644
--- a/tests/unit/introspection.tcl
+++ b/tests/unit/introspection.tcl
@@ -27,7 +27,7 @@ start_server {tags {"introspection"}} {
test {CLIENT LIST shows empty fields for unassigned names} {
r client list
} {*name= *}
-
+
test {CLIENT SETNAME does not accept spaces} {
catch {r client setname "foo bar"} e
set e
diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl
index 1431a2ac7..e6bf7860c 100644
--- a/tests/unit/maxmemory.tcl
+++ b/tests/unit/maxmemory.tcl
@@ -28,7 +28,7 @@ start_server {tags {"maxmemory"}} {
} {
test "maxmemory - is the memory limit honoured? (policy $policy)" {
# make sure to start with a blank instance
- r flushall
+ r flushall
# Get the current memory limit and calculate a new limit.
# We just add 100k to the current memory size so that it is
# fast for us to reach that limit.
@@ -60,7 +60,7 @@ start_server {tags {"maxmemory"}} {
} {
test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" {
# make sure to start with a blank instance
- r flushall
+ r flushall
# Get the current memory limit and calculate a new limit.
# We just add 100k to the current memory size so that it is
# fast for us to reach that limit.
@@ -102,7 +102,7 @@ start_server {tags {"maxmemory"}} {
} {
test "maxmemory - policy $policy should only remove volatile keys." {
# make sure to start with a blank instance
- r flushall
+ r flushall
# Get the current memory limit and calculate a new limit.
# We just add 100k to the current memory size so that it is
# fast for us to reach that limit.
diff --git a/tests/unit/sort.tcl b/tests/unit/sort.tcl
index 54b0cc7e2..490158f14 100644
--- a/tests/unit/sort.tcl
+++ b/tests/unit/sort.tcl
@@ -187,7 +187,7 @@ start_server {
assert_equal [lsort -real $floats] [r sort mylist]
}
- test "SORT with STORE returns zero if result is empty (github isse 224)" {
+ test "SORT with STORE returns zero if result is empty (github issue 224)" {
r flushdb
r sort foo store bar
} {0}
diff --git a/utils/lru/test-lru.rb b/utils/lru/test-lru.rb
index d4b0f88cf..ee0527ef4 100644
--- a/utils/lru/test-lru.rb
+++ b/utils/lru/test-lru.rb
@@ -49,9 +49,9 @@ inserted = r.dbsize
first_set_max_id = id
puts "#{r.dbsize} keys inserted"
-# Access keys sequencially
+# Access keys sequentially
-puts "Access keys sequencially"
+puts "Access keys sequentially"
(1..first_set_max_id).each{|id|
r.get(id)
# sleep 0.001