-rw-r--r--  .github/.codespellrc | 5
-rw-r--r--  .github/ISSUE_TEMPLATE/crash_report.md | 2
-rw-r--r--  .github/wordlist.txt | 15
-rw-r--r--  .github/workflows/spell-check.yml | 25
-rw-r--r--  CONTRIBUTING | 6
-rw-r--r--  README.md | 4
-rw-r--r--  sentinel.conf | 2
-rw-r--r--  src/acl.c | 6
-rw-r--r--  src/aof.c | 6
-rw-r--r--  src/bitops.c | 4
-rw-r--r--  src/cli_common.c | 2
-rw-r--r--  src/cli_common.h | 4
-rw-r--r--  src/cluster.c | 4
-rw-r--r--  src/config.c | 2
-rw-r--r--  src/connhelpers.h | 2
-rw-r--r--  src/crcspeed.c | 2
-rw-r--r--  src/db.c | 2
-rw-r--r--  src/debug.c | 2
-rw-r--r--  src/evict.c | 2
-rw-r--r--  src/geo.c | 8
-rw-r--r--  src/geohash_helper.c | 2
-rw-r--r--  src/hyperloglog.c | 2
-rw-r--r--  src/intset.c | 2
-rw-r--r--  src/listpack.c | 16
-rw-r--r--  src/lolwut.c | 2
-rw-r--r--  src/lolwut5.c | 2
-rw-r--r--  src/lolwut6.c | 4
-rw-r--r--  src/module.c | 138
-rw-r--r--  src/modules/hellodict.c | 2
-rw-r--r--  src/modules/hellotype.c | 2
-rw-r--r--  src/networking.c | 14
-rw-r--r--  src/notify.c | 3
-rw-r--r--  src/object.c | 6
-rw-r--r--  src/pubsub.c | 4
-rw-r--r--  src/rax.c | 20
-rw-r--r--  src/rdb.c | 12
-rw-r--r--  src/redis-benchmark.c | 6
-rw-r--r--  src/redis-check-rdb.c | 4
-rw-r--r--  src/redis-cli.c | 14
-rw-r--r--  src/replication.c | 20
-rw-r--r--  src/rio.c | 2
-rw-r--r--  src/scripting.c | 4
-rw-r--r--  src/sds.c | 8
-rw-r--r--  src/sentinel.c | 6
-rw-r--r--  src/server.c | 24
-rw-r--r--  src/server.h | 14
-rw-r--r--  src/sort.c | 2
-rw-r--r--  src/stream.h | 2
-rw-r--r--  src/t_stream.c | 22
-rw-r--r--  src/t_zset.c | 10
-rw-r--r--  src/tls.c | 2
-rw-r--r--  src/tracking.c | 6
-rw-r--r--  src/util.h | 2
-rw-r--r--  src/ziplist.c | 16
-rw-r--r--  src/zipmap.c | 8
-rw-r--r--  tests/cluster/tests/03-failover-loop.tcl | 2
-rw-r--r--  tests/cluster/tests/08-update-msg.tcl | 4
-rw-r--r--  tests/cluster/tests/12.1-replica-migration-3.tcl | 2
-rw-r--r--  tests/cluster/tests/16-transactions-on-replica.tcl | 4
-rw-r--r--  tests/cluster/tests/18-info.tcl | 8
-rw-r--r--  tests/integration/corrupt-dump-fuzzer.tcl | 2
-rw-r--r--  tests/integration/corrupt-dump.tcl | 4
-rw-r--r--  tests/integration/psync2.tcl | 2
-rw-r--r--  tests/integration/rdb.tcl | 2
-rw-r--r--  tests/integration/replication.tcl | 2
-rw-r--r--  tests/modules/commandfilter.c | 2
-rw-r--r--  tests/modules/keyspace_events.c | 2
-rw-r--r--  tests/modules/propagate.c | 2
-rw-r--r--  tests/sentinel/tests/07-down-conditions.tcl | 2
-rw-r--r--  tests/support/server.tcl | 4
-rw-r--r--  tests/support/util.tcl | 2
-rw-r--r--  tests/unit/introspection.tcl | 2
-rw-r--r--  tests/unit/memefficiency.tcl | 2
-rw-r--r--  tests/unit/moduleapi/auth.tcl | 2
-rw-r--r--  tests/unit/moduleapi/misc.tcl | 4
-rw-r--r--  tests/unit/multi.tcl | 2
-rw-r--r--  tests/unit/obuf-limits.tcl | 2
-rw-r--r--  tests/unit/scripting.tcl | 2
-rw-r--r--  tests/unit/type/stream-cgroups.tcl | 2
-rw-r--r--  utils/lru/lfu-simulation.c | 2
-rw-r--r--  utils/redis-sha1.rb | 2
-rwxr-xr-x  utils/speed-regression.tcl | 2
-rw-r--r--  utils/srandmember/showfreq.rb | 2
-rw-r--r--  utils/tracking_collisions.c | 4
84 files changed, 315 insertions, 269 deletions
diff --git a/.github/.codespellrc b/.github/.codespellrc
new file mode 100644
index 000000000..88146bef7
--- /dev/null
+++ b/.github/.codespellrc
@@ -0,0 +1,5 @@
+[codespell]
+quiet-level = 2
+count =
+skip = ./deps,./src/crc16_slottable.h
+ignore-words = ./.github/wordlist.txt
diff --git a/.github/ISSUE_TEMPLATE/crash_report.md b/.github/ISSUE_TEMPLATE/crash_report.md
index c608ccdc1..0b0350a12 100644
--- a/.github/ISSUE_TEMPLATE/crash_report.md
+++ b/.github/ISSUE_TEMPLATE/crash_report.md
@@ -14,7 +14,7 @@ Paste the complete crash log between the quotes below. Please include a few line
```
```
-**Aditional information**
+**Additional information**
1. OS distribution and version
2. Steps to reproduce (if any)
diff --git a/.github/wordlist.txt b/.github/wordlist.txt
new file mode 100644
index 000000000..119291b82
--- /dev/null
+++ b/.github/wordlist.txt
@@ -0,0 +1,15 @@
+ake
+bale
+fle
+fo
+gameboy
+mutli
+nd
+nees
+oll
+optin
+ot
+smove
+te
+tre
+cancelability
\ No newline at end of file
diff --git a/.github/workflows/spell-check.yml b/.github/workflows/spell-check.yml
new file mode 100644
index 000000000..7aaa9ffa8
--- /dev/null
+++ b/.github/workflows/spell-check.yml
@@ -0,0 +1,25 @@
+# A CI action that uses codespell to check spelling.
+# .github/.codespellrc is the config file.
+# .github/wordlist.txt is a list of words the spell check will ignore.
+# For more details, see the following link:
+# https://github.com/codespell-project/codespell
+name: Spellcheck
+
+on:
+ push:
+ pull_request:
+
+jobs:
+ build:
+ name: Spellcheck
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - name: Install prerequisites
+ run: sudo pip install codespell==2.0.0
+
+ - name: Spell check
+ run: codespell --config=./.github/.codespellrc
diff --git a/CONTRIBUTING b/CONTRIBUTING
index 22b8efe48..56b71834d 100644
--- a/CONTRIBUTING
+++ b/CONTRIBUTING
@@ -18,7 +18,7 @@ all the support in the mailing list.
There is also an active community of Redis users at Stack Overflow:
- http://stackoverflow.com/questions/tagged/redis
+ https://stackoverflow.com/questions/tagged/redis
Issues and pull requests for documentation belong on the redis-doc repo:
@@ -38,10 +38,10 @@ Here you'll see if there is consensus about your idea.
2. If in step 1 you get an acknowledgment from the project leaders, use the
following procedure to submit a patch:
- a. Fork Redis on github ( http://help.github.com/fork-a-repo/ )
+ a. Fork Redis on github ( https://docs.github.com/en/github/getting-started-with-github/fork-a-repo )
b. Create a topic branch (git checkout -b my_branch)
c. Push to your branch (git push origin my_branch)
- d. Initiate a pull request on github ( https://help.github.com/articles/creating-a-pull-request/ )
+ d. Initiate a pull request on github ( https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request )
e. Done :)
3. Keep in mind that we are very overloaded, so issues and PRs sometimes wait
diff --git a/README.md b/README.md
index eecb61daa..8de38df38 100644
--- a/README.md
+++ b/README.md
@@ -380,8 +380,8 @@ aof.c and rdb.c
As you can guess from the names, these files implement the RDB and AOF
persistence for Redis. Redis uses a persistence model based on the `fork()`
-system call in order to create a thread with the same (shared) memory
-content of the main Redis thread. This secondary thread dumps the content
+system call in order to create a process with the same (shared) memory
+content of the main Redis process. This secondary process dumps the content
of the memory on disk. This is used by `rdb.c` to create the snapshots
on disk and by `aof.c` in order to perform the AOF rewrite when the
append only file gets too big.
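
To illustrate the fork()-based model described in the README text above, here is a minimal sketch (not Redis source; the function name and arguments are illustrative): the child inherits a copy-on-write view of the parent's memory and persists it to disk while the parent process keeps serving clients.

    /* Minimal sketch of fork()-based snapshotting (illustrative, not Redis code). */
    #include <stdio.h>
    #include <unistd.h>

    int snapshot(const char *data, const char *path) {
        pid_t pid = fork();
        if (pid == 0) {                  /* Child: copy-on-write view of memory. */
            FILE *fp = fopen(path, "w");
            if (fp) { fputs(data, fp); fclose(fp); }
            _exit(0);                    /* Exit without touching the parent's state. */
        }
        return pid > 0 ? 0 : -1;         /* Parent returns immediately; child is reaped later. */
    }
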
diff --git a/sentinel.conf b/sentinel.conf
index 1b0b4cadc..c5341168e 100644
--- a/sentinel.conf
+++ b/sentinel.conf
@@ -178,7 +178,7 @@ acllog-max-len 128
# incoming connections (via ACL), and for outgoing connections (via
# sentinel-user and sentinel-pass)
#
-# The requirepass is not compatable with aclfile option and the ACL LOAD
+# The requirepass is not compatible with aclfile option and the ACL LOAD
# command, these will cause requirepass to be ignored.
# sentinel sentinel-user <username>
diff --git a/src/acl.c b/src/acl.c
index 86f73fe7e..32c1f5dc3 100644
--- a/src/acl.c
+++ b/src/acl.c
@@ -1363,7 +1363,7 @@ int ACLCheckPubsubPerm(client *c, int idx, int count, int literal, int *idxptr)
}
-/* Check whether the command is ready to be exceuted by ACLCheckCommandPerm.
+/* Check whether the command is ready to be executed by ACLCheckCommandPerm.
* If check passes, then check whether pub/sub channels of the command is
* ready to be executed by ACLCheckPubsubPerm */
int ACLCheckAllPerm(client *c, int *idxptr) {
@@ -2254,8 +2254,8 @@ void authCommand(client *c) {
* will just use "default" as username. */
robj *username, *password;
if (c->argc == 2) {
- /* Mimic the old behavior of giving an error for the two commands
- * from if no password is configured. */
+ /* Mimic the old behavior of giving an error for the two argument
+ * form if no password is configured. */
if (DefaultUser->flags & USER_FLAG_NOPASS) {
addReplyError(c,"AUTH <password> called without any password "
"configured for the default user. Are you sure "
diff --git a/src/aof.c b/src/aof.c
index 38da4bf31..4d842e519 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -419,7 +419,7 @@ void flushAppendOnlyFile(int force) {
* than two seconds this is still ok. Postpone again. */
return;
}
- /* Otherwise fall trough, and go write since we can't wait
+ /* Otherwise fall through, and go write since we can't wait
* over two seconds. */
server.aof_delayed_fsync++;
serverLog(LL_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
@@ -1745,7 +1745,7 @@ int rewriteAppendOnlyFileBackground(void) {
server.aof_rewrite_scheduled = 0;
server.aof_rewrite_time_start = time(NULL);
- /* We set appendseldb to -1 in order to force the next call to the
+ /* We set aof_selected_db to -1 in order to force the next call to the
* feedAppendOnlyFile() to issue a SELECT command, so the differences
* accumulated by the parent into server.aof_rewrite_buf will start
* with a SELECT statement and it will be safe to merge. */
@@ -1885,7 +1885,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK);
} else {
/* AOF enabled */
- oldfd = -1; /* We'll set this to the current AOF filedes later. */
+ oldfd = -1; /* We'll set this to the current AOF file descriptor later. */
}
/* Rename the temporary file. This will not unlink the target file if
diff --git a/src/bitops.c b/src/bitops.c
index afd79ad88..3994b01c6 100644
--- a/src/bitops.c
+++ b/src/bitops.c
@@ -472,7 +472,7 @@ int getBitfieldTypeFromArgument(client *c, robj *o, int *sign, int *bits) {
return C_OK;
}
-/* This is an helper function for commands implementations that need to write
+/* This is a helper function for commands implementations that need to write
* bits to a string object. The command creates or pad with zeroes the string
* so that the 'maxbit' bit can be addressed. The object is finally
* returned. Otherwise if the key holds a wrong type NULL is returned and
@@ -1025,7 +1025,7 @@ void bitfieldGeneric(client *c, int flags) {
return;
}
- /* Lookup by making room up to the farest bit reached by
+ /* Lookup by making room up to the farthest bit reached by
* this operation. */
if ((o = lookupStringForBitCommand(c,
highest_write_offset)) == NULL) {
diff --git a/src/cli_common.c b/src/cli_common.c
index e88327ace..8ec7de70f 100644
--- a/src/cli_common.c
+++ b/src/cli_common.c
@@ -166,7 +166,7 @@ ssize_t cliWriteConn(redisContext *c, const char *buf, size_t buf_len)
*
* Do we still have data that was there prior to our buf? If so,
* restore buffer to it's original state and report no new data was
- * writen.
+ * written.
*/
if (sdslen(c->obuf) > buf_len) {
sdsrange(c->obuf, 0, -(buf_len+1));
diff --git a/src/cli_common.h b/src/cli_common.h
index 16d6ec2a9..e7f2e10e7 100644
--- a/src/cli_common.h
+++ b/src/cli_common.h
@@ -16,9 +16,9 @@ typedef struct cliSSLconfig {
char *cert;
/* Private key file to authenticate with, or NULL */
char *key;
- /* Prefered cipher list, or NULL (applies only to <= TLSv1.2) */
+ /* Preferred cipher list, or NULL (applies only to <= TLSv1.2) */
char* ciphers;
- /* Prefered ciphersuites list, or NULL (applies only to TLSv1.3) */
+ /* Preferred ciphersuites list, or NULL (applies only to TLSv1.3) */
char* ciphersuites;
} cliSSLconfig;
diff --git a/src/cluster.c b/src/cluster.c
index f34c33162..cd4def90f 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -1185,7 +1185,7 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) {
* CLUSTER nodes blacklist
*
* The nodes blacklist is just a way to ensure that a given node with a given
- * Node ID is not readded before some time elapsed (this time is specified
+ * Node ID is not re-added before some time elapsed (this time is specified
* in seconds in CLUSTER_BLACKLIST_TTL).
*
* This is useful when we want to remove a node from the cluster completely:
@@ -4203,7 +4203,7 @@ sds clusterGenNodeDescription(clusterNode *node, int use_pport) {
"connected" : "disconnected");
/* Slots served by this instance. If we already have slots info,
- * append it diretly, otherwise, generate slots only if it has. */
+ * append it directly, otherwise, generate slots only if it has. */
if (node->slots_info) {
ci = sdscatsds(ci, node->slots_info);
} else if (node->numslots > 0) {
diff --git a/src/config.c b/src/config.c
index d9fa5da2a..ed1f81fd4 100644
--- a/src/config.c
+++ b/src/config.c
@@ -581,7 +581,7 @@ void loadServerConfigFromString(char *config) {
int flags = keyspaceEventsStringToFlags(argv[1]);
if (flags == -1) {
- err = "Invalid event class character. Use 'g$lshzxeA'.";
+ err = "Invalid event class character. Use 'Ag$lshzxeKEtmd'.";
goto loaderr;
}
server.notify_keyspace_events = flags;
diff --git a/src/connhelpers.h b/src/connhelpers.h
index 86250d09e..b32e44dba 100644
--- a/src/connhelpers.h
+++ b/src/connhelpers.h
@@ -41,7 +41,7 @@
* of connections from within a handler.
*/
-/* Incremenet connection references.
+/* Increment connection references.
*
* Inside a connection handler, we guarantee refs >= 1 so it is always
* safe to connClose().
diff --git a/src/crcspeed.c b/src/crcspeed.c
index 67cb8fd9f..9682d8e0b 100644
--- a/src/crcspeed.c
+++ b/src/crcspeed.c
@@ -248,7 +248,7 @@ uint16_t crcspeed16big(uint16_t big_table[8][256], uint16_t crc_in, void *buf,
/* Return the CRC of buf[0..len-1] with initial crc, processing eight bytes
at a time using passed-in lookup table.
- This selects one of two routines depending on the endianess of
+ This selects one of two routines depending on the endianness of
the architecture. */
uint64_t crcspeed64native(uint64_t table[8][256], uint64_t crc, void *buf,
size_t len) {
diff --git a/src/db.c b/src/db.c
index 8a65f0ffb..5a71d7fd1 100644
--- a/src/db.c
+++ b/src/db.c
@@ -295,7 +295,7 @@ robj *dbRandomKey(redisDb *db) {
* it could happen that all the keys are already logically
* expired in the slave, so the function cannot stop because
* expireIfNeeded() is false, nor it can stop because
- * dictGetRandomKey() returns NULL (there are keys to return).
+ * dictGetFairRandomKey() returns NULL (there are keys to return).
* To prevent the infinite loop we do some tries, but if there
* are the conditions for an infinite loop, eventually we
* return a key name that may be already expired. */
diff --git a/src/debug.c b/src/debug.c
index 098ce6ef7..0ec4e3b97 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -1980,7 +1980,7 @@ void disableWatchdog(void) {
* of microseconds, i.e. -10 means 100 nanoseconds. */
void debugDelay(int usec) {
/* Since even the shortest sleep results in context switch and system call,
- * the way we achive short sleeps is by statistically sleeping less often. */
+ * the way we achieve short sleeps is by statistically sleeping less often. */
if (usec < 0) usec = (rand() % -usec) == 0 ? 1: 0;
if (usec) usleep(usec);
}
diff --git a/src/evict.c b/src/evict.c
index 227e15a8d..9f0aac1af 100644
--- a/src/evict.c
+++ b/src/evict.c
@@ -133,7 +133,7 @@ void evictionPoolAlloc(void) {
EvictionPoolLRU = ep;
}
-/* This is an helper function for performEvictions(), it is used in order
+/* This is a helper function for performEvictions(), it is used in order
* to populate the evictionPool with a few entries every time we want to
* expire a key. Keys with idle time bigger than one of the current
* keys are added. Keys are always added if there are free entries.
diff --git a/src/geo.c b/src/geo.c
index efb622b36..b7eebfd7a 100644
--- a/src/geo.c
+++ b/src/geo.c
@@ -213,7 +213,7 @@ void addReplyDoubleDistance(client *c, double d) {
* representing a point, and a GeoShape, appends this entry as a geoPoint
* into the specified geoArray only if the point is within the search area.
*
- * returns C_OK if the point is included, or REIDS_ERR if it is outside. */
+ * returns C_OK if the point is included, or C_ERR if it is outside. */
int geoAppendIfWithinShape(geoArray *ga, GeoShape *shape, double score, sds member) {
double distance = 0, xy[2];
@@ -241,10 +241,10 @@ int geoAppendIfWithinShape(geoArray *ga, GeoShape *shape, double score, sds memb
}
/* Query a Redis sorted set to extract all the elements between 'min' and
- * 'max', appending them into the array of geoPoint structures 'gparray'.
+ * 'max', appending them into the array of geoPoint structures 'geoArray'.
* The command returns the number of elements added to the array.
*
- * Elements which are farest than 'radius' from the specified 'x' and 'y'
+ * Elements which are farther than 'radius' from the specified 'x' and 'y'
* coordinates are not included.
*
* The ability of this function to append to an existing set of points is
@@ -330,7 +330,7 @@ void scoresOfGeoHashBox(GeoHashBits hash, GeoHashFix52Bits *min, GeoHashFix52Bit
*
* To get the min score we just use the initial hash value left
* shifted enough to get the 52 bit value. Later we increment the
- * 6 bit prefis (see the hash.bits++ statement), and get the new
+ * 6 bit prefix (see the hash.bits++ statement), and get the new
* prefix: 101011, which we align again to 52 bits to get the maximum
* value (which is excluded from the search). So we get everything
* between the two following scores (represented in binary):
diff --git a/src/geohash_helper.c b/src/geohash_helper.c
index fec193e8b..ec4dbd23a 100644
--- a/src/geohash_helper.c
+++ b/src/geohash_helper.c
@@ -216,7 +216,7 @@ GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) {
return bits;
}
-/* Calculate distance using haversin great circle distance formula. */
+/* Calculate distance using haversine great circle distance formula. */
double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) {
double lat1r, lon1r, lat2r, lon2r, u, v;
lat1r = deg_rad(lat1d);
diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index 94ae2a85b..3620d2d4a 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -899,7 +899,7 @@ promote: /* Promote to dense representation. */
* the element belongs to is incremented if needed.
*
* This function is actually a wrapper for hllSparseSet(), it only performs
- * the hashshing of the element to obtain the index and zeros run length. */
+ * the hashing of the element to obtain the index and zeros run length. */
int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) {
long index;
uint8_t count = hllPatLen(ele,elesize,&index);
diff --git a/src/intset.c b/src/intset.c
index 9ba13898d..4a9214864 100644
--- a/src/intset.c
+++ b/src/intset.c
@@ -306,7 +306,7 @@ int intsetValidateIntegrity(const unsigned char *p, size_t size, int deep) {
return 0;
}
- /* check that the size matchies (all records are inside the buffer). */
+ /* check that the size matches (all records are inside the buffer). */
uint32_t count = intrev32ifbe(is->length);
if (sizeof(*is) + count*record_size != size)
return 0;
diff --git a/src/listpack.c b/src/listpack.c
index db3518bb8..b15b1acdd 100644
--- a/src/listpack.c
+++ b/src/listpack.c
@@ -116,7 +116,7 @@
(p)[5] = ((v)>>8)&0xff; \
} while(0)
-/* Validates that 'p' is not ouside the listpack.
+/* Validates that 'p' is not outside the listpack.
* All function that return a pointer to an element in the listpack will assert
* that this element is valid, so it can be freely used.
* Generally functions such lpNext and lpDelete assume the input pointer is
@@ -125,7 +125,7 @@
assert((p) >= (lp)+LP_HDR_SIZE && (p) < (lp)+lpGetTotalBytes((lp))); \
} while (0)
-/* Similar to the above, but validates the entire element lenth rather than just
+/* Similar to the above, but validates the entire element length rather than just
* it's pointer. */
#define ASSERT_INTEGRITY_LEN(lp, p, len) do { \
assert((p) >= (lp)+LP_HDR_SIZE && (p)+(len) < (lp)+lpGetTotalBytes((lp))); \
@@ -218,7 +218,7 @@ int lpStringToInt64(const char *s, unsigned long slen, int64_t *value) {
/* Create a new, empty listpack.
* On success the new listpack is returned, otherwise an error is returned.
* Pre-allocate at least `capacity` bytes of memory,
- * over-allocated memory can be shrinked by `lpShrinkToFit`.
+ * over-allocated memory can be shrunk by `lpShrinkToFit`.
* */
unsigned char *lpNew(size_t capacity) {
unsigned char *lp = lp_malloc(capacity > LP_HDR_SIZE+1 ? capacity : LP_HDR_SIZE+1);
@@ -416,7 +416,7 @@ uint32_t lpCurrentEncodedSizeUnsafe(unsigned char *p) {
}
/* Return bytes needed to encode the length of the listpack element pointed by 'p'.
- * This includes just the encodign byte, and the bytes needed to encode the length
+ * This includes just the encoding byte, and the bytes needed to encode the length
* of the element (excluding the element data itself)
* If the element encoding is wrong then 0 is returned. */
uint32_t lpCurrentEncodedSizeBytes(unsigned char *p) {
@@ -641,7 +641,7 @@ unsigned char *lpGet(unsigned char *p, int64_t *count, unsigned char *intbuf) {
*
* If 'newp' is not NULL, at the end of a successful call '*newp' will be set
* to the address of the element just added, so that it will be possible to
- * continue an interation with lpNext() and lpPrev().
+ * continue an interaction with lpNext() and lpPrev().
*
* For deletion operations ('ele' set to NULL) 'newp' is set to the next
* element, on the right of the deleted one, or to NULL if the deleted element
@@ -879,7 +879,7 @@ int lpValidateNext(unsigned char *lp, unsigned char **pp, size_t lpbytes) {
if (!lenbytes)
return 0;
- /* make sure the encoded entry length doesn't rech outside the edge of the listpack */
+ /* make sure the encoded entry length doesn't reach outside the edge of the listpack */
if (OUT_OF_RANGE(p + lenbytes))
return 0;
@@ -888,7 +888,7 @@ int lpValidateNext(unsigned char *lp, unsigned char **pp, size_t lpbytes) {
unsigned long encodedBacklen = lpEncodeBacklen(NULL,entrylen);
entrylen += encodedBacklen;
- /* make sure the entry doesn't rech outside the edge of the listpack */
+ /* make sure the entry doesn't reach outside the edge of the listpack */
if (OUT_OF_RANGE(p + entrylen))
return 0;
@@ -925,7 +925,7 @@ int lpValidateIntegrity(unsigned char *lp, size_t size, int deep){
if (!deep)
return 1;
- /* Validate the invividual entries. */
+ /* Validate the individual entries. */
uint32_t count = 0;
unsigned char *p = lpFirst(lp);
while(p) {
diff --git a/src/lolwut.c b/src/lolwut.c
index 931f311cd..c014840e9 100644
--- a/src/lolwut.c
+++ b/src/lolwut.c
@@ -84,7 +84,7 @@ void lolwutCommand(client *c) {
}
}
-/* ========================== LOLWUT Canvase ===============================
+/* ========================== LOLWUT Canvas ===============================
* Many LOLWUT versions will likely print some computer art to the screen.
* This is the case with LOLWUT 5 and LOLWUT 6, so here there is a generic
* canvas implementation that can be reused. */
diff --git a/src/lolwut5.c b/src/lolwut5.c
index d864888ba..1240168d0 100644
--- a/src/lolwut5.c
+++ b/src/lolwut5.c
@@ -102,7 +102,7 @@ lwCanvas *lwDrawSchotter(int console_cols, int squares_per_row, int squares_per_
}
/* Converts the canvas to an SDS string representing the UTF8 characters to
- * print to the terminal in order to obtain a graphical representaiton of the
+ * print to the terminal in order to obtain a graphical representation of the
* logical canvas. The actual returned string will require a terminal that is
* width/2 large and height/4 tall in order to hold the whole image without
* overflowing or scrolling, since each Barille character is 2x4. */
diff --git a/src/lolwut6.c b/src/lolwut6.c
index 471bf66c8..1ba111c2d 100644
--- a/src/lolwut6.c
+++ b/src/lolwut6.c
@@ -32,7 +32,7 @@
* fun and interesting, and should be replaced by a new implementation at
* each new version of Redis.
*
- * Thanks to Michele Hiki Falcone for the original image that ispired
+ * Thanks to Michele Hiki Falcone for the original image that inspired
* the image, part of his game, Plaguemon.
*
* Thanks to the Shhh computer art collective for the help in tuning the
@@ -180,7 +180,7 @@ void lolwut6Command(client *c) {
return;
/* Limits. We want LOLWUT to be always reasonably fast and cheap to execute
- * so we have maximum number of columns, rows, and output resulution. */
+ * so we have maximum number of columns, rows, and output resolution. */
if (cols < 1) cols = 1;
if (cols > 1000) cols = 1000;
if (rows < 1) rows = 1;
diff --git a/src/module.c b/src/module.c
index 2b2c44555..e04a60b48 100644
--- a/src/module.c
+++ b/src/module.c
@@ -851,7 +851,7 @@ int64_t commandFlagsFromString(char *s) {
* other reason.
* * **"no-auth"**: This command can be run by an un-authenticated client.
* Normally this is used by a command that is used
- * to authenticate a client.
+ * to authenticate a client.
* * **"may-replicate"**: This command may generate replication traffic, even
* though it's not a write command.
*
@@ -962,7 +962,7 @@ long long RM_Milliseconds(void) {
* the elapsed execution time when RM_BlockedClientMeasureTimeEnd() is called.
* Within the same command, you can call multiple times
* RM_BlockedClientMeasureTimeStart() and RM_BlockedClientMeasureTimeEnd()
- * to accummulate indepedent time intervals to the background duration.
+ * to accumulate independent time intervals to the background duration.
* This method always return REDISMODULE_OK. */
int RM_BlockedClientMeasureTimeStart(RedisModuleBlockedClient *bc) {
elapsedStart(&(bc->background_timer));
@@ -1277,19 +1277,19 @@ void RM_RetainString(RedisModuleCtx *ctx, RedisModuleString *str) {
* The main difference between the two is that this function will always
* succeed, whereas RedisModule_RetainString() may fail because of an
* assertion.
-*
+*
* The function returns a pointer to RedisModuleString, which is owned
* by the caller. It requires a call to RedisModule_FreeString() to free
* the string when automatic memory management is disabled for the context.
* When automatic memory management is enabled, you can either call
* RedisModule_FreeString() or let the automation free it.
-*
+*
* This function is more efficient than RedisModule_CreateStringFromString()
* because whenever possible, it avoids copying the underlying
* RedisModuleString. The disadvantage of using this function is that it
* might not be possible to use RedisModule_StringAppendBuffer() on the
* returned RedisModuleString.
-*
+*
* It is possible to call this function with a NULL context.
*/
RedisModuleString* RM_HoldString(RedisModuleCtx *ctx, RedisModuleString *str) {
@@ -1555,7 +1555,7 @@ int RM_ReplyWithArray(RedisModuleCtx *ctx, long len) {
return REDISMODULE_OK;
}
-/* Reply to the client with a null array, simply null in RESP3
+/* Reply to the client with a null array, simply null in RESP3
* null array in RESP2.
*
* The function always returns REDISMODULE_OK. */
@@ -1566,7 +1566,7 @@ int RM_ReplyWithNullArray(RedisModuleCtx *ctx) {
return REDISMODULE_OK;
}
-/* Reply to the client with an empty array.
+/* Reply to the client with an empty array.
*
* The function always returns REDISMODULE_OK. */
int RM_ReplyWithEmptyArray(RedisModuleCtx *ctx) {
@@ -1664,7 +1664,7 @@ int RM_ReplyWithEmptyString(RedisModuleCtx *ctx) {
return REDISMODULE_OK;
}
-/* Reply with a binary safe string, which should not be escaped or filtered
+/* Reply with a binary safe string, which should not be escaped or filtered
* taking in input a C buffer pointer and length.
*
* The function always returns REDISMODULE_OK. */
@@ -1882,7 +1882,7 @@ unsigned long long RM_GetClientId(RedisModuleCtx *ctx) {
/* Return the ACL user name used by the client with the specified client ID.
* Client ID can be obtained with RM_GetClientId() API. If the client does not
- * exist, NULL is returned and errno is set to ENOENT. If the client isn't
+ * exist, NULL is returned and errno is set to ENOENT. If the client isn't
* using an ACL user, NULL is returned and errno is set to ENOTSUP */
RedisModuleString *RM_GetClientUserNameById(RedisModuleCtx *ctx, uint64_t id) {
client *client = lookupClientByID(id);
@@ -1902,7 +1902,7 @@ RedisModuleString *RM_GetClientUserNameById(RedisModuleCtx *ctx, uint64_t id) {
return str;
}
-/* This is an helper for RM_GetClientInfoById() and other functions: given
+/* This is a helper for RM_GetClientInfoById() and other functions: given
* a client, it populates the client info structure with the appropriate
* fields depending on the version provided. If the version is not valid
* then REDISMODULE_ERR is returned. Otherwise the function returns
@@ -1935,7 +1935,7 @@ int modulePopulateClientInfoStructure(void *ci, client *client, int structver) {
return REDISMODULE_OK;
}
-/* This is an helper for moduleFireServerEvent() and other functions:
+/* This is a helper for moduleFireServerEvent() and other functions:
* It populates the replication info structure with the appropriate
* fields depending on the version provided. If the version is not valid
* then REDISMODULE_ERR is returned. Otherwise the function returns
@@ -2354,7 +2354,7 @@ int RM_UnlinkKey(RedisModuleKey *key) {
* REDISMODULE_NO_EXPIRE is returned. */
mstime_t RM_GetExpire(RedisModuleKey *key) {
mstime_t expire = getExpire(key->db,key->key);
- if (expire == -1 || key->value == NULL)
+ if (expire == -1 || key->value == NULL)
return REDISMODULE_NO_EXPIRE;
expire -= mstime();
return expire >= 0 ? expire : 0;
@@ -2386,7 +2386,7 @@ int RM_SetExpire(RedisModuleKey *key, mstime_t expire) {
* REDISMODULE_NO_EXPIRE is returned. */
mstime_t RM_GetAbsExpire(RedisModuleKey *key) {
mstime_t expire = getExpire(key->db,key->key);
- if (expire == -1 || key->value == NULL)
+ if (expire == -1 || key->value == NULL)
return REDISMODULE_NO_EXPIRE;
return expire;
}
@@ -3049,8 +3049,8 @@ int RM_ZsetRangePrev(RedisModuleKey *key) {
*
* The function is variadic and the user must specify pairs of field
* names and values, both as RedisModuleString pointers (unless the
- * CFIELD option is set, see later). At the end of the field/value-ptr pairs,
- * NULL must be specified as last argument to signal the end of the arguments
+ * CFIELD option is set, see later). At the end of the field/value-ptr pairs,
+ * NULL must be specified as last argument to signal the end of the arguments
* in the variadic function.
*
* Example to set the hash argv[1] to the value argv[2]:
@@ -3093,7 +3093,7 @@ int RM_ZsetRangePrev(RedisModuleKey *key) {
*
* The number of fields existing in the hash prior to the call, which have been
* updated (its old value has been replaced by a new value) or deleted. If the
- * flag REDISMODULE_HASH_COUNT_ALL is set, insterted fields not previously
+ * flag REDISMODULE_HASH_COUNT_ALL is set, inserted fields not previously
* existing in the hash are also counted.
*
* If the return value is zero, `errno` is set (since Redis 6.2) as follows:
@@ -3333,7 +3333,7 @@ int RM_StreamAdd(RedisModuleKey *key, int flags, RedisModuleStreamID *id, RedisM
return REDISMODULE_ERR;
}
- /* Create key if necessery */
+ /* Create key if necessary */
int created = 0;
if (key->value == NULL) {
moduleCreateEmptyKey(key, REDISMODULE_KEYTYPE_STREAM);
@@ -4147,9 +4147,9 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch
if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,&error_code) !=
server.cluster->myself)
{
- if (error_code == CLUSTER_REDIR_DOWN_RO_STATE) {
+ if (error_code == CLUSTER_REDIR_DOWN_RO_STATE) {
errno = EROFS;
- } else if (error_code == CLUSTER_REDIR_DOWN_STATE) {
+ } else if (error_code == CLUSTER_REDIR_DOWN_STATE) {
errno = ENETDOWN;
} else {
errno = EPERM;
@@ -4470,7 +4470,7 @@ robj *moduleTypeDupOrReply(client *c, robj *fromkey, robj *tokey, robj *value) {
* to have significant internal complexity. To determine this, the defrag mechanism
* uses the free_effort callback and the 'active-defrag-max-scan-fields' config directive.
* NOTE: The value is passed as a `void**` and the function is expected to update the
- * pointer if the top-level value pointer is defragmented and consequentially changes.
+ * pointer if the top-level value pointer is defragmented and consequently changes.
*
* Note: the module name "AAAAAAAAA" is reserved and produces an error, it
* happens to be pretty lame as well.
@@ -4925,7 +4925,7 @@ ssize_t rdbSaveModulesAux(rio *rdb, int when) {
* foreach key,value {
* AddElement(key);
* AddElement(value);
- * EndSquence();
+ * EndSequence();
* }
*
* Because the key and value will be always in the above order, while instead
@@ -5832,7 +5832,7 @@ void moduleReleaseGIL(void) {
* etc), and the subscriber callback receives only events that match a specific
* mask of event types.
*
- * When subscribing to notifications with RedisModule_SubscribeToKeyspaceEvents
+ * When subscribing to notifications with RedisModule_SubscribeToKeyspaceEvents
* the module must provide an event type-mask, denoting the events the subscriber
* is interested in. This can be an ORed mask of any of the following flags:
*
@@ -6058,13 +6058,13 @@ int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, un
}
/* Return an array of string pointers, each string pointer points to a cluster
- * node ID of exactly REDISMODULE_NODE_ID_SIZE bytes (without any null term).
+ * node ID of exactly REDISMODULE_NODE_ID_LEN bytes (without any null term).
* The number of returned node IDs is stored into `*numnodes`.
* However if this function is called by a module not running an a Redis
* instance with Redis Cluster enabled, NULL is returned instead.
*
* The IDs returned can be used with RedisModule_GetClusterNodeInfo() in order
- * to get more information about single nodes.
+ * to get more information about single node.
*
* The array returned by this function must be freed using the function
* RedisModule_FreeClusterNodesList().
@@ -6074,7 +6074,7 @@ int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, un
* size_t count, j;
* char **ids = RedisModule_GetClusterNodesList(ctx,&count);
* for (j = 0; j < count; j++) {
- * RedisModule_Log("notice","Node %.*s",
+ * RedisModule_Log(ctx,"notice","Node %.*s",
* REDISMODULE_NODE_ID_LEN,ids[j]);
* }
* RedisModule_FreeClusterNodesList(ids);
@@ -6386,20 +6386,20 @@ int RM_GetTimerInfo(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remain
/* --------------------------------------------------------------------------
* ## Modules ACL API
*
- * Implements a hook into the authentication and authorization within Redis.
+ * Implements a hook into the authentication and authorization within Redis.
* --------------------------------------------------------------------------*/
/* This function is called when a client's user has changed and invokes the
* client's user changed callback if it was set. This callback should
- * cleanup any state the module was tracking about this client.
- *
- * A client's user can be changed through the AUTH command, module
+ * cleanup any state the module was tracking about this client.
+ *
+ * A client's user can be changed through the AUTH command, module
* authentication, and when a client is freed. */
void moduleNotifyUserChanged(client *c) {
if (c->auth_callback) {
c->auth_callback(c->id, c->auth_callback_privdata);
- /* The callback will fire exactly once, even if the user remains
+ /* The callback will fire exactly once, even if the user remains
* the same. It is expected to completely clean up the state
* so all references are cleared here. */
c->auth_callback = NULL;
@@ -6439,7 +6439,7 @@ static void moduleFreeAuthenticatedClients(RedisModule *module) {
if (!c->auth_module) continue;
RedisModule *auth_module = (RedisModule *) c->auth_module;
- if (auth_module == module) {
+ if (auth_module == module) {
revokeClientAuthentication(c);
}
}
@@ -6483,37 +6483,37 @@ int RM_FreeModuleUser(RedisModuleUser *user) {
return REDISMODULE_OK;
}
-/* Sets the permissions of a user created through the redis module
- * interface. The syntax is the same as ACL SETUSER, so refer to the
+/* Sets the permissions of a user created through the redis module
+ * interface. The syntax is the same as ACL SETUSER, so refer to the
* documentation in acl.c for more information. See RM_CreateModuleUser
* for detailed usage.
- *
+ *
* Returns REDISMODULE_OK on success and REDISMODULE_ERR on failure
* and will set an errno describing why the operation failed. */
int RM_SetModuleUserACL(RedisModuleUser *user, const char* acl) {
return ACLSetUser(user->user, acl, -1);
}
-/* Authenticate the client associated with the context with
+/* Authenticate the client associated with the context with
* the provided user. Returns REDISMODULE_OK on success and
* REDISMODULE_ERR on error.
- *
+ *
* This authentication can be tracked with the optional callback and private
* data fields. The callback will be called whenever the user of the client
* changes. This callback should be used to cleanup any state that is being
* kept in the module related to the client authentication. It will only be
* called once, even when the user hasn't changed, in order to allow for a
* new callback to be specified. If this authentication does not need to be
- * tracked, pass in NULL for the callback and privdata.
- *
+ * tracked, pass in NULL for the callback and privdata.
+ *
* If client_id is not NULL, it will be filled with the id of the client
- * that was authenticated. This can be used with the
- * RM_DeauthenticateAndCloseClient() API in order to deauthenticate a
- * previously authenticated client if the authentication is no longer valid.
- *
+ * that was authenticated. This can be used with the
+ * RM_DeauthenticateAndCloseClient() API in order to deauthenticate a
+ * previously authenticated client if the authentication is no longer valid.
+ *
* For expensive authentication operations, it is recommended to block the
* client and do the authentication in the background and then attach the user
- * to the client in a threadsafe context. */
+ * to the client in a threadsafe context. */
static int authenticateClientWithUser(RedisModuleCtx *ctx, user *user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) {
if (user->flags & USER_FLAG_DISABLED) {
return REDISMODULE_ERR;
@@ -6543,18 +6543,18 @@ static int authenticateClientWithUser(RedisModuleCtx *ctx, user *user, RedisModu
}
-/* Authenticate the current context's user with the provided redis acl user.
+/* Authenticate the current context's user with the provided redis acl user.
* Returns REDISMODULE_ERR if the user is disabled.
- *
+ *
* See authenticateClientWithUser for information about callback, client_id,
* and general usage for authentication. */
int RM_AuthenticateClientWithUser(RedisModuleCtx *ctx, RedisModuleUser *module_user, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) {
return authenticateClientWithUser(ctx, module_user->user, callback, privdata, client_id);
}
-/* Authenticate the current context's user with the provided redis acl user.
+/* Authenticate the current context's user with the provided redis acl user.
* Returns REDISMODULE_ERR if the user is disabled or the user does not exist.
- *
+ *
* See authenticateClientWithUser for information about callback, client_id,
* and general usage for authentication. */
int RM_AuthenticateClientWithACLUser(RedisModuleCtx *ctx, const char *name, size_t len, RedisModuleUserChangedFunc callback, void *privdata, uint64_t *client_id) {
@@ -6567,15 +6567,15 @@ int RM_AuthenticateClientWithACLUser(RedisModuleCtx *ctx, const char *name, size
}
/* Deauthenticate and close the client. The client resources will not be
- * be immediately freed, but will be cleaned up in a background job. This is
- * the recommended way to deauthenicate a client since most clients can't
+ * be immediately freed, but will be cleaned up in a background job. This is
+ * the recommended way to deauthenticate a client since most clients can't
* handle users becoming deauthenticated. Returns REDISMODULE_ERR when the
- * client doesn't exist and REDISMODULE_OK when the operation was successful.
- *
+ * client doesn't exist and REDISMODULE_OK when the operation was successful.
+ *
* The client ID is returned from the RM_AuthenticateClientWithUser and
* RM_AuthenticateClientWithACLUser APIs, but can be obtained through
- * the CLIENT api or through server events.
- *
+ * the CLIENT api or through server events.
+ *
* This function is not thread safe, and must be executed within the context
* of a command or thread safe context. */
int RM_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, uint64_t client_id) {
@@ -6598,7 +6598,7 @@ int RM_DeauthenticateAndCloseClient(RedisModuleCtx *ctx, uint64_t client_id) {
*
* - Connection ID does not exist
* - Connection is not a TLS connection
- * - Connection is a TLS connection but no client ceritifcate was used
+ * - Connection is a TLS connection but no client certificate was used
*/
RedisModuleString *RM_GetClientCertificate(RedisModuleCtx *ctx, uint64_t client_id) {
client *c = lookupClientByID(client_id);
@@ -6698,7 +6698,7 @@ void *RM_DictGet(RedisModuleDict *d, RedisModuleString *key, int *nokey) {
}
/* Remove the specified key from the dictionary, returning REDISMODULE_OK if
- * the key was found and delted, or REDISMODULE_ERR if instead there was
+ * the key was found and deleted, or REDISMODULE_ERR if instead there was
* no such key in the dictionary. When the operation is successful, if
* 'oldval' is not NULL, then '*oldval' is set to the value stored at the
* key before it was deleted. Using this feature it is possible to get
@@ -6720,7 +6720,7 @@ int RM_DictDel(RedisModuleDict *d, RedisModuleString *key, void *oldval) {
* operators available are:
*
* * `^` -- Seek the first (lexicographically smaller) key.
- * * `$` -- Seek the last (lexicographically biffer) key.
+ * * `$` -- Seek the last (lexicographically bigger) key.
* * `>` -- Seek the first element greater than the specified key.
* * `>=` -- Seek the first element greater or equal than the specified key.
* * `<` -- Seek the first element smaller than the specified key.
@@ -6806,7 +6806,7 @@ void *RM_DictNextC(RedisModuleDictIter *di, size_t *keylen, void **dataptr) {
/* This function is exactly like RedisModule_DictNext() but after returning
* the currently selected element in the iterator, it selects the previous
- * element (laxicographically smaller) instead of the next one. */
+ * element (lexicographically smaller) instead of the next one. */
void *RM_DictPrevC(RedisModuleDictIter *di, size_t *keylen, void **dataptr) {
if (!raxPrev(&di->ri)) return NULL;
if (keylen) *keylen = di->ri.key_len;
@@ -6829,7 +6829,7 @@ RedisModuleString *RM_DictNext(RedisModuleCtx *ctx, RedisModuleDictIter *di, voi
}
/* Like RedisModule_DictNext() but after returning the currently selected
- * element in the iterator, it selects the previous element (laxicographically
+ * element in the iterator, it selects the previous element (lexicographically
* smaller) instead of the next one. */
RedisModuleString *RM_DictPrev(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) {
size_t keylen;
@@ -6841,7 +6841,7 @@ RedisModuleString *RM_DictPrev(RedisModuleCtx *ctx, RedisModuleDictIter *di, voi
/* Compare the element currently pointed by the iterator to the specified
* element given by key/keylen, according to the operator 'op' (the set of
* valid operators are the same valid for RedisModule_DictIteratorStart).
- * If the comparision is successful the command returns REDISMODULE_OK
+ * If the comparison is successful the command returns REDISMODULE_OK
* otherwise REDISMODULE_ERR is returned.
*
* This is useful when we want to just emit a lexicographical range, so
@@ -7308,7 +7308,7 @@ int moduleUnregisterUsedAPI(RedisModule *module) {
/* Unregister all filters registered by a module.
* This is called when a module is being unloaded.
- *
+ *
* Returns the number of filters unregistered. */
int moduleUnregisterFilters(RedisModule *module) {
listIter li;
@@ -7614,7 +7614,7 @@ void RM_ScanCursorDestroy(RedisModuleScanCursor *cursor) {
* RedisModule_ScanCursorDestroy(c);
*
* It is also possible to use this API from another thread while the lock
- * is acquired during the actuall call to RM_Scan:
+ * is acquired during the actual call to RM_Scan:
*
* RedisModuleCursor *c = RedisModule_ScanCursorCreate();
* RedisModule_ThreadSafeContextLock(ctx);
@@ -7711,7 +7711,7 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) {
* RedisModule_ScanCursorDestroy(c);
*
* It is also possible to use this API from another thread while the lock is acquired during
- * the actuall call to RM_ScanKey, and re-opening the key each time:
+ * the actual call to RM_ScanKey, and re-opening the key each time:
*
* RedisModuleCursor *c = RedisModule_ScanCursorCreate();
* RedisModule_ThreadSafeContextLock(ctx);
@@ -7813,7 +7813,7 @@ int RM_ScanKey(RedisModuleKey *key, RedisModuleScanCursor *cursor, RedisModuleSc
* ## Module fork API
* -------------------------------------------------------------------------- */
-/* Create a background child process with the current frozen snaphost of the
+/* Create a background child process with the current frozen snapshot of the
* main process where you can do some processing in the background without
* affecting / freezing the traffic and no need for threads and GIL locking.
* Note that Redis allows for only one concurrent fork.
@@ -7973,7 +7973,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) {
* The above events are triggered not just when the user calls the
* relevant commands like BGSAVE, but also when a saving operation
* or AOF rewriting occurs because of internal server triggers.
- * The SYNC_RDB_START sub events are happening in the forground due to
+ * The SYNC_RDB_START sub events are happening in the foreground due to
* SAVE command, FLUSHALL, or server shutdown, and the other RDB and
* AOF sub events are executed in a background fork child, so any
* action the module takes can only affect the generated AOF or RDB,
@@ -8001,7 +8001,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) {
* int32_t dbnum; // Flushed database number, -1 for all the DBs
* // in the case of the FLUSHALL operation.
*
- * The start event is called *before* the operation is initated, thus
+ * The start event is called *before* the operation is initiated, thus
* allowing the callback to call DBSIZE or other operation on the
* yet-to-free keyspace.
*
@@ -8093,7 +8093,7 @@ void ModuleForkDoneHandler(int exitcode, int bysignal) {
*
* This event is called repeatedly called while an RDB or AOF file
* is being loaded.
- * The following sub events are availble:
+ * The following sub events are available:
*
* * `REDISMODULE_SUBEVENT_LOADING_PROGRESS_RDB`
* * `REDISMODULE_SUBEVENT_LOADING_PROGRESS_AOF`
@@ -8219,7 +8219,7 @@ int RM_IsSubEventSupported(RedisModuleEvent event, int64_t subevent) {
}
/* This is called by the Redis internals every time we want to fire an
- * event that can be interceppted by some module. The pointer 'data' is useful
+ * event that can be intercepted by some module. The pointer 'data' is useful
* in order to populate the event-specific structure when needed, in order
* to return the structure with more information to the callback.
*
@@ -8963,11 +8963,11 @@ int RM_ModuleTypeReplaceValue(RedisModuleKey *key, moduleType *mt, void *new_val
/* For a specified command, parse its arguments and return an array that
* contains the indexes of all key name arguments. This function is
- * essnetially a more efficient way to do COMMAND GETKEYS.
+ * essentially a more efficient way to do COMMAND GETKEYS.
*
* A NULL return value indicates the specified command has no keys, or
* an error condition. Error conditions are indicated by setting errno
- * as folllows:
+ * as follows:
*
* * ENOENT: Specified command does not exist.
* * EINVAL: Invalid command arity specified.
diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c
index 1428a1381..3725e432a 100644
--- a/src/modules/hellodict.c
+++ b/src/modules/hellodict.c
@@ -88,7 +88,7 @@ int cmd_KEYRANGE(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
/* Reply with the matching items. */
char *key;
size_t keylen;
- long long replylen = 0; /* Keep track of the amitted array len. */
+ long long replylen = 0; /* Keep track of the emitted array len. */
RedisModule_ReplyWithArray(ctx,REDISMODULE_POSTPONED_ARRAY_LEN);
while((key = RedisModule_DictNextC(iter,&keylen,NULL)) != NULL) {
if (replylen >= count) break;
diff --git a/src/modules/hellotype.c b/src/modules/hellotype.c
index 4f2d1d730..4a251f888 100644
--- a/src/modules/hellotype.c
+++ b/src/modules/hellotype.c
@@ -229,7 +229,7 @@ void HelloBlock_FreeData(RedisModuleCtx *ctx, void *privdata) {
RedisModule_Free(privdata);
}
-/* HELLOTYPE.BRANGE key first count timeout -- This is a blocking verison of
+/* HELLOTYPE.BRANGE key first count timeout -- This is a blocking version of
* the RANGE operation, in order to show how to use the API
* RedisModule_BlockClientOnKeys(). */
int HelloTypeBRange_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
diff --git a/src/networking.c b/src/networking.c
index 70bafb5ea..6cd48cccc 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -302,7 +302,7 @@ void _addReplyProtoToList(client *c, const char *s, size_t len) {
/* Note that 'tail' may be NULL even if we have a tail node, because when
* addReplyDeferredLen() is used, it sets a dummy node to NULL just
- * fo fill it later, when the size of the bulk length is set. */
+ * to fill it later, when the size of the bulk length is set. */
/* Append to tail string when possible. */
if (tail) {
@@ -1752,7 +1752,7 @@ int processInlineBuffer(client *c) {
/* Masters should never send us inline protocol to run actual
* commands. If this happens, it is likely due to a bug in Redis where
* we got some desynchronization in the protocol, for example
- * beause of a PSYNC gone bad.
+ * because of a PSYNC gone bad.
*
* However the is an exception: masters may send us just a newline
* to keep the connection active. */
@@ -1783,7 +1783,7 @@ int processInlineBuffer(client *c) {
return C_OK;
}
-/* Helper function. Record protocol erro details in server log,
+/* Helper function. Record protocol error details in server log,
* and set the client as CLIENT_CLOSE_AFTER_REPLY and
* CLIENT_PROTOCOL_ERROR. */
#define PROTO_DUMP_LEN 128
@@ -2367,7 +2367,7 @@ sds getAllClientsInfoString(int type) {
/* This function implements CLIENT SETNAME, including replying to the
* user with an error if the charset is wrong (in that case C_ERR is
- * returned). If the function succeeeded C_OK is returned, and it's up
+ * returned). If the function succeeded C_OK is returned, and it's up
* to the caller to send a reply if needed.
*
* Setting an empty string as name has the effect of unsetting the
@@ -2484,7 +2484,7 @@ void clientCommand(client *c) {
"UNPAUSE",
" Stop the current client pause, resuming traffic.",
"PAUSE <timeout> [WRITE|ALL]",
-" Suspend all, or just write, clients for <timout> milliseconds.",
+" Suspend all, or just write, clients for <timeout> milliseconds.",
"REPLY (ON|OFF|SKIP)",
" Control the replies sent to the current connection.",
"SETNAME <name>",
@@ -3312,7 +3312,7 @@ void flushSlavesOutputBuffers(void) {
*
* A main use case of this function is to allow pausing replication traffic
* so that a failover without data loss to occur. Replicas will continue to receive
- * traffic to faciliate this functionality.
+ * traffic to facilitate this functionality.
*
* This function is also internally used by Redis Cluster for the manual
* failover procedure implemented by CLUSTER FAILOVER.
@@ -3401,7 +3401,7 @@ void processEventsWhileBlocked(void) {
AE_FILE_EVENTS|AE_DONT_WAIT|
AE_CALL_BEFORE_SLEEP|AE_CALL_AFTER_SLEEP);
/* Note that server.events_processed_while_blocked will also get
- * incremeted by callbacks called by the event loop handlers. */
+ * incremented by callbacks called by the event loop handlers. */
server.events_processed_while_blocked += ae_events;
long long events = server.events_processed_while_blocked - startval;
if (!events) break;
diff --git a/src/notify.c b/src/notify.c
index afaddbfca..28c0048cb 100644
--- a/src/notify.c
+++ b/src/notify.c
@@ -93,8 +93,9 @@ sds keyspaceEventsFlagsToString(int flags) {
/* The API provided to the rest of the Redis core is a simple function:
*
- * notifyKeyspaceEvent(char *event, robj *key, int dbid);
+ * notifyKeyspaceEvent(int type, char *event, robj *key, int dbid);
*
+ * 'type' is the notification class we define in `server.h`.
* 'event' is a C string representing the event name.
* 'key' is a Redis object representing the key name.
* 'dbid' is the database ID where the key lives. */
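
For reference, a typical call site for this helper inside a command implementation looks like the sketch below (illustrative usage, not part of this patch; the wrapper function is an assumption, while NOTIFY_STRING is one of the notification classes defined in server.h).

    #include "server.h"

    /* Illustrative call, not part of this diff: fire a keyspace event of
     * class NOTIFY_STRING named "set" for 'key' in the client's database. */
    void exampleAfterSet(client *c, robj *key) {
        notifyKeyspaceEvent(NOTIFY_STRING, "set", key, c->db->id);
    }
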
diff --git a/src/object.c b/src/object.c
index 8e8ba7785..0f63d980a 100644
--- a/src/object.c
+++ b/src/object.c
@@ -762,7 +762,7 @@ char *strEncoding(int encoding) {
/* =========================== Memory introspection ========================= */
-/* This is an helper function with the goal of estimating the memory
+/* This is a helper function with the goal of estimating the memory
* size of a radix tree that is used to store Stream IDs.
*
* Note: to guess the size of the radix tree is not trivial, so we
@@ -1208,9 +1208,9 @@ int objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle,
* below statement will expand to lru_idle*1000/1000. */
lru_idle = lru_idle*lru_multiplier/LRU_CLOCK_RESOLUTION;
long lru_abs = lru_clock - lru_idle; /* Absolute access time. */
- /* If the LRU field underflows (since LRU it is a wrapping
+ /* If the LRU field underflow (since LRU it is a wrapping
* clock), the best we can do is to provide a large enough LRU
- * that is half-way in the circlular LRU clock we use: this way
+ * that is half-way in the circular LRU clock we use: this way
* the computed idle time for this object will stay high for quite
* some time. */
if (lru_abs < 0)
diff --git a/src/pubsub.c b/src/pubsub.c
index 3409deac2..6d58e1b89 100644
--- a/src/pubsub.c
+++ b/src/pubsub.c
@@ -344,7 +344,7 @@ void subscribeCommand(client *c) {
* expect a reply per command and so can not execute subscribe.
*
* Notice that we have a special treatment for multi because of
- * backword compatibility
+ * backward compatibility
*/
addReplyError(c, "SUBSCRIBE isn't allowed for a DENY BLOCKING client");
return;
@@ -377,7 +377,7 @@ void psubscribeCommand(client *c) {
* expect a reply per command and so can not execute subscribe.
*
* Notice that we have a special treatment for multi because of
- * backword compatibility
+ * backward compatibility
*/
addReplyError(c, "PSUBSCRIBE isn't allowed for a DENY BLOCKING client");
return;
diff --git a/src/rax.c b/src/rax.c
index 0826b974a..a82b2e7ee 100644
--- a/src/rax.c
+++ b/src/rax.c
@@ -182,7 +182,7 @@ static inline void raxStackFree(raxStack *ts) {
)
/* Allocate a new non compressed node with the specified number of children.
- * If datafiled is true, the allocation is made large enough to hold the
+ * If datafield is true, the allocation is made large enough to hold the
* associated data pointer.
* Returns the new node pointer. On out of memory NULL is returned. */
raxNode *raxNewNode(size_t children, int datafield) {
@@ -259,7 +259,7 @@ raxNode *raxAddChild(raxNode *n, unsigned char c, raxNode **childptr, raxNode **
size_t curlen = raxNodeCurrentLength(n);
n->size++;
size_t newlen = raxNodeCurrentLength(n);
- n->size--; /* For now restore the orignal size. We'll update it only on
+ n->size--; /* For now restore the original size. We'll update it only on
success at the end. */
/* Alloc the new child we will link to 'n'. */
@@ -352,8 +352,8 @@ raxNode *raxAddChild(raxNode *n, unsigned char c, raxNode **childptr, raxNode **
* we don't need to do anything if there was already some padding to use. In
* that case the final destination of the pointers will be the same, however
* in our example there was no pre-existing padding, so we added one byte
- * plus thre bytes of padding. After the next memmove() things will look
- * like thata:
+ * plus there bytes of padding. After the next memmove() things will look
+ * like that:
*
* [HDR*][abde][....][Aptr][Bptr][....][Dptr][Eptr]|AUXP|
*/
@@ -653,7 +653,7 @@ int raxGenericInsert(rax *rax, unsigned char *s, size_t len, void *data, void **
* Let $SPLITPOS be the zero-based index at which, in the
* compressed node array of characters, we stopped iterating because
* there were no more keys character to match. So in the example of
- * the node "ANNIBALE", addig the string "ANNI", the $SPLITPOS is 4.
+ * the node "ANNIBALE", adding the string "ANNI", the $SPLITPOS is 4.
*
* 1. Save the current compressed node $NEXT pointer (the pointer to the
* child element, that is always present in compressed nodes).
@@ -666,7 +666,7 @@ int raxGenericInsert(rax *rax, unsigned char *s, size_t len, void *data, void **
*
* 3. Trim the current node to contain the first $SPLITPOS characters.
* As usually if the new node length is just 1, set iscompr to 0.
- * Take the iskey / associated value as it was in the orignal node.
+ * Take the iskey / associated value as it was in the original node.
* Fix the parent's reference.
*
* 4. Set the postfix node as the only child pointer of the trimmed
@@ -1102,9 +1102,9 @@ int raxRemove(rax *rax, unsigned char *s, size_t len, void **old) {
* We try to navigate upward till there are other nodes that can be
* compressed, when we reach the upper node which is not a key and has
* a single child, we scan the chain of children to collect the
- * compressable part of the tree, and replace the current node with the
+ * compressible part of the tree, and replace the current node with the
* new one, fixing the child pointer to reference the first non
- * compressable node.
+ * compressible node.
*
* Example of case "1". A tree stores the keys "FOO" = 1 and
* "FOOBAR" = 2:
@@ -1341,7 +1341,7 @@ int raxIteratorNextStep(raxIterator *it, int noup) {
if (it->node_cb && it->node_cb(&it->node))
memcpy(cp,&it->node,sizeof(it->node));
/* For "next" step, stop every time we find a key along the
- * way, since the key is lexicograhically smaller compared to
+ * way, since the key is lexicographically smaller compared to
* what follows in the sub-children. */
if (it->node->iskey) {
it->data = raxGetData(it->node);
@@ -1409,7 +1409,7 @@ int raxIteratorNextStep(raxIterator *it, int noup) {
}
/* Seek the greatest key in the subtree at the current node. Return 0 on
- * out of memory, otherwise 1. This is an helper function for different
+ * out of memory, otherwise 1. This is a helper function for different
* iteration functions below. */
int raxSeekGreatest(raxIterator *it) {
while(it->node->size) {
diff --git a/src/rdb.c b/src/rdb.c
index 5f9989f77..dfc08afcd 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -699,7 +699,7 @@ int rdbLoadObjectType(rio *rdb) {
/* This helper function serializes a consumer group Pending Entries List (PEL)
* into the RDB file. The 'nacks' argument tells the function if also persist
- * the informations about the not acknowledged message, or if to persist
+ * the information about the not acknowledged message, or if to persist
* just the IDs: this is useful because for the global consumer group PEL
* we serialized the NACKs as well, but when serializing the local consumer
* PELs we just add the ID, that will be resolved inside the global PEL to
@@ -1446,13 +1446,13 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
/* Note that we may call this function in signal handle 'sigShutdownHandler',
* so we need guarantee all functions we call are async-signal-safe.
- * If we call this function from signal handle, we won't call bg_unlink that
+ * If we call this function from signal handle, we won't call bg_unlink that
* is not async-signal-safe. */
void rdbRemoveTempFile(pid_t childpid, int from_signal) {
char tmpfile[256];
char pid[32];
- /* Generate temp rdb file name using aync-signal safe functions. */
+ /* Generate temp rdb file name using async-signal-safe functions. */
int pid_len = ll2string(pid, sizeof(pid), childpid);
strcpy(tmpfile, "temp-");
strncpy(tmpfile+5, pid, pid_len);
@@ -1614,7 +1614,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
}
}
} else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) {
- /* Read list/set value. */
+ /* Read sorted set value. */
uint64_t zsetlen;
size_t maxelelen = 0;
zset *zs;
@@ -2625,7 +2625,7 @@ eoferr:
* to do the actual loading. Moreover the ETA displayed in the INFO
* output is initialized and finalized.
*
- * If you pass an 'rsi' structure initialied with RDB_SAVE_OPTION_INIT, the
+ * If you pass an 'rsi' structure initialized with RDB_SAVE_OPTION_INIT, the
* loading code will fill the information fields in the structure. */
int rdbLoad(char *filename, rdbSaveInfo *rsi, int rdbflags) {
FILE *fp;
@@ -2901,7 +2901,7 @@ void bgsaveCommand(client *c) {
* information inside the RDB file. Currently the structure explicitly
* contains just the currently selected DB from the master stream, however
* if the rdbSave*() family functions receive a NULL rsi structure also
- * the Replication ID/offset is not saved. The function popultes 'rsi'
+ * the Replication ID/offset is not saved. The function populates 'rsi'
* that is normally stack-allocated in the caller, returns the populated
* pointer if the instance has a valid master client, otherwise NULL
* is returned, and the RDB saving will not persist any replication related
diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c
index d124bc0dd..6761ee6f3 100644
--- a/src/redis-benchmark.c
+++ b/src/redis-benchmark.c
@@ -1466,7 +1466,7 @@ int parseOptions(int argc, const char **argv) {
config.idlemode = 1;
} else if (!strcmp(argv[i],"-e")) {
printf("WARNING: -e option has been deprecated. "
- "We now immediatly exit on error to avoid false results.\n");
+ "We now immediately exit on error to avoid false results.\n");
} else if (!strcmp(argv[i],"-t")) {
if (lastarg) goto invalid;
/* We get the list of tests to run as a string in the form
@@ -1586,11 +1586,11 @@ usage:
" --insecure Allow insecure TLS connection by skipping cert validation.\n"
" --cert <file> Client certificate to authenticate with.\n"
" --key <file> Private key file to authenticate with.\n"
-" --tls-ciphers <list> Sets the list of prefered ciphers (TLSv1.2 and below)\n"
+" --tls-ciphers <list> Sets the list of preferred ciphers (TLSv1.2 and below)\n"
" in order of preference from highest to lowest separated by colon (\":\").\n"
" See the ciphers(1ssl) manpage for more information about the syntax of this string.\n"
#ifdef TLS1_3_VERSION
-" --tls-ciphersuites <list> Sets the list of prefered ciphersuites (TLSv1.3)\n"
+" --tls-ciphersuites <list> Sets the list of preferred ciphersuites (TLSv1.3)\n"
" in order of preference from highest to lowest separated by colon (\":\").\n"
" See the ciphers(1ssl) manpage for more information about the syntax of this string,\n"
" and specifically for TLSv1.3 ciphersuites.\n"
diff --git a/src/redis-check-rdb.c b/src/redis-check-rdb.c
index 6ddfda7ff..8f57fa4d4 100644
--- a/src/redis-check-rdb.c
+++ b/src/redis-check-rdb.c
@@ -128,7 +128,7 @@ void rdbCheckError(const char *fmt, ...) {
rdbShowGenericInfo();
}
-/* Print informations during RDB checking. */
+/* Print information during RDB checking. */
void rdbCheckInfo(const char *fmt, ...) {
char msg[1024];
va_list ap;
@@ -265,7 +265,7 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) {
} else if (type == RDB_OPCODE_AUX) {
/* AUX: generic string-string fields. Use to add state to RDB
* which is backward compatible. Implementations of RDB loading
- * are requierd to skip AUX fields they don't understand.
+ * are required to skip AUX fields they don't understand.
*
* An AUX field is composed of two strings: key and value. */
robj *auxkey, *auxval;
diff --git a/src/redis-cli.c b/src/redis-cli.c
index 39535a9e0..b1d2d13ed 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -1077,7 +1077,7 @@ int isColorTerm(void) {
return t != NULL && strstr(t,"xterm") != NULL;
}
-/* Helper function for sdsCatColorizedLdbReply() appending colorize strings
+/* Helper function for sdsCatColorizedLdbReply() appending colorize strings
* to an SDS string. */
sds sdscatcolor(sds o, char *s, size_t len, char *color) {
if (!isColorTerm()) return sdscatlen(o,s,len);
@@ -1893,11 +1893,11 @@ static void usage(void) {
" --insecure Allow insecure TLS connection by skipping cert validation.\n"
" --cert <file> Client certificate to authenticate with.\n"
" --key <file> Private key file to authenticate with.\n"
-" --tls-ciphers <list> Sets the list of prefered ciphers (TLSv1.2 and below)\n"
+" --tls-ciphers <list> Sets the list of preferred ciphers (TLSv1.2 and below)\n"
" in order of preference from highest to lowest separated by colon (\":\").\n"
" See the ciphers(1ssl) manpage for more information about the syntax of this string.\n"
#ifdef TLS1_3_VERSION
-" --tls-ciphersuites <list> Sets the list of prefered ciphersuites (TLSv1.3)\n"
+" --tls-ciphersuites <list> Sets the list of preferred ciphersuites (TLSv1.3)\n"
" in order of preference from highest to lowest separated by colon (\":\").\n"
" See the ciphers(1ssl) manpage for more information about the syntax of this string,\n"
" and specifically for TLSv1.3 ciphersuites.\n"
@@ -1909,7 +1909,7 @@ static void usage(void) {
" --quoted-input Force input to be handled as quoted strings.\n"
" --csv Output in CSV format.\n"
" --show-pushes <yn> Whether to print RESP3 PUSH messages. Enabled by default when\n"
-" STDOUT is a tty but can be overriden with --show-pushes no.\n"
+" STDOUT is a tty but can be overridden with --show-pushes no.\n"
" --stat Print rolling stats about server: mem, clients, ...\n"
" --latency Enter a special mode continuously sampling latency.\n"
" If you use this mode in an interactive session it runs\n"
@@ -2639,7 +2639,7 @@ static int parseClusterNodeAddress(char *addr, char **ip_ptr, int *port_ptr,
* been provided it must be in the form of 'ip:port', elsewhere
* the first argument must be the ip and the second one the port.
* If host and port can be detected, it returns 1 and it stores host and
- * port into variables referenced by'ip_ptr' and 'port_ptr' pointers,
+ * port into variables referenced by 'ip_ptr' and 'port_ptr' pointers,
* elsewhere it returns 0. */
static int getClusterHostFromCmdArgs(int argc, char **argv,
char **ip_ptr, int *port_ptr) {
@@ -2992,7 +2992,7 @@ result:
* So a greater score means a worse anti-affinity level, while zero
* means perfect anti-affinity.
*
- * The anti affinity optimizator will try to get a score as low as
+ * The anti affinity optimization will try to get a score as low as
* possible. Since we do not want to sacrifice the fact that slaves should
* not be in the same host as the master, we assign 10000 times the score
* to this violation, so that we'll optimize for the second factor only
@@ -8183,7 +8183,7 @@ static void LRUTestMode(void) {
}
/*------------------------------------------------------------------------------
- * Intrisic latency mode.
+ * Intrinsic latency mode.
*
* Measure max latency of a running process that does not result from
* syscalls. Basically this software should provide a hint about how much
diff --git a/src/replication.c b/src/replication.c
index bd0c6ca8c..7e27a9b56 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -789,7 +789,7 @@ void syncCommand(client *c) {
/* Increment stats for failed PSYNCs, but only if the
* replid is not "?", as this is used by slaves to force a full
- * resync on purpose when they are not albe to partially
+ * resync on purpose when they are not able to partially
* resync. */
if (master_replid[0] != '?') server.stat_sync_partial_err++;
}
@@ -870,7 +870,7 @@ void syncCommand(client *c) {
* in order to synchronize. */
serverLog(LL_NOTICE,"Current BGSAVE has socket target. Waiting for next BGSAVE for SYNC");
- /* CASE 3: There is no BGSAVE is progress. */
+ /* CASE 3: There is no BGSAVE in progress. */
} else {
if (server.repl_diskless_sync && (c->slave_capa & SLAVE_CAPA_EOF) &&
server.repl_diskless_sync_delay)
@@ -1234,7 +1234,7 @@ void rdbPipeReadHandler(struct aeEventLoop *eventLoop, int fd, void *clientData,
}
serverLog(LL_WARNING,"Diskless rdb transfer, done reading from pipe, %d replicas still up.", stillUp);
/* Now that the replicas have finished reading, notify the child that it's safe to exit.
- * When the server detectes the child has exited, it can mark the replica as online, and
+ * When the server detects the child has exited, it can mark the replica as online, and
* start streaming the replication buffers. */
close(server.rdb_child_exit_pipe);
server.rdb_child_exit_pipe = -1;
@@ -1336,7 +1336,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
*
* So things work like that:
*
- * 1. We end trasnferring the RDB file via socket.
+ * 1. We end transferring the RDB file via socket.
* 2. The replica is put ONLINE but the write handler
* is not installed.
* 3. The replica however goes really online, and pings us
@@ -1351,7 +1351,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
* in advance). Detecting such final EOF string is much
* simpler and less CPU intensive if no more data is sent
* after such final EOF. So we don't want to glue the end of
- * the RDB trasfer with the start of the other replication
+ * the RDB transfer with the start of the other replication
* data. */
slave->replstate = SLAVE_STATE_ONLINE;
slave->repl_put_online_on_ack = 1;
@@ -1717,7 +1717,7 @@ void readSyncBulkPayload(connection *conn) {
* such case we want just to read the RDB file in memory. */
serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Flushing old data");
- /* We need to stop any AOF rewriting child before flusing and parsing
+ /* We need to stop any AOF rewriting child before flushing and parsing
* the RDB, otherwise we'll create a copy-on-write disaster. */
if (server.aof_state != AOF_OFF) stopAppendOnly();
@@ -2408,7 +2408,7 @@ void syncWithMaster(connection *conn) {
server.repl_state = REPL_STATE_SEND_PSYNC;
}
- /* Try a partial resynchonization. If we don't have a cached master
+ /* Try a partial resynchronization. If we don't have a cached master
* slaveTryPartialResynchronization() will at least try to use PSYNC
* to start a full resynchronization so that we get the master replid
* and the global offset, to try a partial resync at the next
@@ -2901,7 +2901,7 @@ void replicationCacheMaster(client *c) {
unlinkClient(c);
/* Reset the master client so that's ready to accept new commands:
- * we want to discard te non processed query buffers and non processed
+ * we want to discard the non processed query buffers and non processed
* offsets, including pending transactions, already populated arguments,
* pending outputs to the master. */
sdsclear(server.master->querybuf);
@@ -2935,13 +2935,13 @@ void replicationCacheMaster(client *c) {
replicationHandleMasterDisconnection();
}
-/* This function is called when a master is turend into a slave, in order to
+/* This function is called when a master is turned into a slave, in order to
* create from scratch a cached master for the new client, that will allow
* to PSYNC with the slave that was promoted as the new master after a
* failover.
*
* Assuming this instance was previously the master instance of the new master,
- * the new master will accept its replication ID, and potentiall also the
+ * the new master will accept its replication ID, and potentially also the
* current offset if no data was lost during the failover. So we use our
* current replication ID and offset in order to synthesize a cached master. */
void replicationCacheMasterUsingMyself(void) {
diff --git a/src/rio.c b/src/rio.c
index 0d107708f..529f0aeb9 100644
--- a/src/rio.c
+++ b/src/rio.c
@@ -310,7 +310,7 @@ static size_t rioFdWrite(rio *r, const void *buf, size_t len) {
if (!doflush)
return 1;
}
- /* Flusing the buffered data. set 'p' and 'len' accordintly. */
+ /* Flushing the buffered data. set 'p' and 'len' accordingly. */
p = (unsigned char*) r->io.fd.buf;
len = sdslen(r->io.fd.buf);
}
diff --git a/src/scripting.c b/src/scripting.c
index 740ef2766..73f40e3f9 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -1667,7 +1667,7 @@ void evalGenericCommand(client *c, int evalsha) {
* To do so we use a cache of SHA1s of scripts that we already propagated
* as full EVAL, that's called the Replication Script Cache.
*
- * For replication, everytime a new slave attaches to the master, we need to
+ * For replication, every time a new slave attaches to the master, we need to
* flush our cache of scripts that can be replicated as EVALSHA, while
* for AOF we need to do so every time we rewrite the AOF file. */
if (evalsha && !server.lua_replicate_commands) {
@@ -2275,7 +2275,7 @@ sds ldbCatStackValue(sds s, lua_State *lua, int idx) {
}
/* Produce a debugger log entry representing the value of the Lua object
- * currently on the top of the stack. The element is ot popped nor modified.
+ * currently on the top of the stack. The element is not popped nor modified.
* Check ldbCatStackValue() for the actual implementation. */
void ldbLogStackValue(lua_State *lua, char *prefix) {
sds s = sdsnew(prefix);
diff --git a/src/sds.c b/src/sds.c
index 2addd56a5..17cd6ae69 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -92,7 +92,7 @@ static inline size_t sdsTypeMaxSize(char type) {
* If NULL is used for 'init' the string is initialized with zero bytes.
* If SDS_NOINIT is used, the buffer is left uninitialized;
*
- * The string is always null-termined (all the sds strings are, always) so
+ * The string is always null-terminated (all the sds strings are, always) so
* even if you create an sds string with:
*
* mystring = sdsnewlen("abc",3);
@@ -469,7 +469,7 @@ sds sdscpylen(sds s, const char *t, size_t len) {
return s;
}
-/* Like sdscpylen() but 't' must be a null-termined string so that the length
+/* Like sdscpylen() but 't' must be a null-terminated string so that the length
* of the string is obtained with strlen(). */
sds sdscpy(sds s, const char *t) {
return sdscpylen(s, t, strlen(t));
@@ -731,7 +731,7 @@ sds sdscatfmt(sds s, char const *fmt, ...) {
}
/* Remove the part of the string from left and from right composed just of
- * contiguous characters found in 'cset', that is a null terminted C string.
+ * contiguous characters found in 'cset', that is a null terminated C string.
*
* After the call, the modified sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call.
@@ -1179,7 +1179,7 @@ sds sdstemplate(const char *template, sdstemplate_callback_t cb_func, void *cb_a
res = sdscat(res, p);
break;
} else if (sv > p) {
- /* Found: copy anything up to the begining of the variable */
+ /* Found: copy anything up to the beginning of the variable */
res = sdscatlen(res, p, sv - p);
}
diff --git a/src/sentinel.c b/src/sentinel.c
index 662909d4a..060e499ad 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -1422,7 +1422,7 @@ sentinelRedisInstance *sentinelRedisInstanceLookupSlave(
/* We need to handle a slave_addr that is potentially a hostname.
* If that is the case, depending on configuration we either resolve
- * it and use the IP addres or fail.
+ * it and use the IP address or fail.
*/
addr = createSentinelAddr(slave_addr, port);
if (!addr) return NULL;
@@ -3550,7 +3550,7 @@ void sentinelCommand(client *c) {
"SENTINELS <master-name>",
" Show a list of Sentinel instances for this master and their state.",
"SET <master-name> <option> <value>",
-" Set configuration paramters for certain masters.",
+" Set configuration parameters for certain masters.",
"SIMULATE-FAILURE (CRASH-AFTER-ELECTION|CRASH-AFTER-PROMOTION|HELP)",
" Simulate a Sentinel crash.",
NULL
@@ -3990,7 +3990,7 @@ void sentinelSetCommand(client *c) {
int old_j = j; /* Used to know what to log as an event. */
if (!strcasecmp(option,"down-after-milliseconds") && moreargs > 0) {
- /* down-after-millisecodns <milliseconds> */
+ /* down-after-milliseconds <milliseconds> */
robj *o = c->argv[++j];
if (getLongLongFromObject(o,&ll) == C_ERR || ll <= 0) {
badarg = j;
diff --git a/src/server.c b/src/server.c
index 428e4aef1..dcbc07a31 100644
--- a/src/server.c
+++ b/src/server.c
@@ -1759,7 +1759,7 @@ int clientsCronTrackExpansiveClients(client *c, int time_idx) {
/* Iterating all the clients in getMemoryOverheadData() is too slow and
* in turn would make the INFO command too slow. So we perform this
* computation incrementally and track the (not instantaneous but updated
- * to the second) total memory used by clients using clinetsCron() in
+ * to the second) total memory used by clients using clientsCron() in
* a more incremental way (depending on server.hz). */
int clientsCronTrackClientsMemUsage(client *c) {
size_t mem = 0;
@@ -2203,7 +2203,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
}
}
}
- /* Just for the sake of defensive programming, to avoid forgeting to
+ /* Just for the sake of defensive programming, to avoid forgetting to
* call this function when need. */
updateDictResizePolicy();
@@ -2482,7 +2482,7 @@ void afterSleep(struct aeEventLoop *eventLoop) {
/* Do NOT add anything above moduleAcquireGIL !!! */
- /* Aquire the modules GIL so that their threads won't touch anything. */
+ /* Acquire the modules GIL so that their threads won't touch anything. */
if (!ProcessingEventsWhileBlocked) {
if (moduleCount()) moduleAcquireGIL();
}
@@ -2637,7 +2637,7 @@ void createSharedObjects(void) {
shared.bulkhdr[j] = createObject(OBJ_STRING,
sdscatprintf(sdsempty(),"$%d\r\n",j));
}
- /* The following two shared objects, minstring and maxstrings, are not
+ /* The following two shared objects, minstring and maxstring, are not
* actually used for their value but as a special object meaning
* respectively the minimum possible string and the maximum possible
* string in string comparisons for the ZRANGEBYLEX command. */
@@ -2829,7 +2829,7 @@ int restartServer(int flags, mstime_t delay) {
return C_ERR;
}
- /* Close all file descriptors, with the exception of stdin, stdout, strerr
+ /* Close all file descriptors, with the exception of stdin, stdout, stderr
* which are useful if we restart a Redis server which is not daemonized. */
for (j = 3; j < (int)server.maxclients + 1024; j++) {
/* Test the descriptor validity before closing it, otherwise
@@ -3596,7 +3596,7 @@ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc,
execCommandPropagateMulti(dbid);
/* This needs to be unreachable since the dataset should be fixed during
- * client pause, otherwise data may be lossed during a failover. */
+ * client pause, otherwise data may be lost during a failover. */
serverAssert(!(areClientsPaused() && !server.client_pause_in_transaction));
if (server.aof_state != AOF_OFF && flags & PROPAGATE_AOF)
@@ -3912,7 +3912,7 @@ void call(client *c, int flags) {
}
/* Used when a command that is ready for execution needs to be rejected, due to
- * varios pre-execution checks. it returns the appropriate error to the client.
+ * various pre-execution checks. it returns the appropriate error to the client.
* If there's a transaction is flags it as dirty, and if the command is EXEC,
* it aborts the transaction.
* Note: 'reply' is expected to end with \r\n */
@@ -4230,7 +4230,7 @@ int processCommand(client *c) {
* The main objective here is to prevent abuse of client pause check
* from which replicas are exempt. */
if ((c->flags & CLIENT_SLAVE) && (is_may_replicate_command || is_write_command || is_read_command)) {
- rejectCommandFormat(c, "Replica can't interract with the keyspace");
+ rejectCommandFormat(c, "Replica can't interact with the keyspace");
return C_OK;
}
@@ -4322,7 +4322,7 @@ int prepareForShutdown(int flags) {
/* Note that, in killRDBChild normally has backgroundSaveDoneHandler
* doing it's cleanup, but in this case this code will not be reached,
* so we need to call rdbRemoveTempFile which will close fd(in order
- * to unlink file actully) in background thread.
+ * to unlink file actually) in background thread.
* The temp rdb file fd may won't be closed when redis exits quickly,
* but OS will close this fd when process exits. */
rdbRemoveTempFile(server.child_pid, 0);
@@ -5691,12 +5691,12 @@ int changeBindAddr(sds *addrlist, int addrlist_len) {
/* Re-Listen TCP and TLS */
server.ipfd.count = 0;
if (server.port != 0 && listenToPort(server.port, &server.ipfd) != C_OK) {
- serverPanic("Failed to restore old listening sockets.");
+ serverPanic("Failed to restore old listening TCP socket.");
}
server.tlsfd.count = 0;
if (server.tls_port != 0 && listenToPort(server.tls_port, &server.tlsfd) != C_OK) {
- serverPanic("Failed to restore old listening sockets.");
+ serverPanic("Failed to restore old listening TLS socket.");
}
result = C_ERR;
@@ -5959,7 +5959,7 @@ void loadDataFromDisk(void) {
memcpy(server.replid,rsi.repl_id,sizeof(server.replid));
server.master_repl_offset = rsi.repl_offset;
/* If we are a slave, create a cached master from this
- * information, in order to allow partial resynchronizations
+ * information, in order to allow partial resynchronization
* with masters. */
replicationCacheMasterUsingMyself();
selectDb(server.cached_master,rsi.repl_stream_db);
diff --git a/src/server.h b/src/server.h
index dd0ef2e8d..7b080e098 100644
--- a/src/server.h
+++ b/src/server.h
@@ -314,7 +314,7 @@ typedef enum {
REPL_STATE_CONNECTING, /* Connecting to master */
/* --- Handshake states, must be ordered --- */
REPL_STATE_RECEIVE_PING_REPLY, /* Wait for PING reply */
- REPL_STATE_SEND_HANDSHAKE, /* Send handshake sequance to master */
+ REPL_STATE_SEND_HANDSHAKE, /* Send handshake sequence to master */
REPL_STATE_RECEIVE_AUTH_REPLY, /* Wait for AUTH reply */
REPL_STATE_RECEIVE_PORT_REPLY, /* Wait for REPLCONF reply */
REPL_STATE_RECEIVE_IP_REPLY, /* Wait for REPLCONF reply */
@@ -963,7 +963,7 @@ typedef struct client {
/* In clientsCronTrackClientsMemUsage() we track the memory usage of
* each client and add it to the sum of all the clients of a given type,
* however we need to remember what was the old contribution of each
- * client, and in which categoty the client was, in order to remove it
+ * client, and in which category the client was, in order to remove it
* before adding it the new value. */
uint64_t client_cron_last_memory_usage;
int client_cron_last_memory_type;
@@ -1291,7 +1291,7 @@ struct redisServer {
long long stat_numconnections; /* Number of connections received */
long long stat_expiredkeys; /* Number of expired keys */
double stat_expired_stale_perc; /* Percentage of keys probably expired */
- long long stat_expired_time_cap_reached_count; /* Early expire cylce stops.*/
+ long long stat_expired_time_cap_reached_count; /* Early expire cycle stops.*/
long long stat_expire_cycle_time_used; /* Cumulative microseconds used. */
long long stat_evictedkeys; /* Number of evicted keys (maxmemory) */
long long stat_keyspace_hits; /* Number of successful lookups of keys */
@@ -1348,7 +1348,7 @@ struct redisServer {
int active_expire_effort; /* From 1 (default) to 10, active effort. */
int active_defrag_enabled;
int sanitize_dump_payload; /* Enables deep sanitization for ziplist and listpack in RDB and RESTORE. */
- int skip_checksum_validation; /* Disables checksum validateion for RDB and RESTORE payload. */
+ int skip_checksum_validation; /* Disable checksum validation for RDB and RESTORE payload. */
int jemalloc_bg_thread; /* Enable jemalloc background thread */
size_t active_defrag_ignore_bytes; /* minimum amount of fragmentation waste to start active defrag */
int active_defrag_threshold_lower; /* minimum percentage of fragmentation to start active defrag */
@@ -1433,10 +1433,10 @@ struct redisServer {
int rdb_pipe_bufflen; /* that was read from the the rdb pipe. */
int rdb_key_save_delay; /* Delay in microseconds between keys while
* writing the RDB. (for testings). negative
- * value means fractions of microsecons (on average). */
+ * value means fractions of microseconds (on average). */
int key_load_delay; /* Delay in microseconds between keys while
* loading aof or rdb. (for testings). negative
- * value means fractions of microsecons (on average). */
+ * value means fractions of microseconds (on average). */
/* Pipe and data structures for child -> parent info sharing. */
int child_info_pipe[2]; /* Pipe used to write the child_info_data. */
int child_info_nread; /* Num of bytes of the last read from pipe */
@@ -1649,7 +1649,7 @@ struct redisServer {
struct sentinelConfig *sentinel_config; /* sentinel config to load at startup time. */
/* Coordinate failover info */
mstime_t failover_end_time; /* Deadline for failover command. */
- int force_failover; /* If true then failover will be foreced at the
+ int force_failover; /* If true then failover will be forced at the
* deadline, otherwise failover is aborted. */
char *target_replica_host; /* Failover target host. If null during a
* failover then any replica can be used. */
diff --git a/src/sort.c b/src/sort.c
index 3b67cc639..53f9ef0cb 100644
--- a/src/sort.c
+++ b/src/sort.c
@@ -312,7 +312,7 @@ void sortCommand(client *c) {
if (sortval->type == OBJ_ZSET)
zsetConvert(sortval, OBJ_ENCODING_SKIPLIST);
- /* Objtain the length of the object to sort. */
+ /* Obtain the length of the object to sort. */
switch(sortval->type) {
case OBJ_LIST: vectorlen = listTypeLength(sortval); break;
case OBJ_SET: vectorlen = setTypeSize(sortval); break;
diff --git a/src/stream.h b/src/stream.h
index 1f2132365..dd7c46d41 100644
--- a/src/stream.h
+++ b/src/stream.h
@@ -86,7 +86,7 @@ typedef struct streamNACK {
in the last delivery. */
} streamNACK;
-/* Stream propagation informations, passed to functions in order to propagate
+/* Stream propagation information, passed to functions in order to propagate
* XCLAIM commands to AOF and slaves. */
typedef struct streamPropInfo {
robj *keyname;
diff --git a/src/t_stream.c b/src/t_stream.c
index 678fceeee..f3cfeb0b2 100644
--- a/src/t_stream.c
+++ b/src/t_stream.c
@@ -626,7 +626,7 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_
lp_count += 3; /* Add the 3 fixed fields flags + ms-diff + seq-diff. */
if (!(flags & STREAM_ITEM_FLAG_SAMEFIELDS)) {
/* If the item is not compressed, it also has the fields other than
- * the values, and an additional num-fileds field. */
+ * the values, and an additional num-fields field. */
lp_count += numfields+1;
}
lp = lpAppendInteger(lp,lp_count);
@@ -968,7 +968,7 @@ static int streamParseAddOrTrimArgsOrReply(client *c, streamAddTrimArgs *args, i
}
if (c == server.master || c->id == CLIENT_ID_AOF) {
- /* If command cam from master or from AOF we must not enforce maxnodes
+ /* If command came from master or from AOF we must not enforce maxnodes
* (The maxlen/minid argument was re-written to make sure there's no
* inconsistency). */
args->limit = 0;
@@ -1365,7 +1365,7 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam
argv[12] = shared.lastid;
argv[13] = createObjectFromStreamID(&group->last_id);
- /* We use progagate() because this code path is not always called from
+ /* We use propagate() because this code path is not always called from
* the command execution context. Moreover this will just alter the
* consumer group state, and we don't need MULTI/EXEC wrapping because
* there is no message state cross-message atomicity required. */
@@ -1390,7 +1390,7 @@ void streamPropagateGroupID(client *c, robj *key, streamCG *group, robj *groupna
argv[3] = groupname;
argv[4] = createObjectFromStreamID(&group->last_id);
- /* We use progagate() because this code path is not always called from
+ /* We use propagate() because this code path is not always called from
* the command execution context. Moreover this will just alter the
* consumer group state, and we don't need MULTI/EXEC wrapping because
* there is no message state cross-message atomicity required. */
@@ -1412,7 +1412,7 @@ void streamPropagateConsumerCreation(client *c, robj *key, robj *groupname, sds
argv[3] = groupname;
argv[4] = createObject(OBJ_STRING,sdsdup(consumername));
- /* We use progagate() because this code path is not always called from
+ /* We use propagate() because this code path is not always called from
* the command execution context. Moreover this will just alter the
* consumer group state, and we don't need MULTI/EXEC wrapping because
* there is no message state cross-message atomicity required. */
@@ -1576,7 +1576,7 @@ size_t streamReplyWithRange(client *c, stream *s, streamID *start, streamID *end
return arraylen;
}
-/* This is an helper function for streamReplyWithRange() when called with
+/* This is a helper function for streamReplyWithRange() when called with
* group and consumer arguments, but with a range that is referring to already
* delivered messages. In this case we just emit messages that are already
* in the history of the consumer, fetching the IDs from its PEL.
@@ -1944,7 +1944,7 @@ void xreadCommand(client *c) {
if (c->flags & CLIENT_LUA) {
/*
* Although the CLIENT_DENY_BLOCKING flag should protect from blocking the client
- * on Lua/MULTI/RM_Call we want special treatment for Lua to keep backword compatibility.
+ * on Lua/MULTI/RM_Call we want special treatment for Lua to keep backward compatibility.
* There is no sense to use BLOCK option within Lua. */
addReplyErrorFormat(c, "%s command is not allowed with BLOCK option from scripts", (char *)c->argv[0]->ptr);
return;
@@ -2506,7 +2506,7 @@ void xsetidCommand(client *c) {
/* XACK <key> <group> <id> <id> ... <id>
*
* Acknowledge a message as processed. In practical terms we just check the
- * pendine entries list (PEL) of the group, and delete the PEL entry both from
+ * pending entries list (PEL) of the group, and delete the PEL entry both from
* the group and the consumer (pending messages are referenced in both places).
*
* Return value of the command is the number of messages successfully
@@ -2572,7 +2572,7 @@ cleanup:
* delivery time and so forth. */
void xpendingCommand(client *c) {
int justinfo = c->argc == 3; /* Without the range just outputs general
- informations about the PEL. */
+ information about the PEL. */
robj *key = c->argv[1];
robj *groupname = c->argv[2];
robj *consumername = NULL;
@@ -2928,7 +2928,7 @@ void xclaimCommand(client *c) {
streamNACK *nack = raxFind(group->pel,buf,sizeof(buf));
/* If FORCE is passed, let's check if at least the entry
- * exists in the Stream. In such case, we'll crate a new
+ * exists in the Stream. In such case, we'll create a new
* entry in the PEL from scratch, so that XCLAIM can also
* be used to create entries in the PEL. Useful for AOF
* and replication of consumer groups. */
@@ -3548,7 +3548,7 @@ NULL
}
/* Validate the integrity stream listpack entries structure. Both in term of a
- * valid listpack, but also that the structure of the entires matches a valid
+ * valid listpack, but also that the structure of the entries matches a valid
* stream. return 1 if valid 0 if not valid. */
int streamValidateListpackIntegrity(unsigned char *lp, size_t size, int deep) {
int valid_record;
diff --git a/src/t_zset.c b/src/t_zset.c
index 1f7040d1c..333e0221e 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -594,7 +594,7 @@ int zslParseLexRangeItem(robj *item, sds *dest, int *ex) {
}
}
-/* Free a lex range structure, must be called only after zelParseLexRange()
+/* Free a lex range structure, must be called only after zslParseLexRange()
* populated the structure with success (C_OK returned). */
void zslFreeLexRange(zlexrangespec *spec) {
if (spec->min != shared.minstring &&
@@ -806,7 +806,7 @@ void zzlNext(unsigned char *zl, unsigned char **eptr, unsigned char **sptr) {
}
/* Move to the previous entry based on the values in eptr and sptr. Both are
- * set to NULL when there is no next entry. */
+ * set to NULL when there is no prev entry. */
void zzlPrev(unsigned char *zl, unsigned char **eptr, unsigned char **sptr) {
unsigned char *_eptr, *_sptr;
serverAssert(*eptr != NULL && *sptr != NULL);
@@ -1610,7 +1610,7 @@ robj *zsetDup(robj *o) {
return zobj;
}
-/* callback for to check the ziplist doesn't have duplicate recoreds */
+/* callback to check the ziplist doesn't have duplicate records */
static int _zsetZiplistValidateIntegrity(unsigned char *p, void *userdata) {
struct {
long count;
@@ -2481,7 +2481,7 @@ static void zdiffAlgorithm2(zsetopsrc *src, long setnum, zset *dstzset, size_t *
if (cardinality == 0) break;
}
- /* Redize dict if needed after removing multiple elements */
+ /* Resize dict if needed after removing multiple elements */
if (htNeedsResize(dstzset->dict)) dictResize(dstzset->dict);
/* Using this algorithm, we can't calculate the max element as we go,
@@ -3600,7 +3600,7 @@ void zrangeGenericCommand(zrange_result_handler *handler, int argc_start, int st
}
}
- /* Use defaults if not overriden by arguments. */
+ /* Use defaults if not overridden by arguments. */
if (direction == ZRANGE_DIRECTION_AUTO)
direction = ZRANGE_DIRECTION_FORWARD;
if (rangetype == ZRANGE_AUTO)
diff --git a/src/tls.c b/src/tls.c
index ffd3b0ad0..611c6dcc6 100644
--- a/src/tls.c
+++ b/src/tls.c
@@ -475,7 +475,7 @@ static void tlsEventHandler(struct aeEventLoop *el, int fd, void *clientData, in
/* Process the return code received from OpenSSL>
* Update the want parameter with expected I/O.
- * Update the connection's error state if a real error has occured.
+ * Update the connection's error state if a real error has occurred.
* Returns an SSL error code, or 0 if no further handling is required.
*/
static int handleSSLReturnCode(tls_connection *conn, int ret_value, WantIOType *want) {
diff --git a/src/tracking.c b/src/tracking.c
index a11e4b7d7..f472309f6 100644
--- a/src/tracking.c
+++ b/src/tracking.c
@@ -249,7 +249,7 @@ void trackingRememberKeys(client *c) {
/* Given a key name, this function sends an invalidation message in the
* proper channel (depending on RESP version: PubSub or Push message) and
- * to the proper client (in case fo redirection), in the context of the
+ * to the proper client (in case of redirection), in the context of the
* client 'c' with tracking enabled.
*
* In case the 'proto' argument is non zero, the function will assume that
@@ -448,7 +448,7 @@ void trackingInvalidateKeysOnFlush(int async) {
*
* So Redis allows the user to configure a maximum number of keys for the
* invalidation table. This function makes sure that we don't go over the
- * specified fill rate: if we are over, we can just evict informations about
+ * specified fill rate: if we are over, we can just evict information about
* a random key, and send invalidation messages to clients like if the key was
* modified. */
void trackingLimitUsedSlots(void) {
@@ -493,7 +493,7 @@ void trackingLimitUsedSlots(void) {
* include keys that were modified the last time by this client, in order
* to implement the NOLOOP option.
*
- * If the resultin array would be empty, NULL is returned instead. */
+ * If the resulting array would be empty, NULL is returned instead. */
sds trackingBuildBroadcastReply(client *c, rax *keys) {
raxIterator ri;
uint64_t count;
diff --git a/src/util.h b/src/util.h
index 3bf8907c0..bf2115ab7 100644
--- a/src/util.h
+++ b/src/util.h
@@ -38,7 +38,7 @@
* This should be the size of the buffer given to ld2string */
#define MAX_LONG_DOUBLE_CHARS 5*1024
-/* long double to string convertion options */
+/* long double to string conversion options */
typedef enum {
LD_STR_AUTO, /* %.17Lg */
LD_STR_HUMAN, /* %.17Lf + Trimming of trailing zeros */
diff --git a/src/ziplist.c b/src/ziplist.c
index 89270dd93..4943010ee 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -54,7 +54,7 @@
*
* The length of the previous entry, <prevlen>, is encoded in the following way:
* If this length is smaller than 254 bytes, it will only consume a single
- * byte representing the length as an unsinged 8 bit integer. When the length
+ * byte representing the length as an unsigned 8 bit integer. When the length
* is greater than or equal to 254, it will consume 5 bytes. The first byte is
* set to 254 (FE) to indicate a larger value is following. The remaining 4
* bytes take the length of the previous entry as value.
@@ -620,7 +620,7 @@ static inline int zipEntrySafe(unsigned char* zl, size_t zlbytes, unsigned char
unsigned char *zllast = zl + zlbytes - ZIPLIST_END_SIZE;
#define OUT_OF_RANGE(p) (unlikely((p) < zlfirst || (p) > zllast))
- /* If threre's no possibility for the header to reach outside the ziplist,
+ /* If there's no possibility for the header to reach outside the ziplist,
* take the fast path. (max lensize and prevrawlensize are both 5 bytes) */
if (p >= zlfirst && p + 10 < zllast) {
ZIP_DECODE_PREVLEN(p, e->prevrawlensize, e->prevrawlen);
@@ -631,16 +631,16 @@ static inline int zipEntrySafe(unsigned char* zl, size_t zlbytes, unsigned char
/* We didn't call ZIP_ASSERT_ENCODING, so we check lensize was set to 0. */
if (unlikely(e->lensize == 0))
return 0;
- /* Make sure the entry doesn't rech outside the edge of the ziplist */
+ /* Make sure the entry doesn't reach outside the edge of the ziplist */
if (OUT_OF_RANGE(p + e->headersize + e->len))
return 0;
- /* Make sure prevlen doesn't rech outside the edge of the ziplist */
+ /* Make sure prevlen doesn't reach outside the edge of the ziplist */
if (validate_prevlen && OUT_OF_RANGE(p - e->prevrawlen))
return 0;
return 1;
}
- /* Make sure the pointer doesn't rech outside the edge of the ziplist */
+ /* Make sure the pointer doesn't reach outside the edge of the ziplist */
if (OUT_OF_RANGE(p))
return 0;
@@ -664,11 +664,11 @@ static inline int zipEntrySafe(unsigned char* zl, size_t zlbytes, unsigned char
ZIP_DECODE_LENGTH(p + e->prevrawlensize, e->encoding, e->lensize, e->len);
e->headersize = e->prevrawlensize + e->lensize;
- /* Make sure the entry doesn't rech outside the edge of the ziplist */
+ /* Make sure the entry doesn't reach outside the edge of the ziplist */
if (OUT_OF_RANGE(p + e->headersize + e->len))
return 0;
- /* Make sure prevlen doesn't rech outside the edge of the ziplist */
+ /* Make sure prevlen doesn't reach outside the edge of the ziplist */
if (validate_prevlen && OUT_OF_RANGE(p - e->prevrawlen))
return 0;
@@ -827,7 +827,7 @@ unsigned char *__ziplistCascadeUpdate(unsigned char *zl, unsigned char *p) {
/* An entry's prevlen can only increment 4 bytes. */
zipStorePrevEntryLength(p, cur.prevrawlen+delta);
}
- /* Foward to previous entry. */
+ /* Forward to previous entry. */
prevoffset -= cur.prevrawlen;
cnt--;
}
diff --git a/src/zipmap.c b/src/zipmap.c
index c24e81355..e39c27708 100644
--- a/src/zipmap.c
+++ b/src/zipmap.c
@@ -399,7 +399,7 @@ int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) {
while(*p != ZIPMAP_END) {
/* read the field name length encoding type */
s = zipmapGetEncodedLengthSize(p);
- /* make sure the entry length doesn't rech outside the edge of the zipmap */
+ /* make sure the entry length doesn't reach outside the edge of the zipmap */
if (OUT_OF_RANGE(p+s))
return 0;
@@ -408,13 +408,13 @@ int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) {
p += s; /* skip the encoded field size */
p += l; /* skip the field */
- /* make sure the entry doesn't rech outside the edge of the zipmap */
+ /* make sure the entry doesn't reach outside the edge of the zipmap */
if (OUT_OF_RANGE(p))
return 0;
/* read the value length encoding type */
s = zipmapGetEncodedLengthSize(p);
- /* make sure the entry length doesn't rech outside the edge of the zipmap */
+ /* make sure the entry length doesn't reach outside the edge of the zipmap */
if (OUT_OF_RANGE(p+s))
return 0;
@@ -425,7 +425,7 @@ int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) {
p += l+e; /* skip the value and free space */
count++;
- /* make sure the entry doesn't rech outside the edge of the zipmap */
+ /* make sure the entry doesn't reach outside the edge of the zipmap */
if (OUT_OF_RANGE(p))
return 0;
}
diff --git a/tests/cluster/tests/03-failover-loop.tcl b/tests/cluster/tests/03-failover-loop.tcl
index 8e1bcd6fe..5c84b2604 100644
--- a/tests/cluster/tests/03-failover-loop.tcl
+++ b/tests/cluster/tests/03-failover-loop.tcl
@@ -1,7 +1,7 @@
# Failover stress test.
# In this test a different node is killed in a loop for N
# iterations. The test checks that certain properties
-# are preseved across iterations.
+# are preserved across iterations.
source "../tests/includes/init-tests.tcl"
diff --git a/tests/cluster/tests/08-update-msg.tcl b/tests/cluster/tests/08-update-msg.tcl
index 6f9661db0..9011f3205 100644
--- a/tests/cluster/tests/08-update-msg.tcl
+++ b/tests/cluster/tests/08-update-msg.tcl
@@ -1,9 +1,9 @@
# Test UPDATE messages sent by other nodes when the currently authorirative
-# master is unavaialble. The test is performed in the following steps:
+# master is unavailable. The test is performed in the following steps:
#
# 1) Master goes down.
# 2) Slave failover and becomes new master.
-# 3) New master is partitoned away.
+# 3) New master is partitioned away.
# 4) Old master returns.
# 5) At this point we expect the old master to turn into a slave ASAP because
# of the UPDATE messages it will receive from the other nodes when its
diff --git a/tests/cluster/tests/12.1-replica-migration-3.tcl b/tests/cluster/tests/12.1-replica-migration-3.tcl
index 46a9f79e3..42620084a 100644
--- a/tests/cluster/tests/12.1-replica-migration-3.tcl
+++ b/tests/cluster/tests/12.1-replica-migration-3.tcl
@@ -53,7 +53,7 @@ test "Wait cluster to be stable" {
}
}
-test "Master #0 stil should have its replicas" {
+test "Master #0 still should have its replicas" {
assert { [llength [lindex [R 0 role] 2]] >= 2 }
}
diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl
index baed15fbc..ee9f7c6e6 100644
--- a/tests/cluster/tests/16-transactions-on-replica.tcl
+++ b/tests/cluster/tests/16-transactions-on-replica.tcl
@@ -13,7 +13,7 @@ test "Cluster should start ok" {
set primary [Rn 0]
set replica [Rn 1]
-test "Cant read from replica without READONLY" {
+test "Can't read from replica without READONLY" {
$primary SET a 1
wait_for_ofs_sync $primary $replica
catch {$replica GET a} err
@@ -25,7 +25,7 @@ test "Can read from replica after READONLY" {
assert {[$replica GET a] eq {1}}
}
-test "Can preform HSET primary and HGET from replica" {
+test "Can perform HSET primary and HGET from replica" {
$primary HSET h a 1
$primary HSET h b 2
$primary HSET h c 3
diff --git a/tests/cluster/tests/18-info.tcl b/tests/cluster/tests/18-info.tcl
index 978d9d1da..68c62d357 100644
--- a/tests/cluster/tests/18-info.tcl
+++ b/tests/cluster/tests/18-info.tcl
@@ -13,12 +13,12 @@ test "Cluster should start ok" {
set primary1 [Rn 0]
set primary2 [Rn 1]
-proc cmdstat {instace cmd} {
- return [cmdrstat $cmd $instace]
+proc cmdstat {instance cmd} {
+ return [cmdrstat $cmd $instance]
}
-proc errorstat {instace cmd} {
- return [errorrstat $cmd $instace]
+proc errorstat {instance cmd} {
+ return [errorrstat $cmd $instance]
}
test "errorstats: rejected call due to MOVED Redirection" {
diff --git a/tests/integration/corrupt-dump-fuzzer.tcl b/tests/integration/corrupt-dump-fuzzer.tcl
index e51617b7b..f2494b10f 100644
--- a/tests/integration/corrupt-dump-fuzzer.tcl
+++ b/tests/integration/corrupt-dump-fuzzer.tcl
@@ -52,7 +52,7 @@ proc generate_types {} {
generate_collections big 10
# make sure our big stream also has a listpack record that has different
- # field names than the master recored
+ # field names than the master record
r xadd streambig * item 1 value 1
r xadd streambig * item 1 unique value
}
diff --git a/tests/integration/corrupt-dump.tcl b/tests/integration/corrupt-dump.tcl
index c0fec6d6a..c707d3410 100644
--- a/tests/integration/corrupt-dump.tcl
+++ b/tests/integration/corrupt-dump.tcl
@@ -1,7 +1,7 @@
# tests of corrupt ziplist payload with valid CRC
# * setting crash-memcheck-enabled to no to avoid issues with valgrind
# * setting use-exit-on-panic to yes so that valgrind can search for leaks
-# * settng debug set-skip-checksum-validation to 1 on some tests for which we
+# * setting debug set-skip-checksum-validation to 1 on some tests for which we
# didn't bother to fake a valid checksum
# * some tests set sanitize-dump-payload to no and some to yet, depending on
# what we want to test
@@ -214,7 +214,7 @@ test {corrupt payload: hash ziplist uneven record count} {
}
}
-test {corrupt payload: hash dupliacte records} {
+test {corrupt payload: hash duplicate records} {
# when we do perform full sanitization, we expect duplicate records to fail the restore
start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
r config set sanitize-dump-payload yes
diff --git a/tests/integration/psync2.tcl b/tests/integration/psync2.tcl
index 9a5855549..508e959ea 100644
--- a/tests/integration/psync2.tcl
+++ b/tests/integration/psync2.tcl
@@ -117,7 +117,7 @@ start_server {} {
set used [list $master_id]
test "PSYNC2: \[NEW LAYOUT\] Set #$master_id as master" {
$R($master_id) slaveof no one
- $R($master_id) config set repl-ping-replica-period 1 ;# increse the chance that random ping will cause issues
+ $R($master_id) config set repl-ping-replica-period 1 ;# increase the chance that random ping will cause issues
if {$counter_value == 0} {
$R($master_id) set x $counter_value
}
diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl
index d11112464..3f19ffe43 100644
--- a/tests/integration/rdb.tcl
+++ b/tests/integration/rdb.tcl
@@ -130,7 +130,7 @@ start_server_and_kill_it [list "dir" $server_path] {
start_server {} {
test {Test FLUSHALL aborts bgsave} {
- # 1000 keys with 1ms sleep per key shuld take 1 second
+ # 1000 keys with 1ms sleep per key should take 1 second
r config set rdb-key-save-delay 1000
r debug populate 1000
r bgsave
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
index 5477266b1..e104f2437 100644
--- a/tests/integration/replication.tcl
+++ b/tests/integration/replication.tcl
@@ -419,7 +419,7 @@ test {slave fails full sync and diskless load swapdb recovers it} {
fail "Replica didn't get into loading mode"
}
- # make sure that next sync will not start immediately so that we can catch the slave in betweeen syncs
+ # make sure that next sync will not start immediately so that we can catch the slave in between syncs
$master config set repl-diskless-sync-delay 5
# for faster server shutdown, make rdb saving fast again (the fork is already uses the slow one)
$master config set rdb-key-save-delay 0
diff --git a/tests/modules/commandfilter.c b/tests/modules/commandfilter.c
index 571ed1701..03fd679af 100644
--- a/tests/modules/commandfilter.c
+++ b/tests/modules/commandfilter.c
@@ -82,7 +82,7 @@ void CommandFilter_CommandFilter(RedisModuleCommandFilterCtx *filter)
* - Remove @delme
* - Replace @replaceme
* - Append @insertbefore or @insertafter
- * - Prefix with Log command if @log encounterd
+ * - Prefix with Log command if @log encountered
*/
int log = 0;
int pos = 0;
diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c
index 9305774cd..8a55e0f44 100644
--- a/tests/modules/keyspace_events.c
+++ b/tests/modules/keyspace_events.c
@@ -36,7 +36,7 @@
#include <stdio.h>
#include <string.h>
-/** strores all the keys on which we got 'loaded' keyspace notification **/
+/** stores all the keys on which we got 'loaded' keyspace notification **/
RedisModuleDict *loaded_event_log = NULL;
/** stores all the keys on which we got 'module' keyspace notification **/
RedisModuleDict *module_event_log = NULL;
diff --git a/tests/modules/propagate.c b/tests/modules/propagate.c
index ac04d4f9d..766c61ea5 100644
--- a/tests/modules/propagate.c
+++ b/tests/modules/propagate.c
@@ -75,7 +75,7 @@ void timerNestedHandler(RedisModuleCtx *ctx, void *data) {
int repl = (long long)data;
/* The goal is the trigger a module command that calls RM_Replicate
- * in order to test MULTI/EXEC structre */
+ * in order to test MULTI/EXEC structure */
RedisModule_Replicate(ctx,"INCRBY","cc","timer-nested-start","1");
RedisModuleCallReply *reply = RedisModule_Call(ctx,"propagate-test.nested", repl? "!" : "");
RedisModule_FreeCallReply(reply);
diff --git a/tests/sentinel/tests/07-down-conditions.tcl b/tests/sentinel/tests/07-down-conditions.tcl
index 0a696fa6b..19cabde49 100644
--- a/tests/sentinel/tests/07-down-conditions.tcl
+++ b/tests/sentinel/tests/07-down-conditions.tcl
@@ -52,7 +52,7 @@ test "SDOWN is triggered by masters advertising as slaves" {
ensure_master_up
}
-test "SDOWN is triggered by misconfigured instance repling with errors" {
+test "SDOWN is triggered by misconfigured instance replying with errors" {
ensure_master_up
set orig_dir [lindex [R 0 config get dir] 1]
set orig_save [lindex [R 0 config get save] 1]
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
index 7a17a721f..54b0e4ed0 100644
--- a/tests/support/server.tcl
+++ b/tests/support/server.tcl
@@ -202,7 +202,7 @@ proc tags_acceptable {tags err_return} {
# doesn't really belong here, but highly coupled to code in start_server
proc tags {tags code} {
- # If we 'tags' contain multiple tags, quoted and seperated by spaces,
+ # If we 'tags' contain multiple tags, quoted and separated by spaces,
# we want to get rid of the quotes in order to have a proper list
set tags [string map { \" "" } $tags]
set ::tags [concat $::tags $tags]
@@ -366,7 +366,7 @@ proc start_server {options {code undefined}} {
set omit $value
}
"tags" {
- # If we 'tags' contain multiple tags, quoted and seperated by spaces,
+ # If we 'tags' contain multiple tags, quoted and separated by spaces,
# we want to get rid of the quotes in order to have a proper list
set tags [string map { \" "" } $value]
set ::tags [concat $::tags $tags]
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
index 092306e11..dd1a75801 100644
--- a/tests/support/util.tcl
+++ b/tests/support/util.tcl
@@ -498,7 +498,7 @@ proc find_valgrind_errors {stderr on_termination} {
return ""
}
- # Look for the absense of a leak free summary (happens when redis isn't terminated properly).
+ # Look for the absence of a leak free summary (happens when redis isn't terminated properly).
if {(![regexp -- {definitely lost: 0 bytes} $buf] &&
![regexp -- {no leaks are possible} $buf])} {
return $buf
diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl
index 975847f39..ee188d578 100644
--- a/tests/unit/introspection.tcl
+++ b/tests/unit/introspection.tcl
@@ -251,6 +251,6 @@ start_server {tags {"introspection"}} {
}
} {} {external:skip}
- # Config file at this point is at a wierd state, and includes all
+ # Config file at this point is at a weird state, and includes all
# known keywords. Might be a good idea to avoid adding tests here.
}
diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl
index 376e0b91e..cff805431 100644
--- a/tests/unit/memefficiency.tcl
+++ b/tests/unit/memefficiency.tcl
@@ -393,7 +393,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
# there was an edge case in defrag where all the slabs of a certain bin are exact the same
# % utilization, with the exception of the current slab from which new allocations are made
# if the current slab is lower in utilization the defragger would have ended up in stagnation,
- # keept running and not move any allocation.
+ # kept running and not move any allocation.
# this test is more consistent on a fresh server with no history
start_server {tags {"defrag"} overrides {save ""}} {
r flushdb
diff --git a/tests/unit/moduleapi/auth.tcl b/tests/unit/moduleapi/auth.tcl
index 04a90a496..906ab0beb 100644
--- a/tests/unit/moduleapi/auth.tcl
+++ b/tests/unit/moduleapi/auth.tcl
@@ -30,7 +30,7 @@ start_server {tags {"modules"}} {
assert_equal [r auth.changecount] 1
}
- test {Modules cant authenticate with ACLs users that dont exist} {
+ test {Modules can't authenticate with ACLs users that don't exist} {
catch { [r auth.authrealuser auth-module-test-fake] } e
assert_match {*Invalid user*} $e
}
diff --git a/tests/unit/moduleapi/misc.tcl b/tests/unit/moduleapi/misc.tcl
index a6a7a78f9..3205b1c45 100644
--- a/tests/unit/moduleapi/misc.tcl
+++ b/tests/unit/moduleapi/misc.tcl
@@ -40,7 +40,7 @@ start_server {tags {"modules"}} {
assert_equal [r test.dbsize] 0
}
- test {test modle lru api} {
+ test {test module lru api} {
r config set maxmemory-policy allkeys-lru
r set x foo
set lru [r test.getlru x]
@@ -59,7 +59,7 @@ start_server {tags {"modules"}} {
}
r config set maxmemory-policy allkeys-lru
- test {test modle lfu api} {
+ test {test module lfu api} {
r config set maxmemory-policy allkeys-lfu
r set x foo
set lfu [r test.getlfu x]
diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl
index 03fb7f256..3e1784b13 100644
--- a/tests/unit/multi.tcl
+++ b/tests/unit/multi.tcl
@@ -414,7 +414,7 @@ start_server {tags {"multi"}} {
}
test {MULTI-EXEC body and script timeout} {
- # check that we don't run an imcomplete transaction due to some commands
+ # check that we don't run an incomplete transaction due to some commands
# arriving during busy script
set rd1 [redis_deferring_client]
set r2 [redis_client]
diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl
index 456eb4316..1277d6035 100644
--- a/tests/unit/obuf-limits.tcl
+++ b/tests/unit/obuf-limits.tcl
@@ -131,7 +131,7 @@ start_server {tags {"obuf-limits external:skip"}} {
after 100
# Create a pipeline of commands that will be processed in one socket read.
- # It is important to use one write, in TLS mode independant writes seem
+ # It is important to use one write, in TLS mode independent writes seem
# to wait for response from the server.
# Total size should be less than OS socket buffer, redis can
# execute all commands in this pipeline when it wakes up.
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
index 391f0cbc8..26c97e16e 100644
--- a/tests/unit/scripting.tcl
+++ b/tests/unit/scripting.tcl
@@ -272,7 +272,7 @@ start_server {tags {"scripting"}} {
local encoded = cmsgpack.pack(a)
local h = ""
-- cmsgpack encodes to a depth of 16, but can't encode
- -- references, so the encoded object has a deep copy recusive
+ -- references, so the encoded object has a deep copy recursive
-- depth of 16.
for i = 1, #encoded do
h = h .. string.format("%02x",string.byte(encoded,i))
diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl
index 1c72052d2..522b6a3d1 100644
--- a/tests/unit/type/stream-cgroups.tcl
+++ b/tests/unit/type/stream-cgroups.tcl
@@ -640,7 +640,7 @@ start_server {
set item [$slave xreadgroup group mygroup myconsumer \
COUNT 1 STREAMS stream >]
- # The consumed enty should be the third
+ # The consumed entry should be the third
set myentry [lindex $item 0 1 0 1]
assert {$myentry eq {a 3}}
}
diff --git a/utils/lru/lfu-simulation.c b/utils/lru/lfu-simulation.c
index 6aa5911ac..51d639d66 100644
--- a/utils/lru/lfu-simulation.c
+++ b/utils/lru/lfu-simulation.c
@@ -30,7 +30,7 @@ uint16_t minutes_diff(uint16_t now, uint16_t prev) {
return 65535-prev+now;
}
-/* Increment a couter logaritmically: the greatest is its value, the
+/* Increment a counter logarithmically: the greater its value, the
* less likely is that the counter is really incremented.
* The maximum value of the counter is saturated at 255. */
uint8_t log_incr(uint8_t counter) {
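/* The sketch below illustrates the logarithmic (probabilistic) increment the
 * comment above describes: the higher the counter, the lower the chance that
 * an increment actually happens, saturating at 255. It is a minimal,
 * self-contained approximation; the acceptance probability 1/(counter*10+1)
 * and the name log_incr_sketch are illustrative assumptions, not taken from
 * lfu-simulation.c itself. */
#include <stdint.h>
#include <stdlib.h>

uint8_t log_incr_sketch(uint8_t counter) {
    if (counter == 255) return counter;        /* saturate at the maximum */
    double r = (double)rand() / RAND_MAX;      /* uniform sample in [0,1] */
    double p = 1.0 / (counter * 10.0 + 1);     /* shrinks as the counter grows */
    if (r < p) counter++;                      /* increment only with probability p */
    return counter;
}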
diff --git a/utils/redis-sha1.rb b/utils/redis-sha1.rb
index 24498e25a..6a8b4f358 100644
--- a/utils/redis-sha1.rb
+++ b/utils/redis-sha1.rb
@@ -1,7 +1,7 @@
# redis-sha1.rb - Copyright (C) 2009 Salvatore Sanfilippo
# BSD license, See the COPYING file for more information.
#
-# Performs the SHA1 sum of the whole datset.
+# Performs the SHA1 sum of the whole dataset.
# This is useful to spot bugs in persistence related code and to make sure
# Slaves and Masters are in SYNC.
#
diff --git a/utils/speed-regression.tcl b/utils/speed-regression.tcl
index 86a7d8d86..bf35c7db4 100755
--- a/utils/speed-regression.tcl
+++ b/utils/speed-regression.tcl
@@ -101,7 +101,7 @@ if {![file exists speed-regression.tcl]} {
exit 1
}
-# Make sure there is not already a server runnign on port 12123
+# Make sure there is not already a server running on port 12123
set is_not_running [catch {set r [redis 127.0.0.1 $::port]}]
if {!$is_not_running} {
puts "Sorry, you have a running server on port $::port"
diff --git a/utils/srandmember/showfreq.rb b/utils/srandmember/showfreq.rb
index fd47bc0ca..625519c5d 100644
--- a/utils/srandmember/showfreq.rb
+++ b/utils/srandmember/showfreq.rb
@@ -17,7 +17,7 @@ freq = {}
}
}
-# Print the frequency each element was yeld to process it with gnuplot
+# Print the frequency each element was yielded, to process it with gnuplot
freq.each{|item,count|
puts "#{item} #{count}"
}
diff --git a/utils/tracking_collisions.c b/utils/tracking_collisions.c
index cd64b36c5..f52111173 100644
--- a/utils/tracking_collisions.c
+++ b/utils/tracking_collisions.c
@@ -1,4 +1,4 @@
-/* This is a small program used in order to understand the collison rate
+/* This is a small program used in order to understand the collision rate
* of CRC64 (ISO version) VS other stronger hashing functions in the context
* of hashing keys for the Redis "tracking" feature (client side caching
* assisted by the server).
@@ -7,7 +7,7 @@
*
* prefix:<counter>
*
- * And counts the resulting collisons generated in the 24 bits of output
+ * And counts the resulting collisions generated in the 24 bits of output
* needed for the tracking feature invalidation table (16 millions + entries)
*
* Compile with: