Diffstat (limited to 'src')
-rw-r--r--  src/adlist.c        2
-rw-r--r--  src/aof.c          26
-rw-r--r--  src/cluster.c      26
-rw-r--r--  src/cluster.h       6
-rw-r--r--  src/config.c        2
-rw-r--r--  src/debug.c         2
-rw-r--r--  src/dict.c         62
-rw-r--r--  src/networking.c    6
-rw-r--r--  src/rdb.c           2
-rw-r--r--  src/redis.c         4
-rw-r--r--  src/redis.h         2
-rw-r--r--  src/replication.c  12
-rw-r--r--  src/scripting.c     2
-rw-r--r--  src/sds.c           4
-rw-r--r--  src/sentinel.c      2
-rw-r--r--  src/util.c          2
-rw-r--r--  src/ziplist.c       9
17 files changed, 86 insertions, 85 deletions
diff --git a/src/adlist.c b/src/adlist.c
index b4dba420f..b4cc785be 100644
--- a/src/adlist.c
+++ b/src/adlist.c
@@ -71,7 +71,7 @@ void listRelease(list *list)
zfree(list);
}
-/* Add a new node to the list, to head, contaning the specified 'value'
+/* Add a new node to the list, to head, containing the specified 'value'
* pointer as value.
*
* On error, NULL is returned and no operation is performed (i.e. the
diff --git a/src/aof.c b/src/aof.c
index 81f5f1fcd..0af519bfa 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -74,7 +74,7 @@ void aofRewriteBufferReset(void) {
listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree);
}
-/* Return the current size of the AOF rerwite buffer. */
+/* Return the current size of the AOF rewrite buffer. */
unsigned long aofRewriteBufferSize(void) {
listNode *ln;
listIter li;
@@ -245,7 +245,7 @@ int startAppendOnly(void) {
redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error.");
return REDIS_ERR;
}
- /* We correctly switched on AOF, now wait for the rerwite to be complete
+ /* We correctly switched on AOF, now wait for the rewrite to be complete
* in order to append data on disk. */
server.aof_state = REDIS_AOF_WAIT_REWRITE;
return REDIS_OK;
@@ -286,7 +286,7 @@ void flushAppendOnlyFile(int force) {
* the write for a couple of seconds. */
if (sync_in_progress) {
if (server.aof_flush_postponed_start == 0) {
- /* No previous write postponinig, remember that we are
+ /* No previous write postponing, remember that we are
* postponing the flush and return. */
server.aof_flush_postponed_start = server.unixtime;
return;
@@ -337,7 +337,7 @@ void flushAppendOnlyFile(int force) {
last_write_error_log = server.unixtime;
}
- /* Lof the AOF write error and record the error code. */
+ /* Log the AOF write error and record the error code. */
if (nwritten == -1) {
if (can_log) {
redisLog(REDIS_WARNING,"Error writing to the AOF file: %s",
@@ -361,7 +361,7 @@ void flushAppendOnlyFile(int force) {
"ftruncate: %s", strerror(errno));
}
} else {
- /* If the ftrunacate() succeeded we can set nwritten to
+ /* If the ftruncate() succeeded we can set nwritten to
* -1 since there is no longer partial data into the AOF. */
nwritten = -1;
}
@@ -373,7 +373,7 @@ void flushAppendOnlyFile(int force) {
/* We can't recover when the fsync policy is ALWAYS since the
* reply for the client is already in the output buffers, and we
* have the contract with the user that on acknowledged write data
- * is synched on disk. */
+ * is synced on disk. */
redisLog(REDIS_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting...");
exit(1);
} else {
@@ -468,7 +468,7 @@ sds catAppendOnlyExpireAtCommand(sds buf, struct redisCommand *cmd, robj *key, r
long long when;
robj *argv[3];
- /* Make sure we can use strtol */
+ /* Make sure we can use strtoll */
seconds = getDecodedObject(seconds);
when = strtoll(seconds->ptr,NULL,10);
/* Convert argument into milliseconds for EXPIRE, SETEX, EXPIREAT */
@@ -499,7 +499,7 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a
robj *tmpargv[3];
/* The DB this command was targeting is not the same as the last command
- * we appendend. To issue a SELECT command is needed. */
+ * we appended. To issue a SELECT command is needed. */
if (dictid != server.aof_selected_db) {
char seldb[64];
@@ -593,7 +593,7 @@ void freeFakeClient(struct redisClient *c) {
zfree(c);
}
-/* Replay the append log file. On error REDIS_OK is returned. On non fatal
+/* Replay the append log file. On success REDIS_OK is returned. On non fatal
* error (the append only file is zero-length) REDIS_ERR is returned. On
* fatal error an error message is logged and the program exists. */
int loadAppendOnlyFile(char *filename) {
@@ -1000,7 +1000,7 @@ int rewriteHashObject(rio *r, robj *key, robj *o) {
* the difference accumulated from the parent into a buffer, that is
* concatenated at the end of the rewrite. */
ssize_t aofReadDiffFromParent(void) {
- char buf[65536]; /* Default pipe buffer size on most Linux sytems. */
+ char buf[65536]; /* Default pipe buffer size on most Linux systems. */
ssize_t nread, total = 0;
while ((nread =
@@ -1114,7 +1114,7 @@ int rewriteAppendOnlyFile(char *filename) {
/* Read again a few times to get more data from the parent.
* We can't read forever (the server may receive data from clients
- * fater than it is able to send data to the child), so we try to read
+ * faster than it is able to send data to the child), so we try to read
* some more data in a loop as soon as there is a good chance more data
* will come. If it looks like we are wasting time, we abort (this
* happens after 20 ms without new data). */
@@ -1250,7 +1250,7 @@ void aofClosePipes(void) {
}
/* ----------------------------------------------------------------------------
- * AOF backgorund rewrite
+ * AOF background rewrite
* ------------------------------------------------------------------------- */
/* This is how rewriting of the append only file in background works:
@@ -1392,7 +1392,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency);
redisLog(REDIS_NOTICE,
- "Redidual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024));
+ "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024));
/* The only remaining thing to do is to rename the temporary file to
* the configured file and switch the file descriptor used to do AOF
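The rewriteAppendOnlyFile hunk above mentions the child process reading leftover diff data from the parent in a loop, giving up once no new data has arrived for roughly 20 ms. A minimal sketch of that read-with-idle-budget pattern, not the aof.c implementation: the function name, the demo pipe and the buffer handling are illustrative assumptions.

    /* Sketch only: drain a pipe until the writer stays silent for idle_ms
     * milliseconds, in the spirit of the 20 ms abort described above.
     * drain_with_budget() and the demo pipe are illustrative, not aof.c code. */
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    static ssize_t drain_with_budget(int fd, int idle_ms) {
        char buf[65536];                    /* roughly a pipe buffer's worth */
        ssize_t nread, total = 0;

        for (;;) {
            struct pollfd p = { .fd = fd, .events = POLLIN };
            int ready = poll(&p, 1, idle_ms);   /* wait up to idle_ms for data */
            if (ready <= 0) break;              /* timeout (or error): give up */
            nread = read(fd, buf, sizeof(buf));
            if (nread <= 0) break;              /* EOF or error */
            total += nread;                     /* a real caller would append buf */
        }
        return total;
    }

    int main(void) {
        int fds[2];
        if (pipe(fds) != 0) return 1;
        if (write(fds[1], "accumulated diff data", 21) != 21) return 1;
        printf("drained %zd bytes\n", drain_with_budget(fds[0], 20));
        return 0;
    }
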
diff --git a/src/cluster.c b/src/cluster.c
index 821fe1734..149c9d937 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -124,7 +124,7 @@ int clusterLoadConfig(char *filename) {
return REDIS_ERR;
}
- /* Parse the file. Note that single liens of the cluster config file can
+ /* Parse the file. Note that single lines of the cluster config file can
* be really long as they include all the hash slots of the node.
* This means in the worst possible case, half of the Redis slots will be
* present in a single line, possibly in importing or migrating state, so
@@ -1133,7 +1133,7 @@ int clusterStartHandshake(char *ip, int port) {
/* Add the node with a random address (NULL as first argument to
* createClusterNode()). Everything will be fixed during the
- * handskake. */
+ * handshake. */
n = createClusterNode(NULL,REDIS_NODE_HANDSHAKE|REDIS_NODE_MEET);
memcpy(n->ip,norm_ip,sizeof(n->ip));
n->port = port;
@@ -1284,7 +1284,7 @@ void clusterSetNodeAsMaster(clusterNode *n) {
* node (see the function comments for more info).
*
* The 'sender' is the node for which we received a configuration update.
- * Sometimes it is not actaully the "Sender" of the information, like in the case
+ * Sometimes it is not actually the "Sender" of the information, like in the case
* we receive the info via an UPDATE packet. */
void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) {
int j;
@@ -1597,7 +1597,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_UPDATE_STATE);
}
- /* Free this node as we alrady have it. This will
+ /* Free this node as we already have it. This will
* cause the link to be freed as well. */
freeClusterNode(link->node);
return 0;
@@ -1794,7 +1794,7 @@ int clusterProcessPacket(clusterLink *link) {
}
} else {
redisLog(REDIS_NOTICE,
- "Ignoring FAIL message from unknonw node %.40s about %.40s",
+ "Ignoring FAIL message from unknown node %.40s about %.40s",
hdr->sender, hdr->data.fail.about.nodename);
}
} else if (type == CLUSTERMSG_TYPE_PUBLISH) {
@@ -1863,7 +1863,7 @@ int clusterProcessPacket(clusterLink *link) {
clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|
CLUSTER_TODO_FSYNC_CONFIG);
- /* Check the bitmap of served slots and udpate our
+ /* Check the bitmap of served slots and update our
* config accordingly. */
clusterUpdateSlotsConfigWith(n,reportedConfigEpoch,
hdr->data.update.nodecfg.slots);
@@ -2761,7 +2761,7 @@ void clusterHandleManualFailover(void) {
/* Return ASAP if no manual failover is in progress. */
if (server.cluster->mf_end == 0) return;
- /* If mf_can_start is non-zero, the failover was alrady triggered so the
+ /* If mf_can_start is non-zero, the failover was already triggered so the
* next steps are performed by clusterHandleSlaveFailover(). */
if (server.cluster->mf_can_start) return;
@@ -3300,7 +3300,7 @@ int verifyClusterConfigWithData(void) {
* assigned to this slot. Fix this condition. */
update_config++;
- /* Case A: slot is unassigned. Take responsability for it. */
+ /* Case A: slot is unassigned. Take responsibility for it. */
if (server.cluster->slots[j] == NULL) {
redisLog(REDIS_WARNING, "I have keys for unassigned slot %d. "
"Taking responsibility for it.",j);
@@ -3613,7 +3613,7 @@ void clusterCommand(redisClient *c) {
int del = !strcasecmp(c->argv[1]->ptr,"delslots");
memset(slots,0,REDIS_CLUSTER_SLOTS);
- /* Check that all the arguments are parsable and that all the
+ /* Check that all the arguments are parseable and that all the
* slots are not already busy. */
for (j = 2; j < c->argc; j++) {
if ((slot = getSlotOrReply(c,c->argv[j])) == -1) {
@@ -4180,14 +4180,14 @@ void restoreCommand(redisClient *c) {
* This sockets are closed when the max number we cache is reached, and also
* in serverCron() when they are around for more than a few seconds. */
#define MIGRATE_SOCKET_CACHE_ITEMS 64 /* max num of items in the cache. */
-#define MIGRATE_SOCKET_CACHE_TTL 10 /* close cached socekts after 10 sec. */
+#define MIGRATE_SOCKET_CACHE_TTL 10 /* close cached sockets after 10 sec. */
typedef struct migrateCachedSocket {
int fd;
time_t last_use_time;
} migrateCachedSocket;
-/* Return a TCP scoket connected with the target instance, possibly returning
+/* Return a TCP socket connected with the target instance, possibly returning
* a cached one.
*
* This function is responsible of sending errors to the client if a
@@ -4196,7 +4196,7 @@ typedef struct migrateCachedSocket {
* attempt to free it after usage.
*
* If the caller detects an error while using the socket, migrateCloseSocket()
- * should be called so that the connection will be craeted from scratch
+ * should be called so that the connection will be created from scratch
* the next time. */
int migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) {
int fd;
@@ -4452,7 +4452,7 @@ void askingCommand(redisClient *c) {
addReply(c,shared.ok);
}
-/* The READONLY command is uesd by clients to enter the read-only mode.
+/* The READONLY command is used by clients to enter the read-only mode.
* In this mode slaves will not redirect clients as long as clients access
* with read-only commands to keys that are served by the slave's master. */
void readonlyCommand(redisClient *c) {
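The MIGRATE socket cache comments above (MIGRATE_SOCKET_CACHE_ITEMS, MIGRATE_SOCKET_CACHE_TTL) describe keeping a small pool of connected sockets keyed by target, capped in size and dropped after a few idle seconds. A minimal sketch of that idea, assuming a fixed-size table keyed by "host:port" and a caller-supplied descriptor; every name here is made up and the connect step is stubbed out, so this is not the cluster.c API.

    /* Sketch only: a tiny idle-expiring connection cache in the spirit of the
     * MIGRATE socket cache described above. */
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define CACHE_ITEMS 64            /* max number of cached entries */
    #define CACHE_TTL   10            /* seconds an entry may stay idle */

    typedef struct {
        char   key[64];               /* "host:port"; empty string = free slot */
        int    fd;                    /* would be a connected TCP socket */
        time_t last_use_time;
    } cached_sock;

    static cached_sock cache[CACHE_ITEMS];

    /* Return the cached descriptor for host:port, or store 'newfd' in a free or
     * expired slot. Returns -1 if every slot holds a fresh entry. */
    static int get_cached_fd(const char *host, int port, int newfd) {
        char key[64];
        time_t now = time(NULL);

        snprintf(key, sizeof(key), "%s:%d", host, port);
        for (int j = 0; j < CACHE_ITEMS; j++) {
            if (strcmp(cache[j].key, key) == 0) {   /* hit: refresh and reuse */
                cache[j].last_use_time = now;
                return cache[j].fd;
            }
        }
        for (int j = 0; j < CACHE_ITEMS; j++) {     /* miss: take a usable slot */
            if (cache[j].key[0] == '\0' ||
                now - cache[j].last_use_time > CACHE_TTL) {
                snprintf(cache[j].key, sizeof(cache[j].key), "%s", key);
                cache[j].fd = newfd;
                cache[j].last_use_time = now;
                return newfd;
            }
        }
        return -1;                                  /* cache full of fresh sockets */
    }

    int main(void) {
        printf("first:  fd=%d\n", get_cached_fd("127.0.0.1", 6379, 7)); /* stores 7 */
        printf("second: fd=%d\n", get_cached_fd("127.0.0.1", 6379, 8)); /* reuses 7 */
        return 0;
    }
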
diff --git a/src/cluster.h b/src/cluster.h
index adad0645f..3287afe72 100644
--- a/src/cluster.h
+++ b/src/cluster.h
@@ -11,7 +11,7 @@
#define REDIS_CLUSTER_NAMELEN 40 /* sha1 hex length */
#define REDIS_CLUSTER_PORT_INCR 10000 /* Cluster port = baseport + PORT_INCR */
-/* The following defines are amunt of time, sometimes expressed as
+/* The following defines are amount of time, sometimes expressed as
* multiplicators of the node timeout value (when ending with MULT). */
#define REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT 15000
#define REDIS_CLUSTER_DEFAULT_SLAVE_VALIDITY 10 /* Slave max data age factor. */
@@ -51,7 +51,7 @@ typedef struct clusterLink {
#define REDIS_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */
#define REDIS_NODE_NOADDR 64 /* We don't know the address of this node */
#define REDIS_NODE_MEET 128 /* Send a MEET message to this node */
-#define REDIS_NODE_PROMOTED 256 /* Master was a slave propoted by failover */
+#define REDIS_NODE_PROMOTED 256 /* Master was a slave promoted by failover */
#define REDIS_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
#define nodeIsMaster(n) ((n)->flags & REDIS_NODE_MASTER)
@@ -117,7 +117,7 @@ typedef struct clusterState {
or zero if stil not received. */
int mf_can_start; /* If non-zero signal that the manual failover
can start requesting masters vote. */
- /* The followign fields are uesd by masters to take state on elections. */
+ /* The followign fields are used by masters to take state on elections. */
uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */
int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */
long long stats_bus_messages_sent; /* Num of msg sent via cluster bus. */
diff --git a/src/config.c b/src/config.c
index db0847935..43507000f 100644
--- a/src/config.c
+++ b/src/config.c
@@ -1493,7 +1493,7 @@ void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int
rewriteConfigRewriteLine(state,option,line,force);
}
-/* Rewrite the syslog-fability option. */
+/* Rewrite the syslog-facility option. */
void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) {
int value = server.syslog_facility, j;
int force = value != LOG_LOCAL0;
diff --git a/src/debug.c b/src/debug.c
index 641b10024..68878385f 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -852,7 +852,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
redisLog(REDIS_WARNING,
"\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n"
-" Please report the crash opening an issue on github:\n\n"
+" Please report the crash by opening an issue on github:\n\n"
" http://github.com/antirez/redis/issues\n\n"
" Suspect RAM error? Use redis-server --test-memory to verify it.\n\n"
);
diff --git a/src/dict.c b/src/dict.c
index 8eb3da34b..29d400099 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -709,72 +709,72 @@ static unsigned long rev(unsigned long v) {
/* dictScan() is used to iterate over the elements of a dictionary.
*
- * Iterating works in the following way:
+ * Iterating works the following way:
*
* 1) Initially you call the function using a cursor (v) value of 0.
* 2) The function performs one step of the iteration, and returns the
- * new cursor value that you must use in the next call.
+ * new cursor value you must use in the next call.
* 3) When the returned cursor is 0, the iteration is complete.
*
- * The function guarantees that all the elements that are present in the
- * dictionary from the start to the end of the iteration are returned.
- * However it is possible that some element is returned multiple time.
+ * The function guarantees all elements present in the
+ * dictionary get returned between the start and end of the iteration.
+ * However it is possible some elements get returned multiple times.
*
- * For every element returned, the callback 'fn' passed as argument is
- * called, with 'privdata' as first argument and the dictionar entry
+ * For every element returned, the callback argument 'fn' is
+ * called with 'privdata' as first argument and the dictionary entry
* 'de' as second argument.
*
* HOW IT WORKS.
*
- * The algorithm used in the iteration was designed by Pieter Noordhuis.
+ * The iteration algorithm was designed by Pieter Noordhuis.
* The main idea is to increment a cursor starting from the higher order
- * bits, that is, instead of incrementing the cursor normally, the bits
+ * bits. That is, instead of incrementing the cursor normally, the bits
* of the cursor are reversed, then the cursor is incremented, and finally
* the bits are reversed again.
*
- * This strategy is needed because the hash table may be resized from one
- * call to the other call of the same iteration.
+ * This strategy is needed because the hash table may be resized between
+ * iteration calls.
*
* dict.c hash tables are always power of two in size, and they
* use chaining, so the position of an element in a given table is given
- * always by computing the bitwise AND between Hash(key) and SIZE-1
+ * by computing the bitwise AND between Hash(key) and SIZE-1
* (where SIZE-1 is always the mask that is equivalent to taking the rest
* of the division between the Hash of the key and SIZE).
*
* For example if the current hash table size is 16, the mask is
- * (in binary) 1111. The position of a key in the hash table will be always
+ * (in binary) 1111. The position of a key in the hash table will always be
* the last four bits of the hash output, and so forth.
*
* WHAT HAPPENS IF THE TABLE CHANGES IN SIZE?
*
- * If the hash table grows, elements can go anyway in one multiple of
- * the old bucket: for example let's say that we already iterated with
- * a 4 bit cursor 1100, since the mask is 1111 (hash table size = 16).
+ * If the hash table grows, elements can go anywhere in one multiple of
+ * the old bucket: for example let's say we already iterated with
+ * a 4 bit cursor 1100 (the mask is 1111 because hash table size = 16).
*
- * If the hash table will be resized to 64 elements, and the new mask will
- * be 111111, the new buckets that you obtain substituting in ??1100
- * either 0 or 1, can be targeted only by keys that we already visited
+ * If the hash table will be resized to 64 elements, then the new mask will
+ * be 111111. The new buckets you obtain by substituting in ??1100
+ * with either 0 or 1 can be targeted only by keys we already visited
* when scanning the bucket 1100 in the smaller hash table.
*
* By iterating the higher bits first, because of the inverted counter, the
- * cursor does not need to restart if the table size gets bigger, and will
- * just continue iterating with cursors that don't have '1100' at the end,
- * nor any other combination of final 4 bits already explored.
+ * cursor does not need to restart if the table size gets bigger. It will
+ * continue iterating using cursors without '1100' at the end, and also
+ * without any other combination of the final 4 bits already explored.
*
* Similarly when the table size shrinks over time, for example going from
- * 16 to 8, If a combination of the lower three bits (the mask for size 8
- * is 111) was already completely explored, it will not be visited again
- * as we are sure that, we tried for example, both 0111 and 1111 (all the
+ * 16 to 8, if a combination of the lower three bits (the mask for size 8
+ * is 111) were already completely explored, it would not be visited again
+ * because we are sure we tried, for example, both 0111 and 1111 (all the
* variations of the higher bit) so we don't need to test it again.
*
* WAIT... YOU HAVE *TWO* TABLES DURING REHASHING!
*
- * Yes, this is true, but we always iterate the smaller one of the tables,
- * testing also all the expansions of the current cursor into the larger
- * table. So for example if the current cursor is 101 and we also have a
+ * Yes, this is true, but we always iterate the smaller table first, then
+ * we test all the expansions of the current cursor into the larger
+ * table. For example if the current cursor is 101 and we also have a
* larger table of size 16, we also test (0)101 and (1)101 inside the larger
* table. This reduces the problem back to having only one table, where
- * the larger one, if exists, is just an expansion of the smaller one.
+ * the larger one, if it exists, is just an expansion of the smaller one.
*
* LIMITATIONS
*
@@ -783,11 +783,11 @@ static unsigned long rev(unsigned long v) {
*
* The disadvantages resulting from this design are:
*
- * 1) It is possible that we return duplicated elements. However this is usually
+ * 1) It is possible we return elements more than once. However this is usually
* easy to deal with in the application level.
* 2) The iterator must return multiple elements per call, as it needs to always
* return all the keys chained in a given bucket, and all the expansions, so
- * we are sure we don't miss keys moving.
+ * we are sure we don't miss keys moving during rehashing.
* 3) The reverse cursor is somewhat hard to understand at first, but this
* comment is supposed to help.
*/
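The dictScan() comment above describes the reverse-binary-iteration cursor: set the bits above the table mask, reverse the cursor's bits, increment, reverse back. A small standalone sketch of just that cursor step follows; rev() here is a plain loop rather than dict.c's implementation, and the demo only shows the visit order for a table of size 8.

    /* Sketch only: the reverse-binary cursor step described above, on its own. */
    #include <stdio.h>

    /* Reverse all bits of an unsigned long. */
    static unsigned long rev(unsigned long v) {
        unsigned long r = 0;
        for (unsigned i = 0; i < sizeof(v) * 8; i++) {
            r = (r << 1) | (v & 1);
            v >>= 1;
        }
        return r;
    }

    /* One cursor step: set the bits above the mask so the carry propagates out,
     * reverse, increment, reverse back. */
    static unsigned long next_cursor(unsigned long v, unsigned long mask) {
        v |= ~mask;
        v = rev(v);
        v++;
        return rev(v);
    }

    int main(void) {
        unsigned long mask = 8 - 1;     /* hash table of size 8 */
        unsigned long v = 0;

        do {                            /* visits buckets 0 4 2 6 1 5 3 7 */
            printf("%lu ", v & mask);
            v = next_cursor(v, mask);
        } while (v != 0);
        printf("\n");
        return 0;
    }

Incrementing from the high-order bits first is what lets the scan keep going correctly when the table grows or shrinks between calls, as the comment explains.
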
diff --git a/src/networking.c b/src/networking.c
index fb49b7964..c7b1c9ba7 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -1230,9 +1230,9 @@ void formatPeerId(char *peerid, size_t peerid_len, char *ip, int port) {
}
/* A Redis "Peer ID" is a colon separated ip:port pair.
- * For IPv4 it's in the form x.y.z.k:pork, example: "127.0.0.1:1234".
+ * For IPv4 it's in the form x.y.z.k:port, example: "127.0.0.1:1234".
* For IPv6 addresses we use [] around the IP part, like in "[::1]:1234".
- * For Unix socekts we use path:0, like in "/tmp/redis:0".
+ * For Unix sockets we use path:0, like in "/tmp/redis:0".
*
* A Peer ID always fits inside a buffer of REDIS_PEER_ID_LEN bytes, including
* the null term.
@@ -1259,7 +1259,7 @@ int genClientPeerId(redisClient *client, char *peerid, size_t peerid_len) {
}
/* This function returns the client peer id, by creating and caching it
- * if client->perrid is NULL, otherwise returning the cached value.
+ * if client->peerid is NULL, otherwise returning the cached value.
* The Peer ID never changes during the life of the client, however it
* is expensive to compute. */
char *getClientPeerId(redisClient *c) {
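The networking.c hunk above spells out the three Peer ID shapes: "x.y.z.k:port" for IPv4, "[ip]:port" for IPv6 and "path:0" for Unix sockets. A minimal sketch of a formatter following those rules; the helper name, the ':'-based IPv6 check and the port==0 convention for Unix sockets are assumptions of this example, not how formatPeerId/genClientPeerId decide.

    /* Sketch only: format the three Peer ID shapes listed above. */
    #include <stdio.h>
    #include <string.h>

    static void format_peer_id(char *out, size_t len, const char *ip, int port) {
        if (port == 0)
            snprintf(out, len, "%s:0", ip);          /* Unix socket path */
        else if (strchr(ip, ':') != NULL)
            snprintf(out, len, "[%s]:%d", ip, port); /* IPv6 gets brackets */
        else
            snprintf(out, len, "%s:%d", ip, port);   /* plain IPv4 */
    }

    int main(void) {
        char buf[128];
        format_peer_id(buf, sizeof(buf), "127.0.0.1", 1234); puts(buf); /* 127.0.0.1:1234 */
        format_peer_id(buf, sizeof(buf), "::1", 1234);       puts(buf); /* [::1]:1234 */
        format_peer_id(buf, sizeof(buf), "/tmp/redis", 0);   puts(buf); /* /tmp/redis:0 */
        return 0;
    }
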
diff --git a/src/rdb.c b/src/rdb.c
index afaef2681..4d789bc2b 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -475,7 +475,7 @@ int rdbLoadObjectType(rio *rdb) {
return type;
}
-/* Save a Redis object. Returns -1 on error, 0 on success. */
+/* Save a Redis object. Returns -1 on error, number of bytes written on success. */
int rdbSaveObject(rio *rdb, robj *o) {
int n, nwritten = 0;
diff --git a/src/redis.c b/src/redis.c
index 5ce7d1d93..e7faa8859 100644
--- a/src/redis.c
+++ b/src/redis.c
@@ -407,7 +407,7 @@ void exitFromChild(int retcode) {
/*====================== Hash table type implementation ==================== */
/* This is a hash table type that uses the SDS dynamic strings library as
- * keys and radis objects as values (objects can hold SDS strings,
+ * keys and redis objects as values (objects can hold SDS strings,
* lists, sets). */
void dictVanillaFree(void *privdata, void *val)
@@ -1978,7 +1978,7 @@ void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc,
}
/* It is possible to call the function forceCommandPropagation() inside a
- * Redis command implementaiton in order to to force the propagation of a
+ * Redis command implementation in order to to force the propagation of a
* specific command execution into AOF / Replication. */
void forceCommandPropagation(redisClient *c, int flags) {
if (flags & REDIS_PROPAGATE_REPL) c->flags |= REDIS_FORCE_REPL;
diff --git a/src/redis.h b/src/redis.h
index 7ad03dc9b..a1ae0f2bc 100644
--- a/src/redis.h
+++ b/src/redis.h
@@ -499,7 +499,7 @@ typedef struct readyList {
} readyList;
/* With multiplexing we need to take per-client state.
- * Clients are taken in a liked list. */
+ * Clients are taken in a linked list. */
typedef struct redisClient {
uint64_t id; /* Client incremental unique ID. */
int fd;
diff --git a/src/replication.c b/src/replication.c
index ff0a0141a..16014c8a9 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -82,7 +82,7 @@ void resizeReplicationBacklog(long long newsize) {
server.repl_backlog = zmalloc(server.repl_backlog_size);
server.repl_backlog_histlen = 0;
server.repl_backlog_idx = 0;
- /* Next byte we have is... the next since the buffer is emtpy. */
+ /* Next byte we have is... the next since the buffer is empty. */
server.repl_backlog_off = server.master_repl_offset+1;
}
}
@@ -200,7 +200,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
/* We need to feed the buffer with the object as a bulk reply
* not just as a plain string, so create the $..CRLF payload len
- * ad add the final CRLF */
+ * and add the final CRLF */
aux[0] = '$';
len = ll2string(aux+1,sizeof(aux)-1,objlen);
aux[len+1] = '\r';
@@ -376,7 +376,7 @@ int masterTryPartialResynchronization(redisClient *c) {
listAddNodeTail(server.slaves,c);
/* We can't use the connection buffers since they are used to accumulate
* new commands at this stage. But we are sure the socket send buffer is
- * emtpy so this write will never fail actually. */
+ * empty so this write will never fail actually. */
buflen = snprintf(buf,sizeof(buf),"+CONTINUE\r\n");
if (write(c->fd,buf,buflen) != buflen) {
freeClientAsync(c);
@@ -408,7 +408,7 @@ need_full_resync:
return REDIS_ERR;
}
-/* SYNC ad PSYNC command implemenation. */
+/* SYNC and PSYNC command implemenation. */
void syncCommand(redisClient *c) {
/* ignore SYNC if already slave or in monitor mode */
if (c->flags & REDIS_SLAVE) return;
@@ -1460,7 +1460,7 @@ void replicationDiscardCachedMaster(void) {
/* Turn the cached master into the current master, using the file descriptor
* passed as argument as the socket for the new master.
*
- * This funciton is called when successfully setup a partial resynchronization
+ * This function is called when successfully setup a partial resynchronization
* so the stream of data that we'll receive will start from were this
* master left. */
void replicationResurrectCachedMaster(int newfd) {
@@ -1750,7 +1750,7 @@ long long replicationGetSlaveOffset(void) {
/* --------------------------- REPLICATION CRON ---------------------------- */
-/* Replication cron funciton, called 1 time per second. */
+/* Replication cron function, called 1 time per second. */
void replicationCron(void) {
/* Non blocking connection timeout? */
if (server.masterhost &&
diff --git a/src/scripting.c b/src/scripting.c
index 5cd0b299c..77a98abdc 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -717,7 +717,7 @@ void scriptingInit(void) {
server.lua_client->flags |= REDIS_LUA_CLIENT;
}
- /* Lua beginners ofter don't use "local", this is likely to introduce
+ /* Lua beginners often don't use "local", this is likely to introduce
* subtle bugs in their code. To prevent problems we protect accesses
* to global variables. */
scriptingEnableGlobalsProtection(lua);
diff --git a/src/sds.c b/src/sds.c
index 95454e997..0ad925b4a 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -43,7 +43,7 @@
* The string is always null-termined (all the sds strings are, always) so
* even if you create an sds string with:
*
- * mystring = sdsnewlen("abc",3");
+ * mystring = sdsnewlen("abc",3);
*
* You can print the string with printf() as there is an implicit \0 at the
* end of the string. However the string is binary safe and can contain
@@ -109,7 +109,7 @@ void sdsupdatelen(sds s) {
sh->len = reallen;
}
-/* Modify an sds string on-place to make it empty (zero length).
+/* Modify an sds string in-place to make it empty (zero length).
* However all the existing buffer is not discarded but set as free space
* so that next append operations will not require allocations up to the
* number of bytes previously available. */
diff --git a/src/sentinel.c b/src/sentinel.c
index 06f53c128..8e78a2263 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -2106,7 +2106,7 @@ void sentinelPublishReplyCallback(redisAsyncContext *c, void *reply, void *privd
* or sent directly to this sentinel via the (fake) PUBLISH command of Sentinel.
*
* If the master name specified in the message is not known, the message is
- * discareded. */
+ * discarded. */
void sentinelProcessHelloMessage(char *hello, int hello_len) {
/* Format is composed of 8 tokens:
* 0=ip,1=port,2=runid,3=current_epoch,4=master_name,
diff --git a/src/util.c b/src/util.c
index 1b1798658..80242ff71 100644
--- a/src/util.c
+++ b/src/util.c
@@ -385,7 +385,7 @@ int string2l(const char *s, size_t slen, long *lval) {
}
/* Convert a double to a string representation. Returns the number of bytes
- * required. The representation should always be parsable by stdtod(3). */
+ * required. The representation should always be parsable by strtod(3). */
int d2string(char *buf, size_t len, double value) {
if (isnan(value)) {
len = snprintf(buf,len,"nan");
diff --git a/src/ziplist.c b/src/ziplist.c
index 4a0111105..64a22adfc 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -183,7 +183,7 @@ static unsigned int zipIntSize(unsigned char encoding) {
return 0;
}
-/* Encode the length 'l' writing it in 'p'. If p is NULL it just returns
+/* Encode the length 'rawlen' writing it in 'p'. If p is NULL it just returns
* the amount of bytes required to encode such a length. */
static unsigned int zipEncodeLength(unsigned char *p, unsigned char encoding, unsigned int rawlen) {
unsigned char len = 1, buf[5];
@@ -739,8 +739,8 @@ unsigned char *ziplistPrev(unsigned char *zl, unsigned char *p) {
}
}
-/* Get entry pointed to by 'p' and store in either 'e' or 'v' depending
- * on the encoding of the entry. 'e' is always set to NULL to be able
+/* Get entry pointed to by 'p' and store in either '*sstr' or 'sval' depending
+ * on the encoding of the entry. '*sstr' is always set to NULL to be able
* to find out whether the string pointer or the integer value was set.
* Return 0 if 'p' points to the end of the ziplist, 1 otherwise. */
unsigned int ziplistGet(unsigned char *p, unsigned char **sstr, unsigned int *slen, long long *sval) {
@@ -788,7 +788,8 @@ unsigned char *ziplistDeleteRange(unsigned char *zl, unsigned int index, unsigne
return (p == NULL) ? zl : __ziplistDelete(zl,p,num);
}
-/* Compare entry pointer to by 'p' with 'entry'. Return 1 if equal. */
+/* Compare entry pointer to by 'p' with 'sstr' of length 'slen'. */
+/* Return 1 if equal. */
unsigned int ziplistCompare(unsigned char *p, unsigned char *sstr, unsigned int slen) {
zlentry entry;
unsigned char sencoding;