author     Bruce Mitchener <bruce.mitchener@gmail.com>   2014-08-20 18:43:45 +0700
committer  Bruce Mitchener <bruce.mitchener@gmail.com>   2014-08-20 18:43:45 +0700
commit     c35b00cd6cda54cf89ab331725b8681c2f497c91 (patch)
tree       02b0aceaf1d7ee4271a526c32b010f2bf42b4b74
parent     57ee57091f4b3189a682c8585a1866d5e74340d1 (diff)
download   nanomsg-c35b00cd6cda54cf89ab331725b8681c2f497c91.tar.gz
Fix many typos.
-rw-r--r--  doc/nn_cmsg.txt                     2
-rw-r--r--  doc/nn_poll.txt                     2
-rw-r--r--  doc/nn_pubsub.txt                   2
-rw-r--r--  doc/nn_symbol_info.txt              2
-rw-r--r--  rfc/sp-protocol-ids-01.txt          2
-rw-r--r--  rfc/sp-protocol-ids-01.xml          2
-rw-r--r--  rfc/sp-publish-subscribe-01.txt     3
-rw-r--r--  rfc/sp-publish-subscribe-01.xml     2
-rw-r--r--  rfc/sp-request-reply-01.txt        28
-rw-r--r--  rfc/sp-request-reply-01.xml        28
-rw-r--r--  rfc/sp-tcp-mapping-01.txt           6
-rw-r--r--  rfc/sp-tcp-mapping-01.xml           8
-rw-r--r--  rfc/sp-tls-mapping-01.txt           4
-rw-r--r--  rfc/sp-tls-mapping-01.xml           4
-rw-r--r--  src/core/sock.c                     2
-rw-r--r--  src/nn.h                            2
-rw-r--r--  src/protocol.h                      6
-rw-r--r--  src/protocols/survey/surveyor.c     2
-rw-r--r--  src/protocols/utils/priolist.h      2
-rw-r--r--  src/transports/ipc/sipc.h           2
-rw-r--r--  src/transports/tcp/stcp.h           2
-rw-r--r--  src/transports/utils/streamhdr.h    2
-rw-r--r--  tools/nanocat.c                     2
23 files changed, 58 insertions, 59 deletions
diff --git a/doc/nn_cmsg.txt b/doc/nn_cmsg.txt
index 89e8236..23eb9ad 100644
--- a/doc/nn_cmsg.txt
+++ b/doc/nn_cmsg.txt
@@ -25,7 +25,7 @@ DESCRIPTION
These functions can be used to iterate over ancillary data attached to a message.
-Structure 'nn_cmsghdr' represents a single anciallary property and contains following members:
+Structure 'nn_cmsghdr' represents a single ancillary property and contains following members:
size_t cmsg_len;
int cmsg_level;
diff --git a/doc/nn_poll.txt b/doc/nn_poll.txt
index 9ec5b99..3a21223 100644
--- a/doc/nn_poll.txt
+++ b/doc/nn_poll.txt
@@ -71,7 +71,7 @@ nn_poll is a convenience function. You can achieve same behaviour by using
NN_RCVFD and NN_SNDFD socket options. However, using the socket options
allows for usage that's not possible with nn_poll, such as simultaneous polling
for both SP and OS-level sockets, integration of SP sockets with external event
-loops et c.
+loops etc.
EXAMPLE
-------
diff --git a/doc/nn_pubsub.txt b/doc/nn_pubsub.txt
index 7eb7083..dd585c7 100644
--- a/doc/nn_pubsub.txt
+++ b/doc/nn_pubsub.txt
@@ -36,7 +36,7 @@ Topic with zero length matches any message.
If the socket is subscribed to multiple topics, message matching any of them
will be delivered to the user.
-The entire message, including the the topic, is delivered to the user.
+The entire message, including the topic, is delivered to the user.
Socket Types
~~~~~~~~~~~~
diff --git a/doc/nn_symbol_info.txt b/doc/nn_symbol_info.txt
index a741fb6..b67ed8f 100644
--- a/doc/nn_symbol_info.txt
+++ b/doc/nn_symbol_info.txt
@@ -27,7 +27,7 @@ struct nn_symbol_properties {
/* The constant value */
int value;
- /* The contant name */
+ /* The constant name */
const char* name;
/* The constant namespace, or zero for namespaces themselves */
diff --git a/rfc/sp-protocol-ids-01.txt b/rfc/sp-protocol-ids-01.txt
index 024f3f0..2435e1d 100644
--- a/rfc/sp-protocol-ids-01.txt
+++ b/rfc/sp-protocol-ids-01.txt
@@ -72,7 +72,7 @@ Internet-Draft List of SP protocol IDs June 2014
Protocol IDs denote the SP protocol used (such as request/reply or
publish/subscribe), while endpoint role determines the role of the
endpoint within the topology (requester vs. replier, publisher vs.
- subscriber et c.) Both numbers are in network byte order.
+ subscriber etc.) Both numbers are in network byte order.
Protocol IDs are global, while endpoint roles are specific to any
given protocol. As such, protocol IDs are defined in this document,
diff --git a/rfc/sp-protocol-ids-01.xml b/rfc/sp-protocol-ids-01.xml
index 736a22e..064cc16 100644
--- a/rfc/sp-protocol-ids-01.xml
+++ b/rfc/sp-protocol-ids-01.xml
@@ -50,7 +50,7 @@
<t>Protocol IDs denote the SP protocol used (such as request/reply or
publish/subscribe), while endpoint role determines the role of the
endpoint within the topology (requester vs. replier, publisher vs.
- subscriber et c.) Both numbers are in network byte order.</t>
+ subscriber etc.) Both numbers are in network byte order.</t>
<t>Protocol IDs are global, while endpoint roles are specific to any given
protocol. As such, protocol IDs are defined in this document, while
diff --git a/rfc/sp-publish-subscribe-01.txt b/rfc/sp-publish-subscribe-01.txt
index de4410d..6585b62 100644
--- a/rfc/sp-publish-subscribe-01.txt
+++ b/rfc/sp-publish-subscribe-01.txt
@@ -71,8 +71,7 @@ Internet-Draft Publish/Subscribe SP May 2014
arbitrarily complex topology rather than of a single node-to-node
communication, several underlying protocols can be used in parallel.
For example, publisher can send a message to intermediary node via
- TCP. The intermediate node can then forward the message via PGM et
- c.
+ TCP. The intermediate node can then forward the message via PGM etc.
+---+ TCP +---+ PGM +---+
| |----------->| |---------->| |
diff --git a/rfc/sp-publish-subscribe-01.xml b/rfc/sp-publish-subscribe-01.xml
index 6259d89..06732ac 100644
--- a/rfc/sp-publish-subscribe-01.xml
+++ b/rfc/sp-publish-subscribe-01.xml
@@ -53,7 +53,7 @@
arbitrarily complex topology rather than of a single node-to-node
communication, several underlying protocols can be used in parallel.
For example, publisher can send a message to intermediary node via TCP.
- The intermediate node can then forward the message via PGM et c.</t>
+ The intermediate node can then forward the message via PGM etc.</t>
<figure>
<artwork>
diff --git a/rfc/sp-request-reply-01.txt b/rfc/sp-request-reply-01.txt
index 5a95faa..71664d8 100644
--- a/rfc/sp-request-reply-01.txt
+++ b/rfc/sp-request-reply-01.txt
@@ -85,7 +85,7 @@ Internet-Draft Request/Reply SP August 2013
no matter what instance of the service have computed it.
Service that accepts empty requests and produces the number of
- requests processed so far (1, 2, 3 et c.), on the other hand, is not
+ requests processed so far (1, 2, 3 etc.), on the other hand, is not
stateless. To prove it you can run two instances of the service.
First reply, no matter which instance produces it is going to be 1.
Second reply though is going to be either 2 (if processed by the same
@@ -153,7 +153,7 @@ Internet-Draft Request/Reply SP August 2013
"enterprise service bus" model. In the simplest case the bus can
be implemented as a simple hub-and-spokes topology. In complex
cases the bus can span multiple physical locations or multiple
- oraganisations with intermediate nodes at the boundaries
+ organisations with intermediate nodes at the boundaries
connecting different parts of the topology.
In addition to distributing tasks to processing nodes, request/reply
@@ -183,9 +183,9 @@ Internet-Draft Request/Reply SP August 2013
As can be seen from the above, one request may be processed multiple
times. For example, reply may be lost on its way back to the client.
Client will assume that the request was not processed yet, it will
- resend it and thus cause duplicit execution of the task.
+ resend it and thus cause duplicate execution of the task.
- Some applications may want to prevent duplicit execution of tasks.
+ Some applications may want to prevent duplicate execution of tasks.
It often turns out that hardening such applications to be idempotent
is relatively easy as they already possess the tools to do so. For
example, a payment processing server already has access to a shared
@@ -193,8 +193,8 @@ Internet-Draft Request/Reply SP August 2013
ID was not yet processed.
On the other hand, many applications don't care about occasional
- duplicitly processed tasks. Therefore, request/reply protocol does
- not require the service to be idempotent. Instead, the idempotancy
+ duplicate processed tasks. Therefore, request/reply protocol does
+ not require the service to be idempotent. Instead, the idempotence
issue is left to the user to decide on.
Finally, it should be noted that this specification discusses several
@@ -213,7 +213,7 @@ Internet-Draft Request/Reply SP August 2013
communication, several underlying protocols can be used in parallel.
For example, a client may send a request via WebSocket, then, on the
edge of the company network an intermediary node may retransmit it
- using TCP et c.
+ using TCP etc.
@@ -288,14 +288,14 @@ Internet-Draft Request/Reply SP August 2013
Thus, when a node is about to send a request, it can choose to send
it only to one of the channels that don't report pushback at the
- moment. To implement approximately fair distibution of the workload
+ moment. To implement approximately fair distribution of the workload
the node choses a channel from that pool using the round-robin
algorithm.
As for delivering replies back to the clients, it should be
understood that the client may not be directly accessible (say using
TCP/IP) from the processing node. It may be beyond a firewall, have
- no static IP address et c. Furthermore, the client and the processing
+ no static IP address etc. Furthermore, the client and the processing
may not even speak the same transport protocol -- imagine client
connecting to the topology using WebSockets and processing node via
SCTP.
@@ -317,7 +317,7 @@ Internet-Draft Request/Reply SP August 2013
The upside, on the other hand, is that the nodes in the topology
don't have to maintain any routing tables beside the simple table of
- adjacent channels along with thier IDs. There's also no need for any
+ adjacent channels along with their IDs. There's also no need for any
additional protocols for distributing routing information within the
topology.
@@ -381,7 +381,7 @@ Internet-Draft Request/Reply SP August 2013
tag. That allows the algorithm to find out where the tags end and
where the message payload begins.
- As for the reamining 31 bits, they are either request ID (in the last
+ As for the remaining 31 bits, they are either request ID (in the last
tag) or a channel ID (in all the remaining tags). The first channel
ID is added and processed by the REP endpoint closest to the
processing node. The last channel ID is added and processed by the
@@ -511,7 +511,7 @@ Internet-Draft Request/Reply SP August 2013
responsive. It can be thought of as a crude scheduling algorithm.
However crude though, it's probably still the best you can get
without knowing estimates of execution time for individual tasks, CPU
- capacity of individual processing nodes et c.
+ capacity of individual processing nodes etc.
Alternatively, backpressure can be thought of as a congestion control
mechanism. When all available processing nodes are busy, it slows
@@ -694,7 +694,7 @@ Internet-Draft Request/Reply SP August 2013
If the request is successfully sent, the endpoint stores the request
including its request ID, so that it can be resent later on if
needed. At the same time it sets up a timer to trigger the re-
- transimission in case the reply is not received within a specified
+ transmission in case the reply is not received within a specified
timeout. The user MUST be allowed to specify the timeout interval.
The default timeout interval must be 60 seconds.
@@ -795,7 +795,7 @@ Internet-Draft Request/Reply SP August 2013
legitimate setups can cause loop to be created.
With no additional guards against the loops, it's likely that
- requests will be caugth inside the loop, rotating there forever, each
+ requests will be caught inside the loop, rotating there forever, each
message gradually growing in size as new prefixes are added to it by
each REP endpoint on the way. Eventually, a loop can cause
congestion and bring the whole system to a halt.
diff --git a/rfc/sp-request-reply-01.xml b/rfc/sp-request-reply-01.xml
index d8273f7..b7fcab9 100644
--- a/rfc/sp-request-reply-01.xml
+++ b/rfc/sp-request-reply-01.xml
@@ -67,7 +67,7 @@
no matter what instance of the service have computed it.</t>
<t>Service that accepts empty requests and produces the number
- of requests processed so far (1, 2, 3 et c.), on the other hand, is
+ of requests processed so far (1, 2, 3 etc.), on the other hand, is
not stateless. To prove it you can run two instances of the service.
First reply, no matter which instance produces it is going to be 1.
Second reply though is going to be either 2 (if processed by the same
@@ -123,7 +123,7 @@
The "enterprise service bus" model. In the simplest case the bus
can be implemented as a simple hub-and-spokes topology. In complex
cases the bus can span multiple physical locations or multiple
- oraganisations with intermediate nodes at the boundaries connecting
+ organisations with intermediate nodes at the boundaries connecting
different parts of the topology.</t>
</list>
@@ -149,9 +149,9 @@
<t>As can be seen from the above, one request may be processed multiple
times. For example, reply may be lost on its way back to the client.
Client will assume that the request was not processed yet, it will
- resend it and thus cause duplicit execution of the task.</t>
+ resend it and thus cause duplicate execution of the task.</t>
- <t>Some applications may want to prevent duplicit execution of tasks. It
+ <t>Some applications may want to prevent duplicate execution of tasks. It
often turns out that hardening such applications to be idempotent is
relatively easy as they already possess the tools to do so. For
example, a payment processing server already has access to a shared
@@ -159,8 +159,8 @@
was not yet processed.</t>
<t>On the other hand, many applications don't care about occasional
- duplicitly processed tasks. Therefore, request/reply protocol does not
- require the service to be idempotent. Instead, the idempotancy issue
+ duplicate processed tasks. Therefore, request/reply protocol does not
+ require the service to be idempotent. Instead, the idempotence issue
is left to the user to decide on.</t>
<t>Finally, it should be noted that this specification discusses several
@@ -182,7 +182,7 @@
communication, several underlying protocols can be used in parallel.
For example, a client may send a request via WebSocket, then, on the
edge of the company network an intermediary node may retransmit it
- using TCP et c.</t>
+ using TCP etc.</t>
<figure>
<artwork>
@@ -248,14 +248,14 @@
<t>Thus, when a node is about to send a request, it can choose to send
it only to one of the channels that don't report pushback at the
- moment. To implement approximately fair distibution of the workload
+ moment. To implement approximately fair distribution of the workload
the node choses a channel from that pool using the round-robin
algorithm.</t>
<t>As for delivering replies back to the clients, it should be understood
that the client may not be directly accessible (say using TCP/IP) from
the processing node. It may be beyond a firewall, have no static IP
- address et c. Furthermore, the client and the processing may not even
+ address etc. Furthermore, the client and the processing may not even
speak the same transport protocol -- imagine client connecting to the
topology using WebSockets and processing node via SCTP.</t>
@@ -276,7 +276,7 @@
<t>The upside, on the other hand, is that the nodes in the topology don't
have to maintain any routing tables beside the simple table of
- adjacent channels along with thier IDs. There's also no need for any
+ adjacent channels along with their IDs. There's also no need for any
additional protocols for distributing routing information within
the topology.</t>
@@ -334,7 +334,7 @@
That allows the algorithm to find out where the tags end and where
the message payload begins.</t>
- <t>As for the reamining 31 bits, they are either request ID (in the last
+ <t>As for the remaining 31 bits, they are either request ID (in the last
tag) or a channel ID (in all the remaining tags). The first channel ID
is added and processed by the REP endpoint closest to the processing
node. The last channel ID is added and processed by the REP endpoint
@@ -445,7 +445,7 @@
responsive. It can be thought of as a crude scheduling algorithm.
However crude though, it's probably still the best you can get
without knowing estimates of execution time for individual tasks,
- CPU capacity of individual processing nodes et c.</t>
+ CPU capacity of individual processing nodes etc.</t>
<t>Alternatively, backpressure can be thought of as a congestion control
mechanism. When all available processing nodes are busy, it slows
@@ -613,7 +613,7 @@
<t>If the request is successfully sent, the endpoint stores the request
including its request ID, so that it can be resent later on if
needed. At the same time it sets up a timer to trigger the
- re-transimission in case the reply is not received within a specified
+ re-transmission in case the reply is not received within a specified
timeout. The user MUST be allowed to specify the timeout interval.
The default timeout interval must be 60 seconds.</t>
@@ -704,7 +704,7 @@
legitimate setups can cause loop to be created.</t>
<t>With no additional guards against the loops, it's likely that
- requests will be caugth inside the loop, rotating there forever,
+ requests will be caught inside the loop, rotating there forever,
each message gradually growing in size as new prefixes are added to it
by each REP endpoint on the way. Eventually, a loop can cause
congestion and bring the whole system to a halt.</t>
diff --git a/rfc/sp-tcp-mapping-01.txt b/rfc/sp-tcp-mapping-01.txt
index 95bb61e..4ce2361 100644
--- a/rfc/sp-tcp-mapping-01.txt
+++ b/rfc/sp-tcp-mapping-01.txt
@@ -98,7 +98,7 @@ Internet-Draft TCP mapping for SPs March 2014
The fact that the first byte of the protocol header is binary zero
eliminates any text-based protocols that were accidentally connected
- to the endpiont. Subsequent two bytes make the check even more
+ to the endpoint. Subsequent two bytes make the check even more
rigorous. At the same time they can be used as a debugging hint to
indicate that the connection is supposed to use one of the
scalability protocols -- ASCII representation of these bytes is 'SP'
@@ -143,7 +143,7 @@ Internet-Draft TCP mapping for SPs March 2014
+------------+-----------------+
It may seem that 64 bit message size is excessive and consumes too
- much of valueable bandwidth, especially given that most scenarios
+ much of valuable bandwidth, especially given that most scenarios
call for relatively small messages, in order of bytes or kilobytes.
Variable length field may seem like a better solution, however, our
@@ -154,7 +154,7 @@ Internet-Draft TCP mapping for SPs March 2014
portion of the message and the performance impact is not even
measurable.
- For small messages, the overal throughput is heavily CPU-bound, never
+ For small messages, the overall throughput is heavily CPU-bound, never
I/O-bound. In other words, CPU processing associated with each
individual message limits the message rate in such a way that network
bandwidth limit is never reached. In the future we expect it to be
diff --git a/rfc/sp-tcp-mapping-01.xml b/rfc/sp-tcp-mapping-01.xml
index d15e2e8..06675d0 100644
--- a/rfc/sp-tcp-mapping-01.xml
+++ b/rfc/sp-tcp-mapping-01.xml
@@ -27,7 +27,7 @@
<abstract>
<t>This document defines the TCP mapping for scalability protocols.
The main purpose of the mapping is to turn the stream of bytes
- into stream of messages. Additionaly, the mapping provides some
+ into stream of messages. Additionally, the mapping provides some
additional checks during the connection establishment phase.</t>
</abstract>
@@ -80,7 +80,7 @@
<t>The fact that the first byte of the protocol header is binary zero
eliminates any text-based protocols that were accidentally connected
- to the endpiont. Subsequent two bytes make the check even more
+ to the endpoint. Subsequent two bytes make the check even more
rigorous. At the same time they can be used as a debugging hint to
indicate that the connection is supposed to use one of the scalability
protocols -- ASCII representation of these bytes is 'SP' that can
@@ -123,7 +123,7 @@
</figure>
<t>It may seem that 64 bit message size is excessive and consumes too much
- of valueable bandwidth, especially given that most scenarios call for
+ of valuable bandwidth, especially given that most scenarios call for
relatively small messages, in order of bytes or kilobytes.</t>
<t>Variable length field may seem like a better solution, however, our
@@ -133,7 +133,7 @@
<t>For large messages, 64 bits used by the field form a negligible portion
of the message and the performance impact is not even measurable.</t>
- <t>For small messages, the overal throughput is heavily CPU-bound, never
+ <t>For small messages, the overall throughput is heavily CPU-bound, never
I/O-bound. In other words, CPU processing associated with each
individual message limits the message rate in such a way that network
bandwidth limit is never reached. In the future we expect it to be
diff --git a/rfc/sp-tls-mapping-01.txt b/rfc/sp-tls-mapping-01.txt
index fff09f7..64b7127 100644
--- a/rfc/sp-tls-mapping-01.txt
+++ b/rfc/sp-tls-mapping-01.txt
@@ -69,7 +69,7 @@ Internet-Draft TLS/TCP mapping for SPs March 2014
details as discussed here.
As when running SP over TCP directly, the TCP port number is
- determined by the applicaton or user.
+ determined by the application or user.
This mapping follows the details of SP over TCP [SPoverTCP].
@@ -80,7 +80,7 @@ Internet-Draft TLS/TCP mapping for SPs March 2014
of the connection, including negotiation of cipher suites, exchanging
keys, and possibly performing one or two-way authentication.
- The specific details of the TLS negotiation are determined by the the
+ The specific details of the TLS negotiation are determined by the
application(s) involved, and are not specified here. This includes
selection of the specific version of TLS or possibly falling back to
SSL version 3 (but not SSL version 1 or 2).
diff --git a/rfc/sp-tls-mapping-01.xml b/rfc/sp-tls-mapping-01.xml
index 6cbc247..0028a33 100644
--- a/rfc/sp-tls-mapping-01.xml
+++ b/rfc/sp-tls-mapping-01.xml
@@ -44,7 +44,7 @@
and should follow the same details as discussed here.</t>
<t>As when running SP over TCP directly, the TCP port number is
- determined by the applicaton or user.</t>
+ determined by the application or user.</t>
<t>This mapping follows the details of
<xref target='SPoverTCP'>SP over TCP</xref>.</t>
@@ -59,7 +59,7 @@
keys, and possibly performing one or two-way authentication.</t>
<t>The specific details of the TLS negotiation are determined by the
- the application(s) involved, and are not specified here. This includes
+ application(s) involved, and are not specified here. This includes
selection of the specific version of TLS or possibly falling back to
SSL version 3 (but not SSL version 1 or 2).</t>
diff --git a/src/core/sock.c b/src/core/sock.c
index ec7896e..fd4973c 100644
--- a/src/core/sock.c
+++ b/src/core/sock.c
@@ -720,7 +720,7 @@ static void nn_sock_onleave (struct nn_ctx *self)
if (nn_slow (sock->state != NN_SOCK_STATE_ACTIVE))
return;
- /* Check whether socket is readable and/or writeable at the moment. */
+ /* Check whether socket is readable and/or writable at the moment. */
events = sock->sockbase->vfptr->events (sock->sockbase);
errnum_assert (events >= 0, -events);
diff --git a/src/nn.h b/src/nn.h
index ca250bc..af8b9bf 100644
--- a/src/nn.h
+++ b/src/nn.h
@@ -233,7 +233,7 @@ struct nn_symbol_properties {
/* The constant value */
int value;
- /* The contant name */
+ /* The constant name */
const char* name;
/* The constant namespace, or zero for namespaces themselves */
diff --git a/src/protocol.h b/src/protocol.h
index 38d9b82..38db31f 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -98,14 +98,14 @@ struct nn_sockbase_vfptr {
to send to or to be received from at the moment. 'rm' unregisters the
pipe. The pipe should not be used after this call as it may already be
deallocated. 'in' informs the socket that pipe is readable. 'out'
- informs it that it is writeable. */
+ informs it that it is writable. */
int (*add) (struct nn_sockbase *self, struct nn_pipe *pipe);
void (*rm) (struct nn_sockbase *self, struct nn_pipe *pipe);
void (*in) (struct nn_sockbase *self, struct nn_pipe *pipe);
void (*out) (struct nn_sockbase *self, struct nn_pipe *pipe);
/* Return any combination of event flags defined above, thus specifying
- whether the socket should be readable, writeable, both or none. */
+ whether the socket should be readable, writable, both or none. */
int (*events) (struct nn_sockbase *self);
/* Send a message to the socket. Returns -EAGAIN if it cannot be done at
@@ -150,7 +150,7 @@ struct nn_ctx *nn_sockbase_getctx (struct nn_sockbase *self);
int nn_sockbase_getopt (struct nn_sockbase *self, int option,
void *optval, size_t *optvallen);
-/* Add some statitistics for socket */
+/* Add some statistics for socket */
void nn_sockbase_stat_increment (struct nn_sockbase *self, int name,
int increment);
diff --git a/src/protocols/survey/surveyor.c b/src/protocols/survey/surveyor.c
index d077857..bf3c86f 100644
--- a/src/protocols/survey/surveyor.c
+++ b/src/protocols/survey/surveyor.c
@@ -174,7 +174,7 @@ static int nn_surveyor_events (struct nn_sockbase *self)
surveyor = nn_cont (self, struct nn_surveyor, xsurveyor.sockbase);
- /* Determine the actual readability/writeability of the socket. */
+ /* Determine the actual readability/writability of the socket. */
rc = nn_xsurveyor_events (&surveyor->xsurveyor.sockbase);
/* If there's no survey going on we'll signal IN to interrupt polling
diff --git a/src/protocols/utils/priolist.h b/src/protocols/utils/priolist.h
index bf8e302..260202d 100644
--- a/src/protocols/utils/priolist.h
+++ b/src/protocols/utils/priolist.h
@@ -92,7 +92,7 @@ int nn_priolist_is_active (struct nn_priolist *self);
struct nn_pipe *nn_priolist_getpipe (struct nn_priolist *self);
/* Moves to the next pipe in the list. If 'release' is set to 1, the current
- pipe is removed from the list. To re-insert it into thr list use
+ pipe is removed from the list. To re-insert it into the list use
nn_priolist_activate function. */
void nn_priolist_advance (struct nn_priolist *self, int release);
diff --git a/src/transports/ipc/sipc.h b/src/transports/ipc/sipc.h
index 2d6d882..4090e2d 100644
--- a/src/transports/ipc/sipc.h
+++ b/src/transports/ipc/sipc.h
@@ -44,7 +44,7 @@ struct nn_sipc {
struct nn_fsm fsm;
int state;
- /* The undelrying socket. */
+ /* The underlying socket. */
struct nn_usock *usock;
/* Child state machine to do protocol header exchange. */
diff --git a/src/transports/tcp/stcp.h b/src/transports/tcp/stcp.h
index ca6a7c1..a158f03 100644
--- a/src/transports/tcp/stcp.h
+++ b/src/transports/tcp/stcp.h
@@ -44,7 +44,7 @@ struct nn_stcp {
struct nn_fsm fsm;
int state;
- /* The undelrying socket. */
+ /* The underlying socket. */
struct nn_usock *usock;
/* Child state machine to do protocol header exchange. */
diff --git a/src/transports/utils/streamhdr.h b/src/transports/utils/streamhdr.h
index 8680dd1..6bd81c0 100644
--- a/src/transports/utils/streamhdr.h
+++ b/src/transports/utils/streamhdr.h
@@ -47,7 +47,7 @@ struct nn_streamhdr {
/* Used to timeout the protocol header exchange. */
struct nn_timer timer;
- /* The undelrying socket. */
+ /* The underlying socket. */
struct nn_usock *usock;
/* The original owner of the underlying socket. */
diff --git a/tools/nanocat.c b/tools/nanocat.c
index 0e2ec1b..ea8d7dc 100644
--- a/tools/nanocat.c
+++ b/tools/nanocat.c
@@ -362,7 +362,7 @@ int nn_create_socket (nn_options_t *options)
nn_assert_errno (rc == 0, "Can't set socket name");
}
- /* Specific intitalization */
+ /* Specific initialization */
switch (options->socket_type) {
case NN_SUB:
nn_sub_init (options, sock);