From f9a1c0de50ee16981fb46ff21efad6784fd74a59 Mon Sep 17 00:00:00 2001 From: schmidt Date: Wed, 3 Feb 1999 01:27:30 +0000 Subject: . --- docs/tutorials/001/acceptor.h | 182 ++++++------ docs/tutorials/001/logger.h | 203 +++++++------ docs/tutorials/001/page02.html | 134 +++++---- docs/tutorials/001/page03.html | 182 ++++++------ docs/tutorials/001/page04.html | 203 +++++++------ docs/tutorials/001/server.cpp | 122 ++++---- docs/tutorials/002/handler.h | 2 +- docs/tutorials/002/page02.html | 85 +++--- docs/tutorials/002/page03.html | 2 +- docs/tutorials/002/server.cpp | 85 +++--- docs/tutorials/003/client.cpp | 159 +++++------ docs/tutorials/003/page01.html | 167 +++++------ docs/tutorials/004/client.cpp | 345 ++++++++++------------ docs/tutorials/004/page01.html | 345 ++++++++++------------ docs/tutorials/005/client_handler.cpp | 370 ++++++++++++------------ docs/tutorials/005/client_handler.h | 3 +- docs/tutorials/005/page02.html | 160 +++++------ docs/tutorials/005/page04.html | 3 +- docs/tutorials/005/page05.html | 370 ++++++++++++------------ docs/tutorials/005/page06.html | 3 + docs/tutorials/005/server.cpp | 160 +++++------ docs/tutorials/006/client_handler.cpp | 487 +++++++++++++++----------------- docs/tutorials/006/client_handler.h | 2 +- docs/tutorials/006/page02.html | 156 +++++----- docs/tutorials/006/page04.html | 2 +- docs/tutorials/006/page05.html | 487 +++++++++++++++----------------- docs/tutorials/006/server.cpp | 156 +++++----- docs/tutorials/007/client_acceptor.cpp | 79 +++--- docs/tutorials/007/client_handler.cpp | 332 ++++++++++------------ docs/tutorials/007/page02.html | 145 +++++----- docs/tutorials/007/page04.html | 79 +++--- docs/tutorials/007/page06.html | 332 ++++++++++------------ docs/tutorials/007/page08.html | 451 ++++++++++++++--------------- docs/tutorials/007/server.cpp | 145 +++++----- docs/tutorials/007/thread_pool.cpp | 451 ++++++++++++++--------------- docs/tutorials/008/broadcast_client.cpp | 134 ++++----- docs/tutorials/008/directed_client.cpp | 185 ++++++------ docs/tutorials/008/page02.html | 213 +++++++------- docs/tutorials/008/page03.html | 205 +++++++------- docs/tutorials/008/page04.html | 134 ++++----- docs/tutorials/008/server.cpp | 213 +++++++------- docs/tutorials/009/broadcast_client.cpp | 55 ++-- docs/tutorials/009/directed_client.cpp | 94 +++--- docs/tutorials/009/page02.html | 107 +++---- docs/tutorials/009/page03.html | 96 ++++--- docs/tutorials/009/page04.html | 55 ++-- docs/tutorials/009/server.cpp | 105 +++---- docs/tutorials/010/block.h | 28 +- docs/tutorials/010/message_queue.cpp | 121 ++++---- docs/tutorials/010/page02.html | 129 +++++---- docs/tutorials/010/page03.html | 28 +- docs/tutorials/010/page04.html | 56 ++-- docs/tutorials/010/page05.html | 204 +++++++------ docs/tutorials/010/task.cpp | 204 +++++++------ docs/tutorials/010/task.h | 56 ++-- docs/tutorials/011/block.h | 19 +- docs/tutorials/011/data.h | 43 ++- docs/tutorials/011/message_queue.cpp | 123 ++++---- docs/tutorials/011/page02.html | 123 ++++---- docs/tutorials/011/page03.html | 151 +++++----- docs/tutorials/011/page04.html | 43 ++- docs/tutorials/011/task.cpp | 134 +++++---- docs/tutorials/011/task.h | 17 +- 63 files changed, 4611 insertions(+), 5053 deletions(-) (limited to 'docs/tutorials') diff --git a/docs/tutorials/001/acceptor.h b/docs/tutorials/001/acceptor.h index 2556f0ab324..d44433bb233 100644 --- a/docs/tutorials/001/acceptor.h +++ b/docs/tutorials/001/acceptor.h @@ -1,130 +1,114 @@ - // $Id$ - #ifndef _CLIENT_ACCEPTOR_H #define 
_CLIENT_ACCEPTOR_H -/* - A SOCK_Acceptor knows how to accept socket connections. We'll use - one of those at the heart of our Logging_Acceptor. - */ +/* A SOCK_Acceptor knows how to accept socket connections. We'll use + one of those at the heart of our Logging_Acceptor. */ #include "ace/SOCK_Acceptor.h" #if !defined (ACE_LACKS_PRAGMA_ONCE) # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ -/* - An Event_Handler is what you register with ACE_Reactor. When events occur, - the reactor will callback on the Event_Handler. More on that in a few lines. - */ +/* An Event_Handler is what you register with ACE_Reactor. When + events occur, the reactor will callback on the Event_Handler. More + on that in a few lines. */ #include "ace/Event_Handler.h" -/* - When a client connects, we'll create a Logging_Handler to deal with the - connection. Here, we bring in that declaration. - */ +/* When a client connects, we'll create a Logging_Handler to deal with + the connection. Here, we bring in that declaration. */ #include "logger.h" -/* - Our Logging_Acceptor is derived from ACE_Event_Handler. That lets the - reactor treat our acceptor just like every other handler. - */ +/* Our Logging_Acceptor is derived from ACE_Event_Handler. That lets + the reactor treat our acceptor just like every other handler. */ class Logging_Acceptor : public ACE_Event_Handler { public: - /* - For this simple case we won't bother with either constructor or - destructor. In a real application you would certainly have them. - */ - - /* - Here's the open() method we called from main(). We have two things - to accomplish here: (1) Open the acceptor so that we can hear - client requests and (2) register ourselves with the reactor so that - we can respond to those requests. - */ - int open (const ACE_INET_Addr &_addr, ACE_Reactor * _reactor ) + /* For this simple case we won't bother with either constructor or + destructor. In a real application you would certainly have them. */ + + /* Here's the open() method we called from main(). We have two + things to accomplish here: (1) Open the acceptor so that we can + hear client requests and (2) register ourselves with the reactor + so that we can respond to those requests. */ + int open (const ACE_INET_Addr &addr, + ACE_Reactor *reactor) { - /* - Perform the open() on the acceptor. We pass through the address - at which main() wants us to listen. The second parameter tells - the acceptor it is OK to reuse the address. This is necessary - sometimes to get around closed connections that haven't timed out. - */ - if (this->peer_acceptor_.open (_addr, 1) == -1) + /* Perform the open() on the acceptor. We pass through the + address at which main() wants us to listen. The second + parameter tells the acceptor it is OK to reuse the address. + This is necessary sometimes to get around closed connections + that haven't timed out. */ + if (this->peer_acceptor_.open (addr, 1) == -1) return -1; - /* - Remember the reactor we're using. We'll need it later when we - create a client connection handler. - */ - reactor_ = _reactor; - - /* - Now we can register with the reactor we were given. Since the reactor - pointer is global, we could have just used that but it's gross enough - already. - Notice that we can pass 'this' right into the registration since we're - derived from ACE_Event_Handler. We also provide ACCEPT_MASK to tell - the reactor that we want to know about accept requests from clients. 
- */ - return _reactor->register_handler( this, ACE_Event_Handler::ACCEPT_MASK ); + /* Remember the reactor we're using. We'll need it later when we + create a client connection handler. */ + reactor_ = reactor; + + /* Now we can register with the reactor we were given. Since the + reactor pointer is global, we could have just used that but it's + gross enough already. Notice that we can pass 'this' right into + the registration since we're derived from ACE_Event_Handler. We + also provide ACCEPT_MASK to tell the reactor that we want to + know about accept requests from clients. */ + return reactor->register_handler (this, + ACE_Event_Handler::ACCEPT_MASK); } private: - /* - To provide multi-OS abstraction, ACE uses the concept of "handles" for - connection endpoints. In Unix, this is a traditional file descriptor - (or integer). On other OS's, it may be something else. - The reactor will need to get the handle (file descriptor) to satisfy - it's own internal needs. Our relevant handle is the handle of the - acceptor object, so that's what we provide. - */ + /* To provide multi-OS abstraction, ACE uses the concept of + "handles" for connection endpoints. In Unix, this is a + traditional file descriptor (or integer). On other OS's, it may + be something else. The reactor will need to get the handle (file + descriptor) to satisfy it's own internal needs. Our relevant + handle is the handle of the acceptor object, so that's what we + provide. */ ACE_HANDLE get_handle (void) const { return this->peer_acceptor_.get_handle (); } - /* - When an accept request arrives, the reactor will invoke the handle_input() - callback. This is where we deal with the connection request. - */ - virtual int handle_input (ACE_HANDLE _handle) + /* When an accept request arrives, the reactor will invoke the + handle_input() callback. This is where we deal with the + connection request. */ + virtual int handle_input (ACE_HANDLE handle) { - /* - The handle provided to us by the reactor is the one that triggered - our up-call. In some advanced situations, you might actually - register a single handler for multiple connections. The _handle - parameter is a way to sort 'em out. Since we don't use that - here, we simply ignore the parameter with the ACE_UNUSED_ARG() macro. - */ - ACE_UNUSED_ARG(_handle); - - /* - In response to the connection request, we create a new Logging_Handler. - This new object will be used to interact with the client until it - disconnects. - */ - Logging_Handler *svc_handler = new Logging_Handler; - - /* - To complete the connection, we invoke the accept() method call on - the acceptor object and provide it with the connection handler instance. - This transfers "ownership" of the connection from the acceptor to the - connection handler. - */ + /* The handle provided to us by the reactor is the one that + triggered our up-call. In some advanced situations, you might + actually register a single handler for multiple connections. + The _handle parameter is a way to sort 'em out. Since we don't + use that here, we simply ignore the parameter with the + ACE_UNUSED_ARG() macro. */ + ACE_UNUSED_ARG (handle); + + Logging_Handler *svc_handler; + + /* In response to the connection request, we create a new + Logging_Handler. This new object will be used to interact with + the client until it disconnects. Note how we use the + ACE_NEW_RETURN macro, which returns -1 if operator new fails. 
*/ + ACE_NEW_RETURN (svc_handler, + Logging_Handler, + -1); + + /* To complete the connection, we invoke the accept() method call + on the acceptor object and provide it with the connection + handler instance. This transfers "ownership" of the connection + from the acceptor to the connection handler. */ if (this->peer_acceptor_.accept (*svc_handler) == -1) - ACE_ERROR_RETURN ((LM_ERROR, "%p", "accept failed"), -1); - - /* - Again, most objects need to be open()ed before they are useful. We'll - give the handler our reactor pointer so that it can register for - events as well. If the open fails, we'll force a close(). - */ + ACE_ERROR_RETURN ((LM_ERROR, + "%p", + "accept failed"), + -1); + + /* Again, most objects need to be open()ed before they are useful. + We'll give the handler our reactor pointer so that it can + register for events as well. If the open fails, we'll force a + close(). */ if (svc_handler->open (reactor_) == -1) svc_handler->close (); @@ -133,15 +117,11 @@ private: protected: - /* - Our acceptor object instance - */ + /* Our acceptor object instance */ ACE_SOCK_Acceptor peer_acceptor_; - /* - A place to remember our reactor pointer - */ - ACE_Reactor * reactor_; + /* A place to remember our reactor pointer */ + ACE_Reactor *reactor_; }; #endif /* _CLIENT_ACCEPTOR_H */ diff --git a/docs/tutorials/001/logger.h b/docs/tutorials/001/logger.h index 3ea531b842c..69d19505d60 100644 --- a/docs/tutorials/001/logger.h +++ b/docs/tutorials/001/logger.h @@ -1,14 +1,11 @@ - // $Id$ - #ifndef _CLIENT_HANDLER_H #define _CLIENT_HANDLER_H -/* - A connection handler will also be derived from ACE_Event_Handler so that we - can register with a reactor. - */ +/* A connection handler will also be derived from ACE_Event_Handler so + that we can register with a reactor. */ + #include "ace/Event_Handler.h" #if !defined (ACE_LACKS_PRAGMA_ONCE) @@ -17,60 +14,60 @@ #include "ace/INET_Addr.h" -/* - Since we're doing TCP/IP, we'll need a SOCK_Stream for the connection. - */ +/* Since we're doing TCP/IP, we'll need a SOCK_Stream for the + connection. */ #include "ace/SOCK_Stream.h" class Logging_Handler : public ACE_Event_Handler { public: - /* Like the acceptor, we're simple enough to avoid constructor and destructor. - */ + */ - /* - To open the client handler, we have to register ourselves with the reactor. - Notice that we don't have to "open" our ACE_SOCK_Stream member variable. - Why? Because the call to the acceptor's accept() method took care of those - details for us. - */ - int open ( ACE_Reactor * _reactor ) + /* To open the client handler, we have to register ourselves with + the reactor. Notice that we don't have to "open" our + ACE_SOCK_Stream member variable. Why? Because the call to the + acceptor's accept() method took care of those details for us. */ + + int open (ACE_Reactor *reactor) { /* Remember our reactor... - */ - reactor_ = _reactor; - - /* - In this case we're using the READ_MASK. Like the acceptor, handle_input() - will be called due to this mask but it's a nice piece of bookkeeping to - have separate masks for the separate types of activity. - */ - if (_reactor-> register_handler (this, ACE_Event_Handler::READ_MASK) == -1) - ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1); + */ + reactor_ = reactor; + + /* In this case we're using the READ_MASK. Like the acceptor, + handle_input() will be called due to this mask but it's a nice + piece of bookkeeping to have separate masks for the separate + types of activity. 
*/ + + if (reactor->register_handler (this, + ACE_Event_Handler::READ_MASK) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "(%P|%t) can't register with reactor\n"), + -1); return 0; } - /* - If we're explicitly closed we'll close our "file handle". The net result - is to close the connection to the client and remove ourselves from the - reactor if we're registered - */ + /* If we're explicitly closed we'll close our "file handle". The + net result is to close the connection to the client and remove + ourselves from the reactor if we're registered */ + int close (void) { - return this->handle_close (ACE_INVALID_HANDLE, ACE_Event_Handler::RWE_MASK); + return this->handle_close (ACE_INVALID_HANDLE, + ACE_Event_Handler::RWE_MASK); } - /* - This is a bit of magic... When we call the accept() method of the acceptor - object, it wants to do work on an ACE_SOCK_Stream. We have one of those as - our connection to the client but it would be gross to provide a method to - access that object. It's much cooler if the acceptor can just treat the - Logging_Handler as an ACE_SOCK_Stream. Providing this cast operator lets - that happen cleanly. - */ + /* This is a bit of magic... When we call the accept() method of + the acceptor object, it wants to do work on an ACE_SOCK_Stream. + We have one of those as our connection to the client but it would + be gross to provide a method to access that object. It's much + cooler if the acceptor can just treat the Logging_Handler as an + ACE_SOCK_Stream. Providing this cast operator lets that happen + cleanly. */ + operator ACE_SOCK_Stream &() { return this->cli_stream_; @@ -78,85 +75,91 @@ public: protected: - /* - Again, like the acceptor, we need to provide the connection handle to the reactor. - */ + /* Again, like the acceptor, we need to provide the connection + handle to the reactor. */ + ACE_HANDLE get_handle (void) const { return this->cli_stream_.get_handle (); } - /* - And here's the handle_input(). This is really the workhorse of the application. - */ + /* And here's the handle_input(). This is really the workhorse of + the application. */ + virtual int handle_input (ACE_HANDLE) { /* Create and initialize a small receive buffer. The extra byte is there to allow us to have a null-terminated string when it's over. */ - char buf[BUFSIZ+1]; + char buf[BUFSIZ + 1]; + + /* Invoke the recv() method of the ACE_SOCK_Stream to get some + data. It will return -1 if there is an error. Otherwise, it + will return the number of bytes read. Of course, if it read + zero bytes then the connection must be gone. How do I know + that? Because handle_input() would not be called by the reactor + if there wasn't *some* kind of activity and a closed connection + looks like a read request to the reactor. But when you read + from a closed connection you'll read zero bytes. + + Notice that in the error case or closed case we return -1. That + tells the reactor to call our handle_close() where we'll take + care of shutting down cleanly. + + Although we don't make use of them, there are additional + parameters you can use with the recv() call. One of these is an + ACE_Time_Value that allows you to limit the amount of time + blocking on the recv(). You would use that if you weren't sure + if data was available. Since we only get to handle_input() when + data is ready, that would be redundant. On the other hand, if + you use recv_n() to read *exactly* a number of bytes then + limiting the time you wait for those bytes might be good. 
The + other paramter that may come in handy is an integer + flags. This is passed directly to the underlying OS + recv() call. See the man page recv(2) and the header + sys/socket.h for the gory details. */ - /* - Invoke the recv() method of the ACE_SOCK_Stream to get some data. It will - return -1 if there is an error. Otherwise, it will return the number of bytes - read. Of course, if it read zero bytes then the connection must be gone. - How do I know that? Because handle_input() would not be called by the reactor - if there wasn't *some* kind of activity and a closed connection looks like a - read request to the reactor. But when you read from a closed connection you'll - read zero bytes. - - Notice that in the error case or closed case we return -1. That tells the reactor - to call our handle_close() where we'll take care of shutting down cleanly. - - Although we don't make use of them, there are additional parameters you can - use with the recv() call. One of these is an ACE_Time_Value that allows you to - limit the amount of time blocking on the recv(). You would use that if you - weren't sure if data was available. Since we only get to handle_input() when - data is ready, that would be redundant. On the other hand, if you use recv_n() - to read *exactly* a number of bytes then limiting the time you wait for those - bytes might be good. - The other paramter that may come in handy is an integer flags. This is - passed directly to the underlying OS recv() call. See the man page recv(2) - and the header sys/socket.h for the gory details. - */ ssize_t retval; - switch( retval = this->cli_stream_.recv(buf,BUFSIZ) ) + switch (retval = this->cli_stream_.recv (buf, BUFSIZ)) { case -1: - ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client logger"), -1); + ACE_ERROR_RETURN ((LM_ERROR, + "(%P|%t) %p bad read\n", + "client logger"), + -1); case 0: - ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing log daemon (fd = %d)\n", - this->get_handle ()), -1); + ACE_ERROR_RETURN ((LM_ERROR, + "(%P|%t) closing log daemon (fd = %d)\n", + this->get_handle ()), + -1); default: buf[retval] = '\0'; - ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s",buf)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) from client: %s", + buf)); } return 0; } - /* - When handle_input() returns -1, we'll end up here. There are a few housekeeping - chores to handle. - */ - int handle_close (ACE_HANDLE, ACE_Reactor_Mask _mask) + /* When handle_input() returns -1, we'll end up here. There are a + few housekeeping chores to handle. */ + + int handle_close (ACE_HANDLE, + ACE_Reactor_Mask _mask) { - /* - Remove ourselves from the reactor. We have to include the DONT_CALL in the - mask so that it won't call handle_close() on us again! - */ - reactor_->remove_handler(this,_mask|ACE_Event_Handler::DONT_CALL); + /* Remove ourselves from the reactor. We have to include the + DONT_CALL in the mask so that it won't call handle_close() on us + again! */ + reactor_->remove_handler (this, + _mask | ACE_Event_Handler::DONT_CALL); - /* - Close the socket that we're connected to the client with. - */ - cli_stream_.close(); + /* Close the socket that we're connected to the client with. */ + cli_stream_.close (); - /* - Since we know we were dynamically allocated by the acceptor, now is a good - time to get rid of ourselves. - */ + /* Since we know we were dynamically allocated by the acceptor, + now is a good time to get rid of ourselves. */ delete this; return 0; @@ -164,15 +167,11 @@ protected: protected: - /* - Our peer connection. 
- */ + /* Our peer connection. */ ACE_SOCK_Stream cli_stream_; - /* - Our reactor (and our acceptor's reactor). - */ - ACE_Reactor * reactor_; + /* Our reactor (and our acceptor's reactor). */ + ACE_Reactor *reactor_; }; #endif /* _CLIENT_HANDLER_H */ diff --git a/docs/tutorials/001/page02.html b/docs/tutorials/001/page02.html index dff82baa375..42180451fc3 100644 --- a/docs/tutorials/001/page02.html +++ b/docs/tutorials/001/page02.html @@ -68,87 +68,81 @@ to "read" data from an established connection.
-
 // $Id$
 
-
-/*
-  Include the header file where our client acceptor is defined.
- */
+/* Include the header where the ACE_Reactor class is declared.  */
 #include "ace/Reactor.h"
 
-/*  
-  For simplicity, we create our reactor in the global address space.
-  In later tutorials we will do something more clever and appropriate. However,
-  the purpose of this tutorial is to introduce a connection acceptance and
-  handling, not the full capabilities of a reactor.
-*/
-ACE_Reactor * g_reactor;
-
-/*
-  Include the header where we define our acceptor object.  An acceptor is
-  an abstraction that allows a server to "accept" connections from clients.
-*/
+/* For simplicity, we create our reactor in the global address space.
+  In later tutorials we will do something more clever and
+  appropriate. However, the purpose of this tutorial is to introduce
+  connection acceptance and handling, not the full capabilities of a
+  reactor.  */
+ACE_Reactor *g_reactor;
+
+/* Include the header where we define our acceptor object.  An
+  acceptor is an abstraction that allows a server to "accept"
+  connections from clients.  */
 #include "acceptor.h"
 
-/*
-  A TCP/IP server can listen to only one port for connection requests.
-  Well-known services can always be found at the same address. Lesser-known
-  services are generally told where to listen by a configuration file or
-  command-line parameter. For this example, we're satisfied with simply hard-coding
-  a random but known value.
-*/
+/* A TCP/IP server can listen to only one port for connection
+  requests.  Well-known services can always be found at the same
+  address. Lesser-known services are generally told where to listen by
+  a configuration file or command-line parameter. For this example,
+  we're satisfied with simply hard-coding a random but known value.  */
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main (int, char *[])
+int 
+main (int, char *[])
 {
-  /*
-    Create a Reactor instance.  Again, a global pointer isn't exactly the
-    best way to handle this but for the simple example here, it will be OK.
-    We'll get cute with it later.
-  */
-  g_reactor = new ACE_Reactor;
-
-  /*
-    Like the Reactor, I'm skimming over the details of the ADDR
-    object. What it provides is an abstraction for addressing services in the
-    network. All we need to know at this point is that we are creating an address
-    object which specifies the TCP/IP port on which the server
-    will listen for new connection requests.
-  */
+  /* Create a Reactor instance.  Again, a global pointer isn't exactly
+    the best way to handle this but for the simple example here, it
+    will be OK.  We'll get cute with it later.  Note how we use the
+    ACE_NEW_RETURN macro, which returns 1 if operator new fails. */
+  ACE_NEW_RETURN (g_reactor,
+                  ACE_Reactor,
+                  1);
+
+  /* Like the Reactor, I'm skimming over the details of the ADDR
+    object. What it provides is an abstraction for addressing services
+    in the network. All we need to know at this point is that we are
+    creating an address object which specifies the TCP/IP port on
+    which the server will listen for new connection requests.  */
   ACE_INET_Addr addr (PORT);
 
-  /*
-    We now create an acceptor object.  No connections will
-    yet be established because the object isn't "open for business"
-    at this time. Which brings us to the next line...
-  */
-  Logging_Acceptor * peer_acceptor = new Logging_Acceptor();
-
-  /*
-    where the acceptor object is opened.  You'll find that most ACE
-    objects have to be open()ed before they're of any use to you.
-    On this open() call, we're telling the acceptor where to listen
-    for connections via the 'addr' object.  We're also telling it
-    that we want it to be registered with our 'g_reactor' instance.
-   */
-  if (peer_acceptor->open(addr,g_reactor) == -1 )
-    ACE_ERROR_RETURN ((LM_ERROR, "Opening Acceptor\n"), -1);
-
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server logging daemon\n"));
-
-  /*
-    The reactor's handle_events member function is responsible for looking at
-    all registered objects and invoking an appropriate member function when
-    anything of interest occurs. When an event is processed, the handle_events
-    function returns. In order to get all events, we embed this in an infinite
-    loop.
-
-    Since we put ourselves into an infinite loop, you'll need to CTRL-C
-    to exit the program.
-  */
-  while (1)
-     g_reactor-> handle_events ();
+  Logging_Acceptor *peer_acceptor;
+
+  /* We now create an acceptor object.  No connections will yet be
+    established because the object isn't "open for business" at this
+    time. Which brings us to the next line...  */
+  ACE_NEW_RETURN (peer_acceptor,
+                  Logging_Acceptor,
+                  1);
+
+  /* where the acceptor object is opened.  You'll find that most ACE
+    objects have to be open()ed before they're of any use to you.  On
+    this open() call, we're telling the acceptor where to listen for
+    connections via the 'addr' object.  We're also telling it that we
+    want it to be registered with our 'g_reactor' instance.  */
+  if (peer_acceptor->open (addr, g_reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR, 
+                       "Opening Acceptor\n"),
+                      -1);
+
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server logging daemon\n"));
+
+  /* The reactor's handle_events member function is responsible for
+    looking at all registered objects and invoking an appropriate
+    member function when anything of interest occurs. When an event is
+    processed, the handle_events function returns. In order to get all
+    events, we embed this in an infinite loop.
+
+    Since we put ourselves into an infinite loop, you'll need to
+    CTRL-C to exit the program.  */
+
+  for (;;)
+    g_reactor->handle_events ();
 
   return 0;
 }
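
The listing above uses ACE_NEW_RETURN instead of a bare operator new.
As a rough sketch of the idea (an approximation, not the literal macro
definition in the ACE headers), ACE_NEW_RETURN performs the allocation
and, if it fails, sets errno to ENOMEM and returns the supplied value
from the enclosing function:

  /* Illustrative approximation only -- consult the ACE headers for
     the real ACE_NEW_RETURN.  */
  #define EXAMPLE_NEW_RETURN(POINTER, CONSTRUCTOR, RET_VAL) \
    do                                                      \
      {                                                     \
        POINTER = new CONSTRUCTOR;                          \
        if (POINTER == 0)                                   \
          {                                                 \
            errno = ENOMEM;                                 \
            return RET_VAL;                                 \
          }                                                 \
      }                                                     \
    while (0)

  /* So the reactor allocation in main() behaves roughly like:
       g_reactor = new ACE_Reactor;
       if (g_reactor == 0) { errno = ENOMEM; return 1; }  */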
diff --git a/docs/tutorials/001/page03.html b/docs/tutorials/001/page03.html
index 5f45ca0826f..88add75adc8 100644
--- a/docs/tutorials/001/page03.html
+++ b/docs/tutorials/001/page03.html
@@ -37,133 +37,117 @@ appropriate section (event_handler) which would cater to his needs.
 


-
 // $Id$
 
-
 #ifndef _CLIENT_ACCEPTOR_H
 #define _CLIENT_ACCEPTOR_H
 
-/*
-  A SOCK_Acceptor knows how to accept socket connections.  We'll use
-  one of those at the heart of our Logging_Acceptor.
- */
+/* A SOCK_Acceptor knows how to accept socket connections.  We'll use
+  one of those at the heart of our Logging_Acceptor.  */
 #include "ace/SOCK_Acceptor.h"
 
 #if !defined (ACE_LACKS_PRAGMA_ONCE)
 # pragma once
 #endif /* ACE_LACKS_PRAGMA_ONCE */
 
-/*
-  An Event_Handler is what you register with ACE_Reactor.  When events occur,
-  the reactor will callback on the Event_Handler.  More on that in a few lines.
- */
+/* An Event_Handler is what you register with ACE_Reactor.  When
+  events occur, the reactor will callback on the Event_Handler.  More
+  on that in a few lines.  */
 #include "ace/Event_Handler.h"
 
-/*
-  When a client connects, we'll create a Logging_Handler to deal with the
-  connection.  Here, we bring in that declaration.
- */
+/* When a client connects, we'll create a Logging_Handler to deal with
+  the connection.  Here, we bring in that declaration.  */
 #include "logger.h"
 
-/*
-  Our Logging_Acceptor is derived from ACE_Event_Handler.  That lets the
-  reactor treat our acceptor just like every other handler.
- */
+/* Our Logging_Acceptor is derived from ACE_Event_Handler.  That lets
+  the reactor treat our acceptor just like every other handler.  */
 class Logging_Acceptor : public ACE_Event_Handler
 {
 public:
 
-  /*
-    For this simple case we won't bother with either constructor or
-    destructor.  In a real application you would certainly have them.
-   */
-
-  /*
-    Here's the open() method we called from main().  We have two things
-    to accomplish here:  (1)  Open the acceptor so that we can hear
-    client requests and (2) register ourselves with the reactor so that
-    we can respond to those requests.
-   */
-  int open (const ACE_INET_Addr &_addr, ACE_Reactor * _reactor )
+  /* For this simple case we won't bother with either constructor or
+    destructor.  In a real application you would certainly have them.  */
+
+  /* Here's the open() method we called from main().  We have two
+    things to accomplish here: (1) Open the acceptor so that we can
+    hear client requests and (2) register ourselves with the reactor
+    so that we can respond to those requests.  */
+  int open (const ACE_INET_Addr &addr,
+            ACE_Reactor *reactor)
   {
-    /*
-      Perform the open() on the acceptor.  We pass through the address
-      at which main() wants us to listen.  The second parameter tells
-      the acceptor it is OK to reuse the address.  This is necessary
-      sometimes to get around closed connections that haven't timed out.
-     */
-    if (this->peer_acceptor_.open (_addr, 1) == -1)
+    /* Perform the open() on the acceptor.  We pass through the
+      address at which main() wants us to listen.  The second
+      parameter tells the acceptor it is OK to reuse the address.
+      This is necessary sometimes to get around closed connections
+      that haven't timed out.  */
+    if (this->peer_acceptor_.open (addr, 1) == -1)
       return -1;
 
-    /*
-      Remember the reactor we're using.  We'll need it later when we
-      create a client connection handler.
-     */
-    reactor_ = _reactor;
-
-    /*
-      Now we can register with the reactor we were given.  Since the reactor
-      pointer is global, we could have just used that but it's gross enough
-      already.
-      Notice that we can pass 'this' right into the registration since we're
-      derived from ACE_Event_Handler.  We also provide ACCEPT_MASK to tell
-      the reactor that we want to know about accept requests from clients.
-     */
-    return _reactor->register_handler( this, ACE_Event_Handler::ACCEPT_MASK );
+    /* Remember the reactor we're using.  We'll need it later when we
+      create a client connection handler.  */
+    reactor_ = reactor;
+
+    /* Now we can register with the reactor we were given.  Since the
+      reactor pointer is global, we could have just used that but it's
+      gross enough already.  Notice that we can pass 'this' right into
+      the registration since we're derived from ACE_Event_Handler.  We
+      also provide ACCEPT_MASK to tell the reactor that we want to
+      know about accept requests from clients.  */
+    return reactor->register_handler (this,
+                                      ACE_Event_Handler::ACCEPT_MASK);
   }
 
 private:
 
-  /*
-    To provide multi-OS abstraction, ACE uses the concept of "handles" for
-    connection endpoints.  In Unix, this is a traditional file descriptor
-    (or integer).  On other OS's, it may be something else.
-    The reactor will need to get the handle (file descriptor) to satisfy
-    it's own internal needs.  Our relevant handle is the handle of the
-    acceptor object, so that's what we provide.
-   */
+  /* To provide multi-OS abstraction, ACE uses the concept of
+    "handles" for connection endpoints.  In Unix, this is a
+    traditional file descriptor (or integer).  On other OS's, it may
+    be something else.  The reactor will need to get the handle (file
+    descriptor) to satisfy its own internal needs.  Our relevant
+    handle is the handle of the acceptor object, so that's what we
+    provide.  */
   ACE_HANDLE get_handle (void) const
   {
     return this->peer_acceptor_.get_handle ();
   }
 
-  /*
-    When an accept request arrives, the reactor will invoke the handle_input()
-    callback.  This is where we deal with the connection request.
-   */
-  virtual int handle_input (ACE_HANDLE _handle)
+  /* When an accept request arrives, the reactor will invoke the
+    handle_input() callback.  This is where we deal with the
+    connection request.  */
+  virtual int handle_input (ACE_HANDLE handle)
   {
-    /*
-      The handle provided to us by the reactor is the one that triggered
-      our up-call.  In some advanced situations, you might actually
-      register a single handler for multiple connections.  The _handle
-      parameter is a way to sort 'em out.  Since we don't use that
-      here, we simply ignore the parameter with the ACE_UNUSED_ARG() macro.
-     */
-    ACE_UNUSED_ARG(_handle);
-
-    /*
-      In response to the connection request, we create a new Logging_Handler.
-      This new object will be used to interact with the client until it
-      disconnects.
-     */
-    Logging_Handler *svc_handler = new Logging_Handler;
-
-    /*
-      To complete the connection, we invoke the accept() method call on
-      the acceptor object and provide it with the connection handler instance.
-      This transfers "ownership" of the connection from the acceptor to the
-      connection handler.
-     */
+    /* The handle provided to us by the reactor is the one that
+      triggered our up-call.  In some advanced situations, you might
+      actually register a single handler for multiple connections.
+      The _handle parameter is a way to sort 'em out.  Since we don't
+      use that here, we simply ignore the parameter with the
+      ACE_UNUSED_ARG() macro.  */
+    ACE_UNUSED_ARG (handle);
+
+    Logging_Handler *svc_handler;
+
+    /* In response to the connection request, we create a new
+      Logging_Handler.  This new object will be used to interact with
+      the client until it disconnects.  Note how we use the
+      ACE_NEW_RETURN macro, which returns -1 if operator new fails. */
+    ACE_NEW_RETURN (svc_handler,
+                    Logging_Handler,
+                    -1);
+
+    /* To complete the connection, we invoke the accept() method call
+      on the acceptor object and provide it with the connection
+      handler instance.  This transfers "ownership" of the connection
+      from the acceptor to the connection handler.  */
     if (this->peer_acceptor_.accept (*svc_handler) == -1)
-      ACE_ERROR_RETURN ((LM_ERROR, "%p", "accept failed"), -1);
-
-    /*
-      Again, most objects need to be open()ed before they are useful.  We'll
-      give the handler our reactor pointer so that it can register for
-      events as well.  If the open fails, we'll force a close().
-     */
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "%p",
+                         "accept failed"),
+                        -1);
+
+    /* Again, most objects need to be open()ed before they are useful.
+      We'll give the handler our reactor pointer so that it can
+      register for events as well.  If the open fails, we'll force a
+      close().  */
     if (svc_handler->open (reactor_) == -1)
       svc_handler->close ();
 
@@ -172,15 +156,11 @@ private:
 
 protected:
 
-  /*
-    Our acceptor object instance
-   */
+  /* Our acceptor object instance */
   ACE_SOCK_Acceptor peer_acceptor_;
 
-  /*
-    A place to remember our reactor pointer
-   */
-  ACE_Reactor * reactor_;
+  /* A place to remember our reactor pointer */
+  ACE_Reactor *reactor_;
 };
 
 #endif /* _CLIENT_ACCEPTOR_H */
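
To see the accept path fire, any TCP client that connects to the
server's port will do.  The sketch below is not part of the tutorial's
code; it assumes nothing beyond the basic ACE_SOCK_Connector and
ACE_SOCK_Stream calls (connect(), send_n(), close()) and the same
ACE_DEFAULT_SERVER_PORT constant the server listens on:

  #include "ace/SOCK_Connector.h"
  #include "ace/SOCK_Stream.h"
  #include "ace/INET_Addr.h"
  #include "ace/OS.h"

  int
  main (int, char *[])
  {
    /* Address of the logging server started by server.cpp.  */
    ACE_INET_Addr addr (ACE_DEFAULT_SERVER_PORT, "localhost");

    ACE_SOCK_Connector connector;
    ACE_SOCK_Stream peer;

    /* Connecting is what triggers Logging_Acceptor::handle_input()
       on the server side.  */
    if (connector.connect (peer, addr) == -1)
      return 1;

    /* Send one line for the server's Logging_Handler to print.  */
    const char *msg = "Hello from a throw-away test client\n";
    if (peer.send_n (msg, ACE_OS::strlen (msg)) == -1)
      return 1;

    peer.close ();
    return 0;
  }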
diff --git a/docs/tutorials/001/page04.html b/docs/tutorials/001/page04.html
index 22c35a7d4d4..aac12e70ba9 100644
--- a/docs/tutorials/001/page04.html
+++ b/docs/tutorials/001/page04.html
@@ -19,17 +19,14 @@ object.
 


-
 // $Id$
 
-
 #ifndef _CLIENT_HANDLER_H
 #define _CLIENT_HANDLER_H
 
-/*
-  A connection handler will also be derived from ACE_Event_Handler so that we
-  can register with a reactor.
- */
+/* A connection handler will also be derived from ACE_Event_Handler so
+  that we can register with a reactor.  */
+
 #include "ace/Event_Handler.h"
 
 #if !defined (ACE_LACKS_PRAGMA_ONCE)
@@ -38,60 +35,60 @@ object.
 
 #include "ace/INET_Addr.h"
 
-/*
-  Since we're doing TCP/IP, we'll need a SOCK_Stream for the connection.
- */
+/* Since we're doing TCP/IP, we'll need a SOCK_Stream for the
+  connection.  */
 #include "ace/SOCK_Stream.h"
 
 class Logging_Handler : public ACE_Event_Handler
 {
 public:
-
   /*
     Like the acceptor, we're simple enough to avoid constructor and destructor.
-   */
+    */
 
-  /*
-    To open the client handler, we have to register ourselves with the reactor.
-    Notice that we don't have to "open" our ACE_SOCK_Stream member variable.
-    Why?  Because the call to the acceptor's accept() method took care of those
-    details for us.
-   */
-  int open ( ACE_Reactor * _reactor )
+  /* To open the client handler, we have to register ourselves with
+    the reactor.  Notice that we don't have to "open" our
+    ACE_SOCK_Stream member variable.  Why?  Because the call to the
+    acceptor's accept() method took care of those details for us.  */
+
+  int open (ACE_Reactor *reactor)
   {
     /*
       Remember our reactor...
-     */
-    reactor_ = _reactor;
-
-    /*
-      In this case we're using the READ_MASK.  Like the acceptor, handle_input()
-      will be called due to this mask but it's a nice piece of bookkeeping to
-      have separate masks for the separate types of activity.
-     */
-    if (_reactor-> register_handler (this, ACE_Event_Handler::READ_MASK) == -1)
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
+      */
+    reactor_ = reactor;
+
+    /* In this case we're using the READ_MASK.  Like the acceptor,
+      handle_input() will be called due to this mask but it's a nice
+      piece of bookkeeping to have separate masks for the separate
+      types of activity.  */
+
+    if (reactor->register_handler (this,
+                                   ACE_Event_Handler::READ_MASK) == -1)
+      ACE_ERROR_RETURN ((LM_ERROR, 
+                         "(%P|%t) can't register with reactor\n"),
+                        -1);
     return 0;
   }
 
-  /*
-    If we're explicitly closed we'll close our "file handle".  The net result
-    is to close the connection to the client and remove ourselves from the
-    reactor if we're registered
-   */
+  /* If we're explicitly closed we'll close our "file handle".  The
+    net result is to close the connection to the client and remove
+    ourselves from the reactor if we're registered */
+
   int close (void)
   {
-    return this->handle_close (ACE_INVALID_HANDLE, ACE_Event_Handler::RWE_MASK);
+    return this->handle_close (ACE_INVALID_HANDLE,
+                               ACE_Event_Handler::RWE_MASK);
   }
 
-  /*
-    This is a bit of magic...  When we call the accept() method of the acceptor
-    object, it wants to do work on an ACE_SOCK_Stream.  We have one of those as
-    our connection to the client but it would be gross to provide a method to
-    access that object.  It's much cooler if the acceptor can just treat the
-    Logging_Handler as an ACE_SOCK_Stream.  Providing this cast operator lets
-    that happen cleanly.
-   */
+  /* This is a bit of magic...  When we call the accept() method of
+    the acceptor object, it wants to do work on an ACE_SOCK_Stream.
+    We have one of those as our connection to the client but it would
+    be gross to provide a method to access that object.  It's much
+    cooler if the acceptor can just treat the Logging_Handler as an
+    ACE_SOCK_Stream.  Providing this cast operator lets that happen
+    cleanly.  */
+
   operator ACE_SOCK_Stream &()
   {
     return this->cli_stream_;
@@ -99,85 +96,91 @@ public:
 
 protected:
 
-  /*
-    Again, like the acceptor, we need to provide the connection handle to the reactor.
-   */
+  /* Again, like the acceptor, we need to provide the connection
+    handle to the reactor.  */
+
   ACE_HANDLE get_handle (void) const
   {
     return this->cli_stream_.get_handle ();
   }
 
-  /*
-    And here's the handle_input().  This is really the workhorse of the application.
-   */
+  /* And here's the handle_input().  This is really the workhorse of
+    the application.  */
+
   virtual int handle_input (ACE_HANDLE)
   {
     /*
       Create and initialize a small receive buffer.  The extra byte is 
       there to allow us to have a null-terminated string when it's over.
      */
-    char buf[BUFSIZ+1];
+    char buf[BUFSIZ + 1];
+
+    /* Invoke the recv() method of the ACE_SOCK_Stream to get some
+      data.  It will return -1 if there is an error.  Otherwise, it
+      will return the number of bytes read.  Of course, if it read
+      zero bytes then the connection must be gone.  How do I know
+      that?  Because handle_input() would not be called by the reactor
+      if there wasn't *some* kind of activity and a closed connection
+      looks like a read request to the reactor.  But when you read
+      from a closed connection you'll read zero bytes.
+
+      Notice that in the error case or closed case we return -1.  That
+      tells the reactor to call our handle_close() where we'll take
+      care of shutting down cleanly.
+
+      Although we don't make use of them, there are additional
+      parameters you can use with the recv() call.  One of these is an
+      ACE_Time_Value that allows you to limit the amount of time
+      blocking on the recv().  You would use that if you weren't sure
+      if data was available.  Since we only get to handle_input() when
+      data is ready, that would be redundant.  On the other hand, if
+      you use recv_n() to read *exactly* a number of bytes then
+      limiting the time you wait for those bytes might be good.  The
+      other parameter that may come in handy is an integer
+      <i>flags</i>.  This is passed directly to the underlying OS
+      recv() call.  See the man page recv(2) and the header
+      sys/socket.h for the gory details. */
 
-    /*
-      Invoke the recv() method of the ACE_SOCK_Stream to get some data.  It will
-      return -1 if there is an error.  Otherwise, it will return the number of bytes
-      read.  Of course, if it read zero bytes then the connection must be gone.
-      How do I know that?  Because handle_input() would not be called by the reactor
-      if there wasn't *some* kind of activity and a closed connection looks like a
-      read request to the reactor.  But when you read from a closed connection you'll
-      read zero bytes.
-
-      Notice that in the error case or closed case we return -1.  That tells the reactor
-      to call our handle_close() where we'll take care of shutting down cleanly.
-
-      Although we don't make use of them, there are additional parameters you can
-      use with the recv() call.  One of these is an ACE_Time_Value that allows you to
-      limit the amount of time blocking on the recv().  You would use that if you
-      weren't sure if data was available.  Since we only get to handle_input() when
-      data is ready, that would be redundant.  On the other hand, if you use recv_n()
-      to read *exactly* a number of bytes then limiting the time you wait for those
-      bytes might be good.
-      The other paramter that may come in handy is an integer <i>flags</i>.  This is
-      passed directly to the underlying OS recv() call.  See the man page recv(2)
-      and the header sys/socket.h for the gory details.
-     */
     ssize_t retval;
-    switch( retval = this->cli_stream_.recv(buf,BUFSIZ) )
+    switch (retval = this->cli_stream_.recv (buf, BUFSIZ))
     {
     case -1:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client logger"), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) %p bad read\n",
+                         "client logger"),
+                        -1);
     case 0:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing log daemon (fd = %d)\n",
-        this->get_handle ()), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing log daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
     default:
       buf[retval] = '\0';
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s",buf));
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  buf));
     }
 
     return 0;
   }
 
-  /*
-    When handle_input() returns -1, we'll end up here.  There are a few housekeeping
-    chores to handle.
-   */
-  int handle_close (ACE_HANDLE, ACE_Reactor_Mask _mask)
+  /* When handle_input() returns -1, we'll end up here.  There are a
+    few housekeeping chores to handle.  */
+
+  int handle_close (ACE_HANDLE,
+                    ACE_Reactor_Mask _mask)
   {
-    /*
-      Remove ourselves from the reactor.  We have to include the DONT_CALL in the
-      mask so that it won't call handle_close() on us again!
-     */
-    reactor_->remove_handler(this,_mask|ACE_Event_Handler::DONT_CALL);
+    /* Remove ourselves from the reactor.  We have to include the
+      DONT_CALL in the mask so that it won't call handle_close() on us
+      again!  */
+    reactor_->remove_handler (this,
+                              _mask | ACE_Event_Handler::DONT_CALL);
 
-    /*
-      Close the socket that we're connected to the client with.
-     */
-    cli_stream_.close();
+    /* Close the socket that we're connected to the client with.  */
+    cli_stream_.close ();
 
-    /*
-      Since we know we were dynamically allocated by the acceptor, now is a good
-      time to get rid of ourselves.
-     */
+    /* Since we know we were dynamically allocated by the acceptor,
+      now is a good time to get rid of ourselves.  */
     delete this;
 
     return 0;
@@ -185,15 +188,11 @@ protected:
 
 protected:
 
-  /*
-    Our peer connection.
-   */
+  /* Our peer connection.  */
   ACE_SOCK_Stream cli_stream_;
 
-  /*
-    Our reactor (and our acceptor's reactor).
-   */
-  ACE_Reactor * reactor_;
+  /* Our reactor (and our acceptor's reactor).  */
+  ACE_Reactor *reactor_;
 };
 
 #endif /* _CLIENT_HANDLER_H */
diff --git a/docs/tutorials/001/server.cpp b/docs/tutorials/001/server.cpp
index 412f43a9a34..f7deb7a9075 100644
--- a/docs/tutorials/001/server.cpp
+++ b/docs/tutorials/001/server.cpp
@@ -1,84 +1,78 @@
-
 // $Id$
 
-
-/*
-  Include the header file where our client acceptor is defined.
- */
+/* Include the header where the ACE_Reactor class is declared.  */
 #include "ace/Reactor.h"
 
-/*  
-  For simplicity, we create our reactor in the global address space.
-  In later tutorials we will do something more clever and appropriate. However,
-  the purpose of this tutorial is to introduce a connection acceptance and
-  handling, not the full capabilities of a reactor.
-*/
-ACE_Reactor * g_reactor;
+/* For simplicity, we create our reactor in the global address space.
+  In later tutorials we will do something more clever and
+  appropriate. However, the purpose of this tutorial is to introduce
+  connection acceptance and handling, not the full capabilities of a
+  reactor.  */
+ACE_Reactor *g_reactor;
 
-/*
-  Include the header where we define our acceptor object.  An acceptor is
-  an abstraction that allows a server to "accept" connections from clients.
-*/
+/* Include the header where we define our acceptor object.  An
+  acceptor is an abstraction that allows a server to "accept"
+  connections from clients.  */
 #include "acceptor.h"
 
-/*
-  A TCP/IP server can listen to only one port for connection requests.
-  Well-known services can always be found at the same address. Lesser-known
-  services are generally told where to listen by a configuration file or
-  command-line parameter. For this example, we're satisfied with simply hard-coding
-  a random but known value.
-*/
+/* A TCP/IP server can listen to only one port for connection
+  requests.  Well-known services can always be found at the same
+  address. Lesser-known services are generally told where to listen by
+  a configuration file or command-line parameter. For this example,
+  we're satisfied with simply hard-coding a random but known value.  */
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main (int, char *[])
+int 
+main (int, char *[])
 {
-  /*
-    Create a Reactor instance.  Again, a global pointer isn't exactly the
-    best way to handle this but for the simple example here, it will be OK.
-    We'll get cute with it later.
-  */
-  g_reactor = new ACE_Reactor;
+  /* Create a Reactor instance.  Again, a global pointer isn't exactly
+    the best way to handle this but for the simple example here, it
+    will be OK.  We'll get cute with it later.  Note how we use the
+    ACE_NEW_RETURN macro, which returns 1 if operator new fails. */
+  ACE_NEW_RETURN (g_reactor,
+                  ACE_Reactor,
+                  1);
 
-  /*
-    Like the Reactor, I'm skimming over the details of the ADDR
-    object. What it provides is an abstraction for addressing services in the
-    network. All we need to know at this point is that we are creating an address
-    object which specifies the TCP/IP port on which the server
-    will listen for new connection requests.
-  */
+  /* Like the Reactor, I'm skimming over the details of the ADDR
+    object. What it provides is an abstraction for addressing services
+    in the network. All we need to know at this point is that we are
+    creating an address object which specifies the TCP/IP port on
+    which the server will listen for new connection requests.  */
   ACE_INET_Addr addr (PORT);
 
-  /*
-    We now create an acceptor object.  No connections will
-    yet be established because the object isn't "open for business"
-    at this time. Which brings us to the next line...
-  */
-  Logging_Acceptor * peer_acceptor = new Logging_Acceptor();
+  Logging_Acceptor *peer_acceptor;
+
+  /* We now create an acceptor object.  No connections will yet be
+    established because the object isn't "open for business" at this
+    time. Which brings us to the next line...  */
+  ACE_NEW_RETURN (peer_acceptor,
+                  Logging_Acceptor,
+                  1);
+
+  /* where the acceptor object is opened.  You'll find that most ACE
+    objects have to be open()ed before they're of any use to you.  On
+    this open() call, we're telling the acceptor where to listen for
+    connections via the 'addr' object.  We're also telling it that we
+    want it to be registered with our 'g_reactor' instance.  */
+  if (peer_acceptor->open (addr, g_reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR, 
+                       "Opening Acceptor\n"),
+                      -1);
 
-  /*
-    where the acceptor object is opened.  You'll find that most ACE
-    objects have to be open()ed before they're of any use to you.
-    On this open() call, we're telling the acceptor where to listen
-    for connections via the 'addr' object.  We're also telling it
-    that we want it to be registered with our 'g_reactor' instance.
-   */
-  if (peer_acceptor->open(addr,g_reactor) == -1 )
-    ACE_ERROR_RETURN ((LM_ERROR, "Opening Acceptor\n"), -1);
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server logging daemon\n"));
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server logging daemon\n"));
+  /* The reactor's handle_events member function is responsible for
+    looking at all registered objects and invoking an appropriate
+    member function when anything of interest occurs. When an event is
+    processed, the handle_events function returns. In order to get all
+    events, we embed this in an infinite loop.
 
-  /*
-    The reactor's handle_events member function is responsible for looking at
-    all registered objects and invoking an appropriate member function when
-    anything of interest occurs. When an event is processed, the handle_events
-    function returns. In order to get all events, we embed this in an infinite
-    loop.
+    Since we put ourselves into an infinite loop, you'll need to
+    CTRL-C to exit the program.  */
 
-    Since we put ourselves into an infinite loop, you'll need to CTRL-C
-    to exit the program.
-  */
-  while (1)
-     g_reactor-> handle_events ();
+  for (;;)
+    g_reactor->handle_events ();
 
   return 0;
 }
diff --git a/docs/tutorials/002/handler.h b/docs/tutorials/002/handler.h
index 2f1ce9e9ecf..131e94b95e6 100644
--- a/docs/tutorials/002/handler.h
+++ b/docs/tutorials/002/handler.h
@@ -16,7 +16,7 @@
   there is a way to get to the reactor it uses.  We'll take the easy
   way out and grab the global pointer.  (There is a way to get back to
   the acceptor's reactor that we'll see later on.)  */
-extern ACE_Reactor * g_reactor;
+extern ACE_Reactor *g_reactor;
 
 /* This time we're deriving from ACE_Svc_Handler instead of
   ACE_Event_Handler.  The big reason for this is because it already
diff --git a/docs/tutorials/002/page02.html b/docs/tutorials/002/page02.html
index eb1ab5893e2..d886ad2a30c 100644
--- a/docs/tutorials/002/page02.html
+++ b/docs/tutorials/002/page02.html
@@ -62,38 +62,30 @@ the various ACE components judiciously.
 


-
 // $Id$
 
-
-/*
-  As before, we need a few ACE objects as well as our Logging_Handler declaration.
- */
+/* As before, we need a few ACE objects as well as our Logging_Handler
+  declaration.  */
 #include "ace/Acceptor.h"
 #include "ace/SOCK_Acceptor.h"
 #include "ace/Reactor.h"
 #include "handler.h"
 
-/*
-  We'll still use the global reactor pointer.  There's a snappy way around this
-  that shows up in later server tutorials.
- */
-ACE_Reactor * g_reactor;
-
-/*
-  This was hinted at in Tutorial 1.  Remember the hand-coded acceptor that we
-  created there?  This template does all of that and more and better.  If you
-  find yourself creating code that doesn't feel like a part of your application,
-  there's a good chance that ACE has a template or framework component to do
-  it for you.
- */
-typedef ACE_Acceptor < Logging_Handler, ACE_SOCK_ACCEPTOR > Logging_Acceptor;
-
-/*
-  One of the new things will be a signal handler so that we can exit the application
-  somewhat cleanly.  The 'finished' flag is used instead of the previous infninite
-  loop and the 'handler' will set that flag in respose to SIGINT (CTRL-C).
- */
+/* We'll still use the global reactor pointer.  There's a snappy way
+   around this that shows up in later server tutorials.  */
+ACE_Reactor *g_reactor;
+
+/* This was hinted at in Tutorial 1.  Remember the hand-coded acceptor
+  that we created there?  This template does all of that and more and
+  better.  If you find yourself creating code that doesn't feel like a
+  part of your application, there's a good chance that ACE has a
+  template or framework component to do it for you.  */
+typedef ACE_Acceptor <Logging_Handler, ACE_SOCK_ACCEPTOR> Logging_Acceptor;
+
+/* One of the new things will be a signal handler so that we can exit
+  the application somewhat cleanly.  The 'finished' flag is used
+  instead of the previous infinite loop and the 'handler' will set
+  that flag in response to SIGINT (CTRL-C).  */
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
 {
@@ -102,30 +94,36 @@ extern "C" void handler (int)
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main (int, char **)
+int
+main (int, char **)
 {
   // Create the reactor we'll register our event handler derivatives with.
-  g_reactor = new ACE_Reactor;
+  ACE_NEW_RETURN (g_reactor,
+                  ACE_Reactor,
+                  1);
 
   // Create the acceptor that will listen for client connections
   Logging_Acceptor peer_acceptor;
 
-  /*
-    Notice how similar this is to the open() call in Tutorial 1.  I read
-    ahead when I created that one so that it would come out this way...
-   */
-  if (peer_acceptor.open (ACE_INET_Addr (PORT), g_reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-    Here's the easiest way to respond to signals in your application.  Simply
-    construct an ACE_Sig_Action object with a "C" function and the signal you
-    want to capture.  As you might expect, there is also a way to register
-    signal handlers with a reactor but we take the easy-out here.
-   */
+  /* Notice how similar this is to the open() call in Tutorial 1.  I
+    read ahead when I created that one so that it would come out this
+    way...  */
+  if (peer_acceptor.open (ACE_INET_Addr (PORT),
+                          g_reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Here's the easiest way to respond to signals in your application.
+    Simply construct an ACE_Sig_Action object with a "C" function and
+    the signal you want to capture.  As you might expect, there is
+    also a way to register signal handlers with a reactor but we take
+    the easy-out here.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server logging daemon\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server logging daemon\n"));
 
   // Perform logging service until the signal handler receives SIGINT.
   while (!finished)
@@ -137,12 +135,11 @@ int main (int, char **)
   // Free up the memory allocated for the reactor.
   delete g_reactor;
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server logging daemon\n"));
-
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server logging daemon\n"));
   return 0;
 }
 
-
 #if defined (ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION)
 template class ACE_Acceptor <Logging_Handler, ACE_SOCK_ACCEPTOR>;
 template class ACE_Svc_Handler<ACE_SOCK_STREAM, ACE_NULL_SYNCH>;
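
The comment above admits to taking the easy way out with ACE_Sig_Action.  For the curious, here is a rough sketch of the reactor-based alternative it alludes to: derive a small handler from ACE_Event_Handler, override handle_signal(), and register it for SIGINT.  Quit_Handler is an illustrative name, and this assumes the register_handler(signum, handler) overload of ACE_Reactor.

    #include <signal.h>
    #include "ace/Event_Handler.h"
    #include "ace/Reactor.h"

    class Quit_Handler : public ACE_Event_Handler
    {
    public:
      Quit_Handler (sig_atomic_t &flag) : flag_ (flag) { }

      // The reactor invokes this when the registered signal is delivered.
      virtual int handle_signal (int, siginfo_t * = 0, ucontext_t * = 0)
      {
        flag_ = 1;
        return 0;
      }

    private:
      sig_atomic_t &flag_;
    };

    // Then, in main(), instead of constructing the ACE_Sig_Action:
    //
    //   Quit_Handler quit (finished);
    //   if (g_reactor->register_handler (SIGINT, &quit) == -1)
    //     ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "register_handler"), -1);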
diff --git a/docs/tutorials/002/page03.html b/docs/tutorials/002/page03.html
index e2243550505..365e64d0888 100644
--- a/docs/tutorials/002/page03.html
+++ b/docs/tutorials/002/page03.html
@@ -39,7 +39,7 @@
   there is a way to get to the reactor it uses.  We'll take the easy
   way out and grab the global pointer.  (There is a way to get back to
   the acceptor's reactor that we'll see later on.)  */
-extern ACE_Reactor * g_reactor;
+extern ACE_Reactor *g_reactor;
 
 /* This time we're deriving from ACE_Svc_Handler instead of
   ACE_Event_Handler.  The big reason for this is because it already
diff --git a/docs/tutorials/002/server.cpp b/docs/tutorials/002/server.cpp
index bca90aa01bc..7e46b080066 100644
--- a/docs/tutorials/002/server.cpp
+++ b/docs/tutorials/002/server.cpp
@@ -1,35 +1,27 @@
-
 // $Id$
 
-
-/*
-  As before, we need a few ACE objects as well as our Logging_Handler declaration.
- */
+/* As before, we need a few ACE objects as well as our Logging_Handler
+  declaration.  */
 #include "ace/Acceptor.h"
 #include "ace/SOCK_Acceptor.h"
 #include "ace/Reactor.h"
 #include "handler.h"
 
-/*
-  We'll still use the global reactor pointer.  There's a snappy way around this
-  that shows up in later server tutorials.
- */
-ACE_Reactor * g_reactor;
-
-/*
-  This was hinted at in Tutorial 1.  Remember the hand-coded acceptor that we
-  created there?  This template does all of that and more and better.  If you
-  find yourself creating code that doesn't feel like a part of your application,
-  there's a good chance that ACE has a template or framework component to do
-  it for you.
- */
-typedef ACE_Acceptor < Logging_Handler, ACE_SOCK_ACCEPTOR > Logging_Acceptor;
-
-/*
-  One of the new things will be a signal handler so that we can exit the application
-  somewhat cleanly.  The 'finished' flag is used instead of the previous infninite
-  loop and the 'handler' will set that flag in respose to SIGINT (CTRL-C).
- */
+/* We'll still use the global reactor pointer.  There's a snappy way
+   around this that shows up in later server tutorials.  */
+ACE_Reactor *g_reactor;
+
+/* This was hinted at in Tutorial 1.  Remember the hand-coded acceptor
+  that we created there?  This template does all of that and more and
+  better.  If you find yourself creating code that doesn't feel like a
+  part of your application, there's a good chance that ACE has a
+  template or framework component to do it for you.  */
+typedef ACE_Acceptor <Logging_Handler, ACE_SOCK_ACCEPTOR> Logging_Acceptor;
+
+/* One of the new things will be a signal handler so that we can exit
+  the application somewhat cleanly.  The 'finished' flag is used
+  instead of the previous infinite loop, and the 'handler' will set
+  that flag in response to SIGINT (CTRL-C).  */
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
 {
@@ -38,30 +30,36 @@ extern "C" void handler (int)
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main (int, char **)
+int
+main (int, char **)
 {
   // Create the reactor we'll register our event handler derivatives with.
-  g_reactor = new ACE_Reactor;
+  ACE_NEW_RETURN (g_reactor,
+                  ACE_Reactor,
+                  1);
 
   // Create the acceptor that will listen for client connections
   Logging_Acceptor peer_acceptor;
 
-  /*
-    Notice how similar this is to the open() call in Tutorial 1.  I read
-    ahead when I created that one so that it would come out this way...
-   */
-  if (peer_acceptor.open (ACE_INET_Addr (PORT), g_reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-    Here's the easiest way to respond to signals in your application.  Simply
-    construct an ACE_Sig_Action object with a "C" function and the signal you
-    want to capture.  As you might expect, there is also a way to register
-    signal handlers with a reactor but we take the easy-out here.
-   */
+  /* Notice how similar this is to the open() call in Tutorial 1.  I
+    read ahead when I created that one so that it would come out this
+    way...  */
+  if (peer_acceptor.open (ACE_INET_Addr (PORT),
+                          g_reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Here's the easiest way to respond to signals in your application.
+    Simply construct an ACE_Sig_Action object with a "C" function and
+    the signal you want to capture.  As you might expect, there is
+    also a way to register signal handlers with a reactor but we take
+    the easy-out here.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server logging daemon\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server logging daemon\n"));
 
   // Perform logging service until the signal handler receives SIGINT.
   while (!finished)
@@ -73,12 +71,11 @@ int main (int, char **)
   // Free up the memory allocated for the reactor.
   delete g_reactor;
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server logging daemon\n"));
-
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server logging daemon\n"));
   return 0;
 }
 
-
 #if defined (ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION)
 template class ACE_Acceptor <Logging_Handler, ACE_SOCK_ACCEPTOR>;
 template class ACE_Svc_Handler<ACE_SOCK_STREAM, ACE_NULL_SYNCH>;
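
ACE_NEW_RETURN deserves a quick word, since it replaces the bare 'new' from the previous revision.  The macro allocates the object and, if the allocation fails, sets errno and returns from the enclosing function with the value you supply, so main() above exits with 1 when the reactor can't be created.  Roughly (ignoring the variants that deal with compilers whose new throws instead of returning 0), the call expands to something like:

    g_reactor = new ACE_Reactor;
    if (g_reactor == 0)
      {
        errno = ENOMEM;
        return 1;
      }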
diff --git a/docs/tutorials/003/client.cpp b/docs/tutorials/003/client.cpp
index 5156b5764a8..dd21077f449 100644
--- a/docs/tutorials/003/client.cpp
+++ b/docs/tutorials/003/client.cpp
@@ -1,111 +1,98 @@
-
 // $Id$
 
-/*
-  To establish a socket connection to a server, we'll need an ACE_SOCK_Connector.
- */
+/* To establish a socket connection to a server, we'll need an
+  ACE_SOCK_Connector.  */
 #include "ace/SOCK_Connector.h"
 
-/*
-  Unlike the previous two tutorials, we're going to allow the user to provide
-  command line options this time.  Still, we need defaults in case that isn't
-  done.
- */
+/* Unlike the previous two tutorials, we're going to allow the user to
+  provide command line options this time.  Still, we need defaults in
+  case that isn't done.  */
 static u_short SERVER_PORT = ACE_DEFAULT_SERVER_PORT;
 static const char *const SERVER_HOST = ACE_DEFAULT_SERVER_HOST;
 static const int MAX_ITERATIONS = 4;
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-  /*
-    Accept the users's choice of hosts or use the default.  Then do the same
-    for the TCP/IP port at which the server is listening as well as the
-    number of iterations to perform.
-   */
-  const char *server_host = argc > 1 ? argv[1]                : SERVER_HOST;
-  u_short server_port     = argc > 2 ? ACE_OS::atoi (argv[2]) : SERVER_PORT;
-  int max_iterations      = argc > 3 ? ACE_OS::atoi (argv[3]) : MAX_ITERATIONS;
+  /* Accept the user's choice of hosts or use the default.  Then do
+    the same for the TCP/IP port at which the server is listening as
+    well as the number of iterations to perform.  */
+  const char *server_host = argc > 1 ? argv[1] : SERVER_HOST;
+  u_short server_port = argc > 2 ? ACE_OS::atoi (argv[2]) : SERVER_PORT;
+  int max_iterations = argc > 3 ? ACE_OS::atoi (argv[3]) : MAX_ITERATIONS;
 
-  /*
-    Build ourselves a Stream socket. This is a connected socket that provides
-    reliable end-to-end communications. We will use the server object to send
-    data to the server we connect to.
-   */
+  /* Build ourselves a Stream socket. This is a connected socket that
+    provides reliable end-to-end communications. We will use the
+    server object to send data to the server we connect to.  */
   ACE_SOCK_Stream server;
 
-  /*
-    And we need a connector object to establish that connection. The ACE_SOCK_Connector
-    object provides all of the tools we need to establish a connection once we know the
-    server's network address... 
-   */
+  /* And we need a connector object to establish that connection. The
+    ACE_SOCK_Connector object provides all of the tools we need to
+    establish a connection once we know the server's network
+    address...  */
   ACE_SOCK_Connector connector;
 
-  /*
-    Which we create with an ACE_INET_Addr object. This object is given the TCP/IP port
-    and hostname of the server we want to connect to.
-   */
-  ACE_INET_Addr addr (server_port, server_host);
+  /* Which we create with an ACE_INET_Addr object. This object is
+    given the TCP/IP port and hostname of the server we want to
+    connect to.  */
+  ACE_INET_Addr addr (server_port,
+                      server_host);
 
-  /*
-    So, we feed the Addr object and the Stream object to the connector's connect() member
-    function. Given this information, it will establish the network connection to the
-    server and attach that connection to the server object. 
-   */
+  /* So, we feed the Addr object and the Stream object to the
+    connector's connect() member function. Given this information, it
+    will establish the network connection to the server and attach
+    that connection to the server object.  */
   if (connector.connect (server, addr) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
   
-  /*
-    Just for grins, we'll send the server several messages.
-   */
+  /* Just for grins, we'll send the server several messages.  */
   for (int i = 0; i < max_iterations; i++)
-  {
-    char buf[BUFSIZ];
-
-    /*
-      Create our message with the message number
-     */
-    ACE_OS::sprintf (buf, "message = %d\n", i + 1);
+    {
+      char buf[BUFSIZ];
 
-    /*
-      Send the message to the server.  We use the server object's send_n() function to
-      send all of the data at once. There is also a send() function but it may not send
-      all of the data. That is due to network buffer availability and such. If the send()
-      doesn't send all of the data, it is up to you to program things such that it will
-      keep trying until all of the data is sent or simply give up. The send_n() function
-      already does the "keep trying" option for us, so we use it. 
+      /* Create our message with the message number */
+      ACE_OS::sprintf (buf,
+                       "message = %d\n",
+                       i + 1);
+      /* Send the message to the server.  We use the server object's
+        send_n() function to send all of the data at once. There is
+        also a send() function but it may not send all of the
+        data. That is due to network buffer availability and such. If
+        the send() doesn't send all of the data, it is up to you to
+        program things such that it will keep trying until all of the
+        data is sent or simply give up. The send_n() function already
+        does the "keep trying" option for us, so we use it.
 
-      Like the send() method used in the servers we've seen, there are two additional
-      parameters you can use on the send() and send_n() method calls.  The timeout
-      parameter limits the amount of time the system will attempt to send the data
-      to the peer.  The flags parameter is passed directly to the OS send() system
-      call.  See send(2) for the valid flags values.
-    */
-    if (server.send_n ( buf, strlen(buf) ) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);
+        Like the send() method used in the servers we've seen, there
+        are two additional parameters you can use on the send() and
+        send_n() method calls.  The timeout parameter limits the
+        amount of time the system will attempt to send the data to the
+        peer.  The flags parameter is passed directly to the OS send()
+        system call.  See send(2) for the valid flags values.  */
+      if (server.send_n (buf,
+                         ACE_OS::strlen (buf)) == -1)
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           "%p\n",
+                           "send"),
+                          -1);
+      else
+        /* Pause for a second.  */
+        ACE_OS::sleep (1);
     }
-    else
-    {
-      /*
-        Pause for a second.
-       */
-      ACE_OS::sleep (1);
-    }
-  }
 
-  /*
-    Close the connection to the server.  The servers we've created so far all are based
-    on the ACE_Reactor.  When we close(), the server's reactor will see activity for
-    the registered event handler and invoke handle_input().  That, in turn, will try
-    to read from the socket but get back zero bytes.  At that point, the server will know
-    that we've closed from our side.
-   */
+  /* Close the connection to the server.  The servers we've created so
+    far all are based on the ACE_Reactor.  When we close(), the
+    server's reactor will see activity for the registered event
+    handler and invoke handle_input().  That, in turn, will try to
+    read from the socket but get back zero bytes.  At that point, the
+    server will know that we've closed from our side.  */
   if (server.close () == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "close"), -1);
-  }
-
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "close"),
+                      -1);
   return 0;
 }
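
One detail the tutorial glosses over: connect() as written blocks for however long the OS takes to decide the connection cannot be made.  The connector can also be handed an ACE_Time_Value to bound that wait.  A sketch, assuming the connect() overload that takes an optional timeout; timed_open() is just an illustrative helper, not part of the tutorial:

    #include "ace/SOCK_Connector.h"

    int
    timed_open (ACE_SOCK_Stream &server,
                u_short port,
                const char *host)
    {
      ACE_SOCK_Connector connector;
      ACE_INET_Addr addr (port, host);

      // Give up if the connection isn't established within five seconds.
      ACE_Time_Value timeout (5);

      if (connector.connect (server, addr, &timeout) == -1)
        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "timed connect"), -1);

      return 0;
    }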
diff --git a/docs/tutorials/003/page01.html b/docs/tutorials/003/page01.html
index d2d72870967..c68855784bb 100644
--- a/docs/tutorials/003/page01.html
+++ b/docs/tutorials/003/page01.html
@@ -42,115 +42,102 @@ Kirthika says, "Here's an one paragraph abstract for a one page client app:"
 
 
-
 // $Id$
 
-/*
-  To establish a socket connection to a server, we'll need an ACE_SOCK_Connector.
- */
+/* To establish a socket connection to a server, we'll need an
+  ACE_SOCK_Connector.  */
 #include "ace/SOCK_Connector.h"
 
-/*
-  Unlike the previous two tutorials, we're going to allow the user to provide
-  command line options this time.  Still, we need defaults in case that isn't
-  done.
- */
+/* Unlike the previous two tutorials, we're going to allow the user to
+  provide command line options this time.  Still, we need defaults in
+  case that isn't done.  */
 static u_short SERVER_PORT = ACE_DEFAULT_SERVER_PORT;
 static const char *const SERVER_HOST = ACE_DEFAULT_SERVER_HOST;
 static const int MAX_ITERATIONS = 4;
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-  /*
-    Accept the users's choice of hosts or use the default.  Then do the same
-    for the TCP/IP port at which the server is listening as well as the
-    number of iterations to perform.
-   */
-  const char *server_host = argc > 1 ? argv[1]                : SERVER_HOST;
-  u_short server_port     = argc > 2 ? ACE_OS::atoi (argv[2]) : SERVER_PORT;
-  int max_iterations      = argc > 3 ? ACE_OS::atoi (argv[3]) : MAX_ITERATIONS;
-
-  /*
-    Build ourselves a Stream socket. This is a connected socket that provides
-    reliable end-to-end communications. We will use the server object to send
-    data to the server we connect to.
-   */
+  /* Accept the user's choice of hosts or use the default.  Then do
+    the same for the TCP/IP port at which the server is listening as
+    well as the number of iterations to perform.  */
+  const char *server_host = argc > 1 ? argv[1] : SERVER_HOST;
+  u_short server_port = argc > 2 ? ACE_OS::atoi (argv[2]) : SERVER_PORT;
+  int max_iterations = argc > 3 ? ACE_OS::atoi (argv[3]) : MAX_ITERATIONS;
+
+  /* Build ourselves a Stream socket. This is a connected socket that
+    provides reliable end-to-end communications. We will use the
+    server object to send data to the server we connect to.  */
   ACE_SOCK_Stream server;
 
-  /*
-    And we need a connector object to establish that connection. The ACE_SOCK_Connector
-    object provides all of the tools we need to establish a connection once we know the
-    server's network address... 
-   */
+  /* And we need a connector object to establish that connection. The
+    ACE_SOCK_Connector object provides all of the tools we need to
+    establish a connection once we know the server's network
+    address...  */
   ACE_SOCK_Connector connector;
 
-  /*
-    Which we create with an ACE_INET_Addr object. This object is given the TCP/IP port
-    and hostname of the server we want to connect to.
-   */
-  ACE_INET_Addr addr (server_port, server_host);
-
-  /*
-    So, we feed the Addr object and the Stream object to the connector's connect() member
-    function. Given this information, it will establish the network connection to the
-    server and attach that connection to the server object. 
-   */
+  /* Which we create with an ACE_INET_Addr object. This object is
+    given the TCP/IP port and hostname of the server we want to
+    connect to.  */
+  ACE_INET_Addr addr (server_port,
+                      server_host);
+
+  /* So, we feed the Addr object and the Stream object to the
+    connector's connect() member function. Given this information, it
+    will establish the network connection to the server and attach
+    that connection to the server object.  */
   if (connector.connect (server, addr) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
   
-  /*
-    Just for grins, we'll send the server several messages.
-   */
+  /* Just for grins, we'll send the server several messages.  */
   for (int i = 0; i < max_iterations; i++)
-  {
-    char buf[BUFSIZ];
-
-    /*
-      Create our message with the message number
-     */
-    ACE_OS::sprintf (buf, "message = %d\n", i + 1);
-
-    /*
-      Send the message to the server.  We use the server object's send_n() function to
-      send all of the data at once. There is also a send() function but it may not send
-      all of the data. That is due to network buffer availability and such. If the send()
-      doesn't send all of the data, it is up to you to program things such that it will
-      keep trying until all of the data is sent or simply give up. The send_n() function
-      already does the "keep trying" option for us, so we use it. 
-
-      Like the send() method used in the servers we've seen, there are two additional
-      parameters you can use on the send() and send_n() method calls.  The timeout
-      parameter limits the amount of time the system will attempt to send the data
-      to the peer.  The flags parameter is passed directly to the OS send() system
-      call.  See send(2) for the valid flags values.
-    */
-    if (server.send_n ( buf, strlen(buf) ) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);
-    }
-    else
     {
-      /*
-        Pause for a second.
-       */
-      ACE_OS::sleep (1);
+      char buf[BUFSIZ];
+
+      /* Create our message with the message number */
+      ACE_OS::sprintf (buf,
+                       "message = %d\n",
+                       i + 1);
+      /* Send the message to the server.  We use the server object's
+        send_n() function to send all of the data at once. There is
+        also a send() function but it may not send all of the
+        data. That is due to network buffer availability and such. If
+        the send() doesn't send all of the data, it is up to you to
+        program things such that it will keep trying until all of the
+        data is sent or simply give up. The send_n() function already
+        does the "keep trying" option for us, so we use it.
+
+        Like the send() method used in the servers we've seen, there
+        are two additional parameters you can use on the send() and
+        send_n() method calls.  The timeout parameter limits the
+        amount of time the system will attempt to send the data to the
+        peer.  The flags parameter is passed directly to the OS send()
+        system call.  See send(2) for the valid flags values.  */
+      if (server.send_n (buf,
+                         ACE_OS::strlen (buf)) == -1)
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           "%p\n",
+                           "send"),
+                          -1);
+      else
+        /* Pause for a second.  */
+        ACE_OS::sleep (1);
     }
-  }
-
-  /*
-    Close the connection to the server.  The servers we've created so far all are based
-    on the ACE_Reactor.  When we close(), the server's reactor will see activity for
-    the registered event handler and invoke handle_input().  That, in turn, will try
-    to read from the socket but get back zero bytes.  At that point, the server will know
-    that we've closed from our side.
-   */
-  if (server.close () == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "close"), -1);
-  }
 
+  /* Close the connection to the server.  The servers we've created so
+    far all are based on the ACE_Reactor.  When we close(), the
+    server's reactor will see activity for the registered event
+    handler and invoke handle_input().  That, in turn, will try to
+    read from the socket but get back zero bytes.  At that point, the
+    server will know that we've closed from our side.  */
+  if (server.close () == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "close"),
+                      -1);
   return 0;
 }
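
The remark above about send() possibly writing less than you asked for is worth a concrete picture.  If you used send() instead of send_n(), the retry loop would be your job; the sketch below (send_all() is just an illustrative name) is essentially the bookkeeping that send_n() already does for you:

    #include "ace/SOCK_Stream.h"

    ssize_t
    send_all (ACE_SOCK_Stream &peer,
              const char *buf,
              size_t len)
    {
      size_t sent = 0;

      while (sent < len)
        {
          // send() may write fewer bytes than requested.
          ssize_t n = peer.send (buf + sent, len - sent);

          if (n <= 0)
            return -1;   // error, or the peer went away
          sent += n;
        }

      return (ssize_t) sent;
    }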
 
diff --git a/docs/tutorials/004/client.cpp b/docs/tutorials/004/client.cpp index 88ddb0850ed..c65a028f633 100644 --- a/docs/tutorials/004/client.cpp +++ b/docs/tutorials/004/client.cpp @@ -1,264 +1,215 @@ - // $Id$ -/* - We need the connector object & we also bring in a simple string class. - */ +/* We need the connector object & we also bring in a simple string + class. */ #include "ace/SOCK_Connector.h" #include "ace/SString.h" -/* - In this tutorial, we extend SOCK_Stream by adding a few wrappers - around the send_n() method. - */ +/* In this tutorial, we extend SOCK_Stream by adding a few wrappers + around the send_n() method. */ class Client : public ACE_SOCK_Stream { - public: // Basic constructor - Client(void); - - /* - Construct and open() in one call. This isn't generally a good - idea because you don't have a clean way to inform the caller - when open() fails. (Unless you use C++ exceptions.) - */ - Client( const char * server, u_short port ); - - /* - Open the connection to the server. Notice that this mirrors - the use of ACE_SOCK_Connector. By providing our own open(), - we can hide the connector from our caller & make it's interaction - easier. - */ - int open( const char * server, u_short port ); - - /* - These are necessary if you're going to use the constructor that - invokes open(). - */ - inline int initialized(void) { return initialized_; } - inline int error(void) { return error_; } - - /* - This is where the coolness lies. Most C++ folks are familiar - with "cout << some-data." It's a very handy and easy way to - toss data around. By adding these method calls, we're able - to do the same thing with a socket connection. - */ - Client & operator<<( ACE_SString & str ); - Client & operator<<( char * str ); - Client & operator<<( int n ); + Client (void); + + /* Construct and open() in one call. This isn't generally a good + idea because you don't have a clean way to inform the caller when + open() fails. (Unless you use C++ exceptions.) */ + Client (const char *server, + u_short port); + + /* Open the connection to the server. Notice that this mirrors the + use of ACE_SOCK_Connector. By providing our own open(), we can + hide the connector from our caller & make it's interaction easier. */ + int open (const char *server, + u_short port); + + /* These are necessary if you're going to use the constructor that + invokes open(). */ + int initialized (void) { return initialized_; } + int error (void) { return error_; } + + /* This is where the coolness lies. Most C++ folks are familiar + with "cout << some-data." It's a very handy and easy way to toss + data around. By adding these method calls, we're able to do the + same thing with a socket connection. */ + Client &operator<< (ACE_SString &str); + Client &operator<< (char *str); + Client &operator<< (int n); protected: - unsigned char initialized_; - unsigned char error_; - + u_char initialized_; + u_char error_; }; -/* - The basic constructor just sets our flags to reasonable values. - */ +/* The basic constructor just sets our flags to reasonable values. */ Client::Client(void) { initialized_ = 0; error_ = 0; } -/* - This constructor also sets the flags but then calls open(). If the - open() fails, the flags will be set appropriately. Use the two inline - method calls initialized() and error() to check the object state after - using this constructor. - */ -Client::Client( const char * server, u_short port ) +/* This constructor also sets the flags but then calls open(). 
If the + open() fails, the flags will be set appropriately. Use the two + inline method calls initialized() and error() to check the object + state after using this constructor. */ +Client::Client (const char *server, + u_short port) { initialized_ = 0; error_ = 0; - (void)open(server,port); + this->open (server, port); } -/* - Open a connection to the server. This hides the use of ACE_SOCK_Connector - from our caller. Since our caller probably doesn't care *how* we connect, - this is a good thing. - */ -int Client::open( const char * server, u_short port ) +/* Open a connection to the server. This hides the use of + ACE_SOCK_Connector from our caller. Since our caller probably + doesn't care *how* we connect, this is a good thing. */ +int +Client::open (const char *server, + u_short port) { - /* - This is right out of Tutorial 3. The only thing we've added is to set - the initialized_ member variable on success. - */ + /* This is right out of Tutorial 3. The only thing we've added is + to set the initialized_ member variable on success. */ ACE_SOCK_Connector connector; ACE_INET_Addr addr (port, server); if (connector.connect (*this, addr) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1); - } - + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); initialized_ = 1; - - return(0); + return 0; } -/* - The first of our put operators sends a simple string object to the peer. -*/ -Client & Client::operator<<( ACE_SString & str ) +/* The first of our put operators sends a simple string object to the + peer. */ +Client & +Client::operator<< (ACE_SString &str) { - /* - We have to be able to allow: - server << foo << bar << stuff; - - To accomplish that, every << operator must check that the object is - in a valid state before doing work. - */ - - if( initialized() && ! error() ) - { - /* - Get the actual data held in the string object - */ - const char * cp = str.rep(); - - /* - Send that data to the peer using send_n() as before. If we have - a problem, we'll set error_ so that subsequent << operations won't - try to use a broken stream. - */ - if( this->send_n(cp,strlen(cp)) == -1 ) - { - error_ = 1; - } - } + /* We have to be able to allow: server << foo << bar << stuff; + + To accomplish that, every << operator must check that the object + is in a valid state before doing work. */ + + if (initialized () && !error ()) + { + /* Get the actual data held in the string object */ + const char *cp = str.fast_rep (); + + /* Send that data to the peer using send_n() as before. If we + have a problem, we'll set error_ so that subsequent << + operations won't try to use a broken stream. */ + if (this->send_n (cp, + ACE_OS::strlen (cp)) == -1) + error_ = 1; + } else - { - /* - Be sure that error_ is set if somebody tries to use us when - we're not initialized. - */ - error_ = 1; - } - - /* - We have to return a reference to ourselves to allow chaining of - put operations (eg -- "server << foo << bar"). Without the reference, - you would have to do each put operation as a statement. That's OK - but doesn't have the same feel as standard C++ iostreams. - */ + /* Be sure that error_ is set if somebody tries to use us when + we're not initialized. */ + error_ = 1; + + /* We have to return a reference to ourselves to allow chaining of + put operations (eg -- "server << foo << bar"). Without the + reference, you would have to do each put operation as a statement. + That's OK but doesn't have the same feel as standard C++ + iostreams. */ return *this ; } -/* -How do you put a char*? 
We'll take an easy way out and construct an ACE_SString -from the char* and then put that. It would have been more efficient to implement -this with the body of the operator<<(ACE_SString&) method and then express that -method in terms of this one. There's always more than one way to do things! - */ -Client & Client::operator<< ( char * str ) +/* How do you put a char*? We'll take an easy way out and construct +an ACE_SString from the char* and then put that. It would have been +more efficient to implement this with the body of the +operator<<(ACE_SString&) method and then express that method in terms +of this one. There's always more than one way to do things! */ + +Client & +Client::operator<< (char *str) { - ACE_SString newStr(str); + ACE_SString newStr (str); *this << newStr; return *this ; - /* - Notice that we could have been really clever and done: + /* Notice that we could have been really clever and done: - return *this << ACE_SString(str); + return *this << ACE_SString (str); - That kind of thing just makes debugging a pain though! - */ + That kind of thing just makes debugging a pain though! */ } -/* - ACE_SString and char* are both about the same thing. What do you do about - different datatypes though? +/* ACE_SString and char* are both about the same thing. What do you + do about different datatypes though? - Do the same thing we did with char* and convert it to ACE_SString where we - already have a << operator defined. - */ -Client & Client::operator<< ( int n ) + Do the same thing we did with char* and convert it to ACE_SString + where we already have a << operator defined. */ +Client & +Client::operator<< (int n) { - /* - Create a character buffer large enough for the largest number. That's - a tough call but 1024 should be quite enough. - */ - char buf[1024]; - - /* - Put the number into our buffer... - */ - ACE_OS::sprintf(buf,"(%d)\n",n); - - /* - And create the ACE_SString that we know how to put. - */ - ACE_SString newStr(buf); - - /* - Send it and... - */ + /* Create a character buffer large enough for the largest number. + That's a tough call but BUFSIZ should be quite enough. */ + char buf[BUFSIZ]; + + /* Put the number into our buffer... */ + ACE_OS::sprintf (buf, + "(%d)\n", + n); + + /* And create the ACE_SString that we know how to put. */ + ACE_SString newStr (buf); + + /* Send it and... */ *this << newStr; - /* - return ourselves as usual. - */ + /* return ourselves as usual. */ return *this; } - -/* - Now we pull it all together. Like Tutorial 3, we'll allow command line options. - */ -int main (int argc, char *argv[]) +/* Now we pull it all together. Like Tutorial 3, we'll allow command + line options. */ +int +main (int argc, char *argv[]) { - const char *server_host = argc > 1 ? argv[1] : ACE_DEFAULT_SERVER_HOST; - u_short server_port = argc > 2 ? ACE_OS::atoi (argv[2]) : ACE_DEFAULT_SERVER_PORT; - int max_iterations = argc > 3 ? ACE_OS::atoi (argv[3]) : 4; + const char *server_host = argc > 1 ? argv[1] : ACE_DEFAULT_SERVER_HOST; + u_short server_port = argc > 2 ? ACE_OS::atoi (argv[2]) : ACE_DEFAULT_SERVER_PORT; + int max_iterations = argc > 3 ? ACE_OS::atoi (argv[3]) : 4; - /* - Use the basic constructor since the other isn't really very safe. - */ + /* Use the basic constructor since the other isn't really very safe. */ Client peer; - /* - Open the server connection. Notice how this is simpler than Tutorial 3 - since we only have to provide a host name and port value. 
- */ - if( peer.open(server_host,server_port) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1); - } + /* Open the server connection. Notice how this is simpler than + Tutorial 3 since we only have to provide a host name and port + value. */ + if (peer.open (server_host, + server_port) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); for (int i = 0; i < max_iterations; i++) - { - /* - Tell the server which iteration we're on. No more mucking aroudn with - sprintf at this level! It's all hidden from us. - */ - peer << "message = " << i+1; - - /* - Everything OK? - */ - if ( peer.error() ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1); - } - else { - ACE_OS::sleep (1); + /* Tell the server which iteration we're on. No more mucking + aroudn with sprintf at this level! It's all hidden from us. */ + peer << "message = " << i+1; + + /* Everything OK? */ + if (peer.error ()) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "send"), + -1); + else + ACE_OS::sleep (1); } - } if (peer.close () == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "close"), -1); - } - + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "close"), + -1); return 0; } diff --git a/docs/tutorials/004/page01.html b/docs/tutorials/004/page01.html index 71db5883d43..13a93255949 100644 --- a/docs/tutorials/004/page01.html +++ b/docs/tutorials/004/page01.html @@ -32,268 +32,219 @@ successfully interacts with the server when connection is established.
-
 // $Id$
 
-/*
-  We need the connector object & we also bring in a simple string class.
- */
+/* We need the connector object & we also bring in a simple string
+  class.  */
 #include "ace/SOCK_Connector.h"
 #include "ace/SString.h"
 
-/*
-  In this tutorial, we extend SOCK_Stream by adding a few wrappers
-  around the send_n() method.
- */
+/* In this tutorial, we extend SOCK_Stream by adding a few wrappers
+  around the send_n() method.  */
 class Client : public ACE_SOCK_Stream
 {
-
 public:
   // Basic constructor
-  Client(void);
-
-  /*
-    Construct and open() in one call.  This isn't generally a good
-    idea because you don't have a clean way to inform the caller
-    when open() fails.  (Unless you use C++ exceptions.)
-   */
-  Client( const char * server, u_short port );
-
-  /*
-    Open the connection to the server.  Notice that this mirrors
-    the use of ACE_SOCK_Connector.  By providing our own open(),
-    we can hide the connector from our caller & make it's interaction
-    easier.
-   */
-  int open( const char * server, u_short port );
-
-  /*
-    These are necessary if you're going to use the constructor that
-    invokes open().
-   */
-  inline int initialized(void) { return initialized_; }
-  inline int error(void)       { return error_; }
-
-  /*
-    This is where the coolness lies.  Most C++ folks are familiar
-    with "cout << some-data."  It's a very handy and easy way to
-    toss data around.  By adding these method calls, we're able
-    to do the same thing with a socket connection.
-   */
-  Client & operator<<( ACE_SString & str );
-  Client & operator<<( char * str );
-  Client & operator<<( int  n );
+  Client (void);
+
+  /* Construct and open() in one call.  This isn't generally a good
+    idea because you don't have a clean way to inform the caller when
+    open() fails.  (Unless you use C++ exceptions.)  */
+  Client (const char *server,
+          u_short port);
+
+  /* Open the connection to the server.  Notice that this mirrors the
+    use of ACE_SOCK_Connector.  By providing our own open(), we can
+    hide the connector from our caller & make its interaction easier.  */
+  int open (const char *server,
+            u_short port);
+
+  /* These are necessary if you're going to use the constructor that
+    invokes open().  */
+  int initialized (void) { return initialized_; }
+  int error (void) { return error_; }
+
+  /* This is where the coolness lies.  Most C++ folks are familiar
+    with "cout << some-data."  It's a very handy and easy way to toss
+    data around.  By adding these method calls, we're able to do the
+    same thing with a socket connection.  */
+  Client &operator<< (ACE_SString &str);
+  Client &operator<< (char *str);
+  Client &operator<< (int n);
 
 protected:
-  unsigned char initialized_;
-  unsigned char error_;
-
+  u_char initialized_;
+  u_char error_;
 };
 
-/*
-  The basic constructor just sets our flags to reasonable values.
- */
+/* The basic constructor just sets our flags to reasonable values.  */
 Client::Client(void)
 {
   initialized_ = 0;
   error_ = 0;
 }
 
-/*
-  This constructor also sets the flags but then calls open().  If the
-  open() fails, the flags will be set appropriately.  Use the two inline
-  method calls initialized() and error() to check the object state after
-  using this constructor.
- */
-Client::Client( const char * server, u_short port )
+/* This constructor also sets the flags but then calls open().  If the
+  open() fails, the flags will be set appropriately.  Use the two
+  inline method calls initialized() and error() to check the object
+  state after using this constructor.  */
+Client::Client (const char *server,
+                u_short port)
 {
   initialized_ = 0;
   error_ = 0;
-  (void)open(server,port);
+  this->open (server, port);
 }
 
-/*
-  Open a connection to the server.  This hides the use of ACE_SOCK_Connector
-  from our caller.  Since our caller probably doesn't care *how* we connect,
-  this is a good thing.
- */
-int Client::open( const char * server, u_short port )
+/* Open a connection to the server.  This hides the use of
+  ACE_SOCK_Connector from our caller.  Since our caller probably
+  doesn't care *how* we connect, this is a good thing.  */
+int 
+Client::open (const char *server,
+              u_short port)
 {
-  /*
-    This is right out of Tutorial 3.  The only thing we've added is to set
-    the initialized_ member variable on success.
-   */
+  /* This is right out of Tutorial 3.  The only thing we've added is
+    to set the initialized_ member variable on success.  */
 
   ACE_SOCK_Connector connector;
   ACE_INET_Addr addr (port, server);
 
   if (connector.connect (*this, addr) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
-
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
   initialized_ = 1;
-
-  return(0);
+  return 0;
 }
 
-/*
-  The first of our put operators sends a simple string object to the peer.
-*/
-Client & Client::operator<<( ACE_SString & str )
+/* The first of our put operators sends a simple string object to the
+  peer.  */
+Client & 
+Client::operator<< (ACE_SString &str)
 {
-  /*
-    We have to be able to allow:
-      server << foo << bar << stuff;
-
-    To accomplish that, every << operator must check that the object is
-    in a valid state before doing work.
-   */
-
-  if( initialized() && ! error() )
-  {
-      /*
-        Get the actual data held in the string object
-       */
-      const char * cp = str.rep();
-
-      /*
-        Send that data to the peer using send_n() as before.  If we have
-        a problem, we'll set error_ so that subsequent << operations won't
-        try to use a broken stream.
-       */
-      if( this->send_n(cp,strlen(cp)) == -1 )
-      {
-          error_ = 1;
-      }
-  }
+  /* We have to be able to allow: server << foo << bar << stuff;
+
+    To accomplish that, every << operator must check that the object
+    is in a valid state before doing work.  */
+
+  if (initialized () && !error ())
+    {
+      /* Get the actual data held in the string object */
+      const char *cp = str.fast_rep ();
+
+      /* Send that data to the peer using send_n() as before.  If we
+        have a problem, we'll set error_ so that subsequent <<
+        operations won't try to use a broken stream.  */
+      if (this->send_n (cp,
+                        ACE_OS::strlen (cp)) == -1)
+        error_ = 1;
+    }
   else
-  {
-      /*
-        Be sure that error_ is set if somebody tries to use us when
-        we're not initialized.
-       */
-      error_ = 1;
-  }
-
-  /*
-    We have to return a reference to ourselves to allow chaining of
-    put operations (eg -- "server << foo << bar").  Without the reference,
-    you would have to do each put operation as a statement.  That's OK
-    but doesn't have the same feel as standard C++ iostreams.
-   */
+    /* Be sure that error_ is set if somebody tries to use us when
+        we're not initialized.  */
+    error_ = 1;
+
+  /* We have to return a reference to ourselves to allow chaining of
+    put operations (eg -- "server << foo << bar").  Without the
+    reference, you would have to do each put operation as a statement.
+    That's OK but doesn't have the same feel as standard C++
+    iostreams.  */
   return *this ;
 }
 
-/*
-How do you put a char*?  We'll take an easy way out and construct an ACE_SString
-from the char* and then put that.  It would have been more efficient to implement
-this with the body of the operator<<(ACE_SString&) method and then express that
-method in terms of this one.  There's always more than one way to do things!
- */
-Client & Client::operator<< ( char * str )
+/* How do you put a char*?  We'll take an easy way out and construct
+an ACE_SString from the char* and then put that.  It would have been
+more efficient to implement this with the body of the
+operator<<(ACE_SString&) method and then express that method in terms
+of this one.  There's always more than one way to do things!  */
+
+Client & 
+Client::operator<< (char *str)
 {
-  ACE_SString newStr(str);
+  ACE_SString newStr (str);
 
   *this << newStr;
 
   return *this ;
 
-  /*
-    Notice that we could have been really clever and done:
+  /* Notice that we could have been really clever and done:
 
-      return *this << ACE_SString(str);
+     return *this << ACE_SString (str);
 
-    That kind of thing just makes debugging a pain though!
-   */
+    That kind of thing just makes debugging a pain though!  */
 }
 
-/*
-  ACE_SString and char* are both about the same thing.  What do you do about
-  different datatypes though?
+/* ACE_SString and char* are both about the same thing.  What do you
+  do about different datatypes though?
 
-  Do the same thing we did with char* and convert it to ACE_SString where we
-  already have a << operator defined.
- */
-Client & Client::operator<< ( int n )
+  Do the same thing we did with char* and convert it to ACE_SString
+  where we already have a << operator defined.  */
+Client & 
+Client::operator<< (int n)
 {
-  /*
-    Create a character buffer large enough for the largest number.  That's
-    a tough call but 1024 should be quite enough.
-  */
-  char buf[1024];
-
-  /*
-    Put the number into our buffer...
-  */
-  ACE_OS::sprintf(buf,"(%d)\n",n);
-
-  /*
-    And create the ACE_SString that we know how to put.
-  */
-  ACE_SString newStr(buf);
-
-  /*
-    Send it and...
-  */
+  /* Create a character buffer large enough for the largest number.
+    That's a tough call but BUFSIZ should be quite enough.  */
+  char buf[BUFSIZ];
+
+  /* Put the number into our buffer...  */
+  ACE_OS::sprintf (buf,
+                   "(%d)\n",
+                   n);
+
+  /* And create the ACE_SString that we know how to put.  */
+  ACE_SString newStr (buf);
+
+  /* Send it and...  */
   *this << newStr;
 
-  /*
-    return ourselves as usual.
-  */
+  /* return ourselves as usual.  */
   return *this;
 }
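
/* A natural follow-on (not in the tutorial itself): the same
   conversion trick handles any other type.  As a sketch only -- you
   would also add the matching declaration to the class -- a
   floating-point overload could look like this.  */
Client &
Client::operator<< (double d)
{
  /* Format the value into a buffer, exactly as the int overload does,
     then reuse the ACE_SString put operator.  */
  char buf[BUFSIZ];
  ACE_OS::sprintf (buf, "(%f)\n", d);

  ACE_SString newStr (buf);
  *this << newStr;

  return *this;
}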
 
-
-/*
-  Now we pull it all together.  Like Tutorial 3, we'll allow command line options.
- */
-int main (int argc, char *argv[])
+/* Now we pull it all together.  Like Tutorial 3, we'll allow command
+  line options.  */
+int 
+main (int argc, char *argv[])
 {
-  const char *server_host = argc > 1 ? argv[1]        : ACE_DEFAULT_SERVER_HOST;
-  u_short server_port     = argc > 2 ? ACE_OS::atoi (argv[2]) : ACE_DEFAULT_SERVER_PORT;
-  int max_iterations      = argc > 3 ? ACE_OS::atoi (argv[3]) : 4;
+  const char *server_host = argc > 1 ? argv[1] : ACE_DEFAULT_SERVER_HOST;
+  u_short server_port = argc > 2 ? ACE_OS::atoi (argv[2]) : ACE_DEFAULT_SERVER_PORT;
+  int max_iterations = argc > 3 ? ACE_OS::atoi (argv[3]) : 4;
 
-  /*
-    Use the basic constructor since the other isn't really very safe.
-   */
+  /* Use the basic constructor since the other isn't really very safe.  */
   Client peer;
   
-  /*
-    Open the server connection.  Notice how this is simpler than Tutorial 3
-    since we only have to provide a host name and port value.
-   */
-  if( peer.open(server_host,server_port) == -1 )
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
+  /* Open the server connection.  Notice how this is simpler than
+    Tutorial 3 since we only have to provide a host name and port
+    value.  */
+  if (peer.open (server_host,
+                 server_port) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
   
   for (int i = 0; i < max_iterations; i++)
-  {
-    /*
-      Tell the server which iteration we're on.  No more mucking aroudn with
-      sprintf at this level!  It's all hidden from us.
-     */
-    peer << "message = " << i+1;
-
-    /*
-      Everything OK?
-     */
-    if ( peer.error() )
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);
-    }
-    else
     {
-      ACE_OS::sleep (1);
+      /* Tell the server which iteration we're on.  No more mucking
+        around with sprintf at this level!  It's all hidden from us.  */
+      peer << "message = " << i+1;
+
+      /* Everything OK?  */
+      if (peer.error ())
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           "%p\n",
+                           "send"),
+                          -1);
+      else
+        ACE_OS::sleep (1);
     }
-  }
 
   if (peer.close () == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "close"), -1);
-  }
-
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "close"),
+                      -1);
   return 0;
 }
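
A minimal usage sketch to make the error-flag design concrete (the values and messages here are illustrative, not from the tutorial): because every put operator checks error() before touching the socket, a failure anywhere in a chain turns the remaining puts into no-ops, so a single test after the chain is enough.

      Client peer;

      if (peer.open (ACE_DEFAULT_SERVER_HOST, ACE_DEFAULT_SERVER_PORT) == -1)
        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);

      peer << "x = " << 1 << "  y = " << 2;

      // error_ latches, so one check covers the whole chain.
      if (peer.error ())
        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);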
 
diff --git a/docs/tutorials/005/client_handler.cpp b/docs/tutorials/005/client_handler.cpp index 8d669b8320f..3d4ef08dc38 100644 --- a/docs/tutorials/005/client_handler.cpp +++ b/docs/tutorials/005/client_handler.cpp @@ -1,245 +1,221 @@ - // $Id$ -/* - In client_handler.h I alluded to the fact that we'll mess around with a - Client_Acceptor pointer. To do so, we need the Client_Acceptor object - declaration. +/* In client_handler.h I alluded to the fact that we'll mess around + with a Client_Acceptor pointer. To do so, we need the + Client_Acceptor object declaration. We know that including client_handler.h is redundant because - client_acceptor.h includes it. Still, the sentry prevents double-inclusion - from causing problems and it's sometimes good to be explicit about what - we're using. + client_acceptor.h includes it. Still, the sentry prevents + double-inclusion from causing problems and it's sometimes good to + be explicit about what we're using. - On the other hand, we don't directly include any ACE header files here. - */ + On the other hand, we don't directly include any ACE header files + here. */ #include "client_acceptor.h" #include "client_handler.h" -/* - Our constructor doesn't do anything. That's generally a good idea. Unless - you want to start throwing exceptions, there isn't a really good way to - indicate that a constructor has failed. If I had my way, I'd have a boolean - return code from it that would cause new to return 0 if I failed. Oh - well... - */ +/* Our constructor doesn't do anything. That's generally a good idea. + Unless you want to start throwing exceptions, there isn't a really + good way to indicate that a constructor has failed. If I had my + way, I'd have a boolean return code from it that would cause new to + return 0 if I failed. Oh well... */ Client_Handler::Client_Handler (void) { } -/* - Our destructor doesn't do anything either. That is also by design. - Remember, we really want folks to use destroy() to get rid of us. If that's - so, then there's nothing left to do when the destructor gets invoked. - */ +/* Our destructor doesn't do anything either. That is also by design. + Remember, we really want folks to use destroy() to get rid of us. + If that's so, then there's nothing left to do when the destructor + gets invoked. */ Client_Handler::~Client_Handler (void) { - // Make sure that our peer closes when we're deleted. This - // will probably happened when the peer is deleted but it - // doesn't hurt to be explicit. - this->peer ().close (); + // Make sure that our peer closes when we're deleted. This + // will probably happened when the peer is deleted but it + // doesn't hurt to be explicit. + this->peer ().close (); } -/* - The much talked about destroy() method! The reason I keep going on about - this is because it's just a Bad Idea (TM) to do real work inside of a - destructor. Although this method is void, it really should return - int so that it can tell the caller there was a problem. Even as - void you could at least throw an exception which you would never want - to do in a destructor. - */ -void Client_Handler::destroy (void) +/* The much talked about destroy() method! The reason I keep going on + about this is because it's just a Bad Idea (TM) to do real work + inside of a destructor. Although this method is void, it really + should return int so that it can tell the caller there was a + problem. Even as void you could at least throw an exception which + you would never want to do in a destructor. 
 */
+void
+Client_Handler::destroy (void)
 {
-  /*
-     Tell the reactor to forget all about us.  Notice that we use the same args
-     here that we use in the open() method to register ourselves.  In addition,
-     we use the DONT_CALL flag to prevent handle_close() being called.  Since we
-     likely got here due to handle_close(), that could cause a bit of nasty
-     recursion!
-   */
+  /* Tell the reactor to forget all about us.  Notice that we use the
+     same args here that we use in the open() method to register
+     ourselves.  In addition, we use the DONT_CALL flag to prevent
+     handle_close() being called.  Since we likely got here due to
+     handle_close(), that could cause a bit of nasty recursion! */
   this->reactor ()->remove_handler (this,
-        ACE_Event_Handler:: READ_MASK | ACE_Event_Handler::DONT_CALL);
+                                    ACE_Event_Handler:: READ_MASK | ACE_Event_Handler::DONT_CALL);
 
-  /*
-     This is how we're able to tell folks not to use delete.  By
-     deleting our own instance, we take care of memory leaks after ensuring
-     that the object is shut down correctly.
-   */
+  /* This is how we're able to tell folks not to use delete.  By
+     deleting our own instance, we take care of memory leaks after
+     ensuring that the object is shut down correctly. */
   delete this;
 }
 
-/*
-   As mentioned before, the open() method is called by the Client_Acceptor when
-   a new client connection has been accepted.  The Client_Acceptor instance
-   pointer is cast to a void* and given to us here.  We'll use that to avoid
-   some global data...
- */
-int Client_Handler::open (void *_acceptor)
+/* As mentioned before, the open() method is called by the
+   Client_Acceptor when a new client connection has been accepted.
+   The Client_Acceptor instance pointer is cast to a void* and given
+   to us here.  We'll use that to avoid some global data... */
+int
+Client_Handler::open (void *arg)
 {
-  /*
-     Convert the void* to a Client_Acceptor*.  You should probably use those
-     fancy new C++ cast operators but I can never remember how/when to do so.
-     Since you can cast just about anything around a void* without compiler
-     warnings be very sure of what you're doing when you do this kind of thing.
-     That's where the new-style cast operators can save you.
-   */
-  Client_Acceptor *acceptor = (Client_Acceptor *) _acceptor;
-
-  /*
-     Our reactor reference will be set when we register ourselves but I decided
-     to go ahead and set it here.  No good reason really...
-   */
+  /* Convert the void* to a Client_Acceptor*.  You should probably use
+     those fancy ACE_*_cast macros but I can never remember how/when
+     to do so.  Since you can cast just about anything around a void*
+     without compiler warnings be very sure of what you're doing when
+     you do this kind of thing.  That's where the new-style cast
+     operators can save you. */
+  Client_Acceptor *acceptor = (Client_Acceptor *) arg;
+
+  /* Our reactor reference will be set when we register ourselves but
+     I decided to go ahead and set it here.  No good reason really... */
   this->reactor (acceptor->reactor ());
 
-  /*
-     We need this to store the address of the client that we are now connected
-     to.  We'll use it later to display a debug message.
-   */
+  /* We need this to store the address of the client that we are now
+     connected to.  We'll use it later to display a debug message. */
   ACE_INET_Addr addr;
 
-  /*
-     Our ACE_Svc_Handler baseclass gives us the peer() method as a way to
-     access our underlying ACE_SOCK_Stream.  On that object, we can invoke the
-     get_remote_addr() method to get an ACE_INET_Addr having our client's
-     address information. As with most ACE methods, we'll get back (and return)
-     a -1 if there was any kind of error.  Once we have the ACE_INET_Addr, we
-     can query it to find out the clien's host name, TCP/IP address, TCP/IP
-     port value and so forth.  One word of warning: the get_host_name()
-     method of ACE_INET_Addr may return you an empty string if your name server
-     can't resolve it.  On the other hand, get_host_addr() will always give you
-     the dotted-decimal string representing the TCP/IP address.
-   */
+  /* Our ACE_Svc_Handler baseclass gives us the peer() method as a way
+     to access our underlying ACE_SOCK_Stream.  On that object, we can
+     invoke the get_remote_addr() method to get an ACE_INET_Addr
+     having our client's address information.  As with most ACE
+     methods, we'll get back (and return) a -1 if there was any kind
+     of error.  Once we have the ACE_INET_Addr, we can query it to
+     find out the client's host name, TCP/IP address, TCP/IP port value
+     and so forth.  One word of warning: the get_host_name() method of
+     ACE_INET_Addr may return you an empty string if your name server
+     can't resolve it.  On the other hand, get_host_addr() will always
+     give you the dotted-decimal string representing the TCP/IP
+     address. */
   if (this->peer ().get_remote_addr (addr) == -1)
-    {
-      return -1;
-    }
-
-  /*
-     If we managed to get the client's address then we're connected to a real
-     and valid client.  I suppose that in some cases, the client may connect
-     and disconnect so quickly that it is invalid by the time we get here.  In
-     any case, the test above should always be done to ensure that the
-     connection is worth keeping.
-
-     Now, register ourselves with a reactor and tell that reactor that we want
-     to be notified when there is something to read.  Remember, we took our
-     reactor value from the acceptor which created us in the first place.
-     Since we're exploring a single-threaded implementation, this is the
-     correct thing to do.
-   */
-  if (this->reactor ()->register_handler (this, ACE_Event_Handler::READ_MASK) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
-    }
-
-  /*
-     Here, we use the ACE_INET_Addr object to print a message with the name of
-     the client we're connected to.  Again, it is possible that you'll get an
-     empty string for the host name if your DNS isn't configured correctly or
-     if there is some other reason that a TCP/IP addreess cannot be converted
-     into a host name.
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) connected with %s\n", addr.get_host_name ()));
-
-  /*
-     Always return zero on success.
-   */
+    return -1;
+
+  /* If we managed to get the client's address then we're connected to
+     a real and valid client.  I suppose that in some cases, the
+     client may connect and disconnect so quickly that it is invalid
+     by the time we get here.  In any case, the test above should
+     always be done to ensure that the connection is worth keeping.
+
+     Now, register ourselves with a reactor and tell that reactor that
+     we want to be notified when there is something to read.
+     Remember, we took our reactor value from the acceptor which
+     created us in the first place.  Since we're exploring a
+     single-threaded implementation, this is the correct thing to do. */
+  if (this->reactor ()->register_handler (this,
+                                          ACE_Event_Handler::READ_MASK) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "(%P|%t) can't register with reactor\n"),
+                      -1);
+
+  /* Here, we use the ACE_INET_Addr object to print a message with the
+     name of the client we're connected to.  Again, it is possible
+     that you'll get an empty string for the host name if your DNS
+     isn't configured correctly or if there is some other reason that
+     a TCP/IP address cannot be converted into a host name. */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) connected with %s\n",
+              addr.get_host_name ()));
+
+  /* Always return zero on success. */
   return 0;
 }
 
-/*
-   In the open() method, we registered with the reactor and requested to be
-   notified when there is data to be read.  When the reactor sees that activity
-   it will invoke this handle_input() method on us.  As I mentioned, the _handle
-   parameter isn't useful to us but it narrows the list of methods the reactor
-   has to worry about and the list of possible virtual functions we would have
-   to override.
- */
-int Client_Handler::handle_input (ACE_HANDLE _handle)
+/* In the open() method, we registered with the reactor and requested
+   to be notified when there is data to be read.  When the reactor
+   sees that activity it will invoke this handle_input() method on us.
+   As I mentioned, the handle parameter isn't useful to us but it
+   narrows the list of methods the reactor has to worry about and the
+   list of possible virtual functions we would have to override. */
+int
+Client_Handler::handle_input (ACE_HANDLE handle)
 {
-  /*
-     Some compilers don't like it when you fail to use a parameter.  This macro
-     will keep 'em quiet for you.
-   */
-  ACE_UNUSED_ARG (_handle);
-
-  /*
-     Now, we create and initialize a buffer for receiving the data.  Since this
-     is just a simple test app, we'll use a small buffer size.
-   */
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
-
-  /*
-     Invoke the process() method with a pointer to our data area.  We'll let
-     that method worry about interfacing with the data.  You might choose to go
-     ahead and read the data and then pass the result to process().  However,
-     application logic may require that you read a few bytes to determine what
-     else to read...  It's best if we push that all into the application-logic
-     level.
-   */
+  /* Some compilers don't like it when you fail to use a parameter.
+     This macro will keep 'em quiet for you. */
+  ACE_UNUSED_ARG (handle);
+
+  /* Now, we create and initialize a buffer for receiving the data.
+     Since this is just a simple test app, we'll use a small buffer
+     size. */
+  char buf[BUFSIZ];
+
+  /* Invoke the process() method with a pointer to our data area.
+     We'll let that method worry about interfacing with the data.  You
+     might choose to go ahead and read the data and then pass the
+     result to process().  However, application logic may require that
+     you read a few bytes to determine what else to read...  It's best
+     if we push that all into the application-logic level. */
   return this->process (buf, sizeof (buf));
 }
 
-/*
-   If we return -1 out of handle_input() or if the reactor sees other problems
-   with us then handle_close() will be called.  The reactor framework
-   will take care of removing us (due to the -1), so we don't need to
-   use the destroy() method.  Instead, we just delete ourselves directly.
- */
-int Client_Handler::handle_close (ACE_HANDLE _handle, ACE_Reactor_Mask _mask)
+/* If we return -1 out of handle_input() or if the reactor sees other
+   problems with us then handle_close() will be called.  The reactor
+   framework will take care of removing us (due to the -1), so we
+   don't need to use the destroy() method.  Instead, we just delete
+   ourselves directly. */
+int
+Client_Handler::handle_close (ACE_HANDLE handle,
+                              ACE_Reactor_Mask mask)
 {
-  ACE_UNUSED_ARG (_handle);
-  ACE_UNUSED_ARG (_mask);
+  ACE_UNUSED_ARG (handle);
+  ACE_UNUSED_ARG (mask);
 
   delete this;
   return 0;
 }
 
-/*
-   And, at last, we get to the application-logic level.  Out of everything
-   we've done so far, this is the only thing that really has anything to do
-   with what your application will do.  In this method we will read and process
-   the client's data.  In a real appliation, you will probably have a bit more
-   in main() to deal with command line options but after that point, all of the
-   action takes place here.
- */
-int Client_Handler::process (char *_rdbuf, int _rdbuf_len)
+/* And, at last, we get to the application-logic level.  Out of
+   everything we've done so far, this is the only thing that really
+   has anything to do with what your application will do.  In this
+   method we will read and process the client's data.  In a real
+   application, you will probably have a bit more in main() to deal
+   with command line options but after that point, all of the action
+   takes place here. */
+int
+Client_Handler::process (char *rdbuf,
+                         int rdbuf_len)
 {
-  /*
-     Using the buffer provided for us, we read the data from the client.  If
-     there is a read error (eg -- recv() returns -1) then it's a pretty good
-     bet that the connection is gone.  Likewise, if we read zero bytes then
-     something wrong has happened.  The reactor wouldn't have called us if
-     there wasn't some kind of read activity but there wouldn't be activity if
-     there were no bytes to read...
-
-     On the other hand, if we got some data then we can display it in a debug
-     message for everyone to see.
-   */
-  switch (this->peer ().recv (_rdbuf, _rdbuf_len))
+  /* Using the buffer provided for us, we read the data from the
+     client.  If there is a read error (eg -- recv() returns -1) then
+     it's a pretty good bet that the connection is gone.  Likewise, if
+     we read zero bytes then something wrong has happened.  The
+     reactor wouldn't have called us if there wasn't some kind of read
+     activity but there wouldn't be activity if there were no bytes to
+     read...
+
+     On the other hand, if we got some data then we can display it in
+     a debug message for everyone to see. */
+  switch (this->peer ().recv (rdbuf, rdbuf_len))
     {
-    case -1:
-      // Complain and leave
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
-    case 0:
-      // Complain and leave
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing daemon (fd = %d)\n", this->get_handle ()), -1);
-    default:
-      // Show the data
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", _rdbuf));
+    case -1: // Complain and leave
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) %p bad read\n",
+                         "client"),
+                        -1);
+    case 0: // Complain and leave
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
+    default: // Show the data
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  rdbuf));
     }
 
-   /*
-     It's also worth mentioning that recv() has a cousin:  recv_n().
-     recv_n() will receive exactly the number of bytes you provide
-     it.  This is very good when you know exactly how much you expect
-     to receive.  For the application here, unfortunately, we don't
-     have any idea how much the client will be sending.  recv() will
-     read up-to-but-not-more-than the number of bytes we specify
-     (e.g. -- _rdbuf_len).  That works well when we don't know how
-     much the client will provide.
-    */
+   /* It's also worth mentioning that recv() has a cousin: recv_n().
+     recv_n() will receive exactly the number of bytes you provide it.
+     This is very good when you know exactly how much you expect to
+     receive.  For the application here, unfortunately, we don't have
+     any idea how much the client will be sending.  recv() will read
+     up-to-but-not-more-than the number of bytes we specify (e.g. --
+     rdbuf_len).  That works well when we don't know how much the
+     client will provide. */
 
   return 0;
 }
diff --git a/docs/tutorials/005/client_handler.h b/docs/tutorials/005/client_handler.h
index 6851dce83e2..4a579110a6b 100644
--- a/docs/tutorials/005/client_handler.h
+++ b/docs/tutorials/005/client_handler.h
@@ -12,8 +12,7 @@
    ACE_Event_Handler, you'll have to contain and maintain an
    ACE_SOCK_Stream instance yourself.  With ACE_Svc_Handler (which is
    a derivative of ACE_Event_Handler) some of those details are
-   handled for you.
- */
+   handled for you.  */
 
 #include "ace/Svc_Handler.h"
 
diff --git a/docs/tutorials/005/page02.html b/docs/tutorials/005/page02.html
index d453146c0ca..2a3e4b143de 100644
--- a/docs/tutorials/005/page02.html
+++ b/docs/tutorials/005/page02.html
@@ -34,21 +34,17 @@ This tutorial is a stepping stone towards a mutithreaded server model.
 // $Id$
 
-/*
-   We try to keep main() very simple.  One of the ways we do that is to push
-   much of the complicated stuff into worker objects.  In this case, we only 
-   need to include the acceptor header in our main source file.  We let it
-   worry about the "real work".     
- */
+/* We try to keep main() very simple.  One of the ways we do that is
+   to push much of the complicated stuff into worker objects.  In this
+   case, we only need to include the acceptor header in our main
+   source file.  We let it worry about the "real work".  */
 
 #include "client_acceptor.h"
 
-/*
-   As before, we create a simple signal handler that will set our finished
-   flag.  There are, of course, more elegant ways to handle program shutdown 
-   requests but that isn't really our focus right now, so we'll just do the
-   easiest thing.     
- */
+/* As before, we create a simple signal handler that will set our
+   finished flag.  There are, of course, more elegant ways to handle
+   program shutdown requests but that isn't really our focus right
+   now, so we'll just do the easiest thing.  */
 
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
@@ -56,93 +52,87 @@ extern "C" void handler (int)
   finished = 1;
 }
 
-/*
-   A server has to listen for clients at a known TCP/IP port.  The default ACE
-   port is 10002 (at least on my system) and that's good enough for what  we
-   want to do here.  Obviously, a more robust application would take a command
-   line parameter or read from a configuration file or do some other  clever
-   thing.  Just like the signal handler above, though, that's not what we want to
-   focus on, so we're taking the easy way out.     
- */
+/* A server has to listen for clients at a known TCP/IP port.  The
+   default ACE port is 10002 (at least on my system) and that's good
+   enough for what we want to do here.  Obviously, a more robust
+   application would take a command line parameter or read from a
+   configuration file or do some other clever thing.  Just like the
+   signal handler above, though, that's not what we want to focus on,
+   so we're taking the easy way out.  */
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-/*
-   Finally, we get to main.  Some C++ compilers will complain loudly if your
-   function signature doesn't match the prototype.  Even though we're not 
-   going to use the parameters, we still  have to specify them.     
- */
+/* Finally, we get to main.  Some C++ compilers will complain loudly
+   if your function signature doesn't match the prototype.  Even
+   though we're not going to use the parameters, we still have to
+   specify them.  */
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-/*
-   In our earlier servers, we used a global pointer to get to the reactor. I've 
-   never really liked that idea, so I've moved it into main() this time. When
-   we  get to the Client_Handler object you'll see how we manage to get a
-   pointer back to this reactor.     
- */
+  /* In our earlier servers, we used a global pointer to get to the
+     reactor. I've never really liked that idea, so I've moved it into
+     main() this time. When we get to the Client_Handler object you'll
+     see how we manage to get a pointer back to this reactor.  */
   ACE_Reactor reactor;
 
-  /*
-     The acceptor will take care of letting clients connect to us.  It will
-     also arrange for a  Client_Handler to be created for each new client.
-     Since we're only going to listen at one  TCP/IP port, we only need one
-     acceptor.  If we wanted, though, we could create several of these  and
-     listen at several ports.  (That's what we would do if we wanted to rewrite 
-     inetd for  instance.)     
-   */
+  /* The acceptor will take care of letting clients connect to us.  It
+    will also arrange for a Client_Handler to be created for each new
+    client.  Since we're only going to listen at one TCP/IP port, we
+    only need one acceptor.  If we wanted, though, we could create
+    several of these and listen at several ports.  (That's what we
+    would do if we wanted to rewrite inetd for instance.)  */
   Client_Acceptor peer_acceptor;
 
-  /*
-     Create an ACE_INET_Addr that represents our endpoint of a connection. We
-     then open our acceptor object with that Addr.  Doing so tells the acceptor 
-     where to listen for connections.  Servers generally listen at "well known" 
-     addresses.  If not, there must be some mechanism by which the client is
-     informed of the server's address.
-
-     Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.  This
-     technique is used over and over again in our tutorials.    
-   */
-  if (peer_acceptor.open (ACE_INET_Addr (PORT), &reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-     Here, we know that the open was successful.  If it had failed, we would
-     have exited above.  A nice side-effect of the open() is that we're already
-	 registered with the reactor we provided it.
-   */
-
-  /*
-     Install our signal handler.  You can actually register signal handlers
-     with the reactor.  You might do that when the signal handler is
-     responsible for performing "real" work.  Our simple flag-setter doesn't
-     justify deriving from ACE_Event_Handler and providing a callback function
-     though.    
-   */
+  /* Create an ACE_INET_Addr that represents our endpoint of a
+    connection. We then open our acceptor object with that Addr.
+    Doing so tells the acceptor where to listen for connections.
+    Servers generally listen at "well known" addresses.  If not, there
+    must be some mechanism by which the client is informed of the
+    server's address.
+
+    Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.
+    This technique is used over and over again in our tutorials.  */
+  if (peer_acceptor.open (ACE_INET_Addr (PORT),
+                          &reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Here, we know that the open was successful.  If it had failed, we
+    would have exited above.  A nice side-effect of the open() is that
+    we're already registered with the reactor we provided it.  */
+
+  /* Install our signal handler.  You can actually register signal
+    handlers with the reactor.  You might do that when the signal
+    handler is responsible for performing "real" work.  Our simple
+    flag-setter doesn't justify deriving from ACE_Event_Handler and
+    providing a callback function though.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  /*
-     Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.  It's a
-     handy way to generate uniform debug output from your program.    
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n"));
-
-  /*
-     This will loop "forever" invoking the handle_events() method of our
-     reactor. handle_events() watches for activity on any registered handlers
-     and invokes their appropriate callbacks when necessary.  Callback-driven
-     programming is a big thing in ACE, you should get used to it. If the
-     signal handler catches something, the finished flag will be set and we'll
-     exit.  Conveniently enough, handle_events() is also interrupted by signals 
-     and will exit back to the while() loop.  (If you want your event loop to
-     not be interrupted by signals, checkout the 'restart' flag on the
-     open() method of ACE_Reactor if you're interested.)    
-   */
+  /* Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.
+    It's a handy way to generate uniform debug output from your
+    program.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server daemon\n"));
+
+  /* This will loop "forever" invoking the handle_events() method of
+    our reactor. handle_events() watches for activity on any
+    registered handlers and invokes their appropriate callbacks when
+    necessary.  Callback-driven programming is a big thing in ACE, you
+    should get used to it. If the signal handler catches something,
+    the finished flag will be set and we'll exit.  Conveniently
+    enough, handle_events() is also interrupted by signals and will
+    exit back to the while() loop.  (If you don't want your event
+    loop to be interrupted by signals, check out the 'restart' flag
+    on the open() method of ACE_Reactor.)  */
   while (!finished)
     reactor.handle_events ();
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server daemon\n"));
-
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server daemon\n"));
+  
   return 0;
 }
 
diff --git a/docs/tutorials/005/page04.html b/docs/tutorials/005/page04.html
index a89deb91336..b455d636917 100644
--- a/docs/tutorials/005/page04.html
+++ b/docs/tutorials/005/page04.html
@@ -42,8 +42,7 @@ the definition where all of the real work of the application takes place.
    ACE_Event_Handler, you'll have to contain and maintain an
    ACE_SOCK_Stream instance yourself.  With ACE_Svc_Handler (which is
    a derivative of ACE_Event_Handler) some of those details are
-   handled for you.
- */
+   handled for you.  */
 
 #include "ace/Svc_Handler.h"
 
diff --git a/docs/tutorials/005/page05.html b/docs/tutorials/005/page05.html
index 4b33f063980..0960c9485cb 100644
--- a/docs/tutorials/005/page05.html
+++ b/docs/tutorials/005/page05.html
@@ -23,248 +23,224 @@ rest of the application all together.
 


-
 // $Id$
 
-/*
-   In client_handler.h I alluded to the fact that we'll mess around with a
-   Client_Acceptor pointer.  To do so, we need the Client_Acceptor object
-   declaration.
+/* In client_handler.h I alluded to the fact that we'll mess around
+   with a Client_Acceptor pointer.  To do so, we need the
+   Client_Acceptor object declaration.
 
    We know that including client_handler.h is redundant because
-   client_acceptor.h includes it.  Still, the sentry prevents double-inclusion
-   from causing problems and it's sometimes good to be explicit about what
-   we're using.
+   client_acceptor.h includes it.  Still, the sentry prevents
+   double-inclusion from causing problems and it's sometimes good to
+   be explicit about what we're using.
 
-   On the other hand, we don't directly include any ACE header files here.  
- */
+   On the other hand, we don't directly include any ACE header files
+   here.  */
 #include "client_acceptor.h"
 #include "client_handler.h"
 
-/*
-   Our constructor doesn't do anything.  That's generally a good idea.  Unless
-   you want to start throwing exceptions, there isn't a really good way to
-   indicate that a constructor has failed.  If I had my way, I'd have a boolean 
-   return code from it that would cause new to return 0 if I failed.  Oh 
-   well...  
- */
+/* Our constructor doesn't do anything.  That's generally a good idea.
+   Unless you want to start throwing exceptions, there isn't a really
+   good way to indicate that a constructor has failed.  If I had my
+   way, I'd have a boolean return code from it that would cause new to
+   return 0 if I failed.  Oh well...  */
 Client_Handler::Client_Handler (void)
 {
 }
 
-/*
-   Our destructor doesn't do anything either.  That is also by design.
-   Remember, we really want folks to use destroy() to get rid of us.  If that's 
-   so, then there's nothing left to do when the destructor gets invoked.  
- */
+/* Our destructor doesn't do anything either.  That is also by design.
+   Remember, we really want folks to use destroy() to get rid of us.
+   If that's so, then there's nothing left to do when the destructor
+   gets invoked.  */
 Client_Handler::~Client_Handler (void)
 {
-        // Make sure that our peer closes when we're deleted.  This
-        // will probably happened when the peer is deleted but it
-        // doesn't hurt to be explicit.
-    this->peer ().close ();
+  // Make sure that our peer closes when we're deleted.  This
+  // will probably happen when the peer is deleted but it
+  // doesn't hurt to be explicit.
+  this->peer ().close ();
 }
 
-/*
-   The much talked about destroy() method!  The reason I keep going on about
-   this is because it's just a Bad Idea (TM) to do real work inside of a
-   destructor.   Although this method is void, it really should return
-   int so that it can tell the caller there was a problem.  Even as
-   void you could at least throw an exception which you would never want 
-   to do in a destructor.  
- */
-void Client_Handler::destroy (void)
+/* The much talked about destroy() method!  The reason I keep going on
+   about this is because it's just a Bad Idea (TM) to do real work
+   inside of a destructor.  Although this method is void, it really
+   should return int so that it can tell the caller there was a
+   problem.  Even as void you could at least throw an exception which
+   you would never want to do in a destructor.  */
+void 
+Client_Handler::destroy (void)
 {
-  /*
-     Tell the reactor to forget all about us.  Notice that we use the same args
-     here that we use in the open() method to register ourselves.  In addition,
-     we use the DONT_CALL flag to prevent handle_close() being called.  Since we 
-     likely got here due to handle_close(), that could cause a bit of nasty
-     recursion! 
-   */
+  /* Tell the reactor to forget all about us.  Notice that we use the
+    same args here that we use in the open() method to register
+    ourselves.  In addition, we use the DONT_CALL flag to prevent
+    handle_close() being called.  Since we likely got here due to
+    handle_close(), that could cause a bit of nasty recursion!  */
   this->reactor ()->remove_handler (this,
-        ACE_Event_Handler:: READ_MASK | ACE_Event_Handler::DONT_CALL);
+                                    ACE_Event_Handler:: READ_MASK | ACE_Event_Handler::DONT_CALL);
 
-  /*
-     This is how we're able to tell folks not to use delete.  By
-     deleting our own instance, we take care of memory leaks after ensuring
-     that the object is shut down correctly.  
-   */
+  /* This is how we're able to tell folks not to use delete.  By
+    deleting our own instance, we take care of memory leaks after
+    ensuring that the object is shut down correctly.  */
   delete this;
 }
 
-/*
-   As mentioned before, the open() method is called by the Client_Acceptor when 
-   a new client connection has been accepted.  The Client_Acceptor instance
-   pointer is cast to a void* and given to us here.  We'll use that to avoid
-   some global data...  
- */
-int Client_Handler::open (void *_acceptor)
+/* As mentioned before, the open() method is called by the
+   Client_Acceptor when a new client connection has been accepted.
+   The Client_Acceptor instance pointer is cast to a void* and given
+   to us here.  We'll use that to avoid some global data...  */
+int 
+Client_Handler::open (void *arg)
 {
-  /*
-     Convert the void* to a Client_Acceptor*.  You should probably use those
-     fancy new C++ cast operators but I can never remember how/when to do so.
-     Since you can cast just about anything around a void* without compiler
-     warnings be very sure of what you're doing when you do this kind of thing.
-     That's where the new-style cast operators can save you.  
-   */
-  Client_Acceptor *acceptor = (Client_Acceptor *) _acceptor;
-
-  /*
-     Our reactor reference will be set when we register ourselves but I decided
-     to go ahead and set it here.  No good reason really...  
-   */
+  /* Convert the void* to a Client_Acceptor*.  You should probably use
+     those fancy ACE_*_cast macros but I can never remember how/when
+     to do so.  Since you can cast just about anything around a void*
+     without compiler warnings be very sure of what you're doing when
+     you do this kind of thing.  That's where the new-style cast
+     operators can save you.  */
+  Client_Acceptor *acceptor = (Client_Acceptor *) arg;
+
+  /* Our reactor reference will be set when we register ourselves but
+     I decided to go ahead and set it here.  No good reason really...  */
   this->reactor (acceptor->reactor ());
 
-  /*
-     We need this to store the address of the client that we are now connected
-     to.  We'll use it later to display a debug message.  
-   */
+  /* We need this to store the address of the client that we are now
+     connected to.  We'll use it later to display a debug message.  */
   ACE_INET_Addr addr;
 
-  /*
-     Our ACE_Svc_Handler baseclass gives us the peer() method as a way to
-     access our underlying ACE_SOCK_Stream.  On that object, we can invoke the
-     get_remote_addr() method to get an ACE_INET_Addr having our client's
-     address information. As with most ACE methods, we'll get back (and return) 
-     a -1 if there was any kind of error.  Once we have the ACE_INET_Addr, we
-     can query it to find out the clien's host name, TCP/IP address, TCP/IP
-     port value and so forth.  One word of warning:   the get_host_name()
-     method of ACE_INET_Addr may return you an empty string if your name server 
-     can't resolve it.  On the other hand, get_host_addr() will always give you 
-     the dotted-decimal string representing the TCP/IP address.  
-   */
+  /* Our ACE_Svc_Handler baseclass gives us the peer() method as a way
+     to access our underlying ACE_SOCK_Stream.  On that object, we can
+     invoke the get_remote_addr() method to get an ACE_INET_Addr
+     having our client's address information. As with most ACE
+     methods, we'll get back (and return) a -1 if there was any kind
+     of error.  Once we have the ACE_INET_Addr, we can query it to
+     find out the client's host name, TCP/IP address, TCP/IP port value
+     and so forth.  One word of warning: the get_host_name() method of
+     ACE_INET_Addr may return you an empty string if your name server
+     can't resolve it.  On the other hand, get_host_addr() will always
+     give you the dotted-decimal string representing the TCP/IP
+     address.  */
   if (this->peer ().get_remote_addr (addr) == -1)
-    {
-      return -1;
-    }
-
-  /*
-     If we managed to get the client's address then we're connected to a real
-     and valid client.  I suppose that in some cases, the client may connect
-     and disconnect so quickly that it is invalid by the time we get here. In
-     any case, the test above should always be done to ensure that the
-     connection is worth keeping.
-
-     Now, register ourselves with a reactor and tell that reactor that we want
-     to be notified when there is something to read.  Remember, we took our
-     reactor value from the acceptor which created us in the first place.
-     Since we're exploring a single-threaded implementation, this is the
-     correct thing to do. 
-   */
-  if (this->reactor ()->register_handler (this, ACE_Event_Handler::READ_MASK) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
-    }
-
-  /*
-     Here, we use the ACE_INET_Addr object to print a message with the name of
-     the client we're connected to.  Again, it is possible that you'll get an
-     empty string for the host name if your DNS isn't configured correctly or
-     if there is some other reason that a TCP/IP addreess cannot be converted
-     into a host name. 
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) connected with %s\n", addr.get_host_name ()));
-
-  /*
-     Always return zero on success. 
-   */
+    return -1;
+
+  /* If we managed to get the client's address then we're connected to
+     a real and valid client.  I suppose that in some cases, the
+     client may connect and disconnect so quickly that it is invalid
+     by the time we get here. In any case, the test above should
+     always be done to ensure that the connection is worth keeping.
+
+     Now, register ourselves with a reactor and tell that reactor that
+     we want to be notified when there is something to read.
+     Remember, we took our reactor value from the acceptor which
+     created us in the first place.  Since we're exploring a
+     single-threaded implementation, this is the correct thing to do.  */
+  if (this->reactor ()->register_handler (this,
+                                          ACE_Event_Handler::READ_MASK) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "(%P|%t) can't register with reactor\n"),
+                      -1);
+
+  /* Here, we use the ACE_INET_Addr object to print a message with the
+     name of the client we're connected to.  Again, it is possible
+     that you'll get an empty string for the host name if your DNS
+     isn't configured correctly or if there is some other reason that
+     a TCP/IP address cannot be converted into a host name.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) connected with %s\n",
+              addr.get_host_name ()));
+
+  /* Always return zero on success.  */
   return 0;
 }
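
The comment in open() above mentions the ACE_*_cast macros as an
alternative to the raw C-style cast.  Purely as an illustration, and
assuming an ACE release that provides ACE_static_cast, the same cast
could be written like this:

  /* Hypothetical alternative to the C-style cast above.  On standard
     compilers the macro expands to a static_cast<>; on older ones it
     falls back to the old-style cast. */
  Client_Acceptor *acceptor = ACE_static_cast (Client_Acceptor *, arg);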
 
-/*
-   In the open() method, we registered with the reactor and requested to be
-   notified when there is data to be read.  When the reactor sees that activity
-   it will invoke this handle_input() method on us.  As I mentioned, the _handle
-   parameter isn't useful to us but it narrows the list of methods the reactor
-   has to worry about and the list of possible virtual functions we would have
-   to override. 
- */
-int Client_Handler::handle_input (ACE_HANDLE _handle)
+/* In the open() method, we registered with the reactor and requested
+   to be notified when there is data to be read.  When the reactor
+   sees that activity it will invoke this handle_input() method on us.
+   As I mentioned, the handle parameter isn't useful to us but it
+   narrows the list of methods the reactor has to worry about and the
+   list of possible virtual functions we would have to override.  */
+int 
+Client_Handler::handle_input (ACE_HANDLE handle)
 {
-  /*
-     Some compilers don't like it when you fail to use a parameter.  This macro
-     will keep 'em quiet for you. 
-   */
-  ACE_UNUSED_ARG (_handle);
-
-  /*
-     Now, we create and initialize a buffer for receiving the data.  Since this
-     is just a simple test app, we'll use a small buffer size. 
-   */
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
-
-  /*
-     Invoke the process() method with a pointer to our data area.  We'll let
-     that method worry about interfacing with the data.  You might choose to go 
-     ahead and read the data and then pass the result to process().  However,
-     application logic may require that you read a few bytes to determine what
-     else to read...  It's best if we push that all into the application-logic
-     level. 
-   */
+  /* Some compilers don't like it when you fail to use a parameter.
+     This macro will keep 'em quiet for you.  */
+  ACE_UNUSED_ARG (handle);
+
+  /* Now, we create and initialize a buffer for receiving the data.
+     Since this is just a simple test app, we'll use a small buffer
+     size.  */
+  char buf[BUFSIZ];
+
+  /* Invoke the process() method with a pointer to our data area.
+     We'll let that method worry about interfacing with the data.  You
+     might choose to go ahead and read the data and then pass the
+     result to process().  However, application logic may require that
+     you read a few bytes to determine what else to read...  It's best
+     if we push that all into the application-logic level.  */
   return this->process (buf, sizeof (buf));
 }
 
-/*
-   If we return -1 out of handle_input() or if the reactor sees other problems
-   with us then handle_close() will be called.  The reactor framework
-   will take care of removing us (due to the -1), so we don't need to
-   use the destroy() method.  Instead, we just delete ourselves directly.
- */
-int Client_Handler::handle_close (ACE_HANDLE _handle, ACE_Reactor_Mask _mask)
+/* If we return -1 out of handle_input() or if the reactor sees other
+   problems with us then handle_close() will be called.  The reactor
+   framework will take care of removing us (due to the -1), so we
+   don't need to use the destroy() method.  Instead, we just delete
+   ourselves directly.  */
+int 
+Client_Handler::handle_close (ACE_HANDLE handle,
+                              ACE_Reactor_Mask mask)
 {
-  ACE_UNUSED_ARG (_handle);
-  ACE_UNUSED_ARG (_mask);
+  ACE_UNUSED_ARG (handle);
+  ACE_UNUSED_ARG (mask);
 
   delete this;
   return 0;
 }
 
-/*
-   And, at last, we get to the application-logic level.  Out of everything
-   we've done so far, this is the only thing that really has anything to do
-   with what your application will do.  In this method we will read and process 
-   the client's data.  In a real appliation, you will probably have a bit more
-   in main() to deal with command line options but after that point, all of the 
-   action takes place here. 
- */
-int Client_Handler::process (char *_rdbuf, int _rdbuf_len)
+/* And, at last, we get to the application-logic level.  Out of
+   everything we've done so far, this is the only thing that really
+   has anything to do with what your application will do.  In this
+   method we will read and process the client's data.  In a real
+   application, you will probably have a bit more in main() to deal
+   with command line options but after that point, all of the action
+   takes place here.  */
+int 
+Client_Handler::process (char *rdbuf,
+                         int rdbuf_len)
 {
-  /*
-     Using the buffer provided for us, we read the data from the client. If
-     there is a read error (eg -- recv() returns -1) then it's a pretty good
-     bet that the connection is gone.  Likewise, if we read zero bytes then
-     something wrong has happened.  The reactor wouldn't have called us if
-     there wasn't some kind of read activity but there wouldn't be activity if
-     there were no bytes to read...
-
-     On the other hand, if we got some data then we can display it in a  debug
-     message for everyone to see. 
-   */
-  switch (this->peer ().recv (_rdbuf, _rdbuf_len))
+  /* Using the buffer provided for us, we read the data from the
+     client. If there is a read error (eg -- recv() returns -1) then
+     it's a pretty good bet that the connection is gone.  Likewise, if
+     we read zero bytes then something wrong has happened.  The
+     reactor wouldn't have called us if there wasn't some kind of read
+     activity but there wouldn't be activity if there were no bytes to
+     read...
+
+     On the other hand, if we got some data then we can display it in
+     a debug message for everyone to see.  */
+  switch (this->peer ().recv (rdbuf, rdbuf_len))
     {
-    case -1:
-            // Complain and leave
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
-    case 0:
-            // Complain and leave
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing daemon (fd = %d)\n", this->get_handle ()), -1);
-    default:
-            // Show the data
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", _rdbuf));
+    case -1: // Complain and leave
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) %p bad read\n",
+                         "client"),
+                        -1);
+    case 0: // Complain and leave
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
+    default: // Show the data
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  rdbuf));
     }
 
-   /*
-     It's also worth mentioning that recv() has a cousin:  recv_n().
-     recv_n() will receive exactly the number of bytes you provide
-     it.  This is very good when you know exactly how much you expect
-     to receive.  For the application here, unfortunately, we don't
-     have any idea how much the client will be sending.  recv() will
-     read up-to-but-not-more-than the number of bytes we specify
-     (e.g. -- _rdbuf_len).  That works well when we don't know how
-     much the client will provide.
-    */
+   /* It's also worth mentioning that recv() has a cousin: recv_n().
+     recv_n() will receive exactly the number of bytes you provide it.
+     This is very good when you know exactly how much you expect to
+     receive.  For the application here, unfortunately, we don't have
+     any idea how much the client will be sending.  recv() will read
+     up-to-but-not-more-than the number of bytes we specify (e.g. --
+     _rdbuf_len).  That works well when we don't know how much the
+     client will provide.  */
 
   return 0;
 }
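
The comment above only describes recv_n(), so here is a small,
hypothetical sketch of how it might be used when the peer always sends
a fixed-size header before its payload.  The Message_Header struct and
read_header() helper are invented for illustration and are not part of
the tutorial sources:

#include "ace/SOCK_Stream.h"
#include "ace/Log_Msg.h"

struct Message_Header
{
  ACE_UINT32 length;  // Number of payload bytes that follow.
};

int
read_header (ACE_SOCK_Stream &peer, Message_Header &header)
{
  /* recv_n() won't return until it has read exactly sizeof(header)
     bytes (or hits an error/EOF), so no partial-read loop is needed
     the way it would be with plain recv(). */
  if (peer.recv_n (&header, sizeof (header)) != (ssize_t) sizeof (header))
    ACE_ERROR_RETURN ((LM_ERROR,
                       "(%P|%t) %p short read on header\n",
                       "recv_n"),
                      -1);
  return 0;
}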
diff --git a/docs/tutorials/005/page06.html b/docs/tutorials/005/page06.html
index d3c6e91e80a..e7588a0ec36 100644
--- a/docs/tutorials/005/page06.html
+++ b/docs/tutorials/005/page06.html
@@ -127,6 +127,9 @@ SHAR : #
 UNSHAR : #
 	sh combine.shar
 
+CLEAN : realclean
+	rm -f hdr bodies *.pre *.pst
+
 #----------------------------------------------------------------------------
 #	Dependencies
 #----------------------------------------------------------------------------
diff --git a/docs/tutorials/005/server.cpp b/docs/tutorials/005/server.cpp
index 56991761c14..00f2f6778ae 100644
--- a/docs/tutorials/005/server.cpp
+++ b/docs/tutorials/005/server.cpp
@@ -1,20 +1,16 @@
 // $Id$
 
-/*
-   We try to keep main() very simple.  One of the ways we do that is to push
-   much of the complicated stuff into worker objects.  In this case, we only 
-   need to include the acceptor header in our main source file.  We let it
-   worry about the "real work".     
- */
+/* We try to keep main() very simple.  One of the ways we do that is
+   to push much of the complicated stuff into worker objects.  In this
+   case, we only need to include the acceptor header in our main
+   source file.  We let it worry about the "real work".  */
 
 #include "client_acceptor.h"
 
-/*
-   As before, we create a simple signal handler that will set our finished
-   flag.  There are, of course, more elegant ways to handle program shutdown 
-   requests but that isn't really our focus right now, so we'll just do the
-   easiest thing.     
- */
+/* As before, we create a simple signal handler that will set our
+   finished flag.  There are, of course, more elegant ways to handle
+   program shutdown requests but that isn't really our focus right
+   now, so we'll just do the easiest thing.  */
 
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
@@ -22,93 +18,87 @@ extern "C" void handler (int)
   finished = 1;
 }
 
-/*
-   A server has to listen for clients at a known TCP/IP port.  The default ACE
-   port is 10002 (at least on my system) and that's good enough for what  we
-   want to do here.  Obviously, a more robust application would take a command
-   line parameter or read from a configuration file or do some other  clever
-   thing.  Just like the signal handler above, though, that's not what we want to
-   focus on, so we're taking the easy way out.     
- */
+/* A server has to listen for clients at a known TCP/IP port.  The
+   default ACE port is 10002 (at least on my system) and that's good
+   enough for what we want to do here.  Obviously, a more robust
+   application would take a command line parameter or read from a
+   configuration file or do some other clever thing.  Just like the
+   signal handler above, though, that's not what we want to focus on,
+   so we're taking the easy way out.  */
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-/*
-   Finally, we get to main.  Some C++ compilers will complain loudly if your
-   function signature doesn't match the prototype.  Even though we're not 
-   going to use the parameters, we still  have to specify them.     
- */
+/* Finally, we get to main.  Some C++ compilers will complain loudly
+   if your function signature doesn't match the prototype.  Even
+   though we're not going to use the parameters, we still have to
+   specify them.  */
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-/*
-   In our earlier servers, we used a global pointer to get to the reactor. I've 
-   never really liked that idea, so I've moved it into main() this time. When
-   we  get to the Client_Handler object you'll see how we manage to get a
-   pointer back to this reactor.     
- */
+  /* In our earlier servers, we used a global pointer to get to the
+     reactor. I've never really liked that idea, so I've moved it into
+     main() this time. When we get to the Client_Handler object you'll
+     see how we manage to get a pointer back to this reactor.  */
   ACE_Reactor reactor;
 
-  /*
-     The acceptor will take care of letting clients connect to us.  It will
-     also arrange for a  Client_Handler to be created for each new client.
-     Since we're only going to listen at one  TCP/IP port, we only need one
-     acceptor.  If we wanted, though, we could create several of these  and
-     listen at several ports.  (That's what we would do if we wanted to rewrite 
-     inetd for  instance.)     
-   */
+  /* The acceptor will take care of letting clients connect to us.  It
+    will also arrange for a Client_Handler to be created for each new
+    client.  Since we're only going to listen at one TCP/IP port, we
+    only need one acceptor.  If we wanted, though, we could create
+    several of these and listen at several ports.  (That's what we
+    would do if we wanted to rewrite inetd for instance.)  */
   Client_Acceptor peer_acceptor;
 
-  /*
-     Create an ACE_INET_Addr that represents our endpoint of a connection. We
-     then open our acceptor object with that Addr.  Doing so tells the acceptor 
-     where to listen for connections.  Servers generally listen at "well known" 
-     addresses.  If not, there must be some mechanism by which the client is
-     informed of the server's address.
-
-     Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.  This
-     technique is used over and over again in our tutorials.    
-   */
-  if (peer_acceptor.open (ACE_INET_Addr (PORT), &reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-     Here, we know that the open was successful.  If it had failed, we would
-     have exited above.  A nice side-effect of the open() is that we're already
-	 registered with the reactor we provided it.
-   */
-
-  /*
-     Install our signal handler.  You can actually register signal handlers
-     with the reactor.  You might do that when the signal handler is
-     responsible for performing "real" work.  Our simple flag-setter doesn't
-     justify deriving from ACE_Event_Handler and providing a callback function
-     though.    
-   */
+  /* Create an ACE_INET_Addr that represents our endpoint of a
+    connection. We then open our acceptor object with that Addr.
+    Doing so tells the acceptor where to listen for connections.
+    Servers generally listen at "well known" addresses.  If not, there
+    must be some mechanism by which the client is informed of the
+    server's address.
+
+    Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.
+    This technique is used over and over again in our tutorials.  */
+  if (peer_acceptor.open (ACE_INET_Addr (PORT),
+                          &reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Here, we know that the open was successful.  If it had failed, we
+    would have exited above.  A nice side-effect of the open() is that
+    we're already registered with the reactor we provided it.  */
+
+  /* Install our signal handler.  You can actually register signal
+    handlers with the reactor.  You might do that when the signal
+    handler is responsible for performing "real" work.  Our simple
+    flag-setter doesn't justify deriving from ACE_Event_Handler and
+    providing a callback function though.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  /*
-     Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.  It's a
-     handy way to generate uniform debug output from your program.    
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n"));
-
-  /*
-     This will loop "forever" invoking the handle_events() method of our
-     reactor. handle_events() watches for activity on any registered handlers
-     and invokes their appropriate callbacks when necessary.  Callback-driven
-     programming is a big thing in ACE, you should get used to it. If the
-     signal handler catches something, the finished flag will be set and we'll
-     exit.  Conveniently enough, handle_events() is also interrupted by signals 
-     and will exit back to the while() loop.  (If you want your event loop to
-     not be interrupted by signals, checkout the 'restart' flag on the
-     open() method of ACE_Reactor if you're interested.)    
-   */
+  /* Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.
+    It's a handy way to generate uniform debug output from your
+    program.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server daemon\n"));
+
+  /* This will loop "forever" invoking the handle_events() method of
+    our reactor. handle_events() watches for activity on any
+    registered handlers and invokes their appropriate callbacks when
+    necessary.  Callback-driven programming is a big thing in ACE, you
+    should get used to it. If the signal handler catches something,
+    the finished flag will be set and we'll exit.  Conveniently
+    enough, handle_events() is also interrupted by signals and will
+    exit back to the while() loop.  (If you don't want your event
+    loop to be interrupted by signals, check out the 'restart' flag
+    on the open() method of ACE_Reactor.)  */
   while (!finished)
     reactor.handle_events ();
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server daemon\n"));
-
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server daemon\n"));
+  
   return 0;
 }
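
The comment above notes that signal handlers can be registered with
the reactor instead of going through ACE_Sig_Action.  As a hedged
sketch of that approach (the Quit_Handler class and quit_flag name are
made up here, not part of the tutorial), it might look like this:

#include "ace/Reactor.h"
#include "ace/Event_Handler.h"
#include "ace/Log_Msg.h"

static sig_atomic_t quit_flag = 0;

class Quit_Handler : public ACE_Event_Handler
{
public:
  /* The reactor invokes handle_signal() when the registered signal
     is delivered, so the callback can simply set a flag that the
     event loop tests. */
  virtual int handle_signal (int signum, siginfo_t *, ucontext_t *)
  {
    ACE_UNUSED_ARG (signum);
    quit_flag = 1;
    return 0;
  }
};

/* Inside main(), in place of the ACE_Sig_Action above, one could do:

     Quit_Handler quit_handler;
     if (reactor.register_handler (SIGINT, &quit_handler) == -1)
       ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "register_handler"), -1);
*/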
 
diff --git a/docs/tutorials/006/client_handler.cpp b/docs/tutorials/006/client_handler.cpp
index a250fb0f406..2280150ccc4 100644
--- a/docs/tutorials/006/client_handler.cpp
+++ b/docs/tutorials/006/client_handler.cpp
@@ -1,325 +1,290 @@
-
 // $Id$
 
-/*
-   In client_handler.h I alluded to the fact that we'll mess around with a
-   Client_Acceptor pointer.  To do so, we need the Client_Acceptor object
-   declaration.
+/* In client_handler.h I alluded to the fact that we'll mess around
+   with a Client_Acceptor pointer.  To do so, we need the
+   Client_Acceptor object declaration.
 
    We know that including client_handler.h is redundant because
-   client_acceptor.h includes it.  Still, the sentry prevents double-inclusion
-   from causing problems and it's sometimes good to be explicit about what
-   we're using.
+   client_acceptor.h includes it.  Still, the sentry prevents
+   double-inclusion from causing problems and it's sometimes good to
+   be explicit about what we're using.
 
-   On the other hand, we don't directly include any ACE header files here.  
- */
+   On the other hand, we don't directly include any ACE header files
+   here.  */
 #include "client_acceptor.h"
 #include "client_handler.h"
 
-/*
-   Our constructor doesn't do anything.  That's generally a good idea.  Unless
-   you want to start throwing exceptions, there isn't a really good way to
-   indicate that a constructor has failed.  If I had my way, I'd have a boolean 
-   return code from it that would cause new to return 0 if I failed.  Oh 
-   well...  
- */
+/* Our constructor doesn't do anything.  That's generally a good idea.
+   Unless you want to start throwing exceptions, there isn't a really
+   good way to indicate that a constructor has failed.  If I had my
+   way, I'd have a boolean return code from it that would cause new to
+   return 0 if I failed.  Oh well...  */
 Client_Handler::Client_Handler (void)
 {
 }
 
-/*
-   Our destructor doesn't do anything either.  That is also by design.
-   Remember, we really want folks to use destroy() to get rid of us.  If that's 
-   so, then there's nothing left to do when the destructor gets invoked.  
- */
+/* Our destructor doesn't do anything either.  That is also by design.
+   Remember, we really want folks to use destroy() to get rid of us.
+   If that's so, then there's nothing left to do when the destructor
+   gets invoked.  */
 Client_Handler::~Client_Handler (void)
 {
 }
 
-/*
-   The much talked about destroy() method!  The reason I keep going on about
-   this is because it's just a Bad Idea (TM) to do real work inside of a
-   destructor.   Although this method is void, it really should return
-   int so that it can tell the caller there was a problem.  Even as
-   void you could at least throw an exception which you would never want 
-   to do in a destructor.  
- */
-void Client_Handler::destroy (void)
+/* The much talked about destroy() method!  The reason I keep going on
+   about this is because it's just a Bad Idea (TM) to do real work
+   inside of a destructor.  Although this method is void, it really
+   should return int so that it can tell the caller there was a
+   problem.  Even as void you could at least throw an exception which
+   you would never want to do in a destructor.  */
+void 
+Client_Handler::destroy (void)
 {
-  /*
-     Tell the reactor to forget all about us.  Notice that we use the same args
-     here that we use in the open() method to register ourselves.  In addition,
-     we use the DONT_CALL flag to prevent handle_close() being called.  Since we 
-     likely got here due to handle_close(), that could cause a bit of nasty
-     recursion! 
-   */
+  /* Tell the reactor to forget all about us.  Notice that we use the
+    same args here that we use in the open() method to register
+    ourselves.  In addition, we use the DONT_CALL flag to prevent
+    handle_close() being called.  Since we likely got here due to
+    handle_close(), that could cause a bit of nasty recursion!  */
   this->reactor ()->remove_handler (this,
-        ACE_Event_Handler:: READ_MASK | ACE_Event_Handler::DONT_CALL);
+                                    ACE_Event_Handler::READ_MASK 
+                                    | ACE_Event_Handler::DONT_CALL);
 
-  /*
-     This is how we're able to tell folks not to use delete.  By
-     deleting our own instance, we take care of memory leaks after ensuring
-     that the object is shut down correctly.  
-   */
+  /* This is how we're able to tell folks not to use delete.  By
+    deleting our own instance, we take care of memory leaks after
+    ensuring that the object is shut down correctly.  */
   delete this;
 }
 
-/*
-   As mentioned before, the open() method is called by the Client_Acceptor when 
-   a new client connection has been accepted.  The Client_Acceptor instance
-   pointer is cast to a void* and given to us here.  We'll use that to avoid
-   some global data...  
- */
-int Client_Handler::open (void *_acceptor)
+/* As mentioned before, the open() method is called by the
+   Client_Acceptor when a new client connection has been accepted.
+   The Client_Acceptor instance pointer is cast to a void* and given
+   to us here.  We'll use that to avoid some global data...  */
+int 
+Client_Handler::open (void *arg)
 {
-  /*
-     We need this to store the address of the client that we are now connected
-     to.  We'll use it later to display a debug message.  
-   */
+  /* We need this to store the address of the client that we are now
+     connected to.  We'll use it later to display a debug message.  */
   ACE_INET_Addr addr;
 
-  /*
-     Our ACE_Svc_Handler baseclass gives us the peer() method as a way to
-     access our underlying ACE_SOCK_Stream.  On that object, we can invoke the
-     get_remote_addr() method to get get an ACE_INET_Addr having our client's
-     address information. As with most ACE methods, we'll get back (and return) 
-     a -1 if there was any kind of error.  Once we have the ACE_INET_Addr, we
-     can query it to find out the client's host name, TCP/IP address, TCP/IP
-     port value and so forth.  One word of warning:   the get_host_name()
-     method of ACE_INET_Addr may return you an empty string if your name server 
-     can't resolve it.  On the other hand, get_host_addr() will always give you 
-     the dotted-decimal string representing the TCP/IP address.  
-   */
+  /* Our ACE_Svc_Handler baseclass gives us the peer() method as a way
+    to access our underlying ACE_SOCK_Stream.  On that object, we can
+    invoke the get_remote_addr() method to get an ACE_INET_Addr
+    having our client's address information. As with most ACE methods,
+    we'll get back (and return) a -1 if there was any kind of error.
+    Once we have the ACE_INET_Addr, we can query it to find out the
+    client's host name, TCP/IP address, TCP/IP port value and so
+    forth.  One word of warning: the get_host_name() method of
+    ACE_INET_Addr may return you an empty string if your name server
+    can't resolve it.  On the other hand, get_host_addr() will always
+    give you the dotted-decimal string representing the TCP/IP
+    address.  */
   if (this->peer ().get_remote_addr (addr) == -1)
-    {
-      return -1;
-    }
-
-  /*
-     Convert the void* to a Client_Acceptor*.  You should probably use those
-     fancy new C++ cast operators but I can never remember how/when to do so.
-     Since you can cast just about anything around a void* without compiler
-     warnings be very sure of what you're doing when you do this kind of thing.
-     That's where the new-style cast operators can save you.  
-   */
-  Client_Acceptor *acceptor = (Client_Acceptor *) _acceptor;
-
-  /*
-     Our Client_Acceptor is constructed with a concurrency strategy.  Here, we
-     go back to it to find out what that strategy was.  If thread-per-connection
-     was selected then we simply activate a thread for ourselves and exit.  Our
-     svc() method will then begin executing in that thread.
-
-     If we are told to use the single-threaded strategy, there is no difference
-     between this and the Tutorial 5 implementation.
-
-     Note that if we're in thread-per-connection mode, open() is
-     exited at this point.  Furthermore, thread-per-connection mode
-     does not use the reactor which means that handle_input() and it's 
-     fellows are not invoked.
-   */
-  if( acceptor->thread_per_connection() )
-  {
-    return this->activate();
-  }
-
-   // ************************************************************************
-   // From here on, we're doing the traditional reactor thing.  If
-   // you're operating in thread-per-connection mode, this code does
-   // not apply.
-   // ************************************************************************
-
-  /*
-     Our reactor reference will be set when we register ourselves but I decided
-     to go ahead and set it here.  No good reason really...  
-   */
+    return -1;
+
+  /* Convert the void* to a Client_Acceptor*.  You should probably use
+    those fancy ACE_*_cast macros but I can never remember how/when to
+    do so.  Since you can cast just about anything around a void*
+    without compiler warnings be very sure of what you're doing when
+    you do this kind of thing.  That's where the new-style cast
+    operators can save you.  */
+  Client_Acceptor *acceptor = (Client_Acceptor *) arg;
+
+  /* Our Client_Acceptor is constructed with a concurrency strategy.
+    Here, we go back to it to find out what that strategy was.  If
+    thread-per-connection was selected then we simply activate a
+    thread for ourselves and exit.  Our svc() method will then begin
+    executing in that thread.
+
+    If we are told to use the single-threaded strategy, there is no
+    difference between this and the Tutorial 5 implementation.
+
+    Note that if we're in thread-per-connection mode, open() is exited
+    at this point.  Furthermore, thread-per-connection mode does not
+    use the reactor which means that handle_input() and its fellows
+    are not invoked.  */
+  if (acceptor->thread_per_connection ())
+    return this->activate ();
+
+  // ************************************************************************
+  // From here on, we're doing the traditional reactor thing.  If
+  // you're operating in thread-per-connection mode, this code does
+  // not apply.
+  // ************************************************************************
+
+  /* Our reactor reference will be set when we register ourselves but
+    I decided to go ahead and set it here.  No good reason really...  */
-  this->reactor (acceptor->reactor ());
+  this->reactor (client_acceptor->reactor ());
 
-  /*
-     If we managed to get the client's address then we're connected to a real
-     and valid client.  I suppose that in some cases, the client may connect
-     and disconnect so quickly that it is invalid by the time we get here. In
-     any case, the test above should always be done to ensure that the
-     connection is worth keeping.
-
-     Now, regiser ourselves with a reactor and tell that reactor that we want
-     to be notified when there is something to read.  Remember, we took our
-     reactor value from the acceptor which created us in the first place.
-     Since we're exploring a single-threaded implementation, this is the
-     correct thing to do. 
-   */
-  if (this->reactor ()->register_handler (this, ACE_Event_Handler::READ_MASK) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
-    }
-
-  /*
-     Here, we use the ACE_INET_Addr object to print a message with the name of
-     the client we're connected to.  Again, it is possible that you'll get an
-     empty string for the host name if your DNS isn't configured correctly or
-     if there is some other reason that a TCP/IP addreess cannot be converted
-     into a host name. 
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) connected with %s\n", addr.get_host_name ()));
-
-  /*
-     Always return zero on success. 
-   */
+  /* If we managed to get the client's address then we're connected to
+    a real and valid client.  I suppose that in some cases, the client
+    may connect and disconnect so quickly that it is invalid by the
+    time we get here. In any case, the test above should always be
+    done to ensure that the connection is worth keeping.
+
+    Now, register ourselves with a reactor and tell that reactor that
+    we want to be notified when there is something to read.  Remember,
+    we took our reactor value from the acceptor which created us in
+    the first place.  Since we're exploring a single-threaded
+    implementation, this is the correct thing to do.  */
+  if (this->reactor ()->register_handler (this,
+                                          ACE_Event_Handler::READ_MASK) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "(%P|%t) can't register with reactor\n"),
+                      -1);
+
+  /* Here, we use the ACE_INET_Addr object to print a message with the
+    name of the client we're connected to.  Again, it is possible that
+    you'll get an empty string for the host name if your DNS isn't
+    configured correctly or if there is some other reason that a
+    TCP/IP address cannot be converted into a host name.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) connected with %s\n", addr.get_host_name ()));
+
+  /* Always return zero on success.  */
   return 0;
 }
 
-/*
-   As mentioned in the header, the typical way to close an object in a threaded
-   context is to invoke it's close() method.  Since we already have a handle_close()
-   method built to cleanup after us, we'll just forward the request on to that
-   object.
- */
-int Client_Handler::close(u_long flags)
+/* As mentioned in the header, the typical way to close an object in a
+   threaded context is to invoke its close() method.  Since we
+   already have a handle_close() method built to cleanup after us,
+   we'll just forward the request on to that object.  */
+int 
+Client_Handler::close (u_long flags)
 {
-    ACE_UNUSED_ARG(flags);
-
-    /*
-      We use the destroy() method to clean up after ourselves.
-      That will take care of removing us from the reactor and then
-      freeing our memory.
-    */
-    this->destroy();
-
-	/*
-	   Don't forward the close() to the baseclass!  handle_close() above has
-	   already taken care of delete'ing.  Forwarding close() would cause that
-	   to happen again and things would get really ugly at that point!
-	 */
-	return 0;
+  ACE_UNUSED_ARG (flags);
+
+  /* We use the destroy() method to clean up after ourselves.  That
+    will take care of removing us from the reactor and then freeing
+    our memory.  */
+  this->destroy ();
+
+  /* Don't forward the close() to the baseclass!  handle_close() above
+    has already taken care of delete'ing.  Forwarding close() would
+    cause that to happen again and things would get really ugly at
+    that point!  */
+  return 0;
 }
 
-/*
-   In the open() method, we registered with the reactor and requested to be
-   notified when there is data to be read.  When the reactor sees that activity
-   it will invoke this handle_input() method on us.  As I mentioned, the _handle
-   parameter isn't useful to us but it narrows the list of methods the reactor
-   has to worry about and the list of possible virtual functions we would have
-   to override.
-
-   Again, this is not used if we're in thread-per-connection mode.
- */
-int Client_Handler::handle_input (ACE_HANDLE _handle)
+/* In the open() method, we registered with the reactor and requested
+   to be notified when there is data to be read.  When the reactor
+   sees that activity it will invoke this handle_input() method on us.
+   As I mentioned, the handle parameter isn't useful to us but it
+   narrows the list of methods the reactor has to worry about and the
+   list of possible virtual functions we would have to override.
+
+   Again, this is not used if we're in thread-per-connection mode.  */
+int 
+Client_Handler::handle_input (ACE_HANDLE handle)
 {
-  /*
-     Some compilers don't like it when you fail to use a parameter.  This macro
-     will keep 'em quiet for you. 
-   */
-  ACE_UNUSED_ARG (_handle);
-
-  /*
-     Now, we create and initialize a buffer for receiving the data.  Since this
-     is just a simple test app, we'll use a small buffer size. 
-   */
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
-
-  /*
-     Invoke the process() method with a pointer to our data area.  We'll let
-     that method worry about interfacing with the data.  You might choose to go 
-     ahead and read the data and then pass the result to process().  However,
-     application logic may require that you read a few bytes to determine what
-     else to read...  It's best if we push that all into the application-logic
-     level. 
-   */
+  /* Some compilers don't like it when you fail to use a parameter.
+    This macro will keep 'em quiet for you.  */
+  ACE_UNUSED_ARG (handle);
+
+  /* Now, we create and initialize a buffer for receiving the data.
+    Since this is just a simple test app, we'll use a small buffer
+    size.  */
+  char buf[BUFSIZ];
+  ACE_OS::memset (buf, 0, sizeof (buf));
+
+  /* Invoke the process() method with a pointer to our data area.
+    We'll let that method worry about interfacing with the data.  You
+    might choose to go ahead and read the data and then pass the
+    result to process().  However, application logic may require that
+    you read a few bytes to determine what else to read...  It's best
+    if we push that all into the application-logic level.  */
   return this->process (buf, sizeof (buf));
 }
 
-/*
-   If we return -1 out of handle_input() or if the reactor sees other problems
-   with us then handle_close() will be called.  The reactor framework
-   will take care of removing us (due to the -1), so we don't need to
-   use the destroy() method.  Instead, we just delete ourselves directly.
- */
-int Client_Handler::handle_close (ACE_HANDLE _handle, ACE_Reactor_Mask _mask)
+/* If we return -1 out of handle_input() or if the reactor sees other
+   problems with us then handle_close() will be called.  The reactor
+   framework will take care of removing us (due to the -1), so we
+   don't strictly need destroy()'s removal step.  We still call
+   destroy() so that the object gets deleted.  */
+int 
+Client_Handler::handle_close (ACE_HANDLE handle,
+                              ACE_Reactor_Mask mask)
 {
-  ACE_UNUSED_ARG (_handle);
-  ACE_UNUSED_ARG (_mask);
+  ACE_UNUSED_ARG (handle);
+  ACE_UNUSED_ARG (mask);
 
   this->destroy ();
   return 0;
 }
 
-/*
-   The ACE_Svc_Handler<> is ultimately derived from ACE_Task<>.  If you want to
-   create a multi-threaded application, these are your tools!  Simply override
-   the svc() method in your derivative and arrange for your activate() method
-   to be called.  The svc() method then executes in the new thread.
+/* The ACE_Svc_Handler<> is ultimately derived from ACE_Task<>.  If
+   you want to create a multi-threaded application, these are your
+   tools!  Simply override the svc() method in your derivative and
+   arrange for your activate() method to be called.  The svc() method
+   then executes in the new thread.
 
    Of course, this is only valid if we're in thread-per-connection
    mode.  If we're using the reactor model, then svc() never comes
-   into play.
- */
-int Client_Handler::svc(void)
+   into play.  */
+int 
+Client_Handler::svc (void)
 {
-  /*
-     Like handle_input(), we create a buffer for loading the data.  Doing so
-     in handle_input() doesn't help any but there is a small performance increase
-     by doing this here:  the buffer is created once when the thread is created
-     instead of for each invocation of process().
-   */
-  char buf[128];
+  /* Like handle_input(), we create a buffer for loading the data.
+    Doing so in handle_input() doesn't help any but there is a small
+    performance increase by doing this here: the buffer is created
+    once when the thread is created instead of for each invocation of
+    process().  */
+  char buf[BUFSIZ];
 
   // Forever...
   while( 1 )
-  {
-     // Clean the buffer...
-     ACE_OS::memset (buf, 0, sizeof (buf));
-
-     /*
-        Invoke the proces() method to read and process the data.  This is
-        exactly the way it is used by handle_input().  That's the reason I
-        created process() in the first place:  so that it can be used in either
-        concurrency strategy.  Since process() has all of our application-level
-        logic, it's nice that it doesn't have to change when we decide to go
-        multi-threaded.
+    {
+      // Clean the buffer before each read so a short message never
+      // shows stale bytes from a previous, longer one.
+      ACE_OS::memset (buf, 0, sizeof (buf));
+
+      /* Invoke the process() method to read and process the data.
+        This is exactly the way it is used by handle_input().  That's
+        the reason I created process() in the first place: so that it
+        can be used in either concurrency strategy.  Since process()
+        has all of our application-level logic, it's nice that it
+        doesn't have to change when we decide to go multi-threaded.
 
         Notice that since the recv() method call in process() blocks until
         there is data ready, this thread doesn't consume any CPU time until
-        there is actually data sent from the client.
-     */
-     if( this->process(buf,sizeof(buf)) == -1 )
-     {
-       return(-1);
-     }
-  }
-
-  return(0);
+        there is actually data sent from the client.  */
+      if (this->process (buf, sizeof (buf)) == -1)
+        return -1;
+    }
+
+  return 0;
 }
 
-/*
-   And, at last, we get to the application-logic level.  Out of everything
-   we've done so far, this is the only thing that really has anything to do
-   with what your application will do.  In this method we will read and process 
-   the client's data.  In a real appliation, you will probably have a bit more
-   in main() to deal with command line options but after that point, all of the 
-   action takes place here. 
- */
-int Client_Handler::process (char *_rdbuf, int _rdbuf_len)
+/* And, at last, we get to the application-logic level.  Out of
+   everything we've done so far, this is the only thing that really
+   has anything to do with what your application will do.  In this
+   method we will read and process the client's data.  In a real
+   application, you will probably have a bit more in main() to deal
+   with command line options but after that point, all of the action
+   takes place here.  */
+int 
+Client_Handler::process (char *rdbuf,
+                         int rdbuf_len)
 {
-  /*
-     Using the buffer provided for us, we read the data from the client. If
-     there is a read error (eg -- recv() returns -1) then it's a pretty good
-     bet that the connection is gone.  Likewise, if we read zero bytes then
-     something wrong has happened.  The reactor wouldn't have called us if
-     there wasn't some kind of read activity but there wouldn't be activity if
-     there were no bytes to read...
-
-     On the other hand, if we got some data then we can display it in a  debug
-     message for everyone to see. 
-   */
-  switch (this->peer ().recv (_rdbuf, _rdbuf_len))
+  /* Using the buffer provided for us, we read the data from the
+    client. If there is a read error (eg -- recv() returns -1) then
+    it's a pretty good bet that the connection is gone.  Likewise, if
+    we read zero bytes then something wrong has happened.  The reactor
+    wouldn't have called us if there wasn't some kind of read activity
+    but there wouldn't be activity if there were no bytes to read...
+
+    On the other hand, if we got some data then we can display it in a
+    debug message for everyone to see.  */
+  switch (this->peer ().recv (rdbuf, rdbuf_len))
     {
     case -1:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
+      ACE_ERROR_RETURN ((LM_ERROR, 
+                         "(%P|%t) %p bad read\n",
+                         "client"),
+                        -1);
     case 0:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing daemon (fd = %d)\n", this->get_handle ()), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
     default:
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", _rdbuf));
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  rdbuf));
     }
 
   return 0;
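
The process() method above only reads what a peer sends, so it helps to see
the other half of the conversation.  Below is a minimal sketch of a throwaway
test client, not part of this change, that connects to ACE_DEFAULT_SERVER_PORT
on localhost and sends one message through the standard ACE_SOCK_Connector
and ACE_SOCK_Stream wrappers; the host, port and message text are only
illustrative.

    // Minimal test client sketch -- not part of the tutorial sources.
    #include "ace/SOCK_Connector.h"
    #include "ace/SOCK_Stream.h"
    #include "ace/INET_Addr.h"
    #include "ace/Log_Msg.h"

    int
    main (int, char *[])
    {
      ACE_INET_Addr addr (ACE_DEFAULT_SERVER_PORT, "localhost");
      ACE_SOCK_Stream peer;
      ACE_SOCK_Connector connector;

      // Actively establish the connection; -1 means failure.
      if (connector.connect (peer, addr) == -1)
        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "connect"), -1);

      // Send one newline-terminated greeting that the server's process()
      // will read and echo to its debug output.
      const char msg[] = "Hello from the test client\n";
      if (peer.send_n (msg, sizeof (msg) - 1) == -1)
        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send_n"), -1);

      peer.close ();
      return 0;
    }
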
diff --git a/docs/tutorials/006/client_handler.h b/docs/tutorials/006/client_handler.h
index 1c63fe33861..e8d3695d37c 100644
--- a/docs/tutorials/006/client_handler.h
+++ b/docs/tutorials/006/client_handler.h
@@ -79,7 +79,7 @@ public:
     object on the mask that caused the -1 return.  This means that we
     don't have to do that ourselves!  */
   virtual int handle_close (ACE_HANDLE handle = ACE_INVALID_HANDLE,
-                            ACE_Reactor_Mask mask = ACE_Event_Handler::ALL_EVENTS_MASK );
+                            ACE_Reactor_Mask mask = ACE_Event_Handler::ALL_EVENTS_MASK);
 
 protected:
 
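
The default arguments above are what the reactor supplies when it calls
handle_close() after a -1 return.  As a reminder of the two removal paths,
here is an illustrative sketch; the class and function names are invented,
and it assumes a handler allocated with new.

    // Illustrative sketch only -- names are invented, not from the tutorial.
    #include "ace/Event_Handler.h"
    #include "ace/Reactor.h"

    class Example_Handler : public ACE_Event_Handler
    {
    public:
      virtual int handle_input (ACE_HANDLE)
      {
        // Returning -1 asks the reactor to remove this handler and to
        // invoke handle_close() with the mask that caused the removal.
        return -1;
      }

      virtual int handle_close (ACE_HANDLE, ACE_Reactor_Mask)
      {
        // Safe only for handlers allocated with new.
        delete this;
        return 0;
      }
    };

    void
    explicit_removal (ACE_Reactor *reactor, Example_Handler *handler)
    {
      // When we remove the handler ourselves, DONT_CALL suppresses the
      // handle_close() callback so the object isn't deleted twice.
      reactor->remove_handler (handler,
                               ACE_Event_Handler::READ_MASK
                               | ACE_Event_Handler::DONT_CALL);
    }
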
diff --git a/docs/tutorials/006/page02.html b/docs/tutorials/006/page02.html
index 53413a1e094..be0b111a071 100644
--- a/docs/tutorials/006/page02.html
+++ b/docs/tutorials/006/page02.html
@@ -24,21 +24,17 @@ Tutorial 5 implementation is a single comment.
 
 // $Id$
 
-/*
-   We try to keep main() very simple.  One of the ways we do that is to push
-   much of the complicated stuff into worker objects.  In this case, we only 
-   need to include the acceptor header in our main source file.  We let it
-   worry about the "real work".     
- */
+/* We try to keep main() very simple.  One of the ways we do that is
+   to push much of the complicated stuff into worker objects.  In this
+   case, we only need to include the acceptor header in our main
+   source file.  We let it worry about the "real work".  */
 
 #include "client_acceptor.h"
 
-/*
-   As before, we create a simple signal handler that will set our finished
-   flag.  There are, of course, more elegant ways to handle program shutdown 
-   requests but that isn't really our focus right now, so we'll just do the
-   easiest thing.     
- */
+/* As before, we create a simple signal handler that will set our
+   finished flag.  There are, of course, more elegant ways to handle
+   program shutdown requests but that isn't really our focus right
+   now, so we'll just do the easiest thing.  */
 
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
@@ -46,91 +42,85 @@ extern "C" void handler (int)
   finished = 1;
 }
 
-/*
-   A server has to listen for clients at a known TCP/IP port.  The default ACE
-   port is 10002 (at least on my system) and that's good enough for what  we
-   want to do here.  Obviously, a more robust application would take a command
-   line parameter or read from a configuration file or do some other  clever
-   thing.  Just like the signal handler above, though, that's what we want to
-   focus on, so we're taking the easy way out.     
- */
+/* A server has to listen for clients at a known TCP/IP port.  The
+   default ACE port is 10002 (at least on my system) and that's good
+   enough for what we want to do here.  Obviously, a more robust
+   application would take a command line parameter or read from a
+   configuration file or do some other clever thing.  Just like the
+   signal handler above, though, that's not what we want to focus on,
+   so we're taking the easy way out.  */
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-/*
-   Finally, we get to main.  Some C++ compilers will complain loudly if your
-   function signature doesn't match the prototype.  Even though we're not 
-   going to use the parameters, we still  have to specify them.     
- */
+/* Finally, we get to main.  Some C++ compilers will complain loudly
+   if your function signature doesn't match the prototype.  Even
+   though we're not going to use the parameters, we still have to
+   specify them.  */
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-/*
-   In our earlier servers, we used a global pointer to get to the reactor. I've 
-   never really liked that idea, so I've moved it into main() this time. When
-   we  get to the Client_Handler object you'll see how we manage to get a
-   pointer back to this reactor.     
- */
+  /* In our earlier servers, we used a global pointer to get to the
+    reactor. I've never really liked that idea, so I've moved it into
+    main() this time. When we get to the Client_Handler object you'll
+    see how we manage to get a pointer back to this reactor.  */
   ACE_Reactor reactor;
 
-  /*
-     The acceptor will take care of letting clients connect to us.  It will
-     also arrange for a  Client_Handler to be created for each new client.
-     Since we're only going to listen at one  TCP/IP port, we only need one
-     acceptor.  If we wanted, though, we could create several of these  and
-     listen at several ports.  (That's what we would do if we wanted to rewrite 
-     inetd for  instance.)     
-   */
+  /* The acceptor will take care of letting clients connect to us.  It
+    will also arrange for a Client_Handler to be created for each new
+    client.  Since we're only going to listen at one TCP/IP port, we
+    only need one acceptor.  If we wanted, though, we could create
+    several of these and listen at several ports.  (That's what we
+    would do if we wanted to rewrite inetd for instance.)  */
   Client_Acceptor peer_acceptor;
 
-  /*
-     Create an ACE_INET_Addr that represents our endpoint of a connection. We
-     then open our acceptor object with that Addr.  Doing so tells the acceptor 
-     where to listen for connections.  Servers generally listen at "well known" 
-     addresses.  If not, there must be some mechanism by which the client is
-     informed of the server's address.
-
-     Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.  This
-     technique is used over and over again in our tutorials.    
-   */
-  if (peer_acceptor.open (ACE_INET_Addr (PORT), &reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-     As with Tutorial 5, we know that we're now registered with our reactor
-	 so we don't have to mess with that step.
-   */
-
-  /*
-     Install our signal handler.  You can actually register signal handlers
-     with the reactor.  You might do that when the signal handler is
-     responsible for performing "real" work.  Our simple flag-setter doesn't
-     justify deriving from ACE_Event_Handler and providing a callback function
-     though.    
-   */
+  /* Create an ACE_INET_Addr that represents our endpoint of a
+    connection. We then open our acceptor object with that Addr.
+    Doing so tells the acceptor where to listen for connections.
+    Servers generally listen at "well known" addresses.  If not, there
+    must be some mechanism by which the client is informed of the
+    server's address.
+
+    Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.
+    This technique is used over and over again in our tutorials.  */
+  if (peer_acceptor.open (ACE_INET_Addr (PORT),
+                          &reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* As with Tutorial 5, we know that we're now registered with our
+    reactor so we don't have to mess with that step.  */
+
+  /* Install our signal handler.  You can actually register signal
+    handlers with the reactor.  You might do that when the signal
+    handler is responsible for performing "real" work.  Our simple
+    flag-setter doesn't justify deriving from ACE_Event_Handler and
+    providing a callback function though.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  /*
-     Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.  It's a
-     handy way to generate uniform debug output from your program.    
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n"));
-
-  /*
-     This will loop "forever" invoking the handle_events() method of our
-     reactor. handle_events() watches for activity on any registered handlers
-     and invokes their appropriate callbacks when necessary.  Callback-driven
-     programming is a big thing in ACE, you should get used to it. If the
-     signal handler catches something, the finished flag will be set and we'll
-     exit.  Conveniently enough, handle_events() is also interrupted by signals 
-     and will exit back to the while() loop.  (If you want your event loop to
-     not be interrupted by signals, checkout the <i>restart</i> flag on the
-     open() method of ACE_Reactor if you're interested.)    
-   */
+  /* Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.
+    It's a handy way to generate uniform debug output from your
+    program.  */
+  ACE_DEBUG ((LM_DEBUG, 
+              "(%P|%t) starting up server daemon\n"));
+
+  /* This will loop "forever" invoking the handle_events() method of
+    our reactor. handle_events() watches for activity on any
+    registered handlers and invokes their appropriate callbacks when
+    necessary.  Callback-driven programming is a big thing in ACE, you
+    should get used to it. If the signal handler catches something,
+    the finished flag will be set and we'll exit.  Conveniently
+    enough, handle_events() is also interrupted by signals and will
+    exit back to the while() loop.  (If you don't want your event loop
+    to be interrupted by signals, check out the <i>restart</i> flag on
+    the open() method of ACE_Reactor.)  */
   while (!finished)
     reactor.handle_events ();
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server daemon\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server daemon\n"));
 
   return 0;
 }
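
The comment about ACE_Sig_Action above mentions that signal handlers can also
be registered with the reactor when they do real work.  A minimal sketch of
that alternative, with invented names and assuming the usual ACE headers are
visible, might look like this:

    // Illustrative sketch only: dispatching SIGINT through the reactor
    // instead of a raw ACE_Sig_Action.
    #include "ace/Event_Handler.h"
    #include "ace/Reactor.h"
    #include "ace/Log_Msg.h"

    class Shutdown_Handler : public ACE_Event_Handler
    {
    public:
      Shutdown_Handler (sig_atomic_t &flag) : flag_ (flag) {}

      virtual int handle_signal (int signum, siginfo_t *, ucontext_t *)
      {
        ACE_DEBUG ((LM_DEBUG, "(%P|%t) caught signal %d\n", signum));
        flag_ = 1;          // same effect as the extern "C" handler above
        return 0;
      }

    private:
      sig_atomic_t &flag_;
    };

    // In main(), instead of the ACE_Sig_Action line:
    //   Shutdown_Handler sh (finished);
    //   reactor.register_handler (SIGINT, &sh);
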
diff --git a/docs/tutorials/006/page04.html b/docs/tutorials/006/page04.html
index a580702f0cd..0c5ddce2758 100644
--- a/docs/tutorials/006/page04.html
+++ b/docs/tutorials/006/page04.html
@@ -104,7 +104,7 @@ public:
     object on the mask that caused the -1 return.  This means that we
     don't have to do that ourselves!  */
   virtual int handle_close (ACE_HANDLE handle = ACE_INVALID_HANDLE,
-                            ACE_Reactor_Mask mask = ACE_Event_Handler::ALL_EVENTS_MASK );
+                            ACE_Reactor_Mask mask = ACE_Event_Handler::ALL_EVENTS_MASK);
 
 protected:
 
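
The client_handler.cpp shown next relies on the activate()/svc() mechanism
inherited from ACE_Task<>.  Outside of the tutorial code, the bare mechanism
can be sketched like this; the names are invented and this is not part of
the change.

    // Illustrative sketch only -- not part of the tutorial sources.
    #include "ace/Task.h"
    #include "ace/Log_Msg.h"

    class Worker : public ACE_Task<ACE_MT_SYNCH>
    {
    public:
      // svc() is the code that runs in each thread spawned by activate().
      virtual int svc (void)
      {
        ACE_DEBUG ((LM_DEBUG, "(%P|%t) svc() running in its own thread\n"));
        return 0;
      }
    };

    // Usage:
    //   Worker w;
    //   w.activate ();   // spawn one thread executing w.svc()
    //   w.wait ();       // join that thread before w is destroyed
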
diff --git a/docs/tutorials/006/page05.html b/docs/tutorials/006/page05.html
index ce9d99ce0b6..083f6ef0dcb 100644
--- a/docs/tutorials/006/page05.html
+++ b/docs/tutorials/006/page05.html
@@ -22,328 +22,293 @@ made in open() as well as the bit of cleverness in svc().
 


-
 // $Id$
 
-/*
-   In client_handler.h I alluded to the fact that we'll mess around with a
-   Client_Acceptor pointer.  To do so, we need the Client_Acceptor object
-   declaration.
+/* In client_handler.h I alluded to the fact that we'll mess around
+   with a Client_Acceptor pointer.  To do so, we need the
+   Client_Acceptor object declaration.
 
    We know that including client_handler.h is redundant because
-   client_acceptor.h includes it.  Still, the sentry prevents double-inclusion
-   from causing problems and it's sometimes good to be explicit about what
-   we're using.
+   client_acceptor.h includes it.  Still, the sentry prevents
+   double-inclusion from causing problems and it's sometimes good to
+   be explicit about what we're using.
 
-   On the other hand, we don't directly include any ACE header files here.  
- */
+   On the other hand, we don't directly include any ACE header files
+   here.  */
 #include "client_acceptor.h"
 #include "client_handler.h"
 
-/*
-   Our constructor doesn't do anything.  That's generally a good idea.  Unless
-   you want to start throwing exceptions, there isn't a really good way to
-   indicate that a constructor has failed.  If I had my way, I'd have a boolean 
-   return code from it that would cause new to return 0 if I failed.  Oh 
-   well...  
- */
+/* Our constructor doesn't do anything.  That's generally a good idea.
+   Unless you want to start throwing exceptions, there isn't a really
+   good way to indicate that a constructor has failed.  If I had my
+   way, I'd have a boolean return code from it that would cause new to
+   return 0 if I failed.  Oh well...  */
 Client_Handler::Client_Handler (void)
 {
 }
 
-/*
-   Our destructor doesn't do anything either.  That is also by design.
-   Remember, we really want folks to use destroy() to get rid of us.  If that's 
-   so, then there's nothing left to do when the destructor gets invoked.  
- */
+/* Our destructor doesn't do anything either.  That is also by design.
+   Remember, we really want folks to use destroy() to get rid of us.
+   If that's so, then there's nothing left to do when the destructor
+   gets invoked.  */
 Client_Handler::~Client_Handler (void)
 {
 }
 
-/*
-   The much talked about destroy() method!  The reason I keep going on about
-   this is because it's just a Bad Idea (TM) to do real work inside of a
-   destructor.   Although this method is void, it really should return
-   int so that it can tell the caller there was a problem.  Even as
-   void you could at least throw an exception which you would never want 
-   to do in a destructor.  
- */
-void Client_Handler::destroy (void)
+/* The much talked about destroy() method!  The reason I keep going on
+   about this is because it's just a Bad Idea (TM) to do real work
+   inside of a destructor.  Although this method is void, it really
+   should return int so that it can tell the caller there was a
+   problem.  Even as void you could at least throw an exception which
+   you would never want to do in a destructor.  */
+void 
+Client_Handler::destroy (void)
 {
-  /*
-     Tell the reactor to forget all about us.  Notice that we use the same args
-     here that we use in the open() method to register ourselves.  In addition,
-     we use the DONT_CALL flag to prevent handle_close() being called.  Since we 
-     likely got here due to handle_close(), that could cause a bit of nasty
-     recursion! 
-   */
+  /* Tell the reactor to forget all about us.  Notice that we use the
+    same args here that we use in the open() method to register
+    ourselves.  In addition, we use the DONT_CALL flag to prevent
+    handle_close() being called.  Since we likely got here due to
+    handle_close(), that could cause a bit of nasty recursion!  */
   this->reactor ()->remove_handler (this,
-        ACE_Event_Handler:: READ_MASK | ACE_Event_Handler::DONT_CALL);
+                                    ACE_Event_Handler::READ_MASK 
+                                    | ACE_Event_Handler::DONT_CALL);
 
-  /*
-     This is how we're able to tell folks not to use delete.  By
-     deleting our own instance, we take care of memory leaks after ensuring
-     that the object is shut down correctly.  
-   */
+  /* This is how we're able to tell folks not to use delete.  By
+    deleting our own instance, we take care of memory leaks after
+    ensuring that the object is shut down correctly.  */
   delete this;
 }
 
-/*
-   As mentioned before, the open() method is called by the Client_Acceptor when 
-   a new client connection has been accepted.  The Client_Acceptor instance
-   pointer is cast to a void* and given to us here.  We'll use that to avoid
-   some global data...  
- */
-int Client_Handler::open (void *_acceptor)
+/* As mentioned before, the open() method is called by the
+   Client_Acceptor when a new client connection has been accepted.
+   The Client_Acceptor instance pointer is cast to a void* and given
+   to us here.  We'll use that to avoid some global data...  */
+int 
+Client_Handler::open (void *acceptor)
 {
-  /*
-     We need this to store the address of the client that we are now connected
-     to.  We'll use it later to display a debug message.  
-   */
+  /* We need this to store the address of the client that we are now
+     connected to.  We'll use it later to display a debug message.  */
   ACE_INET_Addr addr;
 
-  /*
-     Our ACE_Svc_Handler baseclass gives us the peer() method as a way to
-     access our underlying ACE_SOCK_Stream.  On that object, we can invoke the
-     get_remote_addr() method to get get an ACE_INET_Addr having our client's
-     address information. As with most ACE methods, we'll get back (and return) 
-     a -1 if there was any kind of error.  Once we have the ACE_INET_Addr, we
-     can query it to find out the client's host name, TCP/IP address, TCP/IP
-     port value and so forth.  One word of warning:   the get_host_name()
-     method of ACE_INET_Addr may return you an empty string if your name server 
-     can't resolve it.  On the other hand, get_host_addr() will always give you 
-     the dotted-decimal string representing the TCP/IP address.  
-   */
+  /* Our ACE_Svc_Handler baseclass gives us the peer() method as a way
+    to access our underlying ACE_SOCK_Stream.  On that object, we can
+    invoke the get_remote_addr() method to get an ACE_INET_Addr
+    having our client's address information. As with most ACE methods,
+    we'll get back (and return) a -1 if there was any kind of error.
+    Once we have the ACE_INET_Addr, we can query it to find out the
+    client's host name, TCP/IP address, TCP/IP port value and so
+    forth.  One word of warning: the get_host_name() method of
+    ACE_INET_Addr may return you an empty string if your name server
+    can't resolve it.  On the other hand, get_host_addr() will always
+    give you the dotted-decimal string representing the TCP/IP
+    address.  */
   if (this->peer ().get_remote_addr (addr) == -1)
-    {
-      return -1;
-    }
-
-  /*
-     Convert the void* to a Client_Acceptor*.  You should probably use those
-     fancy new C++ cast operators but I can never remember how/when to do so.
-     Since you can cast just about anything around a void* without compiler
-     warnings be very sure of what you're doing when you do this kind of thing.
-     That's where the new-style cast operators can save you.  
-   */
-  Client_Acceptor *acceptor = (Client_Acceptor *) _acceptor;
-
-  /*
-     Our Client_Acceptor is constructed with a concurrency strategy.  Here, we
-     go back to it to find out what that strategy was.  If thread-per-connection
-     was selected then we simply activate a thread for ourselves and exit.  Our
-     svc() method will then begin executing in that thread.
-
-     If we are told to use the single-threaded strategy, there is no difference
-     between this and the Tutorial 5 implementation.
-
-     Note that if we're in thread-per-connection mode, open() is
-     exited at this point.  Furthermore, thread-per-connection mode
-     does not use the reactor which means that handle_input() and it's 
-     fellows are not invoked.
-   */
-  if( acceptor->thread_per_connection() )
-  {
-    return this->activate();
-  }
-
-   // ************************************************************************
-   // From here on, we're doing the traditional reactor thing.  If
-   // you're operating in thread-per-connection mode, this code does
-   // not apply.
-   // ************************************************************************
-
-  /*
-     Our reactor reference will be set when we register ourselves but I decided
-     to go ahead and set it here.  No good reason really...  
-   */
+    return -1;
+
+  /* Convert the void* to a Client_Acceptor*.  You should probably use
+    those fancy ACE_*_cast macros but I can never remember how/when to
+    do so.  Since you can cast just about anything around a void*
+    without compiler warnings be very sure of what you're doing when
+    you do this kind of thing.  That's where the new-style cast
+    operators can save you.  */
+  Client_Acceptor *client_acceptor = (Client_Acceptor *) acceptor;
+
+  /* Our Client_Acceptor is constructed with a concurrency strategy.
+    Here, we go back to it to find out what that strategy was.  If
+    thread-per-connection was selected then we simply activate a
+    thread for ourselves and exit.  Our svc() method will then begin
+    executing in that thread.
+
+    If we are told to use the single-threaded strategy, there is no
+    difference between this and the Tutorial 5 implementation.
+
+    Note that if we're in thread-per-connection mode, open() is exited
+    at this point.  Furthermore, thread-per-connection mode does not
+    use the reactor which means that handle_input() and its fellows
+    are not invoked.  */
+  if (client_acceptor->thread_per_connection ())
+    return this->activate ();
+
+  // ************************************************************************
+  // From here on, we're doing the traditional reactor thing.  If
+  // you're operating in thread-per-connection mode, this code does
+  // not apply.
+  // ************************************************************************
+
+  /* Our reactor reference will be set when we register ourselves but
+    I decided to go ahead and set it here.  No good reason really...  */
-  this->reactor (acceptor->reactor ());
+  this->reactor (client_acceptor->reactor ());
 
-  /*
-     If we managed to get the client's address then we're connected to a real
-     and valid client.  I suppose that in some cases, the client may connect
-     and disconnect so quickly that it is invalid by the time we get here. In
-     any case, the test above should always be done to ensure that the
-     connection is worth keeping.
-
-     Now, regiser ourselves with a reactor and tell that reactor that we want
-     to be notified when there is something to read.  Remember, we took our
-     reactor value from the acceptor which created us in the first place.
-     Since we're exploring a single-threaded implementation, this is the
-     correct thing to do. 
-   */
-  if (this->reactor ()->register_handler (this, ACE_Event_Handler::READ_MASK) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
-    }
-
-  /*
-     Here, we use the ACE_INET_Addr object to print a message with the name of
-     the client we're connected to.  Again, it is possible that you'll get an
-     empty string for the host name if your DNS isn't configured correctly or
-     if there is some other reason that a TCP/IP addreess cannot be converted
-     into a host name. 
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) connected with %s\n", addr.get_host_name ()));
-
-  /*
-     Always return zero on success. 
-   */
+  /* If we managed to get the client's address then we're connected to
+    a real and valid client.  I suppose that in some cases, the client
+    may connect and disconnect so quickly that it is invalid by the
+    time we get here. In any case, the test above should always be
+    done to ensure that the connection is worth keeping.
+
+    Now, register ourselves with a reactor and tell that reactor that
+    we want to be notified when there is something to read.  Remember,
+    we took our reactor value from the acceptor which created us in
+    the first place.  Since we're exploring a single-threaded
+    implementation, this is the correct thing to do.  */
+  if (this->reactor ()->register_handler (this,
+                                          ACE_Event_Handler::READ_MASK) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "(%P|%t) can't register with reactor\n"),
+                      -1);
+
+  /* Here, we use the ACE_INET_Addr object to print a message with the
+    name of the client we're connected to.  Again, it is possible that
+    you'll get an empty string for the host name if your DNS isn't
+    configured correctly or if there is some other reason that a
+    TCP/IP address cannot be converted into a host name.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) connected with %s\n", addr.get_host_name ()));
+
+  /* Always return zero on success.  */
   return 0;
 }
 
-/*
-   As mentioned in the header, the typical way to close an object in a threaded
-   context is to invoke it's close() method.  Since we already have a handle_close()
-   method built to cleanup after us, we'll just forward the request on to that
-   object.
- */
-int Client_Handler::close(u_long flags)
+/* As mentioned in the header, the typical way to close an object in a
+   threaded context is to invoke its close() method.  Since we
+   already have a handle_close() method built to cleanup after us,
+   we'll just forward the request on to that object.  */
+int 
+Client_Handler::close (u_long flags)
 {
-    ACE_UNUSED_ARG(flags);
-
-    /*
-      We use the destroy() method to clean up after ourselves.
-      That will take care of removing us from the reactor and then
-      freeing our memory.
-    */
-    this->destroy();
-
-	/*
-	   Don't forward the close() to the baseclass!  handle_close() above has
-	   already taken care of delete'ing.  Forwarding close() would cause that
-	   to happen again and things would get really ugly at that point!
-	 */
-	return 0;
+  ACE_UNUSED_ARG (flags);
+
+  /* We use the destroy() method to clean up after ourselves.  That
+    will take care of removing us from the reactor and then freeing
+    our memory.  */
+  this->destroy ();
+
+  /* Don't forward the close() to the baseclass!  handle_close() above
+    has already taken care of delete'ing.  Forwarding close() would
+    cause that to happen again and things would get really ugly at
+    that point!  */
+  return 0;
 }
 
-/*
-   In the open() method, we registered with the reactor and requested to be
-   notified when there is data to be read.  When the reactor sees that activity
-   it will invoke this handle_input() method on us.  As I mentioned, the _handle
-   parameter isn't useful to us but it narrows the list of methods the reactor
-   has to worry about and the list of possible virtual functions we would have
-   to override.
-
-   Again, this is not used if we're in thread-per-connection mode.
- */
-int Client_Handler::handle_input (ACE_HANDLE _handle)
+/* In the open() method, we registered with the reactor and requested
+   to be notified when there is data to be read.  When the reactor
+   sees that activity it will invoke this handle_input() method on us.
+   As I mentioned, the handle parameter isn't useful to us but it
+   narrows the list of methods the reactor has to worry about and the
+   list of possible virtual functions we would have to override.
+
+   Again, this is not used if we're in thread-per-connection mode.  */
+int 
+Client_Handler::handle_input (ACE_HANDLE handle)
 {
-  /*
-     Some compilers don't like it when you fail to use a parameter.  This macro
-     will keep 'em quiet for you. 
-   */
-  ACE_UNUSED_ARG (_handle);
-
-  /*
-     Now, we create and initialize a buffer for receiving the data.  Since this
-     is just a simple test app, we'll use a small buffer size. 
-   */
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
-
-  /*
-     Invoke the process() method with a pointer to our data area.  We'll let
-     that method worry about interfacing with the data.  You might choose to go 
-     ahead and read the data and then pass the result to process().  However,
-     application logic may require that you read a few bytes to determine what
-     else to read...  It's best if we push that all into the application-logic
-     level. 
-   */
+  /* Some compilers don't like it when you fail to use a parameter.
+    This macro will keep 'em quiet for you.  */
+  ACE_UNUSED_ARG (handle);
+
+  /* Now, we create and initialize a buffer for receiving the data.
+    Since this is just a simple test app, we'll use a small buffer
+    size.  */
+  char buf[BUFSIZ];
+  ACE_OS::memset (buf, 0, sizeof (buf));
+
+  /* Invoke the process() method with a pointer to our data area.
+    We'll let that method worry about interfacing with the data.  You
+    might choose to go ahead and read the data and then pass the
+    result to process().  However, application logic may require that
+    you read a few bytes to determine what else to read...  It's best
+    if we push that all into the application-logic level.  */
   return this->process (buf, sizeof (buf));
 }
 
-/*
-   If we return -1 out of handle_input() or if the reactor sees other problems
-   with us then handle_close() will be called.  The reactor framework
-   will take care of removing us (due to the -1), so we don't need to
-   use the destroy() method.  Instead, we just delete ourselves directly.
- */
-int Client_Handler::handle_close (ACE_HANDLE _handle, ACE_Reactor_Mask _mask)
+/* If we return -1 out of handle_input() or if the reactor sees other
+   problems with us then handle_close() will be called.  The reactor
+   framework will take care of removing us (due to the -1), so we
+   don't strictly need destroy()'s removal step.  We still call
+   destroy() so that the object gets deleted.  */
+int 
+Client_Handler::handle_close (ACE_HANDLE handle,
+                              ACE_Reactor_Mask mask)
 {
-  ACE_UNUSED_ARG (_handle);
-  ACE_UNUSED_ARG (_mask);
+  ACE_UNUSED_ARG (handle);
+  ACE_UNUSED_ARG (mask);
 
   this->destroy ();
   return 0;
 }
 
-/*
-   The ACE_Svc_Handler<> is ultimately derived from ACE_Task<>.  If you want to
-   create a multi-threaded application, these are your tools!  Simply override
-   the svc() method in your derivative and arrange for your activate() method
-   to be called.  The svc() method then executes in the new thread.
+/* The ACE_Svc_Handler<> is ultimately derived from ACE_Task<>.  If
+   you want to create a multi-threaded application, these are your
+   tools!  Simply override the svc() method in your derivative and
+   arrange for your activate() method to be called.  The svc() method
+   then executes in the new thread.
 
    Of course, this is only valid if we're in thread-per-connection
    mode.  If we're using the reactor model, then svc() never comes
-   into play.
- */
-int Client_Handler::svc(void)
+   into play.  */
+int 
+Client_Handler::svc (void)
 {
-  /*
-     Like handle_input(), we create a buffer for loading the data.  Doing so
-     in handle_input() doesn't help any but there is a small performance increase
-     by doing this here:  the buffer is created once when the thread is created
-     instead of for each invocation of process().
-   */
-  char buf[128];
+  /* Like handle_input(), we create a buffer for loading the data.
+    Doing so in handle_input() doesn't help any but there is a small
+    performance increase by doing this here: the buffer is created
+    once when the thread is created instead of for each invocation of
+    process().  */
+  char buf[BUFSIZ];
 
   // Forever...
   while( 1 )
-  {
-     // Clean the buffer...
-     ACE_OS::memset (buf, 0, sizeof (buf));
-
-     /*
-        Invoke the proces() method to read and process the data.  This is
-        exactly the way it is used by handle_input().  That's the reason I
-        created process() in the first place:  so that it can be used in either
-        concurrency strategy.  Since process() has all of our application-level
-        logic, it's nice that it doesn't have to change when we decide to go
-        multi-threaded.
+    {
+      // Clean the buffer before each read so a short message never
+      // shows stale bytes from a previous, longer one.
+      ACE_OS::memset (buf, 0, sizeof (buf));
+
+      /* Invoke the process() method to read and process the data.
+        This is exactly the way it is used by handle_input().  That's
+        the reason I created process() in the first place: so that it
+        can be used in either concurrency strategy.  Since process()
+        has all of our application-level logic, it's nice that it
+        doesn't have to change when we decide to go multi-threaded.
 
         Notice that since the recv() method call in process() blocks until
         there is data ready, this thread doesn't consume any CPU time until
-        there is actually data sent from the client.
-     */
-     if( this->process(buf,sizeof(buf)) == -1 )
-     {
-       return(-1);
-     }
-  }
-
-  return(0);
+        there is actually data sent from the client.  */
+      if (this->process (buf, sizeof (buf)) == -1)
+        return -1;
+    }
+
+  return 0;
 }
 
-/*
-   And, at last, we get to the application-logic level.  Out of everything
-   we've done so far, this is the only thing that really has anything to do
-   with what your application will do.  In this method we will read and process 
-   the client's data.  In a real appliation, you will probably have a bit more
-   in main() to deal with command line options but after that point, all of the 
-   action takes place here. 
- */
-int Client_Handler::process (char *_rdbuf, int _rdbuf_len)
+/* And, at last, we get to the application-logic level.  Out of
+   everything we've done so far, this is the only thing that really
+   has anything to do with what your application will do.  In this
+   method we will read and process the client's data.  In a real
+   application, you will probably have a bit more in main() to deal
+   with command line options but after that point, all of the action
+   takes place here.  */
+int 
+Client_Handler::process (char *rdbuf,
+                         int rdbuf_len)
 {
-  /*
-     Using the buffer provided for us, we read the data from the client. If
-     there is a read error (eg -- recv() returns -1) then it's a pretty good
-     bet that the connection is gone.  Likewise, if we read zero bytes then
-     something wrong has happened.  The reactor wouldn't have called us if
-     there wasn't some kind of read activity but there wouldn't be activity if
-     there were no bytes to read...
-
-     On the other hand, if we got some data then we can display it in a  debug
-     message for everyone to see. 
-   */
-  switch (this->peer ().recv (_rdbuf, _rdbuf_len))
+  /* Using the buffer provided for us, we read the data from the
+    client. If there is a read error (eg -- recv() returns -1) then
+    it's a pretty good bet that the connection is gone.  Likewise, if
+    we read zero bytes then something wrong has happened.  The reactor
+    wouldn't have called us if there wasn't some kind of read activity
+    but there wouldn't be activity if there were no bytes to read...
+
+    On the other hand, if we got some data then we can display it in a
+    debug message for everyone to see.  */
+  switch (this->peer ().recv (rdbuf, rdbuf_len))
     {
     case -1:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
+      ACE_ERROR_RETURN ((LM_ERROR, 
+                         "(%P|%t) %p bad read\n",
+                         "client"),
+                        -1);
     case 0:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing daemon (fd = %d)\n", this->get_handle ()), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
     default:
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", _rdbuf));
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  rdbuf));
     }
 
   return 0;
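
One detail the tutorial's process() glosses over is that recv() does not
NUL-terminate what it reads, yet the buffer is printed with a %s conversion.
As a sketch of one way to tighten that up while keeping the tutorial's names
and signature, the variant below records the byte count and terminates the
buffer itself; it is illustrative, not part of the change.

    int
    Client_Handler::process (char *rdbuf, int rdbuf_len)
    {
      // Leave room for the terminator we add below.
      ssize_t received = this->peer ().recv (rdbuf, rdbuf_len - 1);

      switch (received)
        {
        case -1:
          ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
        case 0:
          ACE_ERROR_RETURN ((LM_ERROR,
                             "(%P|%t) closing daemon (fd = %d)\n",
                             this->get_handle ()),
                            -1);
        default:
          rdbuf[received] = '\0';   // terminate exactly where the data ends
          ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", rdbuf));
        }

      return 0;
    }
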
diff --git a/docs/tutorials/006/server.cpp b/docs/tutorials/006/server.cpp
index 5195f46f8d0..3d324960a52 100644
--- a/docs/tutorials/006/server.cpp
+++ b/docs/tutorials/006/server.cpp
@@ -1,20 +1,16 @@
 // $Id$
 
-/*
-   We try to keep main() very simple.  One of the ways we do that is to push
-   much of the complicated stuff into worker objects.  In this case, we only 
-   need to include the acceptor header in our main source file.  We let it
-   worry about the "real work".     
- */
+/* We try to keep main() very simple.  One of the ways we do that is
+   to push much of the complicated stuff into worker objects.  In this
+   case, we only need to include the acceptor header in our main
+   source file.  We let it worry about the "real work".  */
 
 #include "client_acceptor.h"
 
-/*
-   As before, we create a simple signal handler that will set our finished
-   flag.  There are, of course, more elegant ways to handle program shutdown 
-   requests but that isn't really our focus right now, so we'll just do the
-   easiest thing.     
- */
+/* As before, we create a simple signal handler that will set our
+   finished flag.  There are, of course, more elegant ways to handle
+   program shutdown requests but that isn't really our focus right
+   now, so we'll just do the easiest thing.  */
 
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
@@ -22,91 +18,85 @@ extern "C" void handler (int)
   finished = 1;
 }
 
-/*
-   A server has to listen for clients at a known TCP/IP port.  The default ACE
-   port is 10002 (at least on my system) and that's good enough for what  we
-   want to do here.  Obviously, a more robust application would take a command
-   line parameter or read from a configuration file or do some other  clever
-   thing.  Just like the signal handler above, though, that's what we want to
-   focus on, so we're taking the easy way out.     
- */
+/* A server has to listen for clients at a known TCP/IP port.  The
+   default ACE port is 10002 (at least on my system) and that's good
+   enough for what we want to do here.  Obviously, a more robust
+   application would take a command line parameter or read from a
+   configuration file or do some other clever thing.  Just like the
+   signal handler above, though, that's not what we want to focus on,
+   so we're taking the easy way out.  */
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-/*
-   Finally, we get to main.  Some C++ compilers will complain loudly if your
-   function signature doesn't match the prototype.  Even though we're not 
-   going to use the parameters, we still  have to specify them.     
- */
+/* Finally, we get to main.  Some C++ compilers will complain loudly
+   if your function signature doesn't match the prototype.  Even
+   though we're not going to use the parameters, we still have to
+   specify them.  */
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-/*
-   In our earlier servers, we used a global pointer to get to the reactor. I've 
-   never really liked that idea, so I've moved it into main() this time. When
-   we  get to the Client_Handler object you'll see how we manage to get a
-   pointer back to this reactor.     
- */
+  /* In our earlier servers, we used a global pointer to get to the
+    reactor. I've never really liked that idea, so I've moved it into
+    main() this time. When we get to the Client_Handler object you'll
+    see how we manage to get a pointer back to this reactor.  */
   ACE_Reactor reactor;
 
-  /*
-     The acceptor will take care of letting clients connect to us.  It will
-     also arrange for a  Client_Handler to be created for each new client.
-     Since we're only going to listen at one  TCP/IP port, we only need one
-     acceptor.  If we wanted, though, we could create several of these  and
-     listen at several ports.  (That's what we would do if we wanted to rewrite 
-     inetd for  instance.)     
-   */
+  /* The acceptor will take care of letting clients connect to us.  It
+    will also arrange for a Client_Handler to be created for each new
+    client.  Since we're only going to listen at one TCP/IP port, we
+    only need one acceptor.  If we wanted, though, we could create
+    several of these and listen at several ports.  (That's what we
+    would do if we wanted to rewrite inetd for instance.)  */
   Client_Acceptor peer_acceptor;
 
-  /*
-     Create an ACE_INET_Addr that represents our endpoint of a connection. We
-     then open our acceptor object with that Addr.  Doing so tells the acceptor 
-     where to listen for connections.  Servers generally listen at "well known" 
-     addresses.  If not, there must be some mechanism by which the client is
-     informed of the server's address.
-
-     Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.  This
-     technique is used over and over again in our tutorials.    
-   */
-  if (peer_acceptor.open (ACE_INET_Addr (PORT), &reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-     As with Tutorial 5, we know that we're now registered with our reactor
-	 so we don't have to mess with that step.
-   */
-
-  /*
-     Install our signal handler.  You can actually register signal handlers
-     with the reactor.  You might do that when the signal handler is
-     responsible for performing "real" work.  Our simple flag-setter doesn't
-     justify deriving from ACE_Event_Handler and providing a callback function
-     though.    
-   */
+  /* Create an ACE_INET_Addr that represents our endpoint of a
+    connection. We then open our acceptor object with that Addr.
+    Doing so tells the acceptor where to listen for connections.
+    Servers generally listen at "well known" addresses.  If not, there
+    must be some mechanism by which the client is informed of the
+    server's address.
+
+    Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.
+    This technique is used over and over again in our tutorials.  */
+  if (peer_acceptor.open (ACE_INET_Addr (PORT),
+                          &reactor) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* As with Tutorial 5, we know that we're now registered with our
+    reactor so we don't have to mess with that step.  */
+
+  /* Install our signal handler.  You can actually register signal
+    handlers with the reactor.  You might do that when the signal
+    handler is responsible for performing "real" work.  Our simple
+    flag-setter doesn't justify deriving from ACE_Event_Handler and
+    providing a callback function though.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  /*
-     Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.  It's a
-     handy way to generate uniform debug output from your program.    
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n"));
-
-  /*
-     This will loop "forever" invoking the handle_events() method of our
-     reactor. handle_events() watches for activity on any registered handlers
-     and invokes their appropriate callbacks when necessary.  Callback-driven
-     programming is a big thing in ACE, you should get used to it. If the
-     signal handler catches something, the finished flag will be set and we'll
-     exit.  Conveniently enough, handle_events() is also interrupted by signals 
-     and will exit back to the while() loop.  (If you want your event loop to
-     not be interrupted by signals, checkout the restart flag on the
-     open() method of ACE_Reactor if you're interested.)    
-   */
+  /* Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.
+    It's a handy way to generate uniform debug output from your
+    program.  */
+  ACE_DEBUG ((LM_DEBUG, 
+              "(%P|%t) starting up server daemon\n"));
+
+  /* This will loop "forever" invoking the handle_events() method of
+    our reactor. handle_events() watches for activity on any
+    registered handlers and invokes their appropriate callbacks when
+    necessary.  Callback-driven programming is a big thing in ACE, you
+    should get used to it. If the signal handler catches something,
+    the finished flag will be set and we'll exit.  Conveniently
+    enough, handle_events() is also interrupted by signals and will
+    exit back to the while() loop.  (If you want your event loop to
+    not be interrupted by signals, check out the restart flag on
+    the open() method of ACE_Reactor if you're interested.)  */
   while (!finished)
     reactor.handle_events ();
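+
+  /* A hedged sketch (not part of the tutorial code): the restart
+    behaviour mentioned above is requested when the reactor is
+    opened, along the lines of
+
+      ACE_Reactor reactor;
+      reactor.open (256,   // max handles, arbitrary for this sketch
+                    1);    // restart handle_events() after a signal
+
+    With restart enabled, handle_events() resumes after a signal
+    interrupts it instead of returning to the caller.  */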
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server daemon\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server daemon\n"));
 
   return 0;
 }
diff --git a/docs/tutorials/007/client_acceptor.cpp b/docs/tutorials/007/client_acceptor.cpp
index 6cc90612558..16b707a505f 100644
--- a/docs/tutorials/007/client_acceptor.cpp
+++ b/docs/tutorials/007/client_acceptor.cpp
@@ -1,67 +1,56 @@
-
 // $Id$
 
 #include "client_acceptor.h"
 
-/*
-   Construct ourselves with the chosen concurrency strategy.  Notice that we also
-   set our Thread_Pool reference to our private instance.
- */
-Client_Acceptor::Client_Acceptor( int _concurrency )
- :  concurrency_(_concurrency)
-   ,the_thread_pool_(private_thread_pool_)
+/* Construct ourselves with the chosen concurrency strategy.  Notice
+   that we also set our Thread_Pool reference to our private instance.  */
+Client_Acceptor::Client_Acceptor (int concurrency)
+  : concurrency_ (concurrency),
+    the_thread_pool_ (private_thread_pool_)
 {
 }
 
-/*
-   Construct ourselves with a reference to somebody else' Thread_Pool.  Obvioulsy
-   our concurrency strategy is "thread_pool_" at this point.
- */
-Client_Acceptor::Client_Acceptor( Thread_Pool & _thread_pool )
- :  concurrency_(thread_pool_)
-   ,the_thread_pool_(_thread_pool)
+/* Construct ourselves with a reference to somebody else's Thread_Pool.
+   Obviously our concurrency strategy is "thread_pool_" at this point.  */
+Client_Acceptor::Client_Acceptor (Thread_Pool &thread_pool)
+  : concurrency_ (thread_pool),
+    the_thread_pool_ (thread_pool)
 {
 }
 
-/*
-   When we're destructed, we may need to cleanup after ourselves.  If we're running
-   with a thread pool that we own, it is up to us to close it down.
- */
-Client_Acceptor::~Client_Acceptor( void )
+/* When we're destructed, we may need to clean up after ourselves.  If
+   we're running with a thread pool that we own, it is up to us to
+   close it down.  */
+Client_Acceptor::~Client_Acceptor (void)
 {
-	if( this->concurrency() == thread_pool_ && thread_pool_is_private() )
-	{
-		thread_pool()->close();
-	}
+  if (this->concurrency() == thread_pool_ && thread_pool_is_private ())
+    thread_pool ()->close ();
 }
 
-/*
-   Similar to the destructor (and close() below) it is necessary for us to open the
-   thread pool in some circumstances.
+/* Similar to the destructor (and close() below) it is necessary for
+   us to open the thread pool in some circumstances.
 
-   Notice how we delegate most of the open() work to the open() method of our baseclass.
- */
-int Client_Acceptor::open( const ACE_INET_Addr & _addr, ACE_Reactor * _reactor, int _pool_size )
+   Notice how we delegate most of the open() work to the open() method
+   of our baseclass.  */
+int 
+Client_Acceptor::open (const ACE_INET_Addr &addr,
+                       ACE_Reactor *reactor,
+                       int pool_size)
 {
-	if( this->concurrency() == thread_pool_ && thread_pool_is_private() )
-	{
-		thread_pool()->open(_pool_size);
-	}
+  if (this->concurrency() == thread_pool_ && thread_pool_is_private ())
+    thread_pool ()->open (pool_size);
 
-	return inherited::open(_addr,_reactor);
+  return inherited::open (addr, reactor);
 }
 
-/*
-   Here again we find that we have to manage the thread pool.  Like open() we also delegate
-   the other work to our baseclass.
- */
-int Client_Acceptor::close(void)
+/* Here again we find that we have to manage the thread pool.  Like
+   open() we also delegate the other work to our baseclass.  */
+int 
+Client_Acceptor::close (void)
 {
-	if( this->concurrency() == thread_pool_ && thread_pool_is_private() )
-	{
-		thread_pool()->close();
-	}
+  if (this->concurrency() == thread_pool_ && thread_pool_is_private ())
+    thread_pool ()->close ();
 
-	return inherited::close();
+  return inherited::close ();
 }
 
diff --git a/docs/tutorials/007/client_handler.cpp b/docs/tutorials/007/client_handler.cpp
index 849cf44bf58..60b743d8de1 100644
--- a/docs/tutorials/007/client_handler.cpp
+++ b/docs/tutorials/007/client_handler.cpp
@@ -1,248 +1,228 @@
-
 // $Id$
 
-/*
-   Since this is the third time we've seen most of this, I'm going to strip out almost
-   all of the comments that you've already seen.  That way, you can concentrate on the
-   new items.
- */
+/* Since this is the third time we've seen most of this, I'm going to
+   strip out almost all of the comments that you've already seen.
+   That way, you can concentrate on the new items.  */
 
 #include "client_acceptor.h"
 #include "client_handler.h"
 
-/*
-   We're going to be registering and unregistering a couple of times.  To make sure that
-   we use the same flags every time, I've created these handy macros.
- */
-#define	REGISTER_MASK	 ACE_Event_Handler::READ_MASK
-#define REMOVE_MASK		(ACE_Event_Handler::READ_MASK | ACE_Event_Handler::DONT_CALL)
-
-/*
-   Our constructor still doesn't really do anything.  We simply initialize the acceptor
-   pointer to "null" and get our current thread id.  The static self() method of ACE_Thread
-   will return you a thread id native to your platform.
- */
+/* We're going to be registering and unregistering a couple of times.
+   To make sure that we use the same flags every time, I've created
+   these handy macros.  */
+#define	REGISTER_MASK ACE_Event_Handler::READ_MASK
+#define REMOVE_MASK (ACE_Event_Handler::READ_MASK | ACE_Event_Handler::DONT_CALL)
+
+/* Our constructor still doesn't really do anything.  We simply
+   initialize the acceptor pointer to "null" and get our current
+   thread id.  The static self() method of ACE_Thread will return you
+   a thread id native to your platform.  */
 Client_Handler::Client_Handler (void)
- : client_acceptor_(0)
-  ,creator_(ACE_Thread::self())
+  : client_acceptor_(0),
+    creator_ (ACE_Thread::self ())
 {
 }
 
 Client_Handler::~Client_Handler (void)
 {
-    this->peer().close();
+  this->peer().close();
 }
 
-/*
-   Query our acceptor for the concurrency strategy.  Notice that we don't bother
-   to check that our acceptor pointer is valid.  That is proably a bad idea...
- */
-int Client_Handler::concurrency(void)
+/* Query our acceptor for the concurrency strategy.  Notice that we
+   don't bother to check that our acceptor pointer is valid.  That is
+   probably a bad idea...  */
+int 
+Client_Handler::concurrency(void)
 {
-	return this->client_acceptor()->concurrency();
+  return this->client_acceptor ()->concurrency ();
 }
 
-/*
-   And here we ask the acceptor about the thread pool.
- */
-Thread_Pool * Client_Handler::thread_pool(void)
+/* And here we ask the acceptor about the thread pool.  */
+Thread_Pool * 
+Client_Handler::thread_pool (void)
 {
-	return this->client_acceptor()->thread_pool();
+  return this->client_acceptor ()->thread_pool ();
 }
 
-/*
-   Back to our open() method.  This is straight out of Tutorial 6.  There's
-   nothing additional here for the thread-pool implementation.
- */
-int Client_Handler::open (void *_acceptor)
+/* Back to our open() method.  This is straight out of Tutorial 6.
+   There's nothing additional here for the thread-pool implementation.  */
+int 
+Client_Handler::open (void *acceptor)
 {
-  client_acceptor( (Client_Acceptor *) _acceptor );
+  client_acceptor ((Client_Acceptor *) acceptor);
 
-  if( concurrency() == Client_Acceptor::thread_per_connection_ )
-  {
-  	return this->activate();
-  }
+  if (concurrency () == Client_Acceptor::thread_per_connection_)
+    return this->activate ();
 
   this->reactor (client_acceptor()->reactor ());
 
   ACE_INET_Addr addr;
 
   if (this->peer ().get_remote_addr (addr) == -1)
-    {
-      return -1;
-    }
+    return -1;
 
-  if (this->reactor ()->register_handler (this, REGISTER_MASK) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
-    }
-
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) connected with %s\n", addr.get_host_name ()));
+  if (this->reactor ()->register_handler (this,
+                                          REGISTER_MASK) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "(%P|%t) can't register with reactor\n"),
+                      -1);
 
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) connected with %s\n",
+              addr.get_host_name ()));
   return 0;
 }
 
-/*
-   The destroy() method will remove us from the reactor (with the
+/* The destroy() method will remove us from the reactor (with the
    DONT_CALL flag set!) and then free our memory.  This allows us to
-   be closed from outside of the reactor context without any danger.
- */
-void Client_Handler::destroy (void)
+   be closed from outside of the reactor context without any danger.  */
+void 
+Client_Handler::destroy (void)
 {
-    this->reactor ()->remove_handler (this, REMOVE_MASK );
-    delete this;
+  this->reactor ()->remove_handler (this, REMOVE_MASK);
+  delete this;
 }
 
-/*
-  As mentioned in the header, the typical way to close an object in a
-  threaded context is to invoke it's close() method.
-*/
-int Client_Handler::close(u_long flags)
+/* As mentioned in the header, the typical way to close an object in a
+  threaded context is to invoke its close() method.  */
+int 
+Client_Handler::close (u_long flags)
 {
-    /*
-      We use the destroy() method to clean up after ourselves.
-      That will take care of removing us from the reactor and then
-      freeing our memory.
+  /*
+    We use the destroy() method to clean up after ourselves.
+    That will take care of removing us from the reactor and then
+    freeing our memory.
     */
-    this->destroy();
+  this->destroy ();
     
-	/*
-	   Don't forward the close() to the baseclass!  handle_close() above has
-	   already taken care of delete'ing.  Forwarding close() would cause that
-	   to happen again and things would get really ugly at that point!
-	 */
-	return 0;
+  /* Don't forward the close() to the baseclass!  handle_close() above
+    has already taken care of delete'ing.  Forwarding close() would
+    cause that to happen again and things would get really ugly at
+    that point!  */
+  return 0;
 }
 
-/*
-  We will be called when handle_input() returns -1.  That's our queue
-  to delete ourselves to prevent memory leaks.
- */
-int Client_Handler::handle_close (ACE_HANDLE _handle, ACE_Reactor_Mask _mask)
+/* We will be called when handle_input() returns -1.  That's our cue
+  to delete ourselves to prevent memory leaks.  */
+int 
+Client_Handler::handle_close (ACE_HANDLE handle,
+                              ACE_Reactor_Mask mask)
 {
-  ACE_UNUSED_ARG (_handle);
-  ACE_UNUSED_ARG (_mask);
+  ACE_UNUSED_ARG (handle);
+  ACE_UNUSED_ARG (mask);
 
   delete this;
   
   return 0;
 }
 
-/*
-   In the open() method, we registered with the reactor and requested to be
-   notified when there is data to be read.  When the reactor sees that activity
-   it will invoke this handle_input() method on us.  As I mentioned, the _handle
-   parameter isn't useful to us but it narrows the list of methods the reactor
-   has to worry about and the list of possible virtual functions we would have
-   to override. 
-
-   You've read that much before...  Now we have to do some extra stuff in case
-   we're using the thread-pool implementation.  If we're called by our creator
-   thread then we must be in the reactor.  In that case, we arrange to be put
-   into the thread pool.  If we're not in the creator thread then we must be
-   in the thread pool and we can do some work.
- */
-int Client_Handler::handle_input (ACE_HANDLE _handle)
+/* In the open() method, we registered with the reactor and requested
+   to be notified when there is data to be read.  When the reactor
+   sees that activity it will invoke this handle_input() method on us.
+   As I mentioned, the handle parameter isn't useful to us but it
+   narrows the list of methods the reactor has to worry about and the
+   list of possible virtual functions we would have to override.
+
+   You've read that much before...  Now we have to do some extra stuff
+   in case we're using the thread-pool implementation.  If we're
+   called by our creator thread then we must be in the reactor.  In
+   that case, we arrange to be put into the thread pool.  If we're not
+   in the creator thread then we must be in the thread pool and we can
+   do some work.  */
+int 
+Client_Handler::handle_input (ACE_HANDLE handle)
 {
-  ACE_UNUSED_ARG (_handle);
+  ACE_UNUSED_ARG (handle);
 
-  /*
-     Check our strategy.  If we're using the thread pool and we're in the creation
-	 thread then we know we were called by the reactor.
-   */
-  if( concurrency() == Client_Acceptor::thread_pool_ )
-  {
-	if( ACE_OS::thr_equal(ACE_Thread::self(),creator_) )
+  /* Check our strategy.  If we're using the thread pool and we're in
+    the creation thread then we know we were called by the reactor.  */
+  if (concurrency () == Client_Acceptor::thread_pool_)
+    {
+      if (ACE_OS::thr_equal (ACE_Thread::self(),
+                             creator_))
 	{
-		/*
-		   Remove ourselves from the reactor and ask to be put into the thread pool's
-		   queue of work.  (You should be able to use suspend_handler() but I've had
-		   problems with that.)
-
-           By removing ourselves from the reactor, we're guaranteed
-           that we won't be called back until the thread pool picks us 
-           up out of the queue.  If we didn't remove ourselves, then
-           the reactor would continue to invoke handle_input() and we
-           don't want that to happen.
-		 */
-  		this->reactor()->remove_handler( this, REMOVE_MASK );
-		return this->thread_pool()->enqueue(this);
+          /* Remove ourselves from the reactor and ask to be put into
+            the thread pool's queue of work.  (You should be able to
+            use suspend_handler() but I've had problems with that.)
+
+            By removing ourselves from the reactor, we're guaranteed
+            that we won't be called back until the thread pool picks
+            us up out of the queue.  If we didn't remove ourselves,
+            then the reactor would continue to invoke handle_input()
+            and we don't want that to happen.  */
+          this->reactor ()->remove_handler (this, REMOVE_MASK);
+          return this->thread_pool ()->enqueue (this);
 	}
-  }
+    }
 
-  /*
-     Any strategy other than thread-per-connection will eventually get here.  If we're in the
-	 single-threaded implementation or the thread-pool, we still have to pass this way.
-   */
+  /* Any strategy other than thread-per-connection will eventually get
+    here.  If we're in the single-threaded implementation or the
+    thread-pool, we still have to pass this way.  */
 
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
+  char buf[BUFSIZ];
 
-  /*
-     Invoke the process() method to do the work but save it's return value instead
-	 of returning it immediately.
-   */
+  /* Invoke the process() method to do the work but save its return
+    value instead of returning it immediately.  */
 
-  int rval = this->process(buf,sizeof(buf));
+  int rval = this->process (buf, sizeof (buf));
 
-  /*
-     Now, we look again to see if we're in the thread-pool implementation.  If so then we
-	 need to re-register ourselves with the reactor so that we can get more work when it
-	 is available.  (If suspend_handler() worked then we would use resume_handler() here.)
-   */
-  if( concurrency() == Client_Acceptor::thread_pool_ )
-  {
-	if( rval != -1 )
-	{
-        /*
-          If we don't remember to re-register ourselves, then we won't 
-          be able to respond to any future client requests.
-         */
-		this->reactor()->register_handler( this, REGISTER_MASK );
-	}
-  }
+  /* Now, we look again to see if we're in the thread-pool
+    implementation.  If so then we need to re-register ourselves with
+    the reactor so that we can get more work when it is available.
+    (If suspend_handler() worked then we would use resume_handler()
+    here.)  */
+  if (concurrency () == Client_Acceptor::thread_pool_)
+    {
+      if (rval != -1)
+        /* If we don't remember to re-register ourselves, then we
+            won't be able to respond to any future client requests.  */
+        this->reactor ()->register_handler (this,
+                                            REGISTER_MASK);
+    }
 
-  /*
-     Return the result of process()
-   */
-  return(rval);
+  /* Return the result of process() */
+  return rval;
 }
 
-/*
-   Remember that when we leave our svc() method, the framework will take care
-   of calling our close() method so that we can cleanup after ourselves.
- */
-int Client_Handler::svc(void)
+/* Remember that when we leave our svc() method, the framework will
+   take care of calling our close() method so that we can clean up
+   after ourselves.  */
+int 
+Client_Handler::svc (void)
 {
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
-
-  while( 1 )
-  {
-     if( this->process(buf,sizeof(buf)) == -1 )
-	 {
-	   return(-1);
-     }
-  }
-
-  return(0);
+  char buf[BUFSIZ];
+
+  while (1)
+    if (this->process (buf, sizeof (buf)) == -1)
+      return -1;
+
+  return 0;
 }
 
-/*
-   Once again, we see that the application-level logic has not been at all affected
-   by our choice of threading models.  Of course, I'm not sharing data between threads
-   or anything.  We'll leave locking issues for a later tutorial.
- */
-int Client_Handler::process (char *_rdbuf, int _rdbuf_len)
+/* Once again, we see that the application-level logic has not been at
+   all affected by our choice of threading models.  Of course, I'm not
+   sharing data between threads or anything.  We'll leave locking
+   issues for a later tutorial.  */
+int 
+Client_Handler::process (char *rdbuf,
+                         int rdbuf_len)
 {
-  switch (this->peer ().recv (_rdbuf, _rdbuf_len))
+  switch (this->peer ().recv (rdbuf, rdbuf_len))
     {
     case -1:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) %p bad read\n",
+                         "client"),
+                        -1);
     case 0:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing daemon (fd = %d)\n", this->get_handle ()), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
     default:
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", _rdbuf));
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  rdbuf));
     }
 
   return 0;
diff --git a/docs/tutorials/007/page02.html b/docs/tutorials/007/page02.html
index fabab39642d..cea8488437e 100644
--- a/docs/tutorials/007/page02.html
+++ b/docs/tutorials/007/page02.html
@@ -18,21 +18,17 @@
 
 // $Id$
 
-/*
-   We try to keep main() very simple.  One of the ways we do that is to push
-   much of the complicated stuff into worker objects.  In this case, we only 
-   need to include the acceptor header in our main source file.  We let it
-   worry about the "real work".     
- */
+/* We try to keep main() very simple.  One of the ways we do that is
+   to push much of the complicated stuff into worker objects.  In this
+   case, we only need to include the acceptor header in our main
+   source file.  We let it worry about the "real work".  */
 
 #include "client_acceptor.h"
 
-/*
-   As before, we create a simple signal handler that will set our finished
-   flag.  There are, of course, more elegant ways to handle program shutdown 
-   requests but that isn't really our focus right now, so we'll just do the
-   easiest thing.     
- */
+/* As before, we create a simple signal handler that will set our
+   finished flag.  There are, of course, more elegant ways to handle
+   program shutdown requests but that isn't really our focus right
+   now, so we'll just do the easiest thing.  */
 
 static sig_atomic_t finished = 0;
 extern "C" void handler (int)
@@ -40,86 +36,81 @@ extern "C" void handler (int)
   finished = 1;
 }
 
-/*
-   A server has to listen for clients at a known TCP/IP port.  The default ACE
-   port is 10002 (at least on my system) and that's good enough for what  we
-   want to do here.  Obviously, a more robust application would take a command
-   line parameter or read from a configuration file or do some other  clever
-   thing.  Just like the signal handler above, though, that's what we want to
-   focus on, so we're taking the easy way out.     
- */
+/* A server has to listen for clients at a known TCP/IP port.  The
+   default ACE port is 10002 (at least on my system) and that's good
+   enough for what we want to do here.  Obviously, a more robust
+   application would take a command line parameter or read from a
+   configuration file or do some other clever thing.  Just like the
+   signal handler above, though, that's what we want to focus on, so
+   we're taking the easy way out.  */
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
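+
+/* A hedged sketch (not part of the tutorial code): a more robust
+   server might let the port be set from the command line inside
+   main(), for instance
+
+     u_short port = argc > 1
+                      ? ACE_OS::atoi (argv[1])
+                      : ACE_DEFAULT_SERVER_PORT;
+
+   Error checking of the argument is left out of the sketch.  */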
 
-/*
-   Finally, we get to main.  Some C++ compilers will complain loudly if your
-   function signature doesn't match the prototype.  Even though we're not 
-   going to use the parameters, we still  have to specify them.     
- */
+/* Finally, we get to main.  Some C++ compilers will complain loudly
+   if your function signature doesn't match the prototype.  Even
+   though we're not going to use the parameters, we still have to
+   specify them.  */
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-/*
-   In our earlier servers, we used a global pointer to get to the reactor. I've 
-   never really liked that idea, so I've moved it into main() this time. When
-   we  get to the Client_Handler object you'll see how we manage to get a
-   pointer back to this reactor.     
- */
+  /* In our earlier servers, we used a global pointer to get to the
+    reactor. I've never really liked that idea, so I've moved it into
+    main() this time. When we get to the Client_Handler object you'll
+    see how we manage to get a pointer back to this reactor.  */
   ACE_Reactor reactor;
 
-  /*
-     The acceptor will take care of letting clients connect to us.  It will
-     also arrange for a  Client_Handler to be created for each new client.
-     Since we're only going to listen at one  TCP/IP port, we only need one
-     acceptor.  If we wanted, though, we could create several of these  and
-     listen at several ports.  (That's what we would do if we wanted to rewrite 
-     inetd for  instance.)     
-   */
+  /* The acceptor will take care of letting clients connect to us.  It
+    will also arrange for a Client_Handler to be created for each new
+    client.  Since we're only going to listen at one TCP/IP port, we
+    only need one acceptor.  If we wanted, though, we could create
+    several of these and listen at several ports.  (That's what we
+    would do if we wanted to rewrite inetd for instance.)  */
   Client_Acceptor peer_acceptor;
 
-  /*
-     Create an ACE_INET_Addr that represents our endpoint of a connection. We
-     then open our acceptor object with that Addr.  Doing so tells the acceptor 
-     where to listen for connections.  Servers generally listen at "well known" 
-     addresses.  If not, there must be some mechanism by which the client is
-     informed of the server's address.
+  /* Create an ACE_INET_Addr that represents our endpoint of a
+    connection. We then open our acceptor object with that Addr.
+    Doing so tells the acceptor where to listen for connections.
+    Servers generally listen at "well known" addresses.  If not, there
+    must be some mechanism by which the client is informed of the
+    server's address.
 
-     Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.  This
-     technique is used over and over again in our tutorials.    
-   */
+    Note how ACE_ERROR_RETURN is used if we fail to open the acceptor.
+    This technique is used over and over again in our tutorials.  */
   if (peer_acceptor.open (ACE_INET_Addr (PORT), &reactor) == -1)
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-
-  /*
-     Install our signal handler.  You can actually register signal handlers
-     with the reactor.  You might do that when the signal handler is
-     responsible for performing "real" work.  Our simple flag-setter doesn't
-     justify deriving from ACE_Event_Handler and providing a callback function
-     though.    
-   */
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Install our signal handler.  You can actually register signal
+    handlers with the reactor.  You might do that when the signal
+    handler is responsible for performing "real" work.  Our simple
+    flag-setter doesn't justify deriving from ACE_Event_Handler and
+    providing a callback function though.  */
   ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT);
 
-  /*
-     Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.  It's a
-     handy way to generate uniform debug output from your program.    
-   */
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n"));
-
-  /*
-     This will loop "forever" invoking the handle_events() method of our
-     reactor. handle_events() watches for activity on any registered handlers
-     and invokes their appropriate callbacks when necessary.  Callback-driven
-     programming is a big thing in ACE, you should get used to it. If the
-     signal handler catches something, the finished flag will be set and we'll
-     exit.  Conveniently enough, handle_events() is also interrupted by signals 
-     and will exit back to the while() loop.  (If you want your event loop to
-     not be interrupted by signals, checkout the <i>restart</i> flag on the
-     open() method of ACE_Reactor if you're interested.)    
-   */
+  /* Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit.
+    It's a handy way to generate uniform debug output from your
+    program.  */
+  ACE_DEBUG ((LM_DEBUG, 
+              "(%P|%t) starting up server daemon\n"));
+
+  /* This will loop "forever" invoking the handle_events() method of
+    our reactor. handle_events() watches for activity on any
+    registered handlers and invokes their appropriate callbacks when
+    necessary.  Callback-driven programming is a big thing in ACE, you
+    should get used to it. If the signal handler catches something,
+    the finished flag will be set and we'll exit.  Conveniently
+    enough, handle_events() is also interrupted by signals and will
+    exit back to the while() loop.  (If you want your event loop to
+    not be interrupted by signals, check out the <i>restart</i> flag on
+    the open() method of ACE_Reactor if you're interested.)  */
   while (!finished)
-  	reactor.handle_events ();
+    reactor.handle_events ();
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server daemon\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) shutting down server daemon\n"));
  
   return 0;
 }
diff --git a/docs/tutorials/007/page04.html b/docs/tutorials/007/page04.html
index 7a693d64a6f..df7d777c660 100644
--- a/docs/tutorials/007/page04.html
+++ b/docs/tutorials/007/page04.html
@@ -19,71 +19,60 @@ I finally had enough code to move it out of the header.
 


-
 // $Id$
 
 #include "client_acceptor.h"
 
-/*
-   Construct ourselves with the chosen concurrency strategy.  Notice that we also
-   set our Thread_Pool reference to our private instance.
- */
-Client_Acceptor::Client_Acceptor( int _concurrency )
- :  concurrency_(_concurrency)
-   ,the_thread_pool_(private_thread_pool_)
+/* Construct ourselves with the chosen concurrency strategy.  Notice
+   that we also set our Thread_Pool reference to our private instance.  */
+Client_Acceptor::Client_Acceptor (int concurrency)
+  : concurrency_ (concurrency),
+    the_thread_pool_ (private_thread_pool_)
 {
 }
 
-/*
-   Construct ourselves with a reference to somebody else' Thread_Pool.  Obvioulsy
-   our concurrency strategy is "thread_pool_" at this point.
- */
-Client_Acceptor::Client_Acceptor( Thread_Pool & _thread_pool )
- :  concurrency_(thread_pool_)
-   ,the_thread_pool_(_thread_pool)
+/* Construct ourselves with a reference to somebody else's Thread_Pool.
+   Obviously our concurrency strategy is "thread_pool_" at this point.  */
+Client_Acceptor::Client_Acceptor (Thread_Pool &thread_pool)
+  : concurrency_ (thread_pool),
+    the_thread_pool_ (thread_pool)
 {
 }
 
-/*
-   When we're destructed, we may need to cleanup after ourselves.  If we're running
-   with a thread pool that we own, it is up to us to close it down.
- */
-Client_Acceptor::~Client_Acceptor( void )
+/* When we're destructed, we may need to clean up after ourselves.  If
+   we're running with a thread pool that we own, it is up to us to
+   close it down.  */
+Client_Acceptor::~Client_Acceptor (void)
 {
-	if( this->concurrency() == thread_pool_ && thread_pool_is_private() )
-	{
-		thread_pool()->close();
-	}
+  if (this->concurrency() == thread_pool_ && thread_pool_is_private ())
+    thread_pool ()->close ();
 }
 
-/*
-   Similar to the destructor (and close() below) it is necessary for us to open the
-   thread pool in some circumstances.
+/* Similar to the destructor (and close() below) it is necessary for
+   us to open the thread pool in some circumstances.
 
-   Notice how we delegate most of the open() work to the open() method of our baseclass.
- */
-int Client_Acceptor::open( const ACE_INET_Addr & _addr, ACE_Reactor * _reactor, int _pool_size )
+   Notice how we delegate most of the open() work to the open() method
+   of our baseclass.  */
+int 
+Client_Acceptor::open (const ACE_INET_Addr &addr,
+                       ACE_Reactor *reactor,
+                       int pool_size)
 {
-	if( this->concurrency() == thread_pool_ && thread_pool_is_private() )
-	{
-		thread_pool()->open(_pool_size);
-	}
+  if (this->concurrency() == thread_pool_ && thread_pool_is_private ())
+    thread_pool ()->open (pool_size);
 
-	return inherited::open(_addr,_reactor);
+  return inherited::open (addr, reactor);
 }
 
-/*
-   Here again we find that we have to manage the thread pool.  Like open() we also delegate
-   the other work to our baseclass.
- */
-int Client_Acceptor::close(void)
+/* Here again we find that we have to manage the thread pool.  Like
+   open() we also delegate the other work to our baseclass.  */
+int 
+Client_Acceptor::close (void)
 {
-	if( this->concurrency() == thread_pool_ && thread_pool_is_private() )
-	{
-		thread_pool()->close();
-	}
+  if (this->concurrency() == thread_pool_ && thread_pool_is_private ())
+    thread_pool ()->close ();
 
-	return inherited::close();
+  return inherited::close ();
 }
 
 
diff --git a/docs/tutorials/007/page06.html b/docs/tutorials/007/page06.html index 5781faa76c4..7ca248b2961 100644 --- a/docs/tutorials/007/page06.html +++ b/docs/tutorials/007/page06.html @@ -20,251 +20,231 @@ though.


-
 // $Id$
 
-/*
-   Since this is the third time we've seen most of this, I'm going to strip out almost
-   all of the comments that you've already seen.  That way, you can concentrate on the
-   new items.
- */
+/* Since this is the third time we've seen most of this, I'm going to
+   strip out almost all of the comments that you've already seen.
+   That way, you can concentrate on the new items.  */
 
 #include "client_acceptor.h"
 #include "client_handler.h"
 
-/*
-   We're going to be registering and unregistering a couple of times.  To make sure that
-   we use the same flags every time, I've created these handy macros.
- */
-#define	REGISTER_MASK	 ACE_Event_Handler::READ_MASK
-#define REMOVE_MASK		(ACE_Event_Handler::READ_MASK | ACE_Event_Handler::DONT_CALL)
-
-/*
-   Our constructor still doesn't really do anything.  We simply initialize the acceptor
-   pointer to "null" and get our current thread id.  The static self() method of ACE_Thread
-   will return you a thread id native to your platform.
- */
+/* We're going to be registering and unregistering a couple of times.
+   To make sure that we use the same flags every time, I've created
+   these handy macros.  */
+#define	REGISTER_MASK ACE_Event_Handler::READ_MASK
+#define REMOVE_MASK (ACE_Event_Handler::READ_MASK | ACE_Event_Handler::DONT_CALL)
+
+/* Our constructor still doesn't really do anything.  We simply
+   initialize the acceptor pointer to "null" and get our current
+   thread id.  The static self() method of ACE_Thread will return you
+   a thread id native to your platform.  */
 Client_Handler::Client_Handler (void)
- : client_acceptor_(0)
-  ,creator_(ACE_Thread::self())
+  : client_acceptor_(0),
+    creator_ (ACE_Thread::self ())
 {
 }
 
 Client_Handler::~Client_Handler (void)
 {
-    this->peer().close();
+  this->peer().close();
 }
 
-/*
-   Query our acceptor for the concurrency strategy.  Notice that we don't bother
-   to check that our acceptor pointer is valid.  That is proably a bad idea...
- */
-int Client_Handler::concurrency(void)
+/* Query our acceptor for the concurrency strategy.  Notice that we
+   don't bother to check that our acceptor pointer is valid.  That is
+   probably a bad idea...  */
+int 
+Client_Handler::concurrency(void)
 {
-	return this->client_acceptor()->concurrency();
+  return this->client_acceptor ()->concurrency ();
 }
 
-/*
-   And here we ask the acceptor about the thread pool.
- */
-Thread_Pool * Client_Handler::thread_pool(void)
+/* And here we ask the acceptor about the thread pool.  */
+Thread_Pool * 
+Client_Handler::thread_pool (void)
 {
-	return this->client_acceptor()->thread_pool();
+  return this->client_acceptor ()->thread_pool ();
 }
 
-/*
-   Back to our open() method.  This is straight out of Tutorial 6.  There's
-   nothing additional here for the thread-pool implementation.
- */
-int Client_Handler::open (void *_acceptor)
+/* Back to our open() method.  This is straight out of Tutorial 6.
+   There's nothing additional here for the thread-pool implementation.  */
+int 
+Client_Handler::open (void *acceptor)
 {
-  client_acceptor( (Client_Acceptor *) _acceptor );
+  client_acceptor ((Client_Acceptor *) acceptor);
 
-  if( concurrency() == Client_Acceptor::thread_per_connection_ )
-  {
-  	return this->activate();
-  }
+  if (concurrency () == Client_Acceptor::thread_per_connection_)
+    return this->activate ();
 
   this->reactor (client_acceptor()->reactor ());
 
   ACE_INET_Addr addr;
 
   if (this->peer ().get_remote_addr (addr) == -1)
-    {
-      return -1;
-    }
+    return -1;
 
-  if (this->reactor ()->register_handler (this, REGISTER_MASK) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) can't register with reactor\n"), -1);
-    }
-
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) connected with %s\n", addr.get_host_name ()));
+  if (this->reactor ()->register_handler (this,
+                                          REGISTER_MASK) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "(%P|%t) can't register with reactor\n"),
+                      -1);
 
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) connected with %s\n",
+              addr.get_host_name ()));
   return 0;
 }
 
-/*
-   The destroy() method will remove us from the reactor (with the
+/* The destroy() method will remove us from the reactor (with the
    DONT_CALL flag set!) and then free our memory.  This allows us to
-   be closed from outside of the reactor context without any danger.
- */
-void Client_Handler::destroy (void)
+   be closed from outside of the reactor context without any danger.  */
+void 
+Client_Handler::destroy (void)
 {
-    this->reactor ()->remove_handler (this, REMOVE_MASK );
-    delete this;
+  this->reactor ()->remove_handler (this, REMOVE_MASK);
+  delete this;
 }
 
-/*
-  As mentioned in the header, the typical way to close an object in a
-  threaded context is to invoke it's close() method.
-*/
-int Client_Handler::close(u_long flags)
+/* As mentioned in the header, the typical way to close an object in a
+  threaded context is to invoke its close() method.  */
+int 
+Client_Handler::close (u_long flags)
 {
-    /*
-      We use the destroy() method to clean up after ourselves.
-      That will take care of removing us from the reactor and then
-      freeing our memory.
+  /*
+    We use the destroy() method to clean up after ourselves.
+    That will take care of removing us from the reactor and then
+    freeing our memory.
     */
-    this->destroy();
+  this->destroy ();
     
-	/*
-	   Don't forward the close() to the baseclass!  handle_close() above has
-	   already taken care of delete'ing.  Forwarding close() would cause that
-	   to happen again and things would get really ugly at that point!
-	 */
-	return 0;
+  /* Don't forward the close() to the baseclass!  handle_close() above
+    has already taken care of delete'ing.  Forwarding close() would
+    cause that to happen again and things would get really ugly at
+    that point!  */
+  return 0;
 }
 
-/*
-  We will be called when handle_input() returns -1.  That's our queue
-  to delete ourselves to prevent memory leaks.
- */
-int Client_Handler::handle_close (ACE_HANDLE _handle, ACE_Reactor_Mask _mask)
+/* We will be called when handle_input() returns -1.  That's our cue
+  to delete ourselves to prevent memory leaks.  */
+int 
+Client_Handler::handle_close (ACE_HANDLE handle,
+                              ACE_Reactor_Mask mask)
 {
-  ACE_UNUSED_ARG (_handle);
-  ACE_UNUSED_ARG (_mask);
+  ACE_UNUSED_ARG (handle);
+  ACE_UNUSED_ARG (mask);
 
   delete this;
   
   return 0;
 }
 
-/*
-   In the open() method, we registered with the reactor and requested to be
-   notified when there is data to be read.  When the reactor sees that activity
-   it will invoke this handle_input() method on us.  As I mentioned, the _handle
-   parameter isn't useful to us but it narrows the list of methods the reactor
-   has to worry about and the list of possible virtual functions we would have
-   to override. 
-
-   You've read that much before...  Now we have to do some extra stuff in case
-   we're using the thread-pool implementation.  If we're called by our creator
-   thread then we must be in the reactor.  In that case, we arrange to be put
-   into the thread pool.  If we're not in the creator thread then we must be
-   in the thread pool and we can do some work.
- */
-int Client_Handler::handle_input (ACE_HANDLE _handle)
+/* In the open() method, we registered with the reactor and requested
+   to be notified when there is data to be read.  When the reactor
+   sees that activity it will invoke this handle_input() method on us.
+   As I mentioned, the handle parameter isn't useful to us but it
+   narrows the list of methods the reactor has to worry about and the
+   list of possible virtual functions we would have to override.
+
+   You've read that much before...  Now we have to do some extra stuff
+   in case we're using the thread-pool implementation.  If we're
+   called by our creator thread then we must be in the reactor.  In
+   that case, we arrange to be put into the thread pool.  If we're not
+   in the creator thread then we must be in the thread pool and we can
+   do some work.  */
+int 
+Client_Handler::handle_input (ACE_HANDLE handle)
 {
-  ACE_UNUSED_ARG (_handle);
+  ACE_UNUSED_ARG (handle);
 
-  /*
-     Check our strategy.  If we're using the thread pool and we're in the creation
-	 thread then we know we were called by the reactor.
-   */
-  if( concurrency() == Client_Acceptor::thread_pool_ )
-  {
-	if( ACE_OS::thr_equal(ACE_Thread::self(),creator_) )
+  /* Check our strategy.  If we're using the thread pool and we're in
+    the creation thread then we know we were called by the reactor.  */
+  if (concurrency () == Client_Acceptor::thread_pool_)
+    {
+      if (ACE_OS::thr_equal (ACE_Thread::self(),
+                             creator_))
 	{
-		/*
-		   Remove ourselves from the reactor and ask to be put into the thread pool's
-		   queue of work.  (You should be able to use suspend_handler() but I've had
-		   problems with that.)
-
-           By removing ourselves from the reactor, we're guaranteed
-           that we won't be called back until the thread pool picks us 
-           up out of the queue.  If we didn't remove ourselves, then
-           the reactor would continue to invoke handle_input() and we
-           don't want that to happen.
-		 */
-  		this->reactor()->remove_handler( this, REMOVE_MASK );
-		return this->thread_pool()->enqueue(this);
+          /* Remove ourselves from the reactor and ask to be put into
+            the thread pool's queue of work.  (You should be able to
+            use suspend_handler() but I've had problems with that.)
+
+            By removing ourselves from the reactor, we're guaranteed
+            that we won't be called back until the thread pool picks
+            us up out of the queue.  If we didn't remove ourselves,
+            then the reactor would continue to invoke handle_input()
+            and we don't want that to happen.  */
+          this->reactor ()->remove_handler (this, REMOVE_MASK);
+          return this->thread_pool ()->enqueue (this);
 	}
-  }
+    }
 
-  /*
-     Any strategy other than thread-per-connection will eventually get here.  If we're in the
-	 single-threaded implementation or the thread-pool, we still have to pass this way.
-   */
+  /* Any strategy other than thread-per-connection will eventually get
+    here.  If we're in the single-threaded implementation or the
+    thread-pool, we still have to pass this way.  */
 
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
+  char buf[BUFSIZ];
 
-  /*
-     Invoke the process() method to do the work but save it's return value instead
-	 of returning it immediately.
-   */
+  /* Invoke the process() method to do the work but save its return
+    value instead of returning it immediately.  */
 
-  int rval = this->process(buf,sizeof(buf));
+  int rval = this->process (buf, sizeof (buf));
 
-  /*
-     Now, we look again to see if we're in the thread-pool implementation.  If so then we
-	 need to re-register ourselves with the reactor so that we can get more work when it
-	 is available.  (If suspend_handler() worked then we would use resume_handler() here.)
-   */
-  if( concurrency() == Client_Acceptor::thread_pool_ )
-  {
-	if( rval != -1 )
-	{
-        /*
-          If we don't remember to re-register ourselves, then we won't 
-          be able to respond to any future client requests.
-         */
-		this->reactor()->register_handler( this, REGISTER_MASK );
-	}
-  }
+  /* Now, we look again to see if we're in the thread-pool
+    implementation.  If so then we need to re-register ourselves with
+    the reactor so that we can get more work when it is available.
+    (If suspend_handler() worked then we would use resume_handler()
+    here.)  */
+  if (concurrency () == Client_Acceptor::thread_pool_)
+    {
+      if (rval != -1)
+        /* If we don't remember to re-register ourselves, then we
+            won't be able to respond to any future client requests.  */
+        this->reactor ()->register_handler (this,
+                                            REGISTER_MASK);
+    }
 
-  /*
-     Return the result of process()
-   */
-  return(rval);
+  /* Return the result of process() */
+  return rval;
 }
 
-/*
-   Remember that when we leave our svc() method, the framework will take care
-   of calling our close() method so that we can cleanup after ourselves.
- */
-int Client_Handler::svc(void)
+/* Remember that when we leave our svc() method, the framework will
+   take care of calling our close() method so that we can clean up
+   after ourselves.  */
+int 
+Client_Handler::svc (void)
 {
-  char buf[128];
-  ACE_OS::memset (buf, 0, sizeof (buf));
-
-  while( 1 )
-  {
-     if( this->process(buf,sizeof(buf)) == -1 )
-	 {
-	   return(-1);
-     }
-  }
-
-  return(0);
+  char buf[BUFSIZ];
+
+  while (1)
+    if (this->process (buf, sizeof (buf)) == -1)
+      return -1;
+
+  return 0;
 }
 
-/*
-   Once again, we see that the application-level logic has not been at all affected
-   by our choice of threading models.  Of course, I'm not sharing data between threads
-   or anything.  We'll leave locking issues for a later tutorial.
- */
-int Client_Handler::process (char *_rdbuf, int _rdbuf_len)
+/* Once again, we see that the application-level logic has not been at
+   all affected by our choice of threading models.  Of course, I'm not
+   sharing data between threads or anything.  We'll leave locking
+   issues for a later tutorial.  */
+int 
+Client_Handler::process (char *rdbuf,
+                         int rdbuf_len)
 {
-  switch (this->peer ().recv (_rdbuf, _rdbuf_len))
+  switch (this->peer ().recv (rdbuf, rdbuf_len))
     {
     case -1:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) %p bad read\n", "client"), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) %p bad read\n",
+                         "client"),
+                        -1);
     case 0:
-      ACE_ERROR_RETURN ((LM_ERROR, "(%P|%t) closing daemon (fd = %d)\n", this->get_handle ()), -1);
+      ACE_ERROR_RETURN ((LM_ERROR,
+                         "(%P|%t) closing daemon (fd = %d)\n",
+                         this->get_handle ()),
+                        -1);
     default:
-      ACE_DEBUG ((LM_DEBUG, "(%P|%t) from client: %s", _rdbuf));
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) from client: %s",
+                  rdbuf));
     }
 
   return 0;
diff --git a/docs/tutorials/007/page08.html b/docs/tutorials/007/page08.html
index dbbfdb76a15..18ee6968ece 100644
--- a/docs/tutorials/007/page08.html
+++ b/docs/tutorials/007/page08.html
@@ -19,281 +19,256 @@ where we have the Thread_Pool object implementation.
 


-
 // $Id$
 
 #include "thread_pool.h"
 
-/*
-   We need this header so that we can invoke handle_input() on the objects we dequeue.
- */
+/* We need this header so that we can invoke handle_input() on the
+   objects we dequeue.  */
 #include "ace/Event_Handler.h"
 
-
-/*
-   All we do here is initialize our active thread counter.
- */
-Thread_Pool::Thread_Pool(void)
- : active_threads_(0)
+/* All we do here is initialize our active thread counter.  */
+Thread_Pool::Thread_Pool (void)
+  : active_threads_ (0)
 {
 }
 
-/*
-   Our open() method is a thin disguise around the ACE_Task<> activate() method.  By
-   hiding activate() in this way, the users of Thread_Pool don't have to worry about
-   the thread configuration flags.
- */
-int Thread_Pool::open( int _pool_size )
+/* Our open() method is a thin disguise around the ACE_Task<>
+   activate() method.  By hiding activate() in this way, the users of
+   Thread_Pool don't have to worry about the thread configuration
+   flags.  */
+int 
+Thread_Pool::open (int pool_size)
 {
-  return this->activate(THR_NEW_LWP,_pool_size);
+  return this->activate (THR_NEW_LWP, pool_size);
 }
 
-/*
-   Closing the thread pool can be a tricky exercise.  I've decided to take an easy approach
-   and simply enqueue a secret message for each thread we have active.
- */
-int Thread_Pool::close( u_long flags )
+/* Closing the thread pool can be a tricky exercise.  I've decided to
+   take an easy approach and simply enqueue a secret message for each
+   thread we have active.  */
+int 
+Thread_Pool::close (u_long flags)
 {
-	ACE_UNUSED_ARG(flags);
-
-	/*
-	   Find out how many threads are currently active
-	 */
-	int counter = active_threads_.value();
-
-	/*
-	   For each one of the active threads, enqueue a "null" event handler.  Below, we'll
-	   teach our svc() method that "null" means "shutdown".
-	 */
-	while( counter-- )
-	{
-		this->enqueue( 0 );
-	}
-
-	/*
-	   As each svc() method exits, it will decrement the active thread counter.  We just wait
-	   here for it to reach zero.  Since we don't know how long it will take, we sleep for
-	   a quarter-second or so between tries.
-	 */
-	while( active_threads_.value() )
-	{
-		ACE_OS::sleep( ACE_Time_Value(0.25) );
-	}
-
-	return(0);
+  ACE_UNUSED_ARG (flags);
+
+  /* Find out how many threads are currently active */
+  int counter = active_threads_.value ();
+
+  /* For each one of the active threads, enqueue a "null" event
+    handler.  Below, we'll teach our svc() method that "null" means
+    "shutdown".  */
+  while (counter--)
+    this->enqueue (0);
+
+  /* As each svc() method exits, it will decrement the active thread
+    counter.  We just wait here for it to reach zero.  Since we don't
+    know how long it will take, we sleep for a quarter of a second
+    between tries.  */
+  while (active_threads_.value ())
+    ACE_OS::sleep (ACE_Time_Value (0, 250000));
+
+  return 0;
 }
 
-/*
-   When an object wants to do work in the pool, it should call the enqueue() method.
-   We introduce the ACE_Message_Block here but, unfortunately, we seriously misuse it.
- */
-int Thread_Pool::enqueue( ACE_Event_Handler * _handler )
+/* When an object wants to do work in the pool, it should call the
+   enqueue() method.  We introduce the ACE_Message_Block here but,
+   unfortunately, we seriously misuse it.  */
+int 
+Thread_Pool::enqueue (ACE_Event_Handler *handler)
 {
-	/*
-	   An ACE_Message_Block is a chunk of data.  You put them into an ACE_Message_Queue.
-	   ACE_Task<> has an ACE_Message_Queue built in.  In fact, the parameter to ACE_Task<>
-	   is passed directly to ACE_Message_Queue.  If you look back at our header file you'll
-	   see that we used ACE_MT_SYNCH as the parameter indicating that we want MultiThread
-	   Synch safety.  This allows us to safely put ACE_Message_Block objects into the
-	   message queue in one thread and take them out in another.
-	 */
-
-	/*
-	   An ACE_Message_Block wants to have char* data.  We don't have that.  We could
-	   cast our ACE_Event_Handler* directly to a char* but I wanted to be more explicit.
-	   Since casting pointers around is a dangerous thing, I've gone out of my way here
-	   to be very clear about what we're doing.
-
-	   First:  Cast the handler pointer to a void pointer.  You can't do any useful work
-	           on a void pointer, so this is a clear message that we're making the
-			   pointer unusable.
-
-	   Next:   Cast the void pointer to a char pointer that the ACE_Message_Block will accept.
-	 */
-	void * v_data = (void*)_handler;
-	char * c_data = (char*)v_data;
-
-	/*
-	   Construct a new ACE_Message_Block.  For efficiency, you might want to preallocate a
-	   stack of these and reuse them.  For simplicity, I'll just create what I need as I need it.
-	 */
-	ACE_Message_Block * mb = new ACE_Message_Block( c_data );
-
-	/*
-	   Our putq() method is a wrapper around one of the enqueue methods of the ACE_Message_Queue
-	   that we own.  Like all good methods, it returns -1 if it fails for some reason.
-	 */
-	if( this->putq(mb) == -1 )
-	{
-	  /*
-	     Another trait of the ACE_Message_Block objects is that they are reference counted.
-		 Since they're designed to be passed around between various objects in several threads
-		 we can't just delete them whenever we feel like it.  The release() method is similar
-		 to the destroy() method we've used elsewhere.  It watches the reference count and will
-		 delete the object when possible.
-	   */
-	  mb->release();
-	  return(-1);
-	}
-
-	return(0);
+  /* An ACE_Message_Block is a chunk of data.  You put them into an
+    ACE_Message_Queue.  ACE_Task<> has an ACE_Message_Queue built in.
+    In fact, the parameter to ACE_Task<> is passed directly to
+    ACE_Message_Queue.  If you look back at our header file you'll see
+    that we used ACE_MT_SYNCH as the parameter indicating that we want
+    MultiThread Synch safety.  This allows us to safely put
+    ACE_Message_Block objects into the message queue in one thread and
+    take them out in another.  */
+
+  /* An ACE_Message_Block wants to have char* data.  We don't have
+    that.  We could cast our ACE_Event_Handler* directly to a char*
+    but I wanted to be more explicit.  Since casting pointers around
+    is a dangerous thing, I've gone out of my way here to be very
+    clear about what we're doing.
+
+    First: Cast the handler pointer to a void pointer.  You can't do
+    any useful work on a void pointer, so this is a clear message that
+    we're making the pointer unusable.
+
+    Next: Cast the void pointer to a char pointer that the ACE_Message_Block will accept.  */
+  void *v_data = (void *) handler;
+  char *c_data = (char *) v_data;
+
+  ACE_Message_Block *mb;
+
+  /* Construct a new ACE_Message_Block.  For efficiency, you might
+    want to preallocate a stack of these and reuse them.  For
+    simplicity, I'll just create what I need as I need it.  */
+  ACE_NEW_RETURN (mb,
+                  ACE_Message_Block (c_data),
+                  -1);
+
+  /* Our putq() method is a wrapper around one of the enqueue methods
+    of the ACE_Message_Queue that we own.  Like all good methods, it
+    returns -1 if it fails for some reason.  */
+  if (this->putq (mb) == -1)
+    {
+      /* Another trait of the ACE_Message_Block objects is that they
+        are reference counted.  Since they're designed to be passed
+        around between various objects in several threads we can't
+        just delete them whenever we feel like it.  The release()
+        method is similar to the destroy() method we've used
+        elsewhere.  It watches the reference count and will delete the
+        object when possible.  */
+      mb->release ();
+      return -1;
+    }
+
+  return 0;
 }
 
-/*
-   The "guard" concept is very powerful and used throughout multi-threaded applications.
-   A guard normally does some operation on an object at construction and the "opposite"
-   operation at destruction.  For instance, when you guard a mutex (lock) object, the guard
-   will acquire the lock on construction and release it on destruction.  In this way, your
-   method can simply let the guard go out of scope and know that the lock is released.
-
-   Guards aren't only useful for locks however.  In this application I've created two guard
-   objects for quite a different purpose.
- */
-
-/*
-   The Counter_Guard is constructed with a reference to the thread pool's active thread
-   counter.  The guard increments the counter when it is created and decrements it at
-   destruction.  By creating one of these in svc(), I know that the counter will be decremented
-   no matter how or where svc() returns.
- */
+/* The "guard" concept is very powerful and used throughout
+   multi-threaded applications.  A guard normally does some operation
+   on an object at construction and the "opposite" operation at
+   destruction.  For instance, when you guard a mutex (lock) object,
+   the guard will acquire the lock on construction and release it on
+   destruction.  In this way, your method can simply let the guard go
+   out of scope and know that the lock is released.
+
+   Guards aren't only useful for locks however.  In this application
+   I've created two guard objects for quite a different purpose.  */
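+
+/* A hedged sketch (not part of the tutorial code): the traditional
+   guard pairs a lock with a scope, for instance
+
+     ACE_Thread_Mutex mutex;
+
+     void update_shared_data (void)
+     {
+       ACE_Guard<ACE_Thread_Mutex> guard (mutex);  // acquires the lock
+       // ... touch the shared data ...
+     }                                             // releases the lock
+
+   The function name is made up for illustration.  The two guards
+   below apply the same construction/destruction idea to a counter
+   and to a message block rather than to a lock.  */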
+
+/* The Counter_Guard is constructed with a reference to the thread
+   pool's active thread counter.  The guard increments the counter
+   when it is created and decrements it at destruction.  By creating
+   one of these in svc(), I know that the counter will be decremented
+   no matter how or where svc() returns.  */
 class Counter_Guard
 {
 public:
-	Counter_Guard( Thread_Pool::counter_t & _counter )
-	 : counter_(_counter)
-	{
-		++counter_;
-	}
+  Counter_Guard (Thread_Pool::counter_t &counter)
+    : counter_ (counter)
+  {
+    ++counter_;
+  }
 
-	~Counter_Guard(void)
-	{
-		--counter_;
-	}
+  ~Counter_Guard (void)
+  {
+    --counter_;
+  }
 
 protected:
-	Thread_Pool::counter_t & counter_;
+  Thread_Pool::counter_t &counter_;
 };
 
-/*
-   My Message_Block_Guard is also a little non-traditional.  It doesn't do anything in the
-   constructor but it's destructor ensures that the message block's release() method is called.
-   This is a cheap way to prevent a memory leak if I need an additional exit point in svc().
- */
+/* My Message_Block_Guard is also a little non-traditional.  It
+   doesn't do anything in the constructor but its destructor ensures
+   that the message block's release() method is called.  This is a
+   cheap way to prevent a memory leak if I need an additional exit
+   point in svc().  */
 class Message_Block_Guard
 {
 public:
-	Message_Block_Guard( ACE_Message_Block * & _mb )
-	 : mb_(_mb)
-	{
-	}
+  Message_Block_Guard (ACE_Message_Block *&mb)
+    : mb_ (mb)
+  {
+  }
 
-	~Message_Block_Guard( void )
-	{
-		mb_->release();
-	}
+  ~Message_Block_Guard (void)
+  {
+    mb_->release ();
+  }
 
 protected:
-	ACE_Message_Block * & mb_;
+  ACE_Message_Block *&mb_;
 };
 
-/*
-   Now we come to the svc() method.  As I said, this is being executed in each thread of the
-   Thread_Pool.  Here, we pull messages off of our built-in ACE_Message_Queue and cause them
-   to do work.
- */
-int Thread_Pool::svc(void)
+/* Now we come to the svc() method.  As I said, this is being executed
+   in each thread of the Thread_Pool.  Here, we pull messages off of
+   our built-in ACE_Message_Queue and cause them to do work.  */
+int 
+Thread_Pool::svc (void)
 {
-	/*
-	   The getq() method takes a reference to a pointer.  So... we need a pointer to give it
-	   a reference to.
-	 */
-	ACE_Message_Block * mb;
-
-	/*
-	   Create the guard for our active thread counter object.  No matter where we choose to
-	   return() from svc(), we now know that the counter will be decremented.
-	 */
-	Counter_Guard counter_guard(active_threads_);
-
-	/*
-	   Get messages from the queue until we have a failure.  There's no real good reason
-	   for failure so if it happens, we leave immediately.
-	 */
-	while( this->getq(mb) != -1 )
-	{
-		/*
-		   A successful getq() will cause "mb" to point to a valid refernce-counted
-		   ACE_Message_Block.  We use our guard object here so that we're sure to call
-		   the release() method of that message block and reduce it's reference count.
-		   Once the count reaches zero, it will be deleted.
-		 */
-		Message_Block_Guard message_block_guard(mb);
-
-		/*
-		   As noted before, the ACE_Message_Block stores it's data as a char*.  We pull that
-		   out here and later turn it into an ACE_Event_Handler*
-		 */
-		char * c_data = mb->base();
-
-		/*
-		   We've chosen to use a "null" value as an indication to leave.  If the data we got
-		   from the queue is not null then we have some work to do.
-		 */
-		if( c_data )
-		{
-			/*
-			   Once again, we go to great lengths to emphasize the fact that we're casting pointers
-			   around in rather impolite ways.  We could have cast the char* directly to an
-			   ACE_Event_Handler* but then folks might think that's an OK thing to do.
-
-			   (Note:  The correct way to use an ACE_Message_Block is to write data into it.
-			    What I should have done was create a message block big enough to hold an
-				event handler pointer and then written the pointer value into the block.  When
-				we got here, I would have to read that data back into a pointer.  While politically
-				correct, it is also a lot of work.  If you're careful you can get away with casting
-				pointers around.)
-			 */
-			void * v_data = (void*)c_data;
+  /* The getq() method takes a reference to a pointer.  So... we need
+    a pointer to give it a reference to.  */
+  ACE_Message_Block *mb;
+
+  /* Create the guard for our active thread counter object.  No matter
+    where we choose to return() from svc(), we now know that the
+    counter will be decremented.  */
+  Counter_Guard counter_guard (active_threads_);
+
+  /* Get messages from the queue until we have a failure.  There's no
+    real good reason for failure so if it happens, we leave
+    immediately.  */
+  while (this->getq (mb) != -1)
+    {
+      /* A successful getq() will cause "mb" to point to a valid
+        reference-counted ACE_Message_Block.  We use our guard object
+        here so that we're sure to call the release() method of that
+        message block and reduce its reference count.  Once the count
+        reaches zero, it will be deleted.  */
+      Message_Block_Guard message_block_guard (mb);
+
+      /* As noted before, the ACE_Message_Block stores its data as a
+        char*.  We pull that out here and later turn it into an
+        ACE_Event_Handler*.  */
+      char *c_data = mb->base ();
+
+      /* We've chosen to use a "null" value as an indication to leave.
+        If the data we got from the queue is not null then we have
+        some work to do.  */
+      if (c_data)
+        {
+          /* Once again, we go to great lengths to emphasize the fact
+            that we're casting pointers around in rather impolite
+            ways.  We could have cast the char* directly to an
+            ACE_Event_Handler* but then folks might think that's an OK
+            thing to do.
+
+            (Note: The correct way to use an ACE_Message_Block is to
+            write data into it.  What I should have done was create a
+            message block big enough to hold an event handler pointer
+            and then written the pointer value into the block.  When
+            we got here, I would have to read that data back into a
+            pointer.  While politically correct, it is also a lot of
+            work.  If you're careful you can get away with casting
+            pointers around.)  */
+          void *v_data = (void *) c_data;
 	
-			ACE_Event_Handler * handler = (ACE_Event_Handler*)v_data;
+          ACE_Event_Handler *handler = (ACE_Event_Handler *) v_data;
 	
-			/*
-			   Now that we finally have an event handler pointer, invoke it's handle_input() method.
-			   Since we don't know it's handle, we just give it a default.  That's OK because we
-			   know that we're not using the handle in the method anyway.
-			 */
-			if( handler->handle_input(ACE_INVALID_HANDLE) == -1 )
-			{
-			  /*
-			     Tell the handler that it's time to go home.  The "normal" method for shutting
-				 down a handler whose handler failed is to invoke handle_close().  This will
-				 take care of cleaning it up for us.
-				 Notice how we use the handler's get_handle() method to populate it's "handle"
-				 parameter.  Convenient isn't it?
-			   */
-			  handler->handle_close(handler->get_handle(),0);
-
-			  /*
-			     Also notice that we don't exit the svc() method here!  The first time I did
-				 this, I was exiting.  After a few clients disconnect you have an empty
-				 thread pool.  Hard to do any more work after that...
-			   */
-			}
-		}
-		else
-		{
-			/*
-			   If we get here, we were given a message block with "null" data.  That is our
-			   signal to leave, so we return(0) to leave gracefully.
-			 */
-			return(0);		// Ok, shutdown request
-		}
-
-		// message_block_guard goes out of scope here
-		// and releases the message_block instance.
-	}
-
-	return(0);
+          /* Now that we finally have an event handler pointer, invoke
+            its handle_input() method.  Since we don't know its
+            handle, we just give it a default.  That's OK because we
+            know that we're not using the handle in the method anyway.  */
+          if (handler->handle_input (ACE_INVALID_HANDLE) == -1)
+            {
+              /* Tell the handler that it's time to go home.  The
+                "normal" way to shut down a handler whose
+                handle_input() has failed is to invoke handle_close().
+                This will take care of cleaning it up for us.  Notice
+                how we use the handler's get_handle() method to
+                populate its "handle" parameter.  Convenient, isn't
+                it?  */
+              handler->handle_close (handler->get_handle (), 0);
+
+              /* Also notice that we don't exit the svc() method here!
+                The first time I did this, I was exiting.  After a few
+                clients disconnect you have an empty thread pool.
+                Hard to do any more work after that...  */
+            }
+        }
+      else
+        /* If we get here, we were given a message block with "null"
+           data.  That is our signal to leave, so we return 0 to
+           leave gracefully.  */
+        return 0;               // Ok, shutdown request
+
+      // message_block_guard goes out of scope here and releases the
+      // message_block instance.
+    }
+
+  return 0;
 }
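+
+/* A sketch of the "correct" ACE_Message_Block usage that the comments
+   in svc() allude to: make a block big enough to hold the pointer,
+   copy the pointer's *value* into the block when enqueueing, and copy
+   it back out when dequeueing.  Neither function is part of the
+   tutorial; the names are illustrative only.  */
+
+ACE_Message_Block *
+make_handler_block (ACE_Event_Handler *handler)
+{
+  ACE_Message_Block *mb;
+  ACE_NEW_RETURN (mb,
+                  ACE_Message_Block (sizeof (ACE_Event_Handler *)),
+                  0);
+
+  /* copy() duplicates the bytes into the block and advances the
+     block's write pointer for us.  */
+  if (mb->copy ((const char *) &handler,
+                sizeof (ACE_Event_Handler *)) == -1)
+    {
+      mb->release ();
+      return 0;
+    }
+
+  return mb;
+}
+
+ACE_Event_Handler *
+extract_handler (ACE_Message_Block *mb)
+{
+  ACE_Event_Handler *handler = 0;
+  ACE_OS::memcpy (&handler,
+                  mb->rd_ptr (),
+                  sizeof (ACE_Event_Handler *));
+  return handler;
+}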
 
 
diff --git a/docs/tutorials/007/server.cpp b/docs/tutorials/007/server.cpp index 55fb69c58ef..d7e616dfdaf 100644 --- a/docs/tutorials/007/server.cpp +++ b/docs/tutorials/007/server.cpp @@ -1,20 +1,16 @@ // $Id$ -/* - We try to keep main() very simple. One of the ways we do that is to push - much of the complicated stuff into worker objects. In this case, we only - need to include the acceptor header in our main source file. We let it - worry about the "real work". - */ +/* We try to keep main() very simple. One of the ways we do that is + to push much of the complicated stuff into worker objects. In this + case, we only need to include the acceptor header in our main + source file. We let it worry about the "real work". */ #include "client_acceptor.h" -/* - As before, we create a simple signal handler that will set our finished - flag. There are, of course, more elegant ways to handle program shutdown - requests but that isn't really our focus right now, so we'll just do the - easiest thing. - */ +/* As before, we create a simple signal handler that will set our + finished flag. There are, of course, more elegant ways to handle + program shutdown requests but that isn't really our focus right + now, so we'll just do the easiest thing. */ static sig_atomic_t finished = 0; extern "C" void handler (int) @@ -22,86 +18,81 @@ extern "C" void handler (int) finished = 1; } -/* - A server has to listen for clients at a known TCP/IP port. The default ACE - port is 10002 (at least on my system) and that's good enough for what we - want to do here. Obviously, a more robust application would take a command - line parameter or read from a configuration file or do some other clever - thing. Just like the signal handler above, though, that's what we want to - focus on, so we're taking the easy way out. - */ +/* A server has to listen for clients at a known TCP/IP port. The + default ACE port is 10002 (at least on my system) and that's good + enough for what we want to do here. Obviously, a more robust + application would take a command line parameter or read from a + configuration file or do some other clever thing. Just like the + signal handler above, though, that's what we want to focus on, so + we're taking the easy way out. */ static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -/* - Finally, we get to main. Some C++ compilers will complain loudly if your - function signature doesn't match the prototype. Even though we're not - going to use the parameters, we still have to specify them. - */ +/* Finally, we get to main. Some C++ compilers will complain loudly + if your function signature doesn't match the prototype. Even + though we're not going to use the parameters, we still have to + specify them. */ -int main (int argc, char *argv[]) +int +main (int argc, char *argv[]) { -/* - In our earlier servers, we used a global pointer to get to the reactor. I've - never really liked that idea, so I've moved it into main() this time. When - we get to the Client_Handler object you'll see how we manage to get a - pointer back to this reactor. - */ + /* In our earlier servers, we used a global pointer to get to the + reactor. I've never really liked that idea, so I've moved it into + main() this time. When we get to the Client_Handler object you'll + see how we manage to get a pointer back to this reactor. */ ACE_Reactor reactor; - /* - The acceptor will take care of letting clients connect to us. It will - also arrange for a Client_Handler to be created for each new client. 
- Since we're only going to listen at one TCP/IP port, we only need one - acceptor. If we wanted, though, we could create several of these and - listen at several ports. (That's what we would do if we wanted to rewrite - inetd for instance.) - */ + /* The acceptor will take care of letting clients connect to us. It + will also arrange for a Client_Handler to be created for each new + client. Since we're only going to listen at one TCP/IP port, we + only need one acceptor. If we wanted, though, we could create + several of these and listen at several ports. (That's what we + would do if we wanted to rewrite inetd for instance.) */ Client_Acceptor peer_acceptor; - /* - Create an ACE_INET_Addr that represents our endpoint of a connection. We - then open our acceptor object with that Addr. Doing so tells the acceptor - where to listen for connections. Servers generally listen at "well known" - addresses. If not, there must be some mechanism by which the client is - informed of the server's address. + /* Create an ACE_INET_Addr that represents our endpoint of a + connection. We then open our acceptor object with that Addr. + Doing so tells the acceptor where to listen for connections. + Servers generally listen at "well known" addresses. If not, there + must be some mechanism by which the client is informed of the + server's address. - Note how ACE_ERROR_RETURN is used if we fail to open the acceptor. This - technique is used over and over again in our tutorials. - */ + Note how ACE_ERROR_RETURN is used if we fail to open the acceptor. + This technique is used over and over again in our tutorials. */ if (peer_acceptor.open (ACE_INET_Addr (PORT), &reactor) == -1) - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1); - - /* - Install our signal handler. You can actually register signal handlers - with the reactor. You might do that when the signal handler is - responsible for performing "real" work. Our simple flag-setter doesn't - justify deriving from ACE_Event_Handler and providing a callback function - though. - */ + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); + + /* Install our signal handler. You can actually register signal + handlers with the reactor. You might do that when the signal + handler is responsible for performing "real" work. Our simple + flag-setter doesn't justify deriving from ACE_Event_Handler and + providing a callback function though. */ ACE_Sig_Action sa ((ACE_SignalHandler) handler, SIGINT); - /* - Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit. It's a - handy way to generate uniform debug output from your program. - */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n")); - - /* - This will loop "forever" invoking the handle_events() method of our - reactor. handle_events() watches for activity on any registered handlers - and invokes their appropriate callbacks when necessary. Callback-driven - programming is a big thing in ACE, you should get used to it. If the - signal handler catches something, the finished flag will be set and we'll - exit. Conveniently enough, handle_events() is also interrupted by signals - and will exit back to the while() loop. (If you want your event loop to - not be interrupted by signals, checkout the restart flag on the - open() method of ACE_Reactor if you're interested.) - */ + /* Like ACE_ERROR_RETURN, the ACE_DEBUG macro gets used quite a bit. + It's a handy way to generate uniform debug output from your + program. 
*/ + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) starting up server daemon\n")); + + /* This will loop "forever" invoking the handle_events() method of + our reactor. handle_events() watches for activity on any + registered handlers and invokes their appropriate callbacks when + necessary. Callback-driven programming is a big thing in ACE, you + should get used to it. If the signal handler catches something, + the finished flag will be set and we'll exit. Conveniently + enough, handle_events() is also interrupted by signals and will + exit back to the while() loop. (If you want your event loop to + not be interrupted by signals, checkout the restart flag on + the open() method of ACE_Reactor if you're interested.) */ while (!finished) - reactor.handle_events (); + reactor.handle_events (); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) shutting down server daemon\n")); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) shutting down server daemon\n")); return 0; } diff --git a/docs/tutorials/007/thread_pool.cpp b/docs/tutorials/007/thread_pool.cpp index ef7b4a0a250..8042b76b7e5 100644 --- a/docs/tutorials/007/thread_pool.cpp +++ b/docs/tutorials/007/thread_pool.cpp @@ -1,277 +1,252 @@ - // $Id$ #include "thread_pool.h" -/* - We need this header so that we can invoke handle_input() on the objects we dequeue. - */ +/* We need this header so that we can invoke handle_input() on the + objects we dequeue. */ #include "ace/Event_Handler.h" - -/* - All we do here is initialize our active thread counter. - */ -Thread_Pool::Thread_Pool(void) - : active_threads_(0) +/* All we do here is initialize our active thread counter. */ +Thread_Pool::Thread_Pool (void) + : active_threads_ (0) { } -/* - Our open() method is a thin disguise around the ACE_Task<> activate() method. By - hiding activate() in this way, the users of Thread_Pool don't have to worry about - the thread configuration flags. - */ -int Thread_Pool::open( int _pool_size ) +/* Our open() method is a thin disguise around the ACE_Task<> + activate() method. By hiding activate() in this way, the users of + Thread_Pool don't have to worry about the thread configuration + flags. */ +int +Thread_Pool::open (int pool_size) { - return this->activate(THR_NEW_LWP,_pool_size); + return this->activate (THR_NEW_LWP, pool_size); } -/* - Closing the thread pool can be a tricky exercise. I've decided to take an easy approach - and simply enqueue a secret message for each thread we have active. - */ -int Thread_Pool::close( u_long flags ) +/* Closing the thread pool can be a tricky exercise. I've decided to + take an easy approach and simply enqueue a secret message for each + thread we have active. */ +int +Thread_Pool::close (u_long flags) { - ACE_UNUSED_ARG(flags); - - /* - Find out how many threads are currently active - */ - int counter = active_threads_.value(); - - /* - For each one of the active threads, enqueue a "null" event handler. Below, we'll - teach our svc() method that "null" means "shutdown". - */ - while( counter-- ) - { - this->enqueue( 0 ); - } - - /* - As each svc() method exits, it will decrement the active thread counter. We just wait - here for it to reach zero. Since we don't know how long it will take, we sleep for - a quarter-second or so between tries. - */ - while( active_threads_.value() ) - { - ACE_OS::sleep( ACE_Time_Value(0.25) ); - } - - return(0); + ACE_UNUSED_ARG(flags); + + /* Find out how many threads are currently active */ + int counter = active_threads_.value (); + + /* For each one of the active threads, enqueue a "null" event + handler. 
Below, we'll teach our svc() method that "null" means + "shutdown". */ + while (counter--) + this->enqueue (0); + + /* As each svc() method exits, it will decrement the active thread + counter. We just wait here for it to reach zero. Since we don't + know how long it will take, we sleep for a quarter of a second + between tries. */ + while (active_threads_.value ()) + ACE_OS::sleep (ACE_Time_Value (0, 250000)); + + return(0); } -/* - When an object wants to do work in the pool, it should call the enqueue() method. - We introduce the ACE_Message_Block here but, unfortunately, we seriously misuse it. - */ -int Thread_Pool::enqueue( ACE_Event_Handler * _handler ) +/* When an object wants to do work in the pool, it should call the + enqueue() method. We introduce the ACE_Message_Block here but, + unfortunately, we seriously misuse it. */ +int +Thread_Pool::enqueue (ACE_Event_Handler *handler) { - /* - An ACE_Message_Block is a chunk of data. You put them into an ACE_Message_Queue. - ACE_Task<> has an ACE_Message_Queue built in. In fact, the parameter to ACE_Task<> - is passed directly to ACE_Message_Queue. If you look back at our header file you'll - see that we used ACE_MT_SYNCH as the parameter indicating that we want MultiThread - Synch safety. This allows us to safely put ACE_Message_Block objects into the - message queue in one thread and take them out in another. - */ - - /* - An ACE_Message_Block wants to have char* data. We don't have that. We could - cast our ACE_Event_Handler* directly to a char* but I wanted to be more explicit. - Since casting pointers around is a dangerous thing, I've gone out of my way here - to be very clear about what we're doing. - - First: Cast the handler pointer to a void pointer. You can't do any useful work - on a void pointer, so this is a clear message that we're making the - pointer unusable. - - Next: Cast the void pointer to a char pointer that the ACE_Message_Block will accept. - */ - void * v_data = (void*)_handler; - char * c_data = (char*)v_data; - - /* - Construct a new ACE_Message_Block. For efficiency, you might want to preallocate a - stack of these and reuse them. For simplicity, I'll just create what I need as I need it. - */ - ACE_Message_Block * mb = new ACE_Message_Block( c_data ); - - /* - Our putq() method is a wrapper around one of the enqueue methods of the ACE_Message_Queue - that we own. Like all good methods, it returns -1 if it fails for some reason. - */ - if( this->putq(mb) == -1 ) - { - /* - Another trait of the ACE_Message_Block objects is that they are reference counted. - Since they're designed to be passed around between various objects in several threads - we can't just delete them whenever we feel like it. The release() method is similar - to the destroy() method we've used elsewhere. It watches the reference count and will - delete the object when possible. - */ - mb->release(); - return(-1); - } - - return(0); + /* An ACE_Message_Block is a chunk of data. You put them into an + ACE_Message_Queue. ACE_Task<> has an ACE_Message_Queue built in. + In fact, the parameter to ACE_Task<> is passed directly to + ACE_Message_Queue. If you look back at our header file you'll see + that we used ACE_MT_SYNCH as the parameter indicating that we want + MultiThread Synch safety. This allows us to safely put + ACE_Message_Block objects into the message queue in one thread and + take them out in another. */ + + /* An ACE_Message_Block wants to have char* data. We don't have + that. 
We could cast our ACE_Event_Handler* directly to a char* + but I wanted to be more explicit. Since casting pointers around + is a dangerous thing, I've gone out of my way here to be very + clear about what we're doing. + + First: Cast the handler pointer to a void pointer. You can't do + any useful work on a void pointer, so this is a clear message that + we're making the pointer unusable. + + Next: Cast the void pointer to a char pointer that the ACE_Message_Block will accept. */ + void *v_data = (void *) handler; + char *c_data = (char *) v_data; + + ACE_Message_Block *mb; + + /* Construct a new ACE_Message_Block. For efficiency, you might + want to preallocate a stack of these and reuse them. For + simplicity, I'll just create what I need as I need it. */ + ACE_NEW_RETURN (mb, + ACE_Message_Block (c_data), + -1); + + /* Our putq() method is a wrapper around one of the enqueue methods + of the ACE_Message_Queue that we own. Like all good methods, it + returns -1 if it fails for some reason. */ + if (this->putq (mb) == -1) + { + /* Another trait of the ACE_Message_Block objects is that they + are reference counted. Since they're designed to be passed + around between various objects in several threads we can't + just delete them whenever we feel like it. The release() + method is similar to the destroy() method we've used + elsewhere. It watches the reference count and will delete the + object when possible. */ + mb->release (); + return -1; + } + + return 0; } -/* - The "guard" concept is very powerful and used throughout multi-threaded applications. - A guard normally does some operation on an object at construction and the "opposite" - operation at destruction. For instance, when you guard a mutex (lock) object, the guard - will acquire the lock on construction and release it on destruction. In this way, your - method can simply let the guard go out of scope and know that the lock is released. - - Guards aren't only useful for locks however. In this application I've created two guard - objects for quite a different purpose. - */ - -/* - The Counter_Guard is constructed with a reference to the thread pool's active thread - counter. The guard increments the counter when it is created and decrements it at - destruction. By creating one of these in svc(), I know that the counter will be decremented - no matter how or where svc() returns. - */ +/* The "guard" concept is very powerful and used throughout + multi-threaded applications. A guard normally does some operation + on an object at construction and the "opposite" operation at + destruction. For instance, when you guard a mutex (lock) object, + the guard will acquire the lock on construction and release it on + destruction. In this way, your method can simply let the guard go + out of scope and know that the lock is released. + + Guards aren't only useful for locks however. In this application + I've created two guard objects for quite a different purpose. */ + +/* The Counter_Guard is constructed with a reference to the thread + pool's active thread counter. The guard increments the counter + when it is created and decrements it at destruction. By creating + one of these in svc(), I know that the counter will be decremented + no matter how or where svc() returns. 
*/ class Counter_Guard { public: - Counter_Guard( Thread_Pool::counter_t & _counter ) - : counter_(_counter) - { - ++counter_; - } + Counter_Guard (Thread_Pool::counter_t &counter) + : counter_ (counter) + { + ++counter_; + } - ~Counter_Guard(void) - { - --counter_; - } + ~Counter_Guard (void) + { + --counter_; + } protected: - Thread_Pool::counter_t & counter_; + Thread_Pool::counter_t &counter_; }; -/* - My Message_Block_Guard is also a little non-traditional. It doesn't do anything in the - constructor but it's destructor ensures that the message block's release() method is called. - This is a cheap way to prevent a memory leak if I need an additional exit point in svc(). - */ +/* My Message_Block_Guard is also a little non-traditional. It + doesn't do anything in the constructor but it's destructor ensures + that the message block's release() method is called. This is a + cheap way to prevent a memory leak if I need an additional exit + point in svc(). */ class Message_Block_Guard { public: - Message_Block_Guard( ACE_Message_Block * & _mb ) - : mb_(_mb) - { - } + Message_Block_Guard (ACE_Message_Block *&mb) + : mb_ (mb) + { + } - ~Message_Block_Guard( void ) - { - mb_->release(); - } + ~Message_Block_Guard (void) + { + mb_->release (); + } protected: - ACE_Message_Block * & mb_; + ACE_Message_Block *&mb_; }; -/* - Now we come to the svc() method. As I said, this is being executed in each thread of the - Thread_Pool. Here, we pull messages off of our built-in ACE_Message_Queue and cause them - to do work. - */ -int Thread_Pool::svc(void) +/* Now we come to the svc() method. As I said, this is being executed + in each thread of the Thread_Pool. Here, we pull messages off of + our built-in ACE_Message_Queue and cause them to do work. */ +int +Thread_Pool::svc (void) { - /* - The getq() method takes a reference to a pointer. So... we need a pointer to give it - a reference to. - */ - ACE_Message_Block * mb; - - /* - Create the guard for our active thread counter object. No matter where we choose to - return() from svc(), we now know that the counter will be decremented. - */ - Counter_Guard counter_guard(active_threads_); - - /* - Get messages from the queue until we have a failure. There's no real good reason - for failure so if it happens, we leave immediately. - */ - while( this->getq(mb) != -1 ) - { - /* - A successful getq() will cause "mb" to point to a valid refernce-counted - ACE_Message_Block. We use our guard object here so that we're sure to call - the release() method of that message block and reduce it's reference count. - Once the count reaches zero, it will be deleted. - */ - Message_Block_Guard message_block_guard(mb); - - /* - As noted before, the ACE_Message_Block stores it's data as a char*. We pull that - out here and later turn it into an ACE_Event_Handler* - */ - char * c_data = mb->base(); - - /* - We've chosen to use a "null" value as an indication to leave. If the data we got - from the queue is not null then we have some work to do. - */ - if( c_data ) - { - /* - Once again, we go to great lengths to emphasize the fact that we're casting pointers - around in rather impolite ways. We could have cast the char* directly to an - ACE_Event_Handler* but then folks might think that's an OK thing to do. - - (Note: The correct way to use an ACE_Message_Block is to write data into it. - What I should have done was create a message block big enough to hold an - event handler pointer and then written the pointer value into the block. 
When - we got here, I would have to read that data back into a pointer. While politically - correct, it is also a lot of work. If you're careful you can get away with casting - pointers around.) - */ - void * v_data = (void*)c_data; + /* The getq() method takes a reference to a pointer. So... we need + a pointer to give it a reference to. */ + ACE_Message_Block *mb; + + /* Create the guard for our active thread counter object. No matter + where we choose to return() from svc(), we now know that the + counter will be decremented. */ + Counter_Guard counter_guard (active_threads_); + + /* Get messages from the queue until we have a failure. There's no + real good reason for failure so if it happens, we leave + immediately. */ + while (this->getq (mb) != -1) + { + /* A successful getq() will cause "mb" to point to a valid + refernce-counted ACE_Message_Block. We use our guard object + here so that we're sure to call the release() method of that + message block and reduce it's reference count. Once the count + reaches zero, it will be deleted. */ + Message_Block_Guard message_block_guard (mb); + + /* As noted before, the ACE_Message_Block stores it's data as a + char*. We pull that out here and later turn it into an + ACE_Event_Handler* */ + char *c_data = mb->base (); + + /* We've chosen to use a "null" value as an indication to leave. + If the data we got from the queue is not null then we have + some work to do. */ + if (c_data) + { + /* Once again, we go to great lengths to emphasize the fact + that we're casting pointers around in rather impolite + ways. We could have cast the char* directly to an + ACE_Event_Handler* but then folks might think that's an OK + thing to do. + + (Note: The correct way to use an ACE_Message_Block is to + write data into it. What I should have done was create a + message block big enough to hold an event handler pointer + and then written the pointer value into the block. When + we got here, I would have to read that data back into a + pointer. While politically correct, it is also a lot of + work. If you're careful you can get away with casting + pointers around.) */ + void *v_data = (void *) c_data; - ACE_Event_Handler * handler = (ACE_Event_Handler*)v_data; + ACE_Event_Handler *handler = (ACE_Event_Handler *) v_data; - /* - Now that we finally have an event handler pointer, invoke it's handle_input() method. - Since we don't know it's handle, we just give it a default. That's OK because we - know that we're not using the handle in the method anyway. - */ - if( handler->handle_input(ACE_INVALID_HANDLE) == -1 ) - { - /* - Tell the handler that it's time to go home. The "normal" method for shutting - down a handler whose handler failed is to invoke handle_close(). This will - take care of cleaning it up for us. - Notice how we use the handler's get_handle() method to populate it's "handle" - parameter. Convenient isn't it? - */ - handler->handle_close(handler->get_handle(),0); - - /* - Also notice that we don't exit the svc() method here! The first time I did - this, I was exiting. After a few clients disconnect you have an empty - thread pool. Hard to do any more work after that... - */ - } - } - else - { - /* - If we get here, we were given a message block with "null" data. That is our - signal to leave, so we return(0) to leave gracefully. - */ - return(0); // Ok, shutdown request - } - - // message_block_guard goes out of scope here - // and releases the message_block instance. 
- } - - return(0); + /* Now that we finally have an event handler pointer, invoke + it's handle_input() method. Since we don't know it's + handle, we just give it a default. That's OK because we + know that we're not using the handle in the method anyway. */ + if (handler->handle_input (ACE_INVALID_HANDLE) == -1) + { + /* Tell the handler that it's time to go home. The + "normal" method for shutting down a handler whose + handler failed is to invoke handle_close(). This will + take care of cleaning it up for us. Notice how we use + the handler's get_handle() method to populate it's + "handle" parameter. Convenient isn't it? */ + handler->handle_close (handler->get_handle (), 0); + + /* Also notice that we don't exit the svc() method here! + The first time I did this, I was exiting. After a few + clients disconnect you have an empty thread pool. + Hard to do any more work after that... */ + } + } + else + /* If we get here, we were given a message block with "null" + data. That is our signal to leave, so we return(0) to + leave gracefully. */ + return 0; // Ok, shutdown request + + // message_block_guard goes out of scope here and releases the + // message_block instance. + } + + return 0; } diff --git a/docs/tutorials/008/broadcast_client.cpp b/docs/tutorials/008/broadcast_client.cpp index 2f754806d03..126b125ffc4 100644 --- a/docs/tutorials/008/broadcast_client.cpp +++ b/docs/tutorials/008/broadcast_client.cpp @@ -1,4 +1,3 @@ - // $Id$ #include "ace/SOCK_Dgram_Bcast.h" @@ -6,67 +5,76 @@ static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -int main(int argc,char *argv[] ) +int +main (int argc,char *argv[]) { - ACE_INET_Addr local((u_short)0); - - /* - Instead of creating the ACE_SOCK_Dgram we created last time, - we'll create an ACE_SOCK_Dgram_Bcast. "Bcast" means, of course, - "Broadcast". This ACE object is clever enough to go out to the - OS and find all of the network interfaces. When you send() - on a Dgram_Bcast, it will send the datagram out on all of those - interfaces. This is quiet handy if you do it on a multi-homed - host that plays router... - */ - ACE_SOCK_Dgram_Bcast dgram; - - if( dgram.open(local) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "datagram open"),-1); - } - - char buf[512]; - - sprintf(buf, "Hello World!"); - - /* - The only other difference between us and the directed client - is that we don't specify a host to receive the datagram. - Instead, we use the magic value "INADDR_BROADCAST". All hosts - are obliged to respond to datagrams directed to this address - the same as they would to datagrams sent to their hostname. - - Remember, the Dgram_Bcast will send a datagram to all interfaces - on the host. That's true even if the address is for a specific - host (and the host address makes sense for the interface). - The real power is in using an INADDR_BROADCAST addressed datagram - against all interfaces. - */ - - ACE_INET_Addr remote(PORT,INADDR_BROADCAST); - - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Sending (%s) to the server.\n",buf)); - - if( dgram.send(buf,strlen(buf)+1,remote) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"),-1); - } - - if( dgram.recv(buf,sizeof(buf),remote) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"),-1); - } - - ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server said: %s\n",buf)); - - /* - Using the "remote" object instance, find out where the server lives. - We could then save this address and use directed datagrams to chat - with the server for a while. 
- */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server can be found at: (%s:%d)\n", - remote.get_host_name(), PORT )); - - return(0); + ACE_INET_Addr local ((u_short) 0); + + /* Instead of creating the ACE_SOCK_Dgram we created last time, + we'll create an ACE_SOCK_Dgram_Bcast. "Bcast" means, of course, + "Broadcast". This ACE object is clever enough to go out to the OS + and find all of the network interfaces. When you send() on a + Dgram_Bcast, it will send the datagram out on all of those + interfaces. This is quiet handy if you do it on a multi-homed + host that plays router... */ + ACE_SOCK_Dgram_Bcast dgram; + + if (dgram.open (local) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "datagram open"), + -1); + + char buf[BUFSIZ]; + + sprintf (buf, "Hello World!"); + + /* The only other difference between us and the directed client is + that we don't specify a host to receive the datagram. Instead, we + use the magic value "INADDR_BROADCAST". All hosts are obliged to + respond to datagrams directed to this address the same as they + would to datagrams sent to their hostname. + + Remember, the Dgram_Bcast will send a datagram to all interfaces + on the host. That's true even if the address is for a specific + host (and the host address makes sense for the interface). The + real power is in using an INADDR_BROADCAST addressed datagram + against all interfaces. */ + + ACE_INET_Addr remote (PORT, + INADDR_BROADCAST); + + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Sending (%s) to the server.\n", + buf)); + + if (dgram.send (buf, + ACE_OS::strlen (buf) + 1, + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "send"), + -1); + + if (dgram.recv (buf, + sizeof (buf), + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "recv"), + -1); + + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) The server said: %s\n", + buf)); + + /* Using the "remote" object instance, find out where the server + lives. We could then save this address and use directed datagrams + to chat with the server for a while. */ + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) The server can be found at: (%s:%d)\n", + remote.get_host_name(), + PORT)); + + return 0; } diff --git a/docs/tutorials/008/directed_client.cpp b/docs/tutorials/008/directed_client.cpp index 998aafa12db..d11367aa43e 100644 --- a/docs/tutorials/008/directed_client.cpp +++ b/docs/tutorials/008/directed_client.cpp @@ -1,118 +1,107 @@ - // $Id$ #include "ace/SOCK_Dgram.h" #include "ace/INET_Addr.h" -/* - Once again, we use the default server port. In a "real" system, - the server's port (or ports) would be published in some way so - that clients would know where to "look". We could even add entries - to the operating system's services file and use a service name - instead of a number. We'll come back to that in some other tutorial - though. For now, let's stay simple. - */ +/* Once again, we use the default server port. In a "real" system, + the server's port (or ports) would be published in some way so that + clients would know where to "look". We could even add entries to + the operating system's services file and use a service name instead + of a number. We'll come back to that in some other tutorial + though. For now, let's stay simple. */ static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -/* - Our goal here is to develop a client that can send a datagram to - a server running on a known host. We'll use a command-line argument - to specify the hostname instead of hard-coding it. 
- */ -int main(int argc,char *argv[] ) +/* Our goal here is to develop a client that can send a datagram to a + server running on a known host. We'll use a command-line argument + to specify the hostname instead of hard-coding it. */ +int +main (int argc,char *argv[]) { - /* - All datagrams must have a point of origin. Since we intend to - transmit instead of receive, we initialize an address with zero - and let the OS choose a port for us. We could have chosen our - own value between 1025 and 65535 as long as it isn't already in - use. + /* All datagrams must have a point of origin. Since we intend to + transmit instead of receive, we initialize an address with zero + and let the OS choose a port for us. We could have chosen our own + value between 1025 and 65535 as long as it isn't already in use. - The biggest difference between client and server when datagrams - are used is the fact that servers tend to have a known/fixed - address at which they listen and clients tend to have arbitrary - addresses assigned by the OS. - */ - ACE_INET_Addr local((u_short)0); + The biggest difference between client and server when datagrams + are used is the fact that servers tend to have a known/fixed + address at which they listen and clients tend to have arbitrary + addresses assigned by the OS. */ + ACE_INET_Addr local((u_short) 0); - /* - And here is our datagram object. - */ - ACE_SOCK_Dgram dgram; + /* And here is our datagram object. */ + ACE_SOCK_Dgram dgram; - /* - Notice that this looks a lot like the server application. There's - no difference in creating server datagrams an client datagrams. - You can even use a zero-constructed address for your server datagram - as long as you tell the client where you're listening (eg -- by writting - into a file or some such). - */ - if( dgram.open(local) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "datagram open"),-1); - } - - /* - Yep. We've seen this before too... - */ - char buf[512]; + /* Notice that this looks a lot like the server application. + There's no difference in creating server datagrams an client + datagrams. You can even use a zero-constructed address for your + server datagram as long as you tell the client where you're + listening (eg -- by writting into a file or some such). */ + if (dgram.open (local) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "datagram open"), + -1); - /* - Ok, now we're doing something different. - */ - sprintf(buf, "Hello World!"); + /* Yep. We've seen this before too... */ + char buf[BUFSIZ]; - /* - Just like sending a telegram, we have to address our datagram. - Here, we create an address object at the desired port on the - chosen host. To keep us from crashing, we'll provide a default - host name if we aren't given one. - */ - ACE_INET_Addr remote(PORT, argc > 1 ? argv[1] : "localhost" ); + /* Ok, now we're doing something different. */ + sprintf (buf, "Hello World!"); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Sending (%s) to the server.\n",buf)); - /* - Now we send our buffer of stuff to the remote address. This is - just exactly what the server did after receiving a client message. - Datagrams are rather orthogonal that way: they don't generally make - much of a fuss about being either client or server. - */ - if( dgram.send(buf,strlen(buf)+1,remote) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"),-1); - } + /* Just like sending a telegram, we have to address our datagram. + Here, we create an address object at the desired port on the + chosen host. 
To keep us from crashing, we'll provide a default + host name if we aren't given one. */ + ACE_INET_Addr remote (PORT, + argc > 1 ? argv[1] : "localhost"); - /* - Now we've turned around and put ourselves into "server mode" by - invoking the recv() method. We know our server is going to send - us something, so we hang out here and wait for it. Because we - know datagrams are unreliable, there is a chance that the server - will respond but we won't hear. You might consider providing a - timeout on the recv() in that case. If recv() fails due to timeout - it will return -1 and you can then resend your query and attempt - the recv() again. + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Sending (%s) to the server.\n", + buf)); + /* Now we send our buffer of stuff to the remote address. This is + just exactly what the server did after receiving a client message. + Datagrams are rather orthogonal that way: they don't generally + make much of a fuss about being either client or server. */ + if (dgram.send (buf, + ACE_OS::strlen (buf) + 1, + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "send"), + -1); - Like the server application, we have to give the recv() an - uninitialized addr object so that we can find out who is talking - back to us. - */ - if( dgram.recv(buf,sizeof(buf),remote) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"),-1); - } + /* Now we've turned around and put ourselves into "server mode" by + invoking the recv() method. We know our server is going to send + us something, so we hang out here and wait for it. Because we + know datagrams are unreliable, there is a chance that the server + will respond but we won't hear. You might consider providing a + timeout on the recv() in that case. If recv() fails due to + timeout it will return -1 and you can then resend your query and + attempt the recv() again. - /* - Find out what the server had to say. - */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server said: %s\n",buf)); + Like the server application, we have to give the recv() an + uninitialized addr object so that we can find out who is talking + back to us. */ + if (dgram.recv (buf, + sizeof (buf), + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "recv"), + -1); - /* - Using the "remote" object instance, find out where the server lives. - We could then save this address and use directed datagrams to chat - with the server for a while. - */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server can be found at: (%s:%d)\n", - remote.get_host_name(), PORT )); + /* Find out what the server had to say. */ + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) The server said: %s\n", + buf)); - return(0); + /* Using the "remote" object instance, find out where the server + lives. We could then save this address and use directed datagrams + to chat with the server for a while. */ + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) The server can be found at: (%s:%d)\n", + remote.get_host_name(), + PORT)); + + return 0; } diff --git a/docs/tutorials/008/page02.html b/docs/tutorials/008/page02.html index 1b5b79c19a2..0a95d1a2be7 100644 --- a/docs/tutorials/008/page02.html +++ b/docs/tutorials/008/page02.html @@ -25,133 +25,120 @@ it responds to.  We'll tackle that issue in the next tutorial though...


-
 // $Id$
 
-/*
-   Our datagram server will, of course, need to create a datagram.
-   We'll also need an address object so that we know where to listen.
- */
+/* Our datagram server will, of course, need to create a datagram.
+   We'll also need an address object so that we know where to listen.  */
 #include "ace/SOCK_Dgram.h"
 #include "ace/INET_Addr.h"
 
-/*
-   Use the typical TCP/IP port address for receiving datagrams.
- */
+/* Use the typical TCP/IP port address for receiving datagrams.  */
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main(int,char**)
+int 
+main (int, char**)
 {
-    /*
-       This is where we'll listen for datagrams coming from the
-       clients.  We'll give this address to the open() method
-       below to enable the listener.
-     */
-    ACE_INET_Addr  local(PORT);
-
-    /*
-       A simply constructed datagram that we'll listen with.
-     */
-    ACE_SOCK_Dgram dgram;
-
-    /*
-       Like most ACE objects, the datagram has to be opened before
-       it can be uses.  Of course, -1 on failure.
-
-       A datagram will fail to open if there is already a datagram
-       listening at the port we've chosen.  It *is* OK to open
-       a datagram at a port where there is an ACE_SOCK_Stream
-       though.  This is because datagrams are UDP and SOCK_Stream
-       is TCP and the two don't cross paths.
-     */
-    if( dgram.open(local) == -1 )
-    {
-        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"),-1);
-    }
-
-    /*
-       Create a simple buffer to receive the data.  You generally need
-       to provide a buffer big enough for the largest datagram you
-       expect to receive.  Some platforms will let you read a little
-       and then some more later but other platforms will throw out
-       whatever part of the datagram you don't get with the first
-       read.  (This is on a per-datagram basis BTW.)  The theoretical
-       limit on a datagram is about 64k.  The realistic limit (because
-       of routers & such) is much smaller.  Choose your buffer size
-       based on your application's needs.
-     */
-    char buf[512];
-
-    /*
-       Unlike ACE_SOCK_Stream, datagrams are unconnected.  That is,
-       there is no "virtual circuit" between server and client.
-       Because of this, the server has to provide a placeholder
-       for the OS to fill in the source (client) address information
-       on the recv.  You can initialize this INET_Addr to anything,
-       it will be overwritten when the data arrives.
-     */
-    ACE_INET_Addr remote;
-
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n"));
-
-    /*
-       Receive datagrams as long as we're able.
-     */
-    while( dgram.recv(buf,sizeof(buf),remote) != -1 )
+  /* This is where we'll listen for datagrams coming from the clients.
+    We'll give this address to the open() method below to enable the
+    listener.  */
+  ACE_INET_Addr local (PORT);
+
+  /* A simply constructed datagram that we'll listen with.  */
+  ACE_SOCK_Dgram dgram;
+
+  /* Like most ACE objects, the datagram has to be opened before it
+    can be used.  Of course, open() returns -1 on failure.
+
+    A datagram will fail to open if there is already a datagram
+    listening at the port we've chosen.  It *is* OK to open a datagram
+    at a port where there is an ACE_SOCK_Stream though.  This is
+    because datagrams are UDP and SOCK_Stream is TCP and the two don't
+    cross paths.  */
+  if (dgram.open (local) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Create a simple buffer to receive the data.  You generally need
+    to provide a buffer big enough for the largest datagram you expect
+    to receive.  Some platforms will let you read a little and then
+    some more later but other platforms will throw out whatever part
+    of the datagram you don't get with the first read.  (This is on a
+    per-datagram basis BTW.)  The theoretical limit on a datagram is
+    about 64k.  The realistic limit (because of routers & such) is
+    much smaller.  Choose your buffer size based on your application's
+    needs.  */
+  char buf[BUFSIZ];
+
+  /* Unlike ACE_SOCK_Stream, datagrams are unconnected.  That is,
+    there is no "virtual circuit" between server and client.  Because
+    of this, the server has to provide a placeholder for the OS to
+    fill in the source (client) address information on the recv.  You
+    can initialize this INET_Addr to anything; it will be overwritten
+    when the data arrives.  */
+  ACE_INET_Addr remote;
+
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) starting up server daemon\n"));
+
+  /* Receive datagrams as long as we're able.  */
+  while (dgram.recv (buf,
+                     sizeof (buf),
+                     remote) != -1)
     {
-        /*
-           Display a brief message about our progress.  Notice how we
-           use the 'remote' object to display the address of the client.
-           With an ACE_SOCK_Stream we used get_remote_addr() to get the
-           address the socket is connected to.  Because datagrams are
-           unconnected, we use the addr object provided to recv().
-         */
-        ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data (%s) from client (%s)\n", buf, remote.get_host_name()));
-
-        /*
-           To respond to the client's query, we have to become a client
-           ourselves.  To do so, we need an anonymous local address from
-           which we'll send the response and a datagram in which to send
-           it.  (An anonymous address is simply one where we let the OS
-           choose a port for us.  We really don't care what it is.
-         */
-        ACE_INET_Addr  local((u_short)0);
-        ACE_SOCK_Dgram client;
-
-        /*
-           Open up our response datagram as always.
-         */
-        if( client.open(local) == -1 )
+      /* Display a brief message about our progress.  Notice how we
+        use the 'remote' object to display the address of the client.
+        With an ACE_SOCK_Stream we used get_remote_addr() to get the
+        address the socket is connected to.  Because datagrams are
+        unconnected, we use the addr object provided to recv().  */
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) Data (%s) from client (%s)\n",
+                  buf,
+                  remote.get_host_name ()));
+
+      /* To respond to the client's query, we have to become a client
+        ourselves.  To do so, we need an anonymous local address from
+        which we'll send the response and a datagram in which to send
+        it.  (An anonymous address is simply one where we let the OS
+        choose a port for us.  We really don't care what it is.)  */
+      ACE_INET_Addr local ((u_short) 0);
+      ACE_SOCK_Dgram client;
+
+      /* Open up our response datagram as always.  */
+      if (client.open (local) == -1)
         {
-            ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "client open"),-1);
-            return(0);
+          ACE_ERROR_RETURN ((LM_ERROR,
+                             "%p\n",
+                             "client open"),
+                            -1);
         }
 
-        /*
-           Build a witty response...
-         */
-        sprintf(buf,"I am here");
-
-        /*
-           and send it to the client.  Notice the symmetry with the recv()
-           method.  Again, the unconnected nature of datagrams forces
-           us to specify an address object with each read/write operation.
-           In the case of read (recv()) that's where the OS stuffs the
-           address of the datagram sender.  In the case of write (send())
-           that we're doing here, the address is where we want the network
-           to deliver the data.
-
-           Of course, we're assuming that the client will be listening
-           for our reply...
-         */
-        if( client.send(buf,strlen(buf)+1,remote) == -1 )
-        {
-            ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"),-1);
-            return(0);
-        }
+      /* Build a witty response...  */
+      sprintf (buf,
+               "I am here");
+
+      /* and send it to the client.  Notice the symmetry with the
+        recv() method.  Again, the unconnected nature of datagrams
+        forces us to specify an address object with each read/write
+        operation.  In the case of read (recv()) that's where the OS
+        stuffs the address of the datagram sender.  In the case of
+        write (send()) that we're doing here, the address is where we
+        want the network to deliver the data.
+
+        Of course, we're assuming that the client will be listening
+        for our reply...  */
+      if (client.send (buf,
+                       ACE_OS::strlen (buf) + 1,
+                       remote) == -1)
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           "%p\n",
+                           "send"),
+                          -1);
     }
 
-    return(0);
+  return 0;
 }
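+
+/* A possible simplification, sketched here only: a UDP socket is
+   bidirectional, so the server could reply straight from the datagram
+   it is already listening on instead of opening a throw-away "client"
+   socket on every pass through the loop.  The helper below isn't part
+   of the tutorial; its name is illustrative only.  */
+
+int
+reply_from_listener (ACE_SOCK_Dgram &dgram,
+                     const ACE_INET_Addr &remote,
+                     const char *buf)
+{
+  /* send() addresses each outgoing datagram explicitly, so the same
+     socket we recv() on can carry the reply.  */
+  if (dgram.send (buf,
+                  ACE_OS::strlen (buf) + 1,
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "send"),
+                      -1);
+
+  return 0;
+}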
 

diff --git a/docs/tutorials/008/page03.html b/docs/tutorials/008/page03.html index 5ba8c36a6a2..e33c396ec63 100644 --- a/docs/tutorials/008/page03.html +++ b/docs/tutorials/008/page03.html @@ -25,123 +25,112 @@ could be written this way.


-
 // $Id$
 
 #include "ace/SOCK_Dgram.h"
 #include "ace/INET_Addr.h"
 
-/*
-   Once again, we use the default server port.  In a "real" system,
-   the server's port (or ports) would be published in some way so
-   that clients would know where to "look".  We could even add entries
-   to the operating system's services file and use a service name
-   instead of a number.  We'll come back to that in some other tutorial
-   though.  For now, let's stay simple.
- */
+/* Once again, we use the default server port.  In a "real" system,
+   the server's port (or ports) would be published in some way so that
+   clients would know where to "look".  We could even add entries to
+   the operating system's services file and use a service name instead
+   of a number.  We'll come back to that in some other tutorial
+   though.  For now, let's stay simple.  */
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
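+
+/* (As an aside: if we had published the port under a name in the
+   system's services file, we could build the address from that name
+   instead of a number.  A hedged sketch -- the service and host names
+   below are made up for illustration:
+
+     ACE_INET_Addr addr ("my_service", "some.host.com", "udp");
+
+   See the ACE_INET_Addr documentation for the exact constructor.)  */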
 
-/*
-   Our goal here is to develop a client that can send a datagram to
-   a server running on a known host.  We'll use a command-line argument
-   to specify the hostname instead of hard-coding it.
- */
-int main(int argc,char *argv[] )
+/* Our goal here is to develop a client that can send a datagram to a
+   server running on a known host.  We'll use a command-line argument
+   to specify the hostname instead of hard-coding it.  */
+int 
+main (int argc, char *argv[])
 {
-	/*
-	   All datagrams must have a point of origin.  Since we intend to
-	   transmit instead of receive, we initialize an address with zero
-	   and let the OS choose a port for us.  We could have chosen our
-	   own value between 1025 and 65535 as long as it isn't already in
-       use.
-
-       The biggest difference between client and server when datagrams 
-       are used is the fact that servers tend to have a known/fixed
-       address at which they listen and clients tend to have arbitrary 
-       addresses assigned by the OS.
-	 */
-	ACE_INET_Addr  local((u_short)0);
-
-	/*
-	   And here is our datagram object.
-	 */
-	ACE_SOCK_Dgram dgram;
+  /* All datagrams must have a point of origin.  Since we intend to
+    transmit instead of receive, we initialize an address with zero
+    and let the OS choose a port for us.  We could have chosen our own
+    value between 1025 and 65535 as long as it isn't already in use.
+
+    The biggest difference between client and server when datagrams
+    are used is the fact that servers tend to have a known/fixed
+    address at which they listen and clients tend to have arbitrary
+    addresses assigned by the OS.  */
+  ACE_INET_Addr local ((u_short) 0);
+
+  /* And here is our datagram object.  */
+  ACE_SOCK_Dgram dgram;
 	
-	/*
-	   Notice that this looks a lot like the server application.  There's
-	   no difference in creating server datagrams an client datagrams.
-	   You can even use a zero-constructed address for your server datagram
-	   as long as you tell the client where you're listening (eg -- by writting
-	   into a file or some such).
-	 */
-	if( dgram.open(local) == -1 )
-	{
-		ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "datagram open"),-1);
-	}
-
-	/*
-	   Yep.  We've seen this before too...
-	 */
-	char buf[512];
-
-	/*
-	   Ok, now we're doing something different.
-	 */
-	sprintf(buf, "Hello World!");
-
-	/*
-	   Just like sending a telegram, we have to address our datagram.
-	   Here, we create an address object at the desired port on the
-	   chosen host.  To keep us from crashing, we'll provide a default
-	   host name if we aren't given one.
-	 */
-	ACE_INET_Addr  remote(PORT, argc > 1 ? argv[1] : "localhost" );
-
-	ACE_DEBUG ((LM_DEBUG, "(%P|%t) Sending (%s) to the server.\n",buf));
-	/*
-	    Now we send our buffer of stuff to the remote address.  This is
-	    just exactly what the server did after receiving a client message.
-	    Datagrams are rather orthogonal that way:  they don't generally make
-	    much of a fuss about being either client or server.
-	 */
-	if( dgram.send(buf,strlen(buf)+1,remote) == -1 )
-	{
-		ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"),-1);
-	}
-
-	/*
-	   Now we've turned around and put ourselves into "server mode" by 
-	   invoking the recv() method.  We know our server is going to send
-	   us something, so we hang out here and wait for it.  Because we
-	   know datagrams are unreliable, there is a chance that the server
-	   will respond but we won't hear.  You might consider providing a
-	   timeout on the recv() in that case.  If recv() fails due to timeout
-	   it will return -1 and you can then resend your query and attempt
-	   the recv() again.
-
-	   Like the server application, we have to give the recv() an 
-	   uninitialized addr object so that we can find out who is talking
-	   back to us.
-	 */
-	if( dgram.recv(buf,sizeof(buf),remote) == -1 )
-	{
-		ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"),-1);
-	}
-
-	/*
-	   Find out what the server had to say.
-	 */
-	ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server said:  %s\n",buf));
-
-	/*
-	   Using the "remote" object instance, find out where the server lives.
-	   We could then save this address and use directed datagrams to chat
-	   with the server for a while.
-	 */
-	ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server can be found at:  (%s:%d)\n",
-		remote.get_host_name(), PORT ));
-
-	return(0);
+  /* Notice that this looks a lot like the server application.
+    There's no difference in creating server datagrams and client
+    datagrams.  You can even use a zero-constructed address for your
+    server datagram as long as you tell the client where you're
+    listening (e.g., by writing it into a file or some such).  */
+  if (dgram.open (local) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "datagram open"),
+                      -1);
+
+  /* Yep.  We've seen this before too...  */
+  char buf[BUFSIZ];
+
+  /* Ok, now we're doing something different.  */
+  sprintf (buf, "Hello World!");
+
+  /* Just like sending a telegram, we have to address our datagram.
+    Here, we create an address object at the desired port on the
+    chosen host.  To keep us from crashing, we'll provide a default
+    host name if we aren't given one.  */
+  ACE_INET_Addr remote (PORT,
+                        argc > 1 ? argv[1] : "localhost");
+
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Sending (%s) to the server.\n",
+              buf));
+  /* Now we send our buffer of stuff to the remote address.  This is
+    just exactly what the server did after receiving a client message.
+    Datagrams are rather orthogonal that way: they don't generally
+    make much of a fuss about being either client or server.  */
+  if (dgram.send (buf,
+                  ACE_OS::strlen (buf) + 1,
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "send"),
+                      -1);
+
+  /* Now we've turned around and put ourselves into "server mode" by
+    invoking the recv() method.  We know our server is going to send
+    us something, so we hang out here and wait for it.  Because we
+    know datagrams are unreliable, there is a chance that the server
+    will respond but we won't hear.  You might consider providing a
+    timeout on the recv() in that case.  If recv() fails due to
+    timeout it will return -1 and you can then resend your query and
+    attempt the recv() again.
+
+    Like the server application, we have to give the recv() an
+    uninitialized addr object so that we can find out who is talking
+    back to us.  */
+  if (dgram.recv (buf,
+                  sizeof (buf),
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "recv"),
+                      -1);
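+
+  /* (A sketch of that bounded wait, using the timeout-taking recv()
+    overload that tutorial 009 uses; two seconds is an arbitrary
+    choice:
+
+      ACE_Time_Value timeout (2, 0);
+      if (dgram.recv (buf, sizeof (buf), remote, 0, &timeout) == -1)
+        ...the reply never arrived, so resend the query or give up...
+
+    We stick with the blocking recv() above to keep this page simple.)  */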
+
+  /* Find out what the server had to say.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) The server said:  %s\n",
+              buf));
+
+  /* Using the "remote" object instance, find out where the server
+    lives.  We could then save this address and use directed datagrams
+    to chat with the server for a while.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) The server can be found at:  (%s:%d)\n",
+              remote.get_host_name (),
+              PORT));
+  
+  return 0;
 }
 

diff --git a/docs/tutorials/008/page04.html b/docs/tutorials/008/page04.html index 05b28042b63..1d3bd52361d 100644 --- a/docs/tutorials/008/page04.html +++ b/docs/tutorials/008/page04.html @@ -25,7 +25,6 @@ subnet you're a part of.

I've only commented the parts that are different from the directed_client.


-
 // $Id$
 
 #include "ace/SOCK_Dgram_Bcast.h"
@@ -33,69 +32,78 @@ subnet you're a part of.
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main(int argc,char *argv[] )
+int 
+main (int argc, char *argv[])
 {
-	ACE_INET_Addr  local((u_short)0);
-
-	/*
-	   Instead of creating the ACE_SOCK_Dgram we created last time,
-	   we'll create an ACE_SOCK_Dgram_Bcast.  "Bcast" means, of course,
-	   "Broadcast".  This ACE object is clever enough to go out to the
-	   OS and find all of the network interfaces.  When you send()
-	   on a Dgram_Bcast, it will send the datagram out on all of those
-	   interfaces.  This is quiet handy if you do it on a multi-homed
-	   host that plays router...
-	 */
-	ACE_SOCK_Dgram_Bcast dgram;
-
-	if( dgram.open(local) == -1 )
-	{
-		ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "datagram open"),-1);
-	}
-
-	char buf[512];
-
-	sprintf(buf, "Hello World!");
-
-	/*
-	   The only other difference between us and the directed client
-	   is that we don't specify a host to receive the datagram.
-	   Instead, we use the magic value "INADDR_BROADCAST".  All hosts
-	   are obliged to respond to datagrams directed to this address
-	   the same as they would to datagrams sent to their hostname.
-
-	   Remember, the Dgram_Bcast will send a datagram to all interfaces
-	   on the host.  That's true even if the address is for a specific
-	   host (and the host address makes sense for the interface).
-	   The real power is in using an INADDR_BROADCAST addressed datagram
-	   against all interfaces.
-	 */
-
-	ACE_INET_Addr  remote(PORT,INADDR_BROADCAST);
-
-	ACE_DEBUG ((LM_DEBUG, "(%P|%t) Sending (%s) to the server.\n",buf));
-
-	if( dgram.send(buf,strlen(buf)+1,remote) == -1 )
-	{
-		ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"),-1);
-	}
-
-	if( dgram.recv(buf,sizeof(buf),remote) == -1 )
-	{
-		ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"),-1);
-	}
-
-	ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server said:  %s\n",buf));
-
-	/*
-	   Using the "remote" object instance, find out where the server lives.
-	   We could then save this address and use directed datagrams to chat
-	   with the server for a while.
-	 */
-	ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server can be found at:  (%s:%d)\n",
-		remote.get_host_name(), PORT ));
-
-	return(0);
+  ACE_INET_Addr local ((u_short) 0);
+
+  /* Instead of creating the ACE_SOCK_Dgram we created last time,
+    we'll create an ACE_SOCK_Dgram_Bcast.  "Bcast" means, of course,
+    "Broadcast".  This ACE object is clever enough to go out to the OS
+    and find all of the network interfaces.  When you send() on a
+    Dgram_Bcast, it will send the datagram out on all of those
+    interfaces.  This is quite handy if you do it on a multi-homed
+    host that plays router...  */
+  ACE_SOCK_Dgram_Bcast dgram;
+
+  if (dgram.open (local) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "datagram open"),
+                      -1);
+
+  char buf[BUFSIZ];
+
+  sprintf (buf, "Hello World!");
+
+  /* The only other difference between us and the directed client is
+    that we don't specify a host to receive the datagram.  Instead, we
+    use the magic value "INADDR_BROADCAST".  All hosts are obliged to
+    respond to datagrams directed to this address the same as they
+    would to datagrams sent to their hostname.
+
+    Remember, the Dgram_Bcast will send a datagram to all interfaces
+    on the host.  That's true even if the address is for a specific
+    host (and the host address makes sense for the interface).  The
+    real power is in using an INADDR_BROADCAST addressed datagram
+    against all interfaces.  */
+
+  ACE_INET_Addr remote (PORT,
+                        INADDR_BROADCAST);
+
+  ACE_DEBUG ((LM_DEBUG, 
+              "(%P|%t) Sending (%s) to the server.\n",
+              buf));
+
+  if (dgram.send (buf,
+                  ACE_OS::strlen (buf) + 1,
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "send"),
+                      -1);
+
+  if (dgram.recv (buf,
+                  sizeof (buf),
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "recv"),
+                      -1);
+
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) The server said:  %s\n",
+              buf));
+
+  /* Using the "remote" object instance, find out where the server
+    lives.  We could then save this address and use directed datagrams
+    to chat with the server for a while.  */
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) The server can be found at:  (%s:%d)\n",
+              remote.get_host_name (),
+              PORT));
+
+  return 0;
 }
 

diff --git a/docs/tutorials/008/server.cpp b/docs/tutorials/008/server.cpp index cdc7e2aaded..629c2dd42c8 100644 --- a/docs/tutorials/008/server.cpp +++ b/docs/tutorials/008/server.cpp @@ -1,128 +1,115 @@ - // $Id$ -/* - Our datagram server will, of course, need to create a datagram. - We'll also need an address object so that we know where to listen. - */ +/* Our datagram server will, of course, need to create a datagram. + We'll also need an address object so that we know where to listen. */ #include "ace/SOCK_Dgram.h" #include "ace/INET_Addr.h" -/* - Use the typical TCP/IP port address for receiving datagrams. - */ +/* Use the typical TCP/IP port address for receiving datagrams. */ static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -int main(int,char**) +int +main (int, char**) { - /* - This is where we'll listen for datagrams coming from the - clients. We'll give this address to the open() method - below to enable the listener. - */ - ACE_INET_Addr local(PORT); - - /* - A simply constructed datagram that we'll listen with. - */ - ACE_SOCK_Dgram dgram; - - /* - Like most ACE objects, the datagram has to be opened before - it can be uses. Of course, -1 on failure. - - A datagram will fail to open if there is already a datagram - listening at the port we've chosen. It *is* OK to open - a datagram at a port where there is an ACE_SOCK_Stream - though. This is because datagrams are UDP and SOCK_Stream - is TCP and the two don't cross paths. - */ - if( dgram.open(local) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"),-1); - } - - /* - Create a simple buffer to receive the data. You generally need - to provide a buffer big enough for the largest datagram you - expect to receive. Some platforms will let you read a little - and then some more later but other platforms will throw out - whatever part of the datagram you don't get with the first - read. (This is on a per-datagram basis BTW.) The theoretical - limit on a datagram is about 64k. The realistic limit (because - of routers & such) is much smaller. Choose your buffer size - based on your application's needs. - */ - char buf[512]; - - /* - Unlike ACE_SOCK_Stream, datagrams are unconnected. That is, - there is no "virtual circuit" between server and client. - Because of this, the server has to provide a placeholder - for the OS to fill in the source (client) address information - on the recv. You can initialize this INET_Addr to anything, - it will be overwritten when the data arrives. - */ - ACE_INET_Addr remote; - - ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting up server daemon\n")); - - /* - Receive datagrams as long as we're able. - */ - while( dgram.recv(buf,sizeof(buf),remote) != -1 ) + /* This is where we'll listen for datagrams coming from the clients. + We'll give this address to the open() method below to enable the + listener. */ + ACE_INET_Addr local (PORT); + + /* A simply constructed datagram that we'll listen with. */ + ACE_SOCK_Dgram dgram; + + /* Like most ACE objects, the datagram has to be opened before it + can be uses. Of course, -1 on failure. + + A datagram will fail to open if there is already a datagram + listening at the port we've chosen. It *is* OK to open a datagram + at a port where there is an ACE_SOCK_Stream though. This is + because datagrams are UDP and SOCK_Stream is TCP and the two don't + cross paths. */ + if (dgram.open (local) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); + + /* Create a simple buffer to receive the data. 
You generally need + to provide a buffer big enough for the largest datagram you expect + to receive. Some platforms will let you read a little and then + some more later but other platforms will throw out whatever part + of the datagram you don't get with the first read. (This is on a + per-datagram basis BTW.) The theoretical limit on a datagram is + about 64k. The realistic limit (because of routers & such) is + much smaller. Choose your buffer size based on your application's + needs. */ + char buf[BUFSIZ]; + + /* Unlike ACE_SOCK_Stream, datagrams are unconnected. That is, + there is no "virtual circuit" between server and client. Because + of this, the server has to provide a placeholder for the OS to + fill in the source (client) address information on the recv. You + can initialize this INET_Addr to anything, it will be overwritten + when the data arrives. */ + ACE_INET_Addr remote; + + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) starting up server daemon\n")); + + /* Receive datagrams as long as we're able. */ + while (dgram.recv (buf, + sizeof (buf), + remote) != -1) { - /* - Display a brief message about our progress. Notice how we - use the 'remote' object to display the address of the client. - With an ACE_SOCK_Stream we used get_remote_addr() to get the - address the socket is connected to. Because datagrams are - unconnected, we use the addr object provided to recv(). - */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data (%s) from client (%s)\n", buf, remote.get_host_name())); - - /* - To respond to the client's query, we have to become a client - ourselves. To do so, we need an anonymous local address from - which we'll send the response and a datagram in which to send - it. (An anonymous address is simply one where we let the OS - choose a port for us. We really don't care what it is. - */ - ACE_INET_Addr local((u_short)0); - ACE_SOCK_Dgram client; - - /* - Open up our response datagram as always. - */ - if( client.open(local) == -1 ) + /* Display a brief message about our progress. Notice how we + use the 'remote' object to display the address of the client. + With an ACE_SOCK_Stream we used get_remote_addr() to get the + address the socket is connected to. Because datagrams are + unconnected, we use the addr object provided to recv(). */ + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Data (%s) from client (%s)\n", + buf, + remote.get_host_name ())); + + /* To respond to the client's query, we have to become a client + ourselves. To do so, we need an anonymous local address from + which we'll send the response and a datagram in which to send + it. (An anonymous address is simply one where we let the OS + choose a port for us. We really don't care what it is. */ + ACE_INET_Addr local ((u_short) 0); + ACE_SOCK_Dgram client; + + /* Open up our response datagram as always. */ + if (client.open (local) == -1) { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "client open"),-1); - return(0); + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "client open"), + -1); + return 0; } - /* - Build a witty response... - */ - sprintf(buf,"I am here"); - - /* - and send it to the client. Notice the symmetry with the recv() - method. Again, the unconnected nature of datagrams forces - us to specify an address object with each read/write operation. - In the case of read (recv()) that's where the OS stuffs the - address of the datagram sender. In the case of write (send()) - that we're doing here, the address is where we want the network - to deliver the data. - - Of course, we're assuming that the client will be listening - for our reply... 
- */ - if( client.send(buf,strlen(buf)+1,remote) == -1 ) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"),-1); - return(0); - } + /* Build a witty response... */ + sprintf (buf, + "I am here"); + + /* and send it to the client. Notice the symmetry with the + recv() method. Again, the unconnected nature of datagrams + forces us to specify an address object with each read/write + operation. In the case of read (recv()) that's where the OS + stuffs the address of the datagram sender. In the case of + write (send()) that we're doing here, the address is where we + want the network to deliver the data. + + Of course, we're assuming that the client will be listening + for our reply... */ + if (client.send (buf, + ACE_OS::strlen (buf) + 1, + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "send"), + -1); } - return(0); + return 0; } diff --git a/docs/tutorials/009/broadcast_client.cpp b/docs/tutorials/009/broadcast_client.cpp index 76ff454d066..866e90c2588 100644 --- a/docs/tutorials/009/broadcast_client.cpp +++ b/docs/tutorials/009/broadcast_client.cpp @@ -1,4 +1,3 @@ - // $Id$ #include "ace/SOCK_Dgram_Bcast.h" @@ -6,34 +5,42 @@ static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -int main (int argc, char *argv[]) +int +main (int argc, char *argv[]) { ACE_INET_Addr local ((u_short) 0); ACE_INET_Addr remote (PORT, INADDR_BROADCAST); ACE_SOCK_Dgram_Bcast dgram; if (dgram.open (local) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1); - } - - char buf[512]; - - sprintf (buf, argc > 1 ? argv[1] : "Hello World!"); - - if (dgram.send (buf, strlen (buf) + 1, remote) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1); - } - + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); + char buf[BUFSIZ]; + + sprintf (buf, + argc > 1 ? argv[1] : "Hello World!"); + if (dgram.send (buf, + ACE_OS::strlen (buf) + 1, + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "send"), + -1); ACE_Time_Value timeout (2, 0); - if (dgram.recv (buf, sizeof (buf), remote, 0, &timeout) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"), -1); - } - - ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server at (%s) said (%s)\n", - remote.get_host_name (), buf)); - - return (0); + if (dgram.recv (buf, + sizeof (buf), + remote, + 0, + &timeout) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "recv"), + -1); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) The server at (%s) said (%s)\n", + remote.get_host_name (), + buf)); + return 0; } diff --git a/docs/tutorials/009/directed_client.cpp b/docs/tutorials/009/directed_client.cpp index c90ee80599d..31ff8481ea7 100644 --- a/docs/tutorials/009/directed_client.cpp +++ b/docs/tutorials/009/directed_client.cpp @@ -1,4 +1,3 @@ - // $Id$ #include "ace/SOCK_Dgram.h" @@ -6,57 +5,64 @@ static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -int main (int argc, char *argv[]) +int +main (int argc, char *argv[]) { ACE_INET_Addr local ((u_short) 0); - ACE_INET_Addr remote (PORT, argc > 1 ? argv[1] : "localhost"); + ACE_INET_Addr remote (PORT, + argc > 1 ? argv[1] : "localhost"); ACE_SOCK_Dgram dgram; if (dgram.open (local) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1); - } - - char buf[512]; - - /* - In order to conform to the "protocol" required by the server, - we allow the user to specify a signature. A default matching - the server's default is also available. - */ - sprintf (buf, argc > 2 ? 
argv[2] : "Hello World!"); - - if (dgram.send (buf, strlen (buf) + 1, remote) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1); - } - - /* - Because we may have sent a signature that the server doesn't - honor, we have to have some way to get out of the recv(). - Most ACE objects that have potential for infinite blocking - give you the option of providing a timeout. recv() is no - exception. Here, we construct an ACE_Time_Value representing - two seconds and no micro-seconds. If recv() fails to get - a response within the two seconds, it will return -1. - */ + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); + + char buf[BUFSIZ]; + + /* In order to conform to the "protocol" required by the server, we + allow the user to specify a signature. A default matching the + server's default is also available. */ + sprintf (buf, + argc > 2 ? argv[2] : "Hello World!"); + + if (dgram.send (buf, + ACE_OS::strlen (buf) + 1, + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "send"), + -1); + + /* Because we may have sent a signature that the server doesn't + honor, we have to have some way to get out of the recv(). Most + ACE objects that have potential for infinite blocking give you the + option of providing a timeout. recv() is no exception. Here, we + construct an ACE_Time_Value representing two seconds and no + micro-seconds. If recv() fails to get a response within the two + seconds, it will return -1. */ ACE_Time_Value timeout (2, 0); - if (dgram.recv (buf, sizeof (buf), remote, 0, &timeout) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"), -1); - } + if (dgram.recv (buf, + sizeof (buf), + remote, + 0, + &timeout) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "recv"), + -1); - /* - Note: The fourth parameter to recv() is for flags. These flags + /* Note: The fourth parameter to recv() is for flags. These flags are passed directly to the underlying recv() or recvfrom() system - call. For Linux, resonable values are: - MSG_OOB process out-of-band data - MSG_PEEK peek at incoming message (but leave it in the OS buffers) - MSG_WAITALL wait for full request or error - See your system documentation for the gory details. - */ + call. For Linux, resonable values are: MSG_OOB process + out-of-band data MSG_PEEK peek at incoming message (but leave it + in the OS buffers) MSG_WAITALL wait for full request or error See + your system documentation for the gory details. */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server said (%s)\n", buf)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) The server said (%s)\n", + buf)); - return (0); + return 0; } diff --git a/docs/tutorials/009/page02.html b/docs/tutorials/009/page02.html index 66801670f2b..46d7007f2a9 100644 --- a/docs/tutorials/009/page02.html +++ b/docs/tutorials/009/page02.html @@ -21,83 +21,84 @@ we add in just a bit of code to examine the datagram contents before responding.


-
 // $Id$
 
-/*
-   The actual datagram operations here are exactly the same as those used in
-   the previous tutorial.  What we've added is some logic that will prevent
-   this server from responding to just any old datagram.  I'll limit my
-   comments to those pieces of code.   
- */
+/* The actual datagram operations here are exactly the same as those
+   used in the previous tutorial.  What we've added is some logic that
+   will prevent this server from responding to just any old datagram.
+   I'll limit my comments to those pieces of code.  */
 
 #include "ace/SOCK_Dgram.h"
 #include "ace/INET_Addr.h"
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-/*
-   In order to be more selective, our server will be started with a
-   "signature".  If none is given, we'll use the one here instead.   
- */
+/* In order to be more selective, our server will be started with a
+   "signature".  If none is given, we'll use the one here instead.  */
 static const char *default_signature = "Hello World!";
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
   ACE_INET_Addr local (PORT);
   ACE_SOCK_Dgram dgram;
 
   if (dgram.open (local) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
 
-  char buf[512];
+  char buf[BUFSIZ];
   ACE_INET_Addr remote;
 
-  while (dgram.recv (buf, sizeof (buf), remote) != -1)
-  {
-    /*
-       What did the client say?   
-     */
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Received (%s) from (%s)\n", buf, remote.get_host_name ()));
-
-    /*
-       Use a simple string-op to decide if the client is one of our own.  Of
-       course, you could have sent numeric values or even a struct of data. For 
-       this simple exercise, however, strings are just fine.   
-     */
-    if (ACE_OS::strcmp (buf, argc > 1 ? argv[1] : default_signature))
+  while (dgram.recv (buf,
+                     sizeof (buf),
+                     remote) != -1)
     {
-      /*
-         If the client didn't say something we like then log it and move on.   
-       */
+      /* What did the client say?  */
       ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) Received (%s) from (%s)\n",
+                  buf,
+                  remote.get_host_name ()));
+
+      /* Use a simple string-op to decide if the client is one of our
+        own.  Of course, you could have sent numeric values or even a
+        struct of data. For this simple exercise, however, strings are
+        just fine.  */
+      if (ACE_OS::strcmp (buf,
+                          argc > 1 ? argv[1] : default_signature))
+        {
+          /* If the client didn't say something we like then log it
+            *and move on.  /
+          ACE_DEBUG ((LM_DEBUG,
 		      "(%P|%t) Client query does not match our signature (%s).  Response not sent.\n",
 		      argc > 1 ? argv[1] : default_signature));
+        }
+      else
+        {
+          /* As before, we respond to the client's query.  */
+
+          ACE_INET_Addr local ((u_short) 0);
+          ACE_SOCK_Dgram peer;
+          if (peer.open (local) == -1)
+            ACE_ERROR_RETURN ((LM_ERROR,
+                               "%p\n",
+                               "response open"),
+                              -1);
+          sprintf (buf,
+                   "I am here");
+          if (peer.send (buf,
+                         ACE_OS::strlen (buf) + 1,
+                         remote) == -1)
+            ACE_ERROR_RETURN ((LM_ERROR,
+                               "%p\n",
+                               "response send"),
+                              -1);
+        }
     }
-    else
-    {
-      /*
-         As before, we respond to the client's query.   
-       */
-
-      ACE_INET_Addr local ((u_short) 0);
-      ACE_SOCK_Dgram peer;
-      if (peer.open (local) == -1)
-      {
-         ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "response open"), -1);
-      }
-
-      sprintf (buf, "I am here");
-      if (peer.send (buf, strlen (buf) + 1, remote) == -1)
-      {
-         ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "response send"), -1);
-      }
-    }
-  }
 
-  return (0);
+  return 0;
 }
 

diff --git a/docs/tutorials/009/page03.html b/docs/tutorials/009/page03.html index e6fd2a41313..d21c0fb2fed 100644 --- a/docs/tutorials/009/page03.html +++ b/docs/tutorials/009/page03.html @@ -23,7 +23,6 @@ if the server doesn't like what we have to say.


-
 // $Id$
 
 #include "ace/SOCK_Dgram.h"
@@ -31,59 +30,66 @@ if the server doesn't like what we have to say.
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
   ACE_INET_Addr local ((u_short) 0);
-  ACE_INET_Addr remote (PORT, argc > 1 ? argv[1] : "localhost");
+  ACE_INET_Addr remote (PORT,
+                        argc > 1 ? argv[1] : "localhost");
   ACE_SOCK_Dgram dgram;
 
   if (dgram.open (local) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
-
-  char buf[512];
-
-  /*
-     In order to conform to the "protocol" required by the server,
-     we allow the user to specify a signature.  A default matching
-     the server's default is also available.
-   */
-  sprintf (buf, argc > 2 ? argv[2] : "Hello World!");
-
-  if (dgram.send (buf, strlen (buf) + 1, remote) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);
-  }
-
-  /*
-     Because we may have sent a signature that the server doesn't
-     honor, we have to have some way to get out of the recv().
-     Most ACE objects that have potential for infinite blocking
-     give you the option of providing a timeout.  recv() is no
-     exception.  Here, we construct an ACE_Time_Value representing
-     two seconds and no micro-seconds.  If recv() fails to get
-     a response within the two seconds, it will return -1.
-   */
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  char buf[BUFSIZ];
+
+  /* In order to conform to the "protocol" required by the server, we
+    allow the user to specify a signature.  A default matching the
+    server's default is also available.  */
+  sprintf (buf,
+           argc > 2 ? argv[2] : "Hello World!");
+
+  if (dgram.send (buf,
+                  ACE_OS::strlen (buf) + 1,
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "send"),
+                      -1);
+
+  /* Because we may have sent a signature that the server doesn't
+    honor, we have to have some way to get out of the recv().  Most
+    ACE objects that have potential for infinite blocking give you the
+    option of providing a timeout.  recv() is no exception.  Here, we
+    construct an ACE_Time_Value representing two seconds and no
+    micro-seconds.  If recv() fails to get a response within the two
+    seconds, it will return -1.  */
   ACE_Time_Value timeout (2, 0);
-  if (dgram.recv (buf, sizeof (buf), remote, 0, &timeout) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"), -1);
-  }
-
-  /*
-    Note: The fourth parameter to recv() is for flags.  These flags
+  if (dgram.recv (buf,
+                  sizeof (buf),
+                  remote,
+                  0,
+                  &timeout) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "recv"),
+                      -1);
+
+  /* Note: The fourth parameter to recv() is for flags.  These flags
     are passed directly to the underlying recv() or recvfrom() system
-    call.  For Linux, resonable values are:
-      MSG_OOB      process out-of-band data
-      MSG_PEEK     peek at incoming message (but leave it in the OS buffers)
-      MSG_WAITALL  wait for full request or error
-    See your system documentation for the gory details.
-   */
+    call.  For Linux, reasonable values are:
+      MSG_OOB      process out-of-band data
+      MSG_PEEK     peek at incoming message (but leave it in the OS buffers)
+      MSG_WAITALL  wait for full request or error
+    See your system documentation for the gory details.  */
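+
+  /* (A purely illustrative peek at a pending datagram, which would
+    leave the data in the OS buffers for a later recv(); the peek_buf
+    and peek_addr names are made up for this sketch:
+
+      char peek_buf[BUFSIZ];
+      ACE_INET_Addr peek_addr;
+      dgram.recv (peek_buf, sizeof (peek_buf), peek_addr, MSG_PEEK);
+
+    We don't actually do this here.)  */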
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server said (%s)\n", buf));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) The server said (%s)\n",
+              buf));
 
-  return (0);
+  return 0;
 }
 

diff --git a/docs/tutorials/009/page04.html b/docs/tutorials/009/page04.html index e9bcc1d8fd4..dacc50c0215 100644 --- a/docs/tutorials/009/page04.html +++ b/docs/tutorials/009/page04.html @@ -23,7 +23,6 @@ of the timeout variable passed to recv().


-
 // $Id$
 
 #include "ace/SOCK_Dgram_Bcast.h"
@@ -31,36 +30,44 @@ of the timeout variable passed to recv().
 
 static const u_short PORT = ACE_DEFAULT_SERVER_PORT;
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
   ACE_INET_Addr local ((u_short) 0);
   ACE_INET_Addr remote (PORT, INADDR_BROADCAST);
   ACE_SOCK_Dgram_Bcast dgram;
 
   if (dgram.open (local) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1);
-  }
-
-  char buf[512];
-
-  sprintf (buf, argc > 1 ? argv[1] : "Hello World!");
-
-  if (dgram.send (buf, strlen (buf) + 1, remote) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "send"), -1);
-  }
-
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+  char buf[BUFSIZ];
+
+  sprintf (buf,
+           argc > 1 ? argv[1] : "Hello World!");
+  if (dgram.send (buf,
+                  ACE_OS::strlen (buf) + 1,
+                  remote) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "send"),
+                      -1);
   ACE_Time_Value timeout (2, 0);
-  if (dgram.recv (buf, sizeof (buf), remote, 0, &timeout) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "recv"), -1);
-  }
-
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) The server at (%s) said (%s)\n",
-	      remote.get_host_name (), buf));
-
-  return (0);
+  if (dgram.recv (buf,
+                  sizeof (buf),
+                  remote,
+                  0,
+                  &timeout) == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "recv"),
+                      -1);
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) The server at (%s) said (%s)\n",
+	      remote.get_host_name (),
+              buf));
+  return 0;
 }
 


diff --git a/docs/tutorials/009/server.cpp b/docs/tutorials/009/server.cpp index e18488313de..fe994ae27fa 100644 --- a/docs/tutorials/009/server.cpp +++ b/docs/tutorials/009/server.cpp @@ -1,78 +1,79 @@ - // $Id$ -/* - The actual datagram operations here are exactly the same as those used in - the previous tutorial. What we've added is some logic that will prevent - this server from responding to just any old datagram. I'll limit my - comments to those pieces of code. - */ +/* The actual datagram operations here are exactly the same as those + used in the previous tutorial. What we've added is some logic that + will prevent this server from responding to just any old datagram. + I'll limit my comments to those pieces of code. */ #include "ace/SOCK_Dgram.h" #include "ace/INET_Addr.h" static const u_short PORT = ACE_DEFAULT_SERVER_PORT; -/* - In order to be more selective, our server will be started with a - "signature". If none is given, we'll use the one here instead. - */ +/* In order to be more selective, our server will be started with a + "signature". If none is given, we'll use the one here instead. */ static const char *default_signature = "Hello World!"; -int main (int argc, char *argv[]) +int +main (int argc, char *argv[]) { ACE_INET_Addr local (PORT); ACE_SOCK_Dgram dgram; if (dgram.open (local) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "open"), -1); - } + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); - char buf[512]; + char buf[BUFSIZ]; ACE_INET_Addr remote; - while (dgram.recv (buf, sizeof (buf), remote) != -1) - { - /* - What did the client say? - */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Received (%s) from (%s)\n", buf, remote.get_host_name ())); - - /* - Use a simple string-op to decide if the client is one of our own. Of - course, you could have sent numeric values or even a struct of data. For - this simple exercise, however, strings are just fine. - */ - if (ACE_OS::strcmp (buf, argc > 1 ? argv[1] : default_signature)) + while (dgram.recv (buf, + sizeof (buf), + remote) != -1) { - /* - If the client didn't say something we like then log it and move on. - */ + /* What did the client say? */ ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Received (%s) from (%s)\n", + buf, + remote.get_host_name ())); + + /* Use a simple string-op to decide if the client is one of our + own. Of course, you could have sent numeric values or even a + struct of data. For this simple exercise, however, strings are + just fine. */ + if (ACE_OS::strcmp (buf, + argc > 1 ? argv[1] : default_signature)) + { + /* If the client didn't say something we like then log it + *and move on. / + ACE_DEBUG ((LM_DEBUG, "(%P|%t) Client query does not match our signature (%s). Response not sent.\n", argc > 1 ? argv[1] : default_signature)); - } - else - { - /* - As before, we respond to the client's query. - */ - - ACE_INET_Addr local ((u_short) 0); - ACE_SOCK_Dgram peer; - if (peer.open (local) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "response open"), -1); - } + } + else + { + /* As before, we respond to the client's query. 
*/ - sprintf (buf, "I am here"); - if (peer.send (buf, strlen (buf) + 1, remote) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "response send"), -1); - } + ACE_INET_Addr local ((u_short) 0); + ACE_SOCK_Dgram peer; + if (peer.open (local) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "response open"), + -1); + sprintf (buf, + "I am here"); + if (peer.send (buf, + ACE_OS::strlen (buf) + 1, + remote) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "response send"), + -1); + } } - } - return (0); + return 0; } diff --git a/docs/tutorials/010/block.h b/docs/tutorials/010/block.h index 83c5c497b0e..7288c18724b 100644 --- a/docs/tutorials/010/block.h +++ b/docs/tutorials/010/block.h @@ -1,4 +1,3 @@ - // $Id$ #ifndef BLOCK_H @@ -10,29 +9,34 @@ # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ -/* - This simple ACE_Message_Block derivative will inform us of it's construction - and destruction. We'll use this to assure ourselves that we don't have any - memory leaks. In a real application, of course, this isn't necessary. - */ +/* This simple ACE_Message_Block derivative will inform us of it's + construction and destruction. We'll use this to assure ourselves + that we don't have any memory leaks. In a real application, of + course, this isn't necessary. */ class Block : public ACE_Message_Block { public: Block (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block ctor 0x%x\n", + (void *) this)); } Block (size_t size) - : ACE_Message_Block (size) + : ACE_Message_Block (size) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block ctor 0x%x\n", + (void *) this)); } - virtual ~ Block (void) + virtual ~Block (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block dtor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block dtor 0x%x\n", + (void *) this)); } }; -#endif +#endif /* BLOCK_H */ diff --git a/docs/tutorials/010/message_queue.cpp b/docs/tutorials/010/message_queue.cpp index a446bfa3706..1cf1b0814b7 100644 --- a/docs/tutorials/010/message_queue.cpp +++ b/docs/tutorials/010/message_queue.cpp @@ -1,92 +1,89 @@ - // $Id$ -/* - To illustrate the ACE_Message_Queue, we use a derivative of ACE_Task<>. We - also derive from ACE_Message_Block to show that we don't have memory leaks. - */ +/* To illustrate the ACE_Message_Queue, we use a derivative of + ACE_Task<>. We also derive from ACE_Message_Block to show that we + don't have memory leaks. */ #include "task.h" #include "block.h" -int run_test( int iterations, int threads ) +int +run_test (int iterations, + int threads) { - /* - Create and star an instance of our Task object. - */ - Task task; + /* Create and star an instance of our Task object. */ + Task task (threads); - if (task.start (threads) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "start"), -1); - } + if (task.open () == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); - /* - Give the threads a moment to open. This isn't really necessary but if we - don't we find that all of our blocks are constructed and enqueued before - any of the threads get created. Basically, the sleep() makes the output - look more interesting. - */ + /* Give the threads a moment to open. This isn't really necessary + but if we don't we find that all of our blocks are constructed and + enqueued before any of the threads get created. Basically, the + sleep() makes the output look more interesting. 
*/ ACE_OS::sleep (ACE_Time_Value (1)); int i; for (i = 0; i < iterations; ++i) - { - /* - Create a new message block to hold our data. Here, we ask for a block - that has 128 bytes of data space. - */ - Block *message = new Block (128); + { + /* Create a new message block to hold our data. Here, we ask + for a block that has 128 bytes of data space. */ + Block *message; + ACE_NEW_RETURN (message, + Block (128), + -1); - /* - Grab the "write pointer". This is a pointer into the data area where we - can write our data. After writting the data you have to increment the - wr_ptr() so that subsequent writes won't clobber what you've put there. - */ - ACE_OS::sprintf (message->wr_ptr (), "This is message %d.", i); - message->wr_ptr (strlen (message->rd_ptr ())); + /* Grab the "write pointer". This is a pointer into the data + area where we can write our data. After writting the data you + have to increment the wr_ptr() so that subsequent writes won't + clobber what you've put there. */ + ACE_OS::sprintf (message->wr_ptr (), + "This is message %d.", + i); + message->wr_ptr (ACE_OS::strlen (message->rd_ptr ())); - /* - Put the message block into the queue. One of the threads in the Task - object will pick up the block and "do work" on it. - */ - if (task.putq (message) == -1) - { - break; + /* Put the message block into the queue. One of the threads in + the Task object will pick up the block and "do work" on it. */ + if (task.putq (message) == -1) + break; } - } - /* - Once we're done, we have to signal the Task objects to shut down. There - are several choices including: - Send a message of zero length - Send a - message with a special content I don't like these choices because they're - likely to interfere with application logic. Instead, I use the message - type feature to send a message of type "hangup". The default type is - MB_DATA, so when the tasks get a MB_HANGUP type, they know to go away. - */ - Block *message = new Block (); + /* Once we're done, we have to signal the Task objects to shut + down. There are several choices including: - Send a message of + zero length - Send a message with a special content I don't like + these choices because they're likely to interfere with application + logic. Instead, I use the message type feature to send a message + of type "hangup". The default type is MB_DATA, so when the tasks + get a MB_HANGUP type, they know to go away. */ + Block *message; + + ACE_NEW_RETURN (message, + Block (), + -1); message->msg_type (ACE_Message_Block::MB_HANGUP); task.putq (message); - /* - Wait for the threads in our task object to go away. - */ + /* Wait for the threads in our task object to go away. */ task.wait (); - return(0); + return 0; } -int main (int argc, char *argv[]) +int +main (int argc, char *argv[]) { - /* - Set the number of iterations through our putq() loop and the number of - threads to use in our Task<> derivative. - */ + /* Set the number of iterations through our putq() loop and the + number of threads to use in our Task<> derivative. */ int iterations = argc > 1 ? atoi (argv[1]) : 9; int threads = argc > 2 ? 
atoi (argv[2]) : 2; - (void)run_test(iterations,threads); + un_test (iterations, + threads); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Application exiting\n")); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Application exiting\n")); - return(0); + return 0; } diff --git a/docs/tutorials/010/page02.html b/docs/tutorials/010/page02.html index 342f71e5e4c..1ce9bc64e46 100644 --- a/docs/tutorials/010/page02.html +++ b/docs/tutorials/010/page02.html @@ -19,97 +19,94 @@ We'll look first at main().
-
 // $Id$
 
-/*
-   To illustrate the ACE_Message_Queue, we use a derivative of ACE_Task<>.  We
-   also derive from ACE_Message_Block to show that we don't have memory leaks. 
- */
+/* To illustrate the ACE_Message_Queue, we use a derivative of
+   ACE_Task<>.  We also derive from ACE_Message_Block to show that we
+   don't have memory leaks.  */
 #include "task.h"
 #include "block.h"
 
-int run_test( int iterations, int threads )
+int 
+run_test (int iterations,
+          int threads)
 {
-  /*
-     Create and star an instance of our Task object.
-   */
-  Task task;
-
-  if (task.start (threads) == -1)
-  {
-    ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "start"), -1);
-  }
-
-  /*
-     Give the threads a moment to open.  This isn't really necessary but if we
-     don't we find that all of our blocks are constructed and enqueued before
-     any of the threads get created. Basically, the sleep() makes the output
-     look more interesting.       
-   */
+  /* Create and start an instance of our Task object.  */
+  Task task (threads);
+
+  if (task.open () == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
+
+  /* Give the threads a moment to open.  This isn't really necessary
+    but if we don't we find that all of our blocks are constructed and
+    enqueued before any of the threads get created. Basically, the
+    sleep() makes the output look more interesting.  */
   ACE_OS::sleep (ACE_Time_Value (1));
 
   int i;
   for (i = 0; i < iterations; ++i)
-  {
-    /*
-       Create a new message block to hold our data.  Here, we ask for a block
-       that has 128 bytes of data space. 
-     */
-    Block *message = new Block (128);
-
-    /*
-       Grab the "write pointer".  This is a pointer into the data area where we 
-       can write our data.  After writting the data you have to increment the
-       wr_ptr() so that subsequent writes won't clobber what you've put there. 
-     */
-    ACE_OS::sprintf (message->wr_ptr (), "This is message %d.", i);
-    message->wr_ptr (strlen (message->rd_ptr ()));
-
-    /*
-       Put the message block into the queue.  One of the threads in the Task
-       object will pick up the block and "do work" on it. 
-     */
-    if (task.putq (message) == -1)
     {
-      break;
+      /* Create a new message block to hold our data.  Here, we ask
+        for a block that has 128 bytes of data space.  */
+      Block *message;
+      ACE_NEW_RETURN (message,
+                      Block (128),
+                      -1);
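+      /* (ACE_NEW_RETURN is an ACE convenience macro: it allocates the
+        object with new and, if the allocation fails, returns the last
+        argument -- here -1 -- from the enclosing function.)  */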
+
+      /* Grab the "write pointer".  This is a pointer into the data
+        area where we can write our data.  After writing the data you
+        have to increment the wr_ptr() so that subsequent writes won't
+        clobber what you've put there.  */
+      ACE_OS::sprintf (message->wr_ptr (),
+                       "This is message %d.",
+                       i);
+      message->wr_ptr (ACE_OS::strlen (message->rd_ptr ()));
+
+      /* Put the message block into the queue.  One of the threads in
+        the Task object will pick up the block and "do work" on it.  */
+      if (task.putq (message) == -1)
+        break;
     }
-  }
-
-  /*
-     Once we're done, we have to signal the Task objects to shut down. There
-     are several choices including: - Send a message of zero length - Send a
-     message with a special content I don't like these choices because they're
-     likely to interfere with application logic.  Instead, I use the message
-     type feature to send a message of type "hangup".  The default type is
-     MB_DATA, so when the tasks get a MB_HANGUP type, they know to go away. 
-   */
-  Block *message = new Block ();
+
+  /* Once we're done, we have to signal the Task objects to shut
+    down.  There are several choices, including:
+      - Send a message of zero length
+      - Send a message with a special content
+    I don't like these choices because they're likely to interfere
+    with application logic.  Instead, I use the message type feature
+    to send a message of type "hangup".  The default type is MB_DATA,
+    so when the tasks get a MB_HANGUP type, they know to go away.  */
+  Block *message;
+
+  ACE_NEW_RETURN (message,
+                  Block (),
+                  -1);
   message->msg_type (ACE_Message_Block::MB_HANGUP);
   task.putq (message);
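+
+  /* (Sketch of the receiving side: in the Task's svc() each thread
+    can test the type before doing any work, something like
+
+      if (message->msg_type () == ACE_Message_Block::MB_HANGUP)
+        break;
+
+    with some care taken so that peer threads also get to see the
+    shutdown block.  The actual svc() is on a later page.)  */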
 
-  /*
-     Wait for the threads in our task object to go away. 
-   */
+  /* Wait for the threads in our task object to go away.  */
   task.wait ();
   
-  return(0);
+  return 0;
 }
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-  /*
-     Set the number of iterations through our putq() loop and the number of
-     threads to use in our Task<> derivative. 
-   */
+  /* Set the number of iterations through our putq() loop and the
+    number of threads to use in our Task<> derivative.  */
   int iterations = argc > 1 ? atoi (argv[1]) : 9;
   int threads = argc > 2 ? atoi (argv[2]) : 2;
   
-  (void)run_test(iterations,threads);
+  run_test (iterations,
+            threads);
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Application exiting\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Application exiting\n"));
   
-  return(0);
+  return 0;
 }
 

diff --git a/docs/tutorials/010/page03.html b/docs/tutorials/010/page03.html index 20710ff1c92..33c62e6cc28 100644 --- a/docs/tutorials/010/page03.html +++ b/docs/tutorials/010/page03.html @@ -20,7 +20,6 @@ that the message blocks to, indeed, get freed when we're done with 'em.
-
 // $Id$
 
 #ifndef BLOCK_H
@@ -32,32 +31,37 @@ that the message blocks to, indeed, get freed when we're done with 'em.
 # pragma once
 #endif /* ACE_LACKS_PRAGMA_ONCE */
 
-/*
-   This simple ACE_Message_Block derivative will inform us of it's construction
-   and destruction.  We'll use this to assure ourselves that we don't have any
-   memory leaks.  In a real application, of course, this isn't necessary.
- */
+/* This simple ACE_Message_Block derivative will inform us of it's
+   construction and destruction.  We'll use this to assure ourselves
+   that we don't have any memory leaks.  In a real application, of
+   course, this isn't necessary.  */
 class Block : public ACE_Message_Block
 {
 public:
   Block (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block ctor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) Block ctor 0x%x\n",
+                (void *) this));
   }
 
   Block (size_t size)
-   : ACE_Message_Block (size)
+    : ACE_Message_Block (size)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block ctor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) Block ctor 0x%x\n",
+                (void *) this));
   }
 
-  virtual ~ Block (void)
+  virtual ~Block (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block dtor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) Block dtor 0x%x\n",
+                (void *) this));
   }
 };
 
-#endif
+#endif /* BLOCK_H */
 

diff --git a/docs/tutorials/010/page04.html b/docs/tutorials/010/page04.html index fb6b5cf67f3..fb3cee4e3cb 100644 --- a/docs/tutorials/010/page04.html +++ b/docs/tutorials/010/page04.html @@ -19,7 +19,6 @@ and reads from the message queue it contains.


-
 // $Id$
 
 #ifndef TASK_H
@@ -31,51 +30,40 @@ and reads from the message queue it contains.
 # pragma once
 #endif /* ACE_LACKS_PRAGMA_ONCE */
 
-/*
-  Like the thread-pool server tutorial, we'll derive from ACE_Task<>.
+/* Like the thread-pool server tutorial, we'll derive from ACE_Task<>.
   Our goal here is to show off the ACE_Message_Queue and the best way
   to do that is to use one to pass data between threads.  The easiest
-  way to create threads is with ACE_Task<>
- */
-class Task : public ACE_Task < ACE_MT_SYNCH >
+  way to create threads is with ACE_Task<> */
+class Task : public ACE_Task <ACE_MT_SYNCH>
 {
 public:
 
-  typedef ACE_Task < ACE_MT_SYNCH > inherited;
-
-    /*
-      The constructor/destructor are simple but take care of some
-      necessary housekeeping.
-    */
-    Task (void);
-   ~Task (void);
-
-    /*
-      I really wanted this to be called open() but that was already
-      claimed by the Task framework.  start() will kick off our thread 
-      pool for us.
-    */
-  int start (int threads = 1);
-
-  /*
-    Our worker method
-  */
+  typedef ACE_Task <ACE_MT_SYNCH> inherited;
+
+  /* The constructor/destructor are simple but take care of some
+    necessary housekeeping.  */
+  Task (size_t n_threads);
+  ~Task (void);
+
+  /* open() will kick off our thread pool for us.  */
+  int open (void * = 0);
+
+  /* Our worker method */
   int svc (void);
 
-  /*
-    All we'll do here is print a message to the user.
-  */
+  /* All we'll do here is print a message to the user.  */
   int close (u_long flags = 0);
 
 protected:
-    /*
-      Just to be clever, I'll use an ACE_Barrier to cause the threads
-      to sync in svc() before doing any real work.
-    */
-    ACE_Barrier *barrier_;
+  /* Just to be clever, I'll use an ACE_Barrier to cause the threads
+    to sync in svc() before doing any real work.  */
+  ACE_Barrier *barrier_;
+
+  size_t n_threads_;
+  // Number of threads in the pool.
 };
 
-#endif
+#endif /* TASK_H */
 

diff --git a/docs/tutorials/010/page05.html b/docs/tutorials/010/page05.html index 238bfab2b6a..d0ccb910270 100644 --- a/docs/tutorials/010/page05.html +++ b/docs/tutorials/010/page05.html @@ -19,146 +19,136 @@ Our Task object definition:


-
 // $Id$
 
 #include "task.h"
 #include "block.h"
 
-/*
-  Set our  housekeeping pointer to NULL and tell the user we exist.
- */
-Task::Task (void)
-: barrier_ (0)
+/* Set our housekeeping pointer to NULL and tell the user we exist.  */
+Task::Task (size_t n_threads)
+  : barrier_ (0),
+    n_threads_ (n_threads)
 {
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task ctor 0x%x\n", (void *) this));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task ctor 0x%x\n",
+              (void *) this));
 }
 
-/*
-  Take care of cleanup & tell the user we're going away.
-*/
+/* Take care of cleanup & tell the user we're going away.  */
 Task::~Task (void)
 {
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task dtor 0x%x\n", (void *) this));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task dtor 0x%x\n",
+              (void *) this));
 
-  /*
-    Get our shutdown notification out of the queue and release it.
-  */
-  ACE_Message_Block * message;
+  /* Get our shutdown notification out of the queue and release it.  */
+  ACE_Message_Block *message;
 
-  /*
-    Like the getq() in svc() below, this will block until a message
-    arrives.  By blocking, we know that the destruction will be paused 
-    until the last thread is done with the message block.
-  */
-  this->getq(message);
-  message->release();
+  /* Like the getq() in svc() below, this will block until a message
+    arrives.  By blocking, we know that the destruction will be paused
+    until the last thread is done with the message block.  */
+  this->getq (message);
+  message->release ();
 
   delete barrier_;
 }
 
-/*
-  Open the object to do work.  We create the Barrier object and tell
-  it how many threads we'll be using.  Next, we activate the Task
-  into the number of requested threads.
-*/
-int Task::start (int threads)
+/* Open the object to do work.  We create the Barrier object and tell
+  it how many threads we'll be using.  Next, we activate the Task into
+  the number of requested threads.  */
+int 
+Task::open (void *unused)
 {
-  barrier_ = new ACE_Barrier (threads);
-  return this->activate (THR_NEW_LWP, threads);
+  ACE_UNUSED_ARG (unused);
+
+  ACE_NEW_RETURN (barrier_,
+                  ACE_Barrier (this->n_threads_),
+                  -1);
+
+  return this->activate (THR_NEW_LWP,
+                         this->n_threads_);
 }
 
-/*
-  Tell the user we're closing and invoke the baseclass' close() to
-  take care of things.
-*/
-int Task::close (u_long flags)
+/* Tell the user we're closing and invoke the baseclass' close() to
+  take care of things.  */
+int 
+Task::close (u_long flags)
 {
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task close 0x%x\n", (void *) this));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task close 0x%x\n",
+              (void *) this));
   return inherited::close (flags);
 }
 
-/*
-  Our svc() method waits for work on the queue and then processes that work.
- */
-int Task::svc (void)
+/* Our svc() method waits for work on the queue and then processes
+  that work.  */
+int 
+Task::svc (void)
 {
-  /*
-    This will cause all of the threads to wait on this line until all
+  /* This will cause all of the threads to wait on this line until all
     have invoked this method.  The net result is that no thread in the
-    Task will get a shot at the queue until all of the threads are active.
-    There's no real need to do this but it's an easy intro into the use
-    of ACE_Barrier.
-   */
+    Task will get a shot at the queue until all of the threads are
+    active.  There's no real need to do this but it's an easy intro
+    into the use of ACE_Barrier.  */
   this->barrier_->wait ();
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task 0x%x starts in thread %d\n", (void *) this, ACE_Thread::self ()));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task 0x%x starts in thread %d\n",
+              (void *) this,
+              ACE_Thread::self ()));
 
-  /*
-    Remember that get() needs a reference to a pointer.  To save stack
-    thrashing we'll go ahead and create a pointer outside of the almost-
-    infinite loop.
-   */
+  /* Remember that get() needs a reference to a pointer.  To save
+    stack thrashing we'll go ahead and create a pointer outside of the
+    almost-infinite loop.  */
   ACE_Message_Block *message;
-  while (1)
-  {
-    /*
-      Get a message from the queue.  Note that getq() will block until 
-      a message shows up.  That makes us very processor-friendly.
-    */
-    if (this->getq (message) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "getq"), -1);
-    }
 
-    /*
-      If we got the shutdown request, we need to go away.
-    */
-    if (message->msg_type () == ACE_Message_Block::MB_HANGUP)
+  for (;;)
     {
-      /*
-        Forward the request to any peer threads.
-       */
-      this->putq (message);
-
-      /*
-        Leave the infinite loop so that the thread exits.
-      */
-      break;
+      /* Get a message from the queue.  Note that getq() will block
+        until a message shows up.  That makes us very
+        processor-friendly.  */
+      if (this->getq (message) == -1)
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           "%p\n",
+                           "getq"),
+                          -1);
+      /* If we got the shutdown request, we need to go away.  */
+      if (message->msg_type () == ACE_Message_Block::MB_HANGUP)
+        {
+          /* Forward the request to any peer threads.  */
+          this->putq (message);
+
+          /* Leave the infinite loop so that the thread exits.  */
+          break;
+        }
+
+      /* The message queue stores char* data.  We use rd_ptr() to get
+        to the beginning of the data.  */
+      const char *cp = message->rd_ptr ();
+
+      /* Move the rd_ptr() past the data we read.  This isn't real
+        useful here since we won't be reading any more from the block
+        but it's a good habit to get into.  */
+      message->rd_ptr (ACE_OS::strlen (cp));
+    
+      /* Display the block's address and data to the user.  */
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) Block 0x%x contains (%s)\n",
+                  (void *) message,
+                  cp));
+
+      /* Pretend that it takes a while to process the data.  */
+      ACE_OS::sleep (ACE_Time_Value (0, 5000));
+
+      /* Release the message block.  Notice that we never delete a
+        message block.  Blocks are reference counted & the release()
+        method will take care of the delete when there are no more
+        references to the data.  */
+      message->release ();
     }
 
-    /*
-      The message queue stores char* data.  We use rd_ptr() to get to
-      the beginning of the data.
-    */
-    const char *cp = message->rd_ptr ();
-
-    /*
-      Move the rd_ptr() past the data we read.  This isn't real useful
-      here since we won't be reading any more from the block but it's
-      a good habit to get into.
-    */
-    message->rd_ptr( strlen(cp) );
-    
-    /*
-      Display the block's address and data to the user.
-    */
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block 0x%x contains (%s)\n", (void *) message, cp));
-
-    /*
-       Pretend that it takes a while to process the data.
-     */
-    ACE_OS::sleep (ACE_Time_Value (0, 5000));
-
-    /*
-      Release the message block.  Notice that we never delete a message block.
-      Blocks are reference counted & the release() method will take care of
-      the delete when there are no more references to the data.
-    */
-    message->release ();
-  }
-
-  return (0);
+  return 0;
 }
 

diff --git a/docs/tutorials/010/task.cpp b/docs/tutorials/010/task.cpp index 4311474dceb..3b1a608daf2 100644 --- a/docs/tutorials/010/task.cpp +++ b/docs/tutorials/010/task.cpp @@ -1,141 +1,131 @@ - // $Id$ #include "task.h" #include "block.h" -/* - Set our housekeeping pointer to NULL and tell the user we exist. - */ -Task::Task (void) -: barrier_ (0) +/* Set our housekeeping pointer to NULL and tell the user we exist. */ +Task::Task (size_t n_threads) + : barrier_ (0), + n_threads_ (n_threads) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task ctor 0x%x\n", + (void *) this)); } -/* - Take care of cleanup & tell the user we're going away. -*/ +/* Take care of cleanup & tell the user we're going away. */ Task::~Task (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task dtor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task dtor 0x%x\n", + (void *) this)); - /* - Get our shutdown notification out of the queue and release it. - */ - ACE_Message_Block * message; + /* Get our shutdown notification out of the queue and release it. */ + ACE_Message_Block *message; - /* - Like the getq() in svc() below, this will block until a message - arrives. By blocking, we know that the destruction will be paused - until the last thread is done with the message block. - */ - this->getq(message); - message->release(); + /* Like the getq() in svc() below, this will block until a message + arrives. By blocking, we know that the destruction will be paused + until the last thread is done with the message block. */ + this->getq (message); + message->release (); delete barrier_; } -/* - Open the object to do work. We create the Barrier object and tell - it how many threads we'll be using. Next, we activate the Task - into the number of requested threads. -*/ -int Task::start (int threads) +/* Open the object to do work. We create the Barrier object and tell + it how many threads we'll be using. Next, we activate the Task into + the number of requested threads. */ +int +Task::open (void *unused) { - barrier_ = new ACE_Barrier (threads); - return this->activate (THR_NEW_LWP, threads); + ACE_UNUSED_ARG (unused); + barrier_; + + ACE_NEW_RETURN (barrier_, + ACE_Barrier (this->n_threads_), + -1); + + return this->activate (THR_NEW_LWP, + threads); } -/* - Tell the user we're closing and invoke the baseclass' close() to - take care of things. -*/ -int Task::close (u_long flags) +/* Tell the user we're closing and invoke the baseclass' close() to + take care of things. */ +int +Task::close (u_long flags) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task close 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task close 0x%x\n", + (void *) this)); return inherited::close (flags); } -/* - Our svc() method waits for work on the queue and then processes that work. - */ -int Task::svc (void) +/* Our svc() method waits for work on the queue and then processes + that work. */ +int +Task::svc (void) { - /* - This will cause all of the threads to wait on this line until all + /* This will cause all of the threads to wait on this line until all have invoked this method. The net result is that no thread in the - Task will get a shot at the queue until all of the threads are active. - There's no real need to do this but it's an easy intro into the use - of ACE_Barrier. - */ + Task will get a shot at the queue until all of the threads are + active. There's no real need to do this but it's an easy intro + into the use of ACE_Barrier. 
*/ this->barrier_->wait (); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task 0x%x starts in thread %d\n", (void *) this, ACE_Thread::self ())); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task 0x%x starts in thread %d\n", + (void *) this, + ACE_Thread::self ())); - /* - Remember that get() needs a reference to a pointer. To save stack - thrashing we'll go ahead and create a pointer outside of the almost- - infinite loop. - */ + /* Remember that get() needs a reference to a pointer. To save + stack thrashing we'll go ahead and create a pointer outside of the + almost- infinite loop. */ ACE_Message_Block *message; - while (1) - { - /* - Get a message from the queue. Note that getq() will block until - a message shows up. That makes us very processor-friendly. - */ - if (this->getq (message) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "getq"), -1); - } - /* - If we got the shutdown request, we need to go away. - */ - if (message->msg_type () == ACE_Message_Block::MB_HANGUP) + for (;;) { - /* - Forward the request to any peer threads. - */ - this->putq (message); - - /* - Leave the infinite loop so that the thread exits. - */ - break; + /* Get a message from the queue. Note that getq() will block + until a message shows up. That makes us very + processor-friendly. */ + if (this->getq (message) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "getq"), + -1); + /* If we got the shutdown request, we need to go away. */ + if (message->msg_type () == ACE_Message_Block::MB_HANGUP) + { + /* Forward the request to any peer threads. */ + this->putq (message); + + /* Leave the infinite loop so that the thread exits. */ + break; + } + + /* The message queue stores char* data. We use rd_ptr() to get + to the beginning of the data. */ + const char *cp = message->rd_ptr (); + + /* Move the rd_ptr() past the data we read. This isn't real + useful here since we won't be reading any more from the block + but it's a good habit to get into. */ + message->rd_ptr (ACE_OS::strlen (cp)); + + /* Display the block's address and data to the user. */ + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block 0x%x contains (%s)\n", + (void *) message, + cp)); + + /* Pretend that it takes a while to process the data. */ + ACE_OS::sleep (ACE_Time_Value (0, 5000)); + + /* Release the message block. Notice that we never delete a + message block. Blocks are reference counted & the release() + method will take care of the delete when there are no more + references to the data. */ + message->release (); } - /* - The message queue stores char* data. We use rd_ptr() to get to - the beginning of the data. - */ - const char *cp = message->rd_ptr (); - - /* - Move the rd_ptr() past the data we read. This isn't real useful - here since we won't be reading any more from the block but it's - a good habit to get into. - */ - message->rd_ptr( strlen(cp) ); - - /* - Display the block's address and data to the user. - */ - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block 0x%x contains (%s)\n", (void *) message, cp)); - - /* - Pretend that it takes a while to process the data. - */ - ACE_OS::sleep (ACE_Time_Value (0, 5000)); - - /* - Release the message block. Notice that we never delete a message block. - Blocks are reference counted & the release() method will take care of - the delete when there are no more references to the data. 
- */ - message->release (); - } - - return (0); + return 0; } diff --git a/docs/tutorials/010/task.h b/docs/tutorials/010/task.h index 9351959d6b1..b0d4e56fe15 100644 --- a/docs/tutorials/010/task.h +++ b/docs/tutorials/010/task.h @@ -1,4 +1,3 @@ - // $Id$ #ifndef TASK_H @@ -10,48 +9,37 @@ # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ -/* - Like the thread-pool server tutorial, we'll derive from ACE_Task<>. +/* Like the thread-pool server tutorial, we'll derive from ACE_Task<>. Our goal here is to show off the ACE_Message_Queue and the best way to do that is to use one to pass data between threads. The easiest - way to create threads is with ACE_Task<> - */ -class Task : public ACE_Task < ACE_MT_SYNCH > + way to create threads is with ACE_Task<> */ +class Task : public ACE_Task { public: - typedef ACE_Task < ACE_MT_SYNCH > inherited; - - /* - The constructor/destructor are simple but take care of some - necessary housekeeping. - */ - Task (void); - ~Task (void); - - /* - I really wanted this to be called open() but that was already - claimed by the Task framework. start() will kick off our thread - pool for us. - */ - int start (int threads = 1); - - /* - Our worker method - */ + typedef ACE_Task inherited; + + /* The constructor/destructor are simple but take care of some + necessary housekeeping. */ + Task (size_t n_threads); + ~Task (void); + + /* open() will kick off our thread pool for us. */ + int open (void * = 0); + + /* Our worker method */ int svc (void); - /* - All we'll do here is print a message to the user. - */ + /* All we'll do here is print a message to the user. */ int close (u_long flags = 0); protected: - /* - Just to be clever, I'll use an ACE_Barrier to cause the threads - to sync in svc() before doing any real work. - */ - ACE_Barrier *barrier_; + /* Just to be clever, jI'll use an ACE_Barrier to cause the threads + to sync in svc() before doing any real work. */ + ACE_Barrier *barrier_; + + size_t n_threads_; + // Number of threads in the pool. 
}; -#endif +#endif /* TASK_H */ diff --git a/docs/tutorials/011/block.h b/docs/tutorials/011/block.h index 1ffc5cb4e9d..28443494c93 100644 --- a/docs/tutorials/011/block.h +++ b/docs/tutorials/011/block.h @@ -1,4 +1,3 @@ - // $Id$ #ifndef BLOCK_H @@ -15,19 +14,25 @@ class Block : public ACE_Message_Block public: Block (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block ctor 0x%x\n", + (void *) this)); } Block (size_t size) - : ACE_Message_Block (size) + : ACE_Message_Block (size) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block ctor 0x%x\n", + (void *) this)); } - virtual ~ Block (void) + virtual ~Block (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block dtor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block dtor 0x%x\n", + (void *) this)); } }; -#endif +#endif /* BLOCK_H */ diff --git a/docs/tutorials/011/data.h b/docs/tutorials/011/data.h index 998b4d009ec..098644c9a6f 100644 --- a/docs/tutorials/011/data.h +++ b/docs/tutorials/011/data.h @@ -1,4 +1,3 @@ - // $Id$ #ifndef DATA_H @@ -9,21 +8,28 @@ class DataBase public: DataBase (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) DataBase ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) DataBase ctor 0x%x\n", + (void *) this)); } - virtual ~ DataBase (void) + virtual ~DataBase (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) DataBase dtor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) DataBase dtor 0x%x\n", + (void *) this)); } void who_am_i (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) DataBase instance 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) DataBase instance 0x%x\n", + (void *) this)); } virtual void what_am_i (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) I am a DataBase object\n")); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) I am a DataBase object\n")); } }; @@ -32,29 +38,36 @@ class Data : public DataBase { public: Data (void) - : message_ (-1) + : message_ (-1) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Data ctor 0x%x\n", + (void *) this)); } Data (int message) - : message_ (message) + : message_ (message) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data ctor 0x%x for message %d\n", (void *) this, message_)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Data ctor 0x%x for message %d\n", + (void *) this, message_)); } - virtual ~ Data (void) + virtual ~Data (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data dtor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Data dtor 0x%x\n", + (void *) this)); } void what_am_i (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) I am a Data object for message %d\n", message_)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) I am a Data object for message %d\n", + message_)); } protected: int message_; - }; -#endif +#endif /* DATA_H */ diff --git a/docs/tutorials/011/message_queue.cpp b/docs/tutorials/011/message_queue.cpp index e591f18f024..8ccc111481a 100644 --- a/docs/tutorials/011/message_queue.cpp +++ b/docs/tutorials/011/message_queue.cpp @@ -1,83 +1,84 @@ - // $Id$ -/* - Most of this is the same as the previous tutorial, so I'll just point out - the differences. - */ +/* Most of this is the same as the previous tutorial, so I'll just + point out the differences. 
*/ #include "task.h" #include "block.h" #include "data.h" -int run_test (int iterations, int threads) +statuc int +run_test (int iterations, + int threads) { - Task task; + Task task (threads); - if (task.start (threads) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "start"), -1); - } + if (task.open () == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "open"), + -1); - ACE_OS::sleep (ACE_Time_Value (1)); + ACE_OS::sleep (ACE_Time_Value (1)); - int i; - for (i = 0; i < iterations; ++i) + int i; + for (i = 0; i < iterations; ++i) { - /* - Construct a Data object that we'll put into the Queue. - */ - Data data (i); - - /* - Create a block large enough for our Data object as well as a text - message. - */ - Block *message = new Block (sizeof (data) + 128); - - /* - As before, put a text message into the block. - */ - ACE_OS::sprintf (message->wr_ptr (), "This is message %d.", i); - message->wr_ptr (strlen (message->rd_ptr ())); - - *(message->wr_ptr ()) = 0; // Null-terminate the string we just wrote - - message->wr_ptr (1); // Move beyond the NULL - - /* - To copy arbitrary data into a message block, we use the copy() method. - Since it wants a 'const char*', we have to cast our Data - pointer. - - Note that copy() will advance the wr_ptr() for us. This means - we don't have to do it ourselves! If you do advance it, it - will be way beyond what you want. - */ - message->copy ((const char *) &data, sizeof (data)); - - if (task.putq (message) == -1) - { - break; - } + /* Construct a Data object that we'll put into the Queue. */ + Data data (i); + + /* Create a block large enough for our Data object as well as a + text message. */ + Block *message; + + ACE_NEW_RETURN (message, + Block (sizeof (data) + 128), + -1); + + /* As before, put a text message into the block. */ + ACE_OS::sprintf (message->wr_ptr (), "This is message %d.", i); + message->wr_ptr (strlen (message->rd_ptr ())); + + *(message->wr_ptr ()) = 0; // Null-terminate the string we just wrote + + message->wr_ptr (1); // Move beyond the NULL + + /* To copy arbitrary data into a message block, we use the + copy() method. Since it wants a 'const char*', we have to + cast our Data pointer. + + Note that copy() will advance the wr_ptr() for us. This means + we don't have to do it ourselves! If you do advance it, it + will be way beyond what you want. */ + message->copy ((const char *) &data, + sizeof (data)); + + if (task.putq (message) == -1) + break; } - Block *message = new Block (); - message->msg_type (ACE_Message_Block::MB_HANGUP); - task.putq (message); + Block *message; + ACE_NEW_RETURN (message, + Block, + -1); + message->msg_type (ACE_Message_Block::MB_HANGUP); + task.putq (message); - task.wait (); + task.wait (); - return (0); + return 0; } -int main (int argc, char *argv[]) +int +main (int argc, char *argv[]) { - int iterations = argc > 1 ? atoi (argv[1]) : 4; - int threads = argc > 2 ? atoi (argv[2]) : 2; + int iterations = argc > 1 ? atoi (argv[1]) : 4; + int threads = argc > 2 ? atoi (argv[2]) : 2; - (void) run_test (iterations, threads); + run_test (iterations, + threads); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Application exiting\n")); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Application exiting\n")); - return (0); + return 0; } diff --git a/docs/tutorials/011/page02.html b/docs/tutorials/011/page02.html index df8e77e977b..714b1927ca3 100644 --- a/docs/tutorials/011/page02.html +++ b/docs/tutorials/011/page02.html @@ -19,88 +19,89 @@ the same as before, so I've only commented the changes.
-
 // $Id$
 
-/*
-   Most of this is the same as the previous tutorial, so I'll just point out
-   the differences. 
- */
+/* Most of this is the same as the previous tutorial, so I'll just
+   point out the differences.  */
 #include "task.h"
 #include "block.h"
 #include "data.h"
 
-int run_test (int iterations, int threads)
+static int
+run_test (int iterations,
+          int threads)
 {
-    Task task;
+  Task task (threads);
 
-    if (task.start (threads) == -1)
-    {
-        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "start"), -1);
-    }
+  if (task.open () == -1)
+    ACE_ERROR_RETURN ((LM_ERROR,
+                       "%p\n",
+                       "open"),
+                      -1);
 
-    ACE_OS::sleep (ACE_Time_Value (1));
+  ACE_OS::sleep (ACE_Time_Value (1));
 
-    int i;
-    for (i = 0; i < iterations; ++i)
+  int i;
+  for (i = 0; i < iterations; ++i)
     {
-            /*
-              Construct a Data object that we'll put into the Queue. 
-            */
-        Data data (i);
-
-            /*
-              Create a block large enough for our Data object as well as a text
-              message. 
-            */
-        Block *message = new Block (sizeof (data) + 128);
-
-            /*
-              As before, put a text message into the block. 
-            */
-        ACE_OS::sprintf (message->wr_ptr (), "This is message %d.", i);
-        message->wr_ptr (strlen (message->rd_ptr ()));
-
-        *(message->wr_ptr ()) = 0;  // Null-terminate the string we just wrote
-
-        message->wr_ptr (1);        // Move beyond the NULL
-
-            /*
-              To copy arbitrary data into a message block, we use the copy() method.
-              Since it wants a 'const char*', we have to cast our Data
-              pointer. 
-
-              Note that copy() will advance the wr_ptr() for us.  This means
-              we don't have to do it ourselves!  If you do advance it, it
-              will be way beyond what you want.
-            */
-        message->copy ((const char *) &data, sizeof (data));
-
-        if (task.putq (message) == -1)
-        {
-            break;
-        }
+      /* Construct a Data object that we'll put into the Queue.  */
+      Data data (i);
+
+      /* Create a block large enough for our Data object as well as a
+        text message.  */
+      Block *message;
+
+      ACE_NEW_RETURN (message,
+                      Block (sizeof (data) + 128),
+                      -1);
+
+      /* As before, put a text message into the block.  */
+      ACE_OS::sprintf (message->wr_ptr (), "This is message %d.", i);
+      message->wr_ptr (strlen (message->rd_ptr ()));
+
+      *(message->wr_ptr ()) = 0;  // Null-terminate the string we just wrote
+
+      message->wr_ptr (1);        // Move beyond the NULL
+
+      /* To copy arbitrary data into a message block, we use the
+        copy() method.  Since it wants a 'const char*', we have to
+        cast our Data pointer.
+
+        Note that copy() will advance the wr_ptr() for us.  This means
+        we don't have to do it ourselves!  If you do advance it, it
+        will be way beyond what you want.  */
+      message->copy ((const char *) &data,
+                     sizeof (data));
+
+      if (task.putq (message) == -1)
+        break;
     }
 
-    Block *message = new Block ();
-    message->msg_type (ACE_Message_Block::MB_HANGUP);
-    task.putq (message);
+  Block *message;
+  ACE_NEW_RETURN (message,
+                  Block,
+                  -1);
+  message->msg_type (ACE_Message_Block::MB_HANGUP);
+  task.putq (message);
 
-    task.wait ();
+  task.wait ();
 
-    return (0);
+  return 0;
 }
 
-int main (int argc, char *argv[])
+int 
+main (int argc, char *argv[])
 {
-    int iterations = argc > 1 ? atoi (argv[1]) : 4;
-    int threads = argc > 2 ? atoi (argv[2]) : 2;
+  int iterations = argc > 1 ? atoi (argv[1]) : 4;
+  int threads = argc > 2 ? atoi (argv[2]) : 2;
 
-    (void) run_test (iterations, threads);
+  run_test (iterations,
+            threads);
 
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Application exiting\n"));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Application exiting\n"));
 
-    return (0);
+  return 0;
 }
 

diff --git a/docs/tutorials/011/page03.html b/docs/tutorials/011/page03.html
index bff7cd1f319..4bbb5d3b62e 100644
--- a/docs/tutorials/011/page03.html
+++ b/docs/tutorials/011/page03.html
@@ -22,7 +22,6 @@ I've only commented the changes.

task.h

-
 // $Id$
 
 #ifndef TASK_H
@@ -34,45 +33,51 @@ I've only commented the changes.
 # pragma once
 #endif /* ACE_LACKS_PRAGMA_ONCE */
 
-class Task : public ACE_Task < ACE_MT_SYNCH >
+class Task : public ACE_Task <ACE_MT_SYNCH>
 {
 public:
 
-  typedef ACE_Task < ACE_MT_SYNCH > inherited;
+  typedef ACE_Task <ACE_MT_SYNCH> inherited;
 
-    Task (void);
-   ~Task (void);
+  Task (size_t n_threads);
+  ~Task (void);
 
-  int start (int threads = 1);
+  int open (void * = 0);
 
   int svc (void);
 
   int close (u_long flags = 0);
 
 protected:
-    ACE_Barrier * barrier_;
+  ACE_Barrier *barrier_;
+  
+  size_t n_threads_;
 };
 
-#endif
+#endif /* TASK_H */
 

task.cpp

-
 // $Id$
 
 #include "task.h"
 #include "block.h"
 #include "data.h"
 
-Task::Task (void)
-: barrier_ (0)
+Task::Task (size_t n_threads)
+  : barrier_ (0),
+    n_threads_ (n_threads)
 {
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task ctor 0x%x\n", (void *) this));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task ctor 0x%x\n",
+              (void *) this));
 }
 
 Task::~Task (void)
 {
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task dtor 0x%x\n", (void *) this));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task dtor 0x%x\n",
+              (void *) this));
 
   ACE_Message_Block *message;
   this->getq (message);
@@ -81,83 +86,99 @@ protected:
   delete barrier_;
 }
 
-int Task::start (int threads)
+int 
+Task::open (void *)
 {
-  barrier_ = new ACE_Barrier (threads);
-  return this->activate (THR_NEW_LWP, threads);
+
+  ACE_NEW_RETURN (barrier_,
+                  ACE_Barrier (this->n_threads_),
+                  -1);
+
+  return this->activate (THR_NEW_LWP,
+                         this->n_threads_);
 }
-int Task::close (u_long flags)
+
+int 
+Task::close (u_long flags)
 {
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task close 0x%x\n", (void *) this));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task close 0x%x\n",
+              (void *) this));
   return inherited::close (flags);
 }
 
-int Task::svc (void)
+int 
+Task::svc (void)
 {
   this->barrier_->wait ();
 
-  ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task 0x%x starts in thread %d\n", (void *) this, ACE_Thread::self ()));
+  ACE_DEBUG ((LM_DEBUG,
+              "(%P|%t) Task 0x%x starts in thread %d\n",
+              (void *) this,
+              ACE_Thread::self ()));
 
   ACE_Message_Block *message;
-  while (1)
-  {
-    if (this->getq (message) == -1)
-    {
-      ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "getq"), -1);
-    }
 
-    if (message->msg_type () == ACE_Message_Block::MB_HANGUP)
+  for (;;)
     {
-      this->putq (message);
+      if (this->getq (message) == -1)
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           "%p\n",
+                           "getq"),
+                          -1);
 
-      break;
-    }
+      if (message->msg_type () == ACE_Message_Block::MB_HANGUP)
+        {
+          this->putq (message);
+
+          break;
+        }
 
-    const char *cp = message->rd_ptr ();
-    message->rd_ptr (strlen (cp) + 1);  // Don't forget to skip the NULL we
-                                        // inserted
+      const char *cp = message->rd_ptr ();
+      // Don't forget to skip the NULL we inserted
+      message->rd_ptr (ACE_OS::strlen (cp) + 1);  
 
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block 0x%x contains (%s)\n", (void *) message, cp));
+      ACE_DEBUG ((LM_DEBUG,
+                  "(%P|%t) Block 0x%x contains (%s)\n",
+                  (void *) message,
+                  cp));
 
-    /*
-       Create a Data object into which we can extract the message block
-       contents. 
-     */
-    Data data;
-    /*
-       Use the rd_ptr() to access the message block data.  Note that we've
-       already moved it past the text string in the block. 
-     */
-    ACE_OS::memmove ((char *) &data, message->rd_ptr (), sizeof (data));
-    message->rd_ptr (sizeof (data));  // Move the rd_ptr() beyond the data.
+      /* Create a Data object into which we can extract the message
+        block contents.  */
+      Data data;
 
-    /*
-       Invoke a couple of method calls on the object we constructed. 
-     */
-    data.who_am_i ();
-    data.what_am_i ();
+      /* Use the rd_ptr() to access the message block data.  Note that
+        we've already moved it past the text string in the block.  */
+      ACE_OS::memmove ((char *) &data,
+                       message->rd_ptr (),
+                       sizeof (data));
+      message->rd_ptr (sizeof (data));  // Move the rd_ptr() beyond the data.
 
-    /*
-       An alternate approach:
+      /* Invoke a couple of method calls on the object we constructed.  */
+      data.who_am_i ();
+      data.what_am_i ();
 
-       Data * data;
-       data = (Data *)message->rd_ptr();
-       data->who_am_i();
-       data->what_am_i();
-       message->rd_ptr(sizeof(Data));
+      /* An alternate approach:
 
-       Even though this cuts down on the number of copies & constructions, I'm
-       not real fond of it.  You can get into trouble in a hurry by treating
-       memory blocks as multiple data types... 
-     */
+        Data * data;
+        data = (Data *)message->rd_ptr();
+        data->who_am_i();
+        data->what_am_i();
+        message->rd_ptr(sizeof(Data));
 
+        Even though this cuts down on the number of copies &
+        constructions, I'm not real fond of it.  You can get into
+        trouble in a hurry by treating memory blocks as multiple data
+        types...  */
 
-    ACE_OS::sleep (ACE_Time_Value (0, 5000));
 
-    message->release ();
-  }
+      ACE_OS::sleep (ACE_Time_Value (0, 5000));
+
+      message->release ();
+    }
 
-  return (0);
+  return 0;
 }
 

diff --git a/docs/tutorials/011/page04.html b/docs/tutorials/011/page04.html
index 2b612b29a16..cd1092332b7 100644
--- a/docs/tutorials/011/page04.html
+++ b/docs/tutorials/011/page04.html
@@ -20,7 +20,6 @@ that's causing all the fuss.


-
 // $Id$
 
 #ifndef DATA_H
@@ -31,21 +30,28 @@ class DataBase
 public:
   DataBase (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) DataBase ctor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) DataBase ctor 0x%x\n",
+                (void *) this));
   }
-  virtual ~ DataBase (void)
+  virtual ~DataBase (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) DataBase dtor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) DataBase dtor 0x%x\n",
+                (void *) this));
   }
 
   void who_am_i (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) DataBase instance 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) DataBase instance 0x%x\n",
+                (void *) this));
   }
 
   virtual void what_am_i (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) I am a DataBase object\n"));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) I am a DataBase object\n"));
   }
 
 };
@@ -54,32 +60,39 @@ class Data : public DataBase
 {
 public:
   Data (void)
-  : message_ (-1)
+    : message_ (-1)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data ctor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) Data ctor 0x%x\n",
+                (void *) this));
   }
 
   Data (int message)
-  : message_ (message)
+    : message_ (message)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data ctor 0x%x for message %d\n", (void *) this, message_));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) Data ctor 0x%x for message %d\n",
+                (void *) this, message_));
   }
-  virtual ~ Data (void)
+  virtual ~Data (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) Data dtor 0x%x\n", (void *) this));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) Data dtor 0x%x\n",
+                (void *) this));
   }
 
   void what_am_i (void)
   {
-    ACE_DEBUG ((LM_DEBUG, "(%P|%t) I am a Data object for message %d\n", message_));
+    ACE_DEBUG ((LM_DEBUG,
+                "(%P|%t) I am a Data object for message %d\n",
+                message_));
   }
 
 protected:
   int message_;
-
 };
 
-#endif
+#endif /* DATA_H */
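
What the two classes buy us is ordinary virtual dispatch: what_am_i() is virtual, so it resolves to the derived class even through a base pointer, while who_am_i() only reports the instance address and does not need to be. A small sketch, not part of the tutorial sources:

#include "data.h"

void
dispatch_sketch (void)
{
  DataBase *object = new Data (7);   // hypothetical message number

  object->who_am_i ();    // DataBase::who_am_i(): prints the instance address
  object->what_am_i ();   // virtual call: resolves to Data::what_am_i()

  delete object;          // safe because ~DataBase is declared virtual
}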
 

diff --git a/docs/tutorials/011/task.cpp b/docs/tutorials/011/task.cpp index 3a5e4393317..78a7e49df39 100644 --- a/docs/tutorials/011/task.cpp +++ b/docs/tutorials/011/task.cpp @@ -1,19 +1,23 @@ - // $Id$ #include "task.h" #include "block.h" #include "data.h" -Task::Task (void) -: barrier_ (0) +Task::Task (size_t n_threads) + : barrier_ (0), + n_threads_ (n_threads) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task ctor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task ctor 0x%x\n", + (void *) this)); } Task::~Task (void) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task dtor 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task dtor 0x%x\n", + (void *) this)); ACE_Message_Block *message; this->getq (message); @@ -22,81 +26,97 @@ Task::~Task (void) delete barrier_; } -int Task::start (int threads) +int +Task::open (void *) { - barrier_ = new ACE_Barrier (threads); - return this->activate (THR_NEW_LWP, threads); + barrier_; + + ACE_NEW_RETURN (barrier_, + ACE_Barrier (this->n_threads_), + -1); + + return this->activate (THR_NEW_LWP, + threads); } -int Task::close (u_long flags) + +int +Task::close (u_long flags) { - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task close 0x%x\n", (void *) this)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task close 0x%x\n", + (void *) this)); return inherited::close (flags); } -int Task::svc (void) +int +Task::svc (void) { this->barrier_->wait (); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Task 0x%x starts in thread %d\n", (void *) this, ACE_Thread::self ())); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Task 0x%x starts in thread %d\n", + (void *) this, + ACE_Thread::self ())); ACE_Message_Block *message; - while (1) - { - if (this->getq (message) == -1) - { - ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "getq"), -1); - } - if (message->msg_type () == ACE_Message_Block::MB_HANGUP) + for (;;) { - this->putq (message); + if (this->getq (message) == -1) + ACE_ERROR_RETURN ((LM_ERROR, + "%p\n", + "getq"), + -1); - break; - } + if (message->msg_type () == ACE_Message_Block::MB_HANGUP) + { + this->putq (message); + + break; + } - const char *cp = message->rd_ptr (); - message->rd_ptr (strlen (cp) + 1); // Don't forget to skip the NULL we - // inserted + const char *cp = message->rd_ptr (); + // Don't forget to skip the NULL we inserted + message->rd_ptr (ACE_OS::strlen (cp) + 1); - ACE_DEBUG ((LM_DEBUG, "(%P|%t) Block 0x%x contains (%s)\n", (void *) message, cp)); + ACE_DEBUG ((LM_DEBUG, + "(%P|%t) Block 0x%x contains (%s)\n", + (void *) message, + cp)); - /* - Create a Data object into which we can extract the message block - contents. - */ - Data data; - /* - Use the rd_ptr() to access the message block data. Note that we've - already moved it past the text string in the block. - */ - ACE_OS::memmove ((char *) &data, message->rd_ptr (), sizeof (data)); - message->rd_ptr (sizeof (data)); // Move the rd_ptr() beyond the data. + /* Create a Data object into which we can extract the message + block contents. */ + Data data; - /* - Invoke a couple of method calls on the object we constructed. - */ - data.who_am_i (); - data.what_am_i (); + /* Use the rd_ptr() to access the message block data. Note that + we've already moved it past the text string in the block. */ + ACE_OS::memmove ((char *) &data, + message->rd_ptr (), + sizeof (data)); + message->rd_ptr (sizeof (data)); // Move the rd_ptr() beyond the data. - /* - An alternate approach: + /* Invoke a couple of method calls on the object we constructed. 
*/ + data.who_am_i (); + data.what_am_i (); - Data * data; - data = (Data *)message->rd_ptr(); - data->who_am_i(); - data->what_am_i(); - message->rd_ptr(sizeof(Data)); + /* An alternate approach: - Even though this cuts down on the number of copies & constructions, I'm - not real fond of it. You can get into trouble in a hurry by treating - memory blocks as multiple data types... - */ + Data * data; + data = (Data *)message->rd_ptr(); + data->who_am_i(); + data->what_am_i(); + message->rd_ptr(sizeof(Data)); + Even though this cuts down on the number of copies & + constructions, I'm not real fond of it. You can get into + trouble in a hurry by treating memory blocks as multiple data + types... */ - ACE_OS::sleep (ACE_Time_Value (0, 5000)); - message->release (); - } + ACE_OS::sleep (ACE_Time_Value (0, 5000)); + + message->release (); + } - return (0); + return 0; } diff --git a/docs/tutorials/011/task.h b/docs/tutorials/011/task.h index 255513ecd6f..a59f4fda029 100644 --- a/docs/tutorials/011/task.h +++ b/docs/tutorials/011/task.h @@ -1,4 +1,3 @@ - // $Id$ #ifndef TASK_H @@ -10,23 +9,25 @@ # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ -class Task : public ACE_Task < ACE_MT_SYNCH > +class Task : public ACE_Task { public: - typedef ACE_Task < ACE_MT_SYNCH > inherited; + typedef ACE_Task inherited; - Task (void); - ~Task (void); + Task (size_t n_threads); + ~Task (void); - int start (int threads = 1); + int open (void * = 0); int svc (void); int close (u_long flags = 0); protected: - ACE_Barrier * barrier_; + ACE_Barrier *barrier_; + + size_t n_threads_; }; -#endif +#endif /* TASK_H */ -- cgit v1.2.1