summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorwilsond <wilsond@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>2007-08-15 17:39:12 +0000
committerwilsond <wilsond@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>2007-08-15 17:39:12 +0000
commit0f06f37214ad11a1ebefecee92694c97f332ea2a (patch)
treeb94b2327cc1d888ff1132a81cda2db616eb16a47
parenta27b85477326c51c075cab79b5a3aa498072f1fd (diff)
downloadATCD-0f06f37214ad11a1ebefecee92694c97f332ea2a.tar.gz
ChangeLogTag: Wed Aug 15 17:31:43 UTC 2007 Dale Wilson <wilsond@ociweb.com>
-rw-r--r--TAO/ChangeLog17
-rw-r--r--TAO/tao/Transport_Cache_Manager.cpp2
-rw-r--r--TAO/tao/Transport_Connector.cpp57
-rw-r--r--TAO/tests/Stack_Recursion/Client_Task.cpp6
-rw-r--r--TAO/tests/Stack_Recursion/README38
5 files changed, 92 insertions, 28 deletions
diff --git a/TAO/ChangeLog b/TAO/ChangeLog
index 955df9717da..3dccbfecd4d 100644
--- a/TAO/ChangeLog
+++ b/TAO/ChangeLog
@@ -1,3 +1,20 @@
+Wed Aug 15 17:31:43 UTC 2007 Dale Wilson <wilsond@ociweb.com>
+
+ * tao/Transport_Connector.cpp:
+ Disable code paths that call wait_for_transport. This
+ avoids a problem in which more than one thread waits
+ on the same transport. However it also negates the
+ fix for too-many-connections (bugzilla 2935)
+
+ * tao/Transport_Cache_Manager.cpp:
+ Cosmetic fix to a log message
+
+ * tests/Stack_Recursion/Client_Task.cpp:
+ Fix some ugly code without changing functionality.
+
+ * tests/Stack_Recursion/README:
+ Document what this test really does.
+
Wed Aug 15 15:00:38 UTC 2007 Abdullah Sowayan <abdullah.sowayan@lmco.com>
* examples/Borland/ChatClientWnd.h:
diff --git a/TAO/tao/Transport_Cache_Manager.cpp b/TAO/tao/Transport_Cache_Manager.cpp
index e994b083a3b..1410e7917ff 100644
--- a/TAO/tao/Transport_Cache_Manager.cpp
+++ b/TAO/tao/Transport_Cache_Manager.cpp
@@ -241,7 +241,7 @@ namespace TAO
{
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("TAO (%P|%t) - Transport_Cache_Manager::find_i: ")
- ACE_TEXT ("Found available Transport[%d] @hash:index {%d:%d}(\n"),
+ ACE_TEXT ("Found available Transport[%d] @hash:index {%d:%d}\n"),
entry->item ().transport ()->id (),
entry->ext_id_.hash (),
entry->ext_id_.index ()
diff --git a/TAO/tao/Transport_Connector.cpp b/TAO/tao/Transport_Connector.cpp
index 8de0341f847..96012fd9f35 100644
--- a/TAO/tao/Transport_Connector.cpp
+++ b/TAO/tao/Transport_Connector.cpp
@@ -488,11 +488,11 @@ TAO_Connector::connect (TAO::Profile_Transport_Resolver *r,
return base_transport;
}
}
+#if 0 // @todo: <wilsond@ociweb.com> Temporarily disable this code because the wait_for_transport is not safe
else if (found == TAO::Transport_Cache_Manager::CACHE_FOUND_CONNECTING )
{
if (r->blocked_connect ())
{
-
if (TAO_debug_level > 4)
{
ACE_DEBUG ((LM_DEBUG,
@@ -501,18 +501,18 @@ TAO_Connector::connect (TAO::Profile_Transport_Resolver *r,
base_transport->id ()));
}
- // If wait_for_transport returns no errors, the base_transport
- // points to the connection we wait for.
- if (this->wait_for_transport (r, base_transport, timeout, false))
- {
- // be sure this transport is registered with the reactor
- // before using it.
- if (!base_transport->register_if_necessary ())
- {
- base_transport->remove_reference ();
- return 0;
- }
- }
+ // If wait_for_transport returns no errors, the base_transport
+ // points to the connection we wait for.
+ if (this->wait_for_transport (r, base_transport, timeout, false))
+ {
+ // be sure this transport is registered with the reactor
+ // before using it.
+ if (!base_transport->register_if_necessary ())
+ {
+ base_transport->remove_reference ();
+ return 0;
+ }
+ }
// In either success or failure cases of wait_for_transport, the
// ref counter in corresponding to the ref counter added by
// find_transport is decremented.
@@ -531,60 +531,71 @@ TAO_Connector::connect (TAO::Profile_Transport_Resolver *r,
return base_transport;
}
}
+#endif // Temporarily disable CONNECTING handling
else
{
// @todo: This is not the right place for this! (bugzilla 3023)
// Purge connections (if necessary)
tcm.purge ();
-
+#if 0 // @todo: <wilsond@ociweb.com> Temporarily disable this code because the wait_for_transport is not safe
bool make_new_connection =
(found == TAO::Transport_Cache_Manager::CACHE_FOUND_NONE) ||
(found == TAO::Transport_Cache_Manager::CACHE_FOUND_BUSY
&& this->new_connection_is_ok (busy_count));
if (make_new_connection)
+#endif // temporarily disable check -- just open the connection
{
- TAO_Transport* t = this->make_connection (r, *desc, timeout);
- if (t == 0)
+ // we aren't going to use the transport returned from the cache (if any)
+ if (base_transport != 0)
{
- return t;
+ base_transport->remove_reference ();
+ }
+
+ base_transport = this->make_connection (r, *desc, timeout);
+ if (base_transport == 0)
+ {
+ return base_transport;
}
// Should this code be moved? If so, where to?
- t->opened_as (TAO::TAO_CLIENT_ROLE);
+ base_transport->opened_as (TAO::TAO_CLIENT_ROLE);
if (TAO_debug_level > 4)
{
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT("TAO (%P|%t) - Transport_Connector::connect, ")
ACE_TEXT("opening Transport[%d] in TAO_CLIENT_ROLE\n"),
- t->id ()));
+ base_transport->id ()));
}
// Call post connect hook. If the post_connect_hook () returns
// false, just purge the entry.
- if (!t->post_connect_hook ())
+ if (!base_transport->post_connect_hook ())
{
if (TAO_debug_level > 4)
{
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT("TAO (%P|%t) - Post_connect_hook failed. ")
ACE_TEXT("Purging transport[%d]\n"),
- t->id ()));
+ base_transport->id ()));
}
- (void) t->purge_entry ();
+ (void) base_transport->purge_entry ();
}
// The new transport is in the cache. We'll pick it up from there
// next time thru this loop (using it from here causes more problems
// than it fixes due to the changes that allow a new connection to be
// re-used by a nested upcall before we get back here.)
- t->remove_reference ();
+ base_transport->remove_reference ();
}
+#if 0 // @todo: <wilsond@ociweb.com> Temporarily disable this code because the wait_for_transport is not safe
else // not making new connection
{
(void) this->wait_for_transport (r, base_transport, timeout, true);
base_transport->remove_reference ();
}
+#endif // temporarily disable check -- just open the connection
+
}
}
}
diff --git a/TAO/tests/Stack_Recursion/Client_Task.cpp b/TAO/tests/Stack_Recursion/Client_Task.cpp
index 7cf844a765f..61ae3c5e451 100644
--- a/TAO/tests/Stack_Recursion/Client_Task.cpp
+++ b/TAO/tests/Stack_Recursion/Client_Task.cpp
@@ -36,10 +36,8 @@ Client_Task::svc (void)
"(%P|%t) In iteration [%d] ....\n",
i));
#endif /*if 0*/
- Test::Payload_var pl = new Test::Payload;
- Test::Payload_out payload (pl.out ());
- this->sender_->get_data (this->event_size_,
- payload);
+ Test::Payload_var pl;
+ this->sender_->get_data (this->event_size_, pl.out ());
}
}
catch (const CORBA::Exception&)
diff --git a/TAO/tests/Stack_Recursion/README b/TAO/tests/Stack_Recursion/README
index cc202fc7c40..419e6cf040f 100644
--- a/TAO/tests/Stack_Recursion/README
+++ b/TAO/tests/Stack_Recursion/README
@@ -2,6 +2,44 @@
@page Stack_Recursion Test README File
+
+Begin Comment added August 14, 2007 <wilsond@ociweb.com>
+The original README starts below.
+
+The name of this test is misleading. See the bugzilla entry below for a
+historical justification of the name.
+
+It is a test of the throughput for replies to a client's CORBA requests.
+
+The client starts 8 tasks (aka threads.) Each task calls the ping method
+100 times. Ping is a nop method on the server. Presumably sending these
+pings opens one or more connections to the server. The actual number of
+connections varies from test run to test run.
+
+After that the client calls the get_data method 1000 times. Each call
+returns a sequence of octets containing 1048576 bytes (1Mb). The test
+succeeds if these replys are received by the client in 960 seconds
+(16 minutes).
+
+A successful test executes 8800 CORBA calls in 16 minutes (550 calls a
+minute = 9+ calls/second) and returns 8000 megabytes (500 megabytes/minute
+= 8.3 megabytes/second) via a local host connection.
+
+A typical failure for this test is a client timeout at the end of 16 minutes.
+This can happen if the test system is too slow or heavily loaded to handle
+this much data (including the necessary mallocs & frees (16M per call), or
+if there is a significant bottleneck or hang in the ORB. Because the test
+is sensitive to other loads on the test system, the results will be intermittent.
+
+8 tasks is hardcoded in client.cpp
+100 pings is hardcoded in Client_Task.cpp
+1000 calls to get_data is hardcoded in client.cpp
+1Mb per get_data call is hardcoded in client.cpp.
+960 seconds is hard coded in run_test.pl
+
+End Comment added August 14, 2007 <wilsond@ociweb.com>
+
+
A stress test for the stack recursion outlined in the bugzilla
under id 1125. Here is the link for the bug