From 71be3ec10855aec76f63c6775dcaee3904e685f8 Mon Sep 17 00:00:00 2001
From: vzykov
Date: Mon, 15 Jun 2009 11:12:21 +0000
Subject: ChangeLogTag: Mon Jun 15 10:19:16 UTC 2009 Vladimir Zykov

---
 TAO/ChangeLog                                  | 44 ++++++++++++++++++++++++++
 TAO/tao/Leader_Follower_Flushing_Strategy.cpp  | 23 +++++++++++++-
 TAO/tao/Messaging/Messaging_Policy_i.cpp       |  3 +-
 TAO/tao/ORB_Core.cpp                           | 11 +++++++
 TAO/tao/ORB_Core.h                             | 13 ++++----
 TAO/tao/Transport.cpp                          | 12 +++----
 TAO/tests/Big_AMI/client.cpp                   |  8 -----
 TAO/tests/Bug_1270_Regression/Echo.cpp         |  2 +-
 TAO/tests/Bug_1270_Regression/client.cpp       | 25 +--------------
 TAO/tests/Bug_1270_Regression/server.cpp       | 22 ++++++++++++-
 TAO/tests/Bug_1476_Test/test.ior               |  2 +-
 TAO/tests/Crash_On_Write/Crash_Task.cpp        |  3 +-
 TAO/tests/Portable_Interceptors/AMI/client.cpp |  8 +++--
 13 files changed, 122 insertions(+), 54 deletions(-)

diff --git a/TAO/ChangeLog b/TAO/ChangeLog
index 0c7fdfaa8b7..38eb697da5e 100644
--- a/TAO/ChangeLog
+++ b/TAO/ChangeLog
@@ -1,3 +1,47 @@
+Mon Jun 15 10:19:16 UTC 2009  Vladimir Zykov
+
+        * tests/Crash_On_Write/Crash_Task.cpp:
+
+          Used terminate_process() instead of abort(). The latter works
+          differently on Solaris.
+
+        * tests/Big_AMI/client.cpp:
+        * tests/Portable_Interceptors/AMI/client.cpp:
+        * tests/Bug_1270_Regression/client.cpp:
+        * tests/Bug_1270_Regression/Echo.cpp:
+        * tests/Bug_1270_Regression/server.cpp:
+
+          Fixed tests after the change for Bug#3682. In these tests it
+          was assumed that nothing could be received from the server
+          until we run the ORB explicitly. The latter is not true with
+          the synch scope policy SYNC_WITH_TRANSPORT.
+
+        * tests/Bug_1476_Test/test.ior:
+
+          An attempt to fix this test on platforms where connect to
+          localhost on a non-blocking socket returns with ECONNREFUSED
+          while it's expected to return with errno=EINPROGRESS.
+          This happens on Solaris and other Unices. I changed the remote
+          host in the IOR from localhost to something really remote (google.com).
+
+        * tao/ORB_Core.cpp:
+        * tao/ORB_Core.h:
+        * tao/Messaging/Messaging_Policy_i.cpp:
+
+          This fixes Bug#3682. SYNC_WITH_TRANSPORT is now really the
+          default synch scope policy in TAO. This must fix the Single_Read
+          and AMH_Oneway tests on Solaris.
+
+        * tao/Leader_Follower_Flushing_Strategy.cpp:
+
+          This fixes Bug#3697. The comment in the code explains why this
+          fix is better than the code used before. This must fix
+          Big_Request_Muxing on Solaris.
+
+        * tao/Transport.cpp:
+
+          Changed to use queue_is_empty_i() instead of head_==0.
+
 Mon Jun 15 07:42:47 UTC 2009  Olli Savia
 
         * utils/logWalker/PeerProcess.cpp:
diff --git a/TAO/tao/Leader_Follower_Flushing_Strategy.cpp b/TAO/tao/Leader_Follower_Flushing_Strategy.cpp
index 03dedc5764e..7573e9569f8 100644
--- a/TAO/tao/Leader_Follower_Flushing_Strategy.cpp
+++ b/TAO/tao/Leader_Follower_Flushing_Strategy.cpp
@@ -47,7 +47,28 @@ TAO_Leader_Follower_Flushing_Strategy::flush_transport (
 {
   TAO_ORB_Core * const orb_core = transport->orb_core ();
 
-  while (!transport->queue_is_empty ())
+  if (max_wait_time == 0)
+    {
+      // In case max_wait_time==0 it doesn't make sense to run
+      // while loop depending on transport->queue_is_empty ()
+      // since in multi-threaded application it can easily happen
+      // that the other thread will run the orb and drain the
+      // queue in the transport we're coping with here. So, that
+      // transport->queue_is_empty () will return false but before
+      // we get a chance to run the orb the queue in the transport
+      // will become empty and we will wait forever. Instead while
+      // loop depending on reactor->work_pending () is much safer
+      // since transport will return 0 (letting the reactor know
+      // about more pending work) when handling output/timeout as
+      // long as its queue is not empty.
+      while (orb_core->reactor ()->work_pending ())
+        {
+          ACE_Time_Value tv (0, 100);
+          if (orb_core->run (&tv, 1) == -1)
+            return -1;
+        }
+    }
+  else
     {
       if (orb_core->run (max_wait_time, 1) == -1)
         return -1;
diff --git a/TAO/tao/Messaging/Messaging_Policy_i.cpp b/TAO/tao/Messaging/Messaging_Policy_i.cpp
index 0de73d151a9..6a8ea3a75bb 100644
--- a/TAO/tao/Messaging/Messaging_Policy_i.cpp
+++ b/TAO/tao/Messaging/Messaging_Policy_i.cpp
@@ -235,7 +235,8 @@ TAO_Sync_Scope_Policy::hook (TAO_ORB_Core *orb_core,
   if (CORBA::is_nil (policy.in ()))
     {
-      has_synchronization = false;
+      has_synchronization = true;
+      scope = Messaging::SYNC_WITH_TRANSPORT;
       return;
     }
 
   Messaging::SyncScopePolicy_var p =
diff --git a/TAO/tao/ORB_Core.cpp b/TAO/tao/ORB_Core.cpp
index 658135a32b8..7130325aa13 100644
--- a/TAO/tao/ORB_Core.cpp
+++ b/TAO/tao/ORB_Core.cpp
@@ -294,6 +294,7 @@ TAO_ORB_Core::TAO_ORB_Core (const char *orbid,
   ACE_NEW (this->request_dispatcher_,
            TAO_Request_Dispatcher);
 
+  this->set_sync_scope_hook (TAO_ORB_Core::default_sync_scope_hook);
 }
 
 TAO_ORB_Core::~TAO_ORB_Core (void)
@@ -2924,6 +2925,16 @@ TAO_ORB_Core::implrepo_service (void)
   return CORBA::Object::_duplicate (this->implrepo_service_);
 }
 
+void
+TAO_ORB_Core::default_sync_scope_hook (TAO_ORB_Core *,
+                                       TAO_Stub *,
+                                       bool &has_synchronization,
+                                       Messaging::SyncScope &scope)
+{
+  has_synchronization = true;
+  scope = Messaging::SYNC_WITH_TRANSPORT;
+}
+
 void
 TAO_ORB_Core::call_sync_scope_hook (TAO_Stub *stub,
                                     bool &has_synchronization,
diff --git a/TAO/tao/ORB_Core.h b/TAO/tao/ORB_Core.h
index 13d561541b6..42c2e3b296a 100644
--- a/TAO/tao/ORB_Core.h
+++ b/TAO/tao/ORB_Core.h
@@ -537,6 +537,12 @@ public:
 
   void set_sync_scope_hook (Sync_Scope_Hook hook);
 
+  /// Default Sync_Scope_Hook.
+  static void default_sync_scope_hook (TAO_ORB_Core *,
+                                       TAO_Stub *,
+                                       bool &has_synchronization,
+                                       Messaging::SyncScope &scope);
+
   /// Handle to the factory for protocols_hooks_..
   TAO_Protocols_Hooks *protocols_hooks_;
 
@@ -870,10 +876,6 @@ public:
   /// Choose to be not a default ORB when there is more than one ORB.
   void not_default (const char * orb_id);
 
-  /// This strategy is the default, no explicit queueing and no explicit
-  /// flush
-  TAO::Transport_Queueing_Strategy *default_transport_queueing_strategy (void);
-
   /// Verify condition for permanent forward is given,
   /// both parameters must provide group attributes.
   CORBA::Boolean is_permanent_forward_condition
@@ -1185,9 +1187,6 @@ protected:
 
 #endif /* TAO_HAS_BUFFERING_CONSTRAINT_POLICY == 1 */
 
-  /// This strategy will not queue by default and not flush
-  TAO::Transport_Queueing_Strategy *default_transport_queueing_strategy_;
-
   /// Number of outstanding references to this object.
   ACE_Atomic_Op refcount_;
 
diff --git a/TAO/tao/Transport.cpp b/TAO/tao/Transport.cpp
index ea6e73f1382..25e196e2a0a 100644
--- a/TAO/tao/Transport.cpp
+++ b/TAO/tao/Transport.cpp
@@ -225,7 +225,7 @@ TAO_Transport::~TAO_Transport (void)
 
   // The following assert is needed for the test "Bug_2494_Regression".
   // See the bugzilla bug #2494 for details.
-  ACE_ASSERT (this->head_ == 0);
+  ACE_ASSERT (this->queue_is_empty_i ());
   ACE_ASSERT (this->cache_map_entry_ == 0);
 
 #if TAO_HAS_TRANSPORT_CURRENT == 1
@@ -1020,7 +1020,7 @@ TAO_Transport::drain_queue_helper (int &iovcnt, iovec iov[],
       ACE_DEBUG ((LM_DEBUG,
          ACE_TEXT ("TAO (%P|%t) - Transport[%d]::drain_queue_helper, ")
          ACE_TEXT ("byte_count = %d, head_is_empty = %d\n"),
-         this->id(), byte_count, (this->head_ == 0)));
+         this->id(), byte_count, this->queue_is_empty_i ()));
     }
 
   return 1;
@@ -1121,7 +1121,7 @@ TAO_Transport::drain_queue_i (TAO::Transport::Drain_Constraints const & dc)
         }
     }
 
-  if (this->head_ == 0)
+  if (this->queue_is_empty_i ())
     {
       if (this->flush_timer_pending ())
        {
@@ -1152,7 +1152,7 @@ TAO_Transport::cleanup_queue_i ()
   int msg_count = 0;
 
   // Cleanup all messages
-  while (this->head_ != 0)
+  while (!this->queue_is_empty_i ())
     {
       TAO_Queued_Message *i = this->head_;
 
@@ -1183,7 +1183,7 @@ TAO_Transport::cleanup_queue_i ()
 
 void
 TAO_Transport::cleanup_queue (size_t byte_count)
 {
-  while (this->head_ != 0 && byte_count > 0)
+  while (!this->queue_is_empty_i () && byte_count > 0)
     {
       TAO_Queued_Message *i = this->head_;
@@ -1363,7 +1363,7 @@ TAO_Transport::send_asynchronous_message_i (TAO_Stub *stub,
   // to send first:
   bool try_sending_first = true;
 
-  bool const queue_empty = (this->head_ == 0);
+  bool const queue_empty = this->queue_is_empty_i ();
 
   TAO::Transport_Queueing_Strategy *queue_strategy =
     stub->transport_queueing_strategy ();
diff --git a/TAO/tests/Big_AMI/client.cpp b/TAO/tests/Big_AMI/client.cpp
index ac7a4292842..990354339ff 100644
--- a/TAO/tests/Big_AMI/client.cpp
+++ b/TAO/tests/Big_AMI/client.cpp
@@ -190,14 +190,6 @@ ACE_TMAIN(int argc, ACE_TCHAR *argv[])
                              payload);
         }
 
-      // We are just sending all requests, but we shouldn't get any replies
-      // until we run the orb or do a real synchronous call, so check whether
-      // we didn't get any reply until this moment
-      if (handler.reply_count () > 0)
-        ACE_ERROR_RETURN ((LM_ERROR,
-                           "ERROR: Got a reply during sending asynchronous calls\n"),
-                          1);
-
       if (debug)
         {
          ACE_DEBUG ((LM_DEBUG,
diff --git a/TAO/tests/Bug_1270_Regression/Echo.cpp b/TAO/tests/Bug_1270_Regression/Echo.cpp
index d9c971f82e3..7c548962e89 100644
--- a/TAO/tests/Bug_1270_Regression/Echo.cpp
+++ b/TAO/tests/Bug_1270_Regression/Echo.cpp
@@ -36,6 +36,6 @@ Echo::echo_payload(Test::Payload const &)
       ACE_DEBUG ((LM_DEBUG,
                   "(%P|%t) Echo::echo_payload, aborting\n"));
       // Kill the app
-      ACE_OS::abort();
+      ACE::terminate_process (ACE_OS::getpid ());
     }
 }
diff --git a/TAO/tests/Bug_1270_Regression/client.cpp b/TAO/tests/Bug_1270_Regression/client.cpp
index e58db268837..39e4d8ab5d5 100644
--- a/TAO/tests/Bug_1270_Regression/client.cpp
+++ b/TAO/tests/Bug_1270_Regression/client.cpp
@@ -37,29 +37,6 @@ ACE_TMAIN(int argc, ACE_TCHAR *argv[])
       PortableServer::POAManager_var poa_manager =
         root_poa->the_POAManager ();
 
-      CORBA::Object_var object =
-        orb->resolve_initial_references ("PolicyCurrent");
-
-      CORBA::PolicyCurrent_var policy_current =
-        CORBA::PolicyCurrent::_narrow (object.in ());
-
-      if (CORBA::is_nil (policy_current.in ()))
-        {
-          ACE_ERROR ((LM_ERROR, "ERROR: Nil policy current\n"));
-          return 1;
-        }
-      CORBA::Any scope_as_any;
-      scope_as_any <<= Messaging::SYNC_WITH_TRANSPORT;
-
-      CORBA::PolicyList policies(1); policies.length (1);
-      policies[0] =
-        orb->create_policy (Messaging::SYNC_SCOPE_POLICY_TYPE,
-                            scope_as_any);
-
-      policy_current->set_policy_overrides (policies, CORBA::ADD_OVERRIDE);
-
-      policies[0]->destroy ();
-
       if (parse_args (argc, argv) != 0)
         return 1;
 
@@ -113,7 +90,7 @@ ACE_TMAIN(int argc, ACE_TCHAR *argv[])
     }
   catch (const CORBA::Exception& ex)
     {
-      ex._tao_print_exception ("Exception caught:");
+      ex._tao_print_exception ("Exception caught in client:");
       return 1;
     }
 
diff --git a/TAO/tests/Bug_1270_Regression/server.cpp b/TAO/tests/Bug_1270_Regression/server.cpp
index fdacca6ce3d..36b26f22b52 100644
--- a/TAO/tests/Bug_1270_Regression/server.cpp
+++ b/TAO/tests/Bug_1270_Regression/server.cpp
@@ -38,6 +38,26 @@ ACE_TMAIN(int argc, ACE_TCHAR *argv[])
       CORBA::Object_var object =
         orb->resolve_initial_references ("PolicyCurrent");
 
+      CORBA::PolicyCurrent_var policy_current =
+        CORBA::PolicyCurrent::_narrow (object.in ());
+
+      if (CORBA::is_nil (policy_current.in ()))
+        {
+          ACE_ERROR ((LM_ERROR, "ERROR: Nil policy current\n"));
+          return 1;
+        }
+      CORBA::Any scope_as_any;
+      scope_as_any <<= Messaging::SYNC_NONE;
+
+      CORBA::PolicyList policies(1); policies.length (1);
+      policies[0] =
+        orb->create_policy (Messaging::SYNC_SCOPE_POLICY_TYPE,
+                            scope_as_any);
+
+      policy_current->set_policy_overrides (policies, CORBA::ADD_OVERRIDE);
+
+      policies[0]->destroy ();
+
       if (parse_args (argc, argv) != 0)
         return 1;
 
@@ -87,7 +107,7 @@ ACE_TMAIN(int argc, ACE_TCHAR *argv[])
     }
   catch (const CORBA::Exception& ex)
     {
-      ex._tao_print_exception ("Exception caught:");
+      ex._tao_print_exception ("Exception caught in server:");
       return 1;
     }
 
diff --git a/TAO/tests/Bug_1476_Test/test.ior b/TAO/tests/Bug_1476_Test/test.ior
index da5b17a72c7..87ddb529545 100644
--- a/TAO/tests/Bug_1476_Test/test.ior
+++ b/TAO/tests/Bug_1476_Test/test.ior
@@ -1 +1 @@
-IOR:010000001300000049444c3a546573742f48656c6c6f3a312e300000010000000000000068000000010102000a0000006c6f63616c686f73740006ca1b00000014010f00525354f744f744d10502000000000001000000010000000002000000000000000800000001000000004f41540100000018000000010000000100010001000000010001050901010000000000
\ No newline at end of file
+IOR:000000000000001349444c3a546573742f48656c6c6f3a312e30000000000001000000000000006c000102000000000b676f6f676c652e636f6d0000c12000000000001b14010f005253544a3268540002a34f000000000000000100000001000000000200000000000000080000000054414f000000000100000018000000000001000100000001050100010001010900000000
diff --git a/TAO/tests/Crash_On_Write/Crash_Task.cpp b/TAO/tests/Crash_On_Write/Crash_Task.cpp
index 6b6e9c7d896..1c3528b7a99 100644
--- a/TAO/tests/Crash_On_Write/Crash_Task.cpp
+++ b/TAO/tests/Crash_On_Write/Crash_Task.cpp
@@ -20,7 +20,8 @@ Crash_Task::svc (void)
   ACE_DEBUG ((LM_DEBUG, "(%P|%t) Starting crash task\n"));
   ACE_OS::sleep (this->running_time_);
   ACE_DEBUG ((LM_DEBUG, "(%P|%t) Sleep done, crashing the server\n"));
-  ACE_DEBUG ((LM_DEBUG, "%a"));
+  ACE_DEBUG ((LM_DEBUG, "Aborting\n"));
+  ACE::terminate_process (ACE_OS::getpid ());
 
   return 0;
 }
diff --git a/TAO/tests/Portable_Interceptors/AMI/client.cpp b/TAO/tests/Portable_Interceptors/AMI/client.cpp
index dec2ca68b50..51fe346a2c9 100644
--- a/TAO/tests/Portable_Interceptors/AMI/client.cpp
+++ b/TAO/tests/Portable_Interceptors/AMI/client.cpp
@@ -195,6 +195,8 @@ test_ami (CORBA::ORB_ptr orb,
 
   unsigned long initial_request_count =
     Echo_Client_Request_Interceptor::request_count;
+  unsigned long initial_reply_count =
+    Echo_Client_Request_Interceptor::reply_count;
   unsigned long initial_other_count =
     Echo_Client_Request_Interceptor::other_count;
 
@@ -206,7 +208,8 @@ test_ami (CORBA::ORB_ptr orb,
     }
 
   unsigned long total_request_count =
-    Echo_Client_Request_Interceptor::request_count - initial_request_count;
+    Echo_Client_Request_Interceptor::request_count -
+      (Echo_Client_Request_Interceptor::reply_count + initial_request_count);
   unsigned long total_other_count =
     Echo_Client_Request_Interceptor::other_count - initial_other_count;
 
@@ -221,8 +224,7 @@ test_ami (CORBA::ORB_ptr orb,
     }
 
   initial_request_count =
-    Echo_Client_Request_Interceptor::request_count;
-  unsigned long initial_reply_count =
+    Echo_Client_Request_Interceptor::request_count -
     Echo_Client_Request_Interceptor::reply_count;
 
   while (echo_handler_impl->replies () != ITERATIONS)
-- 
cgit v1.2.1
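
For reference, a minimal sketch (not part of the patch) of how an application can still override the now-default SYNC_WITH_TRANSPORT scope. It simply reuses the PolicyCurrent pattern this patch moves into tests/Bug_1270_Regression/server.cpp; the helper name set_sync_none and the header choice are illustrative assumptions, not code from the commit.

    // Illustrative helper, assuming the usual TAO headers; the tests above
    // get these declarations via their generated stubs instead.
    #include "tao/corba.h"
    #include "tao/Messaging/Messaging.h"

    // Override the (now default) SYNC_WITH_TRANSPORT scope with SYNC_NONE
    // for all subsequent invocations made through this ORB.
    void
    set_sync_none (CORBA::ORB_ptr orb)
    {
      CORBA::Object_var object =
        orb->resolve_initial_references ("PolicyCurrent");
      CORBA::PolicyCurrent_var policy_current =
        CORBA::PolicyCurrent::_narrow (object.in ());

      CORBA::Any scope_as_any;
      scope_as_any <<= Messaging::SYNC_NONE;

      CORBA::PolicyList policies (1);
      policies.length (1);
      policies[0] =
        orb->create_policy (Messaging::SYNC_SCOPE_POLICY_TYPE, scope_as_any);

      // Apply the override at thread scope and release the policy object.
      policy_current->set_policy_overrides (policies, CORBA::ADD_OVERRIDE);
      policies[0]->destroy ();
    }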