-rw-r--r--  TAO/ChangeLog-99c                                    |  19
-rw-r--r--  TAO/tao/Acceptor_Impl.cpp                            |  68
-rw-r--r--  TAO/tao/Acceptor_Impl.h                              |  28
-rw-r--r--  TAO/tao/IIOP_Acceptor.cpp                            |  10
-rw-r--r--  TAO/tao/IIOP_Acceptor.h                              |   2
-rw-r--r--  TAO/tao/UIOP_Acceptor.cpp                            |  10
-rw-r--r--  TAO/tao/UIOP_Acceptor.h                              |   2
-rw-r--r--  TAO/tests/Connection_Purging/Connection_Purging.cpp  | 135
8 files changed, 238 insertions(+), 36 deletions(-)
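
The heart of the patch is the new TAO_Accept_Strategy below: when ACE_Accept_Strategy::open() or accept_svc_handler() fails because the process has run out of descriptors, out_of_sockets_handler() asks the ORB's connector registry to purge cached connections; open() then retries, while accept_svc_handler() simply frees descriptors for subsequent accepts. The following is a minimal standalone sketch of that retry-after-purge idea, written against plain POSIX accept() rather than TAO's templates; the purge callback is only a stand-in for what TAO_Connector_Registry::purge_connections() does in the real patch.

// Standalone sketch (not TAO source): retry an accept after purging
// cached connections when the process is out of descriptors.
#include <cerrno>
#include <sys/socket.h>

// Accept a connection on <listener>.  If accept() fails because the
// process has exhausted its descriptors (EMFILE/ENFILE), ask the
// caller-supplied purge hook to close some cached connections, then
// retry exactly once.
int
accept_with_purge (int listener, int (*purge_old_connections) (void))
{
  int handle = ::accept (listener, 0, 0);
  if (handle != -1)
    return handle;                        // accepted normally

  if (errno != EMFILE && errno != ENFILE)
    return -1;                            // unrelated failure: give up

  if (purge_old_connections () == -1)
    return -1;                            // nothing could be purged

  return ::accept (listener, 0, 0);       // retry with freed descriptors
}
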
diff --git a/TAO/ChangeLog-99c b/TAO/ChangeLog-99c
index a5021c037bc..870fff6e44d 100644
--- a/TAO/ChangeLog-99c
+++ b/TAO/ChangeLog-99c
@@ -1,3 +1,22 @@
+Fri Jul 23 17:54:45 1999  Irfan Pyarali  <irfan@cs.wustl.edu>
+
+	* tao/Acceptor_Impl.cpp (TAO_Accept_Strategy): Added a new accept
+	  strategy that purges old connections when it runs out of
+	  descriptors.
+
+	* tao/IIOP_Acceptor.cpp (open):
+	* tao/UIOP_Acceptor.cpp (open):
+
+	  Changed the acceptors to use the new accept strategy.
+
+	* tests/Connection_Purging/Connection_Purging.cpp:
+
+	  - Added handle gobbling to this test in order to reduce the
+	    number of iterations required to make purging happen.
+
+	  - Added the ability to go back and reinvoke on old servers.
+	    This will check whether the purging did the right thing.
+
 Fri Jul 23 17:23:07 1999  Carlos O'Ryan  <coryan@cs.wustl.edu>
 
 	* tao/Transport_Mux_Strategy.cpp:
diff --git a/TAO/tao/Acceptor_Impl.cpp b/TAO/tao/Acceptor_Impl.cpp
index c6933690369..94a21acc7bf 100644
--- a/TAO/tao/Acceptor_Impl.cpp
+++ b/TAO/tao/Acceptor_Impl.cpp
@@ -26,6 +26,7 @@
 
 #include "tao/ORB_Core.h"
 #include "tao/Server_Strategy_Factory.h"
+#include "tao/Connector_Registry.h"
 
 #if !defined(__ACE_INLINE__)
 #include "tao/Acceptor_Impl.i"
@@ -33,6 +34,8 @@
 
 ACE_RCSID(tao, Acceptor_Impl, "$Id$")
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <class SVC_HANDLER>
 TAO_Creation_Strategy<SVC_HANDLER>::TAO_Creation_Strategy (TAO_ORB_Core *orb_core)
   : orb_core_ (orb_core)
@@ -50,13 +53,14 @@ TAO_Creation_Strategy<SVC_HANDLER>::make_svc_handler (SVC_HANDLER *&sh)
   return 0;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <class SVC_HANDLER>
 TAO_Concurrency_Strategy<SVC_HANDLER>::TAO_Concurrency_Strategy (TAO_ORB_Core *orb_core)
   : orb_core_ (orb_core)
 {
 }
 
-
 template <class SVC_HANDLER> int
 TAO_Concurrency_Strategy<SVC_HANDLER>::activate_svc_handler (SVC_HANDLER *sh,
                                                              void *arg)
@@ -76,4 +80,66 @@ TAO_Concurrency_Strategy<SVC_HANDLER>::activate_svc_handler (SVC_HANDLER *sh,
     (sh, ACE_Event_Handler::READ_MASK);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
+template <class SVC_HANDLER, ACE_PEER_ACCEPTOR_1>
+TAO_Accept_Strategy<SVC_HANDLER, ACE_PEER_ACCEPTOR_2>::TAO_Accept_Strategy (TAO_ORB_Core *orb_core)
+  : orb_core_ (orb_core)
+{
+}
+
+template <class SVC_HANDLER, ACE_PEER_ACCEPTOR_1> int
+TAO_Accept_Strategy<SVC_HANDLER, ACE_PEER_ACCEPTOR_2>::open (const ACE_PEER_ACCEPTOR_ADDR &local_addr,
+                                                             int restart)
+{
+  int result = ACCEPT_STRATEGY_BASE::open (local_addr,
+                                           restart);
+
+  if (result == 0)
+    return result;
+
+  // If the error occured due to the fact that the open handle limit
+  // was exhausted, then purge some "old" connections.
+  result = this->out_of_sockets_handler ();
+  if (result == -1)
+    return -1;
+
+  // If we are able to purge, try again.
+  return ACCEPT_STRATEGY_BASE::open (local_addr, restart);
+}
+
+template <class SVC_HANDLER, ACE_PEER_ACCEPTOR_1> int
+TAO_Accept_Strategy<SVC_HANDLER, ACE_PEER_ACCEPTOR_2>::accept_svc_handler (SVC_HANDLER *svc_handler)
+{
+  int result = ACCEPT_STRATEGY_BASE::accept_svc_handler (svc_handler);
+
+  if (result == 0)
+    return result;
+
+  // If the error occured due to the fact that the open handle limit
+  // was exhausted, then purge some "old" connections.
+  this->out_of_sockets_handler ();
+
+  return result;
+}
+
+template <class SVC_HANDLER, ACE_PEER_ACCEPTOR_1> int
+TAO_Accept_Strategy<SVC_HANDLER, ACE_PEER_ACCEPTOR_2>::out_of_sockets_handler (void)
+{
+  if (ACE::out_of_handles (errno))
+    {
+      // Close some cached connections by explicitly purging the
+      // connection cache maintained by the connectors in the
+      // connector registry.
+      if (TAO_debug_level > 0)
+        ACE_DEBUG ((LM_DEBUG, "Purging connections from Connectors in Connector Registry...\n"));
+
+      return this->orb_core_->connector_registry ()->purge_connections ();
+    }
+
+  return -1;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 #endif /* TAO_ACCEPTOR_IMPL_C */
diff --git a/TAO/tao/Acceptor_Impl.h b/TAO/tao/Acceptor_Impl.h
index 81bd0085389..063e05baf22 100644
--- a/TAO/tao/Acceptor_Impl.h
+++ b/TAO/tao/Acceptor_Impl.h
@@ -63,6 +63,34 @@ protected:
   // Pointer to the ORB Core.
 };
 
+template <class SVC_HANDLER, ACE_PEER_ACCEPTOR_1>
+class TAO_Accept_Strategy : public ACE_Accept_Strategy<SVC_HANDLER, ACE_PEER_ACCEPTOR_2>
+{
+public:
+
+  TAO_Accept_Strategy (TAO_ORB_Core *orb_core);
+  // Constructor.
+
+  int open (const ACE_PEER_ACCEPTOR_ADDR &local_addr,
+            int restart = 0);
+  // Initialize the <peer_acceptor_> with <local_addr>.  If the
+  // process runs out of handles, purge some "old" connections.
+
+  int accept_svc_handler (SVC_HANDLER *svc_handler);
+  // Delegates to the <accept> method of the PEER_ACCEPTOR.  If the
+  // process runs out of handles, purge some "old" connections.
+
+protected:
+  int out_of_sockets_handler (void);
+  // Handler which deals with purging "old" connections.
+
+  typedef ACE_Accept_Strategy<SVC_HANDLER, ACE_PEER_ACCEPTOR_2> ACCEPT_STRATEGY_BASE;
+  // Base class.
+
+  TAO_ORB_Core *orb_core_;
+  // Pointer to the ORB Core.
+};
+
 #if defined(__ACE_INLINE__)
 #include "tao/Acceptor_Impl.i"
 #endif /* __ACE_INLINE__ */
diff --git a/TAO/tao/IIOP_Acceptor.cpp b/TAO/tao/IIOP_Acceptor.cpp
index e4c940c0e26..a9d0df1dcdb 100644
--- a/TAO/tao/IIOP_Acceptor.cpp
+++ b/TAO/tao/IIOP_Acceptor.cpp
@@ -34,6 +34,7 @@ TAO_IIOP_Acceptor::TAO_IIOP_Acceptor (void)
     base_acceptor_ (),
     creation_strategy_ (0),
     concurrency_strategy_ (0),
+    accept_strategy_ (0),
     version_ (TAO_DEF_GIOP_MAJOR, TAO_DEF_GIOP_MINOR),
     orb_core_ (0)
 {
@@ -43,6 +44,7 @@ TAO_IIOP_Acceptor::~TAO_IIOP_Acceptor (void)
 {
   delete this->creation_strategy_;
   delete this->concurrency_strategy_;
+  delete this->accept_strategy_;
 }
 
 // TODO =
@@ -158,10 +160,14 @@ TAO_IIOP_Acceptor::open_i (TAO_ORB_Core* orb_core,
                   TAO_IIOP_CONCURRENCY_STRATEGY (this->orb_core_),
                   -1);
 
+  ACE_NEW_RETURN (this->accept_strategy_,
+                  TAO_IIOP_ACCEPT_STRATEGY (this->orb_core_),
+                  -1);
+
   if (this->base_acceptor_.open (addr,
                                  this->orb_core_->reactor (),
                                  this->creation_strategy_,
-                                 0,
+                                 this->accept_strategy_,
                                  this->concurrency_strategy_) == -1)
     {
       if (TAO_debug_level > 0)
@@ -239,6 +245,7 @@ template class ACE_Concurrency_Strategy<TAO_IIOP_Server_Connection_Handler>;
 template class ACE_Scheduling_Strategy<TAO_IIOP_Server_Connection_Handler>;
 template class TAO_Creation_Strategy<TAO_IIOP_Server_Connection_Handler>;
 template class TAO_Concurrency_Strategy<TAO_IIOP_Server_Connection_Handler>;
+template class TAO_Accept_Strategy<TAO_IIOP_Server_Connection_Handler, ACE_SOCK_ACCEPTOR>;
 
 #elif defined (ACE_HAS_TEMPLATE_INSTANTIATION_PRAGMA)
 
@@ -250,5 +257,6 @@ template class TAO_Concurrency_Strategy<TAO_IIOP_Server_Connection_Handler>;
 #pragma instantiate ACE_Scheduling_Strategy<TAO_IIOP_Server_Connection_Handler>
 #pragma instantiate TAO_Creation_Strategy<TAO_IIOP_Server_Connection_Handler>
 #pragma instantiate TAO_Concurrency_Strategy<TAO_IIOP_Server_Connection_Handler>
+#pragma instantiate TAO_Accept_Strategy<TAO_IIOP_Server_Connection_Handler, ACE_SOCK_ACCEPTOR>
 
 #endif /* ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION */
diff --git a/TAO/tao/IIOP_Acceptor.h b/TAO/tao/IIOP_Acceptor.h
index 900dcc408bc..c45e93d7fbd 100644
--- a/TAO/tao/IIOP_Acceptor.h
+++ b/TAO/tao/IIOP_Acceptor.h
@@ -82,6 +82,7 @@ public:
   typedef ACE_Strategy_Acceptor<TAO_IIOP_Server_Connection_Handler, ACE_SOCK_ACCEPTOR> TAO_IIOP_BASE_ACCEPTOR;
   typedef TAO_Creation_Strategy<TAO_IIOP_Server_Connection_Handler> TAO_IIOP_CREATION_STRATEGY;
   typedef TAO_Concurrency_Strategy<TAO_IIOP_Server_Connection_Handler> TAO_IIOP_CONCURRENCY_STRATEGY;
+  typedef TAO_Accept_Strategy<TAO_IIOP_Server_Connection_Handler, ACE_SOCK_ACCEPTOR> TAO_IIOP_ACCEPT_STRATEGY;
 
 private:
   int open_i (TAO_ORB_Core* orb_core,
@@ -94,6 +95,7 @@ private:
 
   TAO_IIOP_CREATION_STRATEGY *creation_strategy_;
   TAO_IIOP_CONCURRENCY_STRATEGY *concurrency_strategy_;
+  TAO_IIOP_ACCEPT_STRATEGY *accept_strategy_;
   // Acceptor strategies.
 
   ACE_INET_Addr address_;
diff --git a/TAO/tao/UIOP_Acceptor.cpp b/TAO/tao/UIOP_Acceptor.cpp
index 737f330a47e..b62478db05c 100644
--- a/TAO/tao/UIOP_Acceptor.cpp
+++ b/TAO/tao/UIOP_Acceptor.cpp
@@ -38,6 +38,7 @@ TAO_UIOP_Acceptor::TAO_UIOP_Acceptor (void)
     base_acceptor_ (),
     creation_strategy_ (0),
     concurrency_strategy_ (0),
+    accept_strategy_ (0),
     version_ (TAO_DEF_GIOP_MAJOR, TAO_DEF_GIOP_MINOR),
     orb_core_ (0),
     unlink_on_close_ (1)
@@ -48,6 +49,7 @@ TAO_UIOP_Acceptor::~TAO_UIOP_Acceptor (void)
 {
   delete this->creation_strategy_;
   delete this->concurrency_strategy_;
+  delete this->accept_strategy_;
 }
 
 int
@@ -163,6 +165,10 @@ TAO_UIOP_Acceptor::open_i (TAO_ORB_Core* orb_core,
                   TAO_UIOP_CONCURRENCY_STRATEGY (this->orb_core_),
                   -1);
 
+  ACE_NEW_RETURN (this->accept_strategy_,
+                  TAO_UIOP_ACCEPT_STRATEGY (this->orb_core_),
+                  -1);
+
   ACE_UNIX_Addr addr;
 
   this->rendezvous_point (addr, rendezvous);
@@ -170,7 +176,7 @@ TAO_UIOP_Acceptor::open_i (TAO_ORB_Core* orb_core,
   if (this->base_acceptor_.open (addr,
                                  this->orb_core_->reactor (),
                                  this->creation_strategy_,
-                                 0,
+                                 this->accept_strategy_,
                                  this->concurrency_strategy_) == -1)
     {
       // Don't unlink an existing rendezvous point since it may be in
@@ -256,6 +262,7 @@ template class ACE_Concurrency_Strategy<TAO_UIOP_Server_Connection_Handler>;
 template class ACE_Scheduling_Strategy<TAO_UIOP_Server_Connection_Handler>;
 template class TAO_Creation_Strategy<TAO_UIOP_Server_Connection_Handler>;
 template class TAO_Concurrency_Strategy<TAO_UIOP_Server_Connection_Handler>;
+template class TAO_Accept_Strategy<TAO_UIOP_Server_Connection_Handler, ACE_LSOCK_ACCEPTOR>;
 
 #elif defined (ACE_HAS_TEMPLATE_INSTANTIATION_PRAGMA)
 
@@ -267,6 +274,7 @@ template class TAO_Concurrency_Strategy<TAO_UIOP_Server_Connection_Handler>;
 #pragma instantiate ACE_Scheduling_Strategy<TAO_UIOP_Server_Connection_Handler>
 #pragma instantiate TAO_Creation_Strategy<TAO_UIOP_Server_Connection_Handler>
 #pragma instantiate TAO_Concurrency_Strategy<TAO_UIOP_Server_Connection_Handler>
+#pragma instantiate TAO_Accept_Strategy<TAO_UIOP_Server_Connection_Handler, ACE_LSOCK_ACCEPTOR>
 
 #endif /* ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION */
diff --git a/TAO/tao/UIOP_Acceptor.h b/TAO/tao/UIOP_Acceptor.h
index 5557c55e8dc..50ada984c60 100644
--- a/TAO/tao/UIOP_Acceptor.h
+++ b/TAO/tao/UIOP_Acceptor.h
@@ -82,6 +82,7 @@ public:
   typedef ACE_Strategy_Acceptor<TAO_UIOP_Server_Connection_Handler, ACE_LSOCK_ACCEPTOR> TAO_UIOP_BASE_ACCEPTOR;
   typedef TAO_Creation_Strategy<TAO_UIOP_Server_Connection_Handler> TAO_UIOP_CREATION_STRATEGY;
   typedef TAO_Concurrency_Strategy<TAO_UIOP_Server_Connection_Handler> TAO_UIOP_CONCURRENCY_STRATEGY;
+  typedef TAO_Accept_Strategy<TAO_UIOP_Server_Connection_Handler, ACE_LSOCK_ACCEPTOR> TAO_UIOP_ACCEPT_STRATEGY;
 
 private:
   int open_i (TAO_ORB_Core *orb_core,
               const char *rendezvous);
@@ -97,6 +98,7 @@ private:
 
   TAO_UIOP_CREATION_STRATEGY *creation_strategy_;
   TAO_UIOP_CONCURRENCY_STRATEGY *concurrency_strategy_;
+  TAO_UIOP_ACCEPT_STRATEGY *accept_strategy_;
   // Acceptor strategies.
 
   TAO_GIOP_Version version_;
diff --git a/TAO/tests/Connection_Purging/Connection_Purging.cpp b/TAO/tests/Connection_Purging/Connection_Purging.cpp
index 8145eb18125..0c345f244a9 100644
--- a/TAO/tests/Connection_Purging/Connection_Purging.cpp
+++ b/TAO/tests/Connection_Purging/Connection_Purging.cpp
@@ -20,6 +20,8 @@
 #include "testS.h"
 #include "ace/Task.h"
 #include "ace/Get_Opt.h"
+#include "ace/Handle_Gobbler.h"
+#include "ace/Get_Opt.h"
 
 struct arguments
 {
@@ -34,25 +36,42 @@ struct Info
   CORBA::String_var ior;
 };
 
+static size_t keep_handles_available = 10;
 static size_t iterations = 20;
+static size_t remote_calls = 2;
 static Info *info = 0;
 static int debug = 0;
+static int go_to_next_orb = 0;
 
 class test_i : public POA_test
 {
 public:
+  test_i (void);
+
   void method (CORBA::Environment &ACE_TRY_ENV)
     ACE_THROW_SPEC ((CORBA::SystemException));
+
+private:
+
+  size_t counter_;
 };
 
+test_i::test_i (void)
+  : counter_ (0)
+{
+}
+
 void
-test_i::method (CORBA::Environment &ACE_TRY_ENV)
+test_i::method (CORBA::Environment &)
   ACE_THROW_SPEC ((CORBA::SystemException))
 {
+  go_to_next_orb = 1;
+
   if (debug)
     ACE_DEBUG ((LM_DEBUG,
-                "test_i::method()\n"));
+                "test_i::method() iteration = %d\n",
+                ++this->counter_));
 }
 
 class Server_Task : public ACE_Task_Base
@@ -75,11 +94,19 @@ Server_Task::Server_Task (Info *info)
 int
 Server_Task::svc (void)
 {
-  for (size_t i = 0;
-       i < iterations;
-       ++i)
+  for (size_t j = 0;
+       j < remote_calls;
+       ++j)
     {
-      this->info_[i].orb->run ();
+      for (size_t i = 0;
+           i < iterations;
+           ++i)
+        {
+          while (!go_to_next_orb)
+            this->info_[i].orb->perform_work ();
+
+          go_to_next_orb = 0;
+        }
     }
 
   return 0;
@@ -89,7 +116,7 @@ static int
 parse_args (int argc,
             char **argv)
 {
-  ACE_Get_Opt get_opts (argc, argv, "i:d");
+  ACE_Get_Opt get_opts (argc, argv, "i:a:r:d");
   int c;
 
   while ((c = get_opts ()) != -1)
@@ -103,12 +130,22 @@
         debug = 1;
         break;
 
+      case 'a':
+        keep_handles_available = atoi (get_opts.optarg);
+        break;
+
+      case 'r':
+        remote_calls = atoi (get_opts.optarg);
+        break;
+
       case '?':
       default:
         ACE_ERROR_RETURN ((LM_ERROR,
                            "usage: %s \n"
                            "[-i iterations] \n"
                            "[-d (debug)] \n"
+                           "[-a (keep handles available)] \n",
+                           "[-r (remote calls per server)] \n",
                            "\n",
                            argv [0]),
                           -1);
@@ -234,37 +271,48 @@ setup_client_orb (CORBA::ORB_out client_orb,
 void
 invoke_remote_calls (CORBA::ORB_ptr client_orb)
 {
-  for (size_t i = 0;
-       i < iterations;
-       ++i)
-    {
-      ACE_DECLARE_NEW_CORBA_ENV;
+  ACE_DECLARE_NEW_CORBA_ENV;
 
-      ACE_TRY
+  ACE_TRY
+    {
+      for (size_t j = 0;
+           j < remote_calls;
+           ++j)
         {
-          CORBA::Object_var object = client_orb->string_to_object (info[i].ior.in (),
-                                                                   ACE_TRY_ENV);
-          ACE_TRY_CHECK;
-
-          test_var test_object =
-            test::_narrow (object.in (),
-                           ACE_TRY_ENV);
-          ACE_TRY_CHECK;
-
-          test_object->method (ACE_TRY_ENV);
-          ACE_TRY_CHECK;
+          for (size_t i = 0;
+               i < iterations;
+               ++i)
+            {
+              CORBA::Object_var object =
+                client_orb->string_to_object (info[i].ior.in (),
+                                              ACE_TRY_ENV);
+              ACE_TRY_CHECK;
+
+              test_var test_object =
+                test::_narrow (object.in (),
+                               ACE_TRY_ENV);
+              ACE_TRY_CHECK;
+
+              test_object->method (ACE_TRY_ENV);
+              ACE_TRY_CHECK;
+            }
+        }
 
+      for (size_t i = 0;
+           i < iterations;
+           ++i)
+        {
           info[i].orb->shutdown (1, ACE_TRY_ENV);
           ACE_TRY_CHECK;
         }
-      ACE_CATCHANY
-        {
-          ACE_PRINT_EXCEPTION (ACE_ANY_EXCEPTION, "Exception in running client side");
-          ACE_ASSERT (0);
-        }
-      ACE_ENDTRY;
     }
+  ACE_CATCHANY
+    {
+      ACE_PRINT_EXCEPTION (ACE_ANY_EXCEPTION, "Exception in running client side");
+      ACE_ASSERT (0);
+    }
+  ACE_ENDTRY;
 }
 
 void
@@ -326,9 +374,8 @@ main (int argc,
   test_i servant;
 
   setup_server_orbs (servant,
-                     argc,
-                     argv);
-
+                     argc_copy,
+                     argv_copy);
 
   for (j = 0;
        j < argc;
@@ -341,9 +388,31 @@ main (int argc,
   result = server_task.activate (THR_BOUND);
   ACE_ASSERT (result == 0);
 
+  // Consume all handles in the process, leaving us
+  // <keep_handles_available> to play with.
+  ACE_Handle_Gobbler handle_gobbler;
+  result = handle_gobbler.consume_handles (keep_handles_available);
+  ACE_ASSERT (result == 0);
+
   invoke_remote_calls (client_orb.in ());
 
   cleanup ();
 
   return 0;
 }
+
+#if defined (ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION)
+
+// = Handle Gobbler
+template class ACE_Node<ACE_HANDLE>;
+template class ACE_Unbounded_Set<ACE_HANDLE>;
+template class ACE_Unbounded_Set_Iterator<ACE_HANDLE>;
+
+#elif defined (ACE_HAS_TEMPLATE_INSTANTIATION_PRAGMA)
+
+// = Handle Gobbler
+#pragma instantiate ACE_Node<ACE_HANDLE>
+#pragma instantiate ACE_Unbounded_Set<ACE_HANDLE>
+#pragma instantiate ACE_Unbounded_Set_Iterator<ACE_HANDLE>
+
+#endif /* ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION */
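
On the test side, Connection_Purging.cpp now gobbles file descriptors up front so that only a handful remain free, which makes descriptor exhaustion (and therefore purging) happen after a few connections instead of many iterations, and it re-invokes on previously contacted servers to verify that purged connections are transparently re-established. The sketch below illustrates the handle-gobbling idea; it is a rough stand-in, not ACE's actual ACE_Handle_Gobbler implementation, and it assumes POSIX getrlimit()/open().

// Standalone sketch (not ACE code): burn descriptors until roughly
// <keep_available> remain below the process soft limit.
#include <cstddef>
#include <fcntl.h>
#include <sys/resource.h>
#include <unistd.h>
#include <vector>

class Handle_Gobbler
{
public:
  // Keep opening /dev/null until open() fails or only about
  // <keep_available> descriptors are left.  Returns 0 on success.
  int consume_handles (size_t keep_available)
  {
    rlimit rl;
    if (::getrlimit (RLIMIT_NOFILE, &rl) == -1)
      return -1;

    for (;;)
      {
        int fd = ::open ("/dev/null", O_RDONLY);
        if (fd == -1)
          break;                                  // limit reached anyway
        this->handles_.push_back (fd);
        if (this->handles_.size () + keep_available >= rl.rlim_cur)
          break;                                  // margin reached (approximate)
      }
    return 0;
  }

  ~Handle_Gobbler (void)
  {
    // Release everything we consumed.
    for (size_t i = 0; i < this->handles_.size (); ++i)
      ::close (this->handles_[i]);
  }

private:
  std::vector<int> handles_;
};

With the options added to parse_args() ("i:a:r:d"), a run of this test would look something like "Connection_Purging -i 20 -a 10 -r 2", i.e. 20 server iterations, roughly 10 descriptors kept available, and 2 passes of remote calls per server; the exact command line depends on how the test is driven locally.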