summaryrefslogtreecommitdiff
path: root/TAO/tao/Thread_Lane_Resources.cpp
blob: 100b355798b96d86d0909b32837db6525c9f156f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
// $Id$

#include "tao/Thread_Lane_Resources.h"

ACE_RCSID(tao, Thread_Lane_Resources, "$Id$")

#include "tao/ORB_Core.h"
#include "tao/Acceptor_Registry.h"
#include "tao/Transport_Cache_Manager.h"
#include "tao/Leader_Follower.h"
#include "ace/Reactor.h"

#if !defined (__ACE_INLINE__)
# include "tao/Thread_Lane_Resources.i"
#endif /* ! __ACE_INLINE__ */

// Constructor: remember the owning ORB core and the (optional)
// new-leader generator, and eagerly allocate the transport cache.
// The acceptor registry and leader/follower objects are created
// lazily by their accessor methods.
TAO_Thread_Lane_Resources::TAO_Thread_Lane_Resources (TAO_ORB_Core &orb_core,
                                                      TAO_New_Leader_Generator *new_leader_generator)
  : orb_core_ (orb_core),
    acceptor_registry_ (0),
    transport_cache_ (0),
    leader_follower_ (0),
    new_leader_generator_ (new_leader_generator)
{
  // Create the transport cache.
  // NOTE(review): if this allocation fails, ACE_NEW leaves
  // transport_cache_ at 0 and simply returns; transport_cache ()
  // would then dereference a null pointer -- confirm callers can
  // tolerate or detect this.
  ACE_NEW (this->transport_cache_,
           TAO_Transport_Cache_Manager (orb_core));

}

// Destructor: intentionally empty.  The owned resources (acceptor
// registry, transport cache, leader/follower) are released explicitly
// in finalize (), not here.
TAO_Thread_Lane_Resources::~TAO_Thread_Lane_Resources (void)
{
}

// Accessor for the transport cache allocated in the constructor.
// NOTE(review): no null check -- if the constructor's ACE_NEW failed,
// this dereferences a null pointer.
TAO_Transport_Cache_Manager &
TAO_Thread_Lane_Resources::transport_cache (void)
{
  return *this->transport_cache_;
}

// Report whether the lazily-allocated acceptor registry already
// exists, without triggering its creation as a side effect.
int
TAO_Thread_Lane_Resources::has_acceptor_registry_been_created (void) const
{
  if (this->acceptor_registry_ == 0)
    return 0;

  return 1;
}

// Check whether @a mprofile matches one of our local endpoints.  If
// the acceptor registry has never been created there are no local
// endpoints yet, so the answer is trivially "no" -- and we avoid
// creating the registry just to answer the query.
int
TAO_Thread_Lane_Resources::is_collocated (const TAO_MProfile& mprofile)
{
  if (this->has_acceptor_registry_been_created ())
    return this->acceptor_registry ().is_collocated (mprofile);

  return 0;
}

// Lazily create and return the acceptor registry, using the
// check / lock / re-check (double-checked locking) idiom so the
// common already-created path skips the lock.  NOTE(review): classic
// DCL is not safe on weakly-ordered architectures without memory
// barriers -- confirm TAO_SYNCH_MUTEX provides the needed ordering.
TAO_Acceptor_Registry &
TAO_Thread_Lane_Resources::acceptor_registry (void)
{
  // Double check.
  if (this->acceptor_registry_ == 0)
    {
      // NOTE(review): if the guard fails to acquire the lock, the
      // macro returns *this->acceptor_registry_, which dereferences a
      // null pointer on this path (acceptor_registry_ == 0 here).
      ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, ace_mon, this->lock_, *this->acceptor_registry_);
      if (this->acceptor_registry_ == 0)
        {
          // Get the resource factory.
          TAO_Resource_Factory &resource_factory =
            *this->orb_core_.resource_factory ();

          // Ask it to create a new acceptor registry.
          // NOTE(review): the result is not checked for 0; a factory
          // failure would make the return below a null dereference.
          this->acceptor_registry_ =
            resource_factory.get_acceptor_registry ();
        }
    }

  return *this->acceptor_registry_;
}

// Lazily create and return the leader/follower object, using the
// same check / lock / re-check (double-checked locking) idiom as
// acceptor_registry ().  NOTE(review): classic DCL is not safe on
// weakly-ordered architectures without memory barriers -- confirm
// TAO_SYNCH_MUTEX provides the needed ordering.
TAO_Leader_Follower &
TAO_Thread_Lane_Resources::leader_follower (void)
{
  // Double check.
  if (this->leader_follower_ == 0)
    {
      // NOTE(review): both failure values below --
      // ACE_GUARD_RETURN's and ACE_NEW_RETURN's -- evaluate
      // *this->leader_follower_ while the pointer is still 0, i.e. a
      // null-pointer dereference on the error paths.
      ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, ace_mon, this->lock_, *this->leader_follower_);
      if (this->leader_follower_ == 0)
        {
          // Create a new Leader Follower object.
          ACE_NEW_RETURN (this->leader_follower_,
                          TAO_Leader_Follower (&this->orb_core_,
                                               this->new_leader_generator_),
                          *this->leader_follower_);
        }
    }

  return *this->leader_follower_;
}

int
TAO_Thread_Lane_Resources::open_acceptor_registry (int ignore_address
                                                   ACE_ENV_ARG_DECL)
{
  /// Access the acceptor registry.
  TAO_Acceptor_Registry &ar =
    this->acceptor_registry ();

  // Open it.
  int result =
    ar.open (&this->orb_core_,
             this->leader_follower ().reactor (),
             ignore_address
              ACE_ENV_ARG_PARAMETER);
  ACE_CHECK_RETURN (-1);

  return result;
}

// Release all resources owned by this object: close and destroy the
// acceptor registry, deregister and close all cached connections,
// and destroy the transport cache and leader/follower objects.
// Every deleted pointer is reset to 0 so that a second finalize ()
// is harmless and has_acceptor_registry_been_created () does not
// report a dangling pointer as "created".
void
TAO_Thread_Lane_Resources::finalize (void)
{
  // Ask the registry to close all registered acceptors.
  if (this->acceptor_registry_ != 0)
    {
      this->acceptor_registry_->close_all ();
      delete this->acceptor_registry_;
      this->acceptor_registry_ = 0;
    }

  // Set of file descriptors corresponding to open connections.  This
  // handle set is used to explicitly deregister the connection event
  // handlers from the Reactor.  This is particularly important for
  // dynamically loaded ORBs where an application level reactor, such
  // as the Singleton reactor, is used instead of an ORB created one.
  ACE_Handle_Set handle_set;
  TAO_EventHandlerSet unregistered;

  // Close the transport cache and collect the handle set that needs
  // to be de-registered from the reactor.  (The cache is allocated in
  // the constructor, but ACE_NEW can leave it at 0 on allocation
  // failure, so guard the dereference.)
  if (this->transport_cache_ != 0)
    this->transport_cache_->close (handle_set, unregistered);

  // Shutdown all open connections that are registered with the ORB
  // Core.  Note that the ACE_Event_Handler::DONT_CALL mask is NOT
  // used here since the reactor should invoke each handle's
  // corresponding ACE_Event_Handler::handle_close() method to ensure
  // that the connection is shutdown gracefully prior to destroying
  // the ORB Core.
  if (handle_set.num_set () > 0)
    (void) this->leader_follower ().reactor ()->remove_handler (handle_set,
                                                                ACE_Event_Handler::ALL_EVENTS_MASK);

  // Handlers that were never registered with the reactor will not be
  // reached by remove_handler () above, so close them explicitly.
  if (!unregistered.is_empty ())
    {
      ACE_Event_Handler** eh;
      for (TAO_EventHandlerSetIterator iter (unregistered);
           iter.next (eh);
           iter.advance ())
        {
          (*eh)->handle_close (ACE_INVALID_HANDLE,
                               ACE_Event_Handler::ALL_EVENTS_MASK);
        }
    }

  delete this->transport_cache_;
  this->transport_cache_ = 0;

  delete this->leader_follower_;
  this->leader_follower_ = 0;
}

void
TAO_Thread_Lane_Resources::shutdown_reactor (void)
{
  TAO_Leader_Follower &leader_follower =
    this->leader_follower ();

  ACE_GUARD (TAO_SYNCH_MUTEX,
             ace_mon,
             leader_follower.lock ());

  // Wakeup all the threads waiting blocked in the event loop, this
  // does not guarantee that they will all go away, but reduces the
  // load on the POA....
  ACE_Reactor *reactor =
    leader_follower.reactor ();

  reactor->wakeup_all_threads ();

  // If there are some client threads running we have to wait until
  // they finish, when the last one does it will shutdown the reactor
  // for us.  Meanwhile no new requests will be accepted because the
  // POA will not process them.
  if (!leader_follower.has_clients ())
    {
      // Wake up all waiting threads in the reactor.
      reactor->end_reactor_event_loop ();
    }
}