path: root/TAO/tao/Wait_Strategy.cpp
author    irfan <irfan@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>  2000-01-25 20:17:52 +0000
committer irfan <irfan@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>  2000-01-25 20:17:52 +0000
commit    4d2f3b1d58c5c70a1c1a802a2a91ff925cfdf042 (patch)
tree      71eb83d977a8e5a54cbc3b537bcd745042232b5d /TAO/tao/Wait_Strategy.cpp
parent    8359b5a37b58d0b830ddefa965d86cf69d5551e4 (diff)
download  ATCD-4d2f3b1d58c5c70a1c1a802a2a91ff925cfdf042.tar.gz
ChangeLogTag: Tue Jan 25 14:05:42 2000 Irfan Pyarali <irfan@cs.wustl.edu>
Diffstat (limited to 'TAO/tao/Wait_Strategy.cpp')
-rw-r--r--  TAO/tao/Wait_Strategy.cpp  539
1 file changed, 283 insertions(+), 256 deletions(-)
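
Note on the change below: both wait() implementations replace the bare
leader_follower.set_client_thread () / reset_client_thread () and
set_leader_thread () / reset_leader_thread () call pairs with scope-bound
helper objects (TAO_LF_Client_Thread_Helper and TAO_LF_Leader_Thread_Helper),
so the reset half now runs on every exit path, including the early
"return -1" cases that previously skipped it. The helpers are declared
elsewhere in TAO; the following is only a minimal sketch of the idiom, with
an illustrative class name, assuming the TAO_Leader_Follower interface used
in the diff.

    // Minimal sketch of the "auto_ptr like" guard idiom this patch adopts.
    // Example_LF_Client_Thread_Helper is an illustrative name; the real
    // TAO_LF_Client_Thread_Helper is declared elsewhere in TAO and may differ.
    class Example_LF_Client_Thread_Helper
    {
    public:
      // Mark the calling thread as a client thread of the leader/follower set.
      Example_LF_Client_Thread_Helper (TAO_Leader_Follower &leader_follower)
        : leader_follower_ (leader_follower)
      {
        this->leader_follower_.set_client_thread ();
      }

      // Undo the marking on scope exit, whichever return path is taken.
      ~Example_LF_Client_Thread_Helper (void)
      {
        this->leader_follower_.reset_client_thread ();
      }

    private:
      TAO_Leader_Follower &leader_follower_;
    };
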
diff --git a/TAO/tao/Wait_Strategy.cpp b/TAO/tao/Wait_Strategy.cpp
index bdfb0562eb5..87dc5813023 100644
--- a/TAO/tao/Wait_Strategy.cpp
+++ b/TAO/tao/Wait_Strategy.cpp
@@ -217,174 +217,185 @@ TAO_Exclusive_Wait_On_Leader_Follower::wait (ACE_Time_Value *max_wait_time,
ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon,
leader_follower.lock (), -1);
- leader_follower.set_client_thread ();
-
- ACE_Countdown_Time countdown (max_wait_time);
+ // Optimize the first iteration [no access to errno]
+ int result = 1;
- // Check if there is a leader, but the leader is not us
- if (leader_follower.leader_available ()
- && !leader_follower.is_leader_thread ())
- {
- // = Wait as a follower.
-
- // wait until we have input available or there is no leader, in
- // which case we must become the leader anyway....
- // @@ Alex: I am uncertain about how many condition variables
- // should we have, should there be one-per-thread (after all
- // the thread blocks on the condition variable) or there
- // should be one per-connection. I think the first case is
- // the "Right Thing"[tm]
- ACE_SYNCH_CONDITION* cond =
- this->cond_response_available ();
+ //
+ // Begin artificial scope for auto_ptr like helpers calling:
+ // leader_follower.set_client_thread () and (maybe later on)
+ // leader_follower.set_leader_thread ().
+ //
+ {
+ // Calls leader_follower.set_client_thread () on construction and
+ // leader_follower.reset_client_thread () on destruction.
+ TAO_LF_Client_Thread_Helper client_thread_helper (leader_follower);
+ ACE_UNUSED_ARG (client_thread_helper);
+
+ ACE_Countdown_Time countdown (max_wait_time);
+
+ // Check if there is a leader, but the leader is not us
+ if (leader_follower.leader_available ()
+ && !leader_follower.is_leader_thread ())
+ {
+ // = Wait as a follower.
+
+ // wait until we have input available or there is no leader, in
+ // which case we must become the leader anyway....
+ // @@ Alex: I am uncertain about how many condition variables
+ // should we have, should there be one-per-thread (after all
+ // the thread blocks on the condition variable) or there
+ // should be one per-connection. I think the first case is
+ // the "Right Thing"[tm]
+ ACE_SYNCH_CONDITION* cond =
+ this->cond_response_available ();
#if defined (TAO_DEBUG_LEADER_FOLLOWER)
- ACE_DEBUG ((LM_DEBUG,
- "TAO (%P|%t): TAO_Wait_On_LF::wait - "
- "(follower) on <%x>\n",
- cond));
+ ACE_DEBUG ((LM_DEBUG,
+ "TAO (%P|%t): TAO_Wait_On_LF::wait - "
+ "(follower) on <%x>\n",
+ cond));
#endif /* TAO_DEBUG_LEADER_FOLLOWER */
- while (!this->reply_received_
- && leader_follower.leader_available ())
- {
- // Add to the follower set, that operation will if the
- // condition variable returns due to an spurious wake up
- // (i.e. a wait interrupted by the OS) but otherwise we risk
- // dead-locks:
- // Assume that we are the only follower, another thread is
- // the leader and it completes its work, it sends us the
- // signal and removes us from the set.
- // Before waking up another thread becomes the leader, when
- // we do wake up we believe that it was a false return from
- // the condition variable and go into the loop again.
- // But now the follower set is empty and nobody is ever
- // going to wake us up, dead-locking the application.
-
- if (leader_follower.add_follower (cond) == -1)
- {
- // -1 indicates a severe problem, like running out of
- // memory, the comment above does not apply in this
- // case.
- return -1;
- }
+ while (!this->reply_received_
+ && leader_follower.leader_available ())
+ {
+ // Add to the follower set; that operation will be redone if the
+ // condition variable returns due to a spurious wake up
+ // (i.e. a wait interrupted by the OS), but otherwise we
+ // risk dead-locks:
+ // Assume that we are the only follower, another thread
+ // is the leader and it completes its work, it sends us
+ // the signal and removes us from the set. Before
+ // waking up another thread becomes the leader, when we
+ // do wake up we believe that it was a false return from
+ // the condition variable and go into the loop again.
+ // But now the follower set is empty and nobody is ever
+ // going to wake us up, dead-locking the application.
+
+ if (leader_follower.add_follower (cond) == -1)
+ {
+ // -1 indicates a severe problem, like running out of
+ // memory, the comment above does not apply in this
+ // case.
+ return -1;
+ }
#if defined (TAO_DEBUG_LEADER_FOLLOWER)
- ACE_DEBUG ((LM_DEBUG,
- "TAO (%P|%t): TAO_Wait_On_LF::wait - "
- "waiting in follower <%x>\n",
- cond));
+ ACE_DEBUG ((LM_DEBUG,
+ "TAO (%P|%t): TAO_Wait_On_LF::wait - "
+ "waiting in follower <%x>\n",
+ cond));
#endif /* TAO_DEBUG_LEADER_FOLLOWER */
- if (max_wait_time == 0)
- {
- if (cond == 0 || cond->wait () == -1)
- return -1;
- }
- else
- {
- countdown.update ();
- ACE_Time_Value tv = ACE_OS::gettimeofday ();
- tv += *max_wait_time;
- if (cond == 0 || cond->wait (&tv) == -1)
- return -1;
- }
- }
+ if (max_wait_time == 0)
+ {
+ if (cond == 0 || cond->wait () == -1)
+ return -1;
+ }
+ else
+ {
+ countdown.update ();
+ ACE_Time_Value tv = ACE_OS::gettimeofday ();
+ tv += *max_wait_time;
+ if (cond == 0 || cond->wait (&tv) == -1)
+ return -1;
+ }
+ }
- countdown.update ();
+ countdown.update ();
#if defined (TAO_DEBUG_LEADER_FOLLOWER)
- ACE_DEBUG ((LM_DEBUG,
- "TAO (%P|%t): TAO_Wait_On_LF::wait - "
- "done (follower:%d) on <%x>\n",
- this->reply_received_, cond));
+ ACE_DEBUG ((LM_DEBUG,
+ "TAO (%P|%t): TAO_Wait_On_LF::wait - "
+ "done (follower:%d) on <%x>\n",
+ this->reply_received_, cond));
#endif /* TAO_DEBUG_LEADER_FOLLOWER */
- // Now somebody woke us up to become a leader or to handle
- // our input. We are already removed from the follower queue.
- if (this->reply_received_ == 1)
- {
- // But first reset our state in case we are invoked
- // again...
- this->reply_received_ = 0;
- this->expecting_response_ = 0;
- this->calling_thread_ = ACE_OS::NULL_thread;
+ // Now somebody woke us up to become a leader or to handle our
+ // input. We are already removed from the follower queue.
+ if (this->reply_received_ == 1)
+ {
+ // But first reset our state in case we are invoked
+ // again...
+ this->reply_received_ = 0;
+ this->expecting_response_ = 0;
+ this->calling_thread_ = ACE_OS::NULL_thread;
+
+ return 0;
+ }
+ else if (this->reply_received_ == -1)
+ {
+ // But first reset our state in case we are invoked
+ // again...
+ this->reply_received_ = 0;
+ this->expecting_response_ = 0;
+ this->calling_thread_ = ACE_OS::NULL_thread;
+
+ return -1;
+ }
+ // FALLTHROUGH
+ // We only get here if we woke up but the reply is not
+ // complete yet, time to assume the leader role....
+ // i.e. ACE_ASSERT (this->reply_received_ == 0);
+ }
+
+ // = Leader Code.
+
+ // The only way to reach this point is if we must become the
+ // leader, because there is no leader or we have to update to a
+ // leader or we are doing nested upcalls in this case we do
+ // increase the refcount on the leader in TAO_ORB_Core.
+
+ // Calls leader_follower.set_leader_thread () on construction and
+ // leader_follower.reset_leader_thread () on destruction.
+ // Note that this may increase the refcount of the leader.
+ TAO_LF_Leader_Thread_Helper leader_thread_helper (leader_follower);
+ ACE_UNUSED_ARG (leader_thread_helper);
- leader_follower.reset_client_thread ();
-
- return 0;
- }
- else if (this->reply_received_ == -1)
- {
- // But first reset our state in case we are invoked
- // again...
- this->reply_received_ = 0;
- this->expecting_response_ = 0;
- this->calling_thread_ = ACE_OS::NULL_thread;
-
- leader_follower.reset_client_thread ();
-
- return -1;
- }
- // FALLTHROUGH
- // We only get here if we woke up but the reply is not complete
- // yet, time to assume the leader role....
- // i.e. ACE_ASSERT (this->reply_received_ == 0);
-
- }
-
- // = Leader Code.
-
- // The only way to reach this point is if we must become the leader,
- // because there is no leader or we have to update to a leader or we
- // are doing nested upcalls in this case we do increase the refcount
- // on the leader in TAO_ORB_Core.
-
- // This might increase the refcount of the leader.
- leader_follower.set_leader_thread ();
-
- int result = 1; // Optmize the first iteration [no access to errno]
-
- {
- ACE_GUARD_RETURN (ACE_Reverse_Lock<ACE_SYNCH_MUTEX>, rev_mon,
- leader_follower.reverse_lock (), -1);
+ {
+ ACE_GUARD_RETURN (ACE_Reverse_Lock<ACE_SYNCH_MUTEX>, rev_mon,
+ leader_follower.reverse_lock (), -1);
- // @@ Do we need to do this?
- // Become owner of the reactor.
- orb_core->reactor ()->owner (ACE_Thread::self ());
+ // @@ Do we need to do this?
+ // Become owner of the reactor.
+ orb_core->reactor ()->owner (ACE_Thread::self ());
- // Run the reactor event loop.
+ // Run the reactor event loop.
#if defined (TAO_DEBUG_LEADER_FOLLOWER)
- ACE_DEBUG ((LM_DEBUG,
- "TAO (%P|%t): TAO_Wait_On_LF::wait - "
- "wait (leader) on <%x>\n",
- this->transport_));
+ ACE_DEBUG ((LM_DEBUG,
+ "TAO (%P|%t): TAO_Wait_On_LF::wait - "
+ "wait (leader) on <%x>\n",
+ this->transport_));
#endif /* TAO_DEBUG_LEADER_FOLLOWER */
- while (this->reply_received_ == 0
- && (result > 0
- || (result == 0
- && max_wait_time != 0
- && *max_wait_time != ACE_Time_Value::zero)))
- result = orb_core->reactor ()->handle_events (max_wait_time);
+ while (this->reply_received_ == 0
+ && (result > 0
+ || (result == 0
+ && max_wait_time != 0
+ && *max_wait_time != ACE_Time_Value::zero)))
+ result = orb_core->reactor ()->handle_events (max_wait_time);
#if defined (TAO_DEBUG_LEADER_FOLLOWER)
- ACE_DEBUG ((LM_DEBUG,
- "TAO (%P|%t): TAO_Wait_On_LF::wait - "
- "done (leader) on <%x>\n",
- this->transport_));
+ ACE_DEBUG ((LM_DEBUG,
+ "TAO (%P|%t): TAO_Wait_On_LF::wait - "
+ "done (leader) on <%x>\n",
+ this->transport_));
#endif /* TAO_DEBUG_LEADER_FOLLOWER */
+ }
}
+ //
+ // End artificial scope for auto_ptr like helpers calling:
+ // leader_follower.reset_client_thread () and (maybe)
+ // leader_follower.reset_leader_thread ().
+ //
// Wake up the next leader, we cannot do that in handle_input,
- // because the woken up thread would try to get into
- // handle_events, which is at the time in handle_input still
- // occupied. But do it before checking the error in <result>, even
- // if there is an error in our input we should continue running the
- // loop in another thread.
-
- leader_follower.reset_leader_thread ();
- leader_follower.reset_client_thread ();
+ // because the woken up thread would try to get into handle_events,
+ // which is at the time in handle_input still occupied. But do it
+ // before checking the error in <result>, even if there is an error
+ // in our input we should continue running the loop in another
+ // thread.
if (leader_follower.elect_new_leader () == -1)
ACE_ERROR_RETURN ((LM_ERROR,
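
Note on the leader branch above: ACE_Reverse_Lock is what lets the leader run
the reactor event loop without holding the leader/follower mutex. "Acquiring"
the reverse lock releases the underlying mutex, and the guard's destructor
re-acquires it when the inner scope exits. A stripped-down sketch of that
idiom (illustrative function name, ACE headers of that era; error handling and
the ORB-specific state checks are omitted):

    #include "ace/Synch.h"    /* ACE_SYNCH_MUTEX, ACE_Reverse_Lock, ACE_GUARD_RETURN */
    #include "ace/Reactor.h"  /* ACE_Reactor */

    int
    run_reactor_without_lock (ACE_SYNCH_MUTEX &lock, ACE_Reactor &reactor)
    {
      // Hold the mutex while inspecting shared leader/follower state.
      ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon, lock, -1);

      ACE_Reverse_Lock<ACE_SYNCH_MUTEX> reverse_lock (lock);

      {
        // Acquiring the reverse lock releases <lock>; the guard's
        // destructor re-acquires it when this scope ends.
        ACE_GUARD_RETURN (ACE_Reverse_Lock<ACE_SYNCH_MUTEX>, rev_mon,
                          reverse_lock, -1);

        int result = 1;
        while (result > 0)
          result = reactor.handle_events ();  // loop with <lock> released
      }

      // <lock> is held again here, so shared state can be updated safely.
      return 0;
    }
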
@@ -608,148 +619,164 @@ TAO_Muxed_Wait_On_Leader_Follower::wait (ACE_Time_Value *max_wait_time,
ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon,
leader_follower.lock (), -1);
- leader_follower.set_client_thread ();
-
- ACE_Countdown_Time countdown (max_wait_time);
-
- // Check if there is a leader, but the leader is not us
- if (leader_follower.leader_available ()
- && !leader_follower.is_leader_thread ())
- {
- // = Wait as a follower.
+ // Optimize the first iteration [no access to errno]
+ int result = 1;
- // Grab the condtion variable.
- ACE_SYNCH_CONDITION* cond =
- orb_core->leader_follower_condition_variable ();
+ //
+ // Begin artificial scope for auto_ptr like helpers calling:
+ // leader_follower.set_client_thread () and (maybe later on)
+ // leader_follower.set_leader_thread ().
+ //
+ {
+ // Calls leader_follower.set_client_thread () on construction and
+ // leader_follower.reset_client_thread () on destruction.
+ TAO_LF_Client_Thread_Helper client_thread_helper (leader_follower);
+ ACE_UNUSED_ARG (client_thread_helper);
- if (TAO_debug_level >= 5)
- ACE_DEBUG ((LM_DEBUG,
- ASYS_TEXT ("TAO (%P|%t) - wait (follower) on Transport <%x>, cond <%x>\n"),
- this->transport_,
- cond));
+ ACE_Countdown_Time countdown (max_wait_time);
- // Add ourselves to the list, do it only once because we can
- // wake up multiple times from the CV loop. And only do it if
- // the reply has not been received (it could have arrived while
- // we were preparing to receive it).
+ // Check if there is a leader, but the leader is not us
+ if (leader_follower.leader_available ()
+ && !leader_follower.is_leader_thread ())
+ {
+ // = Wait as a follower.
- if (!reply_received
- && leader_follower.leader_available ())
- {
- if (leader_follower.add_follower (cond) == -1)
- ACE_ERROR ((LM_ERROR,
- ASYS_TEXT ("TAO (%P|%t) TAO_Muxed_Wait_On_Leader_Follower::wait - ")
- ASYS_TEXT ("add_follower failed for <%x>\n"),
- cond));
- }
+ // Grab the condition variable.
+ ACE_SYNCH_CONDITION* cond =
+ orb_core->leader_follower_condition_variable ();
+ if (TAO_debug_level >= 5)
+ ACE_DEBUG ((LM_DEBUG,
+ ASYS_TEXT ("TAO (%P|%t) - wait (follower) on Transport <%x>, cond <%x>\n"),
+ this->transport_,
+ cond));
- while (!reply_received &&
- leader_follower.leader_available ())
- {
- if (max_wait_time == 0)
- {
- if (cond == 0 || cond->wait () == -1)
- {
- if (TAO_debug_level >= 5)
- ACE_DEBUG ((LM_DEBUG,
- ASYS_TEXT ("TAO (%P|%t) - wait (follower) on <%x> ")
- ASYS_TEXT ("cond == 0 || cond->wait () == -1 : cond = %d\n"),
- this->transport_, (cond == 0) ? 0 : cond));
- return -1;
- }
- }
- else
- {
- countdown.update ();
- ACE_Time_Value tv = ACE_OS::gettimeofday ();
- tv += *max_wait_time;
- if (cond == 0 || cond->wait (&tv) == -1)
- {
- if (TAO_debug_level >= 5)
- ACE_DEBUG ((LM_DEBUG,
- ASYS_TEXT ("TAO (%P|%t) - wait (follower) on <%x> ")
- ASYS_TEXT ("cond == 0 || cond->wait (tv) == -1\n"),
- this->transport_));
+ // Add ourselves to the list, do it only once because we can
+ // wake up multiple times from the CV loop. And only do it if
+ // the reply has not been received (it could have arrived
+ // while we were preparing to receive it).
+
+ if (!reply_received
+ && leader_follower.leader_available ())
+ {
+ if (leader_follower.add_follower (cond) == -1)
+ ACE_ERROR ((LM_ERROR,
+ ASYS_TEXT ("TAO (%P|%t) TAO_Muxed_Wait_On_Leader_Follower::wait - ")
+ ASYS_TEXT ("add_follower failed for <%x>\n"),
+ cond));
+ }
+
+
+ while (!reply_received &&
+ leader_follower.leader_available ())
+ {
+ if (max_wait_time == 0)
+ {
+ if (cond == 0 || cond->wait () == -1)
+ {
+ if (TAO_debug_level >= 5)
+ ACE_DEBUG ((LM_DEBUG,
+ ASYS_TEXT ("TAO (%P|%t) - wait (follower) on <%x> ")
+ ASYS_TEXT ("cond == 0 || cond->wait () == -1 : cond = %d\n"),
+ this->transport_, (cond == 0) ? 0 : cond));
+ return -1;
+ }
+ }
+ else
+ {
+ countdown.update ();
+ ACE_Time_Value tv = ACE_OS::gettimeofday ();
+ tv += *max_wait_time;
+ if (cond == 0 || cond->wait (&tv) == -1)
+ {
+ if (TAO_debug_level >= 5)
+ ACE_DEBUG ((LM_DEBUG,
+ ASYS_TEXT ("TAO (%P|%t) - wait (follower) on <%x> ")
+ ASYS_TEXT ("cond == 0 || cond->wait (tv) == -1\n"),
+ this->transport_));
return -1;
- }
- }
- }
+ }
+ }
+ }
- countdown.update ();
+ countdown.update ();
#if 0
- // Cannot remove the follower here, we *must* remove it when we
- // signal it so the same condition is not signalled for both
- // wake up as a follower and as the next leader.
- if (leader_follower.remove_follower (cond) == -1)
- ACE_ERROR ((LM_ERROR,
- "TAO (%P|%t) TAO_Muxed_Wait_On_Leader_Follower::wait - "
- "remove_follower failed for <%x>\n", cond));
+ // Cannot remove the follower here, we *must* remove it when
+ // we signal it so the same condition is not signalled for
+ // both wake up as a follower and as the next leader.
+ if (leader_follower.remove_follower (cond) == -1)
+ ACE_ERROR ((LM_ERROR,
+ "TAO (%P|%t) TAO_Muxed_Wait_On_Leader_Follower::wait - "
+ "remove_follower failed for <%x>\n", cond));
#endif /* 0 */
- if (TAO_debug_level >= 5)
- ACE_DEBUG ((LM_DEBUG,
- ASYS_TEXT ("TAO (%P|%t) - done (follower) on <%x>, reply_received %d\n"),
- this->transport_, reply_received));
-
- // Now somebody woke us up to become a leader or to handle
- // our input. We are already removed from the follower queue.
+ if (TAO_debug_level >= 5)
+ ACE_DEBUG ((LM_DEBUG,
+ ASYS_TEXT ("TAO (%P|%t) - done (follower) on <%x>, reply_received %d\n"),
+ this->transport_, reply_received));
- if (reply_received == 1)
- return 0;
+ // Now somebody woke us up to become a leader or to handle our
+ // input. We are already removed from the follower queue.
- // FALLTHROUGH
- // We only get here if we woke up but the reply is not complete
- // yet, time to assume the leader role....
- // i.e. ACE_ASSERT (this->reply_received_ == 0);
- }
+ if (reply_received == 1)
+ return 0;
- // = Leader Code.
+ // FALLTHROUGH
+ // We only get here if we woke up but the reply is not
+ // complete yet, time to assume the leader role....
+ // i.e. ACE_ASSERT (this->reply_received_ == 0);
+ }
- // The only way to reach this point is if we must become the leader,
- // because there is no leader or we have to update to a leader or we
- // are doing nested upcalls in this case we do increase the refcount
- // on the leader in TAO_ORB_Core.
+ // = Leader Code.
- // This might increase the refcount of the leader.
- leader_follower.set_leader_thread ();
+ // The only way to reach this point is if we must become the
+ // leader, because there is no leader or we have to update to a
+ // leader or we are doing nested upcalls in this case we do
+ // increase the refcount on the leader in TAO_ORB_Core.
- int result = 1;
+ // Calls leader_follower.set_leader_thread () on construction and
+ // leader_follower.reset_leader_thread () on destruction.
+ // Note that this may increase the refcount of the leader.
+ TAO_LF_Leader_Thread_Helper leader_thread_helper (leader_follower);
+ ACE_UNUSED_ARG (leader_thread_helper);
- {
- ACE_GUARD_RETURN (ACE_Reverse_Lock<ACE_SYNCH_MUTEX>, rev_mon,
- leader_follower.reverse_lock (), -1);
+ {
+ ACE_GUARD_RETURN (ACE_Reverse_Lock<ACE_SYNCH_MUTEX>, rev_mon,
+ leader_follower.reverse_lock (), -1);
- // @@ Do we need to do this?
- // Become owner of the reactor.
- orb_core->reactor ()->owner (ACE_Thread::self ());
+ // @@ Do we need to do this?
+ // Become owner of the reactor.
+ orb_core->reactor ()->owner (ACE_Thread::self ());
- // Run the reactor event loop.
+ // Run the reactor event loop.
- if (TAO_debug_level >= 5)
- ACE_DEBUG ((LM_DEBUG,
- ASYS_TEXT ("TAO (%P|%t) - wait (leader):to enter reactor event loop on <%x>\n"),
- this->transport_));
+ if (TAO_debug_level >= 5)
+ ACE_DEBUG ((LM_DEBUG,
+ ASYS_TEXT ("TAO (%P|%t) - wait (leader):to enter reactor event loop on <%x>\n"),
+ this->transport_));
- while (result > 0 && reply_received == 0)
- result = orb_core->reactor ()->handle_events (max_wait_time);
+ while (result > 0 && reply_received == 0)
+ result = orb_core->reactor ()->handle_events (max_wait_time);
- if (TAO_debug_level >= 5)
- ACE_DEBUG ((LM_DEBUG,
- ASYS_TEXT ("TAO (%P|%t) - wait : (leader) : done with reactor event loop on <%x>\n"),
- this->transport_));
+ if (TAO_debug_level >= 5)
+ ACE_DEBUG ((LM_DEBUG,
+ ASYS_TEXT ("TAO (%P|%t) - wait : (leader) : done with reactor event loop on <%x>\n"),
+ this->transport_));
+ }
}
+ //
+ // End artificial scope for auto_ptr like helpers calling:
+ // leader_follower.reset_client_thread () and (maybe)
+ // leader_follower.reset_leader_thread ().
+ //
// Wake up the next leader, we cannot do that in handle_input,
- // because the woken up thread would try to get into
- // handle_events, which is at the time in handle_input still
- // occupied. But do it before checking the error in <result>, even
- // if there is an error in our input we should continue running the
- // loop in another thread.
-
- leader_follower.reset_leader_thread ();
- leader_follower.reset_client_thread ();
+ // because the woken up thread would try to get into handle_events,
+ // which is at the time in handle_input still occupied. But do it
+ // before checking the error in <result>, even if there is an error
+ // in our input we should continue running the loop in another
+ // thread.
if (leader_follower.elect_new_leader () == -1)
ACE_ERROR_RETURN ((LM_ERROR,