author     yamuna <yamuna@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>  2003-06-27 02:57:03 +0000
committer  yamuna <yamuna@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>  2003-06-27 02:57:03 +0000
commit     87041647376df38155b6f7dc3ae64d96bc3cec21 (patch)
tree       0dc08eccd4ff0e43eaf0ce0eae60c4ce97596d44
parent     abbf512427dace3c6040e6cde6593aea5fc33170 (diff)
download   ATCD-87041647376df38155b6f7dc3ae64d96bc3cec21.tar.gz

ChangelogTag: Thu Jun 26 22:34:10 2003 Yamuna Krishnamurthy <yamuna@oomworks.com>

-rw-r--r--  TAO/tao/RTScheduling/Current.cpp  1090
-rw-r--r--  TAO/tao/RTScheduling/Current.h     362
2 files changed, 1452 insertions, 0 deletions
diff --git a/TAO/tao/RTScheduling/Current.cpp b/TAO/tao/RTScheduling/Current.cpp
new file mode 100644
index 00000000000..39d336f9ecc
--- /dev/null
+++ b/TAO/tao/RTScheduling/Current.cpp
@@ -0,0 +1,1090 @@
+//$Id$
+#include "Current.h"
+#include "tao/ORB_Core.h"
+#include "Distributable_Thread.h"
+#include "tao/RTCORBA/Priority_Mapping_Manager.h"
+#include "tao/RTCORBA/RT_Current.h"
+
+//#include "ThreadAction.h"
+
+
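+// Process-wide atomic counter used to mint GUIDs for newly created
+// distributable threads.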
+ACE_Atomic_Op<ACE_Thread_Mutex, long> guid_counter;
+
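+// Hash a distributable thread id (an octet sequence) by applying ACE's
+// PJW string hash to the raw buffer contents.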
+u_long
+TAO_DTId_Hash::operator () (const IdType &id) const
+{
+ return ACE::hash_pjw ((const char *) id.get_buffer (),
+ id.length ());
+}
+
+TAO_RTScheduler_Current::TAO_RTScheduler_Current (TAO_ORB_Core* orb)
+ : orb_ (orb)
+{
+ // Create the RT_Current.
+ RTCORBA::Current_ptr current;
+ ACE_NEW_THROW_EX (current,
+ TAO_RT_Current (orb),
+ CORBA::NO_MEMORY (
+ CORBA::SystemException::_tao_minor_code (
+ TAO_DEFAULT_MINOR_CODE,
+ ENOMEM),
+ CORBA::COMPLETED_NO));
+ this->rt_current_ = current;
+}
+
+
+void
+TAO_RTScheduler_Current::rt_current (RTCORBA::Current_ptr rt_current)
+{
+ this->rt_current_ = RTCORBA::Current::_duplicate (rt_current);
+}
+
+TAO_ORB_Core*
+TAO_RTScheduler_Current::orb (void)
+{
+ return this->orb_;
+}
+
+DT_Hash_Map*
+TAO_RTScheduler_Current::dt_hash (void)
+{
+ return &this->dt_hash_;
+}
+
+
+void
+TAO_RTScheduler_Current::begin_scheduling_segment(const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE))
+{
+// ACE_DEBUG ((LM_DEBUG,
+// "TAO_RTScheduler_Current::begin_scheduling_segment\n"));
+
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ {
+ ACE_NEW_THROW_EX (impl,
+ TAO_RTScheduler_Current_i (this->orb_,
+ &this->dt_hash_),
+ CORBA::NO_MEMORY (
+ CORBA::SystemException::_tao_minor_code (
+ TAO_DEFAULT_MINOR_CODE,
+ ENOMEM),
+ CORBA::COMPLETED_NO));
+ ACE_CHECK;
+
+ this->implementation (impl);
+ }
+
+ impl->begin_scheduling_segment (name,
+ sched_param,
+ implicit_sched_param
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+}
+
+
+void
+TAO_RTScheduler_Current::update_scheduling_segment (const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE))
+{
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ ACE_THROW (CORBA::BAD_INV_ORDER ());
+
+ impl->update_scheduling_segment (name,
+ sched_param,
+ implicit_sched_param
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+}
+
+void
+TAO_RTScheduler_Current::end_scheduling_segment (const char * name
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+
+ // ACE_DEBUG ((LM_DEBUG,
+ // "TAO_RTScheduler_Current::end_scheduling_segment\n"));
+
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ {
+ ACE_THROW (CORBA::BAD_INV_ORDER ());
+ }
+
+ impl->end_scheduling_segment (name
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+}
+
+RTScheduling::DistributableThread_ptr
+TAO_RTScheduler_Current::lookup(const RTScheduling::Current::IdType & id
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ RTScheduling::DistributableThread_var DT;
+ int result = this->dt_hash_.find (id,
+ DT);
+ if (result == 0)
+ return DT._retn ();
+ else return RTScheduling::DistributableThread::_nil ();
+}
+
+// lookup () returns a null reference if the distributable
+// thread is not known to the local scheduler.
+
+RTScheduling::DistributableThread_ptr
+TAO_RTScheduler_Current::spawn (RTScheduling::ThreadAction_ptr start,
+ CORBA::VoidData data,
+ const char* name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param,
+ CORBA::ULong stack_size,
+ RTCORBA::Priority base_priority
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ ACE_THROW_RETURN (CORBA::BAD_INV_ORDER (), 0);
+
+ return impl->spawn (start,
+ data,
+ name,
+ sched_param,
+ implicit_sched_param,
+ stack_size,
+ base_priority
+ ACE_ENV_ARG_PARAMETER);
+}
+
+RTScheduling::Current::IdType *
+TAO_RTScheduler_Current::id (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ ACE_THROW_RETURN (CORBA::BAD_INV_ORDER (), 0);
+
+ return impl->id (ACE_ENV_SINGLE_ARG_PARAMETER);
+}
+
+CORBA::Policy_ptr
+TAO_RTScheduler_Current::scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ ACE_THROW_RETURN (CORBA::BAD_INV_ORDER (), 0);
+
+ return impl->scheduling_parameter (ACE_ENV_SINGLE_ARG_PARAMETER);
+}
+
+CORBA::Policy_ptr
+TAO_RTScheduler_Current::implicit_scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ ACE_THROW_RETURN (CORBA::BAD_INV_ORDER (), 0);
+
+ return impl->implicit_scheduling_parameter (ACE_ENV_SINGLE_ARG_PARAMETER);
+}
+
+RTScheduling::Current::NameList *
+TAO_RTScheduler_Current::current_scheduling_segment_names (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ TAO_RTScheduler_Current_i *impl = this->implementation ();
+
+ if (impl == 0)
+ ACE_THROW_RETURN (CORBA::BAD_INV_ORDER (), 0);
+
+ return impl->current_scheduling_segment_names (ACE_ENV_SINGLE_ARG_PARAMETER);
+}
+
+RTCORBA::Priority
+TAO_RTScheduler_Current::the_priority (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ return this->rt_current_->the_priority (ACE_ENV_SINGLE_ARG_PARAMETER);
+}
+
+void
+TAO_RTScheduler_Current::the_priority (RTCORBA::Priority the_priority
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ this->rt_current_->the_priority(the_priority
+ ACE_ENV_ARG_PARAMETER);
+}
+
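+// Install @a new_current as this thread's implementation in TSS and
+// return the previous one so the caller can restore it later.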
+TAO_RTScheduler_Current_i*
+TAO_RTScheduler_Current::implementation (TAO_RTScheduler_Current_i* new_current)
+{
+ TAO_TSS_Resources *tss =
+ TAO_TSS_RESOURCES::instance ();
+
+ TAO_RTScheduler_Current_i *old =
+ ACE_static_cast (TAO_RTScheduler_Current_i *,
+ tss->rtscheduler_current_impl_);
+ tss->rtscheduler_current_impl_ = new_current;
+
+ return old;
+}
+
+TAO_RTScheduler_Current_i*
+TAO_RTScheduler_Current::implementation (void)
+{
+ TAO_TSS_Resources *tss =
+ TAO_TSS_RESOURCES::instance ();
+
+ TAO_RTScheduler_Current_i* impl =
+ ACE_static_cast (TAO_RTScheduler_Current_i *,
+ tss->rtscheduler_current_impl_);
+ return impl;
+}
+
+TAO_ORB_Core*
+TAO_RTScheduler_Current_i::orb (void)
+{
+ return this->orb_;
+}
+
+DT_Hash_Map*
+TAO_RTScheduler_Current_i::dt_hash (void)
+{
+ return this->dt_hash_;
+}
+
+RTScheduling::Scheduler_ptr
+TAO_RTScheduler_Current_i::scheduler (void)
+{
+ return RTScheduling::Scheduler::_duplicate (this->scheduler_.in ());
+}
+
+TAO_RTScheduler_Current_i::TAO_RTScheduler_Current_i (TAO_ORB_Core* orb,
+ DT_Hash_Map* dt_hash
+ ACE_ENV_ARG_DECL)
+  : orb_ (orb),
+ dt_ (RTScheduling::DistributableThread::_nil ()),
+ previous_current_ (0),
+ dt_hash_ (dt_hash)
+{
+// ACE_DEBUG ((LM_DEBUG,
+// "TAO_RTScheduler_Current_i::TAO_RTScheduler_Current_i\n"));
+
+
+ CORBA::Object_ptr scheduler_obj = this->orb_->object_ref_table ().resolve_initial_references ("RTScheduler"
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ this->scheduler_ = RTScheduling::Scheduler::_narrow (scheduler_obj
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+}
+
+TAO_RTScheduler_Current_i::TAO_RTScheduler_Current_i (TAO_ORB_Core* orb,
+ DT_Hash_Map* dt_hash,
+ RTScheduling::Current::IdType guid,
+ const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param,
+ RTScheduling::DistributableThread_ptr dt,
+ TAO_RTScheduler_Current_i* prev_current
+ ACE_ENV_ARG_DECL)
+ : orb_ (orb),
+ guid_ (guid),
+ name_ (CORBA::string_dup (name)),
+ sched_param_ (sched_param),
+ implicit_sched_param_ (implicit_sched_param),
+ dt_ (RTScheduling::DistributableThread::_duplicate (dt)),
+ previous_current_ (prev_current),
+ dt_hash_ (dt_hash)
+{
+// ACE_DEBUG ((LM_DEBUG,
+// "TAO_RTScheduler_Current_i::TAO_RTScheduler_Current_i\n"));
+
+ CORBA::Object_ptr scheduler_obj = orb->object_ref_table ().resolve_initial_references ("RTScheduler"
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ this->scheduler_ = RTScheduling::Scheduler::_narrow (scheduler_obj
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+}
+
+void
+TAO_RTScheduler_Current_i::begin_scheduling_segment(const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE))
+{
+  // Check if this is a new scheduling segment.
+ if (this->guid_.length () == 0)
+ {
+ //Generate GUID
+ long temp = ++guid_counter;
+ this->guid_.length (sizeof(long));
+ ACE_OS::memcpy (this->guid_.get_buffer (),
+ &temp,
+ sizeof(long));
+
+ int guid;
+ ACE_OS::memcpy (&guid,
+ this->guid_.get_buffer (),
+ this->guid_.length ());
+
+// ACE_DEBUG ((LM_DEBUG,
+// "The Guid is %d %d\n",
+// guid,
+// guid_counter.value_i ()));
+
+ // Inform the scheduler of the new scheduling segment.
+ this->scheduler_->begin_new_scheduling_segment (this->guid_,
+ name,
+ sched_param,
+ implicit_sched_param
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ if (CORBA::is_nil (this->dt_.in ()))
+ //Create new DT.
+ this->dt_ = TAO_DistributableThread_Factory::create_DT ();
+
+ //Add new DT to map
+ int result = this->dt_hash_->bind (this->guid_,
+ this->dt_);
+
+ // Error in binding to the map - cancel thread.
+ if (result != 0)
+ {
+ this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+
+ // Remember parameters for the scheduling segment.
+ this->name_ = CORBA::string_dup (name);
+ this->sched_param_ = CORBA::Policy::_duplicate (sched_param);
+ this->implicit_sched_param_ = CORBA::Policy::_duplicate (implicit_sched_param);
+
+ }
+ else //Nested segment
+ {
+ // Check current DT state.
+ if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
+ {
+ this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+
+ // Inform scheduler of start of nested scheduling segment.
+ this->scheduler_->begin_nested_scheduling_segment
+ (this->guid_,
+ name,
+ sched_param,
+ implicit_sched_param
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ TAO_TSS_Resources *tss =
+ TAO_TSS_RESOURCES::instance ();
+
+ TAO_RTScheduler_Current_i* new_current;
+ ACE_NEW_THROW_EX (new_current,
+ TAO_RTScheduler_Current_i (this->orb_,
+ this->dt_hash_,
+ this->guid_,
+ name,
+ sched_param,
+ implicit_sched_param,
+ this->dt_.in (),
+ this),
+ CORBA::NO_MEMORY (
+ CORBA::SystemException::_tao_minor_code (
+ TAO_DEFAULT_MINOR_CODE,
+ ENOMEM),
+ CORBA::COMPLETED_NO));
+ ACE_CHECK;
+
+ tss->rtscheduler_current_impl_ = new_current;
+ }
+}
+
+void
+TAO_RTScheduler_Current_i::update_scheduling_segment (const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE))
+{
+ // Check if DT has been cancelled
+ if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
+ {
+ this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+
+ // Let scheduler know of the updates.
+ this->scheduler_->update_scheduling_segment (this->guid_,
+ name,
+ sched_param,
+ implicit_sched_param
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Remember the new values.
+ this->name_ = CORBA::string_dup (name);
+ this->sched_param_ = CORBA::Policy::_duplicate (sched_param);
+ this->implicit_sched_param_ = CORBA::Policy::_duplicate (implicit_sched_param);
+}
+
+void
+TAO_RTScheduler_Current_i::end_scheduling_segment (const char * name
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ // Check if DT has been cancelled
+ if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
+ {
+ this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+
+ if (this->previous_current_ == 0)
+ {
+ // Let the scheduler know that the DT is
+ // terminating.
+ this->scheduler_->end_scheduling_segment(this->guid_,
+ name
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Cleanup DT.
+ this->cleanup_DT ();
+
+ // Cleanup current.
+ this->cleanup_current ();
+
+    }
+  // A nested segment.
+  else
+    {
+
+ // Inform scheduler of end of nested
+ // scheduling segment.
+ this->scheduler_->end_nested_scheduling_segment (this->guid_,
+ name,
+ this->previous_current_->sched_param_
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Cleanup current.
+ this->cleanup_current ();
+ }
+}
+
+
+// Returns a null reference if the new distributable
+// thread could not be activated.
+RTScheduling::DistributableThread_ptr
+TAO_RTScheduler_Current_i::spawn (RTScheduling::ThreadAction_ptr start,
+ CORBA::VoidData data,
+ const char* name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param,
+ CORBA::ULong stack_size,
+ RTCORBA::Priority base_priority
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+  // Check if the DT has been cancelled.
+  if (this->dt_->state () == RTScheduling::DistributableThread::CANCELLED)
+    {
+      this->cancel_thread (ACE_ENV_SINGLE_ARG_PARAMETER);
+      ACE_CHECK_RETURN (RTScheduling::DistributableThread::_nil ());
+    }
+
+ // Create new task for new DT.
+ DTTask *dttask;
+
+ // If no scheduling parameter is specified then use the current
+ // implicit scheduling parameter as the scheduling parameter
+ if (sched_param == 0)
+ sched_param = this->implicit_sched_param_;
+
+ RTScheduling::DistributableThread_var dt = TAO_DistributableThread_Factory::create_DT ();
+ TAO_RTScheduler_Current_i *new_current;
+
+ ACE_NEW_THROW_EX (new_current,
+ TAO_RTScheduler_Current_i (this->orb_,
+ this->dt_hash_),
+ CORBA::NO_MEMORY (
+ CORBA::SystemException::_tao_minor_code (
+ TAO_DEFAULT_MINOR_CODE,
+ ENOMEM),
+ CORBA::COMPLETED_NO));
+ ACE_CHECK;
+
+ new_current->DT (dt.in ());
+
+ ACE_NEW_THROW_EX (dttask,
+ DTTask (//thread_manager_,
+ this->orb_,
+ this->dt_hash_,
+ new_current,
+ start,
+ data,
+ name,
+ sched_param,
+ implicit_sched_param),
+ CORBA::NO_MEMORY (
+ CORBA::SystemException::_tao_minor_code (
+ TAO_DEFAULT_MINOR_CODE,
+ ENOMEM),
+ CORBA::COMPLETED_NO)
+ );
+ ACE_CHECK;
+
+
+
+ if (dttask->activate_task (base_priority,
+ stack_size) == -1)
+ {
+ ACE_ERROR((LM_ERROR,
+ "Error in Spawning\n"));
+
+      return RTScheduling::DistributableThread::_nil ();
+ }
+
+ return dt._retn ();
+}
+
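+// Map the requested CORBA priority to a native priority and activate
+// the task's thread using the ORB-configured scope and scheduling
+// policies.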
+int
+DTTask::activate_task (RTCORBA::Priority base_priority,
+ CORBA::ULong stack_size
+ ACE_ENV_ARG_DECL)
+{
+ // Activate thread.
+ long default_flags = THR_NEW_LWP | THR_JOINABLE;
+ long flags =
+ default_flags |
+ this->orb_->orb_params ()->scope_policy () |
+ this->orb_->orb_params ()->sched_policy ();
+
+  CORBA::Object_var object = this->orb_->object_ref_table ().resolve_initial_references ("PriorityMappingManager"
+                                                                                         ACE_ENV_ARG_PARAMETER);
+  ACE_CHECK_RETURN (-1);
+
+  RTCORBA::PriorityMappingManager_var mapping_manager =
+    RTCORBA::PriorityMappingManager::_narrow (object.in ()
+                                              ACE_ENV_ARG_PARAMETER);
+  ACE_CHECK_RETURN (-1);
+
+ RTCORBA::PriorityMapping *pm =
+ mapping_manager->mapping ();
+
+ RTCORBA::NativePriority native_priority;
+ pm->to_native (base_priority,
+ native_priority);
+
+ size_t stack [1];
+ stack [0] = stack_size;
+ if (this->activate (flags,
+ 1,
+ 0,//force_active
+ native_priority,//priority
+ -1,//grp_id
+ 0,//ACE_Task_Base
+ 0,//thread_handles
+ 0,//stack
+ stack//stack_size
+ ) == -1)
+    {
+      if (ACE_OS::last_error () == EPERM)
+        ACE_ERROR_RETURN ((LM_ERROR,
+                           ACE_TEXT ("Insufficient privilege to spawn the thread.\n")),
+                          -1);
+
+      return -1;
+    }
+ return 0;
+}
+
+DTTask::DTTask (//ACE_Thread_Manager *manager,
+ TAO_ORB_Core *orb,
+ DT_Hash_Map *dt_hash,
+ TAO_RTScheduler_Current_i* new_current,
+ RTScheduling::ThreadAction_ptr start,
+ CORBA::VoidData data,
+ const char *name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param)
+ ://manager_ (manager),
+ orb_ (orb),
+ dt_hash_ (dt_hash),
+ current_ (new_current),
+ start_ (RTScheduling::ThreadAction::_duplicate (start)),
+ data_ (data),
+ name_ (CORBA::string_dup (name)),
+ sched_param_ (CORBA::Policy::_duplicate (sched_param)),
+ implicit_sched_param_ (CORBA::Policy::_duplicate (implicit_sched_param))
+{
+}
+
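+// Entry point of the newly spawned distributable thread: install this
+// current in TSS, open the scheduling segment, run the user-supplied
+// ThreadAction, then close the segment.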
+int
+DTTask::svc (void)
+{
+ ACE_TRY_NEW_ENV
+ {
+
+ TAO_TSS_Resources *tss =
+ TAO_TSS_RESOURCES::instance ();
+
+ tss->rtscheduler_current_impl_ = this->current_;
+
+ this->current_->begin_scheduling_segment (this->name_.in (),
+ this->sched_param_.in (),
+ this->implicit_sched_param_.in ()
+ ACE_ENV_ARG_PARAMETER);
+ ACE_TRY_CHECK;
+
+ // Invoke entry point into new DT.
+ this->start_->_cxx_do (this->data_
+ ACE_ENV_ARG_PARAMETER);
+ ACE_TRY_CHECK;
+
+ this->current_->end_scheduling_segment (this->name_.in ()
+ ACE_ENV_ARG_PARAMETER);
+ ACE_TRY_CHECK;
+ }
+ ACE_CATCHANY
+ {
+ ACE_PRINT_EXCEPTION (ACE_ANY_EXCEPTION,
+ "Caught exception:");
+ return -1;
+ }
+ ACE_ENDTRY;
+
+ return 0;
+}
+
+
+RTScheduling::Current::IdType *
+TAO_RTScheduler_Current_i::id (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+  RTScheduling::Current::IdType *guid = 0;
+  ACE_NEW_THROW_EX (guid,
+                    RTScheduling::Current::IdType (this->guid_),
+                    CORBA::NO_MEMORY ());
+  ACE_CHECK_RETURN (0);
+
+  return guid;
+}
+
+
+CORBA::Policy_ptr
+TAO_RTScheduler_Current_i::scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ return CORBA::Policy::_duplicate (this->sched_param_);
+}
+
+CORBA::Policy_ptr
+TAO_RTScheduler_Current_i::implicit_scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ return CORBA::Policy::_duplicate (this->implicit_sched_param_);
+}
+
+RTScheduling::Current::NameList *
+TAO_RTScheduler_Current_i::current_scheduling_segment_names (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ RTScheduling::Current::NameList* name_list;
+ ACE_NEW_RETURN (name_list,
+ RTScheduling::Current::NameList,
+ 0);
+
+ TAO_RTScheduler_Current_i* current = this;
+
+ for (int index = 0; current != 0; index++)
+ {
+ name_list->length (index+1);
+ (*name_list) [index] = current->name ();
+ current = current->previous_current_;
+ }
+
+ return name_list;
+}
+
+const char*
+TAO_RTScheduler_Current_i::name (void)
+{
+ return CORBA::string_dup (this->name_.in ());
+}
+
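+// Cancel the enclosing distributable thread: notify the scheduler, drop
+// the DT from the table, tear down the chain of nested currents, and
+// raise CORBA::THREAD_CANCELLED to unwind the calling thread.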
+void
+TAO_RTScheduler_Current_i::cancel_thread (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+  ACE_DEBUG ((LM_DEBUG,
+              "Distributable Thread - %d is cancelled\n",
+              *ACE_reinterpret_cast (const int *, this->guid_.get_buffer ())));
+
+ // Let the scheduler know that the thread has
+ // been cancelled.
+ this->scheduler_->cancel (this->guid_
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ this->cleanup_DT ();
+
+ // Remove all related nested currents.
+ this->delete_all_currents ();
+
+ // Throw exception.
+ ACE_THROW (CORBA::THREAD_CANCELLED ());
+}
+
+void
+TAO_RTScheduler_Current_i::cleanup_DT (void)
+{
+ // Remove DT from map.
+ this->dt_hash_->unbind (this->guid_);
+}
+
+void
+TAO_RTScheduler_Current_i::cleanup_current (void)
+{
+ TAO_TSS_Resources *tss =
+ TAO_TSS_RESOURCES::instance ();
+
+ tss->rtscheduler_current_impl_ = this->previous_current_;
+
+ // Delete this current.
+ delete this;
+}
+
+void
+TAO_RTScheduler_Current_i::delete_all_currents (void)
+{
+ TAO_RTScheduler_Current_i* current = this;
+
+ while (current != 0)
+ {
+ TAO_RTScheduler_Current_i* prev_current = current->previous_current_;
+ current->cleanup_current ();
+ current = prev_current;
+ }
+}
+
+void
+TAO_RTScheduler_Current_i::id (RTScheduling::Current::IdType guid)
+{
+ this->guid_ = guid;
+}
+
+void
+TAO_RTScheduler_Current_i::name (char * name)
+{
+ this->name_ = CORBA::string_dup (name);
+}
+
+RTScheduling::DistributableThread_ptr
+TAO_RTScheduler_Current_i::DT (void)
+{
+ return this->dt_._retn ();
+}
+
+void
+TAO_RTScheduler_Current_i::DT (RTScheduling::DistributableThread_ptr dt)
+{
+ this->dt_ = RTScheduling::DistributableThread::_duplicate (dt);
+}
+
+void
+TAO_RTScheduler_Current_i::scheduling_parameter (CORBA::Policy_ptr sched_param)
+{
+ this->sched_param_ = CORBA::Policy::_duplicate (sched_param);
+}
+
+void
+TAO_RTScheduler_Current_i::implicit_scheduling_parameter (CORBA::Policy_ptr implicit_sched_param)
+{
+ this->implicit_sched_param_ = CORBA::Policy::_duplicate (implicit_sched_param);
+}
+
+
+// *************************************************************
+
+// *************************************************************
+// Operations for class TAO_RTScheduler_Current_var
+// *************************************************************
+
+TAO_RTScheduler_Current_var::TAO_RTScheduler_Current_var (void) // default constructor
+ : ptr_ (TAO_RTScheduler_Current::_nil ())
+{}
+
+::TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::ptr (void) const
+{
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_var::TAO_RTScheduler_Current_var (const ::TAO_RTScheduler_Current_var &p)
+ : TAO_Base_var (),
+ ptr_ (TAO_RTScheduler_Current::_duplicate (p.ptr ()))
+{}
+
+TAO_RTScheduler_Current_var::~TAO_RTScheduler_Current_var (void) // destructor
+{
+ CORBA::release (this->ptr_);
+}
+
+TAO_RTScheduler_Current_var &
+TAO_RTScheduler_Current_var::operator= (TAO_RTScheduler_Current_ptr p)
+{
+ CORBA::release (this->ptr_);
+ this->ptr_ = p;
+ return *this;
+}
+
+TAO_RTScheduler_Current_var &
+TAO_RTScheduler_Current_var::operator= (const ::TAO_RTScheduler_Current_var &p)
+{
+ if (this != &p)
+ {
+ CORBA::release (this->ptr_);
+ this->ptr_ = ::TAO_RTScheduler_Current::_duplicate (p.ptr ());
+ }
+ return *this;
+}
+
+TAO_RTScheduler_Current_var::operator const ::TAO_RTScheduler_Current_ptr &() const
+{
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_var::operator ::TAO_RTScheduler_Current_ptr &()
+{
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::operator-> (void) const
+{
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::in (void) const
+{
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_ptr &
+TAO_RTScheduler_Current_var::inout (void)
+{
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_ptr &
+TAO_RTScheduler_Current_var::out (void)
+{
+ CORBA::release (this->ptr_);
+ this->ptr_ = ::TAO_RTScheduler_Current::_nil ();
+ return this->ptr_;
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::_retn (void)
+{
+ // yield ownership of managed obj reference
+ ::TAO_RTScheduler_Current_ptr val = this->ptr_;
+ this->ptr_ = ::TAO_RTScheduler_Current::_nil ();
+ return val;
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::duplicate (TAO_RTScheduler_Current_ptr p)
+{
+ return ::TAO_RTScheduler_Current::_duplicate (p);
+}
+
+void
+TAO_RTScheduler_Current_var::release (TAO_RTScheduler_Current_ptr p)
+{
+ CORBA::release (p);
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::nil (void)
+{
+ return ::TAO_RTScheduler_Current::_nil ();
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current_var::narrow (
+ CORBA::Object *p
+ ACE_ENV_ARG_DECL
+ )
+{
+ return ::TAO_RTScheduler_Current::_narrow (p ACE_ENV_ARG_PARAMETER);
+}
+
+CORBA::Object *
+TAO_RTScheduler_Current_var::upcast (void *src)
+{
+ TAO_RTScheduler_Current **tmp =
+ ACE_static_cast (TAO_RTScheduler_Current **, src);
+ return *tmp;
+}
+
+TAO_RTScheduler_Current_ptr TAO_RTScheduler_Current::_narrow (
+ CORBA::Object_ptr obj
+ ACE_ENV_ARG_DECL
+ )
+{
+ return TAO_RTScheduler_Current::_unchecked_narrow (obj ACE_ENV_ARG_PARAMETER);
+}
+
+TAO_RTScheduler_Current_ptr TAO_RTScheduler_Current::_unchecked_narrow (
+ CORBA::Object_ptr obj
+ ACE_ENV_ARG_DECL_NOT_USED
+ )
+{
+ if (CORBA::is_nil (obj))
+ return TAO_RTScheduler_Current::_nil ();
+ return
+ ACE_reinterpret_cast
+ (
+ TAO_RTScheduler_Current_ptr,
+ obj->_tao_QueryInterface
+ (
+ ACE_reinterpret_cast
+ (
+ ptr_arith_t,
+ &TAO_RTScheduler_Current::_narrow
+ )
+ )
+ );
+}
+
+TAO_RTScheduler_Current_ptr
+TAO_RTScheduler_Current::_duplicate (TAO_RTScheduler_Current_ptr obj)
+{
+ if (!CORBA::is_nil (obj))
+ obj->_add_ref ();
+ return obj;
+}
+
+void *TAO_RTScheduler_Current::_tao_QueryInterface (ptr_arith_t type)
+{
+ void *retv = 0;
+ if (type == ACE_reinterpret_cast
+ (ptr_arith_t,
+ &TAO_RTScheduler_Current::_narrow))
+ retv = ACE_reinterpret_cast (void*, this);
+ else if (type == ACE_reinterpret_cast (
+ ptr_arith_t,
+ &ACE_NESTED_CLASS (::RTScheduling, Current)::_tao_class_id)
+ )
+ {
+ retv = ACE_reinterpret_cast (void*, this);
+ }
+ else if (type == ACE_reinterpret_cast (
+ ptr_arith_t,
+ &::RTCORBA::Current::_tao_class_id)
+ )
+ {
+ retv =
+ ACE_reinterpret_cast (
+ void *,
+ ACE_static_cast (
+ RTCORBA::Current_ptr,
+ this
+ )
+ );
+ }
+ else if (type == ACE_reinterpret_cast (
+ ptr_arith_t,
+ &::CORBA::Current::_tao_class_id)
+ )
+ {
+ retv =
+ ACE_reinterpret_cast (
+ void *,
+ ACE_static_cast (
+ CORBA::Current_ptr,
+ this
+ )
+ );
+ }
+ else if (type == ACE_reinterpret_cast (
+ ptr_arith_t,
+ &CORBA::Object::_tao_class_id)
+ )
+ {
+ retv =
+ ACE_reinterpret_cast (
+ void *,
+ ACE_static_cast (CORBA::Object_ptr, this)
+ );
+ }
+
+// else if (type == ACE_reinterpret_cast
+// (ptr_arith_t,
+// &::RTScheduling::Current::_narrow))
+// retv = ACE_reinterpret_cast
+// (
+// void *,
+// ACE_static_cast
+// (
+// RTScheduling::Current_ptr,
+// this
+// )
+// );
+// else if (type == ACE_reinterpret_cast (ptr_arith_t, &CORBA::Object::_narrow))
+// retv = ACE_reinterpret_cast (void *,
+// ACE_static_cast (CORBA::Object_ptr, this));
+
+ if (retv)
+ this->_add_ref ();
+ return retv;
+}
+
+const char* TAO_RTScheduler_Current::_interface_repository_id (void) const
+{
+ return "IDL:TAO_RTScheduler_Current:1.0";
+}
+
+
+#if defined (ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION)
+
+template class ACE_Equal_To<IdType>;
+template class ACE_Hash_Map_Manager_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+template class ACE_Hash_Map_Iterator_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+template class ACE_Hash_Map_Entry<IdType, RTScheduling::DistributableThread_var>;
+template class ACE_Hash_Map_Reverse_Iterator_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+template class ACE_Hash_Map_Iterator_Base_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+
+#elif defined (ACE_HAS_TEMPLATE_INSTANTIATION_PRAGMA)
+#pragma instantiate ACE_Equal_To<IdType>;
+#pragma instantiate ACE_Hash_Map_Manager_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+#pragma instantiate ACE_Hash_Map_Iterator_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+#pragma instantiate ACE_Hash_Map_Entry<IdType, RTScheduling::DistributableThread_var>;
+#pragma instantiate ACE_Hash_Map_Reverse_Iterator_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+#pragma instantiate ACE_Hash_Map_Iterator_Base_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex>;
+
+#endif /* ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION */
diff --git a/TAO/tao/RTScheduling/Current.h b/TAO/tao/RTScheduling/Current.h
new file mode 100644
index 00000000000..be1009bcb2b
--- /dev/null
+++ b/TAO/tao/RTScheduling/Current.h
@@ -0,0 +1,362 @@
+//$Id$
+#ifndef TAO_RTSCHEDULER_CURRENT_H
+#define TAO_RTSCHEDULER_CURRENT_H
+
+
+#include "ace/Hash_Map_Manager_T.h"
+#include "rtscheduler_export.h"
+#include "RTSchedulerC.h"
+#include "ace/Task.h"
+#include "ace/Atomic_Op.h"
+
+#if !defined (ACE_LACKS_PRAGMA_ONCE)
+# pragma once
+#endif /* ACE_LACKS_PRAGMA_ONCE */
+
+
+class TAO_RTScheduler_Current_i;
+class TAO_TSS_Resources;
+
+
+/**
+ * @class TAO_DTId_Hash
+ *
+ * @brief Hashing class for Distributable Thread Ids.
+ *
+ * Defines the hash() method for Distributable Thread Ids.
+ */
+
+typedef TAO_Unbounded_Sequence<CORBA::Octet> IdType;
+
+extern ACE_Atomic_Op<ACE_Thread_Mutex, long> guid_counter;
+
+class TAO_RTScheduler_Export TAO_DTId_Hash
+{
+public:
+
+ /// Returns hash value.
+ u_long operator () (const IdType &id) const;
+};
+
+
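+// Table of active distributable threads, keyed by GUID and shared by
+// all Currents created for an ORB.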
+typedef ACE_Hash_Map_Manager_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex> DT_Hash_Map;
+typedef ACE_Hash_Map_Iterator_Ex<IdType, RTScheduling::DistributableThread_var, TAO_DTId_Hash, ACE_Equal_To<IdType>, ACE_Thread_Mutex> DT_Hash_Map_Iterator;
+typedef ACE_Hash_Map_Entry <IdType,RTScheduling::DistributableThread_var> DT_Hash_Map_Entry;
+
+class TAO_RTScheduler_Current;
+class TAO_RTScheduler_Current_var;
+
+typedef TAO_RTScheduler_Current* TAO_RTScheduler_Current_ptr;
+
+class TAO_RTScheduler_Export TAO_RTScheduler_Current
+  : public RTScheduling::Current,
+    public TAO_Local_RefCounted_Object
+{
+ public:
+
+ TAO_RTScheduler_Current (TAO_ORB_Core*);
+
+
+ virtual RTCORBA::Priority the_priority (ACE_ENV_SINGLE_ARG_DECL_WITH_DEFAULTS)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual void the_priority (RTCORBA::Priority the_priority
+ ACE_ENV_ARG_DECL_WITH_DEFAULTS)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ void rt_current (RTCORBA::Current_ptr rt_current);
+
+ virtual void begin_scheduling_segment
+ (const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE));
+
+ virtual void update_scheduling_segment
+ (const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE));
+
+ virtual void end_scheduling_segment
+ (const char * name
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
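+
+  // Illustrative use of the segment operations (a sketch, not part of
+  // this commit; assumes native C++ exceptions and an already-resolved
+  // TAO_RTScheduler_Current reference):
+  //
+  //   current->begin_scheduling_segment ("segment_a", sched_param,
+  //                                      implicit_sched_param);
+  //   // ... application work runs under the scheduler's control ...
+  //   current->end_scheduling_segment ("segment_a");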
+
+ virtual RTScheduling::DistributableThread_ptr
+ lookup(const RTScheduling::Current::IdType & id
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+  // Returns a null reference if the distributable thread
+  // is not known to the local scheduler.
+
+ virtual RTScheduling::DistributableThread_ptr
+ spawn (RTScheduling::ThreadAction_ptr start,
+ CORBA::VoidData data,
+ const char* name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param,
+ CORBA::ULong stack_size,
+ RTCORBA::Priority base_priority
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
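+
+  // A spawn sketch (hypothetical; `action` is an application-defined
+  // RTScheduling::ThreadAction servant, the remaining arguments follow
+  // the parameter list above, and 0 picks the default stack size):
+  //
+  //   RTScheduling::DistributableThread_var dt =
+  //     current->spawn (action.in (), 0 /* data */, "worker",
+  //                     sched_param, implicit_sched_param,
+  //                     0, base_priority);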
+
+ virtual ::RTScheduling::Current::IdType *
+ id (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual ::CORBA::Policy_ptr
+ scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual ::CORBA::Policy_ptr
+ implicit_scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual ::RTScheduling::Current::NameList *
+ current_scheduling_segment_names (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ TAO_RTScheduler_Current_i* implementation (void);
+
+ TAO_RTScheduler_Current_i* implementation (TAO_RTScheduler_Current_i*);
+
+ TAO_ORB_Core* orb (void);
+
+ DT_Hash_Map* dt_hash (void);
+
+ /**
+ * @name Reference Related Methods
+ */
+ //@{
+#if !defined(__GNUC__) || !defined (ACE_HAS_GNUG_PRE_2_8)
+ typedef TAO_RTScheduler_Current_ptr _ptr_type;
+ typedef TAO_RTScheduler_Current_var _var_type;
+#endif /* ! __GNUC__ || g++ >= 2.8 */
+
+ static TAO_RTScheduler_Current_ptr _duplicate (TAO_RTScheduler_Current_ptr obj);
+
+ static TAO_RTScheduler_Current_ptr _narrow (
+ CORBA::Object_ptr obj
+ ACE_ENV_ARG_DECL_WITH_DEFAULTS);
+
+
+ static TAO_RTScheduler_Current_ptr _unchecked_narrow (
+ CORBA::Object_ptr obj
+ ACE_ENV_ARG_DECL_WITH_DEFAULTS);
+
+
+ static TAO_RTScheduler_Current_ptr _nil (void)
+ {
+ return (TAO_RTScheduler_Current_ptr)0;
+ }
+
+ virtual void *_tao_QueryInterface (ptr_arith_t type);
+
+ virtual const char* _interface_repository_id (void) const;
+ //@}
+
+ private:
+ RTCORBA::Current_var rt_current_;
+ TAO_ORB_Core* orb_;
+ DT_Hash_Map dt_hash_;
+
+};
+
+
+
+/**
+ * @class TAO_RTScheduler_Current_var
+ */
+class TAO_RTScheduler_Export TAO_RTScheduler_Current_var : public TAO_Base_var
+{
+public:
+
+ TAO_RTScheduler_Current_var (void); // default constructor
+ TAO_RTScheduler_Current_var (TAO_RTScheduler_Current_ptr p) : ptr_ (p) {}
+ TAO_RTScheduler_Current_var (const TAO_RTScheduler_Current_var &); // copy constructor
+ ~TAO_RTScheduler_Current_var (void); // destructor
+
+ TAO_RTScheduler_Current_var &operator= (TAO_RTScheduler_Current_ptr);
+ TAO_RTScheduler_Current_var &operator= (const TAO_RTScheduler_Current_var &);
+ TAO_RTScheduler_Current_ptr operator-> (void) const;
+
+ operator const TAO_RTScheduler_Current_ptr &() const;
+ operator TAO_RTScheduler_Current_ptr &();
+ // in, inout, out, _retn
+ TAO_RTScheduler_Current_ptr in (void) const;
+ TAO_RTScheduler_Current_ptr &inout (void);
+ TAO_RTScheduler_Current_ptr &out (void);
+ TAO_RTScheduler_Current_ptr _retn (void);
+ TAO_RTScheduler_Current_ptr ptr (void) const;
+
+ // Hooks used by template sequence and object manager classes
+ // for non-defined forward declared interfaces.
+ static TAO_RTScheduler_Current_ptr duplicate (TAO_RTScheduler_Current_ptr);
+ static void release (TAO_RTScheduler_Current_ptr);
+ static TAO_RTScheduler_Current_ptr nil (void);
+ static TAO_RTScheduler_Current_ptr narrow (
+ CORBA::Object *
+ ACE_ENV_ARG_DECL_NOT_USED
+ );
+ static CORBA::Object * upcast (void *);
+
+private:
+
+ TAO_RTScheduler_Current_ptr ptr_;
+ // Unimplemented - prevents widening assignment.
+ TAO_RTScheduler_Current_var (const TAO_Base_var &rhs);
+ TAO_RTScheduler_Current_var &operator= (const TAO_Base_var &rhs);
+
+};
+
+
+class TAO_RTScheduler_Export TAO_RTScheduler_Current_i
+{
+ public:
+
+ TAO_RTScheduler_Current_i (TAO_ORB_Core* orb,
+ DT_Hash_Map* dt_hash
+ ACE_ENV_ARG_DECL_WITH_DEFAULTS);
+
+ TAO_RTScheduler_Current_i (TAO_ORB_Core* orb,
+ DT_Hash_Map* dt_hash,
+ RTScheduling::Current::IdType guid,
+ const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param,
+ RTScheduling::DistributableThread_ptr dt,
+ TAO_RTScheduler_Current_i* prev_current
+ ACE_ENV_ARG_DECL_WITH_DEFAULTS);
+
+  virtual ~TAO_RTScheduler_Current_i (void)
+  {
+  }
+
+ virtual RTScheduling::DistributableThread_ptr
+ spawn (RTScheduling::ThreadAction_ptr start,
+ CORBA::VoidData data,
+ const char* name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param,
+ CORBA::ULong stack_size,
+ RTCORBA::Priority base_priority
+ ACE_ENV_ARG_DECL_WITH_DEFAULTS)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual void begin_scheduling_segment
+ (const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE));
+
+ virtual void update_scheduling_segment
+ (const char * name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RTScheduling::Current::UNSUPPORTED_SCHEDULING_DISCIPLINE));
+
+ virtual void end_scheduling_segment
+ (const char * name
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual RTScheduling::Current::IdType *
+ id (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+
+ void id (RTScheduling::Current::IdType guid );
+
+ virtual CORBA::Policy_ptr
+ scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ virtual CORBA::Policy_ptr
+ implicit_scheduling_parameter (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+
+ void scheduling_parameter (CORBA::Policy_ptr);
+
+ void implicit_scheduling_parameter (CORBA::Policy_ptr);
+
+ virtual RTScheduling::Current::NameList *
+ current_scheduling_segment_names (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ void cancel_thread (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+
+ void cleanup_DT (void);
+
+ void cleanup_current (void);
+
+ void delete_all_currents (void);
+
+ const char* name (void);
+ void name (char *);
+
+ TAO_ORB_Core* orb (void);
+
+ RTScheduling::Scheduler_ptr scheduler (void);
+
+ DT_Hash_Map* dt_hash (void);
+
+ RTScheduling::DistributableThread_ptr DT (void);
+ void DT (RTScheduling::DistributableThread_ptr);
+
+ private:
+ RTScheduling::Scheduler_var scheduler_;
+ TAO_ORB_Core* orb_;
+ RTScheduling::Current::IdType guid_;
+ CORBA::String_var name_;
+ CORBA::Policy_ptr sched_param_;
+ CORBA::Policy_ptr implicit_sched_param_;
+ RTScheduling::DistributableThread_var dt_;
+ TAO_RTScheduler_Current_i* previous_current_;
+ DT_Hash_Map* dt_hash_;
+};
+
+// This class provides an entry point for the new DT.
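+// activate_task() maps the base priority to a native priority and
+// spawns the thread; svc() then runs in the new thread and drives the
+// begin-segment / ThreadAction / end-segment sequence.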
+class DTTask : public ACE_Task <ACE_SYNCH>
+{
+public:
+ DTTask (//ACE_Thread_Manager manager,
+ TAO_ORB_Core* orb,
+ DT_Hash_Map* dt_hash,
+ TAO_RTScheduler_Current_i*,
+ RTScheduling::ThreadAction_ptr start,
+ CORBA::VoidData data,
+ const char* name,
+ CORBA::Policy_ptr sched_param,
+ CORBA::Policy_ptr implicit_sched_param);
+
+  int activate_task (RTCORBA::Priority base_priority,
+                     CORBA::ULong stack_size
+                     ACE_ENV_ARG_DECL_WITH_DEFAULTS);
+
+ virtual int svc (void);
+
+ private:
+ //ACE_Thread_Manager* manager_;
+ TAO_ORB_Core* orb_;
+ DT_Hash_Map* dt_hash_;
+ TAO_RTScheduler_Current_i* current_;
+ RTScheduling::ThreadAction_var start_;
+ CORBA::VoidData data_;
+ RTScheduling::Current::IdType guid_;
+ CORBA::String_var name_;
+ CORBA::Policy_var sched_param_;
+ CORBA::Policy_var implicit_sched_param_;
+};
+#endif /*TAO_RTSCHEDULER_CURRENT_H*/