author     venkita <venkita@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>  2003-08-24 18:27:39 +0000
committer  venkita <venkita@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>  2003-08-24 18:27:39 +0000
commit     7a948522b445bc66347906b547a512f4780656bb (patch)
tree       36babaa15a1dbd1784d07c64f28f7fbee6683dff /TAO/orbsvcs/orbsvcs/Sched
parent     6e48e15dc70618eb1bf07c9fc19d55718d95b043 (diff)
download   ATCD-7a948522b445bc66347906b547a512f4780656bb.tar.gz
ChangeLogTag: Sun Aug 24 13:09:50 2003 Venkita Subramonian <venkita@cs.wustl.edu>
Diffstat (limited to 'TAO/orbsvcs/orbsvcs/Sched')
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.cpp          176
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.h            127
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.cpp     1120
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.h        380
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.cpp    539
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.h      175
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.cpp     2077
-rw-r--r--  TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.h        339
8 files changed, 4183 insertions, 750 deletions
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.cpp b/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.cpp
index f6da5e721a3..6c98a2db43c 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.cpp
+++ b/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.cpp
@@ -181,6 +181,7 @@ void ACE_Config_Scheduler::priority (RtecScheduler::handle_t handle,
RtecScheduler::UNKNOWN_TASK,
RtecScheduler::NOT_SCHEDULED))
{
+
if (impl->priority (handle, priority, p_subpriority, p_priority) == -1)
{
ACE_ERROR ((LM_ERROR,
@@ -212,6 +213,7 @@ void ACE_Config_Scheduler::add_dependency (RtecScheduler::handle_t handle,
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::UNKNOWN_TASK))
{
+
RtecScheduler::RT_Info* rt_info = 0;
switch (impl->lookup_rt_info (handle, rt_info))
{
@@ -242,6 +244,7 @@ void ACE_Config_Scheduler::add_dependency (RtecScheduler::handle_t handle,
void ACE_Config_Scheduler::compute_scheduling (CORBA::Long minimum_priority,
CORBA::Long maximum_priority,
RtecScheduler::RT_Info_Set_out infos,
+ RtecScheduler::Dependency_Set_out dependencies,
RtecScheduler::Config_Info_Set_out configs,
RtecScheduler::Scheduling_Anomaly_Set_out anomalies
ACE_ENV_ARG_DECL_NOT_USED)
@@ -250,6 +253,7 @@ void ACE_Config_Scheduler::compute_scheduling (CORBA::Long minimum_priority,
RtecScheduler::INSUFFICIENT_THREAD_PRIORITY_LEVELS,
RtecScheduler::TASK_COUNT_MISMATCH))
{
+
// Initialize the scheduler implementation.
impl->init (minimum_priority, maximum_priority);
@@ -438,8 +442,10 @@ void ACE_Config_Scheduler::compute_scheduling (CORBA::Long minimum_priority,
ACE_DEBUG ((LM_DEBUG, "Schedule prepared.\n"));
ACE_DEBUG ((LM_DEBUG, "Dumping to stdout.\n"));
- ACE_Scheduler_Factory::dump_schedule (*(infos.ptr()), *(configs.ptr()),
- *(anomalies.ptr()), 0);
+ ACE_Scheduler_Factory::dump_schedule (*(infos.ptr()),
+ *(dependencies.ptr()),
+ *(configs.ptr()),
+ *(anomalies.ptr()), 0);
ACE_DEBUG ((LM_DEBUG, "Dump done.\n"));
}
@@ -452,6 +458,7 @@ void ACE_Config_Scheduler::dispatch_configuration (RtecScheduler::Preemption_Pri
RtecScheduler::NOT_SCHEDULED,
RtecScheduler::UNKNOWN_PRIORITY_LEVEL))
{
+
if (impl->dispatch_configuration (p_priority, priority, d_type) == -1)
{
ACE_ERROR ((LM_ERROR,
@@ -468,6 +475,7 @@ ACE_Config_Scheduler::last_scheduled_priority (ACE_ENV_SINGLE_ARG_DECL_NOT_USED)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::NOT_SCHEDULED))
{
+
RtecScheduler::Preemption_Priority_t priority = impl->minimum_priority_queue ();
if (priority < 0)
@@ -496,3 +504,167 @@ ACE_Config_Scheduler::get_config_infos (RtecScheduler::Config_Info_Set_out confi
//for now, this function is unimplemented
return;
}
+
+virtual void reset (RtecScheduler::handle_t handle,
+ RtecScheduler::Criticality_t criticality,
+ RtecScheduler::Time time,
+ RtecScheduler::Time typical_time,
+ RtecScheduler::Time cached_time,
+ RtecScheduler::Period_t period,
+ RtecScheduler::Importance_t importance,
+ RtecScheduler::Quantum_t quantum,
+ CORBA::Long threads,
+ RtecScheduler::Info_Type_t info_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void set_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+// Set characteristics of the RT_Infos corresponding to the passed handles.
+// Tuples are added in the case of existing and/or multiple definitions.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void reset_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+// Reset characteristics of the RT_Infos corresponding to the passed handles.
+// Tuples are replaced in the case of existing and/or multiple definitions.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void replace_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+// Replace all RT_Infos, resetting characteristics of the RT_Infos
+// corresponding to the passed handles. All other RT_Infos are
+// reset to their uninitialized values, i.e., the same they have
+// just after the create call.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void remove_dependency (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+// This method removes a dependency between two RT_Infos.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void set_dependency_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+// This method sets the enable state of a dependency between two RT_Infos.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void set_dependency_enable_state_seq (const RtecScheduler::Dependency_Set & dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+// This method sets the enable state of a sequence of dependencies.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void set_rt_info_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::RT_Info_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+// This method enables or disables an RT_Info.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void set_rt_info_enable_state_seq (const RtecScheduler::RT_Info_Enable_State_Pair_Set & pair_set
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+// This method enables or disables a sequence of RT_Infos.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void recompute_scheduling (CORBA::Long minimum_priority,
+ CORBA::Long maximum_priority,
+ RtecScheduler::Scheduling_Anomaly_Set_out anomalies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UTILIZATION_BOUND_EXCEEDED,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INSUFFICIENT_THREAD_PRIORITY_LEVELS,
+ RtecScheduler::TASK_COUNT_MISMATCH,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::DUPLICATE_NAME))
+// Recomputes the scheduling priorities, etc.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void get_rt_info_set (RtecScheduler::RT_Info_Set_out infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL))
+// Returns the set of rt_infos, with their assigned priorities (as
+// of the last schedule re-computation).
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void get_dependency_set (RtecScheduler::Dependency_Set_out dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL))
+// Returns the set of dependencies (as of the last schedule
+// re-computation).
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
+
+virtual void get_config_info_set (RtecScheduler::Config_Info_Set_out configs
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL))
+// Returns the set of config_infos, describing the appropriate
+// number, types, and priority levels for the dispatching lanes.
+{
+ ACE_THROW (CORBA::NO_IMPLEMENT ());
+}
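
All of the operations appended above are placeholders that raise CORBA::NO_IMPLEMENT until the Reconfig Scheduler supplies real behavior. As appended to the .cpp they still carry the virtual keyword and no class qualification; to compile as part of the servant they would presumably become member definitions of ACE_Config_Scheduler. The following is a minimal, self-contained sketch of the same stub pattern in plain C++, where NoImplement and Scheduler are hypothetical stand-ins for CORBA::NO_IMPLEMENT and the servant class, not the real TAO types:

#include <stdexcept>

// Hypothetical stand-in for CORBA::NO_IMPLEMENT.
struct NoImplement : std::logic_error
{
  NoImplement () : std::logic_error ("operation not implemented") {}
};

// Hypothetical stand-in for the servant class; each new operation gets a
// stub that fails loudly until a real implementation is provided.
class Scheduler
{
public:
  virtual void recompute_scheduling (long /*min_priority*/, long /*max_priority*/)
  {
    throw NoImplement ();
  }
  virtual ~Scheduler () {}
};
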
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.h b/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.h
index e0278a40462..1fc887b8fb5 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.h
+++ b/TAO/orbsvcs/orbsvcs/Sched/Config_Scheduler.h
@@ -90,6 +90,7 @@ public:
virtual void compute_scheduling (CORBA::Long minimum_priority,
CORBA::Long maximum_priority,
RtecScheduler::RT_Info_Set_out infos,
+ RtecScheduler::Dependency_Set_out dependencies,
RtecScheduler::Config_Info_Set_out configs,
RtecScheduler::Scheduling_Anomaly_Set_out anomalies
ACE_ENV_ARG_DECL)
@@ -125,6 +126,132 @@ public:
RtecScheduler::NOT_SCHEDULED));
// Provides the set of Config_Infos associated with the current schedule.
+ virtual void reset (RtecScheduler::handle_t handle,
+ RtecScheduler::Criticality_t criticality,
+ RtecScheduler::Time time,
+ RtecScheduler::Time typical_time,
+ RtecScheduler::Time cached_time,
+ RtecScheduler::Period_t period,
+ RtecScheduler::Importance_t importance,
+ RtecScheduler::Quantum_t quantum,
+ CORBA::Long threads,
+ RtecScheduler::Info_Type_t info_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Reset characteristics of the RT_Info corresponding to the passed handle.
+
+ virtual void set_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Set characteristics of the RT_Infos corresponding to the passed handles.
+ // Tuples are added in the case of existing and/or multiple definitions.
+
+ virtual void reset_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Reset characteristics of the RT_Infos corresponding to the passed handles.
+ // Tuples are replaced in the case of existing and/or multiple definitions.
+
+ virtual void replace_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Replace all RT_Infos, resetting characteristics of the RT_Infos
+ // corresponding to the passed handles. All other RT_Infos are
+ // reset to their uninitialized values, i.e., the same they have
+ // just after the create call.
+
+ virtual void remove_dependency (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method removes a dependency between two RT_Infos.
+
+ virtual void set_dependency_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method sets the enable state of a dependency between two RT_Infos.
+
+ virtual void set_dependency_enable_state_seq (const RtecScheduler::Dependency_Set & dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method sets the enable state of a sequence of dependencies.
+
+ virtual void set_rt_info_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::RT_Info_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method enables or disables an RT_Info.
+
+ virtual void set_rt_info_enable_state_seq (const RtecScheduler::RT_Info_Enable_State_Pair_Set & pair_set
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method enables or disables a sequence of RT_Infos.
+
+ virtual void recompute_scheduling (CORBA::Long minimum_priority,
+ CORBA::Long maximum_priority,
+ RtecScheduler::Scheduling_Anomaly_Set_out anomalies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UTILIZATION_BOUND_EXCEEDED,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INSUFFICIENT_THREAD_PRIORITY_LEVELS,
+ RtecScheduler::TASK_COUNT_MISMATCH,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::DUPLICATE_NAME));
+ // Recomputes the scheduling priorities, etc.
+
+ virtual void get_rt_info_set (RtecScheduler::RT_Info_Set_out infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL));
+ // Returns the set of rt_infos, with their assigned priorities (as
+ // of the last schedule re-computation).
+
+ virtual void get_dependency_set (RtecScheduler::Dependency_Set_out dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL));
+ // Returns the set of dependencies (as of the last schedule
+ // re-computation).
+
+ virtual void get_config_info_set (RtecScheduler::Config_Info_Set_out configs
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL));
+ // Returns the set of config_infos, describing the appropriate
+ // number, types, and priority levels for the dispatching lanes.
+
private:
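
The declarations added to the header expose run-time reconfiguration: RT_Infos and dependencies can be enabled or disabled individually or in batches, dependencies can be removed, and the schedule can be recomputed without tearing down the existing RT_Info set. A toy, self-contained sketch of that workflow is shown below; Scheduler, Handle, and State are illustrative stand-ins and not the RtecScheduler IDL types:

#include <iostream>
#include <map>

using Handle = int;
enum class State { Disabled, Enabled };

class Scheduler
{
public:
  void set_rt_info_enable_state (Handle h, State s) { state_[h] = s; }

  void recompute_scheduling ()
  {
    // A real implementation re-runs admission control and priority
    // assignment in place; here we just report what would be admitted.
    for (const auto &entry : state_)
      std::cout << "operation " << entry.first
                << (entry.second == State::Enabled ? " scheduled\n" : " skipped\n");
  }

private:
  std::map<Handle, State> state_;
};

int main ()
{
  Scheduler scheduler;
  scheduler.set_rt_info_enable_state (1, State::Enabled);
  scheduler.set_rt_info_enable_state (2, State::Disabled);  // drop operation 2 at run time
  scheduler.recompute_scheduling ();                        // rebuild the schedule in place
}
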
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.cpp b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.cpp
index 25c6cca9bd0..4ad5e2c850f 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.cpp
+++ b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.cpp
@@ -34,6 +34,197 @@
ACE_RCSID (Sched, Reconfig_Sched_Utils, "$Id$")
+///////////////////////////
+// struct TAO_RT_Info_Ex //
+///////////////////////////
+
+// Default Constructor.
+
+TAO_RT_Info_Ex::TAO_RT_Info_Ex ()
+{
+ // Note: the entry_point string takes care of itself.
+ handle = 0;
+ criticality = RtecScheduler::VERY_LOW_CRITICALITY;
+ worst_case_execution_time = 0;
+ typical_execution_time = 0;
+ cached_execution_time = 0;
+ period = 0;
+ importance = RtecScheduler::VERY_LOW_IMPORTANCE;
+ quantum = 0;
+ threads = 0;
+ info_type = RtecScheduler::OPERATION;
+ priority = 0;
+ preemption_subpriority = 0;
+ preemption_priority = 0;
+ enabled = RtecScheduler::RT_INFO_ENABLED;
+ volatile_token = 0;
+}
+
+
+// Constructor from an RT_Info
+// (Also serves as a copy constructor)
+
+TAO_RT_Info_Ex::TAO_RT_Info_Ex (const RtecScheduler::RT_Info &info)
+{
+ this->entry_point = info.entry_point;
+ this->handle = info.handle;
+ this->criticality = info.criticality;
+ this->worst_case_execution_time = info.worst_case_execution_time;
+ this->typical_execution_time = info.typical_execution_time;
+ this->cached_execution_time = info.cached_execution_time;
+ this->period = info.period;
+ this->importance = info.importance;
+ this->quantum = info.quantum;
+ this->threads = info.threads;
+ this->info_type = info.info_type;
+ this->priority = info.priority;
+ this->preemption_subpriority = info.preemption_subpriority;
+ this->preemption_priority = info.preemption_priority;
+ this->enabled = info.enabled; // TODO- rethink?
+ this->volatile_token = info.volatile_token;
+}
+
+
+// Destructor.
+TAO_RT_Info_Ex::~TAO_RT_Info_Ex ()
+{
+}
+
+
+// Assignment operator with an RT_Info on the RHS.
+
+void
+TAO_RT_Info_Ex::operator = (const RtecScheduler::RT_Info &info)
+{
+ // IMPORTANT: we don't copy the name or the handle or the output
+ // attributes or the volatile token (entry pointer) or the valid
+ // flag. These can only be copied in the copy ctor at
+ // initialization.
+
+ criticality = info.criticality;
+ worst_case_execution_time = info.worst_case_execution_time;
+ typical_execution_time = info.typical_execution_time;
+ cached_execution_time = info.cached_execution_time;
+ period = info.period;
+ importance = info.importance;
+ quantum = info.quantum;
+ threads = info.threads;
+ info_type = info.info_type;
+ enabled = info.enabled;
+}
+
+
+// Resets all data members to initial (invalid) values, and removes
+// tuples corresponding to the reset flags.
+
+void
+TAO_RT_Info_Ex::reset (u_long reset_flags)
+{
+ // IMPORTANT: among the input arguments, we only reset the period ...
+ // TBD - if execution times etc. can be selected as well, then reset those, e.g.,
+ //
+ // criticality = RtecScheduler::VERY_LOW_CRITICALITY;
+ // worst_case_execution_time = 0;
+ // typical_execution_time = 0;
+ // cached_execution_time = 0;
+ // importance = RtecScheduler::VERY_LOW_IMPORTANCE;
+ // quantum = 0;
+ // threads = 0;
+ // info_type = RtecScheduler::OPERATION;
+
+ period = 0;
+
+ // ... However, we do reset the output attributes ...
+
+ priority = 0;
+ preemption_subpriority = 0;
+ preemption_priority = 0;
+
+ // ... and the appropriate tuples associated with the entry.
+ TAO_Reconfig_Scheduler_Entry * entry_ptr =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ volatile_token);
+
+ ACE_DEBUG((LM_DEBUG, "Removing Entries for RT_Info: %d, entry_ptr: %x\n", handle, entry_ptr));
+ if (entry_ptr)
+ {
+ entry_ptr->remove_tuples (reset_flags);
+ }
+ else
+ {
+ ACE_ERROR ((LM_ERROR, "Pointer to associated entry is zero."));
+ }
+}
+
+void
+TAO_RT_Info_Ex::enabled_state (RtecScheduler::RT_Info_Enabled_Type_t enabled_in)
+{
+ TAO_Reconfig_Scheduler_Entry * entry_ptr =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ volatile_token);
+ if (entry_ptr)
+ {
+ this->enabled = enabled_in;
+ entry_ptr->enabled_state (enabled_in);
+ }
+ else
+ {
+ ACE_ERROR ((LM_ERROR, "Pointer to associated entry is zero."));
+ }
+}
+
+RtecScheduler::RT_Info_Enabled_Type_t
+TAO_RT_Info_Ex::enabled_state ()
+{
+ return this->enabled;
+}
+
+
+
+//////////////////////////////
+// struct TAO_RT_Info_Tuple //
+//////////////////////////////
+
+// Default Constructor.
+TAO_RT_Info_Tuple::TAO_RT_Info_Tuple ()
+ : rate_index (0)
+{
+}
+
+
+// Constructor from an RT_Info.
+// (Also serves as a copy constructor)
+
+TAO_RT_Info_Tuple::TAO_RT_Info_Tuple (const RtecScheduler::RT_Info &info)
+ : TAO_RT_Info_Ex (info),
+ rate_index (0)
+{
+}
+
+// Destructor.
+
+TAO_RT_Info_Tuple::~TAO_RT_Info_Tuple ()
+{
+}
+
+
+// Assignment operator with an RT_Info on the RHS.
+
+void
+TAO_RT_Info_Tuple::operator = (const RtecScheduler::RT_Info &info)
+{
+ ACE_static_cast (TAO_RT_Info_Ex, *this) = info;
+}
+
+
+// Less-than comparison operator: orders tuples by ascending rate (descending period).
+
+int
+TAO_RT_Info_Tuple::operator < (const TAO_RT_Info_Tuple &t)
+{
+ return (this->period > t.period) ? 1 : 0;
+}
+
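The operator< above makes the tuple with the longer period (hence the lower rate) compare less, which is what "ascending rate" ordering means here. A small standalone illustration of that ordering, with a plain struct and std::sort standing in for the TAO types and multisets:

#include <algorithm>
#include <iostream>
#include <vector>

// Simplified stand-in for TAO_RT_Info_Tuple: only the period matters here.
struct Tuple { unsigned long period; };

// Same predicate as the operator< above: longer period == lower rate == "less".
bool lower_rate_first (const Tuple &a, const Tuple &b) { return a.period > b.period; }

int main ()
{
  std::vector<Tuple> tuples = { {100}, {400}, {200} };
  std::sort (tuples.begin (), tuples.end (), lower_rate_first);
  for (const Tuple &t : tuples)
    std::cout << t.period << ' ';   // prints 400 200 100: ascending rate
  std::cout << '\n';
}
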
////////////////////////////////////////
// class TAO_Reconfig_Scheduler_Entry //
@@ -42,7 +233,7 @@ ACE_RCSID (Sched, Reconfig_Sched_Utils, "$Id$")
// Constructor.
TAO_Reconfig_Scheduler_Entry::
-TAO_Reconfig_Scheduler_Entry (RtecScheduler::RT_Info &rt_info)
+TAO_Reconfig_Scheduler_Entry (TAO_RT_Info_Ex &rt_info)
: actual_rt_info_ (&rt_info),
fwd_dfs_status_ (NOT_VISITED),
rev_dfs_status_ (NOT_VISITED),
@@ -53,17 +244,222 @@ TAO_Reconfig_Scheduler_Entry (RtecScheduler::RT_Info &rt_info)
is_thread_delineator_ (0),
has_unresolved_remote_dependencies_ (0),
has_unresolved_local_dependencies_ (0),
- effective_exec_multiplier_ (0),
- effective_period_ (0)
+ aggregate_exec_time_ (0),
+ orig_tuple_period_sum_ (0),
+ prop_tuple_period_sum_ (0),
+ orig_tuple_count_ (0),
+ prop_tuple_count_ (0),
+ current_admitted_tuple_ (0),
+ enabled_ (rt_info.enabled)
+ // effective_exec_multiplier_ (0), //WSOA merge commented out
+ // effective_period_ (0) //WSOA merge commented out
{
// Store the RT_Info fields.
this->orig_rt_info_data (*actual_rt_info_);
}
+// Destructor.
+
+TAO_Reconfig_Scheduler_Entry::
+~TAO_Reconfig_Scheduler_Entry ()
+{
+ this->remove_tuples (ORIGINAL | PROPAGATED);
+}
+
+
+// Removes all tuples from the entry.
+
+void
+TAO_Reconfig_Scheduler_Entry::
+remove_tuples (u_long tuple_flags)
+{
+ TAO_RT_Info_Tuple **tuple_ptr_ptr;
+
+ if (tuple_flags & ORIGINAL)
+ {
+ TUPLE_SET_ITERATOR orig_tuple_iter (this->orig_tuple_subset_);
+
+ while (orig_tuple_iter.done () == 0)
+ {
+ if (orig_tuple_iter.next (tuple_ptr_ptr) == 0
+ || tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
+ {
+ ACE_ERROR ((LM_ERROR,
+ "Failed to access tuple under iterator"));
+ return;
+ }
+
+ delete (*tuple_ptr_ptr);
+
+ orig_tuple_iter.advance ();
+ }
+
+ this->orig_tuple_subset_.reset ();
+ }
+
+ // If either the originals or the propagated tuple pointers are to
+ // be removed, we have to get rid of the propagated pointers lest
+ // they become handles to access violations after the original
+ // tuples are destroyed.
+ if (tuple_flags & PROPAGATED
+ || tuple_flags & ORIGINAL)
+ {
+ this->prop_tuple_subset_.reset ();
+ }
+}
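
remove_tuples deletes the heap-allocated original tuples and clears the propagated subset whenever the originals are removed, because the propagated subset only holds pointers into the originals and would otherwise dangle. A compact standalone model of that ownership rule, with simplified flag values and containers in place of the ACE sets:

#include <memory>
#include <vector>

// Bit flags mirroring the ORIGINAL / PROPAGATED selectors used above.
enum : unsigned long { ORIGINAL = 0x01UL, PROPAGATED = 0x02UL };

struct Tuple { unsigned long period = 0; };

struct Entry
{
  std::vector<std::unique_ptr<Tuple>> originals;  // owning set
  std::vector<Tuple *> propagated;                // non-owning views into originals

  void remove_tuples (unsigned long flags)
  {
    if (flags & ORIGINAL)
      originals.clear ();              // destroys the owned tuples

    // If the originals go away, the propagated pointers must go too,
    // or they would dangle -- the same rule as in the code above.
    if (flags & (ORIGINAL | PROPAGATED))
      propagated.clear ();
  }
};
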
+
+
+// Adds a new tuple to the entry and updates the
+// rate indices and mean rate for the tuples.
+
+int
+TAO_Reconfig_Scheduler_Entry::
+insert_tuple (TAO_RT_Info_Tuple &tuple,
+ Tuple_Type tuple_type,
+ int replace)
+{
+ // Choose the appropriate tuple subset.
+ TUPLE_SET *set_ptr = (tuple_type == ORIGINAL) ? & orig_tuple_subset_ : & prop_tuple_subset_;
+
+ // Recompute rate indices.
+
+ tuple.rate_index = 0;
+ TAO_RT_Info_Tuple **tuple_ptr_ptr;
+
+ TUPLE_SET_ITERATOR tuple_iter (*set_ptr);
+
+ while (tuple_iter.done () == 0)
+ {
+ // Get a pointer to the tuple under the iterator.
+ if (tuple_iter.next (tuple_ptr_ptr) == 0
+ || tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR, "Failed to access tuple under iterator"), -1);
+ }
+
+ // Update existing tuples
+ if ((*tuple_ptr_ptr)->period > tuple.period)
+ {
+ // Move the tuple's rate index higher than any in the set
+ // with lower rates.
+ ++tuple.rate_index;
+ }
+ else if (replace && (*tuple_ptr_ptr)->period == tuple.period)
+ {
+ // If the replace flag is set, and there is already a tuple
+ // with the same rate in the set, just update that tuple and
+ // return.
+ **tuple_ptr_ptr = tuple;
+ return 1;
+ }
+ else
+ {
+ // Otherwise, just update the rate index of the subsequent
+ // tuples, which have the same or higher rates.
+ ++(*tuple_ptr_ptr)->rate_index;
+ }
+
+ tuple_iter.advance ();
+ }
+
+ // Update aggregate rate data, insert the tuple
+ if (tuple_type == ORIGINAL)
+ {
+ this->orig_tuple_period_sum_ += tuple.period;
+ ++this->orig_tuple_count_;
+ return (this->orig_tuple_subset_.insert (&tuple) < 0) ? -1 : 0;
+ }
+ else
+ {
+ this->prop_tuple_period_sum_ += tuple.period;
+ ++this->prop_tuple_count_;
+ return (this->prop_tuple_subset_.insert (&tuple) < 0) ? -1 : 0;
+ }
+}
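
insert_tuple keeps each operation's tuples ordered by rate and maintains a rate_index per tuple: the new tuple's index counts how many existing tuples have a strictly longer period (lower rate), while tuples at the same or higher rate get their indices bumped. A standalone sketch of just that index bookkeeping, ignoring the replace-on-equal-rate path and the period-sum accounting, with a plain vector in place of the ACE multiset:

#include <vector>

struct Tuple { unsigned long period; unsigned long rate_index; };

// Insert a tuple and recompute rate indices the same way insert_tuple does:
// index 0 corresponds to the operation's lowest rate.
void insert_tuple (std::vector<Tuple> &set, Tuple tuple)
{
  tuple.rate_index = 0;
  for (Tuple &existing : set)
    {
      if (existing.period > tuple.period)
        ++tuple.rate_index;        // existing tuple is slower, new one sits above it
      else
        ++existing.rate_index;     // existing tuple is same rate or faster, shift it up
    }
  set.push_back (tuple);
}
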
+
+
+// Updates a matching tuple.
+
+int
+TAO_Reconfig_Scheduler_Entry::
+update_tuple (TAO_RT_Info_Ex &info,
+ Tuple_Type tuple_type)
+{
+ // Choose the appropriate tuple subset.
+ TUPLE_SET *set_ptr = (tuple_type == ORIGINAL) ? & orig_tuple_subset_ : & prop_tuple_subset_;
+
+ // Find and update the first matching tuple, if any.
+
+ TAO_RT_Info_Tuple **tuple_ptr_ptr;
+ TUPLE_SET_ITERATOR tuple_iter (*set_ptr);
+
+ while (tuple_iter.done () == 0)
+ {
+ // Get a pointer to the tuple under the iterator.
+ if (tuple_iter.next (tuple_ptr_ptr) == 0
+ || tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR, "Failed to access tuple under iterator"), -1);
+ }
+ else if ((*tuple_ptr_ptr)->period < info.period)
+ {
+ // If we've hit a tuple with a shorter period (higher rate),
+ // then we're done.
+ break;
+ }
+ else if ((*tuple_ptr_ptr)->period == info.period)
+ {
+ // If the replace flag is set, and there is already a tuple
+ // with the same rate in the set, just update that tuple and
+ // return.
+ **tuple_ptr_ptr = info;
+ return 1;
+ }
+
+ tuple_iter.advance ();
+ }
+
+ return 0;
+}
+
+
+// Registers tuples into the passed tuple pointer array.
+int
+TAO_Reconfig_Scheduler_Entry::
+register_tuples (TAO_RT_Info_Tuple ** tuple_ptr_array,
+ long &tuple_count)
+{
+ // Iterate over the tuples, adding them to the pointer array.
+
+ TAO_RT_Info_Tuple **tuple_ptr_ptr;
+ TUPLE_SET_ITERATOR tuple_iter (orig_tuple_subset_);
+
+ while (tuple_iter.done () == 0)
+ {
+ // Get a pointer to the tuple under the iterator.
+ if (tuple_iter.next (tuple_ptr_ptr) == 0
+ || tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR, "Failed to access tuple under iterator"), -1);
+ }
+ else
+ {
+ tuple_ptr_array [tuple_count] = *tuple_ptr_ptr;
+ ++tuple_count;
+ }
+
+ tuple_iter.advance ();
+ }
+
+ return 0;
+}
+
+
+
// Accessor for original RT_Info data.
-RtecScheduler::RT_Info &
+TAO_RT_Info_Ex &
TAO_Reconfig_Scheduler_Entry::orig_rt_info_data ()
{
return orig_rt_info_data_;
@@ -73,7 +469,7 @@ TAO_Reconfig_Scheduler_Entry::orig_rt_info_data ()
// Mutator for stored original RT_Info data.
void
-TAO_Reconfig_Scheduler_Entry::orig_rt_info_data (RtecScheduler::RT_Info &data)
+TAO_Reconfig_Scheduler_Entry::orig_rt_info_data (TAO_RT_Info_Ex &data)
{
// Only store the information that can be updated by the public interface at run-time.
this->orig_rt_info_data_.worst_case_execution_time = data.worst_case_execution_time;
@@ -85,11 +481,12 @@ TAO_Reconfig_Scheduler_Entry::orig_rt_info_data (RtecScheduler::RT_Info &data)
this->orig_rt_info_data_.quantum = data.quantum;
this->orig_rt_info_data_.threads = data.threads;
this->orig_rt_info_data_.info_type = data.info_type;
+ this->orig_rt_info_data_.enabled = data.enabled;
}
// Accessor for actual RT_Info pointer.
-RtecScheduler::RT_Info *
+TAO_RT_Info_Ex *
TAO_Reconfig_Scheduler_Entry::
actual_rt_info ()
{
@@ -101,7 +498,7 @@ actual_rt_info ()
void
TAO_Reconfig_Scheduler_Entry::
-actual_rt_info (RtecScheduler::RT_Info *rt_info)
+actual_rt_info (TAO_RT_Info_Ex *rt_info)
{
this->actual_rt_info_ = rt_info;
}
@@ -293,7 +690,7 @@ has_unresolved_local_dependencies (int i)
this->has_unresolved_local_dependencies_ = i;
}
-
+/* WSOA merge - commented out
// Accessor for effective period of corresponding RT_Info.
RtecScheduler::Period_t
@@ -332,7 +729,7 @@ effective_exec_multiplier (CORBA::Long l)
{
this->effective_exec_multiplier_ = l;
}
-
+*/
///////////////////////////
// TAO_RSE_Reset_Visitor //
@@ -356,7 +753,20 @@ TAO_RSE_Reset_Visitor::visit (TAO_Reconfig_Scheduler_Entry &rse)
// visitor is applied prior to a DFS traversal, in which callers
// *unset* the thread delineator status of any of their called
// operations that do not specify a period or threads.
- rse.is_thread_delineator (1);
+
+ if (rse.actual_rt_info ()->enabled != RtecScheduler::RT_INFO_NON_VOLATILE)
+ {
+ rse.is_thread_delineator (1);
+
+ // Only reset the period for entries that are not root nodes. Added by BRM.
+ if (rse.actual_rt_info ()->threads == 0)
+ {
+ rse.actual_rt_info ()->period = 0;
+ }
+ }
+
+ // Remove the propagated tuples in the entry.
+ rse.remove_tuples (TAO_Reconfig_Scheduler_Entry::PROPAGATED);
rse.fwd_dfs_status (TAO_Reconfig_Scheduler_Entry::NOT_VISITED);
rse.rev_dfs_status (TAO_Reconfig_Scheduler_Entry::NOT_VISITED);
@@ -366,30 +776,184 @@ TAO_RSE_Reset_Visitor::visit (TAO_Reconfig_Scheduler_Entry &rse)
rse.rev_finished (-1);
rse.has_unresolved_remote_dependencies (0);
rse.has_unresolved_local_dependencies (0);
+ rse.aggregate_exec_time (rse.actual_rt_info ()->worst_case_execution_time);
+ rse.current_admitted_tuple (0);
+ //WSOA merge - commented out
// These settings are used for a conservative but
// efficient approach to estimating utilization:
// for an exact algorithm using frame merging,
// other initial settings might be needed.
- rse.effective_exec_multiplier (0);
- rse.effective_period (0);
+ //rse.effective_exec_multiplier (0);
+ //rse.effective_period (0);
return 0;
}
+// Accessor for effective execution time of corresponding RT_Info.
+
+RtecScheduler::Time
+TAO_Reconfig_Scheduler_Entry::
+aggregate_exec_time ()
+{
+ return this->aggregate_exec_time_;
+}
+// Mutator for effective execution time of corresponding RT_Info.
-///////////////////////////////////////////
-// class TAO_MUF_Reconfig_Sched_Strategy //
-///////////////////////////////////////////
+void
+TAO_Reconfig_Scheduler_Entry::
+aggregate_exec_time (RtecScheduler::Time t)
+{
+ this->aggregate_exec_time_ = t;
+}
+
+// Accessor for the sum of periods for tuples directly associated
+// with the entry.
+RtecScheduler::Period_t
+TAO_Reconfig_Scheduler_Entry::
+orig_tuple_period_sum ()
+{
+ return orig_tuple_period_sum_;
+}
+
+
+// Mutator for the sum of periods for tuples directly associated
+// with the entry.
+void
+TAO_Reconfig_Scheduler_Entry::
+orig_tuple_period_sum (RtecScheduler::Period_t p)
+{
+ orig_tuple_period_sum_ = p;
+}
+
+
+// Accessor for the sum of periods for tuples propagated via
+// dependencies on other entries.
+RtecScheduler::Period_t
+TAO_Reconfig_Scheduler_Entry::
+prop_tuple_period_sum ()
+{
+ return prop_tuple_period_sum_;
+}
+
+
+// Mutator for the sum of periods for tuples propagated via
+// dependencies on other entries.
+void
+TAO_Reconfig_Scheduler_Entry::
+prop_tuple_period_sum (RtecScheduler::Period_t p)
+{
+ prop_tuple_period_sum_ = p;
+}
+
+
+// Accessor for the number of tuples directly associated with the
+// entry.
+u_int
+TAO_Reconfig_Scheduler_Entry::
+orig_tuple_count ()
+{
+ return orig_tuple_count_;
+}
+
+
+// Mutator for the number of tuples directly associated with the
+// entry.
+void
+TAO_Reconfig_Scheduler_Entry::
+orig_tuple_count (u_int c)
+{
+ orig_tuple_count_ = c;
+}
+
+
+// Accessor for the number of tuples propagated via dependencies on
+// other entries.
+u_int
+TAO_Reconfig_Scheduler_Entry::
+prop_tuple_count ()
+{
+ return prop_tuple_count_;
+}
+
+
+// Mutator for the number of tuples propagated via dependencies on
+// other entries.
+void
+TAO_Reconfig_Scheduler_Entry::
+prop_tuple_count (u_int c)
+{
+ prop_tuple_count_ = c;
+}
+
+
+// Accessor for the set of tuples directly associated with the
+// entry.
+TUPLE_SET &
+TAO_Reconfig_Scheduler_Entry::
+orig_tuple_subset ()
+{
+ return orig_tuple_subset_;
+}
+
+
+// Accessor for the set of tuples propagated via dependencies on
+// other entries.
+TUPLE_SET &
+TAO_Reconfig_Scheduler_Entry::
+prop_tuple_subset ()
+{
+ return prop_tuple_subset_;
+}
+
+
+TAO_RT_Info_Tuple *
+TAO_Reconfig_Scheduler_Entry::
+current_admitted_tuple ()
+{
+ return current_admitted_tuple_;
+}
+
+
+void
+TAO_Reconfig_Scheduler_Entry::
+current_admitted_tuple (TAO_RT_Info_Tuple * t)
+{
+ current_admitted_tuple_ = t;
+}
+
+// Accessor for flag indicating whether or not node is enabled.
+
+RtecScheduler::RT_Info_Enabled_Type_t
+TAO_Reconfig_Scheduler_Entry::
+enabled_state () const
+{
+ return this->enabled_;
+}
+
+
+// Mutator for flag indicating whether or not node is enabled.
+
+void
+TAO_Reconfig_Scheduler_Entry::
+enabled_state (RtecScheduler::RT_Info_Enabled_Type_t et)
+{
+ this->enabled_ = et;
+}
+
+
+////////////////////////////////////////////
+// class TAO_Reconfig_Sched_Strategy_Base //
+////////////////////////////////////////////
// Ordering function to compare the DFS finish times of
// two task entries, so qsort orders these in topological
// order, with the higher times *first*
int
-TAO_MUF_Reconfig_Sched_Strategy::comp_entry_finish_times (const void *first, const void *second)
+TAO_Reconfig_Sched_Strategy_Base::comp_entry_finish_times (const void *first, const void *second)
{
const TAO_Reconfig_Scheduler_Entry *first_entry =
* ACE_reinterpret_cast (const TAO_Reconfig_Scheduler_Entry *const *,
@@ -409,6 +973,16 @@ TAO_MUF_Reconfig_Sched_Strategy::comp_entry_finish_times (const void *first, con
return -1;
}
+ // sort disabled entries to the end
+ if (first_entry->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return (second_entry->enabled_state () == RtecScheduler::RT_INFO_DISABLED) ? 0 : 1;
+ }
+ else if (second_entry->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return -1;
+ }
+
// Sort entries with higher forward DFS finishing times before those
// with lower forward DFS finishing times.
if (first_entry->fwd_finished () >
@@ -425,12 +999,88 @@ TAO_MUF_Reconfig_Sched_Strategy::comp_entry_finish_times (const void *first, con
return 0;
}
-// Ordering function used to qsort an array of TAO_Reconfig_Scheduler_Entry
-// pointers into a total <priority, subpriority> ordering. Returns -1 if the
-// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+// Determines whether or not an entry is critical, based on operation characteristics.
+// returns 1 if critical, 0 if not
int
-TAO_MUF_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void *t)
+TAO_Reconfig_Sched_Strategy_Base::is_critical (TAO_Reconfig_Scheduler_Entry &rse)
+{
+ // Look at the underlying RT_Info's criticality field.
+ return (rse.actual_rt_info ()->criticality == RtecScheduler::HIGH_CRITICALITY ||
+ rse.actual_rt_info ()->criticality == RtecScheduler::VERY_HIGH_CRITICALITY)
+ ? 1 : 0;
+}
+
+// Determines whether or not a tuple is critical, based on operation
+// characteristics. returns 1 if critical, 0 if not
+
+int
+TAO_Reconfig_Sched_Strategy_Base::is_critical (TAO_RT_Info_Tuple &t)
+{
+ // Look at the underlying RT_Info's criticality field.
+ return (t.criticality == RtecScheduler::HIGH_CRITICALITY ||
+ t.criticality == RtecScheduler::VERY_HIGH_CRITICALITY)
+ ? 1 : 0;
+}
+
+
+// Compares two entries by subpriority alone. Returns -1 if the first
+// one is higher, 0 if they're the same, and 1 if the second one is
+// higher.
+
+int
+TAO_Reconfig_Sched_Strategy_Base::compare_subpriority (TAO_Reconfig_Scheduler_Entry &lhs,
+ TAO_Reconfig_Scheduler_Entry &rhs)
+{
+ // First, compare importance.
+
+ if (lhs.actual_rt_info ()->importance > rhs.actual_rt_info ()->importance)
+ {
+ return -1;
+ }
+ else if (lhs.actual_rt_info ()->importance < rhs.actual_rt_info ()->importance)
+ {
+ return 1;
+ }
+
+ // Same importance, so look at dfs finish time as a tiebreaker.
+
+ else if (lhs.fwd_finished () > rhs.fwd_finished ())
+ {
+ return -1;
+ }
+ else if (lhs.fwd_finished () < rhs.fwd_finished ())
+ {
+ return 1;
+ }
+
+ // Same dfs finish time, so look at handle as a tiebreaker.
+
+ else if (lhs.actual_rt_info ()->handle > rhs.actual_rt_info ()->handle)
+ {
+ return -1;
+ }
+ else if (lhs.actual_rt_info ()->handle < rhs.actual_rt_info ()->handle)
+ {
+ return 1;
+ }
+
+ // They're the same if we got here.
+ return 0;
+}
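
compare_subpriority is a three-level tiebreaker: importance first, then the forward-DFS finishing time (so callers outrank the operations they call), then the RT_Info handle to make the ordering total and stable. A minimal standalone comparator showing the same cascade, with plain fields standing in for the entry accessors:

// Simplified stand-in for a scheduler entry: just the three tiebreaker fields.
struct Entry { int importance; long fwd_finished; int handle; };

// Returns -1 if lhs has the higher subpriority, 1 if rhs does, 0 if equal.
int compare_subpriority (const Entry &lhs, const Entry &rhs)
{
  if (lhs.importance != rhs.importance)
    return (lhs.importance > rhs.importance) ? -1 : 1;
  if (lhs.fwd_finished != rhs.fwd_finished)
    return (lhs.fwd_finished > rhs.fwd_finished) ? -1 : 1;
  if (lhs.handle != rhs.handle)
    return (lhs.handle > rhs.handle) ? -1 : 1;
  return 0;
}
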
+
+
+////////////////////////////////////////////////
+// class TAO_MUF_FAIR_Reconfig_Sched_Strategy //
+////////////////////////////////////////////////
+
+// Ordering function used to qsort an array of TAO_RT_Info_Tuple
+// pointers into a total <priority, subpriority> ordering. Returns -1
+// if the first one is higher, 0 if they're the same, and 1 if the
+// second one is higher.
+
+int
+TAO_MUF_FAIR_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void *t)
{
// Convert the passed pointers: the double cast is needed to
// make Sun C++ 4.2 happy.
@@ -451,15 +1101,25 @@ TAO_MUF_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void
return -1;
}
+ // sort disabled entries to the end
+ if ((*first)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED) ? 0 : 1;
+ }
+ else if ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return -1;
+ }
+
int result =
- TAO_MUF_Reconfig_Sched_Strategy::priority_diff (*((*first)->actual_rt_info ()),
- *((*second)->actual_rt_info ()));
+ TAO_MUF_FAIR_Reconfig_Sched_Strategy::compare_priority (**first,
+ **second);
// Check whether they were distinguished by priority.
if (result == 0)
{
- return TAO_MUF_Reconfig_Sched_Strategy::compare_subpriority (**first,
- **second);
+ return TAO_Reconfig_Sched_Strategy_Base::compare_subpriority (**first,
+ **second);
}
else
{
@@ -468,66 +1128,102 @@ TAO_MUF_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void
}
-// Compares two entries by priority alone. Returns -1 if the
-// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+// Ordering function used to qsort an array of RT_Info_Tuple
+// pointers into a total ordering for admission control. Returns
+// -1 if the first one is higher, 0 if they're the same, and 1 if
+// the second one is higher.
int
-TAO_MUF_Reconfig_Sched_Strategy::compare_priority (TAO_Reconfig_Scheduler_Entry &s,
- TAO_Reconfig_Scheduler_Entry &t)
+TAO_MUF_FAIR_Reconfig_Sched_Strategy::total_admission_comp (const void *s,
+ const void *t)
{
- // Simply call the corresponding comparison based on the underlying rt_infos.
- return TAO_MUF_Reconfig_Sched_Strategy::priority_diff (*s.actual_rt_info (),
- *t.actual_rt_info ());
-}
+ // Convert the passed pointers: the double cast is needed to
+ // make Sun C++ 4.2 happy.
+ TAO_RT_Info_Tuple **first =
+ ACE_reinterpret_cast (TAO_RT_Info_Tuple **,
+ ACE_const_cast (void *, s));
+ TAO_Reconfig_Scheduler_Entry * first_entry =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ (*first)->volatile_token);
-// Compares two entries by subpriority alone. Returns -1 if the
-// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+ TAO_RT_Info_Tuple **second =
+ ACE_reinterpret_cast (TAO_RT_Info_Tuple **,
+ ACE_const_cast (void *, t));
-int
-TAO_MUF_Reconfig_Sched_Strategy::compare_subpriority (TAO_Reconfig_Scheduler_Entry &s,
- TAO_Reconfig_Scheduler_Entry &t)
-{
- // @@ TO DO: add dependency hash tables to strategy, use them to look for
- // *direct* dependencies between two nodes.
+ TAO_Reconfig_Scheduler_Entry * second_entry =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ (*second)->volatile_token);
- // Compare importance.
- if (s.actual_rt_info ()->importance > t.actual_rt_info ()->importance)
+ // Check the converted pointers.
+ if (first == 0 || *first == 0)
{
- return -1;
+ return (second == 0 || *second == 0) ? 0 : 1;
}
- else if (s.actual_rt_info ()->importance < t.actual_rt_info ()->importance)
+ else if (second == 0 || *second == 0)
{
- return 1;
+ return -1;
}
- // Same importance, so look at dfs finish time as a tiebreaker.
- else if (s.fwd_finished () > t.fwd_finished ())
+
+ // sort disabled tuples to the end
+ if ((*first)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED) ? 0 : 1;
+ }
+ else if ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return -1;
+ }
+
+ // First, compare according to rate index.
+
+ if ((*first)->rate_index < (*second)->rate_index)
{
return -1;
}
- else if (s.fwd_finished () < t.fwd_finished ())
+ else if ((*second)->rate_index < (*first)->rate_index)
{
return 1;
}
- // They're the same if we got here.
+ // Then compare by priority.
+
+ int result =
+ TAO_MUF_FAIR_Reconfig_Sched_Strategy::compare_priority (**first, **second);
+ if (result != 0)
+ {
+ return result;
+ }
+
+ // Then compare by subpriority.
+
+ result = TAO_Reconfig_Sched_Strategy_Base::compare_subpriority (*first_entry,
+ *second_entry);
+ if (result != 0)
+ {
+ return result;
+ }
+
return 0;
}
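
The admission comparator above ranks tuples for admission control: disabled tuples sink to the end, then a lower rate_index wins (every operation gets its lowest rate considered before any operation's higher rates, which is the FAIR part), then MUF criticality, then subpriority. A condensed standalone model of that ordering, with all types simplified stand-ins:

struct Tuple
{
  bool disabled;
  unsigned long rate_index;    // 0 == the operation's lowest rate
  int criticality;             // higher == more critical (MUF)
  int subpriority;             // higher == dispatched earlier within a priority
};

// True if a should be admitted before b, mirroring total_admission_comp.
bool admit_before (const Tuple &a, const Tuple &b)
{
  if (a.disabled != b.disabled)       return !a.disabled;            // disabled last
  if (a.rate_index != b.rate_index)   return a.rate_index < b.rate_index;
  if (a.criticality != b.criticality) return a.criticality > b.criticality;
  return a.subpriority > b.subpriority;
}

// Usage sketch: std::sort (tuples.begin (), tuples.end (), admit_before);
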
-// Compares two RT_Infos by priority alone. Returns -1 if the
+
+// Compares two RT_Info entries by priority alone. Returns -1 if the
// first one is higher, 0 if they're the same, and 1 if the second one is higher.
int
-TAO_MUF_Reconfig_Sched_Strategy::priority_diff (RtecScheduler::RT_Info &s,
- RtecScheduler::RT_Info &t)
+TAO_MUF_FAIR_Reconfig_Sched_Strategy::compare_priority (TAO_Reconfig_Scheduler_Entry &lhs,
+ TAO_Reconfig_Scheduler_Entry &rhs)
{
// In MUF, priority is per criticality level: compare criticalities.
- if (s.criticality > t.criticality)
+ if (lhs.actual_rt_info ()->criticality >
+ rhs.actual_rt_info ()->criticality)
{
return -1;
}
- else if (s.criticality < t.criticality)
+ else if (lhs.actual_rt_info ()->criticality <
+ rhs.actual_rt_info ()->criticality)
{
return 1;
}
@@ -537,24 +1233,34 @@ TAO_MUF_Reconfig_Sched_Strategy::priority_diff (RtecScheduler::RT_Info &s,
}
-// Determines whether or not an entry is critical, based on operation characteristics.
-// returns 1 if critical, 0 if not
+// Compares two RT_Info tuples by priority alone. Returns -1 if the
+// first one is higher, 0 if they're the same, and 1 if the second one is higher.
int
-TAO_MUF_Reconfig_Sched_Strategy::is_critical (TAO_Reconfig_Scheduler_Entry &rse)
+TAO_MUF_FAIR_Reconfig_Sched_Strategy::compare_priority (TAO_RT_Info_Tuple &lhs,
+ TAO_RT_Info_Tuple &rhs)
{
- // Look at the underlying RT_Info's criticality field.
- return (rse.actual_rt_info ()->criticality == RtecScheduler::HIGH_CRITICALITY ||
- rse.actual_rt_info ()->criticality == RtecScheduler::VERY_HIGH_CRITICALITY)
- ? 1 : 0;
+ // In MUF, priority is per criticality level: compare criticalities.
+ if (lhs.criticality > rhs.criticality)
+ {
+ return -1;
+ }
+ else if (lhs.criticality < rhs.criticality)
+ {
+ return 1;
+ }
+
+ // They're the same if we got here.
+ return 0;
}
+
// Fills in a static dispatch configuration for a priority level, based
// on the operation characteristics of a representative scheduling entry.
int
-TAO_MUF_Reconfig_Sched_Strategy::assign_config (RtecScheduler::Config_Info &info,
- TAO_Reconfig_Scheduler_Entry &rse)
+TAO_MUF_FAIR_Reconfig_Sched_Strategy::assign_config (RtecScheduler::Config_Info &info,
+ TAO_Reconfig_Scheduler_Entry &rse)
{
// Global and thread priority of dispatching queue are simply
// those assigned the representative operation it will dispatch.
@@ -567,56 +1273,20 @@ TAO_MUF_Reconfig_Sched_Strategy::assign_config (RtecScheduler::Config_Info &info
return 0;
}
-///////////////////////////////////////////
-// class TAO_RMS_Reconfig_Sched_Strategy //
-///////////////////////////////////////////
-// Ordering function to compare the DFS finish times of
-// two task entries, so qsort orders these in topological
-// order, with the higher times *first*
-int
-TAO_RMS_Reconfig_Sched_Strategy::comp_entry_finish_times (const void *first, const void *second)
-{
- const TAO_Reconfig_Scheduler_Entry *first_entry =
- * ACE_reinterpret_cast (const TAO_Reconfig_Scheduler_Entry *const *,
- first);
-
- const TAO_Reconfig_Scheduler_Entry *second_entry =
- * ACE_reinterpret_cast (const TAO_Reconfig_Scheduler_Entry *const *,
- second);
-
- // sort blank entries to the end
- if (! first_entry)
- {
- return (second_entry) ? 1 : 0;
- }
- else if (! second_entry)
- {
- return -1;
- }
- // Sort entries with higher forward DFS finishing times before those
- // with lower forward DFS finishing times.
- if (first_entry->fwd_finished () >
- second_entry->fwd_finished ())
- {
- return -1;
- }
- else if (first_entry->fwd_finished () <
- second_entry->fwd_finished ())
- {
- return 1;
- }
+///////////////////////////////////////////////////
+// class TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy //
+///////////////////////////////////////////////////
- return 0;
-}
-// Ordering function used to qsort an array of TAO_Reconfig_Scheduler_Entry
-// pointers into a total <priority, subpriority> ordering. Returns -1 if the
-// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+// Ordering function used to qsort an array of TAO_RT_Info_Tuple
+// pointers into a total <priority, subpriority> ordering. Returns -1
+// if the first one is higher, 0 if they're the same, and 1 if the
+// second one is higher.
int
-TAO_RMS_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void *t)
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void *t)
{
// Convert the passed pointers: the double cast is needed to
// make Sun C++ 4.2 happy.
@@ -637,15 +1307,28 @@ TAO_RMS_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void
return -1;
}
+ // sort disabled entries to the end
+ if ((*first)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED) ? 0 : 1;
+ }
+ else if ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return -1;
+ }
+
+
+ // Check whether they are distinguished by priority, and if not,
+ // then by subpriority.
+
int result =
- TAO_RMS_Reconfig_Sched_Strategy::priority_diff (*((*first)->actual_rt_info ()),
- *((*second)->actual_rt_info ()));
+ TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_priority (**first,
+ **second);
- // Check whether they were distinguished by priority.
if (result == 0)
{
- return TAO_RMS_Reconfig_Sched_Strategy::compare_subpriority (**first,
- **second);
+ return TAO_Reconfig_Sched_Strategy_Base::compare_subpriority (**first,
+ **second);
}
else
{
@@ -654,104 +1337,235 @@ TAO_RMS_Reconfig_Sched_Strategy::total_priority_comp (const void *s, const void
}
-// Compares two entries by priority alone. Returns -1 if the
-// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+// Ordering function used to qsort an array of RT_Info_Tuple
+// pointers into a total ordering for admission control. Returns
+// -1 if the first one is higher, 0 if they're the same, and 1 if
+// the second one is higher.
int
-TAO_RMS_Reconfig_Sched_Strategy::compare_priority (TAO_Reconfig_Scheduler_Entry &s,
- TAO_Reconfig_Scheduler_Entry &t)
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::total_admission_comp (const void *s,
+ const void *t)
{
- // Simply call the corresponding comparison based on the underlying rt_infos.
- return TAO_RMS_Reconfig_Sched_Strategy::priority_diff (*s.actual_rt_info (),
- *t.actual_rt_info ());
-}
+ // Convert the passed pointers: the double cast is needed to
+ // make Sun C++ 4.2 happy.
+ TAO_RT_Info_Tuple **first =
+ ACE_reinterpret_cast (TAO_RT_Info_Tuple **,
+ ACE_const_cast (void *, s));
+ TAO_Reconfig_Scheduler_Entry * first_entry =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ (*first)->volatile_token);
-// Compares two entries by subpriority alone. Returns -1 if the
-// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+ TAO_RT_Info_Tuple **second =
+ ACE_reinterpret_cast (TAO_RT_Info_Tuple **,
+ ACE_const_cast (void *, t));
-int
-TAO_RMS_Reconfig_Sched_Strategy::compare_subpriority (TAO_Reconfig_Scheduler_Entry &s,
- TAO_Reconfig_Scheduler_Entry &t)
-{
- // @@ TO DO: add dependency hash tables to strategy, use them to look for
- // *direct* dependencies between two nodes.
+ TAO_Reconfig_Scheduler_Entry * second_entry =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ (*second)->volatile_token);
- // Compare importance.
- if (s.actual_rt_info ()->importance > t.actual_rt_info ()->importance)
+ // Check the converted pointers.
+ if (first == 0 || *first == 0)
+ {
+ return (second == 0 || *second == 0) ? 0 : 1;
+ }
+ else if (second == 0 || *second == 0)
{
return -1;
}
- else if (s.actual_rt_info ()->importance < t.actual_rt_info ()->importance)
+
+ // sort disabled tuples to the end
+ if ((*first)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED) ? 0 : 1;
+ }
+ else if ((*second)->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return -1;
+ }
+
+ // First, compare according to minimal rate index.
+
+ if ((*first)->rate_index == 0 && (*second)->rate_index != 0)
+ {
+ return -1;
+ }
+ else if ((*second)->rate_index == 0 && (*first)->rate_index != 0)
{
return 1;
}
- // Same importance, so look at dfs finish time as a tiebreaker.
- else if (s.fwd_finished () > t.fwd_finished ())
+
+ // Then compare by priority.
+
+ int result =
+ TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_criticality (**first,
+ **second);
+ if (result != 0)
+ {
+ return result;
+ }
+
+ // Then compare by subpriority.
+
+ result = TAO_Reconfig_Sched_Strategy_Base::compare_subpriority (*first_entry,
+ *second_entry);
+ if (result != 0)
+ {
+ return result;
+ }
+
+ // Finally, compare by rate index.
+
+ if ((*first)->rate_index < (*second)->rate_index)
{
return -1;
}
- else if (s.fwd_finished () < t.fwd_finished ())
+ else if ((*second)->rate_index < (*first)->rate_index)
{
return 1;
}
- // They're the same if we got here.
return 0;
}
-// Compares two RT_Infos by priority alone. Returns -1 if the
+// Compares two RT_Info entries by criticality alone. Returns -1 if the
// first one is higher, 0 if they're the same, and 1 if the second one is higher.
int
-TAO_RMS_Reconfig_Sched_Strategy::priority_diff (RtecScheduler::RT_Info &s,
- RtecScheduler::RT_Info &t)
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_criticality(TAO_Reconfig_Scheduler_Entry &lhs,
+ TAO_Reconfig_Scheduler_Entry &rhs)
{
- // In RMS, priority is per criticality level: compare criticalities.
- if (s.period > t.period)
+ // In RMS_Dyn, priority is first partitioned per criticality level: compare criticalities.
+
+ if (lhs.actual_rt_info ()->criticality > rhs.actual_rt_info ()->criticality)
{
return -1;
}
- else if (s.period < t.period)
+ else if (lhs.actual_rt_info ()->criticality < rhs.actual_rt_info ()->criticality)
{
return 1;
}
+ else
+ {
+ return 0;
+ }
+}
+
+// Compares two RT_Info tuples by criticality alone. Returns -1 if the
+// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+int
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_criticality(TAO_RT_Info_Tuple &lhs,
+ TAO_RT_Info_Tuple &rhs)
+{
+ if (lhs.criticality > rhs.criticality)
+ {
+ return -1;
+ }
+ else if (lhs.criticality < rhs.criticality)
+ {
+ return 1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+// Compares two RT_Info entries by priority alone. Returns -1 if the
+// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+
+int
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_priority (TAO_Reconfig_Scheduler_Entry &lhs,
+ TAO_Reconfig_Scheduler_Entry &rhs)
+{
+ // In RMS_Dyn, priority is first partitioned per criticality level: compare criticalities.
+ int result = TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_criticality(lhs, rhs);
+
+ if (result != 0)
+ {
+ return result;
+ }
+
+ // Same criticality: if high criticality, differentiate by rate.
+ if (TAO_Reconfig_Sched_Strategy_Base::is_critical (rhs))
+ {
+ if (lhs.actual_rt_info ()->period < rhs.actual_rt_info ()->period)
+ {
+ return -1;
+ }
+ else if (lhs.actual_rt_info ()->period > rhs.actual_rt_info ()->period)
+ {
+ return 1;
+ }
+ }
// They're the same if we got here.
return 0;
}
-// Determines whether or not an entry is critical, based on operation characteristics.
-// returns 1 if critical, 0 if not
+// Compares two RT_Info tuples by priority alone. Returns -1 if the
+// first one is higher, 0 if they're the same, and 1 if the second one is higher.
int
-TAO_RMS_Reconfig_Sched_Strategy::is_critical (TAO_Reconfig_Scheduler_Entry &rse)
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::compare_priority (TAO_RT_Info_Tuple &lhs,
+ TAO_RT_Info_Tuple &rhs)
{
- // Look at the underlying RT_Info's criticality field.
- return (rse.actual_rt_info ()->criticality == RtecScheduler::HIGH_CRITICALITY ||
- rse.actual_rt_info ()->criticality == RtecScheduler::VERY_HIGH_CRITICALITY)
- ? 1 : 0;
+ // In RMS_Dyn, priority is first partitioned per criticality level:
+ // compare criticalities.
+
+ if (lhs.criticality > rhs.criticality)
+ {
+ return -1;
+ }
+ else if (lhs.criticality < rhs.criticality)
+ {
+ return 1;
+ }
+
+ // Same criticality: if high criticality, differentiate by rate.
+ else if (TAO_Reconfig_Sched_Strategy_Base::is_critical (rhs))
+ {
+ if (lhs.period < rhs.period)
+ {
+ return -1;
+ }
+ else if (lhs.period > rhs.period)
+ {
+ return 1;
+ }
+ }
+
+ // They're the same if we got here.
+ return 0;
}
+
// Fills in a static dispatch configuration for a priority level, based
// on the operation characteristics of a representative scheduling entry.
int
-TAO_RMS_Reconfig_Sched_Strategy::assign_config (RtecScheduler::Config_Info &info,
- TAO_Reconfig_Scheduler_Entry &rse)
+TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy::assign_config (RtecScheduler::Config_Info &info,
+ TAO_Reconfig_Scheduler_Entry &rse)
{
// Global and thread priority of dispatching queue are simply
// those assigned the representative operation it will dispatch.
info.preemption_priority = rse.actual_rt_info ()->preemption_priority;
info.thread_priority = rse.actual_rt_info ()->priority;
- // Dispatching queues are all laxity-based in this strategy.
- info.dispatching_type = RtecScheduler::STATIC_DISPATCHING;
+ // Critical queues are static, and non-critical ones are
+ // laxity-based in this strategy.
+ if (TAO_Reconfig_Sched_Strategy_Base::is_critical (rse))
+ {
+ info.dispatching_type = RtecScheduler::STATIC_DISPATCHING;
+ }
+ else
+ {
+ info.dispatching_type = RtecScheduler::LAXITY_DISPATCHING;
+ }
return 0;
}
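
assign_config in the RMS_Dyn strategy above gives critical operations statically prioritized dispatching queues while non-critical operations fall back to laxity-based dispatching. A tiny standalone sketch of that decision; the enums are illustrative and not the RtecScheduler types:

// Illustrative stand-ins for the dispatching types and criticality levels.
enum class Dispatching { Static, Laxity };
enum class Criticality { VeryLow, Low, Medium, High, VeryHigh };

// Critical operations get a static queue; everything else runs under
// laxity-based dynamic dispatching, as in the strategy above.
Dispatching dispatching_for (Criticality c)
{
  const bool critical = (c == Criticality::High || c == Criticality::VeryHigh);
  return critical ? Dispatching::Static : Dispatching::Laxity;
}
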
-
#endif /* TAO_RECONFIG_SCHED_UTILS_C */
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.h b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.h
index 1b9e3bb151f..06c2a18da5e 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.h
+++ b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils.h
@@ -18,6 +18,10 @@
#define TAO_RECONFIG_SCHED_UTILS_H
#include /**/ "ace/pre.h"
+// Uncomment this to turn on some extra trace level debugging info,
+// comment it out to turn off that extra debugging info.
+#define SCHEDULER_LOGGING
+
#include "ace/config-all.h"
#include "orbsvcs/Scheduler_Factory.h"
@@ -28,10 +32,77 @@
# pragma once
#endif /* ACE_LACKS_PRAGMA_ONCE */
+struct TAO_RTSched_Export TAO_RT_Info_Ex
+ : public RtecScheduler::RT_Info
+ // = TITLE
+ // A wrapper class for the IDL-generated RT_Info operation descriptors.
+ //
+ // = DESCRIPTION
+ // This class provides (re)initialization methods and a validity flag
+ // for the underlying IDL-generated RT_Info descriptor.
+{
+ TAO_RT_Info_Ex ();
+ // Default Constructor.
+
+ TAO_RT_Info_Ex (const RtecScheduler::RT_Info &);
+ // Constructor from an RT_Info
+ // (also serves as a copy constructor).
+
+ virtual ~TAO_RT_Info_Ex ();
+ // Destructor.
+
+ void operator = (const RtecScheduler::RT_Info &);
+ // Assignment operator with an RT_Info on the RHS.
+
+ virtual void reset (u_long reset_flags);
+ // Resets output data members to initial values, and removes tuples
+ // corresponding to the reset flags.
+
+ virtual void enabled_state (RtecScheduler::RT_Info_Enabled_Type_t);
+ // Sets the info and underlying entry's enable states.
+
+ virtual RtecScheduler::RT_Info_Enabled_Type_t enabled_state ();
+ // Returns the info's enable state.
+};
+
+
+struct TAO_RTSched_Export TAO_RT_Info_Tuple
+ : public TAO_RT_Info_Ex
+ // = TITLE
+ // An implementation class used to wrap available operation descriptors.
+ // Each operation may have one or more tuples describing different rates,
+ // etc., for the same operation.
+ //
+ // = DESCRIPTION
+ // This class enables admission control within the Reconfig Scheduler,
+ // which offers improved performance compared to admission control
+ // outside the scheduler.
+{
+ TAO_RT_Info_Tuple ();
+ // Default Constructor.
+
+ TAO_RT_Info_Tuple (const RtecScheduler::RT_Info &);
+ // Constructor from an RT_Info.
+ // (also serves as a copy constructor).
+
+ virtual ~TAO_RT_Info_Tuple ();
+ // Destructor.
+
+ void operator = (const RtecScheduler::RT_Info &);
+ // Assignment operator with an RT_Info on the RHS.
+
+ int operator < (const TAO_RT_Info_Tuple &t);
+ // Less-than comparison operator: orders tuples by ascending rate (descending period).
+
+ u_long rate_index;
+ // Index of the tuple in the operation's ordered available rates
+};
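The less-than operator is only declared here. A rough sketch of the ordering its comment describes (ascending rate, i.e. descending period); the committed definition in the .cpp may differ in detail:

    int
    TAO_RT_Info_Tuple::operator < (const TAO_RT_Info_Tuple &t)
    {
      // A larger period means a lower rate, which orders earlier.
      return this->period > t.period;
    }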
+
class TAO_RTSched_Export TAO_Reconfig_Scheduler_Entry
// = TITLE
- // An implementation class used to store interim scheduling results
+ // An implementation class used to decouple the available descriptors
+ // from the admitted descriptors, and to store interim scheduling results
// such as DFS finishing order, etc.
//
// = DESCRIPTION
@@ -44,19 +115,40 @@ public:
// Info for DFS traversal, topological sort of call graph.
enum DFS_Status {NOT_VISITED, VISITED, FINISHED};
- TAO_Reconfig_Scheduler_Entry (RtecScheduler::RT_Info &rt_info);
+ enum Tuple_Type {ORIGINAL = 0x01UL, PROPAGATED = 0x02UL};
+
+ TAO_Reconfig_Scheduler_Entry (TAO_RT_Info_Ex &rt_info);
// Constructor.
- RtecScheduler::RT_Info & orig_rt_info_data (void);
+ ~TAO_Reconfig_Scheduler_Entry ();
+ // Destructor.
+
+ void remove_tuples (u_long tuple_flags = ORIGINAL | PROPAGATED);
+ // Removes all tuples from the entry.
+
+ int insert_tuple (TAO_RT_Info_Tuple &tuple,
+ Tuple_Type tuple_type = ORIGINAL,
+ int replace = 0);
+ // Inserts a tuple into the appropriate tuple multiset.
+
+ int update_tuple (TAO_RT_Info_Ex &info,
+ Tuple_Type tuple_type = ORIGINAL);
+ // Updates a matching tuple.
+
+ int register_tuples (TAO_RT_Info_Tuple ** tuple_ptr_array,
+ long &tuple_count);
+ // Registers tuples into the passed tuple pointer array.
+
+ TAO_RT_Info_Ex & orig_rt_info_data (void);
// Accessor for stored original RT_Info data.
- void orig_rt_info_data (RtecScheduler::RT_Info &data);
+ void orig_rt_info_data (TAO_RT_Info_Ex &data);
// Mutator for stored original RT_Info data.
- RtecScheduler::RT_Info * actual_rt_info ();
+ TAO_RT_Info_Ex * actual_rt_info ();
// Accessor for actual RT_Info.
- void actual_rt_info (RtecScheduler::RT_Info *);
+ void actual_rt_info (TAO_RT_Info_Ex *);
// Mutator for actual RT_Info.
long fwd_discovered () const;
@@ -119,6 +211,74 @@ public:
// Mutator for flag indicating whether node has unresolved local
// dependencies.
+ RtecScheduler::Time aggregate_exec_time ();
+ // Accessor for effective execution time of the corresponding
+  // RT_Info and all of its disjunctively executed dependants (i.e.,
+  // those whose dispatching waveforms are additive).
+
+ void aggregate_exec_time (RtecScheduler::Time t);
+ // Mutator for effective execution time of the corresponding RT_Info
+ // and its disjunctively executed dependants.
+
+ RtecScheduler::Period_t orig_tuple_period_sum ();
+ // Accessor for the sum of periods for tuples directly associated
+ // with the entry. It can be used to compute the mean rate for the
+ // entry.
+
+ void orig_tuple_period_sum (RtecScheduler::Period_t p);
+ // Mutator for the sum of periods for tuples directly associated
+ // with the entry. It can be used to compute the mean rate for the
+ // entry.
+
+ RtecScheduler::Period_t prop_tuple_period_sum ();
+ // Accessor for the sum of periods for tuples propagated via
+ // dependencies on other entries. It can be used to compute the
+ // mean rate for the entry.
+
+ void prop_tuple_period_sum (RtecScheduler::Period_t p);
+ // Mutator for the sum of periods for tuples propagated via
+ // dependencies on other entries. It can be used to compute the
+ // mean rate for the entry.
+
+ u_int orig_tuple_count ();
+ // Accessor for the number of tuples directly associated with the
+ // entry.
+
+ void orig_tuple_count (u_int c);
+ // Mutator for the number of tuples directly associated with the
+ // entry.
+
+ u_int prop_tuple_count ();
+ // Accessor for the number of tuples propagated via dependencies on
+ // other entries.
+
+ void prop_tuple_count (u_int c);
+ // Mutator for the number of tuples propagated via dependencies on
+ // other entries.
+
+ TUPLE_SET& orig_tuple_subset ();
+ // Accessor for the set of tuples directly associated with the
+ // entry.
+
+ TUPLE_SET& prop_tuple_subset ();
+ // Accessor for the set of tuples propagated via dependencies on
+ // other entries.
+
+ TAO_RT_Info_Tuple * current_admitted_tuple ();
+ // Returns a pointer to the entry's most recently admitted tuple.
+ // The pointer is zero if no tuples have been admitted so far.
+
+ void current_admitted_tuple (TAO_RT_Info_Tuple *);
+ // Sets a pointer to the entry's most recently admitted tuple.
+ // The pointer is zero if no tuples have been admitted so far.
+
+ RtecScheduler::RT_Info_Enabled_Type_t enabled_state () const;
+ // Accessor for flag indicating whether or not node is enabled.
+
+ void enabled_state (RtecScheduler::RT_Info_Enabled_Type_t);
+ // Mutator for flag indicating whether or not node is enabled.
+
+/* WSOA merge - commented out
RtecScheduler::Period_t effective_period ();
// Accessor for effective period of corresponding RT_Info.
@@ -132,16 +292,17 @@ public:
void effective_exec_multiplier (CORBA::Long l);
// Mutator for effective execution time multiplier of corresponding
// RT_Info.
+*/
private:
- RtecScheduler::RT_Info orig_rt_info_data_;
+ TAO_RT_Info_Ex orig_rt_info_data_;
// Stores the values of operation characteristics as they were specified
// in the most recent call to the Reconfig_Scheduler's set () method.
// That way, the scheduler propagation pass can overwrite RT_Info fields
// without losing the original values. This is useful when
- RtecScheduler::RT_Info *actual_rt_info_;
+ TAO_RT_Info_Ex *actual_rt_info_;
   // Points to the actual RT_Info to which the scheduling entry corresponds.
DFS_Status fwd_dfs_status_;
@@ -173,12 +334,45 @@ private:
// Flag indicating whether or not there are unresolved local
// dependencies in the entry's dependency call chain.
+ RtecScheduler::Time aggregate_exec_time_;
+ // Effective execution time for corresponding RT_Info and its
+ // disjunctively executed dependants.
+
+ RtecScheduler::Period_t orig_tuple_period_sum_;
+ // Sum of periods for tuples directly associated with the entry. It
+ // can be used to compute the mean rate for the entry.
+
+ RtecScheduler::Period_t prop_tuple_period_sum_;
+ // The sum of periods for tuples propagated via dependencies on
+ // other entries. It can be used to compute the mean rate for the
+ // entry.
+
+ u_int orig_tuple_count_;
+ // The number of tuples directly associated with the entry.
+
+ u_int prop_tuple_count_;
+ // The number of tuples propagated via dependencies on other
+ // entries.
+
+ TUPLE_SET orig_tuple_subset_;
+ // The set of tuples directly associated with the entry.
+
+ TUPLE_SET prop_tuple_subset_;
+ // The set of tuples propagated via dependencies on other entries.
+
+ TAO_RT_Info_Tuple * current_admitted_tuple_;
+ // A pointer to the entry's most recently admitted tuple.
+
+ RtecScheduler::RT_Info_Enabled_Type_t enabled_;
+ // Flag indicating whether or not node is enabled.
+
+ /* - WSOA merge - commented out
CORBA::Long effective_exec_multiplier_;
// Effective execution time multiplier for corresponding RT_Info.
RtecScheduler::Period_t effective_period_;
// Effective period of corresponding RT_Info.
-
+ */
};
@@ -213,58 +407,104 @@ public:
TAO_RSE_Reset_Visitor ();
// Constructor.
+ virtual ~TAO_RSE_Reset_Visitor () {}
+ // Destructor.
+
virtual int visit (TAO_Reconfig_Scheduler_Entry &rse);
// Resets the fields in the entry to pre-DFS traversal states.
// Returns 0 on success and -1 on error.
};
+class TAO_RTSched_Export TAO_RT_Info_Tuple_Visitor
+ // = TITLE
+ // An abstract base class for RT_Info tuple visitors.
+ //
+ // = DESCRIPTION
+ // This class simplifies the reconfig scheduler implementation
+ // by giving a common interface for distinct visitors over the
+ // RT_Info tuples.
+{
+public:
+
+ virtual int visit (TAO_RT_Info_Tuple &) = 0;
+ // Visit a RT_Info tuple.
+
+};
+
+
+class TAO_RTSched_Export TAO_Reconfig_Sched_Strategy_Base
+ // = TITLE
+ // A base class for scheduling strategies
+ //
+  //    = DESCRIPTION
+  //      This class provides a DFS finish time comparison
+  //      function, a static subpriority comparison function, and a
+  //      criticality evaluation function for all scheduling strategies.
+{
+public:
+
+ static int comp_entry_finish_times (const void *first, const void *second);
+ // Ordering function to compare the DFS finish times of
+ // two task entries, so qsort orders these in topological
+ // order, with the higher times *first*.
+ static int is_critical (TAO_Reconfig_Scheduler_Entry &rse);
+  // Determines whether or not an entry is critical, based on
+  // operation characteristics. Returns 1 if critical, 0 if not.
+  static int is_critical (TAO_RT_Info_Tuple &t);
+  // Determines whether or not a tuple is critical, based on operation
+  // characteristics. Returns 1 if critical, 0 if not.
-class TAO_RTSched_Export TAO_MUF_Reconfig_Sched_Strategy
- // = TITLE
- // A scheduling strategy that implements the Maximum
- // Urgency First scheduling algorithm.
+ static int compare_subpriority (TAO_Reconfig_Scheduler_Entry &,
+ TAO_Reconfig_Scheduler_Entry &);
+ // Compares two entries by subpriority alone. Returns -1 if the
+ // first one is higher, 0 if they're the same, and 1 if the second one is higher.
+};
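The is_critical methods are declared but not defined in this header. One plausible criterion, sketched here as an assumption (the committed definition may use a different cutoff or a strategy-configured threshold):

    int
    TAO_Reconfig_Sched_Strategy_Base::is_critical (TAO_Reconfig_Scheduler_Entry &rse)
    {
      // Treat HIGH and VERY_HIGH criticality operations as critical.
      return (rse.actual_rt_info ()->criticality
              >= RtecScheduler::HIGH_CRITICALITY) ? 1 : 0;
    }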
+
+class TAO_RTSched_Export TAO_MUF_FAIR_Reconfig_Sched_Strategy
+ : public TAO_Reconfig_Sched_Strategy_Base
+ // = TITLE
+ // A scheduling strategy that implements the Maximum Urgency First
+ // scheduling algorithm with Fair Admission of Indexed Rates
+ // (FAIR).
//
// = DESCRIPTION
// The strategy assigns static thread and global priority according
// to operation criticality, assigns static subpriority according to
// importance and then topological order, and assigns a dispatching
// configuration with a minimum laxity dispatching queue for each
- // distinct criticality level.
+ // distinct criticality level. It admits operation tuples in order
+ // of ascending rate index, where the lowest rate for an operation
+ // has index 0, the next higher rate has index 1, etc.
{
public:
- static int comp_entry_finish_times (const void *first, const void *second);
- // Ordering function to compare the DFS finish times of
- // two task entries, so qsort orders these in topological
- // order, with the higher times *first*.
-
static int total_priority_comp (const void *, const void *);
- // Ordering function used to qsort an array of
- // TAO_Reconfig_Scheduler_Entry pointers into a total <priority,
- // subpriority> ordering. Returns -1 if the first one is higher, 0
- // if they're the same, and 1 if the second one is higher.
+ // Ordering function used to qsort an array of RT_Info_Tuple
+ // pointers into a total <priority, subpriority> ordering. Returns
+ // -1 if the first one is higher, 0 if they're the same, and 1 if
+ // the second one is higher.
+
+ static int total_admission_comp (const void *, const void *);
+ // Ordering function used to qsort an array of RT_Info_Tuple
+ // pointers into a total ordering for admission control. Returns
+ // -1 if the first one is higher, 0 if they're the same, and 1 if
+ // the second one is higher.
static int compare_priority (TAO_Reconfig_Scheduler_Entry &,
TAO_Reconfig_Scheduler_Entry &);
// Compares two entries by priority alone. Returns -1 if the
// first one is higher, 0 if they're the same, and 1 if the second one is higher.
-
- static int compare_subpriority (TAO_Reconfig_Scheduler_Entry &,
- TAO_Reconfig_Scheduler_Entry &);
- // Compares two entries by subpriority alone. Returns -1 if the
+ static int compare_priority (TAO_RT_Info_Tuple &,
+ TAO_RT_Info_Tuple &);
+ // Compares two tuples by priority alone. Returns -1 if the
// first one is higher, 0 if they're the same, and 1 if the second one is higher.
- static int priority_diff (RtecScheduler::RT_Info &s,
- RtecScheduler::RT_Info &t);
- // Compares two RT_Infos by priority alone. Returns -1 if the
- // first one is higher, 0 if they're the same, and 1 if the second one is higher.
-
- static int is_critical (TAO_Reconfig_Scheduler_Entry &rse);
- // Determines whether or not an entry is critical, based on operation characteristics.
- // returns 1 if critical, 0 if not
+ static int compare_admission_order (TAO_RT_Info_Tuple &,
+ TAO_RT_Info_Tuple &);
+ // Compares two tuples by the given admission ordering. Returns -1 if the
+ // first one is earlier, 0 if they're the same, and 1 if the second one is earlier.
static int assign_config (RtecScheduler::Config_Info &,
TAO_Reconfig_Scheduler_Entry &);
@@ -272,51 +512,69 @@ public:
// on the operation characteristics of a representative scheduling entry.
};
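The FAIR admission ordering described above (lowest rate index first) is declared but not shown in this hunk. A hedged sketch of what compare_admission_order could reduce to, using only the rate_index member introduced on TAO_RT_Info_Tuple; the committed version may also break ties by criticality or importance:

    int
    TAO_MUF_FAIR_Reconfig_Sched_Strategy::compare_admission_order (TAO_RT_Info_Tuple &lhs,
                                                                   TAO_RT_Info_Tuple &rhs)
    {
      if (lhs.rate_index < rhs.rate_index)
        return -1;  // lhs is admitted earlier.
      else if (lhs.rate_index > rhs.rate_index)
        return 1;   // rhs is admitted earlier.

      return 0;     // Same rate index: no ordering imposed here.
    }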
-class TAO_RTSched_Export TAO_RMS_Reconfig_Sched_Strategy
+
+class TAO_RTSched_Export TAO_RMS_Dyn_MNO_Reconfig_Sched_Strategy
+ : public TAO_Reconfig_Sched_Strategy_Base
// = TITLE
- // A scheduling strategy that implements the Maximum
- // Urgency First scheduling algorithm.
+ // A scheduling strategy that implements the Boeing RMS-Dynamic
+ // scheduling algorithm, and the Honeywell MNO admission control
+ // algorithm.
//
// = DESCRIPTION
- // The strategy assigns static thread and global priority according
- // to operation criticality, assigns static subpriority according to
- // importance and then topological order, and assigns a dispatching
- // configuration with a minimum laxity dispatching queue for each
- // distinct criticality level.
+ // The strategy assigns static thread and global priority
+ // according to criticality and rate, assigns static subpriority
+ // according to importance and then topological order, and assigns
+ // a dispatching configuration with a static dispatching queue for
+ // each high criticality rate, and a single minimum laxity
+ // dispatching queue for all low criticality operations. It
+ // admits the lowest rate-index tuple for each operation, then
+ // admits operations at their highest admissible rates in priority
+ // order.
{
public:
- static int comp_entry_finish_times (const void *first, const void *second);
- // Ordering function to compare the DFS finish times of
- // two task entries, so qsort orders these in topological
- // order, with the higher times *first*.
-
static int total_priority_comp (const void *, const void *);
- // Ordering function used to qsort an array of
- // TAO_Reconfig_Scheduler_Entry pointers into a total <priority,
- // subpriority> ordering. Returns -1 if the first one is higher, 0
- // if they're the same, and 1 if the second one is higher.
+ // Ordering function used to qsort an array of RT_Info_Tuple
+ // pointers into a total <priority, subpriority> ordering. Returns
+ // -1 if the first one is higher, 0 if they're the same, and 1 if
+ // the second one is higher.
+
+ static int total_admission_comp (const void *, const void *);
+ // Ordering function used to qsort an array of RT_Info_Tuple
+ // pointers into a total ordering for admission control. Returns
+ // -1 if the first one is higher, 0 if they're the same, and 1 if
+ // the second one is higher.
+
+  static int compare_criticality (TAO_Reconfig_Scheduler_Entry &lhs,
+                                  TAO_Reconfig_Scheduler_Entry &rhs);
+  // Compares two entries by criticality alone. Returns -1 if the
+  // first one is higher, 0 if they're the same, and 1 if the second one is higher.
+
+  static int compare_criticality (TAO_RT_Info_Tuple &lhs,
+                                  TAO_RT_Info_Tuple &rhs);
+  // Compares two tuples by criticality alone. Returns -1 if the
+  // first one is higher, 0 if they're the same, and 1 if the second one is higher.
static int compare_priority (TAO_Reconfig_Scheduler_Entry &,
TAO_Reconfig_Scheduler_Entry &);
// Compares two entries by priority alone. Returns -1 if the
// first one is higher, 0 if they're the same, and 1 if the second one is higher.
+ static int compare_priority (TAO_RT_Info_Tuple &,
+ TAO_RT_Info_Tuple &);
+ // Compares two tuples by priority alone. Returns -1 if the
+ // first one is higher, 0 if they're the same, and 1 if the second one is higher.
+
+ static int compare_admission_order (TAO_RT_Info_Tuple &,
+ TAO_RT_Info_Tuple &);
+  // Compares two tuples by the admission ordering policy. Returns -1 if the
+ // first one is earlier, 0 if they're the same, and 1 if the second one is earlier.
static int compare_subpriority (TAO_Reconfig_Scheduler_Entry &,
TAO_Reconfig_Scheduler_Entry &);
// Compares two entries by subpriority alone. Returns -1 if the
// first one is higher, 0 if they're the same, and 1 if the second one is higher.
- static int priority_diff (RtecScheduler::RT_Info &s,
- RtecScheduler::RT_Info &t);
- // Compares two RT_Infos by priority alone. Returns -1 if the
- // first one is higher, 0 if they're the same, and 1 if the second one is higher.
-
- static int is_critical (TAO_Reconfig_Scheduler_Entry &rse);
- // Determines whether or not an entry is critical, based on operation characteristics.
- // returns 1 if critical, 0 if not
-
static int assign_config (RtecScheduler::Config_Info &,
TAO_Reconfig_Scheduler_Entry &);
// Fills in a static dispatch configuration for a priority level, based
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.cpp b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.cpp
index d9919c85c6f..6882f6632c0 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.cpp
+++ b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.cpp
@@ -40,15 +40,7 @@ ACE_RCSID(Sched, Reconfig_Sched_Utils_T, "$Id$")
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_Dependency_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map, ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & rt_info_map)
+ (DEPENDENCY_SET_MAP & dependency_map, RT_INFO_MAP & rt_info_map)
: dependency_map_ (dependency_map),
rt_info_map_ (rt_info_map)
{
@@ -65,6 +57,7 @@ visit (TAO_Reconfig_Scheduler_Entry &rse)
{
int result = 0;
+ /* WSOA merge - commented out
// Call unconditional action method, which performs any necessary
// modifications that are applied to each node unconditionally.
if (this->unconditional_action (rse) < 0)
@@ -73,6 +66,7 @@ visit (TAO_Reconfig_Scheduler_Entry &rse)
"TAO_RSE_Dependency_Visitor::"
"visit: error from unconditional action.\n"), -1);
}
+ */
// Call precondition hook method, and only proceed if the
// precondition returns 0 for success.
@@ -103,15 +97,22 @@ visit (TAO_Reconfig_Scheduler_Entry &rse)
{
// Iterate over the set of dependencies for the current entry.
TAO_Reconfig_Scheduler_Entry * next_rse = 0;
- RtecScheduler::RT_Info *next_rt_info;
+ TAO_RT_Info_Ex *next_rt_info;
for (u_int i = 0; i < dependency_set->length (); ++i)
{
+ // Skip over disabled dependencies
+ if ((*dependency_set) [i].enabled == RtecBase::DEPENDENCY_DISABLED)
+ {
+ continue;
+ }
+
// Take the handle from the dependency and use it
// to obtain an RT_Info pointer from the map.
if (rt_info_map_.find ((*dependency_set) [i].rt_info,
next_rt_info) != 0)
{
- ACE_ERROR_RETURN ((LM_ERROR, "RT_Info not found.\n"), -1);
+ ACE_ERROR_RETURN ((LM_ERROR, "RT_Info (%i) not found.\n",
+ (*dependency_set) [i].rt_info), -1);
}
// Extract a pointer to the scheduling entry from the RT_Info.
@@ -170,7 +171,7 @@ visit (TAO_Reconfig_Scheduler_Entry &rse)
return 0;
}
-
+/* WSOA merge - commented out
// Performs an unconditional action when the entry is first reached.
// Returns 0 for success, and -1 if an error occurred.
@@ -182,7 +183,7 @@ unconditional_action (TAO_Reconfig_Scheduler_Entry &rse)
ACE_UNUSED_ARG (rse);
return 0;
}
-
+*/
// Tests whether or not any conditional actions should be taken for
// the entry. Returns 0 if the actions should be applied, 1 if the
@@ -192,9 +193,10 @@ template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
precondition (TAO_Reconfig_Scheduler_Entry &rse)
{
- // Default behavior: just return success.
- ACE_UNUSED_ARG (rse);
- return 0;
+ // Only signal to proceed (0) if the passed entry is enabled or non-volatile
+ return (rse.enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ ? 1
+ : 0;
}
@@ -251,16 +253,8 @@ postfix_action (TAO_Reconfig_Scheduler_Entry &rse)
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_DFS_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_DFS_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & rt_info_map)
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map)
: TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
(dependency_map, rt_info_map),
DFS_time_ (0)
@@ -276,12 +270,17 @@ template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_DFS_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
precondition (TAO_Reconfig_Scheduler_Entry &rse)
{
- return (rse.fwd_dfs_status () ==
- TAO_Reconfig_Scheduler_Entry::NOT_VISITED)
- ? 0 : 1;
+ int result =
+ TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+ precondition (rse);
+
+ return (result == 0)
+ ? ((rse.fwd_dfs_status () == TAO_Reconfig_Scheduler_Entry::NOT_VISITED)
+ ? 0
+ : 1)
+ : result;
}
-
// Marks entry as forward visited and sets its forward DFS start
// time, prior to visiting any of its successors. Returns 0 on
// success and -1 on error.
@@ -308,10 +307,11 @@ pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
ACE_UNUSED_ARG (entry);
ACE_UNUSED_ARG (di);
- // Operations we reached via a dependency and that do not
+ // Enabled operations we reached via a dependency and that do not
// specify a period are not thread delineators.
- if (successor.actual_rt_info ()->period == 0 &&
- successor.actual_rt_info ()->threads == 0)
+ if (successor.enabled_state () != RtecScheduler::RT_INFO_DISABLED
+ && successor.actual_rt_info ()->period == 0
+ && successor.actual_rt_info ()->threads == 0)
{
successor.is_thread_delineator (0);
}
@@ -343,16 +343,8 @@ postfix_action (TAO_Reconfig_Scheduler_Entry &rse)
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_SCC_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & rt_info_map)
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map)
: TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
(dependency_map, rt_info_map),
DFS_time_ (0),
@@ -361,7 +353,6 @@ TAO_RSE_SCC_Visitor
{
}
-
// Accessor for number of cycles detected in traversal.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
@@ -393,7 +384,7 @@ in_a_cycle (int i)
this->in_a_cycle_ = i;
}
-
+/* WSOA merge - commented out
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unconditional_action (TAO_Reconfig_Scheduler_Entry &rse)
@@ -408,6 +399,7 @@ unconditional_action (TAO_Reconfig_Scheduler_Entry &rse)
return 0;
}
+*/
// Makes sure the entry has not previously been visited in the
// reverse DFS (call graph transpose) direction. Returns 0 if
@@ -418,9 +410,15 @@ template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_RSE_SCC_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
precondition (TAO_Reconfig_Scheduler_Entry &rse)
{
- return (rse.rev_dfs_status () ==
- TAO_Reconfig_Scheduler_Entry::NOT_VISITED)
- ? 0 : 1;
+ int result =
+ TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+ precondition (rse);
+
+ return (result == 0)
+ ? ((rse.rev_dfs_status () == TAO_Reconfig_Scheduler_Entry::NOT_VISITED)
+ ? 0
+ : 1)
+ : 1;
}
@@ -454,7 +452,9 @@ pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
{
ACE_UNUSED_ARG (di);
- if (successor.rev_dfs_status () ==
+ if (successor.enabled_state () !=
+ RtecScheduler::RT_INFO_DISABLED
+ && successor.rev_dfs_status () ==
TAO_Reconfig_Scheduler_Entry::NOT_VISITED)
{
if (this->in_a_cycle () == 0)
@@ -487,26 +487,85 @@ postfix_action (TAO_Reconfig_Scheduler_Entry &rse)
return 0;
}
+/////////////////////////////////////////
+// TAO_RSE_Reverse_Propagation_Visitor //
+/////////////////////////////////////////
+
+// Constructor.
-/////////////////////////////////
-// TAO_RSE_Propagation_Visitor //
-/////////////////////////////////
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+TAO_RSE_Reverse_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Reverse_Propagation_Visitor
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map)
+ : TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK> (dependency_map, rt_info_map)
+{
+}
+
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
+TAO_RSE_Reverse_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
+ TAO_Reconfig_Scheduler_Entry &successor,
+ const RtecScheduler::Dependency_Info &di)
+{
+ ACE_UNUSED_ARG (di);
+
+ // @TODO - check for conjunction nodes here and perform conjunctive
+ // function on existing rate tuples. Idea: treat conjunctive tuples
+ // as skolem functions over the possible rates of their incedent
+ // edges thread delineators!!! Then, can tentatively compute
+ // utilization for rate combinations. Question: can I find a case
+ // where this makes tuple rate admission non-monotonic??? I.e.,
+ // where a higher rate for an input results in a lower utilization?
+ // Might require a skew in the exec times and rates. What are the
+ // determining characteristics of this? What impact if any does
+ // phasing have on this?
+
+ // Check for conjunction nodes and don't propagate
+ // upward from them: they represent a cut point in the graph.
+ // Do not allow conjunction nodes for now.
+ if (entry.actual_rt_info ()->info_type == RtecScheduler::CONJUNCTION)
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+                         ACE_TEXT ("Conjunction nodes are not currently supported.\n")),
+ -1);
+ }
+ else
+ {
+ // @TODO - replace the explicit WCET attribute propagation with
+ // a scheduling strategy functor that propagates arbitrary
+ // execution time attributes. BTW, for conjunctions BCET and WCET
+ // are probably needed relative the upper and lower bounds on
+ // arrival waveforms.
+
+ // Add the successor's aggregate time to the entry's aggregate time.
+ // Since we're visiting in topological order (called nodes before
+ // calling nodes), the successor's aggregate time is up to date.
+ if (successor.enabled_state () != RtecScheduler::RT_INFO_DISABLED)
+ {
+ entry.aggregate_exec_time (entry.aggregate_exec_time ()
+ + successor.aggregate_exec_time ());
+ }
+ }
+
+
+ // Do not recurse on the successor node, just continue to the next successor.
+ return 1;
+}
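A small worked example of the aggregation above, assuming each entry's aggregate time is seeded with its own worst-case execution time before this pass (that seeding is not shown in this hunk): if operation A calls enabled operations B and C, and the visitor has already finished B and C with aggregate times of 2 ms and 3 ms, then visiting A with its own 1 ms yields an aggregate of 1 + 2 + 3 = 6 ms. Because the traversal runs in topological order, B's and C's own callees are already folded into those 2 ms and 3 ms.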
+
+
+/////////////////////////////////////////
+// TAO_RSE_Forward_Propagation_Visitor //
+/////////////////////////////////////////
// Constructor.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
-TAO_RSE_Propagation_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & rt_info_map)
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map)
: TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK> (dependency_map, rt_info_map),
unresolved_locals_ (0),
unresolved_remotes_ (0),
@@ -518,7 +577,7 @@ TAO_RSE_Propagation_Visitor
// Accessor for number of nodes with unresolved local dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_locals (void)
{
return this->unresolved_locals_;
@@ -528,7 +587,7 @@ unresolved_locals (void)
// Mutator for number of nodes with unresolved local dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_locals (int i)
{
this->unresolved_locals_ = i;
@@ -538,7 +597,7 @@ unresolved_locals (int i)
// Accessor for number of nodes with unresolved remote dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_remotes (void)
{
return this->unresolved_remotes_;
@@ -548,7 +607,7 @@ unresolved_remotes (void)
// Mutator for number of nodes with unresolved remote dependencies.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
unresolved_remotes (int i)
{
this->unresolved_remotes_ = i;
@@ -557,7 +616,7 @@ unresolved_remotes (int i)
// Accessor for number of nodes with thread specification errors.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
thread_specification_errors (void)
{
return this->thread_specification_errors_;
@@ -567,7 +626,7 @@ thread_specification_errors (void)
// Mutator for number of nodes with thread specification errors.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
thread_specification_errors (int i)
{
this->thread_specification_errors_ = i;
@@ -581,7 +640,7 @@ thread_specification_errors (int i)
// problems is not considered an error, at least for this method).
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
prefix_action (TAO_Reconfig_Scheduler_Entry &rse)
{
// Complain about anything that is still marked as a thread
@@ -622,7 +681,7 @@ prefix_action (TAO_Reconfig_Scheduler_Entry &rse)
// must also specify a period.
++this->thread_specification_errors_;
ACE_DEBUG ((LM_ERROR,
- "RT_Info \"%s\" specifies %ld "
+ "RT_Info \"%s\" specifies %1d "
"threads, but no period.\n",
rse.actual_rt_info ()->entry_point.in (),
rse.actual_rt_info ()->threads));
@@ -634,112 +693,98 @@ prefix_action (TAO_Reconfig_Scheduler_Entry &rse)
}
-// Propagates effective period and execution time multiplier from
-// entry to successor prior to visiting successor. Returns 0 on
-// success and -1 on error.
+// Propagates effective period from entry to successor prior to
+// visiting successor. Returns 0 on success and -1 on error.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
-TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
TAO_Reconfig_Scheduler_Entry &successor,
const RtecScheduler::Dependency_Info &di)
{
+ if (successor.enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return 1;
+ }
+
ACE_UNUSED_ARG (di);
+ TAO_RT_Info_Tuple **tuple_ptr_ptr;
- // This method makes a conservative estimate in cases where periods
- // differ, taking the minimum frame size and dividing down the
- // execution multiplier of the longer frame (and rounding the result
- // upward as needed). A more exact computation could be achieved
- // by merging sets of arrivals and frame offsets, but that
- // would in turn cost more in run-time computation time.
- // NOTE: this approach *only* works for harmonic periods. For
- // non-harmonic periods, the set merge approach is necessary.
+ TUPLE_SET_ITERATOR orig_tuple_iter (entry.orig_tuple_subset ());
- if (successor.effective_period () == 0)
+ while (orig_tuple_iter.done () == 0)
{
- // If this is the first dependency by which the successor has
- // been reached, and the successor is not itself a thread
- // delineator, then simply adopt the effective period and
- // execution time multiplier of the shorter period.
- successor.effective_period (entry.effective_period ());
- successor.effective_exec_multiplier (entry.effective_exec_multiplier ());
+ if (orig_tuple_iter.next (tuple_ptr_ptr) == 0
+ || tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
+ {
+ ACE_ERROR ((LM_ERROR,
+                      "Failed to access tuple under iterator.\n"));
+ return -1;
+ }
+
+ // @TODO - check for conjunction nodes here and perform conjunctive
+ // function on existing rate tuples.
+
+ ACE_DEBUG((LM_DEBUG, "Inserting new propagated tuple for RT_Info: %d, entry_ptr: 0x%x, tuple_ptr: 0x%x\n",
+ successor.actual_rt_info ()->handle,
+ &successor,
+ (*tuple_ptr_ptr)));
+ // Propagate tuples disjunctively.
+ successor.insert_tuple (**tuple_ptr_ptr,
+ TAO_Reconfig_Scheduler_Entry::PROPAGATED);
+
+ successor.actual_rt_info ()->period =
+ (successor.actual_rt_info ()->period == 0)
+ ? (*tuple_ptr_ptr)->period
+ : ACE::minimum_frame_size (successor.actual_rt_info ()->period,
+ (*tuple_ptr_ptr)->period);
+ orig_tuple_iter.advance ();
}
- else
- {
- // Otherwise, take the smaller of the two periods, and divide down
- // the execution multipliers accordingly.
- long new_exec_multiplier = 0;
- long old_exec_multiplier = 0;
+ TUPLE_SET_ITERATOR prop_tuple_iter (entry.prop_tuple_subset ());
- if (successor.effective_period () < entry.effective_period ())
+ while (prop_tuple_iter.done () == 0)
+ {
+ if (prop_tuple_iter.next (tuple_ptr_ptr) == 0
+ || tuple_ptr_ptr == 0 || *tuple_ptr_ptr == 0)
{
- // Store the previous execution multiplier.
- old_exec_multiplier = successor.effective_exec_multiplier ();
-
- // Divide down the new execution multiplier.
- new_exec_multiplier =
- ACE_static_cast (long,
- (old_exec_multiplier *
- successor.effective_period ()) /
- entry.effective_period ());
-
- // Adjust for round-off error.
- if (old_exec_multiplier >
- ACE_static_cast (long,
- (new_exec_multiplier *
- entry.effective_period ()) /
- successor.effective_period ()))
- {
- ++new_exec_multiplier;
- }
-
- // Set the successor's effective period and execution multiplier.
- successor.effective_period (entry.effective_period ());
- successor.effective_exec_multiplier (entry.effective_exec_multiplier () +
- new_exec_multiplier);
- }
- else
- {
- // Store the previous execution multiplier.
- old_exec_multiplier = entry.effective_exec_multiplier ();
-
- // Divide down the new execution multiplier.
- new_exec_multiplier =
- ACE_static_cast (long,
- old_exec_multiplier *
- entry.effective_period () /
- successor.effective_period ());
-
- // Adjust for round-off error.
- if (old_exec_multiplier >
- ACE_static_cast (long,
- new_exec_multiplier *
- successor.effective_period () /
- entry.effective_period ()))
- {
- ++new_exec_multiplier;
- }
-
- // Just set the successor's execution multiplier (the period is unchanged).
- successor.effective_exec_multiplier (successor.effective_exec_multiplier () +
- new_exec_multiplier);
+ ACE_ERROR ((LM_ERROR,
+                      "Failed to access tuple under iterator.\n"));
+ return -1;
}
+
+ // @TODO - check for conjunction nodes here and perform conjunctive
+ // function on existing rate tuples.
+
+ ACE_DEBUG((LM_DEBUG, "Inserting new propagated tuple for RT_Info: %d, entry_ptr: 0x%x, tuple_ptr: 0x%x\n",
+ successor.actual_rt_info ()->handle,
+ &successor,
+ (*tuple_ptr_ptr)));
+ // Propagate tuples disjunctively.
+ successor.insert_tuple (**tuple_ptr_ptr,
+ TAO_Reconfig_Scheduler_Entry::PROPAGATED);
+
+ successor.actual_rt_info ()->period =
+ (successor.actual_rt_info ()->period == 0)
+ ? (*tuple_ptr_ptr)->period
+ : ACE::minimum_frame_size (successor.actual_rt_info ()->period,
+ (*tuple_ptr_ptr)->period);
+
+ prop_tuple_iter.advance ();
}
// Do not recurse on the successor node, just continue to the next successor.
return 1;
}
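To summarize the period folding above: each of the caller's original and propagated tuples is copied into the successor's PROPAGATED subset, and the successor's period is seeded from the first such tuple and then folded with every further tuple through ACE::minimum_frame_size. Assuming that helper returns the least common multiple of its two arguments (the smallest frame containing a whole number of each period), a successor first reached with a 20-unit tuple and then a 50-unit tuple would end up with a 100-unit period.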
-
////////////////////////////////////
// class TAO_RSE_Priority_Visitor //
////////////////////////////////////
// Constructor.
-template <class RECONFIG_SCHED_STRATEGY>
-TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
TAO_RSE_Priority_Visitor (RtecScheduler::handle_t handles,
TAO_Reconfig_Scheduler_Entry ** entry_ptr_array)
: previous_entry_ (0),
@@ -754,17 +799,23 @@ TAO_RSE_Priority_Visitor (RtecScheduler::handle_t handles,
}
-// Visit a Reconfig Scheduler Entry. This method
-// assigns a priority and subpriority value to each
-// entry. Priorities are assigned in increasing value
-// order, with lower numbers corresponding to higher
-// priorities.
+// Visit a RT_Info tuple. This method assigns a priority and
+// subpriority value to each tuple. Priorities are assigned in
+// increasing numeric order, with lower numbers corresponding to
+// higher priorities.
-template <class RECONFIG_SCHED_STRATEGY> int
-TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Scheduler_Entry &rse)
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
+TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::visit (TAO_Reconfig_Scheduler_Entry &rse)
{
int result = 0;
+ ACE_DEBUG ((LM_DEBUG,
+ "Priority_Visitor visiting %s[%d],crit=%d,period=%d\n",
+ rse.actual_rt_info ()->entry_point.in(),
+ rse.actual_rt_info ()->handle,
+ rse.actual_rt_info ()->criticality,
+ rse.actual_rt_info ()->period));
+
if (previous_entry_ == 0)
{
// Indicate a new priority level was assigned.
@@ -777,7 +828,16 @@ TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Scheduler
}
else
{
- if (RECONFIG_SCHED_STRATEGY::compare_priority (*previous_entry_, rse) == 0)
+ ACE_DEBUG ((LM_DEBUG,
+ "Previous entry %s[%d],crit=%d,period=%d\n",
+ previous_entry_->actual_rt_info ()->entry_point.in(),
+ previous_entry_->actual_rt_info ()->handle,
+ previous_entry_->actual_rt_info ()->criticality,
+ previous_entry_->actual_rt_info ()->period));
+
+ // Don't change priority levels on a disabled node.
+ if (rse.enabled_state () == RtecScheduler::RT_INFO_DISABLED
+ || RECONFIG_SCHED_STRATEGY::compare_priority (*previous_entry_, rse) == 0)
{
// Subpriority is increased at each new node.
++subpriority_;
@@ -796,8 +856,7 @@ TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Scheduler
// Iterate back through and adjust the subpriority levels.
for (int i = 0; i <= subpriority_; ++i, ++first_subpriority_entry_)
{
- (*first_subpriority_entry_)->
- actual_rt_info ()->
+ (*first_subpriority_entry_)->actual_rt_info ()->
preemption_subpriority += subpriority_;
}
@@ -805,9 +864,12 @@ TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Scheduler
rse.actual_rt_info ()->preemption_subpriority = subpriority_;
++priority_;
+#ifdef SCHEDULER_LOGGING
+ ACE_DEBUG ((LM_DEBUG, "New priority %d formed\n", priority_));
+#endif
os_priority_ = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- os_priority_,
- ACE_SCOPE_PROCESS);
+ os_priority_,
+ ACE_SCOPE_PROCESS);
}
}
@@ -825,31 +887,34 @@ TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Scheduler
// Finishes scheduler entry priority assignment by iterating over the
// remaining entries in the last subpriority level, and adjusting
// their subpriorities.
-template <class RECONFIG_SCHED_STRATEGY> int
-TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>::finish ()
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
+TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::finish ()
{
// Iterate back through and adjust the subpriority levels.
for (int i = 0; i <= subpriority_; ++i, ++first_subpriority_entry_)
{
- (*first_subpriority_entry_)->
- actual_rt_info ()->
- preemption_subpriority += subpriority_;
+ (*first_subpriority_entry_)->actual_rt_info ()->
+ preemption_subpriority += subpriority_;
}
- // Indicate no new proirity level was identified.
+ // Indicate no new priority level was identified.
return 0;
}
///////////////////////////////////////
-// class TAO_RSE_Utilization_Visitor //
+// class TAO_Tuple_Admission_Visitor //
///////////////////////////////////////
// Constructor.
template <class RECONFIG_SCHED_STRATEGY>
-TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::TAO_RSE_Utilization_Visitor ()
+TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::
+TAO_Tuple_Admission_Visitor (const CORBA::Double & critical_utilization_threshold,
+ const CORBA::Double & noncritical_utilization_threshold)
: critical_utilization_ (0.0),
- noncritical_utilization_ (0.0)
+ noncritical_utilization_ (0.0),
+ critical_utilization_threshold_ (critical_utilization_threshold),
+ noncritical_utilization_threshold_ (noncritical_utilization_threshold)
{
}
@@ -860,24 +925,63 @@ TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::TAO_RSE_Utilization_Visito
// operation is critical.
template <class RECONFIG_SCHED_STRATEGY> int
-TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Scheduler_Entry &rse)
+TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_RT_Info_Tuple &t)
{
- CORBA::Double entry_period = rse.effective_period ();
- CORBA::Double entry_time = ACE_static_cast (
- CORBA::Double,
- ACE_UINT64_DBLCAST_ADAPTER (rse.actual_rt_info ()->
- worst_case_execution_time));
- CORBA::Double entry_mult = rse.effective_exec_multiplier ();
+ TAO_Reconfig_Scheduler_Entry *entry =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ t.volatile_token);
+
+ // Ignore disabled tuples and entries
+ if (t.enabled_state () == RtecScheduler::RT_INFO_DISABLED
+ || entry->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ return 0;
+ }
- if (RECONFIG_SCHED_STRATEGY::is_critical (rse))
+ // Compute the current tuple's utilization.
+ CORBA::Double delta_utilization =
+ (ACE_static_cast (CORBA::Double,
+ t.threads)
+ * ACE_static_cast (CORBA::Double,
+ ACE_UINT64_DBLCAST_ADAPTER (entry->
+ aggregate_exec_time ())))
+ / ACE_static_cast (CORBA::Double,
+ t.period);
+
+ // Subtract the previous tuple's utilization (if any) for the entry.
+ if (entry->current_admitted_tuple ())
{
- this->critical_utilization_ =
- (entry_mult * entry_time) / entry_period;
+ delta_utilization -=
+ (ACE_static_cast (CORBA::Double,
+ entry->current_admitted_tuple ()->threads)
+ * ACE_static_cast (CORBA::Double,
+ ACE_UINT64_DBLCAST_ADAPTER (entry->
+ aggregate_exec_time ())))
+ / ACE_static_cast (CORBA::Double,
+ entry->current_admitted_tuple ()->period);
+ }
+
+ if (RECONFIG_SCHED_STRATEGY::is_critical (t))
+ {
+ if (this->critical_utilization_ + this->noncritical_utilization_
+          + delta_utilization
+ < this->critical_utilization_threshold_)
+ {
+ this->critical_utilization_ += delta_utilization;
+ entry->current_admitted_tuple (&t);
+ entry->actual_rt_info ()->period = t.period;
+ }
}
else
{
- this->noncritical_utilization_ =
- (entry_mult * entry_time) / entry_period;
+ if (this->critical_utilization_ + this->noncritical_utilization_
+          + delta_utilization
+ < this->noncritical_utilization_threshold_)
+ {
+ this->noncritical_utilization_ += delta_utilization;
+ entry->current_admitted_tuple (&t);
+ entry->actual_rt_info ()->period = t.period;
+ }
}
return 0;
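To make the admission test above concrete (the units cancel, so any consistent time unit works): suppose a non-critical operation runs in one thread with an aggregate execution time of 5 ms, its candidate tuple has a 50 ms period, and a 100 ms tuple was admitted earlier for the same entry. The candidate contributes 1 * 5 / 50 = 0.10, the previously admitted tuple contributed 1 * 5 / 100 = 0.05, so delta_utilization is 0.05. If critical plus non-critical utilization currently totals 0.62 and the non-critical threshold is 0.7, then 0.62 + 0.05 = 0.67 < 0.7, so the tuple is admitted: current_admitted_tuple is updated and the entry's RT_Info period becomes 50 ms.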
@@ -887,7 +991,7 @@ TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::visit (TAO_Reconfig_Schedu
// Accessor for utilization by critical operations.
template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
-TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::critical_utilization ()
+TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::critical_utilization ()
{
return this->critical_utilization_;
}
@@ -896,10 +1000,73 @@ TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::critical_utilization ()
// Accessor for utilization by noncritical operations.
template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
-TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY>::noncritical_utilization ()
+TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::noncritical_utilization ()
{
return this->noncritical_utilization_;
}
+// Accessor for utilization threshold for critical operations.
+
+template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
+TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::critical_utilization_threshold ()
+{
+ return this->critical_utilization_threshold_;
+}
+
+
+// Accessor for utilization threshold for noncritical operations.
+
+template <class RECONFIG_SCHED_STRATEGY> CORBA::Double
+TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>::noncritical_utilization_threshold ()
+{
+ return this->noncritical_utilization_threshold_;
+}
+
+
+/////////////////////////////////////////
+// TAO_RSE_Criticality_Propagation_Visitor //
+/////////////////////////////////////////
+
+// Constructor.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+TAO_RSE_Criticality_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_RSE_Criticality_Propagation_Visitor
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map)
+ : TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK> (dependency_map, rt_info_map)
+{
+}
+
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
+TAO_RSE_Criticality_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
+ TAO_Reconfig_Scheduler_Entry &successor,
+ const RtecScheduler::Dependency_Info &di)
+{
+ ACE_UNUSED_ARG (di);
+
+ ACE_DEBUG ((LM_DEBUG,
+ "Crit Prop_Visitor visiting %s[%d], successor is %s[%d]\n",
+ entry.actual_rt_info ()->entry_point.in(),
+ entry.actual_rt_info ()->handle,
+ successor.actual_rt_info ()->entry_point.in(),
+ successor.actual_rt_info ()->handle));
+
+ if (successor.enabled_state () != RtecScheduler::RT_INFO_DISABLED)
+ {
+ successor.actual_rt_info ()->criticality =
+ ace_max (entry.actual_rt_info ()->criticality,
+ successor.actual_rt_info ()->criticality);
+ ACE_DEBUG ((LM_DEBUG,
+ "Successor's new criticality is %d\n",
+ successor.actual_rt_info ()->criticality));
+ }
+
+ // Do not recurse on the successor node, just continue to the next successor.
+ return 1;
+}
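In other words, criticality only ever increases during this pass: for each enabled successor the visitor takes ace_max of the visited entry's criticality and the successor's own, so a LOW_CRITICALITY successor of a HIGH_CRITICALITY entry ends up HIGH_CRITICALITY, while an already higher successor is left unchanged.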
+
#endif /* TAO_RECONFIG_SCHED_UTILS_T_C */
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.h b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.h
index c8543a41fa1..9ea96225a4c 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.h
+++ b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Sched_Utils_T.h
@@ -19,9 +19,7 @@
#include /**/ "ace/pre.h"
#include "ace/config-all.h"
-
#include "Reconfig_Sched_Utils.h"
-
#include "ace/Hash_Map_Manager.h"
#if !defined (ACE_LACKS_PRAGMA_ONCE)
@@ -54,7 +52,7 @@ public:
// dependency sets by caller or called handle.
typedef ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
+ TAO_RT_Info_Ex*,
ACE_Hash<RtecScheduler::handle_t>,
ACE_Equal_To<RtecScheduler::handle_t>,
ACE_LOCK> RT_INFO_MAP;
@@ -72,11 +70,11 @@ public:
// design pattern.
protected:
-
+ /* WSOA merge - commented out
virtual int unconditional_action (TAO_Reconfig_Scheduler_Entry &rse);
// Performs an unconditional action when the entry is first reached.
// Returns 0 for success, and -1 if an error occurred.
-
+ */
virtual int precondition (TAO_Reconfig_Scheduler_Entry &rse);
// Tests whether or not any conditional actions should be taken for
// the entry. Returns 0 if the actions should be applied, 1 if the
@@ -120,18 +118,9 @@ class TAO_RSE_DFS_Visitor :
{
public:
-
TAO_RSE_DFS_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK>& rt_info_map);
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map);
// Constructor.
protected:
@@ -177,16 +166,8 @@ class TAO_RSE_SCC_Visitor :
public:
TAO_RSE_SCC_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & rt_info_map);
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map);
// Constructor.
int number_of_cycles (void);
@@ -201,10 +182,12 @@ public:
// detected cycle.
protected:
+ /* WSOA merge - commented out
virtual int unconditional_action (TAO_Reconfig_Scheduler_Entry &rse);
// If the entry is a thread delineator, sets its effective period and
// execution multiplier from the values in its corresponding RT_Info.
// Returns 0 for success, and -1 if an error occurred.
+ */
virtual int precondition (TAO_Reconfig_Scheduler_Entry &rse);
// Makes sure the entry has not previously been visited in the
@@ -245,9 +228,39 @@ private:
// currently within a previously discovered cycle.
};
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+class TAO_RSE_Reverse_Propagation_Visitor :
+ public TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
+ // = TITLE
+ // A scheduler entry visitor that propagates aggregate execution
+ // times from called to calling nodes in a topologically ordered
+ // graph.
+ //
+ // = DESCRIPTION
+ // This class computes the aggregate execution time of each node
+ // and its dependants, according to its dependencies.
+{
+public:
+
+ TAO_RSE_Reverse_Propagation_Visitor
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map);
+ // Constructor.
+
+protected:
+
+ virtual int pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
+ TAO_Reconfig_Scheduler_Entry &successor,
+ const RtecScheduler::Dependency_Info &di);
+ // Propagates aggregate execution time from successor to calling
+ // entry. Returns 1 on success (to prevent recursion on the
+ // successor), and -1 on error.
+
+};
+
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-class TAO_RSE_Propagation_Visitor :
+class TAO_RSE_Forward_Propagation_Visitor :
public TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
// = TITLE
// A scheduler entry visitor that propagates effective periods and
@@ -260,17 +273,9 @@ class TAO_RSE_Propagation_Visitor :
{
public:
- TAO_RSE_Propagation_Visitor
- (ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & dependency_map,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & rt_info_map);
+ TAO_RSE_Forward_Propagation_Visitor
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map);
// Constructor.
int unresolved_locals (void);
@@ -318,13 +323,13 @@ private:
// Number of nodes with thread specification errors.
};
-template <class RECONFIG_SCHED_STRATEGY>
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
class TAO_RSE_Priority_Visitor :
public TAO_Reconfig_Sched_Entry_Visitor
// = TITLE
- // A scheduler entry visitor that assigns static priority and
- // subpriority values to entries in an array already sorted in
- // static <priority, subpriority> order.
+ // An entry visitor that assigns static priority and subpriority
+ // values to entries in an array already sorted in static
+ // <priority, subpriority> order.
//
// = DESCRIPTION
// The visitor uses the parameterized strategy type to determine
@@ -337,25 +342,24 @@ public:
// Constructor.
virtual int visit (TAO_Reconfig_Scheduler_Entry &);
- // Visit a Reconfig Scheduler Entry. This method
- // assigns a priority and subpriority value to each
- // entry. Priorities are assigned in increasing value
- // order, with lower numbers corresponding to higher
- // priorities. Returns -1 on error, 1 if a new priority
- // was assigned, or 0 otherwise.
+ // Visit a RT_Info tuple. This method assigns a priority and
+ // subpriority value to each tuple. Priorities are assigned in
+ // increasing value order, with lower numbers corresponding to
+ // higher priorities. Returns -1 on error, 1 if a new priority was
+ // assigned, or 0 otherwise.
int finish ();
- // Finishes scheduler entry priority assignment by iterating over the
- // remaining entries in the last subpriority level, and adjusting
+ // Finishes tuple priority assignment by iterating over the
+ // remaining tuples in the last subpriority level, and adjusting
// their subpriorities.
private:
TAO_Reconfig_Scheduler_Entry *previous_entry_;
- // Pointer to previous entry most recently seen in the iteration.
+ // Pointer to previous tuple in the iteration.
TAO_Reconfig_Scheduler_Entry **first_subpriority_entry_;
- // Pointer to first subpriority entry in the priority level.
+ // Pointer to first subpriority tuple in the priority level.
RtecScheduler::Preemption_Priority_t priority_;
// Current priority value.
@@ -374,11 +378,15 @@ private:
};
template <class RECONFIG_SCHED_STRATEGY>
-class TAO_RSE_Utilization_Visitor :
- public TAO_Reconfig_Sched_Entry_Visitor
+class TAO_Tuple_Admission_Visitor :
+ public TAO_RT_Info_Tuple_Visitor
// = TITLE
- // A scheduler entry visitor that accumulates utilization separately
- // for the critical operations and the non-critical operations.
+  //    A tuple visitor that accumulates utilization separately for
+  //    critical and non-critical operations.  Operation tuples that
+  //    fit within the threshold defined for their criticality level
+  //    are admitted to the schedule by updating the corresponding
+  //    RT_Info with the tuple data.
//
// = DESCRIPTION
// The visitor uses the parameterized strategy type to determine
@@ -386,15 +394,15 @@ class TAO_RSE_Utilization_Visitor :
{
public:
- TAO_RSE_Utilization_Visitor ();
+ TAO_Tuple_Admission_Visitor (const CORBA::Double & critical_utilization_threshold,
+ const CORBA::Double & noncritical_utilization_threshold);
// Constructor.
- virtual int visit (TAO_Reconfig_Scheduler_Entry &);
- // Visit a Reconfig Scheduler Entry. This method
- // determines the utilization by the entry, and
- // adds it to the critical or non-critical utilization,
- // depending on whether or not the strategy says the
- // operation is critical.
+ virtual int visit (TAO_RT_Info_Tuple &);
+ // Visit an RT_Info tuple. This method determines the utilization by
+ // the tuple, and if it's admissible, updates its RT_Info and either
+ // the critical or non-critical utilization, depending on whether or
+ // not the strategy says the operation is critical.
CORBA::Double critical_utilization ();
// Accessor for utilization by critical operations.
@@ -402,6 +410,12 @@ public:
CORBA::Double noncritical_utilization ();
// Accessor for utilization by noncritical operations.
+ CORBA::Double critical_utilization_threshold ();
+ // Accessor for the utilization threshold for critical operations.
+
+ CORBA::Double noncritical_utilization_threshold ();
+ // Accessor for the utilization threshold for noncritical operations.
+
private:
CORBA::Double critical_utilization_;
@@ -409,8 +423,43 @@ private:
CORBA::Double noncritical_utilization_;
// Utilization by noncritical operations.
+
+ CORBA::Double critical_utilization_threshold_;
+ // Utilization threshold for critical operations.
+
+ CORBA::Double noncritical_utilization_threshold_;
+ // Utilization threshold for noncritical operations.
};
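+// An illustrative sketch of the admission test that
+// TAO_Tuple_Admission_Visitor::visit presumably applies (the variable
+// names below are assumptions, not the actual implementation):
+//
+//   CORBA::Double load = wcet / period;   // from the visited tuple
+//   CORBA::Double &total = critical ? critical_utilization_
+//                                   : noncritical_utilization_;
+//   CORBA::Double bound = critical ? critical_utilization_threshold_
+//                                  : noncritical_utilization_threshold_;
+//   if (total + load <= bound)
+//     { total += load; /* and copy the tuple's data into its RT_Info */ }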
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+class TAO_RSE_Criticality_Propagation_Visitor :
+ public TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
+ // = TITLE
+ // A scheduler entry visitor that propagates criticality
+ // from called to calling nodes in a topologically ordered
+ // graph.
+ //
+ // = DESCRIPTION
+ // This class computes the criticality of each node
+ // and its dependants, according to its dependencies.
+{
+public:
+
+ TAO_RSE_Criticality_Propagation_Visitor
+ (ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP & dependency_map,
+ ACE_TYPENAME TAO_RSE_Dependency_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP & rt_info_map);
+ // Constructor.
+
+protected:
+
+ virtual int pre_recurse_action (TAO_Reconfig_Scheduler_Entry &entry,
+ TAO_Reconfig_Scheduler_Entry &successor,
+ const RtecScheduler::Dependency_Info &di);
+ // Propagates criticality from successor to calling
+ // entry. Returns 1 on success (to prevent recursion on the
+ // successor), and -1 on error.
+
+};
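+// A rough sketch of the propagation rule pre_recurse_action presumably
+// applies (expressed as pseudo-code; the accessors are assumptions):
+//
+//   if (criticality (successor) > criticality (entry))
+//     criticality (entry) = criticality (successor);
+//
+// i.e., a calling node is raised to at least the criticality of every
+// operation it depends on.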
#if defined (__ACE_INLINE__)
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.cpp b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.cpp
index abe943d96c2..e9bda62714e 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.cpp
+++ b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.cpp
@@ -30,6 +30,10 @@
#include "Reconfig_Scheduler_T.i"
#endif /* __ACE_INLINE__ */
+//#ifdef _DEBUG
+//#define SCHEDULER_LOGGING 1
+//#endif
+
ACE_RCSID(Sched, Reconfig_Scheduler_T, "$Id$")
//////////////////////////////////////////////
@@ -46,17 +50,31 @@ typedef int (*COMP_FUNC) (const void*, const void*);
// Default constructor.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::TAO_Reconfig_Scheduler (int enforce_schedule_stability)
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+TAO_Reconfig_Scheduler (int enforce_schedule_stability,
+ const CORBA::Double & critical_utilization_threshold,
+ const CORBA::Double & noncritical_utilization_threshold)
: config_info_count_ (0),
rt_info_count_ (0),
+ rt_info_tuple_count_ (0),
next_handle_ (1),
entry_ptr_array_ (0),
entry_ptr_array_size_ (0),
+ tuple_ptr_array_ (0),
+ tuple_ptr_array_size_ (0),
stability_flags_ (SCHED_NONE_STABLE),
enforce_schedule_stability_ (enforce_schedule_stability),
dependency_count_ (0),
- last_scheduled_priority_ (0)
+ last_scheduled_priority_ (0),
+ noncritical_utilization_ (0.0),
+ critical_utilization_ (0.0),
+ noncritical_utilization_threshold_ (noncritical_utilization_threshold),
+ critical_utilization_threshold_ (critical_utilization_threshold)
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler default ctor.\n"));
+#endif /* SCHEDULER_LOGGING */
}
// Constructor. Initialize the scheduler from the POD_Config_Info, POD_RT_Info,
@@ -71,15 +89,30 @@ TAO_Reconfig_Scheduler (int config_count,
int dependency_count,
ACE_Scheduler_Factory::POD_Dependency_Info dependency_infos[],
u_long stability_flags,
- int enforce_schedule_stability)
+ int enforce_schedule_stability,
+ const CORBA::Double & critical_utilization_threshold,
+ const CORBA::Double & noncritical_utilization_threshold)
: config_info_count_ (0),
rt_info_count_ (0),
+ rt_info_tuple_count_ (0),
next_handle_ (1),
stability_flags_ (SCHED_ALL_STABLE),
enforce_schedule_stability_ (enforce_schedule_stability),
dependency_count_ (0),
- last_scheduled_priority_ (0)
+ last_scheduled_priority_ (0),
+ noncritical_utilization_ (0.0),
+ critical_utilization_ (0.0),
+ noncritical_utilization_threshold_ (noncritical_utilization_threshold),
+ critical_utilization_threshold_ (critical_utilization_threshold)
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler alternative ctor.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // @@ TODO - think about what it means to emit all the tuples as
+ // well as the established RT_Infos. State is more complex now.
+
// The init method can throw an exception, which must be caught
// *inside* the constructor to be portable between compilers that
// differ in whether they support native C++ exceptions.
@@ -101,6 +134,38 @@ TAO_Reconfig_Scheduler (int config_count,
}
+// Destructor.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+~TAO_Reconfig_Scheduler ()
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler dtor.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_TRY_NEW_ENV
+ {
+ this->close (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_TRY_CHECK;
+ }
+ ACE_CATCH (CORBA::SystemException, corba_sysex)
+ {
+ ACE_ERROR ((LM_ERROR, "TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, "
+ "ACE_LOCK>::~TAO_Reconfig_Scheduler "
+ "exception: cannot close scheduler.\n"));
+ }
+ ACE_ENDTRY;
+
+ // Delete the entry and tuple pointer arrays.
+ delete [] entry_ptr_array_;
+ delete [] tuple_ptr_array_;
+}
+
+// Additive initialization: can be called multiple times, with
+// new sets of operation, dependency, and config information.
+
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> int
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
init (int config_count,
@@ -117,23 +182,48 @@ init (int config_count,
RtecScheduler::SYNCHRONIZATION_FAILURE,
RtecScheduler::INTERNAL))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::init.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK_RETURN (-1);
int result = 0;
+ int i = 0;
+/* WSOA merge - commented out
// Clear out the previous entries, if any.
this->close (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK_RETURN (-1);
+*/
+
+ // Re-map the RT_Info and dependency handle values if necessary.
+ // Assumes that dependencies only refer to handles within the
+ // current set: changing that assumption would require us to use
+ // operation names, and the equivalent of a symbol table and
+ // relocating linker for RT_Infos to do this correctly in the
+ // general case.
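+ // For example, if four RT_Infos were registered by an earlier call
+ // (so next_handle_ is 5) and this batch uses handles 1..3, the
+ // batch's handles become 5..7 and its dependency references are
+ // shifted by the same offset of 4.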
+ if (this->next_handle_ > 1)
+ {
+ for (i = 0; i < rt_info_count; ++i)
+ {
+ rt_info [i].handle += this->next_handle_ - 1;
+ }
+ for (i = 0; i < dependency_count; ++i)
+ {
+ dependency_info [i].info_that_depends += this->next_handle_ - 1;
+ dependency_info [i].info_depended_on += this->next_handle_ - 1;
+ }
+ }
// (Re)initialize using the new settings.
// Add the passed config infos to the scheduler
auto_ptr<RtecScheduler::Config_Info> new_config_info_ptr;
- for (this->config_info_count_ = 0;
- this->config_info_count_ < config_count;
- ++this->config_info_count_)
+ for (i = 0; i < config_count; ++i)
{
RtecScheduler::Config_Info* new_config_info;
ACE_NEW_THROW_EX (new_config_info,
@@ -144,7 +234,7 @@ init (int config_count,
// Make sure the new config info is cleaned up if we exit abruptly.
ACE_AUTO_PTR_RESET (new_config_info_ptr, new_config_info, RtecScheduler::Config_Info);
- result = config_info_map_.bind (config_info [config_info_count_].preemption_priority,
+ result = config_info_map_.bind (config_info [i].preemption_priority,
new_config_info);
switch (result)
{
@@ -161,11 +251,11 @@ init (int config_count,
}
new_config_info->preemption_priority =
- config_info [config_info_count_].preemption_priority;
+ config_info [i].preemption_priority;
new_config_info->thread_priority =
- config_info [config_info_count_].thread_priority;
+ config_info [i].thread_priority;
new_config_info->dispatching_type =
- config_info [config_info_count_].dispatching_type;
+ config_info [i].dispatching_type;
if (new_config_info->preemption_priority >
last_scheduled_priority_)
@@ -177,19 +267,30 @@ init (int config_count,
// Release the auto_ptr so it does not clean
// up the successfully bound config info.
new_config_info_ptr.release ();
+
+ // Increase the count of successfully bound config infos.
+ ++this->config_info_count_;
}
// Add RT_Infos to scheduler
- RtecScheduler::RT_Info* new_rt_info;
+ TAO_RT_Info_Ex* new_rt_info;
for (int num_rt_infos = 0; num_rt_infos < rt_info_count; ++num_rt_infos)
{
new_rt_info = create_i (rt_info [num_rt_infos].entry_point,
- rt_info [num_rt_infos].handle
+ rt_info [num_rt_infos].handle, 1
ACE_ENV_ARG_PARAMETER);
ACE_CHECK_RETURN (-1);
+ if (new_rt_info == 0)
+ {
+ ACE_THROW_RETURN (RtecScheduler::INTERNAL (), -1);
+ }
+
+ // Set the new info's enabled state
+ new_rt_info->enabled_state (rt_info [num_rt_infos].enabled);
+
// Fill in the portions to which the user has access.
- set_i (new_rt_info,
+ this->set_i (new_rt_info,
RtecScheduler::Criticality_t (rt_info [num_rt_infos].criticality),
rt_info [num_rt_infos].worst_case_execution_time,
rt_info [num_rt_infos].typical_execution_time,
@@ -198,7 +299,9 @@ init (int config_count,
RtecScheduler::Importance_t (rt_info [num_rt_infos].importance),
rt_info [num_rt_infos].quantum,
rt_info [num_rt_infos].threads,
- RtecScheduler::Info_Type_t (rt_info [num_rt_infos].info_type));
+ RtecScheduler::Info_Type_t (rt_info [num_rt_infos].info_type)
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK_RETURN (-1);
// Fill in the scheduler managed portions.
new_rt_info->priority =
@@ -210,16 +313,17 @@ init (int config_count,
new_rt_info->volatile_token = 0;
// Add dependencies between RT_Infos to scheduler.
- for (this->dependency_count_ = 0;
- this->dependency_count_ < dependency_count;
- ++this->dependency_count_)
+ for (i = 0; i < dependency_count; ++i)
{
- add_dependency_i (dependency_info [dependency_count_].info_that_depends,
- dependency_info [dependency_count_].info_depended_on,
- dependency_info [dependency_count_].number_of_calls,
- dependency_info [dependency_count_].dependency_type
+ add_dependency_i (dependency_info [i].info_that_depends,
+ dependency_info [i].info_depended_on,
+ dependency_info [i].number_of_calls,
+ dependency_info [i].dependency_type,
+ dependency_info [i].enabled
ACE_ENV_ARG_PARAMETER);
ACE_CHECK_RETURN (-1);
+
+ ++this->dependency_count_;
}
}
@@ -235,15 +339,22 @@ init (int config_count,
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::close (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
RtecScheduler::UNKNOWN_TASK,
RtecScheduler::SYNCHRONIZATION_FAILURE))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::close.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK;
- // Unbind and delete each RT_Info in the map.
- RtecScheduler::RT_Info *rt_info;
+ // Unbind and delete each RT_Info in the map: this also cleans up
+ // all the entries and tuples associated with each RT_Info.
+ TAO_RT_Info_Ex *rt_info;
RtecScheduler::handle_t handle;
while (rt_info_map_.current_size () > 0)
{
@@ -252,11 +363,15 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::close (ACE_ENV_SINGLE
{
if (rt_info_tree_.unbind (rt_info->entry_point) == 0)
{
+ // Delete the entry associated with the RT_Info, then
+ // the RT_Info itself.
+ delete ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ rt_info->volatile_token);
delete rt_info;
}
else
{
- ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ ACE_THROW (RtecScheduler::INTERNAL ());
}
}
else
@@ -277,23 +392,60 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::close (ACE_ENV_SINGLE
}
else
{
- ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+
+ // Delete each dependency set in the caller map
+ RtecScheduler::Dependency_Set *dependency_set;
+ while (calling_dependency_set_map_.current_size () > 0)
+ {
+ handle = (*calling_dependency_set_map_.begin ()).ext_id_;
+ if (calling_dependency_set_map_.unbind (handle, dependency_set) == 0)
+ {
+ delete dependency_set;
+ }
+ else
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+
+ // Delete each dependency set in the called map
+ while (called_dependency_set_map_.current_size () > 0)
+ {
+ handle = (*called_dependency_set_map_.begin ()).ext_id_;
+ if (called_dependency_set_map_.unbind (handle, dependency_set) == 0)
+ {
+ delete dependency_set;
+ }
+ else
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
}
}
// Zero out the scheduling entry pointer array but do not deallocate it.
if (entry_ptr_array_size_ > 0)
{
- ACE_OS::memset (entry_ptr_array_, 0,
- sizeof (TAO_Reconfig_Scheduler_Entry *) *
- entry_ptr_array_size_);
+ ACE_OS::memset (this->entry_ptr_array_, 0,
+ sizeof (TAO_Reconfig_Scheduler_Entry *)
+ * this->entry_ptr_array_size_);
}
+ // Zero out the tuple pointer array but do not deallocate it.
+ if (tuple_ptr_array_size_ > 0)
+ {
+ ACE_OS::memset (this->tuple_ptr_array_, 0,
+ sizeof (TAO_RT_Info_Tuple *)
+ * this->tuple_ptr_array_size_);
+ }
// Finally, reset the entry counts and start over with the lowest
// handle number.
this->config_info_count_ = 0;
this->rt_info_count_ = 0;
+ this->rt_info_tuple_count_ = 0;
this->next_handle_ = 1;
}
@@ -312,12 +464,17 @@ create (const char *entry_point
RtecScheduler::INTERNAL,
RtecScheduler::SYNCHRONIZATION_FAILURE))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::create.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK_RETURN (0);
RtecScheduler::handle_t handle = next_handle_;
- create_i (entry_point, handle ACE_ENV_ARG_PARAMETER);
+ create_i (entry_point, handle, 0 ACE_ENV_ARG_PARAMETER);
ACE_CHECK_RETURN (handle);
// Set affected stability flags.
@@ -340,6 +497,11 @@ lookup (const char * entry_point
RtecScheduler::UNKNOWN_TASK,
RtecScheduler::SYNCHRONIZATION_FAILURE))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::lookup.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK_RETURN (0);
@@ -363,12 +525,17 @@ get (RtecScheduler::handle_t handle
RtecScheduler::UNKNOWN_TASK,
RtecScheduler::SYNCHRONIZATION_FAILURE))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::get.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK_RETURN (0);
// Find the RT_Info in the hash map.
- RtecScheduler::RT_Info *rt_info = 0;
+ TAO_RT_Info_Ex *rt_info = 0;
if (rt_info_map_.find (handle, rt_info) != 0)
{
ACE_THROW_RETURN (RtecScheduler::UNKNOWN_TASK (), 0);
@@ -408,91 +575,378 @@ set (RtecScheduler::handle_t handle,
RtecScheduler::INTERNAL,
RtecScheduler::SYNCHRONIZATION_FAILURE))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK;
// Look up the RT_Info by its handle, throw an exception if it's not there.
- RtecScheduler::RT_Info *rt_info_ptr = 0;
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
if (rt_info_map_.find (handle, rt_info_ptr) != 0)
{
ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
}
+ if (rt_info_ptr == 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Enable the RT_Info if it was disabled. Does not modify NON_VOLATILE ops.
+ if (rt_info_ptr->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ rt_info_ptr->enabled_state (RtecScheduler::RT_INFO_ENABLED);
+ }
+
// Call the internal set method.
this->set_i (rt_info_ptr, criticality, time, typical_time,
cached_time, period, importance, quantum,
- threads, info_type);
+ threads, info_type ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
- // Update stability flags, based on changes to operation characteristics.
- // Reference the associated scheduling entry: the double cast is
- // needed to ensure that the size of the pointer and the size of the
- // stored magic cookie are the same (see the definition of
- // ptrdiff_t in ACE to grok how this works portably).
- TAO_Reconfig_Scheduler_Entry *sched_entry_ptr =
- ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
- rt_info_ptr->volatile_token);
+ // Update stability flags. For now, just mark everything as unstable.
+ // @@ TODO - revisit this and see if we can efficiently detect when
+ // changes do not affect stability of various aspects.
+ this->stability_flags_ |= SCHED_UTILIZATION_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PRIORITY_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PROPAGATION_NOT_STABLE;
+
+ return;
+}
- if (0 == sched_entry_ptr)
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+reset (RtecScheduler::handle_t handle,
+ RtecScheduler::Criticality_t criticality,
+ RtecScheduler::Time time,
+ RtecScheduler::Time typical_time,
+ RtecScheduler::Time cached_time,
+ RtecScheduler::Period_t period,
+ RtecScheduler::Importance_t importance,
+ RtecScheduler::Quantum_t quantum,
+ CORBA::Long threads,
+ RtecScheduler::Info_Type_t info_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::reset.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
+ if (rt_info_map_.find (handle, rt_info_ptr) != 0)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+ if (rt_info_ptr == 0)
{
ACE_THROW (RtecScheduler::INTERNAL ());
}
- // Test the utilization difference between the old and new values.
- if ((sched_entry_ptr->orig_rt_info_data ().period != rt_info_ptr->period
- && sched_entry_ptr->orig_rt_info_data ().worst_case_execution_time
- != rt_info_ptr->worst_case_execution_time))
+ // Reset and re-enable the RT_Info; NON_VOLATILE ops cannot be reset.
+ if (rt_info_ptr->enabled_state () == RtecScheduler::RT_INFO_NON_VOLATILE)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+ else
{
- CORBA::Double orig_time = ACE_static_cast (
- CORBA::Double,
- ACE_UINT64_DBLCAST_ADAPTER (sched_entry_ptr->
- orig_rt_info_data ().
- worst_case_execution_time));
+ // Reset the RT_Info.
+ rt_info_ptr->reset (TAO_Reconfig_Scheduler_Entry::ORIGINAL
+ | TAO_Reconfig_Scheduler_Entry::PROPAGATED);
- CORBA::Double orig_period =
- sched_entry_ptr->orig_rt_info_data ().period;
+ rt_info_ptr->enabled_state (RtecScheduler::RT_INFO_ENABLED);
+ }
- CORBA::Double new_time = ACE_static_cast (
- CORBA::Double,
- ACE_UINT64_DBLCAST_ADAPTER (rt_info_ptr->
- worst_case_execution_time));
+ // Refresh the internal tuple pointer array.
+ this->refresh_tuple_ptr_array_i (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
- CORBA::Double new_period = rt_info_ptr->period;
+ // Then call the internal set method.
+ this->set_i (rt_info_ptr, criticality, time, typical_time,
+ cached_time, period, importance, quantum,
+ threads, info_type ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
- if ((orig_time / orig_period) - (new_time / new_period) > DBL_EPSILON
- || (orig_time / orig_period) - (new_time / new_period) < DBL_EPSILON)
+
+ // Update stability flags. For now, just mark everything as unstable.
+ // @@ TODO - revisit this and see if we can efficiently detect when
+ // changes do not affect stability of various aspects.
+ this->stability_flags_ |= SCHED_UTILIZATION_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PRIORITY_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PROPAGATION_NOT_STABLE;
+
+ return;
+}
+
+
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+set_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_seq.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ for (u_int i = 0; i < infos.length (); ++i)
+ {
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
+ if (rt_info_map_.find (infos[i].handle, rt_info_ptr) != 0)
{
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+
+ if (rt_info_ptr == 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Enable the RT_Info if it was disabled. Does not modify NON_VOLATILE ops.
+ if (rt_info_ptr->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ rt_info_ptr->enabled_state (RtecScheduler::RT_INFO_ENABLED);
+ }
+
+ // Call the internal set method.
+ this->set_i (rt_info_ptr,
+ infos[i].criticality,
+ infos[i].worst_case_execution_time,
+ infos[i].typical_execution_time,
+ infos[i].cached_execution_time,
+ infos[i].period,
+ infos[i].importance,
+ infos[i].quantum,
+ infos[i].threads,
+ infos[i].info_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+
+ // Update stability flags. For now, just mark everything as unstable.
+ // @@ TODO - revisit this and see if we can efficiently detect when
+ // changes do not affect stability of various aspects.
this->stability_flags_ |= SCHED_UTILIZATION_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PRIORITY_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PROPAGATION_NOT_STABLE;
+
+ return;
+}
+
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+reset_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::reset_seq.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
+ u_int i;
+
+ for (i = 0; i < infos.length (); ++i)
+ {
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ if (rt_info_map_.find (infos[i].handle, rt_info_ptr) != 0)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+
+ // Reset and re-enable the RT_Info; NON_VOLATILE ops cannot be reset.
+ if (rt_info_ptr->enabled_state () == RtecScheduler::RT_INFO_NON_VOLATILE)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+ else
+ {
+ // Reset the RT_Info.
+ rt_info_ptr->reset (TAO_Reconfig_Scheduler_Entry::ORIGINAL
+ | TAO_Reconfig_Scheduler_Entry::PROPAGATED);
+
+ rt_info_ptr->enabled_state (RtecScheduler::RT_INFO_ENABLED);
}
}
- // Test the priority difference between the old and new info.
- if (RECONFIG_SCHED_STRATEGY::priority_diff (sched_entry_ptr->
- orig_rt_info_data (),
- *rt_info_ptr) != 0)
+ // Refresh the internal tuple pointer array.
+ this->refresh_tuple_ptr_array_i (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+
+ for (i = 0; i < infos.length (); ++i)
+ {
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ if (rt_info_map_.find (infos[i].handle, rt_info_ptr) != 0)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+
+ if (rt_info_ptr == 0)
{
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Call the internal set method.
+ this->set_i (rt_info_ptr,
+ infos[i].criticality,
+ infos[i].worst_case_execution_time,
+ infos[i].typical_execution_time,
+ infos[i].cached_execution_time,
+ infos[i].period,
+ infos[i].importance,
+ infos[i].quantum,
+ infos[i].threads,
+ infos[i].info_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+
+ // Update stability flags. For now, just mark everything as unstable.
+ // @@ TODO - revisit this and see if we can efficiently detect when
+ // changes do not affect stability of various aspects.
+ this->stability_flags_ |= SCHED_UTILIZATION_NOT_STABLE;
this->stability_flags_ |= SCHED_PRIORITY_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PROPAGATION_NOT_STABLE;
+
+ return;
+}
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+replace_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::replace_seq.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
+
+ for (ACE_TYPENAME RT_INFO_MAP::iterator info_iter (this->rt_info_map_);
+ info_iter.done () == 0;
+ ++info_iter)
+ {
+ // Get a pointer to each registered RT_Info.
+ rt_info_ptr = (*info_iter).int_id_;
+ if (! rt_info_ptr)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ switch (rt_info_ptr->enabled_state ())
+ {
+ case RtecScheduler::RT_INFO_ENABLED:
+
+ // Disable enabled RT_Infos.
+ rt_info_ptr->enabled_state (RtecScheduler::RT_INFO_DISABLED);
+
+ // Reset Enabled and Non-Volatile RT_Infos.
+ rt_info_ptr->reset (TAO_Reconfig_Scheduler_Entry::ORIGINAL
+ | TAO_Reconfig_Scheduler_Entry::PROPAGATED);
+ break;
+
+ // Intentional fall-through to ignore non-volatile RT_Infos
+ case RtecScheduler::RT_INFO_NON_VOLATILE:
+
+ default: // Ignore disabled RT_Infos.
+ break;
+ }
}
- // If the period changed, look up the handle in the calling
- // dependency map and see if there is anything there: if so,
- // the propagation is unstable.
- if (sched_entry_ptr->orig_rt_info_data ().period
- != rt_info_ptr->period)
+ // Zero out the tuple pointer array, set count to zero
+ ACE_OS::memset (this->tuple_ptr_array_, 0,
+ sizeof (TAO_RT_Info_Tuple *)
+ * this->tuple_ptr_array_size_);
+ this->rt_info_tuple_count_ = 0;
+
+ for (u_int i = 0; i < infos.length (); ++i)
{
- // Get the dependency set for the current entry.
- RtecScheduler::Dependency_Set *dependency_set = 0;
- int result = calling_dependency_set_map_.find (rt_info_ptr->handle,
- dependency_set);
- if (result == 0 && dependency_set->length () > 0)
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ if (rt_info_map_.find (infos[i].handle, rt_info_ptr) != 0)
{
- this->stability_flags_ |= SCHED_PROPAGATION_NOT_STABLE;
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+
+ if (rt_info_ptr == 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
}
+
+ // Enable the RT_Info if it was disabled. Does not modify NON_VOLATILE ops.
+ if (rt_info_ptr->enabled_state () == RtecScheduler::RT_INFO_DISABLED)
+ {
+ rt_info_ptr->enabled_state (RtecScheduler::RT_INFO_ENABLED);
+ }
+
+ // Call the internal set method.
+ this->set_i (rt_info_ptr,
+ infos[i].criticality,
+ infos[i].worst_case_execution_time,
+ infos[i].typical_execution_time,
+ infos[i].cached_execution_time,
+ infos[i].period,
+ infos[i].importance,
+ infos[i].quantum,
+ infos[i].threads,
+ infos[i].info_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
}
- // Update the stored operation characteristics values in the scheduling entry
- sched_entry_ptr->orig_rt_info_data (*rt_info_ptr);
+ // Update stability flags. For now, just mark everything as unstable.
+ // @@ TODO - revisit this and see if we can efficiently detect when
+ // changes do not affect stability of various aspects.
+ this->stability_flags_ |= SCHED_UTILIZATION_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PRIORITY_NOT_STABLE;
+ this->stability_flags_ |= SCHED_PROPAGATION_NOT_STABLE;
+
+ return;
}
@@ -523,7 +977,11 @@ priority (RtecScheduler::handle_t handle,
ACE_THROW (RtecScheduler::NOT_SCHEDULED ());
}
- RtecScheduler::RT_Info *rt_info = 0;
+ // CDG - TBD - address priority "generations" i.e., after an
+ // adaptive transition. For now, go ahead and return whatever
+ // priority is there, even if the RT_Info_Ex is disabled.
+
+ TAO_RT_Info_Ex *rt_info = 0;
if (rt_info_map_.find (handle, rt_info) != 0)
{
ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
@@ -551,6 +1009,11 @@ entry_point_priority (const char * entry_point,
RtecScheduler::SYNCHRONIZATION_FAILURE,
RtecScheduler::NOT_SCHEDULED))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::entry_point_priority.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK;
@@ -582,13 +1045,18 @@ add_dependency (RtecScheduler::handle_t handle /* RT_Info that has the dependenc
RtecScheduler::SYNCHRONIZATION_FAILURE,
RtecScheduler::UNKNOWN_TASK))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::add_dependency.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK;
// Delegate to the internal method.
- add_dependency_i (handle, dependency, number_of_calls,
- dependency_type ACE_ENV_ARG_PARAMETER);
+ add_dependency_i (handle, dependency, number_of_calls, dependency_type,
+ RtecBase::DEPENDENCY_ENABLED ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
// Since the call graph topology has changed, set *all*
@@ -598,6 +1066,183 @@ add_dependency (RtecScheduler::handle_t handle /* RT_Info that has the dependenc
}
+// This method removes a dependency between two RT_Infos.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+remove_dependency (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::remove_dependency.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ // Delegate to the internal method.
+ remove_dependency_i (handle, dependency, number_of_calls,
+ dependency_type ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Since the call graph topology has changed, mark the schedule
+ // as unstable before decrementing the dependency count.
+ this->stability_flags_ |= SCHED_UTILIZATION_NOT_STABLE;
+ --dependency_count_;
+}
+
+// This method sets the enable state for a dependency between two RT_Infos.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+set_dependency_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_dependency_enable_state.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ // Delegate to the internal method.
+ set_dependency_enable_state_i (handle, dependency, number_of_calls,
+ dependency_type, enabled ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+}
+
+
+// This method sets the enable state of a sequence of dependencies.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+set_dependency_enable_state_seq (const RtecScheduler::Dependency_Set & dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_dependency_enable_state_seq.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ // Delegate to the internal method for each dependency in the sequence.
+ for (u_int i = 0; i < dependencies.length (); ++i)
+ {
+ set_dependency_enable_state_i (dependencies[i].rt_info,
+ dependencies[i].rt_info_depended_on,
+ dependencies[i].number_of_calls,
+ dependencies[i].dependency_type,
+ dependencies[i].enabled
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ }
+}
+
+
+// This method enables or disables an RT_Info.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+set_rt_info_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::RT_Info_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_rt_info_enable_state.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
+ if (rt_info_map_.find (handle, rt_info_ptr) != 0)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+
+ if (rt_info_ptr == 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Set the RT_Info's enabled state.
+ rt_info_ptr->enabled_state (enabled);
+}
+
+
+// This method enables or disables a sequence of RT_Infos.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+set_rt_info_enable_state_seq (const RtecScheduler::RT_Info_Enable_State_Pair_Set & pair_set
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_rt_info_enable_state_seq.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
+ RtecScheduler::SYNCHRONIZATION_FAILURE ());
+ ACE_CHECK;
+
+ for (u_int i = 0; i < pair_set.length (); ++i)
+ {
+ // Look up the RT_Info by its handle, throw an exception if it's not there.
+ TAO_RT_Info_Ex *rt_info_ptr = 0;
+ if (rt_info_map_.find (pair_set[i].handle, rt_info_ptr) != 0)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+
+ if (rt_info_ptr == 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Set the RT_Info's enabled state.
+ rt_info_ptr->enabled_state (pair_set[i].enabled);
+ }
+}
+
+
// If information has been added or changed since the last stable
// schedule was computed, this method causes scheduling information
// to be computed for all registered RT_Infos. If the schedule is
@@ -609,6 +1254,7 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
compute_scheduling (CORBA::Long minimum_priority,
CORBA::Long maximum_priority,
RtecScheduler::RT_Info_Set_out infos,
+ RtecScheduler::Dependency_Set_out dependencies,
RtecScheduler::Config_Info_Set_out configs,
RtecScheduler::Scheduling_Anomaly_Set_out anomalies
ACE_ENV_ARG_DECL)
@@ -620,6 +1266,53 @@ compute_scheduling (CORBA::Long minimum_priority,
RtecScheduler::INTERNAL,
RtecScheduler::DUPLICATE_NAME))
{
+ // Delegates to recompute_scheduling and the respective accessors.
+ this->recompute_scheduling (minimum_priority, maximum_priority,
+ anomalies ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ this->get_rt_info_set (infos ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ this->get_dependency_set (dependencies ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ this->get_config_info_set (configs ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+#if defined (SCHEDULER_DUMP)
+ ACE_DEBUG ((LM_TRACE, "Schedule prepared.\n"));
+ ACE_DEBUG ((LM_TRACE, "Dumping to stdout.\n"));
+ ACE_Scheduler_Factory::dump_schedule (*(infos.ptr()), *(dependencies.ptr()), *(configs.ptr()),
+ *(anomalies.ptr()), 0);
+ ACE_DEBUG ((LM_TRACE, "Dump done.\n"));
+#endif // SCHEDULER_DUMP
+
+ return;
+}
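+// A rough client-side usage sketch (standard CORBA C++ mapping; the
+// variable names are illustrative and the environment argument is
+// omitted):
+//
+//   RtecScheduler::RT_Info_Set_var infos;
+//   RtecScheduler::Dependency_Set_var dependencies;
+//   RtecScheduler::Config_Info_Set_var configs;
+//   RtecScheduler::Scheduling_Anomaly_Set_var anomalies;
+//   scheduler->compute_scheduling (min_priority, max_priority,
+//                                  infos.out (), dependencies.out (),
+//                                  configs.out (), anomalies.out ());
+//
+// or, when only the recomputation is needed:
+//
+//   scheduler->recompute_scheduling (min_priority, max_priority,
+//                                    anomalies.out ());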
+
+// Recomputes the scheduling priorities, etc.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+recompute_scheduling (CORBA::Long minimum_priority,
+ CORBA::Long maximum_priority,
+ RtecScheduler::Scheduling_Anomaly_Set_out anomalies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UTILIZATION_BOUND_EXCEEDED,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INSUFFICIENT_THREAD_PRIORITY_LEVELS,
+ RtecScheduler::TASK_COUNT_MISMATCH,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::DUPLICATE_NAME))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::compute_scheduling.\n"));
+ " TAO_Reconfig_Scheduler::recompute_scheduling.\n"));
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK;
@@ -631,43 +1324,114 @@ compute_scheduling (CORBA::Long minimum_priority,
// If everything is already up to date, we're done.
if (SCHED_ALL_STABLE == stability_flags_)
{
+
+ // Must always provide a value for an out parameter
+ ACE_NEW_THROW_EX (anomalies,
+ RtecScheduler::Scheduling_Anomaly_Set (0),
+ CORBA::NO_MEMORY ());
+ ACE_CHECK;
return;
}
// @@ TO DO - use try/catch blocks to catch exceptions and add anomalies
// to scheduling anomaly set, and then perhaps rethrow)
- if (this->stability_flags_ & SCHED_PROPAGATION_NOT_STABLE)
+ if ((this->stability_flags_ & SCHED_PROPAGATION_NOT_STABLE)
+ || (this->stability_flags_ & SCHED_UTILIZATION_NOT_STABLE))
{
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "1_pre_crit_traverse.txt");
+#endif
+
+ // Traverse criticality dependency graph, assigning a
+ // topological ordering and identifying threads.
+ crit_dfs_traverse_i (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "2_crit_dfs_traverse_i.txt");
+#endif
+
+ // Propagate criticalities.
+ propagate_criticalities_i (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "3_propagate_criticalities_i.txt");
+#endif
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "4_pre_traverse.txt");
+#endif
+
// Traverse dependency graph, assigning a topological ordering and identifying threads.
dfs_traverse_i (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK;
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "5_dfs_traverse_i.txt");
+#endif
// Sort an array of RT_info handles in topological order, check
// for loops using the strongly connected components algorithm.
detect_cycles_i (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK;
-
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "6_detect_cycles_i.txt");
+#endif
+
+ // Perform admission control for task delineator rate tuples.
+ perform_admission_i (ACE_ENV_SINGLE_ARG_PARAMETER);
+ ACE_CHECK;
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "7_perform_admission_i.txt");
+#endif
+
// Propagate effective execution time and period, set total frame size.
propagate_characteristics_i (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK;
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "8_propagate_characteristics_i.txt");
+#endif
+
}
if (this->stability_flags_ & SCHED_PRIORITY_NOT_STABLE)
{
- // Sort operations by urgency (done by strategy), then
- // assign priorities and subpriorities in one pass.
- // Sets last scheduled priority.
+ // Sort operations by urgency, then assign priorities and
+ // subpriorities in one pass. Sets last scheduled priority and
+ // last feasible priority.
assign_priorities_i (ACE_ENV_SINGLE_ARG_PARAMETER);
ACE_CHECK;
+
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_entries(entry_ptr_array_,
+ this->rt_info_count_,
+ "9_assign_priorities_i.txt");
+#endif
+
}
- if (this->stability_flags_ & SCHED_UTILIZATION_NOT_STABLE)
- {
- // Compute utilization, set last feasible priority.
- compute_utilization_i (ACE_ENV_SINGLE_ARG_PARAMETER);
- ACE_CHECK;
- }
// @@ TODO: record any scheduling anomalies in a set within the scheduler,
// storing the maximum severity level recorded so far.
@@ -679,7 +1443,27 @@ compute_scheduling (CORBA::Long minimum_priority,
ACE_CHECK;
}
+
+ // Set stability flags last.
+ this->stability_flags_ = SCHED_ALL_STABLE;
+ return;
+}
+
+
+// Returns the set of rt_infos, with their assigned priorities (as
+// of the last schedule re-computation).
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+get_rt_info_set (RtecScheduler::RT_Info_Set_out infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL))
+{
// return the set of scheduled RT_Infos
+
if (infos.ptr () == 0)
{
ACE_NEW_THROW_EX (infos,
@@ -687,17 +1471,81 @@ compute_scheduling (CORBA::Long minimum_priority,
CORBA::NO_MEMORY ());
ACE_CHECK;
}
+
infos->length (this->rt_info_count_);
- RtecScheduler::RT_Info* rt_info = 0;
+ TAO_RT_Info_Ex* rt_info = 0;
for (ACE_TYPENAME RT_INFO_MAP::iterator info_iter (this->rt_info_map_);
info_iter.done () == 0;
++info_iter)
{
+ // TODO - rethink this: is it more useful to only return the *enabled* RT_Infos?
rt_info = (*info_iter).int_id_;
infos[ACE_static_cast (CORBA::ULong, rt_info->handle - 1)] = *rt_info;
}
- // return the set of scheduled Config_Infos
+ return;
+}
+
+
+// Returns the set of dependencies among the registered RT_Infos (as
+// of the last schedule re-computation).
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+get_dependency_set (RtecScheduler::Dependency_Set_out dependencies
+ ACE_ENV_ARG_DECL_NOT_USED)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL))
+{
+ // Return the set of dependencies: just need to iterate over one of the maps.
+
+ if (dependencies.ptr () == 0)
+ {
+ dependencies = new RtecScheduler::Dependency_Set (this->dependency_count_);
+ }
+ dependencies->length (this->dependency_count_);
+ RtecScheduler::Dependency_Set *dependency_set = 0;
+ int i = 0;
+ for (ACE_TYPENAME DEPENDENCY_SET_MAP::iterator
+ dependency_iter (this->called_dependency_set_map_);
+ dependency_iter.done () == 0 && i < this->dependency_count_;
+ ++dependency_iter)
+ {
+ dependency_set = (*dependency_iter).int_id_;
+ for (u_int j = 0;
+ j < dependency_set->length () && i < this->dependency_count_;
+ ++i, ++j)
+ {
+ (* dependencies) [i] = (*dependency_set) [j];
+ // For two-way calls, swap the handles (stored in reverse order in the called map)
+ if ((* dependencies) [i].dependency_type == RtecBase::TWO_WAY_CALL)
+ {
+ (* dependencies) [i].rt_info = (* dependency_set) [j].rt_info_depended_on;
+ (* dependencies) [i].rt_info_depended_on = (* dependency_set) [j].rt_info;
+ }
+ }
+ }
+
+ return;
+}
+
+
+// Returns the set of config_infos, describing the appropriate
+// number, types, and priority levels for the dispatching lanes.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+get_config_info_set (RtecScheduler::Config_Info_Set_out configs
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL))
+{
+ // Return the set of scheduled Config_Infos.
+
if (configs.ptr () == 0)
{
ACE_NEW_THROW_EX (configs,
@@ -706,6 +1554,7 @@ compute_scheduling (CORBA::Long minimum_priority,
CORBA::NO_MEMORY ());
ACE_CHECK;
}
+
configs->length (this->config_info_count_);
RtecScheduler::Config_Info* config_info = 0;
for (ACE_TYPENAME CONFIG_INFO_MAP::iterator config_iter (this->config_info_map_);
@@ -716,14 +1565,6 @@ compute_scheduling (CORBA::Long minimum_priority,
configs[ACE_static_cast (CORBA::ULong, config_info->preemption_priority)] = *config_info;
}
- ACE_DEBUG ((LM_DEBUG, "Schedule prepared.\n"));
- ACE_DEBUG ((LM_DEBUG, "Dumping to stdout.\n"));
- ACE_Scheduler_Factory::dump_schedule (*(infos.ptr()), *(configs.ptr()),
- *(anomalies.ptr()), 0);
- ACE_DEBUG ((LM_DEBUG, "Dump done.\n"));
-
- // Set stability flags last.
- this->stability_flags_ = SCHED_ALL_STABLE;
return;
}
@@ -743,6 +1584,11 @@ dispatch_configuration (RtecScheduler::Preemption_Priority_t p_priority,
RtecScheduler::SYNCHRONIZATION_FAILURE,
RtecScheduler::UNKNOWN_PRIORITY_LEVEL))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::dispatch_configuration.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK;
@@ -773,11 +1619,16 @@ dispatch_configuration (RtecScheduler::Preemption_Priority_t p_priority,
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
RtecScheduler::Preemption_Priority_t
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
-last_scheduled_priority (ACE_ENV_SINGLE_ARG_DECL)
+last_scheduled_priority (ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::SYNCHRONIZATION_FAILURE,
RtecScheduler::NOT_SCHEDULED))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::last_scheduled_priority.\n"));
+#endif /* SCHEDULER_LOGGING */
+
ACE_GUARD_THROW_EX (ACE_LOCK, ace_mon, this->mutex_,
RtecScheduler::SYNCHRONIZATION_FAILURE ());
ACE_CHECK_RETURN (0);
@@ -835,37 +1686,52 @@ get_config_infos (RtecScheduler::Config_Info_Set_out configs
}
}
-// Internal method to create an RT_Info. If it does not exist, a new RT_Info is
-// created and inserted into the schedule, and the handle of the new
-// RT_Info is returned. If the RT_Info already exists, an exception
-// is thrown.
+
+// Internal method to create an RT_Info. If it does not exist, a new
+// RT_Info is created, inserted into the schedule, and returned. If the
+// RT_Info already exists and the ignore_duplicates flag is set, the
+// existing RT_Info is simply returned; otherwise, a DUPLICATE_NAME
+// exception is thrown.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-RtecScheduler::RT_Info *
+TAO_RT_Info_Ex *
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
create_i (const char *entry_point,
- RtecScheduler::handle_t handle
+ RtecScheduler::handle_t handle,
+ int ignore_duplicates
ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::DUPLICATE_NAME,
RtecScheduler::INTERNAL))
{
- RtecScheduler::RT_Info* new_rt_info = 0;
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::create_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ TAO_RT_Info_Ex* new_rt_info = 0;
TAO_Reconfig_Scheduler_Entry* new_sched_entry = 0;
int result = 0;
+ // If we're ignoring duplicates, check for and return the existing
+ // entry if there is one.
+ if (ignore_duplicates
+ && rt_info_map_.find (handle, new_rt_info) == 0)
+ {
+ return new_rt_info;
+ }
+
// Create a new scheduling entry for the RT_Info.
ACE_NEW_THROW_EX (new_rt_info,
- RtecScheduler::RT_Info,
+ TAO_RT_Info_Ex,
CORBA::NO_MEMORY ());
ACE_CHECK_RETURN (0);
// Make sure the new scheduling entry is cleaned up if we exit abruptly.
- auto_ptr<RtecScheduler::RT_Info> new_rt_info_ptr (new_rt_info);
+ auto_ptr<TAO_RT_Info_Ex> new_rt_info_ptr (new_rt_info);
// Set some reasonable default values, and store the passed ones.
- TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::init_rt_info (*new_rt_info);
- new_rt_info->entry_point = CORBA::string_dup(entry_point);
+ new_rt_info->entry_point = CORBA::string_dup (entry_point);
new_rt_info->handle = handle;
// Bind the new RT_Info to its handle, in the RT_Info map.
@@ -878,7 +1744,16 @@ create_i (const char *entry_point,
case 1:
// Tried to bind an operation that was already in the map.
+ if (ignore_duplicates)
+ {
+ // Should never get here unless something is badly awry.
+ ACE_THROW_RETURN (RtecScheduler::INTERNAL (), 0);
+ }
+ else
+ {
+ // Already bound, and we're not ignoring duplicates.
ACE_THROW_RETURN (RtecScheduler::DUPLICATE_NAME (), 0);
+ }
default:
break;
@@ -911,74 +1786,27 @@ create_i (const char *entry_point,
// Make sure the new scheduling entry is cleaned up if we exit abruptly.
auto_ptr<TAO_Reconfig_Scheduler_Entry> new_sched_entry_ptr (new_sched_entry);
- // Make sure there is room in the scheduling entry pointer array:
- // expand the array eagerly, to minimize memory allocation overhead
-
- if (this->entry_ptr_array_size_ <= handle)
- {
- if (entry_ptr_array_size_ > 0)
- {
- // Store previous array size.
- long new_size = entry_ptr_array_size_;
-
- // Double the size of the array until sufficient.
- do
- {
- new_size *= 2;
- }
- while (new_size <= handle);
-
- // Allocate the new array of the proper size, zero it out.
-
- TAO_Reconfig_Scheduler_Entry ** new_array;
- ACE_NEW_THROW_EX (new_array,
- TAO_Reconfig_Scheduler_Entry * [new_size],
- CORBA::NO_MEMORY ());
-
- ACE_OS::memset (new_array, 0,
- sizeof (TAO_Reconfig_Scheduler_Entry *) *
- new_size);
-
- // Copy in the previous array.
- ACE_OS::memcpy (new_array, entry_ptr_array_,
- sizeof (TAO_Reconfig_Scheduler_Entry *) *
- entry_ptr_array_size_);
+ // Maintain the size of the entry pointer array.
+ ::maintain_scheduling_array (entry_ptr_array_, entry_ptr_array_size_,
+ handle ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK_RETURN (0);
- // Free the old array and swap to point to the new one.
- delete [] entry_ptr_array_;
- entry_ptr_array_ = new_array;
- entry_ptr_array_size_ = new_size;
- }
- else
- {
- // For the first allocation, just start with sufficient space
- // for the handle that was given.
- ACE_NEW_THROW_EX (entry_ptr_array_,
- TAO_Reconfig_Scheduler_Entry * [handle + 1],
- CORBA::NO_MEMORY ());
- entry_ptr_array_size_ = handle + 1;
- }
- }
- // Atore in the scheduling entry pointer array.
+ // Store the new entry in the scheduling entry pointer array.
entry_ptr_array_ [handle - 1] = new_sched_entry;
- // Store a pointer to the scheduling entry in the scheduling entry
- // pointer array and in the RT_Info: the double cast is needed to
- // ensure that the size of the pointer and the size of the stored
- // magic cookie are the same (see the definition of ptrdiff_t in
- // ACE to grok how this works portably).
- new_rt_info->volatile_token =
- ACE_static_cast (CORBA::ULongLong,
- ACE_reinterpret_cast (ptrdiff_t,
- new_sched_entry));
-
// Release the auto pointers, so their destruction does not
// remove the new rt_info that is now in the map and tree,
// or the scheduling entry attached to the rt_info.
new_rt_info_ptr.release ();
new_sched_entry_ptr.release ();
+ // Connect the entry to the RT_Info.
+ new_rt_info->volatile_token =
+ ACE_static_cast (CORBA::ULongLong,
+ ACE_reinterpret_cast (ptrdiff_t,
+ new_sched_entry));
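+ // (The double cast above keeps the size of the pointer and the size
+ // of the stored magic cookie the same; see the definition of
+ // ptrdiff_t in ACE for how this works portably.)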
+
// With everything safely registered in the map and tree, just
// update the next handle and info counter and return the new info.
if (handle >= this->next_handle_)
@@ -993,12 +1821,11 @@ create_i (const char *entry_point,
return new_rt_info;
}
-
// Internal method to set characteristics of the passed RT_Info.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
-set_i (RtecScheduler::RT_Info *rt_info,
+set_i (TAO_RT_Info_Ex *rt_info,
RtecScheduler::Criticality_t criticality,
RtecScheduler::Time time,
RtecScheduler::Time typical_time,
@@ -1007,8 +1834,26 @@ set_i (RtecScheduler::RT_Info *rt_info,
RtecScheduler::Importance_t importance,
RtecScheduler::Quantum_t quantum,
CORBA::Long threads,
- RtecScheduler::Info_Type_t info_type)
+ RtecScheduler::Info_Type_t info_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // Do not allow conjunction nodes for now.
+ if (info_type == RtecScheduler::CONJUNCTION)
+ {
+ ACE_ERROR ((LM_ERROR,
+ ACE_TEXT("Conjunction nodes are not currently supported.\n")));
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+
+ // Set the base RT_Info's attributes to the latest passed values.
rt_info->criticality = criticality;
rt_info->worst_case_execution_time = time;
rt_info->typical_execution_time = typical_time;
@@ -1018,9 +1863,68 @@ set_i (RtecScheduler::RT_Info *rt_info,
rt_info->quantum = quantum;
rt_info->threads = threads;
rt_info->info_type = info_type;
+
+ // If a rate is advertised, create a separate tuple for that rate.
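+ // For example, an operation that is set () first with a 100 msec
+ // period and later with a 50 msec period presumably ends up holding
+ // one rate tuple per advertised period, from which the admission
+ // step can later select the rate that is actually scheduled.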
+ if (period > 0)
+ {
+ TAO_Reconfig_Scheduler_Entry * rse_ptr =
+ ACE_LONGLONG_TO_PTR (TAO_Reconfig_Scheduler_Entry *,
+ rt_info->volatile_token);
+// ACE_DEBUG((LM_DEBUG, "Updating or inserting tuple for RT_Info: %d, entry_ptr: %x\n", rt_info->handle, rse_ptr));
+ if (rse_ptr == 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ int result = rse_ptr->update_tuple (*rt_info);
+ if (result < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ else if (result == 0) // We did not find an existing tuple.
+ {
+ // Create a new RT_Info tuple from the passed RT_Info.
+ TAO_RT_Info_Tuple *tuple_ptr = 0;
+ ACE_NEW_THROW_EX (tuple_ptr,
+ TAO_RT_Info_Tuple (*rt_info),
+ CORBA::NO_MEMORY ());
+ ACE_CHECK;
+
+
+ // Make sure the new tuple is cleaned up if we exit abruptly.
+ auto_ptr<TAO_RT_Info_Tuple> tuple_auto_ptr (tuple_ptr);
+
+// ACE_DEBUG((LM_DEBUG, "Tuple not found. Inserting new tuple for RT_Info: %d, entry_ptr: 0x%x, tuple_ptr: 0x%x\n",
+// rt_info->handle,
+// rse_ptr,
+// tuple_ptr));
+ // Add the tuple to the entry's original tuple set
+ result = rse_ptr->insert_tuple (*tuple_ptr);
+ if (result < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Maintain the size of the entry pointer array.
+ // Maintain the size of the tuple pointer array.
+ tuple_ptr_array_size_,
+ rt_info_tuple_count_
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Store the new tuple in the tuple pointer array.
+ tuple_ptr_array_ [this->rt_info_tuple_count_] = tuple_ptr;
+
+ ++this->rt_info_tuple_count_;
+
+ // All is well: release the auto pointer's hold on the tuple.
+ tuple_auto_ptr.release ();
+ }
+ }
}
+
// Internal method to lookup a handle for an RT_Info, and return its
// handle, or an error value if it's not present.
@@ -1032,7 +1936,12 @@ lookup_i (const char * entry_point
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::UNKNOWN_TASK))
{
- RtecScheduler::RT_Info *rt_info = 0;
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::lookup_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ TAO_RT_Info_Ex *rt_info = 0;
if (rt_info_tree_.find (entry_point, rt_info) != 0)
{
ACE_THROW_RETURN (RtecScheduler::UNKNOWN_TASK (), 0);
@@ -1063,7 +1972,7 @@ priority_i (RtecScheduler::handle_t handle,
ACE_THROW (RtecScheduler::NOT_SCHEDULED ());
}
- RtecScheduler::RT_Info *rt_info = 0;
+ TAO_RT_Info_Ex *rt_info = 0;
if (rt_info_map_.find (handle, rt_info) != 0)
{
ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
@@ -1075,19 +1984,25 @@ priority_i (RtecScheduler::handle_t handle,
}
-// This method registers a dependency between two RT_Infos.
+// This internal method registers a dependency between two RT_Infos.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
add_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the dependency */,
RtecScheduler::handle_t dependency /* RT_Info on which it depends */,
CORBA::Long number_of_calls,
- RtecScheduler::Dependency_Type_t dependency_type
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL,
RtecScheduler::UNKNOWN_TASK))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::add_dependency_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
// All dependencies are mapped by both the calling and called
// operation handles, so that a topological sort can be performed
// once over both one-way and two-way dependencies. The dependency
@@ -1106,7 +2021,8 @@ add_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the depende
dependency, // called handle
calling_dependency_set_map_, // calling map
number_of_calls,
- dependency_type
+ dependency_type,
+ enabled
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
@@ -1115,10 +2031,11 @@ add_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the depende
handle, // calling handle
called_dependency_set_map_, // called map
number_of_calls,
- dependency_type
+ dependency_type,
+ enabled
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
-
+
break;
// In a one-way call, the called operation depends on the
@@ -1130,7 +2047,8 @@ add_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the depende
handle, // called handle
calling_dependency_set_map_, // calling map
number_of_calls,
- dependency_type
+ dependency_type,
+ enabled
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
@@ -1139,10 +2057,113 @@ add_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the depende
dependency, // calling handle
called_dependency_set_map_, // called map
number_of_calls,
- dependency_type
+ dependency_type,
+ enabled
ACE_ENV_ARG_PARAMETER);
ACE_CHECK;
+ break;
+
+ default:
+
+ // There should not be any other kinds of dependencies.
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+
+ // Add the criticality dependency map entry.
+  // If A---TW--->B and C---OW--->D, the add_dependency calls
+  // would look like this:
+  //   add_dependency (A, B, TW)
+  //   add_dependency (D, C, OW)
+  // Neither of the other two maps captures the actual dependencies:
+  // A depends on B and D depends on C.
+ // The calling dependency map captures
+ // A calls B and C calls D.
+ // The called dependency map captures
+ // B called by A and D called by C.
+
+ map_dependency_i (handle, // calling handle
+ dependency, // called handle
+ crit_dependency_set_map_,// crit dependency map
+ number_of_calls,
+ dependency_type,
+ enabled
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+}
+
+
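
To make the comment above concrete, here is a small self-contained illustration of how the two add_dependency calls populate the three maps, using hypothetical integer handles and std::map in place of the scheduler's ACE hash maps:

    #include <cassert>
    #include <map>
    #include <vector>

    int main ()
    {
      enum { A = 1, B, C, D };
      std::map<int, std::vector<int> > calling, called, crit;

      // add_dependency (A, B, TWO_WAY_CALL): A calls (and depends on) B.
      calling[A].push_back (B);
      called[B].push_back (A);
      crit[A].push_back (B);      // criticality flows along the call direction

      // add_dependency (D, C, ONE_WAY_CALL): C calls D; handles arrive reversed.
      calling[C].push_back (D);
      called[D].push_back (C);
      crit[D].push_back (C);      // criticality flows against the one-way call

      assert (calling[A].front () == B && called[D].front () == C);
      return 0;
    }
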
+// This internal method removes a dependency between two RT_Infos.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+remove_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the dependency */,
+ RtecScheduler::handle_t dependency /* RT_Info on which it depends */,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::remove_dependency_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // All dependencies are mapped by both the calling and called
+ // operation handles, so that a topological sort can be performed
+ // once over both one-way and two-way dependencies. The dependency
+ // specification is in absolute terms, however, so that the calling
+ // and called handles are reversed for one-way and two way
+  // and called handles are reversed for one-way and two-way
+
+ switch (dependency_type)
+ {
+ // In a two-way call, the calling operation depends on the
+ // called operation.
+ case RtecBase::TWO_WAY_CALL:
+
+ // Remove the calling dependency map entry
+ unmap_dependency_i (handle, // calling handle
+ dependency, // called handle
+ calling_dependency_set_map_, // calling map
+ number_of_calls,
+ dependency_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Remove the called dependency map entry
+ unmap_dependency_i (dependency, // called handle
+ handle, // calling handle
+ called_dependency_set_map_, // called map
+ number_of_calls,
+ dependency_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ break;
+
+ // In a one-way call, the called operation depends on the
+ // calling operation.
+ case RtecBase::ONE_WAY_CALL:
+
+ // Remove the calling dependency map entry
+ unmap_dependency_i (dependency, // calling handle
+ handle, // called handle
+ calling_dependency_set_map_, // calling map
+ number_of_calls,
+ dependency_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ // Remove the called dependency map entry
+ unmap_dependency_i (handle, // called handle
+ dependency, // calling handle
+ called_dependency_set_map_, // called map
+ number_of_calls,
+ dependency_type
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
break;
default:
@@ -1150,7 +2171,93 @@ add_dependency_i (RtecScheduler::handle_t handle /* RT_Info that has the depende
// There should not be any other kinds of dependencies.
ACE_THROW (RtecScheduler::INTERNAL ());
}
+}
+
+
+// This method sets the enable state for a dependency between two RT_Infos.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+set_dependency_enable_state_i (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::set_dependency_enable_state_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // All dependencies are mapped by both the calling and called
+ // operation handles, so that a topological sort can be performed
+ // once over both one-way and two-way dependencies. The dependency
+ // specification is in absolute terms, however, so that the calling
+  // and called handles are reversed for one-way and two-way
+ // dependencies.
+
+ switch (dependency_type)
+ {
+ // In a two-way call, the calling operation depends on the
+ // called operation.
+ case RtecBase::TWO_WAY_CALL:
+
+ // Update the calling dependency map entry
+ map_dependency_enable_state_i (handle, // calling handle
+ dependency, // called handle
+ calling_dependency_set_map_, // calling map
+ number_of_calls,
+ dependency_type,
+ enabled
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Update the called dependency map entry
+ map_dependency_enable_state_i (dependency, // called handle
+ handle, // calling handle
+ called_dependency_set_map_, // called map
+ number_of_calls,
+ dependency_type,
+ enabled
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ break;
+
+ // In a one-way call, the called operation depends on the
+ // calling operation.
+ case RtecBase::ONE_WAY_CALL:
+
+ // Update the calling dependency map entry
+ map_dependency_enable_state_i (dependency, // calling handle
+ handle, // called handle
+ calling_dependency_set_map_, // calling map
+ number_of_calls,
+ dependency_type,
+ enabled
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+
+ // Update the called dependency map entry
+ map_dependency_enable_state_i (handle, // called handle
+ dependency, // calling handle
+ called_dependency_set_map_, // called map
+ number_of_calls,
+ dependency_type,
+ enabled
+ ACE_ENV_ARG_PARAMETER);
+ ACE_CHECK;
+ break;
+
+ default:
+
+ // There should not be any other kinds of dependencies.
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
}
// This method installs a dependency in a dependency set map.
@@ -1159,21 +2266,24 @@ template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
map_dependency_i (RtecScheduler::handle_t key,
RtecScheduler::handle_t handle,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> & map,
+ ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &dependency_map,
CORBA::Long number_of_calls,
- RtecScheduler::Dependency_Type_t dependency_type
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
RtecScheduler::UNKNOWN_TASK))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::map_dependency_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
RtecScheduler::Dependency_Set *dependency_set;
// Look up the dependency set in the passed map
- if (map.find (key, dependency_set) != 0)
+ if (dependency_map.find (key, dependency_set) != 0)
{
// Create a new one
ACE_NEW_THROW_EX (dependency_set,
@@ -1181,10 +2291,10 @@ map_dependency_i (RtecScheduler::handle_t key,
CORBA::NO_MEMORY ());
ACE_CHECK;
- if (map.bind (key, dependency_set) != 0)
+ if (dependency_map.bind (key, dependency_set) != 0)
{
delete dependency_set;
- ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ ACE_THROW (RtecScheduler::INTERNAL ());
}
}
@@ -1193,8 +2303,139 @@ map_dependency_i (RtecScheduler::handle_t key,
int prev_length = dependency_set->length ();
dependency_set->length (prev_length + 1);
(*dependency_set) [prev_length].rt_info = handle;
+ (*dependency_set) [prev_length].rt_info_depended_on = key; // may actually be the other way around
(*dependency_set) [prev_length].number_of_calls = number_of_calls;
(*dependency_set) [prev_length].dependency_type = dependency_type;
+ (*dependency_set) [prev_length].enabled = enabled;
+}
+
+
+// This method removes a dependency from a dependency set map.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+unmap_dependency_i (RtecScheduler::handle_t key,
+ RtecScheduler::handle_t handle,
+ ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &dependency_map,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::unmap_dependency_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ RtecScheduler::Dependency_Set *dependency_set = 0;
+
+  // Look up the matching dependency set in the passed map and, if it
+  // is found, remove the matching dependency entry from the set.
+ if (dependency_map.find (key, dependency_set) == 0)
+ {
+ if (dependency_set)
+ {
+ int prev_length = dependency_set->length ();
+ int found = 0;
+ for (int i = 0; i < prev_length; ++i)
+ {
+ if ((*dependency_set) [i].rt_info == handle
+ && (*dependency_set) [i].number_of_calls == number_of_calls
+ && (*dependency_set) [i].dependency_type == dependency_type)
+ {
+ // we found the dependency to delete: set our
+ // found flag to true and compact the array
+ found = 1;
+ for (int j = i+1; j < prev_length; ++j)
+ {
+                      (*dependency_set) [j - 1].rt_info = (*dependency_set) [j].rt_info;
+                      (*dependency_set) [j - 1].rt_info_depended_on = (*dependency_set) [j].rt_info_depended_on;
+                      (*dependency_set) [j - 1].number_of_calls = (*dependency_set) [j].number_of_calls;
+                      (*dependency_set) [j - 1].dependency_type = (*dependency_set) [j].dependency_type;
+                      (*dependency_set) [j - 1].enabled = (*dependency_set) [j].enabled;
+ }
+ dependency_set->length (prev_length - 1);
+ break;
+ }
+ }
+
+ if (!found)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+ }
+ else
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+ else
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+}
+
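
The removal above follows the usual shift-and-shrink pattern for a CORBA sequence. A rough equivalent over a std::vector, with a hypothetical Dep struct rather than the IDL-generated Dependency_Info type:

    #include <stdexcept>
    #include <vector>

    struct Dep { int rt_info; long number_of_calls; int dependency_type; int enabled; };

    void remove_matching (std::vector<Dep> &set, int rt_info, long calls, int type)
    {
      for (std::size_t i = 0; i < set.size (); ++i)
        if (set[i].rt_info == rt_info
            && set[i].number_of_calls == calls
            && set[i].dependency_type == type)
          {
            // Shift the tail left by one slot, then drop the last element.
            for (std::size_t j = i + 1; j < set.size (); ++j)
              set[j - 1] = set[j];
            set.pop_back ();
            return;
          }
      throw std::runtime_error ("dependency not found");  // maps to UNKNOWN_TASK above
    }
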
+// Internal method that enables or disables a dependency between two RT_Infos.
+// Assumes it is being called with all locks held, and does *not*
+// set any schedule stability flags.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+map_dependency_enable_state_i (RtecScheduler::handle_t key,
+ RtecScheduler::handle_t handle,
+ ACE_TYPENAME
+ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &dependency_map,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::UNKNOWN_TASK))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::map_dependency_enable_state_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ RtecScheduler::Dependency_Set *dependency_set = 0;
+
+  // Look up the matching dependency set in the passed map and, if it
+  // is found, update the enable state of the matching dependency entry.
+ if (dependency_map.find (key, dependency_set) == 0)
+ {
+ if (dependency_set)
+ {
+ int set_length = dependency_set->length ();
+ int found = 0;
+ for (int i = 0; i < set_length; ++i)
+ {
+ if ((*dependency_set) [i].rt_info == handle
+ && (*dependency_set) [i].number_of_calls == number_of_calls
+ && (*dependency_set) [i].dependency_type == dependency_type)
+ {
+ // we found the dependency to update
+ found = 1;
+ (*dependency_set) [i].enabled = enabled;
+ break;
+ }
+ }
+
+ if (!found)
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
+ }
+ else
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+ else
+ {
+ ACE_THROW (RtecScheduler::UNKNOWN_TASK ());
+ }
}
@@ -1208,6 +2449,11 @@ dfs_traverse_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::dfs_traverse_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
int i; // index into array of scheduling entry pointers
// Reset registered RT_Infos.
@@ -1244,9 +2490,14 @@ detect_cycles_i (ACE_ENV_SINGLE_ARG_DECL)
RtecScheduler::INTERNAL,
RtecScheduler::CYCLIC_DEPENDENCIES))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::detect_cycles_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
// Sort the pointers to entries in order of descending forward
- // finish times, which produces a topological ordering, with
- // callers ahead of called nodes.
+ // finish times, which produces a reverse topological ordering,
+ // with callers ahead of called nodes.
ACE_OS::qsort (ACE_reinterpret_cast (void *, entry_ptr_array_),
this->rt_info_count_,
sizeof (TAO_Reconfig_Scheduler_Entry *),
@@ -1276,7 +2527,152 @@ detect_cycles_i (ACE_ENV_SINGLE_ARG_DECL)
}
}
-// Propagates effective execution time and period, sets total frame size.
+
+// Propagates aggregate execution times, then performs admission over
+// rate tuples.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+perform_admission_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::perform_admission_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // Traverse entries in topological (ascending forward DFS
+ // finish time) order, propagating aggregate execution
+ // time from called nodes to calling node at each step.
+
+ TAO_RSE_Reverse_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
+ prop_visitor (this->calling_dependency_set_map_,
+ this->rt_info_map_);
+ int i;
+ for (i = this->rt_info_count_ - 1; i >= 0; --i)
+ {
+ if (prop_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+
+ // Sort the pointers to original tuples in ascending admission
+ // order, according to the scheduling strategy's admission policy.
+ ACE_OS::qsort (ACE_reinterpret_cast (void *, tuple_ptr_array_),
+ this->rt_info_tuple_count_,
+ sizeof (TAO_RT_Info_Tuple *),
+ ACE_reinterpret_cast (COMP_FUNC,
+ RECONFIG_SCHED_STRATEGY::total_admission_comp));
+
+  // Traverse tuples in admission order, updating the associated tuple
+ // for each thread delineator.
+#if defined (SCHEDULER_LOGGING)
+ ACE_Scheduler_Factory::log_scheduling_tuples(tuple_ptr_array_,
+ this->rt_info_tuple_count_,
+ "sorted_admit_tuples.txt");
+#endif
+
+ TAO_Tuple_Admission_Visitor<RECONFIG_SCHED_STRATEGY>
+ admit_visitor (critical_utilization_threshold_,
+ noncritical_utilization_threshold_);
+
+ for (i = 0; i < this->rt_info_tuple_count_; ++i)
+ {
+ if (admit_visitor.visit (* (tuple_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+
+ // Store the values accumulated by the visitor.
+ this->noncritical_utilization_ =
+ admit_visitor.noncritical_utilization ();
+ this->critical_utilization_ =
+ admit_visitor.critical_utilization ();
+}
+
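
The admission pass above delegates the actual test to TAO_Tuple_Admission_Visitor, whose internals are not part of this change. One plausible reading, sketched with hypothetical Tuple fields: tuples are considered in admission order, and each is admitted only while its criticality class stays under its configured utilization threshold.

    #include <vector>

    struct Tuple { bool critical; double exec_time; double period; bool admitted; };

    void admit (std::vector<Tuple> &tuples,
                double critical_threshold,
                double noncritical_threshold)
    {
      double critical_util = 0.0, noncritical_util = 0.0;
      for (std::size_t i = 0; i < tuples.size (); ++i)
        {
          double u = tuples[i].exec_time / tuples[i].period;
          double &bucket = tuples[i].critical ? critical_util : noncritical_util;
          double bound   = tuples[i].critical ? critical_threshold : noncritical_threshold;

          // Admit the tuple only if it still fits under its utilization bound.
          tuples[i].admitted = (bucket + u <= bound);
          if (tuples[i].admitted)
            bucket += u;
        }
    }
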
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+crit_dfs_traverse_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::crit_dfs_traverse_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ int i; // index into array of scheduling entry pointers
+
+ // Reset registered RT_Infos.
+ TAO_RSE_Reset_Visitor reset_visitor;
+ for (i = 0; i < this->rt_info_count_; ++i)
+ {
+ if (reset_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+
+ // Traverse registered RT_Infos, assigning DFS start, finish order.
+ TAO_RSE_DFS_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
+ dfs_visitor (this->crit_dependency_set_map_,
+ this->rt_info_map_);
+ for (i = 0; i < this->rt_info_count_; ++i)
+ {
+ if (dfs_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+}
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+propagate_criticalities_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL))
+{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::propagate_criticalities_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+  // Sort the pointers to original tuples by their entries' DFS
+  // finish times, using the scheduling strategy's comparator.
+ ACE_OS::qsort (ACE_reinterpret_cast (void *, tuple_ptr_array_),
+ this->rt_info_tuple_count_,
+ sizeof (TAO_RT_Info_Tuple *),
+ ACE_reinterpret_cast (COMP_FUNC,
+ RECONFIG_SCHED_STRATEGY::comp_entry_finish_times ));
+
+  // Traverse entries over the sorted order in both directions,
+  // propagating criticality along the criticality dependency
+  // graph at each step.
+
+ TAO_RSE_Criticality_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
+ crit_prop_visitor (this->crit_dependency_set_map_,
+ this->rt_info_map_);
+ int i;
+ for (i = 0; i<this->rt_info_count_; ++i)
+ {
+ if (crit_prop_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+ for (i = this->rt_info_count_-1; i>=0; --i)
+ {
+ if (crit_prop_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+}
+
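
The pair of loops above walks the entry array in both directions. A rough illustration of why two passes help, under the assumption (not stated by the visitor itself) that propagation raises a node's criticality to the maximum among its criticality-graph neighbors: a single direction only carries increases one way along the sorted order.

    #include <algorithm>
    #include <vector>

    struct Node { int criticality; std::vector<Node *> depends_on; };

    static void raise_from_dependencies (Node &n)
    {
      for (std::size_t i = 0; i < n.depends_on.size (); ++i)
        n.criticality = std::max (n.criticality, n.depends_on[i]->criticality);
    }

    void propagate (std::vector<Node *> &order)
    {
      for (std::size_t i = 0; i < order.size (); ++i)
        raise_from_dependencies (*order[i]);          // forward pass
      for (std::size_t i = order.size (); i-- > 0; )
        raise_from_dependencies (*order[i]);          // reverse pass
    }
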
+// Propagates periods, sets total frame size.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
propagate_characteristics_i (ACE_ENV_SINGLE_ARG_DECL)
@@ -1285,10 +2681,15 @@ propagate_characteristics_i (ACE_ENV_SINGLE_ARG_DECL)
RtecScheduler::UNRESOLVED_LOCAL_DEPENDENCIES,
RtecScheduler::THREAD_SPECIFICATION))
{
- // Traverse entries in topological (DFS finish) order,
- // propagating period and effective execution time from
- // calling node to called node at each step.
- TAO_RSE_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::propagate_characteristics_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // Traverse entries in reverse topological (descending forward DFS
+ // finish time) order, propagating period and effective execution
+ // time from calling node to called node at each step.
+ TAO_RSE_Forward_Propagation_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
prop_visitor (this->calling_dependency_set_map_,
this->rt_info_map_);
for (int i = 0; i < this->rt_info_count_; ++i)
@@ -1322,6 +2723,26 @@ assign_priorities_i (ACE_ENV_SINGLE_ARG_DECL)
RtecScheduler::INTERNAL,
RtecScheduler::DUPLICATE_NAME))
{
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::assign_priorities_i.\n"));
+
+ ACE_DEBUG ((LM_DEBUG, "Scheduler::entry ptr array before sorting is\n"));
+ for (int i = 0; i < this->rt_info_count_; ++i)
+ {
+ RtecScheduler::RT_Info* rt_info_ptr =
+ this->entry_ptr_array_[i]->actual_rt_info ();
+ ACE_DEBUG ((LM_DEBUG,
+ " %s [%d] crit=%d,prio=%d,preemption_prio=%d,subprio=%d\n ",
+ rt_info_ptr->entry_point.in (),
+ rt_info_ptr->handle,
+ rt_info_ptr->criticality,
+ rt_info_ptr->priority,
+ rt_info_ptr->preemption_priority,
+ rt_info_ptr->preemption_subpriority));
+ }
+#endif /* SCHEDULER_LOGGING */
+
// Sort the pointers to entries in descending order
// of static priority and static subpriority, according
// to our given scheduling strategy.
@@ -1331,6 +2752,23 @@ assign_priorities_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_reinterpret_cast (COMP_FUNC,
RECONFIG_SCHED_STRATEGY::total_priority_comp));
+#ifdef SCHEDULER_LOGGING
+ ACE_DEBUG ((LM_DEBUG, "Scheduler::qsorted array is\n"));
+ for (int i = 0; i < this->rt_info_count_; ++i)
+ {
+ RtecScheduler::RT_Info* rt_info_ptr =
+ this->entry_ptr_array_[i]->actual_rt_info ();
+ ACE_DEBUG ((LM_DEBUG,
+ " %s [%d] crit=%d,prio=%d,preemption_prio=%d,subprio=%d\n ",
+ rt_info_ptr->entry_point.in (),
+ rt_info_ptr->handle,
+ rt_info_ptr->criticality,
+ rt_info_ptr->priority,
+ rt_info_ptr->preemption_priority,
+ rt_info_ptr->preemption_subpriority));
+ }
+#endif
+
// Empty out the previously stored configuration infos, if any.
RtecScheduler::Preemption_Priority_t config_priority;
RtecScheduler::Config_Info *config_info_temp;
@@ -1346,10 +2784,11 @@ assign_priorities_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW (RtecScheduler::INTERNAL ());
}
}
+ this->config_info_count_ = 0;
// Traverse using a priority assignment visitor, which uses a
// strategy to decide when a new priority or subpriority is reached.
- TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY>
+ TAO_RSE_Priority_Visitor<RECONFIG_SCHED_STRATEGY, ACE_LOCK>
prio_visitor (this->rt_info_count_, this->entry_ptr_array_);
auto_ptr<RtecScheduler::Config_Info> new_config_info_ptr;
for (int i = 0; i <= this->rt_info_count_; ++i)
@@ -1392,7 +2831,7 @@ assign_priorities_i (ACE_ENV_SINGLE_ARG_DECL)
}
if (new_config_info->preemption_priority >
- last_scheduled_priority_)
+ this->last_scheduled_priority_)
{
this->last_scheduled_priority_ =
new_config_info->preemption_priority;
@@ -1422,53 +2861,36 @@ assign_priorities_i (ACE_ENV_SINGLE_ARG_DECL)
}
}
-// Compute utilization, set last feasible priority.
+
+// Refreshes the array of tuple pointers and the tuple pointer count.
+
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
-compute_utilization_i (ACE_ENV_SINGLE_ARG_DECL)
+refresh_tuple_ptr_array_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL))
{
- TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY> util_visitor;
+#if defined (SCHEDULER_LOGGING)
+ ACE_DEBUG ((LM_TRACE,
+ " TAO_Reconfig_Scheduler::refresh_tuple_ptr_array_i.\n"));
+#endif /* SCHEDULER_LOGGING */
+
+ // Zero out the tuple pointer array, set count to zero
+ ACE_OS::memset (this->tuple_ptr_array_, 0,
+ sizeof (TAO_RT_Info_Tuple *)
+ * this->tuple_ptr_array_size_);
+ this->rt_info_tuple_count_ = 0;
+
for (int i = 0; i < this->rt_info_count_; ++i)
{
- if (util_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ if (entry_ptr_array_ [i]->register_tuples (this->tuple_ptr_array_,
+ this->rt_info_tuple_count_) < 0)
{
ACE_THROW (RtecScheduler::INTERNAL ());
}
}
-
- // Store the values accumulated by the visitor.
- this->noncritical_utilization_ =
- util_visitor.noncritical_utilization ();
- this->critical_utilization_ =
- util_visitor.critical_utilization ();
-}
-
-
-// Static helper method to give an RT_Info some reasonable default values.
-
-template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
-TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::init_rt_info (RtecScheduler::RT_Info &rt_info)
- ACE_THROW_SPEC ((CORBA::SystemException))
-{
- // Set some reasonable default values.
- rt_info.criticality = RtecScheduler::VERY_LOW_CRITICALITY;
- rt_info.worst_case_execution_time = 0;
- rt_info.typical_execution_time = 0;
- rt_info.cached_execution_time = 0;
- rt_info.period = 1;
- rt_info.importance = RtecScheduler::VERY_LOW_IMPORTANCE;
- rt_info.quantum = 0;
- rt_info.threads = 0;
- rt_info.info_type = RtecScheduler::OPERATION;
- rt_info.priority = 0;
- rt_info.preemption_subpriority = 0;
- rt_info.preemption_priority = 0;
- rt_info.volatile_token = 0;
}
-
// Accesses scheduling strategy for the reconfig scheduler.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> RECONFIG_SCHED_STRATEGY &
@@ -1481,11 +2903,7 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::sched_strategy ()
// Accesses map for O(1) lookup of Config_Infos by priority level.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-ACE_Hash_Map_Manager_Ex<RtecScheduler::Preemption_Priority_t,
- RtecScheduler::Config_Info*,
- ACE_Hash<RtecScheduler::Preemption_Priority_t>,
- ACE_Equal_To<RtecScheduler::Preemption_Priority_t>,
- ACE_LOCK> &
+ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::CONFIG_INFO_MAP &
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::config_info_map ()
{
return this->config_info_map_;
@@ -1505,11 +2923,7 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::config_info_count ()
// Accesses map for O(1) lookup of RT_Infos by handle.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> &
+ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_MAP &
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::rt_info_map ()
{
return this->rt_info_map_;
@@ -1524,14 +2938,19 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::rt_info_count ()
return this->rt_info_count_;
}
+// Returns the number of registered RT_Info tuples.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> long
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::rt_info_tuple_count ()
+{
+ return this->rt_info_tuple_count_;
+}
+
// Accesses tree for O(log n) lookup of RT_Infos by name.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-ACE_RB_Tree<const char *,
- RtecScheduler::RT_Info*,
- ACE_Less_Than<const char *>,
- ACE_LOCK> &
+ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::RT_INFO_TREE &
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::rt_info_tree ()
{
return this->rt_info_tree_;
@@ -1542,11 +2961,7 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::rt_info_tree ()
// set by the caller operation's handle.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> &
+ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::calling_dependency_set_map ()
{
return this->calling_dependency_set_map_;
@@ -1557,11 +2972,7 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::calling_dependency_se
// set by the called operation's handle.
template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
-ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> &
+ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &
TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::called_dependency_set_map ()
{
return this->called_dependency_set_map_;
@@ -1577,6 +2988,160 @@ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::dependency_count ()
return this->dependency_count_;
}
+// Accessor for utilization by noncritical tasks.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+CORBA::Double
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+noncritical_utilization ()
+{
+ return noncritical_utilization_;
+}
+
+// Accessor for utilization by critical tasks.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+CORBA::Double
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+critical_utilization ()
+{
+ return critical_utilization_;
+}
+
+// Accessor for noncritical task utilization threshold.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+CORBA::Double
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+noncritical_utilization_threshold ()
+{
+ return noncritical_utilization_threshold_;
+}
+
+// Mutator for noncritical task utilization threshold.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+noncritical_utilization_threshold (const CORBA::Double &d)
+{
+ noncritical_utilization_threshold_ = d;
+}
+
+// Accessor for critical task utilization threshold.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+CORBA::Double
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+critical_utilization_threshold ()
+{
+ return critical_utilization_threshold_;
+}
+
+// Mutator for critical task utilization threshold.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK>
+void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+critical_utilization_threshold (const CORBA::Double &d)
+{
+ critical_utilization_threshold_ = d;
+}
+
+
+// Helper function: makes sure there is room in the scheduling pointer
+// arrays. This function expands the array eagerly, to minimize
+// memory allocation overhead.
+
+template <class ARRAY_ELEMENT_TYPE> void
+maintain_scheduling_array (ARRAY_ELEMENT_TYPE ** & current_ptr_array,
+ long & current_ptr_array_size,
+ RtecScheduler::handle_t handle
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ if (current_ptr_array_size <= handle)
+ {
+ long new_size = handle + 1;
+ ARRAY_ELEMENT_TYPE ** new_array;
+
+ if (current_ptr_array_size > 0)
+ {
+          // Double the previous array size until the handle fits.
+ for (new_size = 2 * current_ptr_array_size;
+ new_size <= handle;
+ new_size *= 2);
+ }
+
+ // Allocate the new array of the proper size, zero it out.
+
+ ACE_NEW_THROW_EX (new_array,
+ ARRAY_ELEMENT_TYPE * [new_size],
+ CORBA::NO_MEMORY ());
+
+ ACE_OS::memset (new_array, 0,
+ sizeof (ARRAY_ELEMENT_TYPE *) *
+ new_size);
+
+ if (current_ptr_array_size > 0)
+ {
+ // Copy in the previous array.
+ ACE_OS::memcpy (new_array, current_ptr_array,
+ sizeof (ARRAY_ELEMENT_TYPE *) *
+ current_ptr_array_size);
+
+ // Free the old array and swap to point to the new one.
+ delete [] current_ptr_array;
+ }
+
+ current_ptr_array = new_array;
+ current_ptr_array_size = new_size;
+ }
+}
+
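
The doubling loop in maintain_scheduling_array gives amortized O(1) reallocations as handles grow. A tiny standalone check of the resulting capacity progression (grown_size is a hypothetical extraction of just the sizing logic, with the ACE/CORBA error-handling macros dropped):

    #include <cstdio>

    static long grown_size (long current, long handle)
    {
      long new_size = handle + 1;
      if (current > 0)
        for (new_size = 2 * current; new_size <= handle; new_size *= 2)
          ;                                 // double until the handle fits
      return new_size;
    }

    int main ()
    {
      long size = 0;
      long handles[] = { 0, 3, 9, 17 };
      for (int i = 0; i < 4; ++i)
        {
          if (size <= handles[i])
            size = grown_size (size, handles[i]);
          std::printf ("after handle %ld: capacity %ld\n", handles[i], size);
        }
      return 0;   // prints capacities 1, 4, 16, 32
    }
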
+/* WSOA merge - commented out
+// Compute utilization, set last feasible priority.
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::
+compute_utilization_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL))
+{
+ TAO_RSE_Utilization_Visitor<RECONFIG_SCHED_STRATEGY> util_visitor;
+ for (int i = 0; i < this->rt_info_count_; ++i)
+ {
+ if (util_visitor.visit (* (entry_ptr_array_ [i])) < 0)
+ {
+ ACE_THROW (RtecScheduler::INTERNAL ());
+ }
+ }
+
+ // Store the values accumulated by the visitor.
+ this->noncritical_utilization_ =
+ util_visitor.noncritical_utilization ();
+ this->critical_utilization_ =
+ util_visitor.critical_utilization ();
+}
+
+
+// Static helper method to give an RT_Info some reasonable default values.
+
+template <class RECONFIG_SCHED_STRATEGY, class ACE_LOCK> void
+TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::init_rt_info (RtecScheduler::RT_Info &rt_info)
+ ACE_THROW_SPEC ((CORBA::SystemException))
+{
+ // Set some reasonable default values.
+ rt_info.criticality = RtecScheduler::VERY_LOW_CRITICALITY;
+ rt_info.worst_case_execution_time = 0;
+ rt_info.typical_execution_time = 0;
+ rt_info.cached_execution_time = 0;
+ rt_info.period = 1;
+ rt_info.importance = RtecScheduler::VERY_LOW_IMPORTANCE;
+ rt_info.quantum = 0;
+ rt_info.threads = 0;
+ rt_info.info_type = RtecScheduler::OPERATION;
+ rt_info.priority = 0;
+ rt_info.preemption_subpriority = 0;
+ rt_info.preemption_priority = 0;
+ rt_info.volatile_token = 0;
+}
+
+*/
#endif /* TAO_RECONFIG_SCHEDULER_T_C */
diff --git a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.h b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.h
index a9fd17ab431..d2fe751d304 100644
--- a/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.h
+++ b/TAO/orbsvcs/orbsvcs/Sched/Reconfig_Scheduler_T.h
@@ -46,14 +46,14 @@ class TAO_Reconfig_Scheduler :
public:
typedef ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::RT_Info*,
+ TAO_RT_Info_Ex *,
ACE_Hash<RtecScheduler::handle_t>,
ACE_Equal_To<RtecScheduler::handle_t>,
ACE_LOCK> RT_INFO_MAP;
// Type of map used for O(1) lookup of RT_Infos by their handles.
typedef ACE_RB_Tree<const char *,
- RtecScheduler::RT_Info*,
+ TAO_RT_Info_Ex*,
ACE_Less_Than<const char *>,
ACE_LOCK> RT_INFO_TREE;
// Type of tree used for O(log n) lookup of RT_Infos by their names.
@@ -73,7 +73,13 @@ public:
// Type of map used for O(1) lookup of RT_Info
// dependency sets by caller or called handle.
- TAO_Reconfig_Scheduler (int enforce_schedule_stability = 0);
+ typedef typename DEPENDENCY_SET_MAP::ITERATOR DEPENDENCY_SET_MAP_ITERATOR;
+ // Type of iterator for traversal of RT_Info dependency sets by
+ // caller or called handle.
+
+ TAO_Reconfig_Scheduler (int enforce_schedule_stability = 0,
+ const CORBA::Double & critical_utilization_threshold = 1.0,
+ const CORBA::Double & noncritical_utilization_threshold = 1.1);
// Default constructor.
TAO_Reconfig_Scheduler (int config_count,
@@ -83,10 +89,15 @@ public:
int dependency_count,
ACE_Scheduler_Factory::POD_Dependency_Info dependency_info[],
u_long stability_flags,
- int enforce_schedule_stability = 0);
+ int enforce_schedule_stability = 0,
+ const CORBA::Double & critical_utilization_threshold = 1.0,
+ const CORBA::Double & noncritical_utilization_threshold = 1.1);
// Constructor. Initialize the scheduler from POD_Config_Info, POD_RT_Info,
// and POD_Dependency arrays, plus schedule stability flags.
+ ~TAO_Reconfig_Scheduler ();
+ // Destructor.
+
int init (int config_count,
ACE_Scheduler_Factory::POD_Config_Info config_info[],
int rt_info_count,
@@ -104,6 +115,7 @@ public:
void close (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
RtecScheduler::UNKNOWN_TASK,
RtecScheduler::SYNCHRONIZATION_FAILURE));
// Closes the scheduler, releasing all current resources.
@@ -151,6 +163,52 @@ public:
RtecScheduler::SYNCHRONIZATION_FAILURE));
// Set characteristics of the RT_Info corresponding to the passed handle.
+ virtual void reset (RtecScheduler::handle_t handle,
+ RtecScheduler::Criticality_t criticality,
+ RtecScheduler::Time time,
+ RtecScheduler::Time typical_time,
+ RtecScheduler::Time cached_time,
+ RtecScheduler::Period_t period,
+ RtecScheduler::Importance_t importance,
+ RtecScheduler::Quantum_t quantum,
+ CORBA::Long threads,
+ RtecScheduler::Info_Type_t info_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Reset characteristics of the RT_Info corresponding to the passed handle.
+
+ virtual void set_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Set characteristics of the RT_Infos corresponding to the passed handles.
+ // Tuples are added in the case of existing and/or multiple definitions.
+
+ virtual void reset_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Reset characteristics of the RT_Infos corresponding to the passed handles.
+ // Tuples are replaced in the case of existing and/or multiple definitions.
+
+ virtual void replace_seq (const RtecScheduler::RT_Info_Set& infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UNKNOWN_TASK,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::SYNCHRONIZATION_FAILURE));
+ // Replace all RT_Infos, resetting characteristics of the RT_Infos
+ // corresponding to the passed handles. All other RT_Infos are
+  // reset to their uninitialized values, i.e., the same values they have
+ // just after the create call.
+
virtual void priority (RtecScheduler::handle_t handle,
RtecScheduler::OS_Priority& o_priority,
RtecScheduler::Preemption_Subpriority_t& p_subpriority,
@@ -185,9 +243,53 @@ public:
RtecScheduler::UNKNOWN_TASK));
// This method registers a dependency between two RT_Infos.
+ virtual void remove_dependency (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method removes a dependency between two RT_Infos.
+
+ virtual void set_dependency_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method sets the enable state of a dependency between two RT_Infos.
+
+ virtual void set_dependency_enable_state_seq (const RtecScheduler::Dependency_Set & dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method sets the enable state of a sequence of dependencies.
+
+ virtual void set_rt_info_enable_state (RtecScheduler::handle_t handle,
+ RtecScheduler::RT_Info_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method enables or disables an RT_Info.
+
+ virtual void set_rt_info_enable_state_seq (const RtecScheduler::RT_Info_Enable_State_Pair_Set & pair_set
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method enables or disables a sequence of RT_Infos.
+
virtual void compute_scheduling (CORBA::Long minimum_priority,
CORBA::Long maximum_priority,
RtecScheduler::RT_Info_Set_out infos,
+ RtecScheduler::Dependency_Set_out dependencies,
RtecScheduler::Config_Info_Set_out configs,
RtecScheduler::Scheduling_Anomaly_Set_out anomalies
ACE_ENV_ARG_DECL)
@@ -203,6 +305,44 @@ public:
// to be computed for all registered RT_Infos. If the schedule is
// already stable, this is a no-op.
+ virtual void recompute_scheduling (CORBA::Long minimum_priority,
+ CORBA::Long maximum_priority,
+ RtecScheduler::Scheduling_Anomaly_Set_out anomalies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::UTILIZATION_BOUND_EXCEEDED,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INSUFFICIENT_THREAD_PRIORITY_LEVELS,
+ RtecScheduler::TASK_COUNT_MISMATCH,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::DUPLICATE_NAME));
+ // Recomputes the scheduling priorities, etc.
+
+ virtual void get_rt_info_set (RtecScheduler::RT_Info_Set_out infos
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL));
+ // Returns the set of rt_infos, with their assigned priorities (as
+ // of the last schedule re-computation).
+
+ virtual void get_dependency_set (RtecScheduler::Dependency_Set_out dependencies
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL));
+  // Returns the set of dependencies (as of the last schedule
+  // re-computation).
+
+ virtual void get_config_info_set (RtecScheduler::Config_Info_Set_out configs
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::INTERNAL));
+ // Returns the set of config_infos, describing the appropriate
+ // number, types, and priority levels for the dispatching lanes.
+
+
virtual void dispatch_configuration (RtecScheduler::Preemption_Priority_t p_priority,
RtecScheduler::OS_Priority& o_priority,
RtecScheduler::Dispatching_Type_t & d_type
@@ -247,6 +387,9 @@ public:
long rt_info_count ();
// Returns the number of registered RT_Infos.
+ long rt_info_tuple_count ();
+ // Returns the number of registered RT_Info tuples.
+
RT_INFO_TREE & rt_info_tree ();
// Accesses tree for O(log n) lookup of RT_Infos by name.
@@ -262,11 +405,33 @@ public:
// Returns the number of dependencies in the dependency lists of all RT_Infos.
// This is used when traversing the dependency graph.
+ CORBA::Double noncritical_utilization ();
+ // Accessor for utilization by noncritical tasks.
+
+ CORBA::Double critical_utilization ();
+ // Accessor for utilization by critical tasks.
+
+ CORBA::Double noncritical_utilization_threshold ();
+ // Accessor for noncritical task utilization threshold.
+
+ void noncritical_utilization_threshold (const CORBA::Double &);
+ // Mutator for noncritical task utilization threshold.
+
+ CORBA::Double critical_utilization_threshold ();
+ // Accessor for critical task utilization threshold.
+
+ void critical_utilization_threshold (const CORBA::Double &);
+ // Mutator for critical task utilization threshold.
+
protected:
// @@ TO DO: use a memento to save and restore scheduler state without
// breaking encapsulation, particularly of these flags.
+ // @@ TO DO: Recheck the applicability and fine-grain management of
+ // these flags. Do these still correctly reflect the phases
+ // of the computation?
+
enum Stability_Flags
{
// This should always be zero.
@@ -293,18 +458,20 @@ protected:
};
// Flags indicating stability conditions of schedule.
- virtual RtecScheduler::RT_Info * create_i (const char * entry_point,
- RtecScheduler::handle_t handle
- ACE_ENV_ARG_DECL)
+ TAO_RT_Info_Ex * create_i (const char * entry_point,
+ RtecScheduler::handle_t handle,
+ int ignore_duplicates
+ ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::DUPLICATE_NAME,
RtecScheduler::INTERNAL));
- // Internal method to create an RT_Info. If it does not exist,
- // a new RT_Info is created and inserted into the schedule,
- // and the handle of the new RT_Info is returned.
- // If the RT_Info already exists, an exception is thrown.
+ // Internal method to create an RT_Info. If it does not exist, a
+ // new RT_Info is created and inserted into the schedule, and the
+ // handle of the new RT_Info is returned. If the RT_Info already
+ // exists, then if the ignore_duplicates flag is set, the handle
+ // is simply returned; otherwise, an exception is thrown.
- virtual void set_i (RtecScheduler::RT_Info *rt_info,
+ void set_i (TAO_RT_Info_Ex *rt_info,
RtecScheduler::Criticality_t criticality,
RtecScheduler::Time time,
RtecScheduler::Time typical_time,
@@ -313,7 +480,10 @@ protected:
RtecScheduler::Importance_t importance,
RtecScheduler::Quantum_t quantum,
CORBA::Long threads,
- RtecScheduler::Info_Type_t info_type);
+ RtecScheduler::Info_Type_t info_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL));
// Internal method to set characteristics of the passed RT_Info.
virtual RtecScheduler::handle_t lookup_i (const char * entry_point
@@ -337,8 +507,9 @@ protected:
virtual void add_dependency_i (RtecScheduler::handle_t handle,
RtecScheduler::handle_t dependency,
CORBA::Long number_of_calls,
- RtecScheduler::Dependency_Type_t dependency_type
- ACE_ENV_ARG_DECL)
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL,
RtecScheduler::UNKNOWN_TASK));
@@ -346,21 +517,68 @@ protected:
// Assumes it is being called with all locks held, and does *not*
// set any schedule stability flags.
+ virtual void remove_dependency_i (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // Internal method that removes a dependency between two RT_Infos.
+ // Assumes it is being called with all locks held, and does *not*
+ // set any schedule stability flags.
+
+ virtual void set_dependency_enable_state_i (RtecScheduler::handle_t handle,
+ RtecScheduler::handle_t dependency,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::SYNCHRONIZATION_FAILURE,
+ RtecScheduler::UNKNOWN_TASK));
+ // Internal method that enables or disables a dependency between two RT_Infos.
+ // Assumes it is being called with all locks held, and does *not*
+ // set any schedule stability flags.
+
+
virtual void map_dependency_i
(RtecScheduler::handle_t key,
RtecScheduler::handle_t handle,
- ACE_Hash_Map_Manager_Ex<RtecScheduler::handle_t,
- RtecScheduler::Dependency_Set*,
- ACE_Hash<RtecScheduler::handle_t>,
- ACE_Equal_To<RtecScheduler::handle_t>,
- ACE_LOCK> &map,
+ ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &dependency_map,
CORBA::Long number_of_calls,
- RtecScheduler::Dependency_Type_t dependency_type
- ACE_ENV_ARG_DECL)
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
RtecScheduler::UNKNOWN_TASK));
// This method installs a dependency in a dependency set map.
+ void unmap_dependency_i (RtecScheduler::handle_t key,
+ RtecScheduler::handle_t handle,
+ ACE_TYPENAME TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &dependency_map,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method removes a dependency from a dependency set map.
+
+ void map_dependency_enable_state_i (RtecScheduler::handle_t key,
+ RtecScheduler::handle_t handle,
+ ACE_TYPENAME
+ TAO_Reconfig_Scheduler<RECONFIG_SCHED_STRATEGY, ACE_LOCK>::DEPENDENCY_SET_MAP &dependency_map,
+ CORBA::Long number_of_calls,
+ RtecScheduler::Dependency_Type_t dependency_type,
+ RtecScheduler::Dependency_Enabled_Type_t enabled
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL,
+ RtecScheduler::UNKNOWN_TASK));
+ // This method updates the enable state of a dependency in a dependency set map.
virtual void dfs_traverse_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
@@ -375,7 +593,28 @@ protected:
// Sorts an array of RT_info handles in topological order, then
// checks for loops, marks unresolved remote dependencies.
- virtual void propagate_characteristics_i (ACE_ENV_SINGLE_ARG_DECL)
+ void perform_admission_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL));
+  // Computes aggregate execution times, then performs admission over
+ // rate tuples.
+
+
+ void crit_dfs_traverse_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL));
+  // Traverses the criticality dependency graph, assigning a topological
+  // ordering: resets the DFS map entries, performs the DFS traversal,
+  // and constructs the DFS map.
+
+
+ void propagate_criticalities_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL));
+ // Propagates criticalities.
+
+
+ void propagate_characteristics_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL,
RtecScheduler::UNRESOLVED_LOCAL_DEPENDENCIES,
@@ -386,21 +625,26 @@ protected:
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL,
RtecScheduler::DUPLICATE_NAME));
- // Sort operations by urgency (done by strategy), then
- // assign priorities and subpriorities in one pass.
- // Sets last scheduled priority.
+ // Sort operations by urgency (done by strategy), then assign
+ // priorities and subpriorities in one pass. (Re)computes utilization
+ // and sets last scheduled priority and last feasible priority.
+
+ void refresh_tuple_ptr_array_i (ACE_ENV_SINGLE_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException,
+ RtecScheduler::INTERNAL));
+ // Refreshes the array of tuple pointers, corrects the count.
+/* WSOA merge - commented out
virtual void compute_utilization_i (ACE_ENV_SINGLE_ARG_DECL)
ACE_THROW_SPEC ((CORBA::SystemException,
RtecScheduler::INTERNAL));
// Compute utilization, set last feasible priority.
+
static void init_rt_info (RtecScheduler::RT_Info &rt_info)
ACE_THROW_SPEC ((CORBA::SystemException));
// Helper method to give an RT_Info some reasonable default values
-
-
-
+*/
// = Protected class members.
RECONFIG_SCHED_STRATEGY sched_strategy_;
@@ -419,6 +663,9 @@ protected:
long rt_info_count_;
// The number of registered RT_Infos.
+ long rt_info_tuple_count_;
+ // The number of registered RT_Info tuples.
+
RT_INFO_TREE rt_info_tree_;
// Tree for O(log n) lookup of RT_Infos by name.
@@ -430,6 +677,13 @@ protected:
// Map for O(1) lookup of RT_Info dependency
// set by the called operation's handle.
+ DEPENDENCY_SET_MAP crit_dependency_set_map_;
+ // Map for O(1) lookup of RT_Info dependency set. The above two maps
+ // store the calling and called dependencies. This map stores the
+ // true dependencies based on criticality propagation direction. For
+ // two-ways, this is the same as the direction of invocation,
+ // whereas for one-ways, it is in the opposite direction.
+
RtecScheduler::handle_t next_handle_;
// Next RT_Info descriptor handle to allocate. The first handle is
// always 1.
@@ -444,6 +698,16 @@ protected:
long entry_ptr_array_size_;
// Size of the array of scheduling entry pointers.
+ TAO_RT_Info_Tuple ** tuple_ptr_array_;
+  // Array of pointers to RT_Info tuples.  This
+  // array is maintained by the methods that create
+  // tuples, and is sorted in admission order and
+  // other orders at various points during schedule
+  // computation.
+
+ long tuple_ptr_array_size_;
+  // Size of the array of tuple pointers.
+
u_long stability_flags_;
// Flags indicating whether a stable schedule has been computed
// since the last addition or modification of information, and which
@@ -468,11 +732,28 @@ protected:
CORBA::Double critical_utilization_;
// Utilization by critical tasks.
+ CORBA::Double noncritical_utilization_threshold_;
+  // Utilization threshold for noncritical tasks.
+
+ CORBA::Double critical_utilization_threshold_;
+  // Utilization threshold for critical tasks.
+
ACE_LOCK mutex_;
// Mutual exclusion lock for the scheduler itself. This is needed to
// synchronize updates and accesses to scheduling information.
};
+
+template <class ARRAY_ELEMENT_TYPE> void
+maintain_scheduling_array (ARRAY_ELEMENT_TYPE ** & current_ptr_array,
+ long & current_ptr_array_size,
+ RtecScheduler::handle_t handle
+ ACE_ENV_ARG_DECL)
+ ACE_THROW_SPEC ((CORBA::SystemException));
+// Helper function: makes sure there is room in the scheduling pointer
+// arrays. This function expands the array eagerly, to minimize time
+// overhead for memory allocation (at a cost of some unused space).
+
#if defined (__ACE_INLINE__)
#include "Reconfig_Scheduler_T.i"
#endif /* __ACE_INLINE__ */