Diffstat (limited to 'TAO/orbsvcs/orbsvcs/Event/Dispatching_Modules.cpp')
-rw-r--r--  TAO/orbsvcs/orbsvcs/Event/Dispatching_Modules.cpp | 109
1 file changed, 54 insertions(+), 55 deletions(-)
diff --git a/TAO/orbsvcs/orbsvcs/Event/Dispatching_Modules.cpp b/TAO/orbsvcs/orbsvcs/Event/Dispatching_Modules.cpp
index b0a177262c1..7d810dc4436 100644
--- a/TAO/orbsvcs/orbsvcs/Event/Dispatching_Modules.cpp
+++ b/TAO/orbsvcs/orbsvcs/Event/Dispatching_Modules.cpp
@@ -141,14 +141,14 @@ void
ACE_ES_Dispatching_Base::shutdown (void)
{
ACE_DEBUG ((LM_DEBUG,
- "EC (%t) ACE_ES_Dispatching_Base module shutting down.\n"));
+ "EC (%t) ACE_ES_Dispatching_Base module shutting down.\n"));
}
// Just forward the request. This is basically a hook for the RTU
-// stuff.
+// stuff.
int
ACE_ES_Dispatching_Base::dispatch_event (ACE_ES_Dispatch_Request *request,
- u_long &command_action)
+ u_long &command_action)
{
TAO_TRY
{
@@ -191,7 +191,7 @@ ACE_ES_Priority_Dispatching::ACE_ES_Priority_Dispatching (ACE_EventChannel *chan
this->queues_[x] = 0;
}
- this->scheduler_ =
+ this->scheduler_ =
this->channel_->scheduler ();
}
@@ -206,7 +206,7 @@ ACE_ES_Priority_Dispatching::initialize_queues (void)
for (int x = 0; x < ACE_Scheduler_MAX_PRIORITIES; x++)
{
if (this->queues_[x] != 0)
- continue;
+ continue;
// Convert ACE_Scheduler_Rate (it's really a period, not a rate!)
// to a form we can easily work with.
@@ -217,16 +217,16 @@ ACE_ES_Priority_Dispatching::initialize_queues (void)
period_tv.usec () * 10;
ACE_NEW (this->queues_[x],
- ACE_ES_Dispatch_Queue (this,
+ ACE_ES_Dispatch_Queue (this,
&this->notification_strategy_,
this->scheduler_.in ()));
this->queues_[x]->thr_mgr (&this->thr_mgr_);
if ( this->queues_[x]->open_queue (period,
- threads_per_queue_) == -1)
+ threads_per_queue_) == -1)
{
ACE_ERROR ((LM_ERROR, "%p.\n",
- "ACE_ES_Priority_Dispatching::initialize_queues"));
+ "ACE_ES_Priority_Dispatching::initialize_queues"));
return;
}
@@ -265,7 +265,7 @@ ACE_ES_Priority_Dispatching::connected (ACE_Push_Consumer_Proxy *consumer,
// Allocate a new dispatch queue.
queues_[priority] = new ACE_ES_Dispatch_Queue (this, &notification_strategy_);
if (queues_[priority] == 0)
- TAO_THROW (CORBA::NO_MEMORY (0, ,
+ TAO_THROW (CORBA::NO_MEMORY (0, CORBA::COMPLETED_NO,
"ACE_ES_Priority_Dispatching::connected"));
// Initialize the dispatch queue corresponding to the
@@ -281,7 +281,7 @@ ACE_ES_Priority_Dispatching::connected (ACE_Push_Consumer_Proxy *consumer,
// spawns the threads.
if (queues_[priority]->open_queue (priority,
threads_per_queue_) == -1)
- TAO_THROW (DISPATCH_ERROR (0, ,
+ TAO_THROW (DISPATCH_ERROR (0, CORBA::COMPLETED_NO,
"ACE_ES_Priority_Dispatching::connected:"
"queue open failed.\n"));
@@ -293,7 +293,7 @@ ACE_ES_Priority_Dispatching::connected (ACE_Push_Consumer_Proxy *consumer,
highest_priority_ = priority;
ACE_DEBUG ((LM_DEBUG,
- "EC (%t) Created queue priority = %d.\n", priority));
+ "EC (%t) Created queue priority = %d.\n", priority));
}
else
queue_count_[priority]++;
@@ -379,10 +379,10 @@ ACE_ES_Priority_Dispatching::push (ACE_ES_Dispatch_Request *request,
if (queues_[preemption_priority] == 0)
{
ACE_ERROR ((LM_ERROR, "EC (%t): Push to closed queue %d,"
- " dropping event.\n", preemption_priority));
+ " dropping event.\n", preemption_priority));
return;
#if 0
- TAO_THROW (SYNC_ERROR (0, , "ACE_ES_Priority_Dispatching::push"));
+ TAO_THROW (SYNC_ERROR (0, CORBA::COMPLETED_NO, "ACE_ES_Priority_Dispatching::push"));
#endif /* 0 */
}
@@ -397,9 +397,9 @@ ACE_ES_Priority_Dispatching::push (ACE_ES_Dispatch_Request *request,
" release failed.\n"));
if (errno != EPIPE)
{
- TAO_THROW (CORBA::NO_MEMORY ());
+ TAO_THROW (CORBA::NO_MEMORY (CORBA::COMPLETED_NO));
// @@ Orbix parameters
- // 0, ,
+ // 0, CORBA::COMPLETED_NO,
// "ACE_ES_Priority_Dispatching::push enqueue failed"));
}
else
@@ -485,21 +485,21 @@ ACE_ES_Priority_Dispatching::shutdown (void)
if (queues_[x] != 0)
{
ACE_DEBUG ((LM_DEBUG,
- "EC (%t) shutting down dispatch queue %d.\n", x));
+ "EC (%t) shutting down dispatch queue %d.\n", x));
queues_[x]->shutdown_task ();
}
if (this->thr_mgr_.wait () == -1)
ACE_ERROR ((LM_ERROR, "%p\n",
- "Priority_Dispatching::shutdown - waiting"));
+ "Priority_Dispatching::shutdown - waiting"));
for (int i = 0; i <= this->highest_priority_; ++i)
{
if (this->queues_[i] != 0)
- {
- delete this->queues_[i];
- this->queues_[i] = 0;
- }
+ {
+ delete this->queues_[i];
+ this->queues_[i] = 0;
+ }
}
}
@@ -510,7 +510,6 @@ ACE_ES_Priority_Dispatching::shutdown (void)
void
ACE_ES_Priority_Dispatching::dispatch_queue_closed (ACE_ES_Dispatch_Queue *queue)
{
- ACE_UNUSED_ARG (queue);
}
/*
@@ -558,9 +557,9 @@ ACE_ES_Dispatch_Queue::open_queue (RtecScheduler::Period_t &period,
else
{
// quick hack to test dynamic queue performance (to be replaced soon)
- ACE_ES_QUEUE *mq = 0;
-#if defined (TAO_USES_STRATEGY_SCHEDULER)
-#if defined (TAO_USES_EDF_SCHEDULING)
+ ACE_ES_QUEUE *mq = 0;
+ #if defined (TAO_USES_STRATEGY_SCHEDULER)
+ #if defined (TAO_USES_EDF_SCHEDULING)
ACE_Deadline_Message_Strategy *adms = new ACE_Deadline_Message_Strategy;
@@ -569,7 +568,7 @@ ACE_ES_Dispatch_Queue::open_queue (RtecScheduler::Period_t &period,
mq = new ACE_Dynamic_Message_Queue<ACE_SYNCH> (*adms);
}
-#elif defined (TAO_USES_MLF_SCHEDULING) || defined (TAO_USES_MUF_SCHEDULING)
+ #elif defined (TAO_USES_MLF_SCHEDULING) || defined (TAO_USES_MUF_SCHEDULING)
ACE_Laxity_Message_Strategy *alms = new ACE_Laxity_Message_Strategy;
@@ -578,13 +577,13 @@ ACE_ES_Dispatch_Queue::open_queue (RtecScheduler::Period_t &period,
mq = new ACE_Dynamic_Message_Queue<ACE_SYNCH> (*alms);
}
-#else
+ #else
mq = new ACE_ES_QUEUE;
-#endif
-#else
+ #endif
+ #else
// Allocate a message queue that does not notify.
mq = new ACE_ES_MQ;
-#endif
+ #endif
if (mq == 0)
ACE_ERROR_RETURN ((LM_ERROR, "%p.\n",
@@ -617,42 +616,42 @@ ACE_ES_Dispatch_Queue::open_queue (RtecScheduler::Period_t &period,
"ACE_ES_Dispatch_Queue::open_queue"), -1);
case 0:
{
- TAO_TRY
- {// @@ TODO: Handle exceptions...
+ TAO_TRY
+ {// @@ TODO: Handle exceptions...
#if 1
this->scheduler_->set
(rt_info_,
RtecScheduler::VERY_HIGH_CRITICALITY,
- ORBSVCS_Time::zero (),
- ORBSVCS_Time::zero (),
- ORBSVCS_Time::zero (),
+ ORBSVCS_Time::zero,
+ ORBSVCS_Time::zero,
+ ORBSVCS_Time::zero,
period,
RtecScheduler::VERY_LOW_IMPORTANCE,
- ORBSVCS_Time::zero (),
+ ORBSVCS_Time::zero,
1,
RtecScheduler::OPERATION,
TAO_TRY_ENV);
#else
- ACE_Scheduler_Factory::server()->set (rt_info_,
- RtecScheduler::VERY_HIGH_CRITICALITY,
- ORBSVCS_Time::zero (),
- ORBSVCS_Time::zero (),
- ORBSVCS_Time::zero (),
- period,
- RtecScheduler::VERY_LOW_IMPORTANCE,
- ORBSVCS_Time::zero (),
- 1,
- RtecScheduler::OPERATION,
- TAO_TRY_ENV);
+ ACE_Scheduler_Factory::server()->set (rt_info_,
+ RtecScheduler::VERY_HIGH_CRITICALITY,
+ ORBSVCS_Time::zero,
+ ORBSVCS_Time::zero,
+ ORBSVCS_Time::zero,
+ period,
+ RtecScheduler::VERY_LOW_IMPORTANCE,
+ ORBSVCS_Time::zero,
+ 1,
+ RtecScheduler::OPERATION,
+ TAO_TRY_ENV);
#endif
- TAO_CHECK_ENV;
- }
- TAO_CATCHANY
- {
- ACE_ERROR_RETURN ((LM_ERROR,
- "ACE_ES_Dispatch_Queue::exception"), -1);
- }
- TAO_ENDTRY;
+ TAO_CHECK_ENV;
+ }
+ TAO_CATCHANY
+ {
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "ACE_ES_Dispatch_Queue::exception"), -1);
+ }
+ TAO_ENDTRY;
}
// FALLTHROUGH
case 1: