author     sergio <sergio@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>   1998-06-19 02:44:09 +0000
committer  sergio <sergio@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>   1998-06-19 02:44:09 +0000
commit     a9eeeb6fb369219f0e7271462d0566b2e6fce1d0 (patch)
tree       1e11473c4b1de607ff3570a1c4dcb86cb793627c
parent     ac9c47fc9ed58f49c6dde90de6459436945bac8e (diff)
download   ATCD-a9eeeb6fb369219f0e7271462d0566b2e6fce1d0.tar.gz
*** empty log message ***
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/README           |  12
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp  | 100
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp  |   2
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.h    |   4
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp       | 248
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp      |  14
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp       |  39
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/README                       |  12
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp              | 100
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp              |   2
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.h                |   4
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/client.cpp                   | 248
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp                  |  14
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/server.cpp                   |  39
14 files changed, 472 insertions, 366 deletions
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/README b/TAO/performance-tests/Cubit/TAO/MT_Cubit/README
index 8a545ac722c..e0b79a019f3 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/README
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/README
@@ -69,6 +69,10 @@ Options are case sensitive, e.g., "-U" has a different meaning than "-u".
Default Value: Disabled. To enable, specify "-U" on the
command line.
+[-r] turn on the thread-per-rate tests. (Priority inversion tests)
+
+ Default Value: Disabled.
+
The server will print the IOR of the servants, but it is not required
by the client.
@@ -84,10 +88,10 @@ by the client.
[-r] // use thread-per-rate test.
[-o] // makes client use oneway calls instead
[-x] // makes a call to servant to shutdown
- [-u] // run the client utilization test
- [-U] // run the servant utilization test
+ [-u <seconds> ] // run the client utilization test for <seconds> seconds
+ [-U <seconds> ] // run the servant utilization test for <seconds> seconds
[-1] // use one to n test.
- [-g] // choose the granularity of timing calls
+ [-g <granularity>] // choose the granularity of timing calls
[-c] // obtain number of context switches.
[-s] // makes client *NOT* use the name service
[-m] // use multiple priorities for the low priority client.
@@ -116,7 +120,7 @@ by the client.
Default Value: nil, which means not to write the iors to disk.
-[-r] turn on the thread-per-rate tests.
+[-r] turn on the thread-per-rate tests. (Priority inversion tests)
Default Value: Disabled.
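
The README changes above document that -U and -g now take an argument. Below is a minimal sketch of how such an option string is parsed with ACE_Get_Opt, mirroring the "U:...g:" change in the Task_Client.cpp hunks that follow; the free-standing parse_options() function and its local variables are hypothetical stand-ins for the Task_State constructor and members.

#include "ace/Get_Opt.h"
#include "ace/OS.h"

void
parse_options (int argc, char **argv)
{
  ACE_Get_Opt opts (argc, argv, "U:u:g:r");
  int c;

  int run_server_utilization_test = 0;
  int util_time = 0;    // "-U <seconds>" / "-u <seconds>": how long to run
  int granularity = 1;  // "-g <granularity>": calls per timing sample
  int thread_per_rate = 0;

  while ((c = opts ()) != -1)
    switch (c)
      {
      case 'U':
        run_server_utilization_test = 1;
        util_time = ACE_OS::atoi (opts.optarg);   // argument required by "U:"
        break;
      case 'u':
        util_time = ACE_OS::atoi (opts.optarg);
        break;
      case 'g':
        granularity = ACE_OS::atoi (opts.optarg);
        break;
      case 'r':
        thread_per_rate = 1;                      // "-r" still takes no argument
        break;
      }

  // In the real code these values live in Task_State; here they are only
  // locals, so a compiler may warn that they are unused.
}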
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
index d4c95c2c70d..29cb263d76d 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
@@ -30,7 +30,7 @@ Task_State::Task_State (int argc, char **argv)
run_server_utilization_test_ (0),
util_time_ (0)
{
- ACE_Get_Opt opts (argc, argv, "Umu:sn:t:d:rxof:g:1c");
+ ACE_Get_Opt opts (argc, argv, "U:mu:sn:t:d:rxof:g:1c");
int c;
int datatype;
@@ -43,6 +43,7 @@ Task_State::Task_State (int argc, char **argv)
break;
case 'U':
run_server_utilization_test_ = 1;
+ util_time_ = ACE_OS::atoi (opts.optarg);
break;
case 'm':
use_multiple_priority_ = 1;
@@ -648,6 +649,7 @@ Client::run_tests (Cubit_ptr cb,
double sleep_time = (1 / frequency) * ACE_ONE_SECOND_IN_USECS * ts_->granularity_; // usec
double delta = 0;
+ // time to wait for utilization tests to know when to stop.
ACE_Time_Value max_wait_time (ts_->util_time_, 0);
ACE_Countdown_Time countdown (&max_wait_time);
@@ -671,22 +673,27 @@ Client::run_tests (Cubit_ptr cb,
// lowest frequency thread.
(ts_->thread_per_rate_ == 1 && id_ < (ts_->thread_count_ - 1)) ||
// continous loop if we are running the utilization test
- (ts_->use_utilization_test_ == 1);
+ (ts_->use_utilization_test_ == 1) ||
+ // continuous loop if we are running the SERVER utilization test
+ (ts_->run_server_utilization_test_ == 1);
i++)
{
// Elapsed time will be in microseconds.
ACE_Time_Value delta_t;
- if ( (i % ts_->granularity_) == 0)
+ // start timing a call
+ if ( (i % ts_->granularity_) == 0 &&
+ (ts_->use_utilization_test_ == 0) &&
+ (ts_->run_server_utilization_test_ == 0)
+ )
{
- if (ts_->use_utilization_test_ == 0 && ts_->run_server_utilization_test_ == 0)
- {
- ACE_Time_Value tv (0,
- (u_long) ((sleep_time - delta) < 0
- ? 0
- : (sleep_time - delta)));
- ACE_OS::sleep (tv);
- }
+ // delay a sufficient amount of time to be able to enforce
+ // the calling frequency (i.e., 20Hz, 10Hz, 5Hz, 1Hz).
+ ACE_Time_Value tv (0,
+ (u_long) ((sleep_time - delta) < 0
+ ? 0
+ : (sleep_time - delta)));
+ ACE_OS::sleep (tv);
#if defined (CHORUS)
pstartTime = pccTime1Get();
@@ -885,7 +892,11 @@ Client::run_tests (Cubit_ptr cb,
}
}
- if ((i % ts_->granularity_) == (ts_->granularity_ - 1))
+ // stop the timer
+ if ( (i % ts_->granularity_) == (ts_->granularity_ - 1) &&
+ (ts_->use_utilization_test_ == 0) &&
+ (ts_->run_server_utilization_test_ == 0)
+ )
{
#if defined (CHORUS)
pstopTime = pccTime1Get();
@@ -909,27 +920,47 @@ Client::run_tests (Cubit_ptr cb,
my_jitter_array [i/ts_->granularity_] = real_time; // in units of microseconds.
// update the latency array, correcting the index using the granularity
#else /* ACE_LACKS_FLOATING_POINT */
- // Store the time in secs.
-
-// These comments are to temporarily fix what seems a bug in
-// the ACE_Long_Long class that is used to calc the elapsed
-// time.
-// I'll leave these here to debug it later.
- double tmp = (double)delta_t.sec ();
- double tmp2 = (double)delta_t.usec ();
- if (tmp > 100000) {tmp=0.0; tmp2 = 2000.0; fprintf(stderr, "tmp > 100000!, delta_t.usec ()=%f\n", (double)delta_t.usec ());}
- if (tmp2 < 100 && tmp == 0) {tmp=0.0; tmp2 = 2000.0; /*fprintf(stderr, "tmp2 < 100!, delta_t.usec ()=%d , delta_t.usec ()=%d\n", delta_t.sec (), delta_t.usec ());*/ }
- real_time = tmp + tmp2 / (double)ACE_ONE_SECOND_IN_USECS;
- // real_time = (double)delta_t.sec () + (double)delta_t.usec () / (double)ACE_ONE_SECOND_IN_USECS;
+ // Store the time in secs.
+#if defined (VXWORKS)
+ // @@ David, these comments are to temporarily fix what
+ // seems a bug in the ACE_Long_Long class that is used to
+ // calc the elapsed time. It seems that subtraction of two
+ // ACE_Long_Long are not done correctly when the least
+ // significant value has wrapped around. For example to
+ // subtract these values: 00ff1001:00000001 minus
+ // 00ff1000:ffffffff would give a huge number, instead of
+ // giving 2.
+
+ // This is only occurring in VxWorks.
+ // I'll leave these here to debug it later.
+ double tmp = (double)delta_t.sec ();
+ double tmp2 = (double)delta_t.usec ();
+ if (tmp > 100000)
+ {
+ tmp = 0.0;
+ tmp2 = 2000.0;
+ fprintf (stderr, "tmp > 100000!, delta_t.usec ()=%u\n", delta_t.usec ());
+ }
+
+ real_time = tmp + tmp2 / (double)ACE_ONE_SECOND_IN_USECS;
+#else
+ real_time = ((double) delta_t.sec () +
+ (double) delta_t.usec () / (double) ACE_ONE_SECOND_IN_USECS);
+#endif /* VXWORKS */
+
real_time /= ts_->granularity_;
-
+
delta = ((0.4 * fabs (real_time * ACE_ONE_SECOND_IN_USECS)) + (0.6 * delta)); // pow(10,6)
latency += (real_time * ts_->granularity_);
- my_jitter_array [i/ts_->granularity_] = real_time * ACE_ONE_SECOND_IN_MSECS;
+ my_jitter_array [i/ts_->granularity_] = real_time * ACE_ONE_SECOND_IN_MSECS;
#endif /* !ACE_LACKS_FLOATING_POINT */
- }
+ } // END OF IF :
+ // if ( (i % ts_->granularity_) == (ts_->granularity_ - 1) &&
+ // (ts_->use_utilization_test_ == 0) &&
+ // (ts_->run_server_utilization_test_ == 0)
+ // )
if ( ts_->thread_per_rate_ == 1 && id_ < (ts_->thread_count_ - 1) )
{
@@ -951,11 +982,15 @@ Client::run_tests (Cubit_ptr cb,
}
}
- if (ts_->use_utilization_test_ == 1)
+ if (ts_->use_utilization_test_ == 1 ||
+ ts_->run_server_utilization_test_ == 1)
{
countdown.update ();
if (max_wait_time == ACE_Time_Value::zero)
- break;
+ {
+ ts_->loop_count_ = call_count;
+ break;
+ }
}
} /* end of for () */
@@ -963,7 +998,12 @@ Client::run_tests (Cubit_ptr cb,
if (id_ == 0)
ts_->high_priority_loop_count_ = call_count;
- if (call_count > 0)
+ // perform latency stats only if we are not running the utilization
+ // tests.
+ if (call_count > 0 &&
+ (ts_->use_utilization_test_ == 0) &&
+ (ts_->run_server_utilization_test_ == 0)
+ )
{
if (error_count == 0)
{
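
The Task_Client.cpp hunks above make the utilization runs stop on a time budget rather than on a fixed iteration count. Here is a minimal sketch of that stop condition, assuming a trivial loop body; run_for() and its parameter are hypothetical names, while the ACE_Countdown_Time idiom is the one used in the patch.

#include "ace/OS.h"

u_long
run_for (int util_time_in_secs)
{
  // Time to wait, so the utilization test knows when to stop.
  ACE_Time_Value max_wait_time (util_time_in_secs, 0);
  ACE_Countdown_Time countdown (&max_wait_time);

  u_long call_count = 0;

  for (;;)
    {
      // ... one timed CORBA call would go here ...
      call_count++;

      // ACE_Countdown_Time decrements max_wait_time on every update ().
      countdown.update ();
      if (max_wait_time == ACE_Time_Value::zero)
        break;               // the "-U <seconds>" budget is exhausted
    }

  // The patch stores this in ts_->loop_count_ so the ratio can be reported.
  return call_count;
}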
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp
index 4d3c2a84e94..8a33b046b0b 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp
@@ -51,7 +51,7 @@ Util_Thread::svc (void)
return 0;
}
-double
+u_long
Util_Thread::get_number_of_computations (void)
{
return this->number_of_computations_;
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.h b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.h
index 8f199da4b5a..5509de3d32b 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.h
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Util_Thread.h
@@ -31,7 +31,7 @@ public:
virtual int svc (void);
// The thread entry point function.
- double get_number_of_computations (void);
+ u_long get_number_of_computations (void);
// Gets the number of computations the thread was able to perform.
void computation (void);
@@ -44,7 +44,7 @@ private:
int run_computations (void);
// Run the computations.
- double number_of_computations_;
+ u_long number_of_computations_;
// This stores the number of computations performed
// by the Util_Thread so far
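
Util_Thread's computation counter and its accessor switch from double to u_long, which matches the %u format specifiers and the integer ratio arithmetic introduced in client.cpp and server.cpp below. A minimal sketch of the counter idiom follows; Util_Counter is a hypothetical name, not the real Util_Thread.

class Util_Counter
{
public:
  Util_Counter (void)
    : number_of_computations_ (0)
  {
  }

  void computation (void)
  {
    // ... fixed-cost busy work would go here ...
    this->number_of_computations_++;
  }

  unsigned long get_number_of_computations (void)
  {
    return this->number_of_computations_;
  }

private:
  unsigned long number_of_computations_;
  // Number of computations performed so far (u_long in the real class),
  // so it divides exactly and prints with %u even when
  // ACE_LACKS_FLOATING_POINT is defined.
};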
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp
index 69dea293097..0aa507cac31 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp
@@ -150,13 +150,14 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
int i;
u_int j;
+ char * task_id = 0;
+
ACE_High_Res_Timer timer_;
ACE_Time_Value delta_t;
timer_.start ();
#if defined (VXWORKS)
- char * task_id = 0;
ctx = 0;
ACE_NEW_RETURN (task_id,
char [32],
@@ -236,7 +237,7 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
priority = ACE_THR_PRI_FIFO_DEF;
#elif defined (ACE_WIN32)
priority = ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
+ ACE_SCOPE_THREAD);
#else /* ! VXWORKS */
priority = ACE_THR_PRI_FIFO_DEF + 25;
#endif /* ! ACE_WIN32 */
@@ -248,17 +249,13 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
if (high_priority_client.activate (THR_BOUND | ACE_SCHED_FIFO,
1,
0,
-#if !defined (VXWORKS)
- priority) == -1)
-#else
priority,
-1,
0,
0,
0,
0,
- &task_id) == -1)
-#endif /* VXWORKS */
+ (ACE_thread_t*)task_id) == -1)
ACE_ERROR ((LM_ERROR,
"%p; priority is %d\n",
"activate failed",
@@ -271,6 +268,9 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
number_of_low_priority_client = ts->thread_count_ - 1;
+ // mechanism to distribute the available priorities among the
+ // threads when there are not enough different priorities for all
+ // threads.
if (ts->use_multiple_priority_ == 1)
{
ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
@@ -286,23 +286,24 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
// 1 priority is exclusive for the high priority client.
number_of_priorities --;
- // if utilization thread is present, reduce in 1 the available priorities.
+ // if utilization thread is present, reduce in 1 the available
+ // priorities.
if (ts->use_utilization_test_ == 1)
{
number_of_priorities --;
}
- // Drop the priority, so that the priority of clients will increase
- // with increasing client number.
- for (j = 0; j < number_of_low_priority_client + 1; j++)
+ // Drop the priority, so that the priority of clients will
+ // increase with increasing client number.
+ for (j = 0; j < number_of_low_priority_client; j++)
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
ACE_SCOPE_THREAD);
- // if the lowest priority of the "low priority clients" is the minimum,
- // and we are running the utilization thread, increment the priority,
- // since we don't want the utlization thread and a "low priority thread"
- // to have the same priority.
+ // if the lowest priority of the "low priority clients" is the
+ // minimum, and we are running the utilization thread, increment
+ // the priority, since we don't want the utlization thread and a
+ // "low priority thread" to have the same priority.
if ( priority == ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
ACE_SCOPE_THREAD) &&
ts->use_utilization_test_ == 1)
@@ -310,10 +311,10 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
priority,
ACE_SCOPE_THREAD);
- // granularity of the assignment of the priorities. Some OSs have
- // fewer levels of priorities than we have threads in our test, so
- // with this mechanism we assign priorities to groups of threads when
- // there are more threads than priorities.
+ // granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
grain = number_of_low_priority_client / number_of_priorities;
counter = 0;
@@ -359,18 +360,13 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
if (low_priority_client[i - 1]->activate (THR_BOUND | ACE_SCHED_FIFO,
1,
0,
-#if !defined (VXWORKS)
- priority) == -1)
-#else
priority, // These are constructor defaults.
-1, // int grp_id = -1,
0, // ACE_Task_Base *task = 0,
0, // ACE_hthread_t thread_handles[] = 0,
0, // void *stack[] = 0,
0, // size_t stack_size[] = 0,
- &task_id) == -1)
-#endif /* VXWORKS */
-
+ (ACE_thread_t*)task_id) == -1)
ACE_ERROR ((LM_ERROR,
"%p; priority is %d\n",
"activate failed",
@@ -380,14 +376,15 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
{
counter = (counter + 1) % grain;
if ( (counter == 0) &&
- //Just so when we distribute the priorities among the threads, we make sure we don't go overboard.
+ //Just so when we distribute the priorities among the
+ //threads, we make sure we don't go overboard.
((number_of_priorities * grain) > (number_of_low_priority_client - (i - 1))) )
- {
- // Get the next higher priority.
- priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
- }
+ {
+ // Get the next higher priority.
+ priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
+ priority,
+ ACE_SCOPE_THREAD);
+ }
}
} /* end of for () */
@@ -443,12 +440,13 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
#if defined (VXWORKS)
if (ts->context_switch_test_ == 1)
{
- fprintf(stderr, "Adding the context switch hook!\n");
- taskSwitchHookAdd ((FUNCPTR)&switchHook);
+ fprintf(stderr, "Adding the context switch hook!\n");
+ taskSwitchHookAdd ((FUNCPTR)&switchHook);
}
#endif
- // Wait for all the client threads to exit (except the utilization thread).
+ // Wait for all the client threads to exit (except the utilization
+ // thread).
thread_manager->wait ();
#if defined (NO_ACE_QUANTIFY)
@@ -461,9 +459,9 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
timer_.elapsed_time (delta_t);
if (ts->use_utilization_test_ == 1)
- // signal the utilization thread to finish with its work..
- // only if utilization test was specified. See
- // description of this variable in header file.
+ // signal the utilization thread to finish with its work.. only
+ // if utilization test was specified. See description of this
+ // variable in header file.
{
util_thread.done_ = 1;
@@ -479,7 +477,8 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
#if defined (ACE_HAS_PRUSAGE_T)
timer_for_context_switch.stop ();
timer_for_context_switch.get_rusage (usage);
- // Add up the voluntary context switches & involuntary context switches
+ // Add up the voluntary context switches & involuntary context
+ // switches
context_switch = usage.pr_vctx + usage.pr_ictx - context_switch;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
@@ -488,7 +487,8 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
#elif defined (ACE_HAS_GETRUSAGE) && !defined (ACE_WIN32)
timer_for_context_switch.stop ();
timer_for_context_switch.get_rusage (usage);
- // Add up the voluntary context switches & involuntary context switches
+ // Add up the voluntary context switches & involuntary context
+ // switches
context_switch = usage.ru_nvcsw + usage.ru_nivcsw - context_switch;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
@@ -496,88 +496,95 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
usage.ru_nivcsw));
#elif defined (VXWORKS) /* ACE_HAS_GETRUSAGE */
taskSwitchHookDelete ((FUNCPTR)&switchHook);
- ACE_DEBUG ((LM_DEBUG,
+ ACE_DEBUG ((LM_DEBUG,
"Context switches=%d\n",
ctx));
#endif
}
+ // if running the utilization test, don't report latency nor jitter.
+ if (ts->use_utilization_test_ == 0 &&
+ ts->run_server_utilization_test_ == 0)
+ {
#if defined (VXWORKS)
- ACE_DEBUG ((LM_DEBUG, "Test done.\n"
- "High priority client latency : %f msec, jitter: %f msec\n"
- "Low priority client latency : %f msec, jitter: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter ()));
- // output the latency values to a file, tab separated, to import it
- // to Excel to calculate jitter, in the mean time we come up with
- // the sqrt() function.
- output_latency (ts);
-
+ ACE_DEBUG ((LM_DEBUG, "Test done.\n"
+ "High priority client latency : %f msec, jitter: %f msec\n"
+ "Low priority client latency : %f msec, jitter: %f msec\n",
+ high_priority_client.get_high_priority_latency (),
+ high_priority_client.get_high_priority_jitter (),
+ low_priority_client[0]->get_low_priority_latency (),
+ low_priority_client[0]->get_low_priority_jitter ()));
+ // output the latency values to a file, tab separated, to import
+ // it to Excel to calculate jitter, in the mean time we come up
+ // with the sqrt() function.
+ output_latency (ts);
#elif defined (CHORUS)
- ACE_DEBUG ((LM_DEBUG,
- "Test done.\n"
- "High priority client latency : %u usec\n"
- "Low priority client latency : %u usec\n",
- high_priority_client.get_high_priority_latency (),
- low_priority_client[0]->get_low_priority_latency () ));
-
- // output the latency values to a file, tab separated, to import it
- // to Excel to calculate jitter, in the mean time we come up with
- // the sqrt() function.
- output_latency (ts);
+ ACE_DEBUG ((LM_DEBUG,
+ "Test done.\n"
+ "High priority client latency : %u usec\n"
+ "Low priority client latency : %u usec\n",
+ high_priority_client.get_high_priority_latency (),
+ low_priority_client[0]->get_low_priority_latency () ));
+
+ // output the latency values to a file, tab separated, to import
+ // it to Excel to calculate jitter, in the mean time we come up
+ // with the sqrt() function.
+ output_latency (ts);
#else /* !CHORUS */
- ACE_DEBUG ((LM_DEBUG, "Test done.\n"
- "High priority client latency : %f msec, jitter: %f msec\n"
- "Low priority client latency : %f msec, jitter: %f msec\n"
- "# of context switches: %d, context_switch_time: %f msec\n"
- "total context switch time: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter (),
- context_switch,
- csw/1000,
- csw * context_switch/1000 ));
-// output_latency (ts);
+ ACE_DEBUG ((LM_DEBUG, "Test done.\n"
+ "High priority client latency : %f msec, jitter: %f msec\n"
+ "Low priority client latency : %f msec, jitter: %f msec\n"
+ "# of context switches: %d, context_switch_time: %f msec\n"
+ "total context switch time: %f msec\n",
+ high_priority_client.get_high_priority_latency (),
+ high_priority_client.get_high_priority_jitter (),
+ low_priority_client[0]->get_low_priority_latency (),
+ low_priority_client[0]->get_low_priority_jitter (),
+ context_switch,
+ csw/1000,
+ csw * context_switch/1000 ));
+ // output_latency (ts);
#endif /* !VXWORKS && !CHORUS */
-
+ }
if (ts->use_utilization_test_ == 1)
- {
- total_util_task_duration = util_task_duration * util_thread.get_number_of_computations ();
+ {
+ total_util_task_duration = util_task_duration * util_thread.get_number_of_computations ();
- total_latency = (delta_t.sec () *
- ACE_ONE_SECOND_IN_MSECS +
- (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS);
-
- total_latency_high = total_latency - total_util_task_duration;
-
- // Calc and print the CPU percentage. I add 0.5 to round to the
- // nearest integer before casting it to int.
- ACE_DEBUG ((LM_DEBUG,
- "\t%% ORB Client CPU utilization: %u %%\n"
- "\t%% Idle time: %u %%\n",
- (u_int) (total_latency_high * 100 / total_latency + 0.5),
- (u_int) (total_util_task_duration * 100 / total_latency + 0.5) ));
+ total_latency = (delta_t.sec () *
+ ACE_ONE_SECOND_IN_MSECS +
+ (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS);
+
+ total_latency_high = total_latency - total_util_task_duration;
+
+ // Calc and print the CPU percentage. I add 0.5 to round to the
+ // nearest integer before casting it to int.
+ ACE_DEBUG ((LM_DEBUG,
+ "\t%% ORB Client CPU utilization: %u %%\n"
+ "\t%% Idle time: %u %%\n\n",
+ (u_int) (total_latency_high * 100 / total_latency + 0.5),
+ (u_int) (total_util_task_duration * 100 / total_latency + 0.5) ));
- #if defined (ACE_LACKS_FLOATING_POINT)
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization task performed %u computations\n",
- util_thread.get_number_of_computations ()));
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %u usecs\n",
- util_task_duration));
- #else
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization task performed %g computations\n",
- util_thread.get_number_of_computations ()));
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %f msecs\n",
- util_task_duration));
- #endif /* ! ACE_LACKS_FLOATING_POINT */
- }
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) UTILIZATION task performed \t%u computations\n"
+ "(%t) CLIENT task performed \t\t%u CORBA calls\n"
+ "\t Ratio of computations to CORBA calls is %u.%u:1\n\n",
+ util_thread.get_number_of_computations (),
+ ts->loop_count_,
+ util_thread.get_number_of_computations () / ts->loop_count_,
+ (util_thread.get_number_of_computations () % ts->loop_count_) * 100 / ts->loop_count_
+ ));
+
+#if defined (ACE_LACKS_FLOATING_POINT)
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) utilization computation time is %u usecs\n",
+ util_task_duration));
+#else
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) utilization computation time is %f msecs\n",
+ util_task_duration));
+#endif /* ! ACE_LACKS_FLOATING_POINT */
+ }
#if defined (VXWORKS)
delete task_id;
@@ -608,25 +615,20 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"Creating 20 Hz client with priority %d\n",
priority));
+
if (CB_20Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
ACE_SCOPE_THREAD);
- priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
ACE_DEBUG ((LM_DEBUG,
"Creating 10 Hz client with priority %d\n",
priority));
+
if (CB_10Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
@@ -634,10 +636,9 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"Creating 5 Hz client with priority %d\n",
priority));
+
if (CB_5Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
@@ -645,10 +646,9 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"Creating 1 Hz client with priority %d\n",
priority));
+
if (CB_1Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
// Wait for all the threads to exit.
thread_manager->wait ();
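
The rewrapped comments in client.cpp describe the grain mechanism: when the OS has fewer distinct priority levels than low-priority clients, threads are assigned in groups of `grain`, moving to the next priority only after a whole group has been started. A self-contained sketch of that loop, with hypothetical names and the thread activation elided:

#include "ace/Sched_Params.h"

void
distribute_priorities (unsigned int number_of_low_priority_clients,
                       unsigned int number_of_priorities,
                       int priority)
{
  // Some OSs have fewer priority levels than we have threads, so assign
  // priorities to groups of `grain` threads each.
  unsigned int grain =
    number_of_low_priority_clients / number_of_priorities;
  if (grain == 0)
    grain = 1;

  unsigned int counter = 0;

  for (unsigned int i = 1; i <= number_of_low_priority_clients; i++)
    {
      // ... activate low_priority_client[i - 1] at `priority` here ...

      counter = (counter + 1) % grain;
      if (counter == 0
          // Just so we don't go overboard when the groups don't divide
          // the clients evenly.
          && (number_of_priorities * grain)
               > (number_of_low_priority_clients - (i - 1)))
        // Get the next higher priority.
        priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
                                                    priority,
                                                    ACE_SCOPE_THREAD);
    }
}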
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
index 4ed26dce816..1c4d2799ac4 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
@@ -25,16 +25,7 @@ Cubit_i::cube_octet (CORBA::Octet o,
ts_->barrier_->wait ();
}
-// if (ts_->run_server_utilization_test_ == 0)
-// {
-// ACE_DEBUG ((LM_DEBUG,
-// "(%t) )))))))) "
-// "test STARTED at %D\n"));
-
-// ts_->run_server_utilization_test_ = 1;
-// this->util_started_ = 1;
-// this->ts_->timer_.start ();
-// }
+ ts_->loop_count_++;
return (CORBA::Octet) (o * o * o);
}
@@ -75,6 +66,9 @@ void Cubit_i::shutdown (CORBA::Environment &)
{
ACE_DEBUG ((LM_DEBUG,
"(%t) Calling TAO_ORB_Core_instance ()->orb ()->shutdown ()\n"));
+
TAO_ORB_Core_instance ()->orb ()->shutdown ();
+
+ // this is to signal the utilization thread to exit its loop.
ts_->utilization_task_started_ = 0;
}
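
cubit_i.cpp now counts every serviced call in ts_->loop_count_, which main () in server.cpp later divides by the utilization thread's computation count. Below is a stripped-down, compilable sketch of that accounting; Shared_State and cube_octet_sketch() are hypothetical stand-ins for Task_State and the IDL-generated servant method, and the barrier synchronization is omitted.

struct Shared_State
{
  unsigned long loop_count_;      // CORBA calls serviced so far
};

unsigned char
cube_octet_sketch (Shared_State *ts, unsigned char o)
{
  ts->loop_count_++;                    // one more serviced CORBA call
  return (unsigned char) (o * o * o);   // the actual "cubit" work
}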
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp
index 2a9e62169db..0e0ae8f8824 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp
@@ -543,8 +543,8 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
0,
priority) == -1)
{
- ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n",
- "high_priority_task->activate"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n"
+ "\thigh_priority_task->activate failed"));
}
// Create an array to hold pointers to the low priority tasks.
@@ -580,7 +580,7 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
// Drop the priority, so that the priority of clients will increase
// with increasing client number.
- for (i = 0; i < number_of_low_priority_servants + 1; i++)
+ for (i = 0; i < number_of_low_priority_servants; i++)
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
ACE_SCOPE_THREAD);
@@ -643,8 +643,8 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
0,
priority) == -1)
{
- ACE_ERROR ((LM_ERROR, "(%P|%t; %p\n",
- "low_priority_task[i]->activate"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n"
+ "\tlow_priority_task[i]->activate"));
}
ACE_DEBUG ((LM_DEBUG,
@@ -657,7 +657,8 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
{
counter = (counter + 1) % grain;
if ( (counter == 0) &&
- //Just so when we distribute the priorities among the threads, we make sure we don't go overboard.
+ //Just so when we distribute the priorities among the
+ //threads, we make sure we don't go overboard.
((number_of_priorities * grain) > (number_of_low_priority_servants - (i - 1))) )
{
// Get the next higher priority.
@@ -724,8 +725,8 @@ start_utilization (ACE_Thread_Manager *util_thr_mgr, Task_State *ts)
0,
priority) == -1)
{
- ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n",
- "util_task->activate"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n"
+ "\tutil_task->activate failed"));
}
return util_task;
@@ -784,7 +785,10 @@ main (int argc, char *argv[])
Task_State ts ( _argc, _argv);
if (run_utilization_test == 1)
- ts.run_server_utilization_test_ = 1;
+ {
+ ts.run_server_utilization_test_ = 1;
+ ts.loop_count_ = 0;
+ }
Util_Thread * util_task = 0;
@@ -873,11 +877,22 @@ main (int argc, char *argv[])
total_latency_servants = total_latency - total_util_task_duration;
+ ACE_DEBUG ((LM_DEBUG,
+ "-------------------------- Stats -------------------------------\n"));
+
ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization task performed %g computations\n"
- "(%t) each computation had a duration of %f msecs\n"
- "(%t) elapsed time is %f msecs\n",
+ "(%t) UTILIZATION task performed \t%u computations\n"
+ "(%t) SERVANT task serviced \t\t%u CORBA calls\n"
+ "\t Ratio of computations to CORBA calls is %u.%u:1\n\n",
util_task->get_number_of_computations (),
+ ts.loop_count_,
+ util_task->get_number_of_computations () / ts.loop_count_,
+ (util_task->get_number_of_computations () % ts.loop_count_) * 100 / ts.loop_count_
+ ));
+
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Each computation had a duration of %f msecs\n"
+ "(%t) Total elapsed time of test is %f msecs\n",
util_task_duration / 1000,
total_latency / 1000));
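
The new server report prints the computations-to-CORBA-calls ratio as "%u.%u:1" using only integer arithmetic, so it also works when ACE_LACKS_FLOATING_POINT is defined. A small self-contained sketch of that fixed-point formatting follows; plain printf and the example counts are hypothetical stand-ins for ACE_DEBUG and the real counters.

#include <stdio.h>

int
main (void)
{
  unsigned long computations = 123456;   // util_task->get_number_of_computations ()
  unsigned long corba_calls = 7890;      // ts.loop_count_, assumed non-zero here

  // Whole part and the first two decimal digits of computations / corba_calls.
  unsigned long whole = computations / corba_calls;
  unsigned long hundredths = (computations % corba_calls) * 100 / corba_calls;

  printf ("Ratio of computations to CORBA calls is %lu.%02lu:1\n",
          whole, hundredths);
  return 0;
}

Note that the patch prints the fractional part with a plain "%u", so a remainder such as 5 appears as ".5" rather than ".05"; "%02u" would keep the leading zero. The division also assumes loop_count_ is non-zero.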
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/README b/TAO/tests/Cubit/TAO/MT_Cubit/README
index 8a545ac722c..e0b79a019f3 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/README
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/README
@@ -69,6 +69,10 @@ Options are case sensitive, e.g., "-U" has a different meaning than "-u".
Default Value: Disabled. To enable, specify "-U" on the
command line.
+[-r] turn on the thread-per-rate tests. (Priority inversion tests)
+
+ Default Value: Disabled.
+
The server will print the IOR of the servants, but it is not required
by the client.
@@ -84,10 +88,10 @@ by the client.
[-r] // use thread-per-rate test.
[-o] // makes client use oneway calls instead
[-x] // makes a call to servant to shutdown
- [-u] // run the client utilization test
- [-U] // run the servant utilization test
+ [-u <seconds> ] // run the client utilization test for <seconds> seconds
+ [-U <seconds> ] // run the servant utilization test for <seconds> seconds
[-1] // use one to n test.
- [-g] // choose the granularity of timing calls
+ [-g <granularity>] // choose the granularity of timing calls
[-c] // obtain number of context switches.
[-s] // makes client *NOT* use the name service
[-m] // use multiple priorities for the low priority client.
@@ -116,7 +120,7 @@ by the client.
Default Value: nil, which means not to write the iors to disk.
-[-r] turn on the thread-per-rate tests.
+[-r] turn on the thread-per-rate tests. (Priority inversion tests)
Default Value: Disabled.
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
index d4c95c2c70d..29cb263d76d 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
@@ -30,7 +30,7 @@ Task_State::Task_State (int argc, char **argv)
run_server_utilization_test_ (0),
util_time_ (0)
{
- ACE_Get_Opt opts (argc, argv, "Umu:sn:t:d:rxof:g:1c");
+ ACE_Get_Opt opts (argc, argv, "U:mu:sn:t:d:rxof:g:1c");
int c;
int datatype;
@@ -43,6 +43,7 @@ Task_State::Task_State (int argc, char **argv)
break;
case 'U':
run_server_utilization_test_ = 1;
+ util_time_ = ACE_OS::atoi (opts.optarg);
break;
case 'm':
use_multiple_priority_ = 1;
@@ -648,6 +649,7 @@ Client::run_tests (Cubit_ptr cb,
double sleep_time = (1 / frequency) * ACE_ONE_SECOND_IN_USECS * ts_->granularity_; // usec
double delta = 0;
+ // time to wait for utilization tests to know when to stop.
ACE_Time_Value max_wait_time (ts_->util_time_, 0);
ACE_Countdown_Time countdown (&max_wait_time);
@@ -671,22 +673,27 @@ Client::run_tests (Cubit_ptr cb,
// lowest frequency thread.
(ts_->thread_per_rate_ == 1 && id_ < (ts_->thread_count_ - 1)) ||
// continous loop if we are running the utilization test
- (ts_->use_utilization_test_ == 1);
+ (ts_->use_utilization_test_ == 1) ||
+ // continuous loop if we are running the SERVER utilization test
+ (ts_->run_server_utilization_test_ == 1);
i++)
{
// Elapsed time will be in microseconds.
ACE_Time_Value delta_t;
- if ( (i % ts_->granularity_) == 0)
+ // start timing a call
+ if ( (i % ts_->granularity_) == 0 &&
+ (ts_->use_utilization_test_ == 0) &&
+ (ts_->run_server_utilization_test_ == 0)
+ )
{
- if (ts_->use_utilization_test_ == 0 && ts_->run_server_utilization_test_ == 0)
- {
- ACE_Time_Value tv (0,
- (u_long) ((sleep_time - delta) < 0
- ? 0
- : (sleep_time - delta)));
- ACE_OS::sleep (tv);
- }
+ // delay a sufficient amount of time to be able to enforce
+ // the calling frequency (i.e., 20Hz, 10Hz, 5Hz, 1Hz).
+ ACE_Time_Value tv (0,
+ (u_long) ((sleep_time - delta) < 0
+ ? 0
+ : (sleep_time - delta)));
+ ACE_OS::sleep (tv);
#if defined (CHORUS)
pstartTime = pccTime1Get();
@@ -885,7 +892,11 @@ Client::run_tests (Cubit_ptr cb,
}
}
- if ((i % ts_->granularity_) == (ts_->granularity_ - 1))
+ // stop the timer
+ if ( (i % ts_->granularity_) == (ts_->granularity_ - 1) &&
+ (ts_->use_utilization_test_ == 0) &&
+ (ts_->run_server_utilization_test_ == 0)
+ )
{
#if defined (CHORUS)
pstopTime = pccTime1Get();
@@ -909,27 +920,47 @@ Client::run_tests (Cubit_ptr cb,
my_jitter_array [i/ts_->granularity_] = real_time; // in units of microseconds.
// update the latency array, correcting the index using the granularity
#else /* ACE_LACKS_FLOATING_POINT */
- // Store the time in secs.
-
-// These comments are to temporarily fix what seems a bug in
-// the ACE_Long_Long class that is used to calc the elapsed
-// time.
-// I'll leave these here to debug it later.
- double tmp = (double)delta_t.sec ();
- double tmp2 = (double)delta_t.usec ();
- if (tmp > 100000) {tmp=0.0; tmp2 = 2000.0; fprintf(stderr, "tmp > 100000!, delta_t.usec ()=%f\n", (double)delta_t.usec ());}
- if (tmp2 < 100 && tmp == 0) {tmp=0.0; tmp2 = 2000.0; /*fprintf(stderr, "tmp2 < 100!, delta_t.usec ()=%d , delta_t.usec ()=%d\n", delta_t.sec (), delta_t.usec ());*/ }
- real_time = tmp + tmp2 / (double)ACE_ONE_SECOND_IN_USECS;
- // real_time = (double)delta_t.sec () + (double)delta_t.usec () / (double)ACE_ONE_SECOND_IN_USECS;
+ // Store the time in secs.
+#if defined (VXWORKS)
+ // @@ David, these comments are to temporarily fix what
+ // seems a bug in the ACE_Long_Long class that is used to
+ // calc the elapsed time. It seems that subtraction of two
+ // ACE_Long_Long are not done correctly when the least
+ // significant value has wrapped around. For example to
+ // subtract these values: 00ff1001:00000001 minus
+ // 00ff1000:ffffffff would give a huge number, instead of
+ // giving 2.
+
+ // This is only occurring in VxWorks.
+ // I'll leave these here to debug it later.
+ double tmp = (double)delta_t.sec ();
+ double tmp2 = (double)delta_t.usec ();
+ if (tmp > 100000)
+ {
+ tmp = 0.0;
+ tmp2 = 2000.0;
+ fprintf (stderr, "tmp > 100000!, delta_t.usec ()=%u\n", delta_t.usec ());
+ }
+
+ real_time = tmp + tmp2 / (double)ACE_ONE_SECOND_IN_USECS;
+#else
+ real_time = ((double) delta_t.sec () +
+ (double) delta_t.usec () / (double) ACE_ONE_SECOND_IN_USECS);
+#endif /* VXWORKS */
+
real_time /= ts_->granularity_;
-
+
delta = ((0.4 * fabs (real_time * ACE_ONE_SECOND_IN_USECS)) + (0.6 * delta)); // pow(10,6)
latency += (real_time * ts_->granularity_);
- my_jitter_array [i/ts_->granularity_] = real_time * ACE_ONE_SECOND_IN_MSECS;
+ my_jitter_array [i/ts_->granularity_] = real_time * ACE_ONE_SECOND_IN_MSECS;
#endif /* !ACE_LACKS_FLOATING_POINT */
- }
+ } // END OF IF :
+ // if ( (i % ts_->granularity_) == (ts_->granularity_ - 1) &&
+ // (ts_->use_utilization_test_ == 0) &&
+ // (ts_->run_server_utilization_test_ == 0)
+ // )
if ( ts_->thread_per_rate_ == 1 && id_ < (ts_->thread_count_ - 1) )
{
@@ -951,11 +982,15 @@ Client::run_tests (Cubit_ptr cb,
}
}
- if (ts_->use_utilization_test_ == 1)
+ if (ts_->use_utilization_test_ == 1 ||
+ ts_->run_server_utilization_test_ == 1)
{
countdown.update ();
if (max_wait_time == ACE_Time_Value::zero)
- break;
+ {
+ ts_->loop_count_ = call_count;
+ break;
+ }
}
} /* end of for () */
@@ -963,7 +998,12 @@ Client::run_tests (Cubit_ptr cb,
if (id_ == 0)
ts_->high_priority_loop_count_ = call_count;
- if (call_count > 0)
+ // perform latency stats only if we are not running the utilization
+ // tests.
+ if (call_count > 0 &&
+ (ts_->use_utilization_test_ == 0) &&
+ (ts_->run_server_utilization_test_ == 0)
+ )
{
if (error_count == 0)
{
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp
index 4d3c2a84e94..8a33b046b0b 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.cpp
@@ -51,7 +51,7 @@ Util_Thread::svc (void)
return 0;
}
-double
+u_long
Util_Thread::get_number_of_computations (void)
{
return this->number_of_computations_;
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.h b/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.h
index 8f199da4b5a..5509de3d32b 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.h
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Util_Thread.h
@@ -31,7 +31,7 @@ public:
virtual int svc (void);
// The thread entry point function.
- double get_number_of_computations (void);
+ u_long get_number_of_computations (void);
// Gets the number of computations the thread was able to perform.
void computation (void);
@@ -44,7 +44,7 @@ private:
int run_computations (void);
// Run the computations.
- double number_of_computations_;
+ u_long number_of_computations_;
// This stores the number of computations performed
// by the Util_Thread so far
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp
index 69dea293097..0aa507cac31 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp
@@ -150,13 +150,14 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
int i;
u_int j;
+ char * task_id = 0;
+
ACE_High_Res_Timer timer_;
ACE_Time_Value delta_t;
timer_.start ();
#if defined (VXWORKS)
- char * task_id = 0;
ctx = 0;
ACE_NEW_RETURN (task_id,
char [32],
@@ -236,7 +237,7 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
priority = ACE_THR_PRI_FIFO_DEF;
#elif defined (ACE_WIN32)
priority = ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
+ ACE_SCOPE_THREAD);
#else /* ! VXWORKS */
priority = ACE_THR_PRI_FIFO_DEF + 25;
#endif /* ! ACE_WIN32 */
@@ -248,17 +249,13 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
if (high_priority_client.activate (THR_BOUND | ACE_SCHED_FIFO,
1,
0,
-#if !defined (VXWORKS)
- priority) == -1)
-#else
priority,
-1,
0,
0,
0,
0,
- &task_id) == -1)
-#endif /* VXWORKS */
+ (ACE_thread_t*)task_id) == -1)
ACE_ERROR ((LM_ERROR,
"%p; priority is %d\n",
"activate failed",
@@ -271,6 +268,9 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
number_of_low_priority_client = ts->thread_count_ - 1;
+ // mechanism to distribute the available priorities among the
+ // threads when there are not enough different priorities for all
+ // threads.
if (ts->use_multiple_priority_ == 1)
{
ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
@@ -286,23 +286,24 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
// 1 priority is exclusive for the high priority client.
number_of_priorities --;
- // if utilization thread is present, reduce in 1 the available priorities.
+ // if utilization thread is present, reduce in 1 the available
+ // priorities.
if (ts->use_utilization_test_ == 1)
{
number_of_priorities --;
}
- // Drop the priority, so that the priority of clients will increase
- // with increasing client number.
- for (j = 0; j < number_of_low_priority_client + 1; j++)
+ // Drop the priority, so that the priority of clients will
+ // increase with increasing client number.
+ for (j = 0; j < number_of_low_priority_client; j++)
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
ACE_SCOPE_THREAD);
- // if the lowest priority of the "low priority clients" is the minimum,
- // and we are running the utilization thread, increment the priority,
- // since we don't want the utlization thread and a "low priority thread"
- // to have the same priority.
+ // if the lowest priority of the "low priority clients" is the
+ // minimum, and we are running the utilization thread, increment
+ // the priority, since we don't want the utlization thread and a
+ // "low priority thread" to have the same priority.
if ( priority == ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
ACE_SCOPE_THREAD) &&
ts->use_utilization_test_ == 1)
@@ -310,10 +311,10 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
priority,
ACE_SCOPE_THREAD);
- // granularity of the assignment of the priorities. Some OSs have
- // fewer levels of priorities than we have threads in our test, so
- // with this mechanism we assign priorities to groups of threads when
- // there are more threads than priorities.
+ // granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
grain = number_of_low_priority_client / number_of_priorities;
counter = 0;
@@ -359,18 +360,13 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
if (low_priority_client[i - 1]->activate (THR_BOUND | ACE_SCHED_FIFO,
1,
0,
-#if !defined (VXWORKS)
- priority) == -1)
-#else
priority, // These are constructor defaults.
-1, // int grp_id = -1,
0, // ACE_Task_Base *task = 0,
0, // ACE_hthread_t thread_handles[] = 0,
0, // void *stack[] = 0,
0, // size_t stack_size[] = 0,
- &task_id) == -1)
-#endif /* VXWORKS */
-
+ (ACE_thread_t*)task_id) == -1)
ACE_ERROR ((LM_ERROR,
"%p; priority is %d\n",
"activate failed",
@@ -380,14 +376,15 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
{
counter = (counter + 1) % grain;
if ( (counter == 0) &&
- //Just so when we distribute the priorities among the threads, we make sure we don't go overboard.
+ //Just so when we distribute the priorities among the
+ //threads, we make sure we don't go overboard.
((number_of_priorities * grain) > (number_of_low_priority_client - (i - 1))) )
- {
- // Get the next higher priority.
- priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
- }
+ {
+ // Get the next higher priority.
+ priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
+ priority,
+ ACE_SCOPE_THREAD);
+ }
}
} /* end of for () */
@@ -443,12 +440,13 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
#if defined (VXWORKS)
if (ts->context_switch_test_ == 1)
{
- fprintf(stderr, "Adding the context switch hook!\n");
- taskSwitchHookAdd ((FUNCPTR)&switchHook);
+ fprintf(stderr, "Adding the context switch hook!\n");
+ taskSwitchHookAdd ((FUNCPTR)&switchHook);
}
#endif
- // Wait for all the client threads to exit (except the utilization thread).
+ // Wait for all the client threads to exit (except the utilization
+ // thread).
thread_manager->wait ();
#if defined (NO_ACE_QUANTIFY)
@@ -461,9 +459,9 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
timer_.elapsed_time (delta_t);
if (ts->use_utilization_test_ == 1)
- // signal the utilization thread to finish with its work..
- // only if utilization test was specified. See
- // description of this variable in header file.
+ // signal the utilization thread to finish with its work.. only
+ // if utilization test was specified. See description of this
+ // variable in header file.
{
util_thread.done_ = 1;
@@ -479,7 +477,8 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
#if defined (ACE_HAS_PRUSAGE_T)
timer_for_context_switch.stop ();
timer_for_context_switch.get_rusage (usage);
- // Add up the voluntary context switches & involuntary context switches
+ // Add up the voluntary context switches & involuntary context
+ // switches
context_switch = usage.pr_vctx + usage.pr_ictx - context_switch;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
@@ -488,7 +487,8 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
#elif defined (ACE_HAS_GETRUSAGE) && !defined (ACE_WIN32)
timer_for_context_switch.stop ();
timer_for_context_switch.get_rusage (usage);
- // Add up the voluntary context switches & involuntary context switches
+ // Add up the voluntary context switches & involuntary context
+ // switches
context_switch = usage.ru_nvcsw + usage.ru_nivcsw - context_switch;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
@@ -496,88 +496,95 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
usage.ru_nivcsw));
#elif defined (VXWORKS) /* ACE_HAS_GETRUSAGE */
taskSwitchHookDelete ((FUNCPTR)&switchHook);
- ACE_DEBUG ((LM_DEBUG,
+ ACE_DEBUG ((LM_DEBUG,
"Context switches=%d\n",
ctx));
#endif
}
+ // if running the utilization test, don't report latency nor jitter.
+ if (ts->use_utilization_test_ == 0 &&
+ ts->run_server_utilization_test_ == 0)
+ {
#if defined (VXWORKS)
- ACE_DEBUG ((LM_DEBUG, "Test done.\n"
- "High priority client latency : %f msec, jitter: %f msec\n"
- "Low priority client latency : %f msec, jitter: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter ()));
- // output the latency values to a file, tab separated, to import it
- // to Excel to calculate jitter, in the mean time we come up with
- // the sqrt() function.
- output_latency (ts);
-
+ ACE_DEBUG ((LM_DEBUG, "Test done.\n"
+ "High priority client latency : %f msec, jitter: %f msec\n"
+ "Low priority client latency : %f msec, jitter: %f msec\n",
+ high_priority_client.get_high_priority_latency (),
+ high_priority_client.get_high_priority_jitter (),
+ low_priority_client[0]->get_low_priority_latency (),
+ low_priority_client[0]->get_low_priority_jitter ()));
+ // output the latency values to a file, tab separated, to import
+ // it to Excel to calculate jitter, in the mean time we come up
+ // with the sqrt() function.
+ output_latency (ts);
#elif defined (CHORUS)
- ACE_DEBUG ((LM_DEBUG,
- "Test done.\n"
- "High priority client latency : %u usec\n"
- "Low priority client latency : %u usec\n",
- high_priority_client.get_high_priority_latency (),
- low_priority_client[0]->get_low_priority_latency () ));
-
- // output the latency values to a file, tab separated, to import it
- // to Excel to calculate jitter, in the mean time we come up with
- // the sqrt() function.
- output_latency (ts);
+ ACE_DEBUG ((LM_DEBUG,
+ "Test done.\n"
+ "High priority client latency : %u usec\n"
+ "Low priority client latency : %u usec\n",
+ high_priority_client.get_high_priority_latency (),
+ low_priority_client[0]->get_low_priority_latency () ));
+
+ // output the latency values to a file, tab separated, to import
+ // it to Excel to calculate jitter, in the mean time we come up
+ // with the sqrt() function.
+ output_latency (ts);
#else /* !CHORUS */
- ACE_DEBUG ((LM_DEBUG, "Test done.\n"
- "High priority client latency : %f msec, jitter: %f msec\n"
- "Low priority client latency : %f msec, jitter: %f msec\n"
- "# of context switches: %d, context_switch_time: %f msec\n"
- "total context switch time: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter (),
- context_switch,
- csw/1000,
- csw * context_switch/1000 ));
-// output_latency (ts);
+ ACE_DEBUG ((LM_DEBUG, "Test done.\n"
+ "High priority client latency : %f msec, jitter: %f msec\n"
+ "Low priority client latency : %f msec, jitter: %f msec\n"
+ "# of context switches: %d, context_switch_time: %f msec\n"
+ "total context switch time: %f msec\n",
+ high_priority_client.get_high_priority_latency (),
+ high_priority_client.get_high_priority_jitter (),
+ low_priority_client[0]->get_low_priority_latency (),
+ low_priority_client[0]->get_low_priority_jitter (),
+ context_switch,
+ csw/1000,
+ csw * context_switch/1000 ));
+ // output_latency (ts);
#endif /* !VXWORKS && !CHORUS */
-
+ }
if (ts->use_utilization_test_ == 1)
- {
- total_util_task_duration = util_task_duration * util_thread.get_number_of_computations ();
+ {
+ total_util_task_duration = util_task_duration * util_thread.get_number_of_computations ();
- total_latency = (delta_t.sec () *
- ACE_ONE_SECOND_IN_MSECS +
- (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS);
-
- total_latency_high = total_latency - total_util_task_duration;
-
- // Calc and print the CPU percentage. I add 0.5 to round to the
- // nearest integer before casting it to int.
- ACE_DEBUG ((LM_DEBUG,
- "\t%% ORB Client CPU utilization: %u %%\n"
- "\t%% Idle time: %u %%\n",
- (u_int) (total_latency_high * 100 / total_latency + 0.5),
- (u_int) (total_util_task_duration * 100 / total_latency + 0.5) ));
+ total_latency = (delta_t.sec () *
+ ACE_ONE_SECOND_IN_MSECS +
+ (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS);
+
+ total_latency_high = total_latency - total_util_task_duration;
+
+ // Calc and print the CPU percentage. I add 0.5 to round to the
+ // nearest integer before casting it to int.
+ ACE_DEBUG ((LM_DEBUG,
+ "\t%% ORB Client CPU utilization: %u %%\n"
+ "\t%% Idle time: %u %%\n\n",
+ (u_int) (total_latency_high * 100 / total_latency + 0.5),
+ (u_int) (total_util_task_duration * 100 / total_latency + 0.5) ));
- #if defined (ACE_LACKS_FLOATING_POINT)
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization task performed %u computations\n",
- util_thread.get_number_of_computations ()));
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %u usecs\n",
- util_task_duration));
- #else
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization task performed %g computations\n",
- util_thread.get_number_of_computations ()));
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %f msecs\n",
- util_task_duration));
- #endif /* ! ACE_LACKS_FLOATING_POINT */
- }
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) UTILIZATION task performed \t%u computations\n"
+ "(%t) CLIENT task performed \t\t%u CORBA calls\n"
+ "\t Ratio of computations to CORBA calls is %u.%u:1\n\n",
+ util_thread.get_number_of_computations (),
+ ts->loop_count_,
+ util_thread.get_number_of_computations () / ts->loop_count_,
+ (util_thread.get_number_of_computations () % ts->loop_count_) * 100 / ts->loop_count_
+ ));
+
+#if defined (ACE_LACKS_FLOATING_POINT)
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) utilization computation time is %u usecs\n",
+ util_task_duration));
+#else
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) utilization computation time is %f msecs\n",
+ util_task_duration));
+#endif /* ! ACE_LACKS_FLOATING_POINT */
+ }
#if defined (VXWORKS)
delete task_id;
@@ -608,25 +615,20 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"Creating 20 Hz client with priority %d\n",
priority));
+
if (CB_20Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
ACE_SCOPE_THREAD);
- priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
ACE_DEBUG ((LM_DEBUG,
"Creating 10 Hz client with priority %d\n",
priority));
+
if (CB_10Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
@@ -634,10 +636,9 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"Creating 5 Hz client with priority %d\n",
priority));
+
if (CB_5Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
@@ -645,10 +646,9 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"Creating 1 Hz client with priority %d\n",
priority));
+
if (CB_1Hz_client.activate (THR_BOUND | ACE_SCHED_FIFO, 1, 1, priority) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p\n"
- "activate failed"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) errno = %p: activate failed\n"));
// Wait for all the threads to exit.
thread_manager->wait ();
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
index 4ed26dce816..1c4d2799ac4 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
@@ -25,16 +25,7 @@ Cubit_i::cube_octet (CORBA::Octet o,
ts_->barrier_->wait ();
}
-// if (ts_->run_server_utilization_test_ == 0)
-// {
-// ACE_DEBUG ((LM_DEBUG,
-// "(%t) )))))))) "
-// "test STARTED at %D\n"));
-
-// ts_->run_server_utilization_test_ = 1;
-// this->util_started_ = 1;
-// this->ts_->timer_.start ();
-// }
+ ts_->loop_count_++;
return (CORBA::Octet) (o * o * o);
}
@@ -75,6 +66,9 @@ void Cubit_i::shutdown (CORBA::Environment &)
{
ACE_DEBUG ((LM_DEBUG,
"(%t) Calling TAO_ORB_Core_instance ()->orb ()->shutdown ()\n"));
+
TAO_ORB_Core_instance ()->orb ()->shutdown ();
+
+ // this is to signal the utilization thread to exit its loop.
ts_->utilization_task_started_ = 0;
}
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp
index 2a9e62169db..0e0ae8f8824 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp
@@ -543,8 +543,8 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
0,
priority) == -1)
{
- ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n",
- "high_priority_task->activate"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n"
+ "\thigh_priority_task->activate failed"));
}
// Create an array to hold pointers to the low priority tasks.
@@ -580,7 +580,7 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
// Drop the priority, so that the priority of clients will increase
// with increasing client number.
- for (i = 0; i < number_of_low_priority_servants + 1; i++)
+ for (i = 0; i < number_of_low_priority_servants; i++)
priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
priority,
ACE_SCOPE_THREAD);
@@ -643,8 +643,8 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
0,
priority) == -1)
{
- ACE_ERROR ((LM_ERROR, "(%P|%t; %p\n",
- "low_priority_task[i]->activate"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n"
+ "\tlow_priority_task[i]->activate"));
}
ACE_DEBUG ((LM_DEBUG,
@@ -657,7 +657,8 @@ start_servants (ACE_Thread_Manager *serv_thr_mgr, ACE_Barrier &start_barrier, Ta
{
counter = (counter + 1) % grain;
if ( (counter == 0) &&
- //Just so when we distribute the priorities among the threads, we make sure we don't go overboard.
+ //Just so when we distribute the priorities among the
+ //threads, we make sure we don't go overboard.
((number_of_priorities * grain) > (number_of_low_priority_servants - (i - 1))) )
{
// Get the next higher priority.
@@ -724,8 +725,8 @@ start_utilization (ACE_Thread_Manager *util_thr_mgr, Task_State *ts)
0,
priority) == -1)
{
- ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n",
- "util_task->activate"));
+ ACE_ERROR ((LM_ERROR, "(%P|%t) %p\n"
+ "\tutil_task->activate failed"));
}
return util_task;
@@ -784,7 +785,10 @@ main (int argc, char *argv[])
Task_State ts ( _argc, _argv);
if (run_utilization_test == 1)
- ts.run_server_utilization_test_ = 1;
+ {
+ ts.run_server_utilization_test_ = 1;
+ ts.loop_count_ = 0;
+ }
Util_Thread * util_task = 0;
@@ -873,11 +877,22 @@ main (int argc, char *argv[])
total_latency_servants = total_latency - total_util_task_duration;
+ ACE_DEBUG ((LM_DEBUG,
+ "-------------------------- Stats -------------------------------\n"));
+
ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization task performed %g computations\n"
- "(%t) each computation had a duration of %f msecs\n"
- "(%t) elapsed time is %f msecs\n",
+ "(%t) UTILIZATION task performed \t%u computations\n"
+ "(%t) SERVANT task serviced \t\t%u CORBA calls\n"
+ "\t Ratio of computations to CORBA calls is %u.%u:1\n\n",
util_task->get_number_of_computations (),
+ ts.loop_count_,
+ util_task->get_number_of_computations () / ts.loop_count_,
+ (util_task->get_number_of_computations () % ts.loop_count_) * 100 / ts.loop_count_
+ ));
+
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Each computation had a duration of %f msecs\n"
+ "(%t) Total elapsed time of test is %f msecs\n",
util_task_duration / 1000,
total_latency / 1000));