-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp  4
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.cpp  86
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.h  56
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp  136
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.h  32
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.cpp  27
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.h  20
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp  1000
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/client.h  147
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp  4
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp  158
-rw-r--r--  TAO/performance-tests/Cubit/TAO/MT_Cubit/server.h  11
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp  4
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Globals.cpp  86
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Globals.h  56
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp  136
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.h  32
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Timer.cpp  27
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/Timer.h  20
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/client.cpp  1000
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/client.h  147
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp  4
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/server.cpp  158
-rw-r--r--  TAO/tests/Cubit/TAO/MT_Cubit/server.h  11
24 files changed, 1736 insertions, 1626 deletions
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp
index 06b7a89c6ad..0d46a6a8e06 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp
@@ -131,9 +131,6 @@ Cubit_Task::initialize_orb (void)
if (GLOBALS::instance ()->use_name_service == 0)
return 0;
- // @@ Naga, if this code is no longer needed can we please
- // remove it?
-
// Initialize the naming services. Init should be able to be
// passed the command line arguments, but it isn't possible
// here, so use dummy values.
@@ -210,7 +207,6 @@ Cubit_Task::create_servants (void)
-1);
char *buffer;
- // @@ Naga, can you please document why the value "3" is here?
// Length of the string is the length of the key + 2 char
// id of the servant + null space.
int len = ACE_OS::strlen (this->key_) + 3;
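The "+ 3" in the hunk above covers a two-character servant id plus the terminating null, as the new comment states. A minimal sketch of how such a buffer would presumably be sized and filled; the "%s%02d" layout and the servant_id variable are illustrative assumptions, not part of this commit:

    // key_ + 2-char servant id + '\0'
    int len = ACE_OS::strlen (this->key_) + 3;
    char *buffer = 0;
    ACE_NEW_RETURN (buffer, char [len], -1);
    // Hypothetical layout: the key immediately followed by a 2-digit id.
    ACE_OS::sprintf (buffer, "%s%02d", this->key_, (int) servant_id);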
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.cpp
index c0fdacf8f6b..ce248410b84 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.cpp
@@ -67,8 +67,6 @@ Globals::parse_args (int argc, char *argv[])
break;
case 'p':
base_port = ACE_OS::atoi (opts.optarg);
- // @@ Naga, do we need to keep this printout here or can we
- // remove it?
break;
case 't':
num_of_objs = ACE_OS::atoi (opts.optarg);
@@ -94,3 +92,87 @@ Globals::parse_args (int argc, char *argv[])
// Indicates successful parsing of command line.
return 0;
}
+
+MT_Priority::MT_Priority (void)
+ :num_priorities_ (0),
+ grain_ (0)
+{
+}
+
+ACE_Sched_Priority
+MT_Priority::get_high_priority (void)
+{
+ ACE_Sched_Priority high_priority;
+#if defined (VXWORKS)
+ high_priority = ACE_THR_PRI_FIFO_DEF;
+#elif defined (ACE_WIN32)
+ high_priority =
+ ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD);
+#else
+ // @@ Naga/Sergio, why is there a "25" here? This seems like too
+ // much of a "magic" number. Can you make this more "abstract?"
+ high_priority = ACE_THR_PRI_FIFO_DEF + 25;
+#endif /* VXWORKS */
+ return high_priority;
+}
+
+ACE_Sched_Priority
+MT_Priority::get_low_priority (u_int num_low_priority,
+ ACE_Sched_Priority prev_priority,
+ u_int use_multiple_priority)
+{
+ ACE_Sched_Priority low_priority;
+ // Drop the priority
+ if (use_multiple_priority)
+ {
+ this->num_priorities_ = 0;
+
+ for (ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD);
+ priority_iterator.more ();
+ priority_iterator.next ())
+ this->num_priorities_ ++;
+ // 1 priority is exclusive for the high priority client.
+ this->num_priorities_ --;
+ // Drop the priority, so that the priority of clients will
+ // increase with increasing client number.
+ for (u_int j = 0;
+ j < num_low_priority;
+ j++)
+ {
+ low_priority =
+ ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
+ prev_priority,
+ ACE_SCOPE_THREAD);
+ prev_priority = low_priority;
+ }
+ // Granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
+ this->grain_ = num_low_priority / this->num_priorities_;
+ if (this->grain_ <= 0)
+
+ this->grain_ = 1;
+ }
+ else
+ low_priority =
+ ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
+ prev_priority,
+ ACE_SCOPE_THREAD);
+ return low_priority;
+}
+
+u_int
+MT_Priority::number_of_priorities (void)
+{
+ return this->num_priorities_;
+}
+
+u_int
+MT_Priority::grain (void)
+{
+ return this->grain_;
+}
+
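A minimal sketch of how the new MT_Priority helper is intended to be driven, following the signatures above; the thread count and the multiple-priority flag are illustrative assumptions (the real call sites are in client.cpp further down):

    MT_Priority priority;
    u_int num_low_priority = thread_count - 1;   // one thread stays high priority
    ACE_Sched_Priority high = priority.get_high_priority ();
    // Walk num_low_priority levels below "high"; with the multiple-priority
    // flag set this also computes number_of_priorities () and grain ().
    ACE_Sched_Priority low = priority.get_low_priority (num_low_priority,
                                                        high,
                                                        1);
    ACE_DEBUG ((LM_DEBUG,
                "high %d, low %d, %u priorities, grain %u\n",
                high, low,
                priority.number_of_priorities (),
                priority.grain ()));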
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.h b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.h
index 2859297bd17..8523ddf4a7c 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.h
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Globals.h
@@ -19,6 +19,7 @@
#include "ace/OS.h"
#include "ace/Get_Opt.h"
#include "ace/Synch_T.h"
+#include "ace/Sched_Params.h"
#if !defined (ACE_HAS_THREADS)
class NOOP_ACE_Barrier
@@ -30,6 +31,27 @@ public:
#define ACE_Barrier NOOP_ACE_Barrier
#endif /* ACE_HAS_THREADS */
+#if defined (VXWORKS) && defined (VME_DRIVER)
+#define VX_VME_INIT \
+STATUS status = vmeDrv ();\
+if (status != OK)\
+ ACE_DEBUG ((LM_DEBUG,\
+ "ERROR on call to vmeDrv()\n"));\
+ status = vmeDevCreate ("/vme");\
+ if (status != OK)\
+ ACE_DEBUG ((LM_DEBUG,\
+ "ERROR on call to vmeDevCreate()\n"));
+#else
+#define VX_VME_INIT
+#endif /* VXWORKS && VME_DRIVER */
+
+#if defined (ACE_LACKS_FLOATING_POINT)
+#define TIME_IN_MICROSEC(X) X
+#else /* !ACE_LACKS_FLOATING_POINT */
+#define TIME_IN_MICROSEC(X) \
+(X * ACE_ONE_SECOND_IN_USECS)
+#endif /* !ACE_LACKS_FLOATING_POINT */
+
class Globals
{
// = TITLE
@@ -38,9 +60,8 @@ class Globals
// This is used both by the server and client side.
public:
Globals (void);
+ // default constructor.
- // @@ Naga, can you please make sure these fields/methods are
- // commented briefly?
int parse_args (int argc,char **argv);
// parse the arguments.
@@ -85,6 +106,37 @@ public:
// binding to the orb.
};
+class MT_Priority
+{
+public:
+ MT_Priority (void);
+ // constructor.
+
+ virtual ACE_Sched_Priority get_high_priority (void);
+ // sets the priority of the high priority thread.
+
+ virtual ACE_Sched_Priority get_low_priority (u_int num_low_priority,
+ ACE_Sched_Priority prev_priority,
+ u_int use_multiple_priority);
+ // sets the priority to be used for the low priority thread.
+ u_int number_of_priorities (void);
+ // accessor for num_priorities_.
+
+ u_int grain (void);
+ // accessor for grain_.
+
+protected:
+ u_int num_priorities_;
+ // number of priorities used.
+
+ u_int grain_;
+ // Granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
+};
+
+
#endif /* GLOBALS_H */
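TIME_IN_MICROSEC centralizes the ACE_LACKS_FLOATING_POINT split that used to be open-coded at each call site: on fixed-point builds the timers already report microseconds and the macro is a no-op, while on floating-point builds it scales a value expressed in seconds. A small usage sketch, with elapsed standing in for a value of the kind MT_Cubit_Timer::get_elapsed () returns:

    ACE_timer_t elapsed = /* seconds on FP builds, usec on fixed-point builds */ 0;
    ACE_timer_t latency_usec = TIME_IN_MICROSEC (elapsed);
    ACE_DEBUG ((LM_DEBUG,
                "latency %A usec\n",
                latency_usec));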
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
index cbdba19ac33..9eb7d636a5e 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
@@ -196,10 +196,10 @@ Task_State::parse_args (int argc,char **argv)
ACE_Thread_Semaphore (0),
-1);
ACE_NEW_RETURN (latency_,
- double [thread_count_],
+ ACE_timer_t [thread_count_],
-1);
ACE_NEW_RETURN (global_jitter_array_,
- double *[thread_count_],
+ ACE_timer_t *[thread_count_],
-1);
ACE_NEW_RETURN (count_,
u_int [thread_count_],
@@ -212,8 +212,6 @@ Task_State::~Task_State (void)
int i;
if (this->ior_file_ != 0)
- // @@ Naga, should this be delete [] this->ior_file?!
- // ;-(
ACE_OS::free (this->ior_file_);
// Delete the strduped memory.
@@ -240,8 +238,8 @@ Client::Client (ACE_Thread_Manager *thread_manager,
}
void
-Client::put_latency (double *jitter,
- double latency,
+Client::put_latency (ACE_timer_t *jitter,
+ ACE_timer_t latency,
u_int thread_id,
u_int count)
{
@@ -251,52 +249,43 @@ Client::put_latency (double *jitter,
this->ts_->global_jitter_array_[thread_id] = jitter;
this->ts_->count_[thread_id] = count;
- // @@ Naga, can you please try to factor out all of the
- // ACE_LACKS_FLOATING_POINT into a helper class to clean up all of
- // this code?!
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
- "(%t) My latency was %u msec\n",
+ "(%t) My latency was %A msec\n",
latency));
-#else
- ACE_DEBUG ((LM_DEBUG,
- "(%t) My latency was %f msec\n",
- latency));
-#endif /* ! ACE_LACKS_FLOATING_POINT */
}
-double
+ACE_timer_t
Client::get_high_priority_latency (void)
{
- return (double) this->ts_->latency_ [0];
+ return (ACE_timer_t) this->ts_->latency_ [0];
}
-double
+ACE_timer_t
Client::get_low_priority_latency (void)
{
if (this->ts_->thread_count_ == 1)
return 0;
- double l = 0;
+ ACE_timer_t l = 0;
for (u_int i = 1; i < this->ts_->thread_count_; i++)
- l += (double) this->ts_->latency_[i];
+ l += (ACE_timer_t) this->ts_->latency_[i];
- return l / (double) (this->ts_->thread_count_ - 1);
+ return l / (ACE_timer_t) (this->ts_->thread_count_ - 1);
}
-double
+ACE_timer_t
Client::get_latency (u_int thread_id)
{
- return ACE_static_cast (double, this->ts_->latency_ [thread_id]);
+ return ACE_static_cast (ACE_timer_t, this->ts_->latency_ [thread_id]);
}
-double
+ACE_timer_t
Client::get_high_priority_jitter (void)
{
- double jitter = 0.0;
- double average = get_high_priority_latency ();
- double number_of_samples = this->ts_->high_priority_loop_count_ / this->ts_->granularity_;
+ ACE_timer_t jitter = 0.0;
+ ACE_timer_t average = get_high_priority_latency ();
+ ACE_timer_t number_of_samples = this->ts_->high_priority_loop_count_ / this->ts_->granularity_;
// Compute the standard deviation (i.e. jitter) from the values
// stored in the global_jitter_array_.
@@ -307,7 +296,7 @@ Client::get_high_priority_jitter (void)
// each latency has from the average
for (u_int i = 0; i < number_of_samples; i ++)
{
- double difference =
+ ACE_timer_t difference =
this->ts_->global_jitter_array_ [0][i] - average;
jitter += difference * difference;
stats.sample ((ACE_UINT32) (this->ts_->global_jitter_array_ [0][i] * 1000 + 0.5));
@@ -316,8 +305,6 @@ Client::get_high_priority_jitter (void)
// Return the square root of the sum of the differences computed
// above, i.e. jitter.
- // @@ Naga, can you please replace the fprintf (stderr, ...) calls
- // with ACE_DEBUG(()) calls throughout this file?
ACE_DEBUG ((LM_DEBUG,
"high priority jitter:\n"));
stats.print_summary (3, 1000, stderr);
@@ -325,15 +312,15 @@ Client::get_high_priority_jitter (void)
return sqrt (jitter / (number_of_samples - 1));
}
-double
+ACE_timer_t
Client::get_low_priority_jitter (void)
{
if (this->ts_->thread_count_ == 1)
return 0;
- double jitter = 0.0;
- double average = get_low_priority_latency ();
- double number_of_samples = 0;
+ ACE_timer_t jitter = 0.0;
+ ACE_timer_t average = get_low_priority_latency ();
+ ACE_timer_t number_of_samples = 0;
//(this->ts_->thread_count_ - 1) * (this->ts_->loop_count_ / this->ts_->granularity_);
// Compute the standard deviation (i.e. jitter) from the values
@@ -348,7 +335,7 @@ Client::get_low_priority_jitter (void)
number_of_samples += this->ts_->count_[j];
for (u_int i = 0; i < this->ts_->count_[j] / this->ts_->granularity_; i ++)
{
- double difference =
+ ACE_timer_t difference =
this->ts_->global_jitter_array_[j][i] - average;
jitter += difference * difference;
stats.sample ((ACE_UINT32) (this->ts_->global_jitter_array_ [j][i] * 1000 + 0.5));
@@ -365,12 +352,12 @@ Client::get_low_priority_jitter (void)
return sqrt (jitter / (number_of_samples - 1));
}
-double
+ACE_timer_t
Client::get_jitter (u_int id)
{
- double jitter = 0.0;
- double average = get_latency (id);
- double number_of_samples = this->ts_->count_[id] / this->ts_->granularity_;
+ ACE_timer_t jitter = 0.0;
+ ACE_timer_t average = get_latency (id);
+ ACE_timer_t number_of_samples = this->ts_->count_[id] / this->ts_->granularity_;
// Compute the standard deviation (i.e. jitter) from the values
// stored in the global_jitter_array_.
@@ -381,7 +368,7 @@ Client::get_jitter (u_int id)
// latency has from the average.
for (u_int i = 0; i < this->ts_->count_[id] / this->ts_->granularity_; i ++)
{
- double difference =
+ ACE_timer_t difference =
this->ts_->global_jitter_array_[id][i] - average;
jitter += difference * difference;
stats.sample ((ACE_UINT32) (this->ts_->global_jitter_array_ [id][i] * 1000 + 0.5));
@@ -405,7 +392,7 @@ Client::svc (void)
CORBA::Object_var naming_obj (0);
CORBA::Environment env;
- double frequency = 0.0;
+ ACE_timer_t frequency = 0.0;
ACE_DEBUG ((LM_DEBUG, "I'm thread %t\n"));
@@ -787,12 +774,6 @@ Client::cube_short (void)
CORBA::Short arg_short = func (this->num_);
CORBA::Short ret_short;
- // @@ Naga, can you please do two things:
- // 1. Move this quantify stuff into a macro so that it
- // doesn't clutter the code everywhere?
- // 2. Reconsider why this macro is named NO_ACE_QUANTIFY?
- // It doesn't seem to make much sense!
-
START_QUANTIFY;
ret_short = this->cubit_->cube_short (arg_short, TAO_TRY_ENV);
@@ -964,31 +945,26 @@ Client::run_tests (Cubit_ptr cb,
u_int loop_count,
u_int thread_id,
Cubit_Datatypes datatype,
- double frequency)
+ ACE_timer_t frequency)
{
int result;
- // @@ Naga, this function is WAY too long! Can you please try to
- // split it up?!
CORBA::Environment env;
u_int i = 0;
u_int low_priority_client_count = this->ts_->thread_count_ - 1;
- double *my_jitter_array;
+ ACE_timer_t *my_jitter_array;
this->cubit_ = cb;
if (id_ == 0 && this->ts_->thread_count_ > 1)
// @@ Naga, can you please generalize this magic number?
ACE_NEW_RETURN (my_jitter_array,
- double [(loop_count/this->ts_->granularity_) * 30],
+ ACE_timer_t [(loop_count/this->ts_->granularity_) * 30],
-1);
else
ACE_NEW_RETURN (my_jitter_array,
- double [loop_count/this->ts_->granularity_ * 15],
+ ACE_timer_t [loop_count/this->ts_->granularity_ * 15],
-1);
- // @@ Naga, can you please replace this CHORUS stuff with the
- // ACE_timer_t stuff throughout the file?!
-
ACE_timer_t latency = 0;
ACE_timer_t sleep_time = (1 / frequency) * ACE_ONE_SECOND_IN_USECS * this->ts_->granularity_; // usec
ACE_timer_t delta = 0;
@@ -997,7 +973,7 @@ Client::run_tests (Cubit_ptr cb,
ACE_Time_Value max_wait_time (this->ts_->util_time_, 0);
ACE_Countdown_Time countdown (&max_wait_time);
- MT_Cubit_Timer timer (this->ts_);
+ MT_Cubit_Timer timer (this->ts_->granularity_);
// Elapsed time will be in microseconds.
ACE_Time_Value delta_t;
@@ -1032,7 +1008,9 @@ Client::run_tests (Cubit_ptr cb,
timer.start ();
}
this->num_ = i;
+ // make calls to the server object depending on the datatype.
result = this->make_calls ();
+
if (result < 0)
return 2;
@@ -1044,17 +1022,12 @@ Client::run_tests (Cubit_ptr cb,
// Calculate time elapsed.
ACE_timer_t real_time;
real_time = timer.get_elapsed ();
-#if defined (ACE_LACKS_FLOATING_POINT)
- delta = ((40 * fabs (real_time) / 100) + (60 * delta / 100)); // pow(10,6)
- latency += real_time * this->ts_->granularity_;
- my_jitter_array [i/this->ts_->granularity_] = real_time; // in units of microseconds.
- // update the latency array, correcting the index using the granularity
-#else /* !ACE_LACKS_FLOATING_POINT */
- delta = ((0.4 * fabs (real_time * ACE_ONE_SECOND_IN_USECS)) + (0.6 * delta)); // pow(10,6)
- latency += (real_time * ts_->granularity_);
- my_jitter_array [i/ts_->granularity_] = real_time * ACE_ONE_SECOND_IN_MSECS;
-#endif /* !ACE_LACKS_FLOATING_POINT */
- } // END OF IF :
+ delta = (ACE_timer_t) 40 *fabs (TIME_IN_MICROSEC(real_time))
+ / (ACE_timer_t) 100 + (ACE_timer_t) 60 *delta/ 100;
+ latency += real_time * this->ts_->granularity_;
+ my_jitter_array [i/this->ts_->granularity_] =
+ TIME_IN_MICROSEC (real_time);
+ }
if ( this->ts_->thread_per_rate_ == 1 && id_ < (this->ts_->thread_count_ - 1) )
{
if (this->ts_->semaphore_->tryacquire () != -1)
@@ -1094,38 +1067,21 @@ Client::run_tests (Cubit_ptr cb,
{
if (this->error_count_ == 0)
{
-#if defined (ACE_LACKS_FLOATING_POINT)
- long calls_per_second = (this->call_count_ * ACE_ONE_SECOND_IN_USECS) / latency;
+ ACE_timer_t calls_per_second = (TIME_IN_MICROSEC (this->call_count_)) / latency;
latency = latency/this->call_count_;//calc average latency
-#else
- latency /= this->call_count_; // calc average latency
-#endif /* ACE_LACKS_FLOATING_POINT */
if (latency > 0)
{
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) cube average call ACE_OS::time\t= %u usec, \t"
- "%u calls/second\n",
+ "(%P|%t) cube average call ACE_OS::time\t= %A usec, \t"
+ "%A calls/second\n",
latency,
calls_per_second));
this->put_latency (my_jitter_array,
- latency,
- thread_id,
- this->call_count_);
-#else
- ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) cube average call ACE_OS::time\t= %f msec, \t"
- "%f calls/second\n",
- latency * 1000,
- 1 / latency));
-
- this->put_latency (my_jitter_array,
- latency * ACE_ONE_SECOND_IN_MSECS,
+ TIME_IN_MICROSEC (latency),
thread_id,
this->call_count_);
-#endif /* ! ACE_LACKS_FLOATING_POINT */
}
else
{
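The smoothing step consolidated in the run_tests () hunk above is an exponentially weighted moving average of the per-call time: 40% of the new sample plus 60% of the running value, written with the integer factors 40, 60 and 100 so the same line also compiles on ACE_LACKS_FLOATING_POINT builds. Expressed as a formula:

    // t     = TIME_IN_MICROSEC (real_time)   // new per-call sample, usec
    // delta = 0.4 * |t| + 0.6 * delta        // EWMA with 40/60 weighting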
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.h b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.h
index f0d269a15d8..6574293f015 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.h
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Task_Client.h
@@ -26,10 +26,12 @@
#include "ace/Sched_Params.h"
#include "ace/High_Res_Timer.h"
+
#include "orbsvcs/CosNamingC.h"
#include "orbsvcs/Naming/Naming_Utils.h"
#include "cubitC.h"
#include "cubit_i.h"
+#include "Globals.h"
#if defined (CHORUS)
#include "pccTimer.h"
@@ -41,10 +43,7 @@
//
// I will integrate this, together with the sqrt() function when
// the implementation is complete. --Sergio.
-// @@ Sergio, can you please use the ACE_timer_t type for this instead
-// of #define'ing double?!
#if defined (ACE_LACKS_FLOATING_POINT)
-#define double ACE_UINT32
#define fabs(X) ((X) >= 0 ? (X) : -(X))
// the following is just temporary, until we finish the sqrt()
// implementation.
@@ -66,9 +65,12 @@ public:
quantify_start_recording_data ();
#define STOP_QUANTIFY \
quantify_stop_recording_data();
+#define CLEAR_QUANTIFY \
+quantify_clear_data ();
#else /*!NO_ACE_QUANTIFY */
#define START_QUANTIFY
#define STOP_QUANTIFY
+#define CLEAR_QUANTIFY
#endif /* !NO_ACE_QUANTIFY */
// Arbitrary generator used by the client to create the numbers to be
@@ -134,7 +136,7 @@ public:
u_int thread_count_;
// Number of concurrent clients to create.
- double *latency_;
+ ACE_timer_t *latency_;
// Array to store the latency for every client, indexed by
// thread-id.
@@ -154,7 +156,7 @@ public:
u_int thread_per_rate_;
// Flag for the thread_per_rate test.
- double **global_jitter_array_;
+ ACE_timer_t **global_jitter_array_;
// This array stores the latency seen by each client for each
// request, to be used later to compute jitter.
@@ -243,7 +245,7 @@ char *one_ior_;
// flag to indicate whether we make remote versus local invocations
// to calculate accurately the ORB overhead.
- double util_test_time_;
+ ACE_timer_t util_test_time_;
// holds the total time for the utilization test to complete.
};
@@ -262,12 +264,12 @@ public:
virtual int svc (void);
// The thread function.
- double get_high_priority_latency (void);
- double get_low_priority_latency (void);
- double get_high_priority_jitter (void);
- double get_low_priority_jitter (void);
- double get_latency (u_int thread_id);
- double get_jitter (u_int id);
+ ACE_timer_t get_high_priority_latency (void);
+ ACE_timer_t get_low_priority_latency (void);
+ ACE_timer_t get_high_priority_jitter (void);
+ ACE_timer_t get_low_priority_jitter (void);
+ ACE_timer_t get_latency (u_int thread_id);
+ ACE_timer_t get_jitter (u_int id);
// Accessors to get the various measured quantities.
private:
@@ -275,7 +277,7 @@ private:
u_int,
u_int,
Cubit_Datatypes,
- double frequency);
+ ACE_timer_t frequency);
// run the various tests.
int make_calls (void);
@@ -293,8 +295,8 @@ private:
int cube_struct (void);
// call cube struct on the cubit object.
- void put_latency (double *jitter,
- double latency,
+ void put_latency (ACE_timer_t *jitter,
+ ACE_timer_t latency,
u_int thread_id,
u_int count);
// Records the latencies in the <Task_State>.
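The double to ACE_timer_t renaming throughout these hunks assumes ACE_timer_t resolves to an unsigned integral type on ACE_LACKS_FLOATING_POINT builds and to double elsewhere, which is why the old "#define double ACE_UINT32" hack can be dropped. The typedef itself is not visible in this diff; the kind of definition assumed is roughly:

    #if defined (ACE_LACKS_FLOATING_POINT)
    typedef ACE_UINT32 ACE_timer_t;   // fixed-point builds count in usec
    #else
    typedef double ACE_timer_t;       // floating-point builds
    #endif /* ACE_LACKS_FLOATING_POINT */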
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.cpp
index e6122264992..409b128af66 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.cpp
@@ -3,13 +3,11 @@
#include "Timer.h"
#include "Task_Client.h"
-MT_Cubit_Timer::MT_Cubit_Timer (Task_State *ts)
+MT_Cubit_Timer::MT_Cubit_Timer (u_int granularity)
+ :granularity_ (granularity)
#if defined (CHORUS)
- :pstartTime_ (0),
- pstopTime_ (0),
- ts_ (ts)
-#else
- :ts_ (ts)
+ ,pstartTime_ (0)
+ pstopTime_ (0)
#endif
{
}
@@ -42,11 +40,11 @@ MT_Cubit_Timer::get_elapsed (void)
ACE_timer_t real_time;
#if defined (ACE_LACKS_FLOATING_POINT)
# if defined (CHORUS)
- real_time = (this->pstopTime_ - this->pstartTime_) / this->ts_->granularity_;
+ real_time = (this->pstopTime_ - this->pstartTime_) / this->granularity_;
# else /* CHORUS */
// Store the time in usecs.
real_time = (this->delta_.sec () * ACE_ONE_SECOND_IN_USECS +
- this->delta_.usec ()) / this->ts_->granularity_;
+ this->delta_.usec ()) / this->granularity_;
#endif /* !CHORUS */
#else /* !ACE_LACKS_FLOATING_POINT */
@@ -64,8 +62,8 @@ MT_Cubit_Timer::get_elapsed (void)
// This is only occurring in VxWorks.
// I'll leave these here to debug it later.
- double tmp = (double)this->delta_.sec ();
- double tmp2 = (double)this->delta_.usec ();
+ ACE_timer_t tmp = (ACE_timer_t)delta_t.sec ();
+ ACE_timer_t tmp2 = (ACE_timer_t)delta_t.usec ();
if (tmp > 100000)
{
tmp = 0.0;
@@ -74,15 +72,14 @@ MT_Cubit_Timer::get_elapsed (void)
this->delta_.usec ()));
}
- real_time = tmp + tmp2 / (double)ACE_ONE_SECOND_IN_USECS;
+ real_time = tmp + tmp2 / (ACE_timer_t)ACE_ONE_SECOND_IN_USECS;
#else
- real_time = ((double) this->delta_.sec () +
- (double) this->delta_.usec () / (double) ACE_ONE_SECOND_IN_USECS);
+ real_time = ((ACE_timer_t) this->delta_.sec () +
+ (ACE_timer_t) this->delta_.usec () / (ACE_timer_t) ACE_ONE_SECOND_IN_USECS);
#endif /* VXWORKS */
- real_time /= this->ts_->granularity_;
+ real_time /= this->granularity_;
#endif /* !ACE_LACKS_FLOATING_POINT */
-
return real_time;
}
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.h b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.h
index a53d465e773..357cde10983 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.h
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/Timer.h
@@ -14,25 +14,27 @@ class MT_Cubit_Timer
// A class that encapsulates the pccTimer for chorus and uses
// ACE Timer for other platforms.
public:
- MT_Cubit_Timer (Task_State *ts);
+ MT_Cubit_Timer (u_int granularity);
void start (void);
void stop (void);
ACE_timer_t get_elapsed (void);
private:
-#if defined (CHORUS)
- int pstartTime_;
- int pstopTime_;
- // variables for the pccTimer.
-#endif
-
ACE_High_Res_Timer timer_;
// timer.
ACE_Time_Value delta_;
// Elapsed time in microseconds.
- Task_State *ts_;
- // task state.
+ u_int granularity_;
+ // this is the granularity of the timing of the CORBA requests. A
+ // value of 5 represents that we will take time every 5 requests,
+ // instead of the default of every request (1).
+
+#if defined (CHORUS)
+ int pstartTime_;
+ int pstopTime_;
+ // variables for the pccTimer.
+#endif
};
#endif
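A minimal usage sketch of the reworked timer, assuming a granularity of 5 as described by the new comment in Timer.h (time every fifth request); the loop body is a placeholder for one CORBA call:

    MT_Cubit_Timer timer (5);          // take a timestamp every 5 requests
    timer.start ();
    for (u_int i = 0; i < 5; ++i)
      {
        // one cubit_->cube_short () style request would go here
      }
    timer.stop ();
    // get_elapsed () divides the elapsed time by the granularity,
    // yielding the average time per request.
    ACE_timer_t per_call = timer.get_elapsed ();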
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp
index 19e86e72007..69d01af1743 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.cpp
@@ -24,6 +24,13 @@
ACE_RCSID(MT_Cubit, client, "$Id$")
+#if defined (FORCE_ARGS)
+ char *force_argv[] = {"client",
+ "-s",
+ "-f",
+ "ior.txt"};
+#endif
+
#if defined (VXWORKS)
u_int ctx = 0;
u_int ct = 0;
@@ -63,31 +70,123 @@ switchHook ( WIND_TCB *pOldTcb, // pointer to old task's WIND_TCB.
}
#endif /* VXWORKS */
+// constructor.
+Client_i::Client_i (void)
+ :high_priority_client_ (0),
+ low_priority_client_ (0),
+ util_thread_ (0),
+ ts_ (0),
+ num_low_priority_ (0),
+ num_priorities_ (0),
+ grain_ (0),
+ counter_ (0),
+ task_id_ (0),
+ argc_ (0),
+ argv_ (0),
+ total_latency_ (0),
+ total_latency_high_ (0),
+ total_util_task_duration_ (0),
+ context_switch_ (0)
+{
+}
+
+// destructor.
+Client_i::~Client_i (void)
+{
+ if (this->low_priority_client_ != 0)
+ // Delete the low priority task array
+ for (u_int i = this->num_low_priority_; i > 0; i--)
+ delete this->low_priority_client_ [i - 1];
+}
+
int
-initialize (void)
+Client_i::init (int argc,char **argv)
{
-#if defined (VXWORKS) && defined (VME_DRIVER)
- // @@ Naga, can you please factor these initialization functions
- // into a separate function somehow?
- STATUS status = vmeDrv ();
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDrv()\n"));
- status = vmeDevCreate ("/vme");
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDevCreate()\n"));
-#endif /* VXWORKS && VME_DRIVER */
+#if defined (ACE_HAS_THREADS)
+ // Enable FIFO scheduling, e.g., RT scheduling class on Solaris.
+ if (ACE_OS::sched_params (
+ ACE_Sched_Params (
+ ACE_SCHED_FIFO,
+#if defined (__Lynx__)
+ 30,
+#elif defined (VXWORKS) /* ! __Lynx__ */
+ 6,
+#elif defined (ACE_WIN32)
+ ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD),
+#else
+ ACE_THR_PRI_FIFO_DEF + 25,
+#endif /* ! __Lynx__ */
+ ACE_SCOPE_PROCESS)) != 0)
+ {
+ if (ACE_OS::last_error () == EPERM)
+ ACE_DEBUG ((LM_MAX,
+ "preempt: user is not superuser, "
+ "so remain in time-sharing class\n"));
+ else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "%n: ACE_OS::sched_params failed\n%a"),
+ -1);
+ }
+#else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "Test will not run. This platform doesn't seem to have threads.\n"),
+ -1);
+#endif /* ACE_HAS_THREADS */
+
+ this->argc_ = argc;
+ this->argv_ = argv;
+ VX_VME_INIT;
+
+#if defined (VXWORKS) && defined (FORCE_ARGS)
+ this->argc_ = 4;
+ this->argv_ = force_argv;
+#endif /* VXWORKS && FORCE_ARGS */
// Make sure we've got plenty of socket handles. This call will use
// the default maximum.
ACE::set_handle_limit ();
+
+ ACE_NEW_RETURN (this->ts_,
+ Task_State (this->argc_,
+ this->argv_),
+ -1);
+ // preliminary argument processing
+ for (int i=0 ; i< this->argc_; i++)
+ {
+ if ((ACE_OS::strcmp (this->argv_[i],"-r") == 0))
+ this->ts_->thread_per_rate_ = 1;
+ else if ((ACE_OS::strcmp (this->argv_[i],"-t") == 0) && (i-1 < this->argc_))
+ this->ts_->thread_count_ = ACE_OS::atoi (this->argv_[i+1]);
+ }
+#if defined (CHORUS)
+ // start the pccTimer for chorus classix
+ int pTime;
+
+ // Initialize the PCC timer Chip
+ pccTimerInit();
+
+ if(pccTimer(PCC2_TIMER1_START,&pTime) != K_OK)
+ {
+ printf("pccTimer has a pending benchmark\n");
+ }
+#endif /* CHORUS */
+
return 0;
}
+void
+Client_i::run (void)
+{
+ if (this->ts_->thread_per_rate_ == 0)
+ this->do_priority_inversion_test (&this->client_thread_manager_);
+ else
+ this->do_thread_per_rate_test (&this->client_thread_manager_);
+}
+
#if defined (VXWORKS)
void
-output_taskinfo (void)
+Client_i::output_taskinfo (void)
{
FILE *file_handle = 0;
@@ -113,7 +212,7 @@ output_taskinfo (void)
#endif /* VXWORKS */
void
-output_latency (Task_State *ts)
+Client_i::output_latency (Task_State *ts)
{
FILE *latency_file_handle = 0;
char latency_file[BUFSIZ];
@@ -121,7 +220,7 @@ output_latency (Task_State *ts)
ACE_OS::sprintf (latency_file,
"cb__%d.txt",
- ts->thread_count_);
+ this->ts_->thread_count_);
ACE_OS::fprintf(stderr,
"--->Output file for latency data is \"%s\"\n",
@@ -131,7 +230,7 @@ output_latency (Task_State *ts)
// This loop visits each client. thread_count_ is the number of
// clients.
- for (u_int j = 0; j < ts->thread_count_; j ++)
+ for (u_int j = 0; j < this->ts_->thread_count_; j ++)
{
ACE_OS::sprintf(buffer,
"%s #%d",
@@ -139,7 +238,7 @@ output_latency (Task_State *ts)
j);
// This loop visits each request latency from a client.
for (u_int i = 0;
- i < (j == 0 ? ts->high_priority_loop_count_ : ts->loop_count_) / ts->granularity_;
+ i < (j == 0 ? this->ts_->high_priority_loop_count_ : this->ts_->loop_count_) / this->ts_->granularity_;
i ++)
{
ACE_OS::sprintf(buffer+strlen(buffer),
@@ -148,7 +247,7 @@ output_latency (Task_State *ts)
#else
"\t%f\n",
#endif /* !CHORUS */
- ts->global_jitter_array_[j][i]);
+ this->ts_->global_jitter_array_[j][i]);
fputs (buffer, latency_file_handle);
buffer[0]=0;
}
@@ -157,299 +256,107 @@ output_latency (Task_State *ts)
ACE_OS::fclose (latency_file_handle);
}
-int
-start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager)
+// Mechanism to distribute the available priorities among the
+// threads when there are not enough different priorities for all
+// threads.
+void
+Client_i::init_low_priority (void)
{
- char high_thread_args[BUFSIZ];
-
- static char hostname[BUFSIZ];
-
- if (ACE_OS::hostname (hostname, BUFSIZ) != 0)
- ACE_ERROR_RETURN ((LM_ERROR,
- "%p\n",
- "hostname"),
- -1);
- ACE_OS::sprintf (high_thread_args,
- "-ORBport %d "
- "-ORBhost %s "
- "-ORBobjrefstyle URL "
- "-ORBsndsock 32768 "
- "-ORBrcvsock 32768 ",
- ACE_DEFAULT_SERVER_PORT,
- hostname);
-
- Cubit_Task *high_priority_task;
+ if (this->ts_->use_multiple_priority_ == 1)
+ this->low_priority_ =
+ this->priority_.get_low_priority (this->high_priority_,
+ this->num_low_priority_,
+ 1);
+ else
+ this->low_priority_ =
+ this->priority_.get_low_priority (this->high_priority_,
+ this->num_low_priority_,
+ 0);
+ this->num_priorities_ = this->priority_.number_of_priorities ();
+ this->grain_ = this->priority_.grain ();
+ this->counter_ = 0;
+}
- ACE_NEW_RETURN (high_priority_task,
- Cubit_Task ((const char *)high_thread_args,
- (const char *)"internet",
- (u_int) 1,
- &thread_manager,
- (u_int) 0), //task id 0.
- -1);
+void
+Client_i::calc_util_time (void)
+{
+ MT_Cubit_Timer timer (ACE_ONE_SECOND_IN_MSECS);
+ // Time the utilization thread's "computation" to get %IdleCPU at the end of the test.
- // @@ Naga, can you please generalize this #ifdef so that it doesn't
- // go into the code, but goes into a header file or inline function
- // or something instead?!
-#if defined (VXWORKS)
- ACE_Sched_Priority priority = ACE_THR_PRI_FIFO_DEF;
-#elif defined (ACE_WIN32)
- ACE_Sched_Priority priority = ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
+ // execute one computation.
+ timer.start ();
+#if defined (CHORUS)
+ this->util_thread_->computation ();
+ timer.stop ();
+ this->util_task_duration_ = timer.get_elapsed ();
#else
- ACE_Sched_Priority priority = ACE_THR_PRI_FIFO_DEF + 25;
-#endif /* VXWORKS */
-
- ACE_DEBUG ((LM_DEBUG,
- "Creating servant 0 with high priority %d\n",
- priority));
-
- // Make the high priority task an active object.
- if (high_priority_task->activate (THR_BOUND | ACE_SCHED_FIFO,
- 1,
- 0,
- priority) == -1)
- {
- ACE_ERROR ((LM_ERROR,
- "(%P|%t) %p\n"
- "\thigh_priority_task->activate failed"));
- }
-
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Waiting for argument parsing\n"));
- ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, GLOBALS::instance ()->ready_mtx_,-1));
- while (!GLOBALS::instance ()->ready_)
- GLOBALS::instance ()->ready_cnd_.wait ();
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Argument parsing waiting done\n"));
-
- GLOBALS::instance ()->barrier_->wait ();
-
- ts->one_ior_ = high_priority_task->get_servant_ior (0);
-
- return 0;
+ for (u_int i = 0; i < 10000; i++)
+ this->util_thread_->computation ();
+ timer.stop ();
+ this->util_task_duration_ = timer.get_elapsed ()/10000;
+#endif /* !CHORUS */
}
-// @@ Naga, can this function be either (1) made static to the file or
-// (2) moved into a class somewhere (I prefer the latter, if
-// possible). Moreover, this function is VERY long. Can you please
-// break it up somehow?
int
-do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
- Task_State *ts)
+Client_i::activate_high_client (ACE_Thread_Manager *thread_manager)
{
- int i;
- u_int j;
-
- char *task_id = 0;
-
- ACE_High_Res_Timer timer_;
- ACE_Time_Value delta_t;
-
- timer_.start ();
-
-#if defined (VXWORKS)
- ctx = 0;
- ACE_NEW_RETURN (task_id,
- char [32],
+ ACE_NEW_RETURN (this->high_priority_client_,
+ Client (thread_manager, this->ts_, 0),
-1);
-#endif /* VXWORKS */
-
- ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) <<<<<<< starting test on %D\n"));
-
- // Stores the total number of context switches incurred by the
- // program while making CORBA requests
-#if defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)
- u_int context_switch = 0;
-#endif /* ACE_HAS_PRUSAGE_T || ACE_HAS_GETRUSAGE */
-
- double util_task_duration = 0.0;
- double total_latency = 0.0;
- double total_latency_high = 0.0;
- double total_util_task_duration = 0.0;
-
- GLOBALS::instance ()->num_of_objs = 1;
-
- ACE_Thread_Manager server_thread_manager;
-
- GLOBALS::instance ()->use_name_service = 0;
-
- for (j = 0; j < ts->argc_; j++)
- if (ACE_OS::strcmp (ts->argv_[j], "-u") == 0)
- {
- start_servant (ts, server_thread_manager);
- break;
- }
-
- // Create the clients.
- Client high_priority_client (thread_manager, ts, 0);
-
- // Create an array to hold pointers to the low priority tasks.
- Client **low_priority_client;
-
- ACE_NEW_RETURN (low_priority_client,
- Client *[ts->thread_count_],
- -1);
-
- // Hack to make sure we have something in this pointer, when
- // thread_count == 1
- low_priority_client[0] = &high_priority_client;
-
- // Create the daemon thread in its own <ACE_Thread_Manager>.
- ACE_Thread_Manager util_thr_mgr;
-
- Util_Thread util_thread (ts, &util_thr_mgr);
-
- // Time the utilization thread' "computation" to get %IdleCPU at the end of the test.
-
- // @@ Naga, can you please clean up the following code? It's VERY
- // complicated and needs to be refactored into a separate abstraction.
-#if defined (CHORUS)
- int pstartTime = 0;
- int pstopTime = 0;
- // Elapsed time will be in microseconds.
- pstartTime = pccTime1Get();
- // execute one computation.
- util_thread.computation ();
- pstopTime = pccTime1Get();
- // Store the time in micro-seconds.
- util_task_duration = pstopTime - pstartTime;
-#else /* CHORUS */
- // Elapsed time will be in microseconds.
- timer_.start ();
- // execute computations.
- for (i = 0; i < 10000; i++)
- util_thread.computation ();
- timer_.stop ();
- timer_.elapsed_time (delta_t);
- // Store the time in milli-seconds.
- util_task_duration = (delta_t.sec () *
- ACE_ONE_SECOND_IN_MSECS +
- (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS) / 10000;
-#endif /* !CHORUS */
-
- // The thread priority
- ACE_Sched_Priority priority;
#if defined (VXWORKS)
// Set a task_id string starting with "@", so we are able to
// accurately count the number of context switches.
- strcpy (task_id, "@High");
+ strcpy (this->task_id_, "@High");
#endif /* VXWORKS */
- // @@ Naga, again, this code is repeated from earlier. Can you
- // please factor this out somehow?!
- // Now activate the high priority client.
-#if defined (VXWORKS)
- priority = ACE_THR_PRI_FIFO_DEF;
-#elif defined (ACE_WIN32)
- priority = ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
-#else /* ! VXWORKS */
- priority = ACE_THR_PRI_FIFO_DEF + 25;
-#endif /* ! ACE_WIN32 */
+ this->high_priority_ = this->priority_.get_high_priority ();
ACE_DEBUG ((LM_DEBUG,
"Creating 1 client with high priority of %d\n",
- priority));
-
- if (high_priority_client.activate (THR_BOUND | ACE_SCHED_FIFO,
- 1,
- 0,
- priority,
- -1,
- 0,
- 0,
- 0,
- 0) == -1)
- // (ACE_thread_t*)task_id) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p; priority is %d\n",
- "activate failed",
- priority));
-
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Waiting for argument parsing\n"));
- ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, ts->ready_mtx_,-1));
- while (!ts->ready_)
- ts->ready_cnd_.wait ();
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Argument parsing waiting done\n"));
-
- u_int number_of_low_priority_client = 0;
- u_int number_of_priorities = 0;
- u_int grain = 0;
- u_int counter = 0;
-
- number_of_low_priority_client = ts->thread_count_ - 1;
-
- // Mechanism to distribute the available priorities among the
- // threads when there are not enough different priorities for all
- // threads.
- if (ts->use_multiple_priority_ == 1)
- {
- ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
- number_of_priorities = 0;
-
- while (priority_iterator.more ())
- {
- number_of_priorities ++;
- priority_iterator.next ();
- }
-
- // 1 priority is exclusive for the high priority client.
- number_of_priorities --;
-
- // if utilization thread is present, reduce in 1 the available
- // priorities.
- if (ts->use_utilization_test_ == 1)
- number_of_priorities --;
+ this->high_priority_));
+
+ if (this->high_priority_client_->activate (THR_BOUND | ACE_SCHED_FIFO,
+ 1,
+ 0,
+ this->high_priority_,
+ -1,
+ 0,
+ 0,
+ 0,
+ 0) == -1)
+ // (ACE_thread_t*)this->task_id_) == -1)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "%p; priority is %d\n",
+ "activate failed",
+ this->high_priority_),
+ -1);
- // Drop the priority, so that the priority of clients will
- // increase with increasing client number.
- for (j = 0; j < number_of_low_priority_client; j++)
- priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
+ return 0;
+}
- // If the lowest priority of the "low priority clients" is the
- // minimum, and we are running the utilization thread, increment
- // the priority, since we don't want the utlization thread and a
- // "low priority thread" to have the same priority.
- if (priority == ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD)
- && ts->use_utilization_test_ == 1)
- priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
-
- // Granularity of the assignment of the priorities. Some OSs
- // have fewer levels of priorities than we have threads in our
- // test, so with this mechanism we assign priorities to groups
- // of threads when there are more threads than priorities.
- grain = number_of_low_priority_client / number_of_priorities;
- counter = 0;
-
- if (grain <= 0)
- grain = 1;
- }
- else
- // Drop the priority one level.
- priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
+int
+Client_i::activate_low_client (ACE_Thread_Manager *thread_manager)
+{
+ ACE_NEW_RETURN (this->low_priority_client_,
+ Client *[this->ts_->thread_count_],
+ -1);
+ // Hack to make sure we have something in this pointer, when
+ // thread_count == 1
+ this->low_priority_client_[0] = this->high_priority_client_;
+ this->num_low_priority_ = this->ts_->thread_count_ - 1;
+ // set the priority for the low priority threads.
+ this->init_low_priority ();
ACE_DEBUG ((LM_DEBUG,
"Creating %d clients at priority %d\n",
- ts->thread_count_ - 1,
- priority));
+ this->ts_->thread_count_ - 1,
+ this->low_priority_));
- for (i = number_of_low_priority_client; i > 0; i--)
+ for (u_int i = this->num_low_priority_; i > 0; i--)
{
- ACE_NEW_RETURN (low_priority_client [i - 1],
- Client (thread_manager, ts, i),
+ ACE_NEW_RETURN (this->low_priority_client_ [i - 1],
+ Client (thread_manager, this->ts_, i),
-1);
#if defined (VXWORKS)
@@ -459,150 +366,104 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
// Set a task_id string starting with "@", so we are able to
// accurately count the number of context switches on VXWORKS
- sprintf (task_id, "@Low%u", i);
+ sprintf (this->task_id_, "@Low%u", i);
#endif /* VXWORKS */
ACE_DEBUG ((LM_DEBUG,
"Creating client with thread ID %d and priority %d\n",
i,
- priority));
+ this->low_priority_));
// The first thread starts at the lowest priority of all the low
// priority clients.
- if (low_priority_client[i - 1]->activate (THR_BOUND | ACE_SCHED_FIFO,
+ if (this->low_priority_client_[i - 1]->activate (THR_BOUND | ACE_SCHED_FIFO,
1,
0,
- priority, // These are constructor defaults.
+ this->low_priority_, // These are constructor defaults.
-1, // int grp_id = -1,
0, // ACE_Task_Base *task = 0,
0, // ACE_hthread_t thread_handles[] = 0,
0, // void *stack[] = 0,
0, // size_t stack_size[] = 0,
- (ACE_thread_t*)task_id) == -1)
+ (ACE_thread_t*)this->task_id_) == -1)
ACE_ERROR ((LM_ERROR,
"%p; priority is %d\n",
"activate failed",
- priority));
+ this->low_priority_));
- if (ts->use_multiple_priority_ == 1)
+ if (this->ts_->use_multiple_priority_ == 1)
{
- counter = (counter + 1) % grain;
- if (counter == 0 &&
+ this->counter_ = (this->counter_ + 1) % this->grain_;
+ if (this->counter_ == 0 &&
// Just so when we distribute the priorities among the
// threads, we make sure we don't go overboard.
- number_of_priorities * grain > number_of_low_priority_client - (i - 1))
+ this->num_priorities_ * this->grain_ > this->num_low_priority_ - (i - 1))
// Get the next higher priority.
- priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
- priority,
+ this->low_priority_ = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
+ this->low_priority_,
ACE_SCOPE_THREAD);
}
} /* end of for () */
+}
- if (ts->use_utilization_test_ == 1)
+int
+Client_i::activate_util_thread (void)
+{
+ ACE_NEW_RETURN (this->util_thread_,
+ Util_Thread (this->ts_, &this->util_thread_manager_),
+ -1);
+
+ // Time the utilization thread's "computation" to get %IdleCPU at the end of the test.
+ this->calc_util_time ();
+
+ if (this->ts_->use_utilization_test_ == 1)
// Activate the utilization thread only if specified. See
// description of this variable in header file.
{
- priority =
+ this->low_priority_ =
ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
ACE_SCOPE_THREAD);
ACE_DEBUG ((LM_DEBUG,
"Creating utilization thread with priority of %d\n",
- priority));
+ this->low_priority_));
// Activate the Utilization thread. It will wait until all
// threads have finished binding.
- util_thread.activate (THR_BOUND | ACE_SCHED_FIFO,
- 1,
- 0,
- priority);
+ this->util_thread_->activate (THR_BOUND | ACE_SCHED_FIFO,
+ 1,
+ 0,
+ this->low_priority_);
}
else
- util_thread.close ();
-
- // Wait for all the client threads to be initialized before going
- // any further.
- ts->barrier_->wait ();
-
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
- quantify_clear_data ();
-#endif /* NO_ACE_QUANTIFY */
-
-#if (defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)) && !defined (ACE_WIN32)
- ACE_Profile_Timer timer_for_context_switch;
- ACE_Profile_Timer::Rusage usage;
-
- if (ts->context_switch_test_ == 1)
- {
- timer_for_context_switch.start ();
- timer_for_context_switch.get_rusage (usage);
-# if defined (ACE_HAS_PRUSAGE_T)
- context_switch = usage.pr_vctx + usage.pr_ictx;
-# else /* ACE_HAS_PRUSAGE_T */
- context_switch = usage.ru_nvcsw + usage.ru_nivcsw;
-# endif /* ACE_HAS_GETRUSAGE */
- }
-#endif /* ACE_HAS_PRUSAGE_T || ACE_HAS_GETRUSAGE */
-
-#if defined (VXWORKS)
- if (ts->context_switch_test_ == 1)
- {
- ACE_DEBUG ((LM_DEBUG,
- "Adding the context switch hook!\n"));
- taskSwitchHookAdd ((FUNCPTR) &switchHook);
- }
-#endif /* VXWORKS */
-
- // Wait for all the client threads to exit (except the utilization
- // thread).
- thread_manager->wait ();
-
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
-#endif /* NO_ACE_QUANTIFY */
-
- ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) >>>>>>> ending test on %D\n"));
-
- timer_.stop ();
- timer_.elapsed_time (delta_t);
-
- if (ts->use_utilization_test_ == 1)
- // Signal the utilization thread to finish with its work.. only
- // if utilization test was specified. See description of this
- // variable in header file.
- {
- util_thread.done_ = 1;
-
- // This will wait for the utilization thread to finish.
- util_thr_mgr.wait ();
- }
-
- ACE_DEBUG ((LM_DEBUG,
- "-------------------------- Stats -------------------------------\n"));
+ this->util_thread_->close ();
+ return 0;
+}
- if (ts->context_switch_test_ == 1)
+void
+Client_i:: print_context_stats (void)
+{
+ if (this->ts_->context_switch_test_ == 1)
{
#if defined (ACE_HAS_PRUSAGE_T)
- timer_for_context_switch.stop ();
- timer_for_context_switch.get_rusage (usage);
+ this->timer_for_context_switch.stop ();
+ this->timer_for_context_switch.get_rusage (this->usage);
// Add up the voluntary context switches & involuntary context
// switches
- context_switch = usage.pr_vctx + usage.pr_ictx - context_switch;
+ this->context_switch_ = this->usage.pr_vctx + this->usage.pr_ictx - this->context_switch_;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
- usage.pr_vctx,
- usage.pr_ictx));
+ this->usage.pr_vctx,
+ this->usage.pr_ictx));
#elif defined (ACE_HAS_GETRUSAGE) && !defined (ACE_WIN32)
- timer_for_context_switch.stop ();
- timer_for_context_switch.get_rusage (usage);
+ this->timer_for_context_switch.stop ();
+ this->timer_for_context_switch.get_rusage (this->usage);
// Add up the voluntary context switches & involuntary context
// switches
- context_switch = usage.ru_nvcsw + usage.ru_nivcsw - context_switch;
+ this->context_switch_ = this->usage.ru_nvcsw + this->usage.ru_nivcsw - this->context_switch_;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
- usage.ru_nvcsw,
- usage.ru_nivcsw));
+ this->usage.ru_nvcsw,
+ this->usage.ru_nivcsw));
#elif defined (VXWORKS) /* ACE_HAS_GETRUSAGE */
taskSwitchHookDelete ((FUNCPTR)&switchHook);
ACE_DEBUG ((LM_DEBUG,
@@ -610,111 +471,266 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
ctx));
#endif /* ACE_HAS_PRUSAGE_T */
}
+}
+void
+Client_i::print_latency_stats (void)
+{
// If running the utilization test, don't report latency nor jitter.
- if (ts->use_utilization_test_ == 0)
+ if (this->ts_->use_utilization_test_ == 0)
{
#if defined (VXWORKS)
ACE_DEBUG ((LM_DEBUG, "Test done.\n"
"High priority client latency : %f msec, jitter: %f msec\n"
"Low priority client latency : %f msec, jitter: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter ()));
+ this->high_priority_client_->get_high_priority_latency (),
+ this->high_priority_client_->get_high_priority_jitter (),
+ this->low_priority_client_[0]->get_low_priority_latency (),
+ this->low_priority_client_[0]->get_low_priority_jitter ()));
// output the latency values to a file, tab separated, to import
// it to Excel to calculate jitter, in the mean time we come up
// with the sqrt() function.
- output_latency (ts);
+ output_latency (this->ts_);
#elif defined (CHORUS)
ACE_DEBUG ((LM_DEBUG,
"Test done.\n"
"High priority client latency : %u usec\n"
"Low priority client latency : %u usec\n",
- high_priority_client.get_high_priority_latency (),
- low_priority_client[0]->get_low_priority_latency () ));
+ this->high_priority_client_->get_high_priority_latency (),
+ this->low_priority_client_[0]->get_low_priority_latency () ));
// output the latency values to a file, tab separated, to import
// it to Excel to calculate jitter, in the mean time we come up
// with the sqrt() function.
- output_latency (ts);
+ output_latency (this->ts_);
#else /* !CHORUS */
ACE_DEBUG ((LM_DEBUG, "Test done.\n"
"High priority client latency : %f msec, jitter: %f msec\n"
"Low priority client latency : %f msec, jitter: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter ()));
- // output_latency (ts);
+ this->high_priority_client_->get_high_priority_latency (),
+ this->high_priority_client_->get_high_priority_jitter (),
+ this->low_priority_client_[0]->get_low_priority_latency (),
+ this->low_priority_client_[0]->get_low_priority_jitter ()));
+ // output_latency (this->ts_);
#endif /* !VXWORKS && !CHORUS */
}
+}
- if (ts->use_utilization_test_ == 1)
+void
+Client_i::print_util_stats (void)
+{
+ if (this->ts_->use_utilization_test_ == 1)
{
- total_util_task_duration =
- util_task_duration * util_thread.get_number_of_computations ();
+ this->total_util_task_duration_ =
+ this->util_task_duration_ * this->util_thread_->get_number_of_computations ();
- total_latency = (delta_t.sec () *
+ this->total_latency_ = (this->delta_.sec () *
ACE_ONE_SECOND_IN_MSECS +
- (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS);
+ (ACE_timer_t)this->delta_.usec () / ACE_ONE_SECOND_IN_MSECS);
- total_latency_high =
- total_latency - total_util_task_duration;
+ this->total_latency_high_ =
+ this->total_latency_ - this->total_util_task_duration_;
// Calc and print the CPU percentage. I add 0.5 to round to the
// nearest integer before casting it to int.
ACE_DEBUG ((LM_DEBUG,
"\t%% ORB Client CPU utilization: %u %%\n"
"\t%% Idle time: %u %%\n\n",
- (u_int) (total_latency_high * 100 / total_latency + 0.5),
- (u_int) (total_util_task_duration * 100 / total_latency + 0.5) ));
+ (u_int) (this->total_latency_high_ * 100 / this->total_latency_ + 0.5),
+ (u_int) (this->total_util_task_duration_ * 100 / this->total_latency_ + 0.5) ));
ACE_DEBUG ((LM_DEBUG,
"(%t) UTILIZATION task performed \t%u computations\n"
"(%t) CLIENT task performed \t\t%u CORBA calls\n"
"(%t) Utilization test time is \t\t%f seconds\n"
"\t Ratio of computations to CORBA calls is %u.%u:1\n\n",
- util_thread.get_number_of_computations (),
- ts->loop_count_,
- ts->util_test_time_,
- util_thread.get_number_of_computations () / ts->loop_count_,
- (util_thread.get_number_of_computations () % ts->loop_count_) * 100 / ts->loop_count_
+ this->util_thread_->get_number_of_computations (),
+ this->ts_->loop_count_,
+ this->ts_->util_test_time_,
+ this->util_thread_->get_number_of_computations () / this->ts_->loop_count_,
+ (this->util_thread_->get_number_of_computations () % this->ts_->loop_count_) * 100 / this->ts_->loop_count_
));
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %u usecs\n",
- util_task_duration));
-#else
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %f msecs\n",
- util_task_duration));
-#endif /* ! ACE_LACKS_FLOATING_POINT */
+ "(%t) utilization computation time is %A usecs\n",
+ this->util_task_duration_));
+ }
+}
+
+void
+Client_i::print_priority_inversion_stats (void)
+{
+ this->print_context_stats ();
+ this->print_latency_stats ();
+ this->print_util_stats ();
+}
+
+int
+Client_i::start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager)
+{
+ char high_thread_args[BUFSIZ];
+
+ static char hostname[BUFSIZ];
+
+ if (ACE_OS::hostname (hostname, BUFSIZ) != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "%p\n",
+ "hostname"),
+ -1);
+ ACE_OS::sprintf (high_thread_args,
+ "-ORBport %d "
+ "-ORBhost %s "
+ "-ORBobjrefstyle URL "
+ "-ORBsndsock 32768 "
+ "-ORBrcvsock 32768 ",
+ ACE_DEFAULT_SERVER_PORT,
+ hostname);
+
+ Cubit_Task *high_priority_task;
+
+ ACE_NEW_RETURN (high_priority_task,
+ Cubit_Task ((const char *)high_thread_args,
+ (const char *)"internet",
+ (u_int) 1,
+ &thread_manager,
+ (u_int) 0), //task id 0.
+ -1);
+
+ this->high_priority_ = this->priority_.get_high_priority ();
+
+ ACE_DEBUG ((LM_DEBUG,
+ "Creating servant 0 with high priority %d\n",
+ this->high_priority_));
+
+ // Make the high priority task an active object.
+ if (high_priority_task->activate (THR_BOUND | ACE_SCHED_FIFO,
+ 1,
+ 0,
+ this->high_priority_) == -1)
+ {
+ ACE_ERROR ((LM_ERROR,
+ "(%P|%t) %p\n"
+ "\thigh_priority_task->activate failed"));
+ }
+
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Waiting for argument parsing\n"));
+ ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, GLOBALS::instance ()->ready_mtx_,-1));
+ while (!GLOBALS::instance ()->ready_)
+ GLOBALS::instance ()->ready_cnd_.wait ();
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Argument parsing waiting done\n"));
+
+ GLOBALS::instance ()->barrier_->wait ();
+
+ this->ts_->one_ior_ = high_priority_task->get_servant_ior (0);
+
+ return 0;
+}
+
+int
+Client_i::do_priority_inversion_test (ACE_Thread_Manager *thread_manager)
+{
+ u_int i,j,result;
+
+ this->timer_.start ();
+#if defined (VXWORKS)
+ ctx = 0;
+ ACE_NEW_RETURN (this->task_id_,
+ char [32],
+ -1);
+#endif /* VXWORKS */
+ ACE_DEBUG ((LM_DEBUG,
+ "(%P|%t) <<<<<<< starting test on %D\n"));
+ GLOBALS::instance ()->num_of_objs = 1;
+ ACE_Thread_Manager server_thread_manager;
+ GLOBALS::instance ()->use_name_service = 0;
+ for (j = 0; j < this->ts_->argc_; j++)
+ if (ACE_OS::strcmp (this->ts_->argv_[j], "-u") == 0)
+ {
+ start_servant (this->ts_, server_thread_manager);
+ break;
+ }
+ // Create the clients.
+ result = this->activate_high_client (thread_manager);
+ if (result < 0)
+ return result;
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Waiting for argument parsing\n"));
+ ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, this->ts_->ready_mtx_,-1));
+ while (!this->ts_->ready_)
+ this->ts_->ready_cnd_.wait ();
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Argument parsing waiting done\n"));
+ result = this->activate_low_client (thread_manager);
+ if (result < 0)
+ return result;
+ result = this->activate_util_thread ();
+ if (result < 0)
+ return result;
+ // Wait for all the client threads to be initialized before going
+ // any further.
+ this->ts_->barrier_->wait ();
+
+ STOP_QUANTIFY;
+ CLEAR_QUANTIFY;
+#if (defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)) && !defined (ACE_WIN32)
+
+ if (this->ts_->context_switch_test_ == 1)
+ {
+ this->timer_for_context_switch.start ();
+ this->timer_for_context_switch.get_rusage (this->usage);
+# if defined (ACE_HAS_PRUSAGE_T)
+ this->context_switch_ = this->usage.pr_vctx + this->usage.pr_ictx;
+# else /* ACE_HAS_PRUSAGE_T */
+ this->context_switch_ = this->usage.ru_nvcsw + this->usage.ru_nivcsw;
+# endif /* ACE_HAS_GETRUSAGE */
}
+#endif /* ACE_HAS_PRUSAGE_T || ACE_HAS_GETRUSAGE */
#if defined (VXWORKS)
- delete task_id;
+ if (this->ts_->context_switch_test_ == 1)
+ {
+ ACE_DEBUG ((LM_DEBUG,
+ "Adding the context switch hook!\n"));
+ taskSwitchHookAdd ((FUNCPTR) &switchHook);
+ }
#endif /* VXWORKS */
- // Delete the low priority task array
- for (i = number_of_low_priority_client; i > 0; i--)
- delete low_priority_client [i - 1];
+ // Wait for all the client threads to exit (except the utilization
+ // thread).
+ thread_manager->wait ();
+ STOP_QUANTIFY;
+ ACE_DEBUG ((LM_DEBUG,
+ "(%P|%t) >>>>>>> ending test on %D\n"));
+ this->timer_.stop ();
+ this->timer_.elapsed_time (this->delta_);
+
+ if (this->ts_->use_utilization_test_ == 1)
+ // Signal the utilization thread to finish with its work.. only
+ // if utilization test was specified. See description of this
+ // variable in header file.
+ {
+ this->util_thread_->done_ = 1;
- delete [] low_priority_client;
+ // This will wait for the utilization thread to finish.
+ this->util_thread_manager_.wait ();
+ }
+
+ ACE_DEBUG ((LM_DEBUG,
+ "-------------------------- Stats -------------------------------\n"));
+
+ this->print_priority_inversion_stats ();
return 0;
}
-// @@ Naga, can you please either (1) make this a static or (2) move
-// it into a class?!
int
-do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
- Task_State *ts)
+Client_i::do_thread_per_rate_test (ACE_Thread_Manager *thread_manager)
{
- Client CB_20Hz_client (thread_manager, ts, CB_20HZ_CONSUMER);
- Client CB_10Hz_client (thread_manager, ts, CB_10HZ_CONSUMER);
- Client CB_5Hz_client (thread_manager, ts, CB_5HZ_CONSUMER);
- Client CB_1Hz_client (thread_manager, ts, CB_1HZ_CONSUMER);
+ Client CB_20Hz_client (thread_manager, this->ts_, CB_20HZ_CONSUMER);
+ Client CB_10Hz_client (thread_manager, this->ts_, CB_10HZ_CONSUMER);
+ Client CB_5Hz_client (thread_manager, this->ts_, CB_5HZ_CONSUMER);
+ Client CB_1Hz_client (thread_manager, this->ts_, CB_1HZ_CONSUMER);
ACE_Sched_Priority priority;
@@ -743,9 +759,9 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"(%t) Waiting for argument parsing\n"));
- ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, ts->ready_mtx_,-1));
- while (!ts->ready_)
- ts->ready_cnd_.wait ();
+ ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, this->ts_->ready_mtx_,-1));
+ while (!this->ts_->ready_)
+ this->ts_->ready_cnd_.wait ();
ACE_DEBUG ((LM_DEBUG,
"(%t) Argument parsing waiting done\n"));
@@ -795,29 +811,16 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
// Wait for all the threads to exit.
thread_manager->wait ();
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
"Test done.\n"
- "20Hz client latency : %u usec, jitter: %u usec\n"
- "10Hz client latency : %u usec, jitter: %u usec\n"
- "5Hz client latency : %u usec, jitter: %u usec\n"
- "1Hz client latency : %u usec, jitter: %u usec\n",
+ "20Hz client latency : %A usec, jitter: %A usec\n"
+ "10Hz client latency : %A usec, jitter: %A usec\n"
+ "5Hz client latency : %A usec, jitter: %A usec\n"
+ "1Hz client latency : %A usec, jitter: %A usec\n",
CB_20Hz_client.get_latency (0), CB_20Hz_client.get_jitter (0),
CB_10Hz_client.get_latency (1), CB_10Hz_client.get_jitter (1),
CB_5Hz_client.get_latency (2), CB_5Hz_client.get_jitter (2),
CB_1Hz_client.get_latency (3), CB_1Hz_client.get_jitter (3) ));
-#else
- ACE_DEBUG ((LM_DEBUG,
- "Test done.\n"
- "20Hz client latency : %f msec, jitter: %f msec\n"
- "10Hz client latency : %f msec, jitter: %f msec\n"
- "5Hz client latency : %f msec, jitter: %f msec\n"
- "1Hz client latency : %f msec, jitter: %f msec\n",
- CB_20Hz_client.get_latency (0), CB_20Hz_client.get_jitter (0),
- CB_10Hz_client.get_latency (1), CB_10Hz_client.get_jitter (1),
- CB_5Hz_client.get_latency (2), CB_5Hz_client.get_jitter (2),
- CB_1Hz_client.get_latency (3), CB_1Hz_client.get_jitter (3) ));
-#endif /* ! ACE_LACKS_FLOATING_POINT */
return 0;
}
@@ -838,92 +841,15 @@ int
main (int argc, char *argv[])
{
#endif /* VXWORKS */
+ int result;
+ Client_i client;
-#if defined (ACE_HAS_THREADS)
-#if defined (FORCE_ARGS)
- int argc = 4;
- char *argv[] = {"client",
- "-s",
- "-f",
- "ior.txt"};
-#endif /* defined (FORCE_ARGS) */
-
- // Enable FIFO scheduling, e.g., RT scheduling class on Solaris.
- if (ACE_OS::sched_params (
- ACE_Sched_Params (
- ACE_SCHED_FIFO,
-#if defined (__Lynx__)
- 30,
-#elif defined (VXWORKS) /* ! __Lynx__ */
- 6,
-#elif defined (ACE_WIN32)
- ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD),
-#else
- ACE_THR_PRI_FIFO_DEF + 25,
-#endif /* ! __Lynx__ */
- ACE_SCOPE_PROCESS)) != 0)
- {
- if (ACE_OS::last_error () == EPERM)
- ACE_DEBUG ((LM_MAX, "preempt: user is not superuser, "
- "so remain in time-sharing class\n"));
- else
- ACE_ERROR_RETURN ((LM_ERROR, "%n: ACE_OS::sched_params failed\n%a"),
- -1);
- }
-
- ACE_High_Res_Timer timer_;
- ACE_Time_Value delta_t;
-
-#if 0 // this is a debug section that will be removed soon. 1/6/98
- ACE_DEBUG ((LM_MAX, "<<<<<Delay of 5 seconds>>>>>\n"));
-
- timer_.start ();
-
- const ACE_Time_Value delay (5L, 0L);
- ACE_OS::sleep (delay);
-
- timer_.stop ();
- timer_.elapsed_time (delta_t);
-
- ACE_DEBUG ((LM_DEBUG, "5secs= %u secs, %u usecs\n", delta_t.sec (), delta_t.usec ()));
-#endif
-
- initialize ();
-ACE_DEBUG ((LM_DEBUG, "argv[1]=%s\n", argv[1]));
- Task_State ts (argc, argv);
-
- // preliminary argument processing
- for (int i=0 ; i< argc; i++)
- {
- if ((ACE_OS::strcmp (argv[i],"-r") == 0))
- ts.thread_per_rate_ = 1;
- else if ((ACE_OS::strcmp (argv[i],"-t") == 0) && (i-1 < argc))
- ts.thread_count_ = ACE_OS::atoi (argv[i+1]);
- }
-
-#if defined (CHORUS)
- // start the pccTimer for chorus classix
- int pTime;
-
- // Initialize the PCC timer Chip
- pccTimerInit();
-
- if(pccTimer(PCC2_TIMER1_START,&pTime) != K_OK)
- {
- printf("pccTimer has a pending benchmark\n");
- }
-#endif /* CHORUS */
-
- // Create a separate manager for the client. This allows the use
- // of its wait () method on VxWorks, without interfering with the
- // server's (global) thread manager.
- ACE_Thread_Manager client_thread_manager;
-
- if (ts.thread_per_rate_ == 0)
- do_priority_inversion_test (&client_thread_manager, &ts);
- else
- do_thread_per_rate_test (&client_thread_manager, &ts);
+ result = client.init (argc,argv);
+ if (result < 0)
+ return result;
+
+ // run the tests.
+ client.run ();
#if defined (CHORUS)
if (pccTimer (PCC2_TIMER1_STOP, &pTime) != K_OK)
@@ -936,12 +862,6 @@ ACE_DEBUG ((LM_DEBUG, "argv[1]=%s\n", argv[1]));
int status;
ACE_OS::thr_exit (&status);
#endif /* CHORUS */
-
-#else /* !ACE_HAS_THREADS */
- ACE_DEBUG ((LM_DEBUG,
- "Test not run. This platform doesn't seem to have threads.\n"));
-#endif /* ACE_HAS_THREADS */
-
return 0;
}
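The refactored test above keeps the context-switch bookkeeping it inherited: it snapshots the voluntary and involuntary switch counters from the rusage data right after the clients start and subtracts that snapshot from a second reading when the test ends. A minimal, self-contained sketch of that before/after pattern, separate from the test harness (count_context_switches and its work argument are illustrative, not part of this change):

    #include "ace/Profile_Timer.h"

    // Sketch: count the context switches incurred by <work>, using the same
    // rusage subtraction the test performs.  Assumes a platform with
    // ACE_HAS_GETRUSAGE, where the ru_nvcsw/ru_nivcsw fields are available.
    long
    count_context_switches (void (*work) (void))
    {
      ACE_Profile_Timer timer;
      ACE_Profile_Timer::Rusage before, after;

      timer.start ();
      timer.get_rusage (before);

      (*work) ();                       // region being measured

      timer.stop ();
      timer.get_rusage (after);

      // Voluntary plus involuntary switches accumulated during <work>.
      return (after.ru_nvcsw + after.ru_nivcsw)
        - (before.ru_nvcsw + before.ru_nivcsw);
    }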
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.h b/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.h
index 5f1f00c1626..e103cd37243 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.h
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/client.h
@@ -22,6 +22,7 @@
#include "cubitC.h"
#include "Task_Client.h"
#include "Util_Thread.h"
+#include "Timer.h"
#if defined (CHORUS)
#include "pccTimer.h"
@@ -33,23 +34,133 @@ extern "C" STATUS vmeDrv (void);
extern "C" STATUS vmeDevCreate (char *);
#endif /* defined (VME_DRIVER) */
-// class Client
-// {
-// public:
-// Client (void);
-// //constructor.
-// int do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
-// Task_State *ts);
-// int do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
-// Task_State *ts);
-// int start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager);
-
-// void output_latency (Task_State *ts);
-
-// #if defined (VXWORKS)
-// void output_taskinfo (void);
-// #endif /* VXWORKS */
-// int initialize (void);
-// };
+class Client_i
+ :public virtual MT_Priority
+{
+public:
+ Client_i (void);
+ //constructor.
+
+ ~Client_i (void);
+ // destructor.
+
+ int init (int argc,char **argv);
+ // initialize the state of Client_i.
+
+ void run (void);
+ // run the tests.
+
+ int do_priority_inversion_test (ACE_Thread_Manager *thread_manager);
+
+ int do_thread_per_rate_test (ACE_Thread_Manager *thread_manager);
+
+ int start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager);
+
+ void output_latency (Task_State *ts);
+
+#if defined (VXWORKS)
+ void output_taskinfo (void);
+#endif /* VXWORKS */
+private:
+ void init_low_priority (void);
+ // sets the priority to be used for the low priority clients.
+
+ void calc_util_time (void);
+ // calculate the time for one util computation.
+
+ int activate_high_client (ACE_Thread_Manager *thread_manager);
+ // activates the high priority client.
+
+ int activate_low_client (ACE_Thread_Manager *thread_manager);
+ // activates the low priority client.
+
+ int activate_util_thread (void);
+ // activates the utilization thread.
+
+ void print_priority_inversion_stats (void);
+ // prints the results of the tests.
+
+ void print_context_stats (void);
+ // prints the context switch results.
+
+ void print_util_stats (void);
+ // prints the utilization test results.
+
+ void print_latency_stats (void);
+ // prints the latency and jitter results.
+
+ Client *high_priority_client_;
+ // pointer to the high priority client object.
+
+ Client **low_priority_client_;
+ // array to hold pointers to the low priority tasks.
+
+ ACE_High_Res_Timer timer_;
+ // Timer for timing the tests.
+
+ MT_Priority priority_;
+ // priority helper object.
+
+ Util_Thread *util_thread_;
+ // Utilization thread.
+
+ ACE_Thread_Manager util_thread_manager_;
+ // Utilization thread manager.
+
+ ACE_timer_t util_task_duration_;
+ // time for one computation of utilization thread.
+
+ Task_State *ts_;
+ // pointer to task state.
+
+ ACE_Sched_Priority high_priority_;
+ // priority used for the high priority client.
+
+ ACE_Sched_Priority low_priority_;
+ // priority used by the low priority clients.
+
+ u_int num_low_priority_;
+ // number of low priority clients
+
+ u_int num_priorities_;
+ // number of priorities used.
+
+ u_int grain_;
+ // Granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
+
+  u_int counter_;
+  // Counter used to assign threads to priority groups.
+
+ char *task_id_;
+ // Set a task_id string starting with "@", so we are able to
+ // accurately count the number of context switches.
+
+ ACE_Time_Value delta_;
+ // elapsed time for the latency tests.
+
+  int argc_;
+  // Number of command line arguments.
+
+  char **argv_;
+  // The command line arguments.
+
+ ACE_Thread_Manager client_thread_manager_;
+ // Create a separate manager for the client. This allows the use
+ // of its wait () method on VxWorks, without interfering with the
+ // server's (global) thread manager.
+
+  ACE_timer_t total_latency_;
+  ACE_timer_t total_latency_high_;
+  ACE_timer_t total_util_task_duration_;
+  // Running totals used when printing the latency and utilization
+  // statistics.
+
+ u_int context_switch_;
+ // Stores the total number of context switches incurred by the
+ // program while making CORBA requests
+
+#if (defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)) && !defined (ACE_WIN32)
+  ACE_Profile_Timer timer_for_context_switch;
+  ACE_Profile_Timer::Rusage usage;
+  // Timer and rusage structure used to count the context switches.
+#endif
+};
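The grain_ and counter_ members implement the distribution scheme described in the comment above: when there are more low priority client threads than distinct scheduling priorities, threads are assigned to priority levels in groups of grain_. A hedged, standalone illustration of that arithmetic (the thread and priority counts are made up; this is not code from the patch):

    #include <cstdio>

    // Sketch: distribute 8 low priority threads over 4 priority levels in
    // groups of grain = 8 / 4 = 2, the way the test hands out priorities.
    int
    main (void)
    {
      unsigned int num_threads = 8;      // low priority clients
      unsigned int num_priorities = 4;   // levels left after the high priority one
      unsigned int grain = num_threads / num_priorities;
      if (grain == 0)
        grain = 1;                       // never fewer than one thread per level

      unsigned int counter = 0;
      unsigned int level = 0;            // 0 is the lowest level in this sketch

      for (unsigned int i = 0; i < num_threads; ++i)
        {
          std::printf ("thread %u -> priority level %u\n", i, level);
          counter = (counter + 1) % grain;
          if (counter == 0)
            ++level;                     // next group goes one level higher
        }
      return 0;
    }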
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
index 23b572f64c0..493a65e0b60 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
@@ -58,9 +58,5 @@ void Cubit_i::shutdown (CORBA::Environment &)
{
ACE_DEBUG ((LM_DEBUG,
"(%t) Calling orb ()->shutdown ()\n"));
-
- // @@ Naga, can you please revise this so that it doesn't use
- // TAO-specific features? Please see how Irfan fixed IDL_Cubit's
- // shutdown () so that it wasn't TAO-specific!
this->orb_->shutdown ();
}
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp b/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp
index c5d43e3c868..230d7c0e666 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.cpp
@@ -33,6 +33,17 @@ char *force_argv[]=
"ior.txt"
};
+Server::Server (void)
+ :argc_ (0),
+ argv_ (0),
+ cubits_ (0),
+ high_priority_task_ (0),
+ low_priority_tasks_ (0),
+ high_argv_ (0),
+ low_argv_ (0)
+{
+}
+
int
Server::initialize (int argc, char **argv)
{
@@ -62,39 +73,24 @@ Server::initialize (int argc, char **argv)
"%n: ACE_OS::sched_params failed\n%a"),
-1);
}
+#else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "Test will not run. This platform doesn't seem to have threads.\n"),
+ -1);
+#endif /* ACE_HAS_THREADS */
+
this->argc_ = argc;
this->argv_ = argv;
-#if defined (VXWORKS)
- // @@ Naga, can you please factor this code into a separate file?!
-#if defined (VME_DRIVER)
- STATUS status = vmeDrv ();
-
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDrv()\n"));
-
- status = vmeDevCreate ("/vme");
-
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDevCreate()\n"));
-#endif /* defined (VME_DRIVER) */
-
-#if defined (FORCE_ARGS)
- int argc = 4;
- char *argv[] =
- {
- "server",
- "-s",
- "-f",
- "ior.txt"
- };
-#endif /* defined (FORCE_ARGS) */
-#endif /* defined (VXWORKS) */
-
- // Make sure we've got plenty of socket handles. This call will
- // use the default maximum.
+ VX_VME_INIT;
+
+#if defined (VXWORKS) && defined (FORCE_ARGS)
+ this->argc_ = 4;
+ this->argv_ = force_argv;
+#endif /* VXWORKS && FORCE_ARGS */
+
+ // Make sure we've got plenty of socket handles. This call will
+ // use the default maximum.
ACE::set_handle_limit ();
return 0;
}
@@ -127,77 +123,24 @@ Server::prelim_args_process (void)
}
void
-Server::init_high_priority (void)
-{
- // @@ Naga, here's another place where we write the same code again.
- // Please make sure that this gets factored out into a macro or an
- // inline function!
-#if defined (VXWORKS)
- this->high_priority_ = ACE_THR_PRI_FIFO_DEF;
-#elif defined (ACE_WIN32)
- this->high_priority_ =
- ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
-#else
- // @@ Naga/Sergio, why is there a "25" here? This seems like to
- // much of a "magic" number. Can you make this more "abstract?"
- this->high_priority_ = ACE_THR_PRI_FIFO_DEF + 25;
-#endif /* VXWORKS */
-
- ACE_DEBUG ((LM_DEBUG,
- "Creating servant 0 with high priority %d\n",
- this->high_priority_));
-
-}
-
-void
Server::init_low_priority (void)
{
- u_int j;
- this->num_low_priority_ =
- GLOBALS::instance ()->num_of_objs - 1;
-
ACE_Sched_Priority prev_priority = this->high_priority_;
// Drop the priority
if (GLOBALS::instance ()->thread_per_rate == 1
|| GLOBALS::instance ()->use_multiple_priority == 1)
- {
- this->num_priorities_ = 0;
-
- for (ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
- priority_iterator.more ();
- priority_iterator.next ())
- this->num_priorities_ ++;
- // 1 priority is exclusive for the high priority client.
- this->num_priorities_ --;
- // Drop the priority, so that the priority of clients will
- // increase with increasing client number.
- for (j = 0;
- j < this->num_low_priority_;
- j++)
- {
- this->low_priority_ =
- ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- prev_priority,
- ACE_SCOPE_THREAD);
- prev_priority = this->low_priority_;
- }
- // Granularity of the assignment of the priorities. Some OSs
- // have fewer levels of priorities than we have threads in our
- // test, so with this mechanism we assign priorities to groups
- // of threads when there are more threads than priorities.
- this->grain_ = this->num_low_priority_ / this->num_priorities_;
- this->counter_ = 0;
-
- if (this->grain_ <= 0)
- this->grain_ = 1;
- }
+ this->low_priority_ =
+ this->priority_.get_low_priority (this->num_low_priority_,
+ prev_priority,
+ 1);
else
this->low_priority_ =
- ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- prev_priority,
- ACE_SCOPE_THREAD);
+ this->priority_.get_low_priority (this->num_low_priority_,
+ prev_priority,
+ 0);
+ this->num_priorities_ = this->priority_.number_of_priorities ();
+ this->grain_ = this->priority_.grain ();
+ this->counter_ = 0;
}
// Write the ior's to a file so the client can read them.
@@ -384,7 +327,11 @@ Server::start_servants (ACE_Thread_Manager *serv_thr_mgr)
this->prelim_args_process ();
// Find the priority for the high priority servant.
- this->init_high_priority ();
+ this->high_priority_ = this->priority_.get_high_priority ();
+
+ ACE_DEBUG ((LM_DEBUG,
+ "Creating servant 0 with high priority %d\n",
+ this->high_priority_));
// activate the high priority servant task
if (this->activate_high_servant (serv_thr_mgr) < 0)
@@ -392,6 +339,9 @@ Server::start_servants (ACE_Thread_Manager *serv_thr_mgr)
"Failure in activating high priority servant\n"),
-1);
+ this->num_low_priority_ =
+ GLOBALS::instance ()->num_of_objs - 1;
+
// initialize the priority of the low priority servants.
this->init_low_priority ();
@@ -437,32 +387,20 @@ main (int argc, char *argv[])
// Create the daemon thread in its own <ACE_Thread_Manager>.
ACE_Thread_Manager servant_thread_manager;
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
- quantify_clear_data ();
- quantify_start_recording_data();
-#endif /* NO_ACE_QUANTIFY */
+ STOP_QUANTIFY;
+ CLEAR_QUANTIFY;
+ START_QUANTIFY;
if (server.start_servants (&servant_thread_manager) != 0)
ACE_ERROR_RETURN ((LM_ERROR,
"Error creating the servants\n"),
1);
-
ACE_DEBUG ((LM_DEBUG,
"Wait for all the threads to exit\n"));
-
// Wait for all the threads to exit.
servant_thread_manager.wait ();
// ACE_Thread_Manager::instance ()->wait ();
-
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
-#endif /* NO_ACE_QUANTIFY */
-
-#else
- ACE_DEBUG ((LM_DEBUG,
- "Test not run. This platform doesn't seem to have threads.\n"));
-#endif /* ACE_HAS_THREADS */
+ STOP_QUANTIFY;
return 0;
}
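Server::initialize above now invokes VX_VME_INIT unconditionally instead of carrying the VxWorks-only #ifdef block inline. Because the macro (defined in Globals.h) expands to the vmeDrv()/vmeDevCreate() setup only when both VXWORKS and VME_DRIVER are defined, and to nothing otherwise, initialization code can stay free of platform conditionals. A hedged sketch of that usage (initialize_transport is a made-up name):

    #include "Globals.h"
    #include "ace/ACE.h"

    // Sketch: platform setup without inline #ifdefs.  VX_VME_INIT is a
    // no-op on builds that are not VxWorks + VME_DRIVER.
    int
    initialize_transport (void)
    {
      VX_VME_INIT;                  // VME driver setup, or nothing
      ACE::set_handle_limit ();     // always raise the socket handle limit
      return 0;
    }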
diff --git a/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.h b/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.h
index d49f144c866..97d5e6ae1ce 100644
--- a/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.h
+++ b/TAO/performance-tests/Cubit/TAO/MT_Cubit/server.h
@@ -127,6 +127,7 @@ private:
};
class Server
+ :public virtual MT_Priority
{
// = TITLE
// A multithreaded cubit server class.
@@ -135,7 +136,9 @@ class Server
// cubit server. To use this ,call initialize and then
// start_servants method.
public:
+ Server (void);
// default constructor
+
int initialize (int argc, char **argv);
// initialize the server state.
@@ -146,9 +149,6 @@ private:
void prelim_args_process (void);
// preliminary argument processing code.
- void init_high_priority (void);
- // sets the priority of the high priority servant.
-
void init_low_priority (void);
// sets the priority to be used for the low priority servants.
@@ -181,7 +181,7 @@ private:
ACE_Sched_Priority low_priority_;
// priority used by the low priority servants.
-
+
u_int num_low_priority_;
// number of low priority servants
@@ -201,6 +201,9 @@ private:
ACE_ARGV *low_argv_;
// argv passed to the low priority servants.
+
+ MT_Priority priority_;
+ // priority helper object.
};
#endif /* SERVER_H */
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp
index 06b7a89c6ad..0d46a6a8e06 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Cubit_Task.cpp
@@ -131,9 +131,6 @@ Cubit_Task::initialize_orb (void)
if (GLOBALS::instance ()->use_name_service == 0)
return 0;
- // @@ Naga, if this code is no longer needed can we please
- // remove it?
-
// Initialize the naming services. Init should be able to be
// passed the command line arguments, but it isn't possible
// here, so use dummy values.
@@ -210,7 +207,6 @@ Cubit_Task::create_servants (void)
-1);
char *buffer;
- // @@ Naga, can you please document why the value "3" is here?
// Length of the string is the length of the key + 2 char
// id of the servant + null space.
int len = ACE_OS::strlen (this->key_) + 3;
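The "+ 3" above reserves room for a two-character servant id plus the terminating null on top of the key itself. A quick worked instance of that sizing rule (the key and id values are made up for illustration):

    // Sketch: with key "key0" (4 chars) and servant id 12, the buffer needs
    // ACE_OS::strlen (key) + 3 = 7 bytes to hold "key012" plus '\0'.
    const char *key = "key0";
    int len = ACE_OS::strlen (key) + 3;          // 4 + 3 = 7
    char *buffer = new char[len];
    ACE_OS::sprintf (buffer, "%s%d", key, 12);   // 6 characters + '\0' fits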
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Globals.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/Globals.cpp
index c0fdacf8f6b..ce248410b84 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Globals.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Globals.cpp
@@ -67,8 +67,6 @@ Globals::parse_args (int argc, char *argv[])
break;
case 'p':
base_port = ACE_OS::atoi (opts.optarg);
- // @@ Naga, do we need to keep this printout here or can we
- // remove it?
break;
case 't':
num_of_objs = ACE_OS::atoi (opts.optarg);
@@ -94,3 +92,87 @@ Globals::parse_args (int argc, char *argv[])
// Indicates successful parsing of command line.
return 0;
}
+
+MT_Priority::MT_Priority (void)
+ :num_priorities_ (0),
+ grain_ (0)
+{
+}
+
+ACE_Sched_Priority
+MT_Priority::get_high_priority (void)
+{
+ ACE_Sched_Priority high_priority;
+#if defined (VXWORKS)
+ high_priority = ACE_THR_PRI_FIFO_DEF;
+#elif defined (ACE_WIN32)
+ high_priority =
+ ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD);
+#else
+  // @@ Naga/Sergio, why is there a "25" here? This seems like too
+  // much of a "magic" number. Can you make this more abstract?
+ high_priority = ACE_THR_PRI_FIFO_DEF + 25;
+#endif /* VXWORKS */
+ return high_priority;
+}
+
+ACE_Sched_Priority
+MT_Priority::get_low_priority (u_int num_low_priority,
+ ACE_Sched_Priority prev_priority,
+ u_int use_multiple_priority)
+{
+ ACE_Sched_Priority low_priority;
+ // Drop the priority
+ if (use_multiple_priority)
+ {
+ this->num_priorities_ = 0;
+
+ for (ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD);
+ priority_iterator.more ();
+ priority_iterator.next ())
+ this->num_priorities_ ++;
+ // 1 priority is exclusive for the high priority client.
+ this->num_priorities_ --;
+ // Drop the priority, so that the priority of clients will
+ // increase with increasing client number.
+ for (u_int j = 0;
+ j < num_low_priority;
+ j++)
+ {
+ low_priority =
+ ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
+ prev_priority,
+ ACE_SCOPE_THREAD);
+ prev_priority = low_priority;
+ }
+ // Granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
+ this->grain_ = num_low_priority / this->num_priorities_;
+      if (this->grain_ <= 0)
+        this->grain_ = 1;
+ }
+ else
+ low_priority =
+ ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
+ prev_priority,
+ ACE_SCOPE_THREAD);
+ return low_priority;
+}
+
+u_int
+MT_Priority::number_of_priorities (void)
+{
+ return this->num_priorities_;
+}
+
+u_int
+MT_Priority::grain (void)
+{
+ return this->grain_;
+}
+
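Since get_high_priority() and get_low_priority() are virtual, the "+ 25" offset questioned in the comment above can be confined to a subclass rather than edited in place wherever it is needed. A hedged sketch of that idea (Platform_Priority is a hypothetical class, not part of this change):

    #include "Globals.h"
    #include "ace/Sched_Params.h"

    // Sketch: derive the high priority from ACE_Sched_Params instead of a
    // hard-coded ACE_THR_PRI_FIFO_DEF + 25.
    class Platform_Priority : public MT_Priority
    {
    public:
      virtual ACE_Sched_Priority get_high_priority (void)
      {
        // One step below the FIFO maximum, however many levels the OS has.
        return ACE_Sched_Params::previous_priority
          (ACE_SCHED_FIFO,
           ACE_Sched_Params::priority_max (ACE_SCHED_FIFO, ACE_SCOPE_THREAD),
           ACE_SCOPE_THREAD);
      }
    };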
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Globals.h b/TAO/tests/Cubit/TAO/MT_Cubit/Globals.h
index 2859297bd17..8523ddf4a7c 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Globals.h
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Globals.h
@@ -19,6 +19,7 @@
#include "ace/OS.h"
#include "ace/Get_Opt.h"
#include "ace/Synch_T.h"
+#include "ace/Sched_Params.h"
#if !defined (ACE_HAS_THREADS)
class NOOP_ACE_Barrier
@@ -30,6 +31,27 @@ public:
#define ACE_Barrier NOOP_ACE_Barrier
#endif /* ACE_HAS_THREADS */
+#if defined (VXWORKS) && defined (VME_DRIVER)
+#define VX_VME_INIT \
+STATUS status = vmeDrv ();\
+if (status != OK)\
+ ACE_DEBUG ((LM_DEBUG,\
+ "ERROR on call to vmeDrv()\n"));\
+ status = vmeDevCreate ("/vme");\
+ if (status != OK)\
+ ACE_DEBUG ((LM_DEBUG,\
+ "ERROR on call to vmeDevCreate()\n"));
+#else
+#define VX_VME_INIT
+#endif /* VXWORKS && VME_DRIVER */
+
+#if defined (ACE_LACKS_FLOATING_POINT)
+#define TIME_IN_MICROSEC(X) X
+#else /* !ACE_LACKS_FLOATING_POINT */
+#define TIME_IN_MICROSEC(X) \
+(X * ACE_ONE_SECOND_IN_USECS)
+#endif /* !ACE_LACKS_FLOATING_POINT */
+
class Globals
{
// = TITLE
@@ -38,9 +60,8 @@ class Globals
// This is used both by the server and client side.
public:
Globals (void);
+ // default constructor.
- // @@ Naga, can you please make sure these fields/methods are
- // commented briefly?
int parse_args (int argc,char **argv);
// parse the arguments.
@@ -85,6 +106,37 @@ public:
// binding to the orb.
};
+class MT_Priority
+{
+public:
+ MT_Priority (void);
+ // constructor.
+
+ virtual ACE_Sched_Priority get_high_priority (void);
+  // returns the priority to be used for the high priority thread.
+
+ virtual ACE_Sched_Priority get_low_priority (u_int num_low_priority,
+ ACE_Sched_Priority prev_priority,
+ u_int use_multiple_priority);
+  // computes and returns the priority to be used for the low
+  // priority threads.
+
+  u_int number_of_priorities (void);
+ // accessor for num_priorities_.
+
+ u_int grain (void);
+ // accessor for grain_.
+
+protected:
+ u_int num_priorities_;
+ // number of priorities used.
+
+ u_int grain_;
+ // Granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
+};
+
+
#endif /* GLOBALS_H */
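The TIME_IN_MICROSEC macro added above papers over the two time representations used in this test: on platforms with floating point, elapsed times from the high-resolution timer are kept in seconds and the macro scales them by ACE_ONE_SECOND_IN_USECS, while on ACE_LACKS_FLOATING_POINT platforms times are already integral microseconds and the macro is the identity. A hedged sketch of its intended use with the per-request time returned by MT_Cubit_Timer::get_elapsed() (to_usecs is a made-up helper):

    #include "Globals.h"

    // Sketch: normalize an elapsed time to microseconds regardless of the
    // platform's time representation.  Note the macro does not parenthesize
    // its argument, so pass a simple variable rather than an expression.
    ACE_timer_t
    to_usecs (ACE_timer_t real_time)
    {
      return TIME_IN_MICROSEC (real_time);
    }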
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
index cbdba19ac33..9eb7d636a5e 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.cpp
@@ -196,10 +196,10 @@ Task_State::parse_args (int argc,char **argv)
ACE_Thread_Semaphore (0),
-1);
ACE_NEW_RETURN (latency_,
- double [thread_count_],
+ ACE_timer_t [thread_count_],
-1);
ACE_NEW_RETURN (global_jitter_array_,
- double *[thread_count_],
+ ACE_timer_t *[thread_count_],
-1);
ACE_NEW_RETURN (count_,
u_int [thread_count_],
@@ -212,8 +212,6 @@ Task_State::~Task_State (void)
int i;
if (this->ior_file_ != 0)
- // @@ Naga, should this be delete [] this->ior_file?!
- // ;-(
ACE_OS::free (this->ior_file_);
// Delete the strduped memory.
@@ -240,8 +238,8 @@ Client::Client (ACE_Thread_Manager *thread_manager,
}
void
-Client::put_latency (double *jitter,
- double latency,
+Client::put_latency (ACE_timer_t *jitter,
+ ACE_timer_t latency,
u_int thread_id,
u_int count)
{
@@ -251,52 +249,43 @@ Client::put_latency (double *jitter,
this->ts_->global_jitter_array_[thread_id] = jitter;
this->ts_->count_[thread_id] = count;
- // @@ Naga, can you please try to factor out all of the
- // ACE_LACKS_FLOATING_POINT into a helper class to clean up all of
- // this code?!
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
- "(%t) My latency was %u msec\n",
+              "(%t) My latency was %A usec\n",
latency));
-#else
- ACE_DEBUG ((LM_DEBUG,
- "(%t) My latency was %f msec\n",
- latency));
-#endif /* ! ACE_LACKS_FLOATING_POINT */
}
-double
+ACE_timer_t
Client::get_high_priority_latency (void)
{
- return (double) this->ts_->latency_ [0];
+ return (ACE_timer_t) this->ts_->latency_ [0];
}
-double
+ACE_timer_t
Client::get_low_priority_latency (void)
{
if (this->ts_->thread_count_ == 1)
return 0;
- double l = 0;
+ ACE_timer_t l = 0;
for (u_int i = 1; i < this->ts_->thread_count_; i++)
- l += (double) this->ts_->latency_[i];
+ l += (ACE_timer_t) this->ts_->latency_[i];
- return l / (double) (this->ts_->thread_count_ - 1);
+ return l / (ACE_timer_t) (this->ts_->thread_count_ - 1);
}
-double
+ACE_timer_t
Client::get_latency (u_int thread_id)
{
- return ACE_static_cast (double, this->ts_->latency_ [thread_id]);
+ return ACE_static_cast (ACE_timer_t, this->ts_->latency_ [thread_id]);
}
-double
+ACE_timer_t
Client::get_high_priority_jitter (void)
{
- double jitter = 0.0;
- double average = get_high_priority_latency ();
- double number_of_samples = this->ts_->high_priority_loop_count_ / this->ts_->granularity_;
+ ACE_timer_t jitter = 0.0;
+ ACE_timer_t average = get_high_priority_latency ();
+ ACE_timer_t number_of_samples = this->ts_->high_priority_loop_count_ / this->ts_->granularity_;
// Compute the standard deviation (i.e. jitter) from the values
// stored in the global_jitter_array_.
@@ -307,7 +296,7 @@ Client::get_high_priority_jitter (void)
// each latency has from the average
for (u_int i = 0; i < number_of_samples; i ++)
{
- double difference =
+ ACE_timer_t difference =
this->ts_->global_jitter_array_ [0][i] - average;
jitter += difference * difference;
stats.sample ((ACE_UINT32) (this->ts_->global_jitter_array_ [0][i] * 1000 + 0.5));
@@ -316,8 +305,6 @@ Client::get_high_priority_jitter (void)
// Return the square root of the sum of the differences computed
// above, i.e. jitter.
- // @@ Naga, can you please replace the fprintf (stderr, ...) calls
- // with ACE_DEBUG(()) calls throughout this file?
ACE_DEBUG ((LM_DEBUG,
"high priority jitter:\n"));
stats.print_summary (3, 1000, stderr);
@@ -325,15 +312,15 @@ Client::get_high_priority_jitter (void)
return sqrt (jitter / (number_of_samples - 1));
}
-double
+ACE_timer_t
Client::get_low_priority_jitter (void)
{
if (this->ts_->thread_count_ == 1)
return 0;
- double jitter = 0.0;
- double average = get_low_priority_latency ();
- double number_of_samples = 0;
+ ACE_timer_t jitter = 0.0;
+ ACE_timer_t average = get_low_priority_latency ();
+ ACE_timer_t number_of_samples = 0;
//(this->ts_->thread_count_ - 1) * (this->ts_->loop_count_ / this->ts_->granularity_);
// Compute the standard deviation (i.e. jitter) from the values
@@ -348,7 +335,7 @@ Client::get_low_priority_jitter (void)
number_of_samples += this->ts_->count_[j];
for (u_int i = 0; i < this->ts_->count_[j] / this->ts_->granularity_; i ++)
{
- double difference =
+ ACE_timer_t difference =
this->ts_->global_jitter_array_[j][i] - average;
jitter += difference * difference;
stats.sample ((ACE_UINT32) (this->ts_->global_jitter_array_ [j][i] * 1000 + 0.5));
@@ -365,12 +352,12 @@ Client::get_low_priority_jitter (void)
return sqrt (jitter / (number_of_samples - 1));
}
-double
+ACE_timer_t
Client::get_jitter (u_int id)
{
- double jitter = 0.0;
- double average = get_latency (id);
- double number_of_samples = this->ts_->count_[id] / this->ts_->granularity_;
+ ACE_timer_t jitter = 0.0;
+ ACE_timer_t average = get_latency (id);
+ ACE_timer_t number_of_samples = this->ts_->count_[id] / this->ts_->granularity_;
// Compute the standard deviation (i.e. jitter) from the values
// stored in the global_jitter_array_.
@@ -381,7 +368,7 @@ Client::get_jitter (u_int id)
// latency has from the average.
for (u_int i = 0; i < this->ts_->count_[id] / this->ts_->granularity_; i ++)
{
- double difference =
+ ACE_timer_t difference =
this->ts_->global_jitter_array_[id][i] - average;
jitter += difference * difference;
stats.sample ((ACE_UINT32) (this->ts_->global_jitter_array_ [id][i] * 1000 + 0.5));
@@ -405,7 +392,7 @@ Client::svc (void)
CORBA::Object_var naming_obj (0);
CORBA::Environment env;
- double frequency = 0.0;
+ ACE_timer_t frequency = 0.0;
ACE_DEBUG ((LM_DEBUG, "I'm thread %t\n"));
@@ -787,12 +774,6 @@ Client::cube_short (void)
CORBA::Short arg_short = func (this->num_);
CORBA::Short ret_short;
- // @@ Naga, can you please do two things:
- // 1. Move this quantify stuff into a macro so that it
- // doesn't clutter the code everywhere?
- // 2. Reconsider why this macro is named NO_ACE_QUANTIFY?
- // It doesn't seem to make much sense!
-
START_QUANTIFY;
ret_short = this->cubit_->cube_short (arg_short, TAO_TRY_ENV);
@@ -964,31 +945,26 @@ Client::run_tests (Cubit_ptr cb,
u_int loop_count,
u_int thread_id,
Cubit_Datatypes datatype,
- double frequency)
+ ACE_timer_t frequency)
{
int result;
- // @@ Naga, this function is WAY too long! Can you please try to
- // split it up?!
CORBA::Environment env;
u_int i = 0;
u_int low_priority_client_count = this->ts_->thread_count_ - 1;
- double *my_jitter_array;
+ ACE_timer_t *my_jitter_array;
this->cubit_ = cb;
if (id_ == 0 && this->ts_->thread_count_ > 1)
// @@ Naga, can you please generalize this magic number?
ACE_NEW_RETURN (my_jitter_array,
- double [(loop_count/this->ts_->granularity_) * 30],
+ ACE_timer_t [(loop_count/this->ts_->granularity_) * 30],
-1);
else
ACE_NEW_RETURN (my_jitter_array,
- double [loop_count/this->ts_->granularity_ * 15],
+ ACE_timer_t [loop_count/this->ts_->granularity_ * 15],
-1);
- // @@ Naga, can you please replace this CHORUS stuff with the
- // ACE_timer_t stuff throughout the file?!
-
ACE_timer_t latency = 0;
ACE_timer_t sleep_time = (1 / frequency) * ACE_ONE_SECOND_IN_USECS * this->ts_->granularity_; // usec
ACE_timer_t delta = 0;
@@ -997,7 +973,7 @@ Client::run_tests (Cubit_ptr cb,
ACE_Time_Value max_wait_time (this->ts_->util_time_, 0);
ACE_Countdown_Time countdown (&max_wait_time);
- MT_Cubit_Timer timer (this->ts_);
+ MT_Cubit_Timer timer (this->ts_->granularity_);
// Elapsed time will be in microseconds.
ACE_Time_Value delta_t;
@@ -1032,7 +1008,9 @@ Client::run_tests (Cubit_ptr cb,
timer.start ();
}
this->num_ = i;
+ // make calls to the server object depending on the datatype.
result = this->make_calls ();
+
if (result < 0)
return 2;
@@ -1044,17 +1022,12 @@ Client::run_tests (Cubit_ptr cb,
// Calculate time elapsed.
ACE_timer_t real_time;
real_time = timer.get_elapsed ();
-#if defined (ACE_LACKS_FLOATING_POINT)
- delta = ((40 * fabs (real_time) / 100) + (60 * delta / 100)); // pow(10,6)
- latency += real_time * this->ts_->granularity_;
- my_jitter_array [i/this->ts_->granularity_] = real_time; // in units of microseconds.
- // update the latency array, correcting the index using the granularity
-#else /* !ACE_LACKS_FLOATING_POINT */
- delta = ((0.4 * fabs (real_time * ACE_ONE_SECOND_IN_USECS)) + (0.6 * delta)); // pow(10,6)
- latency += (real_time * ts_->granularity_);
- my_jitter_array [i/ts_->granularity_] = real_time * ACE_ONE_SECOND_IN_MSECS;
-#endif /* !ACE_LACKS_FLOATING_POINT */
- } // END OF IF :
+      delta = ((ACE_timer_t) 40 * fabs (TIME_IN_MICROSEC (real_time)) / (ACE_timer_t) 100)
+              + ((ACE_timer_t) 60 * delta / (ACE_timer_t) 100);
+ latency += real_time * this->ts_->granularity_;
+ my_jitter_array [i/this->ts_->granularity_] =
+ TIME_IN_MICROSEC (real_time);
+ }
if ( this->ts_->thread_per_rate_ == 1 && id_ < (this->ts_->thread_count_ - 1) )
{
if (this->ts_->semaphore_->tryacquire () != -1)
@@ -1094,38 +1067,21 @@ Client::run_tests (Cubit_ptr cb,
{
if (this->error_count_ == 0)
{
-#if defined (ACE_LACKS_FLOATING_POINT)
- long calls_per_second = (this->call_count_ * ACE_ONE_SECOND_IN_USECS) / latency;
+ ACE_timer_t calls_per_second = (TIME_IN_MICROSEC (this->call_count_)) / latency;
latency = latency/this->call_count_;//calc average latency
-#else
- latency /= this->call_count_; // calc average latency
-#endif /* ACE_LACKS_FLOATING_POINT */
if (latency > 0)
{
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) cube average call ACE_OS::time\t= %u usec, \t"
- "%u calls/second\n",
+ "(%P|%t) cube average call ACE_OS::time\t= %A usec, \t"
+ "%A calls/second\n",
latency,
calls_per_second));
this->put_latency (my_jitter_array,
- latency,
- thread_id,
- this->call_count_);
-#else
- ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) cube average call ACE_OS::time\t= %f msec, \t"
- "%f calls/second\n",
- latency * 1000,
- 1 / latency));
-
- this->put_latency (my_jitter_array,
- latency * ACE_ONE_SECOND_IN_MSECS,
+ TIME_IN_MICROSEC (latency),
thread_id,
this->call_count_);
-#endif /* ! ACE_LACKS_FLOATING_POINT */
}
else
{
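Each of the jitter accessors touched above follows the same recipe: take the mean of the recorded per-request latencies, accumulate the squared deviations from that mean, and report the square root of the accumulated sum divided by (n - 1). A small, self-contained version of that computation, with made-up sample data:

    #include <cmath>
    #include <cstdio>

    // Sketch: jitter as the sample standard deviation of recorded latencies,
    // mirroring the structure of Client::get_jitter ().
    int
    main (void)
    {
      double latency[] = { 510.0, 495.0, 502.0, 530.0, 488.0 };   // usecs, made up
      const unsigned int n = sizeof (latency) / sizeof (latency[0]);

      double average = 0.0;
      for (unsigned int i = 0; i < n; ++i)
        average += latency[i];
      average /= n;

      double jitter = 0.0;
      for (unsigned int i = 0; i < n; ++i)
        {
          double difference = latency[i] - average;
          jitter += difference * difference;
        }
      jitter = std::sqrt (jitter / (n - 1));

      std::printf ("average = %.1f usec, jitter = %.1f usec\n", average, jitter);
      return 0;
    }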
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.h b/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.h
index f0d269a15d8..6574293f015 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.h
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Task_Client.h
@@ -26,10 +26,12 @@
#include "ace/Sched_Params.h"
#include "ace/High_Res_Timer.h"
+
#include "orbsvcs/CosNamingC.h"
#include "orbsvcs/Naming/Naming_Utils.h"
#include "cubitC.h"
#include "cubit_i.h"
+#include "Globals.h"
#if defined (CHORUS)
#include "pccTimer.h"
@@ -41,10 +43,7 @@
//
// I will integrate this, together with the sqrt() function when
// the implementation is complete. --Sergio.
-// @@ Sergio, can you please use the ACE_timer_t type for this instead
-// of #define'ing double?!
#if defined (ACE_LACKS_FLOATING_POINT)
-#define double ACE_UINT32
#define fabs(X) ((X) >= 0 ? (X) : -(X))
// the following is just temporary, until we finish the sqrt()
// implementation.
@@ -66,9 +65,12 @@ public:
quantify_start_recording_data ();
#define STOP_QUANTIFY \
quantify_stop_recording_data();
+#define CLEAR_QUANTIFY \
+quantify_clear_data ();
#else /*!NO_ACE_QUANTIFY */
#define START_QUANTIFY
#define STOP_QUANTIFY
+#define CLEAR_QUANTIFY
#endif /* !NO_ACE_QUANTIFY */
// Arbitrary generator used by the client to create the numbers to be
@@ -134,7 +136,7 @@ public:
u_int thread_count_;
// Number of concurrent clients to create.
- double *latency_;
+ ACE_timer_t *latency_;
// Array to store the latency for every client, indexed by
// thread-id.
@@ -154,7 +156,7 @@ public:
u_int thread_per_rate_;
// Flag for the thread_per_rate test.
- double **global_jitter_array_;
+ ACE_timer_t **global_jitter_array_;
// This array stores the latency seen by each client for each
// request, to be used later to compute jitter.
@@ -243,7 +245,7 @@ char *one_ior_;
// flag to indicate whether we make remote versus local invocations
// to calculate accurately the ORB overhead.
- double util_test_time_;
+ ACE_timer_t util_test_time_;
// holds the total time for the utilization test to complete.
};
@@ -262,12 +264,12 @@ public:
virtual int svc (void);
// The thread function.
- double get_high_priority_latency (void);
- double get_low_priority_latency (void);
- double get_high_priority_jitter (void);
- double get_low_priority_jitter (void);
- double get_latency (u_int thread_id);
- double get_jitter (u_int id);
+ ACE_timer_t get_high_priority_latency (void);
+ ACE_timer_t get_low_priority_latency (void);
+ ACE_timer_t get_high_priority_jitter (void);
+ ACE_timer_t get_low_priority_jitter (void);
+ ACE_timer_t get_latency (u_int thread_id);
+ ACE_timer_t get_jitter (u_int id);
// Accessors to get the various measured quantities.
private:
@@ -275,7 +277,7 @@ private:
u_int,
u_int,
Cubit_Datatypes,
- double frequency);
+ ACE_timer_t frequency);
// run the various tests.
int make_calls (void);
@@ -293,8 +295,8 @@ private:
int cube_struct (void);
// call cube struct on the cubit object.
- void put_latency (double *jitter,
- double latency,
+ void put_latency (ACE_timer_t *jitter,
+ ACE_timer_t latency,
u_int thread_id,
u_int count);
// Records the latencies in the <Task_State>.
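The CLEAR_QUANTIFY macro added above, together with the existing START_QUANTIFY and STOP_QUANTIFY, lets callers bracket a measured region without repeating the #ifdef NO_ACE_QUANTIFY blocks that this change removes from server.cpp and client.cpp. A hedged usage sketch (measure_region and run_once are placeholders, not part of the patch):

    #include "Task_Client.h"

    // Sketch: bracket a region for Quantify.  When NO_ACE_QUANTIFY is not
    // defined the macros expand to nothing, so this compiles unchanged on
    // builds without Quantify instrumentation.
    void
    measure_region (void (*run_once) (void))
    {
      STOP_QUANTIFY;      // stop any recording left over from startup
      CLEAR_QUANTIFY;     // discard data gathered so far
      START_QUANTIFY;     // record only the region below

      (*run_once) ();

      STOP_QUANTIFY;
    }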
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Timer.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/Timer.cpp
index e6122264992..409b128af66 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Timer.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Timer.cpp
@@ -3,13 +3,11 @@
#include "Timer.h"
#include "Task_Client.h"
-MT_Cubit_Timer::MT_Cubit_Timer (Task_State *ts)
+MT_Cubit_Timer::MT_Cubit_Timer (u_int granularity)
+ :granularity_ (granularity)
#if defined (CHORUS)
- :pstartTime_ (0),
- pstopTime_ (0),
- ts_ (ts)
-#else
- :ts_ (ts)
+  ,pstartTime_ (0),
+ pstopTime_ (0)
#endif
{
}
@@ -42,11 +40,11 @@ MT_Cubit_Timer::get_elapsed (void)
ACE_timer_t real_time;
#if defined (ACE_LACKS_FLOATING_POINT)
# if defined (CHORUS)
- real_time = (this->pstopTime_ - this->pstartTime_) / this->ts_->granularity_;
+ real_time = (this->pstopTime_ - this->pstartTime_) / this->granularity_;
# else /* CHORUS */
// Store the time in usecs.
real_time = (this->delta_.sec () * ACE_ONE_SECOND_IN_USECS +
- this->delta_.usec ()) / this->ts_->granularity_;
+ this->delta_.usec ()) / this->granularity_;
#endif /* !CHORUS */
#else /* !ACE_LACKS_FLOATING_POINT */
@@ -64,8 +62,8 @@ MT_Cubit_Timer::get_elapsed (void)
// This is only occuring in VxWorks.
// I'll leave these here to debug it later.
- double tmp = (double)this->delta_.sec ();
- double tmp2 = (double)this->delta_.usec ();
+  ACE_timer_t tmp = (ACE_timer_t) this->delta_.sec ();
+  ACE_timer_t tmp2 = (ACE_timer_t) this->delta_.usec ();
if (tmp > 100000)
{
tmp = 0.0;
@@ -74,15 +72,14 @@ MT_Cubit_Timer::get_elapsed (void)
this->delta_.usec ()));
}
- real_time = tmp + tmp2 / (double)ACE_ONE_SECOND_IN_USECS;
+ real_time = tmp + tmp2 / (ACE_timer_t)ACE_ONE_SECOND_IN_USECS;
#else
- real_time = ((double) this->delta_.sec () +
- (double) this->delta_.usec () / (double) ACE_ONE_SECOND_IN_USECS);
+ real_time = ((ACE_timer_t) this->delta_.sec () +
+ (ACE_timer_t) this->delta_.usec () / (ACE_timer_t) ACE_ONE_SECOND_IN_USECS);
#endif /* VXWORKS */
- real_time /= this->ts_->granularity_;
+ real_time /= this->granularity_;
#endif /* !ACE_LACKS_FLOATING_POINT */
-
return real_time;
}
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/Timer.h b/TAO/tests/Cubit/TAO/MT_Cubit/Timer.h
index a53d465e773..357cde10983 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/Timer.h
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/Timer.h
@@ -14,25 +14,27 @@ class MT_Cubit_Timer
// A class that encapsulates the pccTimer for chorus and uses
// ACE Timer for other platforms.
public:
- MT_Cubit_Timer (Task_State *ts);
+ MT_Cubit_Timer (u_int granularity);
void start (void);
void stop (void);
ACE_timer_t get_elapsed (void);
private:
-#if defined (CHORUS)
- int pstartTime_;
- int pstopTime_;
- // variables for the pccTimer.
-#endif
-
ACE_High_Res_Timer timer_;
// timer.
ACE_Time_Value delta_;
// Elapsed time in microseconds.
- Task_State *ts_;
- // task state.
+ u_int granularity_;
+ // this is the granularity of the timing of the CORBA requests. A
+ // value of 5 represents that we will take time every 5 requests,
+ // instead of the default of every request (1).
+
+#if defined (CHORUS)
+ int pstartTime_;
+ int pstopTime_;
+ // variables for the pccTimer.
+#endif
};
#endif
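With the Task_State dependency removed, MT_Cubit_Timer now only needs the request granularity: the caller times a batch of <granularity> requests and get_elapsed() divides the elapsed time by that count to report a per-request figure. A hedged sketch of the intended usage (time_batch is a made-up helper; the request itself is omitted):

    #include "Timer.h"

    // Sketch: time <granularity> requests in one shot and return the
    // average time per request, since get_elapsed () already divides the
    // elapsed time by the granularity.
    ACE_timer_t
    time_batch (u_int granularity)
    {
      MT_Cubit_Timer timer (granularity);

      timer.start ();
      for (u_int i = 0; i < granularity; ++i)
        {
          // issue one CORBA request here (omitted in this sketch)
        }
      timer.stop ();

      return timer.get_elapsed ();
    }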
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp
index 19e86e72007..69d01af1743 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/client.cpp
@@ -24,6 +24,13 @@
ACE_RCSID(MT_Cubit, client, "$Id$")
+#if defined (FORCE_ARGS)
+ char *force_argv[] = {"client",
+ "-s",
+ "-f",
+ "ior.txt"};
+#endif
+
#if defined (VXWORKS)
u_int ctx = 0;
u_int ct = 0;
@@ -63,31 +70,123 @@ switchHook ( WIND_TCB *pOldTcb, // pointer to old task's WIND_TCB.
}
#endif /* VXWORKS */
+// constructor.
+Client_i::Client_i (void)
+ :high_priority_client_ (0),
+ low_priority_client_ (0),
+ util_thread_ (0),
+ ts_ (0),
+ num_low_priority_ (0),
+ num_priorities_ (0),
+ grain_ (0),
+ counter_ (0),
+ task_id_ (0),
+ argc_ (0),
+ argv_ (0),
+ total_latency_ (0),
+ total_latency_high_ (0),
+ total_util_task_duration_ (0),
+ context_switch_ (0)
+{
+}
+
+// destructor.
+Client_i::~Client_i (void)
+{
+ if (this->low_priority_client_ != 0)
+ // Delete the low priority task array
+ for (u_int i = this->num_low_priority_; i > 0; i--)
+ delete this->low_priority_client_ [i - 1];
+
+  // Delete the array itself.
+  delete [] this->low_priority_client_;
+}
+
int
-initialize (void)
+Client_i::init (int argc,char **argv)
{
-#if defined (VXWORKS) && defined (VME_DRIVER)
- // @@ Naga, can you please factor these initialization functions
- // into a separate function somehow?
- STATUS status = vmeDrv ();
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDrv()\n"));
- status = vmeDevCreate ("/vme");
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDevCreate()\n"));
-#endif /* VXWORKS && VME_DRIVER */
+#if defined (ACE_HAS_THREADS)
+ // Enable FIFO scheduling, e.g., RT scheduling class on Solaris.
+ if (ACE_OS::sched_params (
+ ACE_Sched_Params (
+ ACE_SCHED_FIFO,
+#if defined (__Lynx__)
+ 30,
+#elif defined (VXWORKS) /* ! __Lynx__ */
+ 6,
+#elif defined (ACE_WIN32)
+ ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD),
+#else
+ ACE_THR_PRI_FIFO_DEF + 25,
+#endif /* ! __Lynx__ */
+ ACE_SCOPE_PROCESS)) != 0)
+ {
+ if (ACE_OS::last_error () == EPERM)
+ ACE_DEBUG ((LM_MAX,
+ "preempt: user is not superuser, "
+ "so remain in time-sharing class\n"));
+ else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "%n: ACE_OS::sched_params failed\n%a"),
+ -1);
+ }
+#else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "Test will not run. This platform doesn't seem to have threads.\n"),
+ -1);
+#endif /* ACE_HAS_THREADS */
+
+ this->argc_ = argc;
+ this->argv_ = argv;
+ VX_VME_INIT;
+
+#if defined (VXWORKS) && defined (FORCE_ARGS)
+ this->argc_ = 4;
+ this->argv_ = force_argv;
+#endif /* VXWORKS && FORCE_ARGS */
// Make sure we've got plenty of socket handles. This call will use
// the default maximum.
ACE::set_handle_limit ();
+
+ ACE_NEW_RETURN (this->ts_,
+ Task_State (this->argc_,
+ this->argv_),
+ -1);
+ // preliminary argument processing
+  for (int i = 0; i < this->argc_; i++)
+ {
+ if ((ACE_OS::strcmp (this->argv_[i],"-r") == 0))
+ this->ts_->thread_per_rate_ = 1;
+      else if ((ACE_OS::strcmp (this->argv_[i],"-t") == 0) && (i + 1 < this->argc_))
+ this->ts_->thread_count_ = ACE_OS::atoi (this->argv_[i+1]);
+ }
+#if defined (CHORUS)
+ // start the pccTimer for chorus classix
+ int pTime;
+
+ // Initialize the PCC timer Chip
+ pccTimerInit();
+
+ if(pccTimer(PCC2_TIMER1_START,&pTime) != K_OK)
+ {
+ printf("pccTimer has a pending benchmark\n");
+ }
+#endif /* CHORUS */
+
return 0;
}
+void
+Client_i::run (void)
+{
+ if (this->ts_->thread_per_rate_ == 0)
+ this->do_priority_inversion_test (&this->client_thread_manager_);
+ else
+ this->do_thread_per_rate_test (&this->client_thread_manager_);
+}
+
#if defined (VXWORKS)
void
-output_taskinfo (void)
+Client_i::output_taskinfo (void)
{
FILE *file_handle = 0;
@@ -113,7 +212,7 @@ output_taskinfo (void)
#endif /* VXWORKS */
void
-output_latency (Task_State *ts)
+Client_i::output_latency (Task_State *ts)
{
FILE *latency_file_handle = 0;
char latency_file[BUFSIZ];
@@ -121,7 +220,7 @@ output_latency (Task_State *ts)
ACE_OS::sprintf (latency_file,
"cb__%d.txt",
- ts->thread_count_);
+ this->ts_->thread_count_);
ACE_OS::fprintf(stderr,
"--->Output file for latency data is \"%s\"\n",
@@ -131,7 +230,7 @@ output_latency (Task_State *ts)
// This loop visits each client. thread_count_ is the number of
// clients.
- for (u_int j = 0; j < ts->thread_count_; j ++)
+ for (u_int j = 0; j < this->ts_->thread_count_; j ++)
{
ACE_OS::sprintf(buffer,
"%s #%d",
@@ -139,7 +238,7 @@ output_latency (Task_State *ts)
j);
// This loop visits each request latency from a client.
for (u_int i = 0;
- i < (j == 0 ? ts->high_priority_loop_count_ : ts->loop_count_) / ts->granularity_;
+ i < (j == 0 ? this->ts_->high_priority_loop_count_ : this->ts_->loop_count_) / this->ts_->granularity_;
i ++)
{
ACE_OS::sprintf(buffer+strlen(buffer),
@@ -148,7 +247,7 @@ output_latency (Task_State *ts)
#else
"\t%f\n",
#endif /* !CHORUS */
- ts->global_jitter_array_[j][i]);
+ this->ts_->global_jitter_array_[j][i]);
fputs (buffer, latency_file_handle);
buffer[0]=0;
}
@@ -157,299 +256,107 @@ output_latency (Task_State *ts)
ACE_OS::fclose (latency_file_handle);
}
-int
-start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager)
+// Mechanism to distribute the available priorities among the
+// threads when there are not enough different priorities for all
+// threads.
+void
+Client_i::init_low_priority (void)
{
- char high_thread_args[BUFSIZ];
-
- static char hostname[BUFSIZ];
-
- if (ACE_OS::hostname (hostname, BUFSIZ) != 0)
- ACE_ERROR_RETURN ((LM_ERROR,
- "%p\n",
- "hostname"),
- -1);
- ACE_OS::sprintf (high_thread_args,
- "-ORBport %d "
- "-ORBhost %s "
- "-ORBobjrefstyle URL "
- "-ORBsndsock 32768 "
- "-ORBrcvsock 32768 ",
- ACE_DEFAULT_SERVER_PORT,
- hostname);
-
- Cubit_Task *high_priority_task;
+ if (this->ts_->use_multiple_priority_ == 1)
+ this->low_priority_ =
+      this->priority_.get_low_priority (this->num_low_priority_,
+                                        this->high_priority_,
+ 1);
+ else
+ this->low_priority_ =
+      this->priority_.get_low_priority (this->num_low_priority_,
+                                        this->high_priority_,
+ 0);
+ this->num_priorities_ = this->priority_.number_of_priorities ();
+ this->grain_ = this->priority_.grain ();
+ this->counter_ = 0;
+}
- ACE_NEW_RETURN (high_priority_task,
- Cubit_Task ((const char *)high_thread_args,
- (const char *)"internet",
- (u_int) 1,
- &thread_manager,
- (u_int) 0), //task id 0.
- -1);
+void
+Client_i::calc_util_time (void)
+{
+ MT_Cubit_Timer timer (ACE_ONE_SECOND_IN_MSECS);
+  // Time the utilization thread's "computation" to get %IdleCPU at the end of the test.
- // @@ Naga, can you please generalize this #ifdef so that it doesn't
- // go into the code, but goes into a header file or inline function
- // or something instead?!
-#if defined (VXWORKS)
- ACE_Sched_Priority priority = ACE_THR_PRI_FIFO_DEF;
-#elif defined (ACE_WIN32)
- ACE_Sched_Priority priority = ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
+ // execute one computation.
+ timer.start ();
+#if defined (CHORUS)
+ this->util_thread_->computation ();
+ timer.stop ();
+ this->util_task_duration_ = timer.get_elapsed ();
#else
- ACE_Sched_Priority priority = ACE_THR_PRI_FIFO_DEF + 25;
-#endif /* VXWORKS */
-
- ACE_DEBUG ((LM_DEBUG,
- "Creating servant 0 with high priority %d\n",
- priority));
-
- // Make the high priority task an active object.
- if (high_priority_task->activate (THR_BOUND | ACE_SCHED_FIFO,
- 1,
- 0,
- priority) == -1)
- {
- ACE_ERROR ((LM_ERROR,
- "(%P|%t) %p\n"
- "\thigh_priority_task->activate failed"));
- }
-
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Waiting for argument parsing\n"));
- ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, GLOBALS::instance ()->ready_mtx_,-1));
- while (!GLOBALS::instance ()->ready_)
- GLOBALS::instance ()->ready_cnd_.wait ();
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Argument parsing waiting done\n"));
-
- GLOBALS::instance ()->barrier_->wait ();
-
- ts->one_ior_ = high_priority_task->get_servant_ior (0);
-
- return 0;
+ for (u_int i = 0; i < 10000; i++)
+ this->util_thread_->computation ();
+ timer.stop ();
+ this->util_task_duration_ = timer.get_elapsed ()/10000;
+#endif /* !CHORUS */
}
-// @@ Naga, can this function be either (1) made static to the file or
-// (2) moved into a class somewhere (I prefer the latter, if
-// possible). Moreover, this function is VERY long. Can you please
-// break it up somehow?
int
-do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
- Task_State *ts)
+Client_i::activate_high_client (ACE_Thread_Manager *thread_manager)
{
- int i;
- u_int j;
-
- char *task_id = 0;
-
- ACE_High_Res_Timer timer_;
- ACE_Time_Value delta_t;
-
- timer_.start ();
-
-#if defined (VXWORKS)
- ctx = 0;
- ACE_NEW_RETURN (task_id,
- char [32],
+ ACE_NEW_RETURN (this->high_priority_client_,
+ Client (thread_manager, this->ts_, 0),
-1);
-#endif /* VXWORKS */
-
- ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) <<<<<<< starting test on %D\n"));
-
- // Stores the total number of context switches incurred by the
- // program while making CORBA requests
-#if defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)
- u_int context_switch = 0;
-#endif /* ACE_HAS_PRUSAGE_T || ACE_HAS_GETRUSAGE */
-
- double util_task_duration = 0.0;
- double total_latency = 0.0;
- double total_latency_high = 0.0;
- double total_util_task_duration = 0.0;
-
- GLOBALS::instance ()->num_of_objs = 1;
-
- ACE_Thread_Manager server_thread_manager;
-
- GLOBALS::instance ()->use_name_service = 0;
-
- for (j = 0; j < ts->argc_; j++)
- if (ACE_OS::strcmp (ts->argv_[j], "-u") == 0)
- {
- start_servant (ts, server_thread_manager);
- break;
- }
-
- // Create the clients.
- Client high_priority_client (thread_manager, ts, 0);
-
- // Create an array to hold pointers to the low priority tasks.
- Client **low_priority_client;
-
- ACE_NEW_RETURN (low_priority_client,
- Client *[ts->thread_count_],
- -1);
-
- // Hack to make sure we have something in this pointer, when
- // thread_count == 1
- low_priority_client[0] = &high_priority_client;
-
- // Create the daemon thread in its own <ACE_Thread_Manager>.
- ACE_Thread_Manager util_thr_mgr;
-
- Util_Thread util_thread (ts, &util_thr_mgr);
-
- // Time the utilization thread' "computation" to get %IdleCPU at the end of the test.
-
- // @@ Naga, can you please clean up the following code? It's VERY
- // complicated and needs to be refactored into a separate abstraction.
-#if defined (CHORUS)
- int pstartTime = 0;
- int pstopTime = 0;
- // Elapsed time will be in microseconds.
- pstartTime = pccTime1Get();
- // execute one computation.
- util_thread.computation ();
- pstopTime = pccTime1Get();
- // Store the time in micro-seconds.
- util_task_duration = pstopTime - pstartTime;
-#else /* CHORUS */
- // Elapsed time will be in microseconds.
- timer_.start ();
- // execute computations.
- for (i = 0; i < 10000; i++)
- util_thread.computation ();
- timer_.stop ();
- timer_.elapsed_time (delta_t);
- // Store the time in milli-seconds.
- util_task_duration = (delta_t.sec () *
- ACE_ONE_SECOND_IN_MSECS +
- (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS) / 10000;
-#endif /* !CHORUS */
-
- // The thread priority
- ACE_Sched_Priority priority;
#if defined (VXWORKS)
// Set a task_id string starting with "@", so we are able to
// accurately count the number of context switches.
- strcpy (task_id, "@High");
+ strcpy (this->task_id_, "@High");
#endif /* VXWORKS */
- // @@ Naga, again, this code is repeated from earlier. Can you
- // please factor this out somehow?!
- // Now activate the high priority client.
-#if defined (VXWORKS)
- priority = ACE_THR_PRI_FIFO_DEF;
-#elif defined (ACE_WIN32)
- priority = ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
-#else /* ! VXWORKS */
- priority = ACE_THR_PRI_FIFO_DEF + 25;
-#endif /* ! ACE_WIN32 */
+ this->high_priority_ = this->priority_.get_high_priority ();
ACE_DEBUG ((LM_DEBUG,
"Creating 1 client with high priority of %d\n",
- priority));
-
- if (high_priority_client.activate (THR_BOUND | ACE_SCHED_FIFO,
- 1,
- 0,
- priority,
- -1,
- 0,
- 0,
- 0,
- 0) == -1)
- // (ACE_thread_t*)task_id) == -1)
- ACE_ERROR ((LM_ERROR,
- "%p; priority is %d\n",
- "activate failed",
- priority));
-
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Waiting for argument parsing\n"));
- ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, ts->ready_mtx_,-1));
- while (!ts->ready_)
- ts->ready_cnd_.wait ();
- ACE_DEBUG ((LM_DEBUG,
- "(%t) Argument parsing waiting done\n"));
-
- u_int number_of_low_priority_client = 0;
- u_int number_of_priorities = 0;
- u_int grain = 0;
- u_int counter = 0;
-
- number_of_low_priority_client = ts->thread_count_ - 1;
-
- // Mechanism to distribute the available priorities among the
- // threads when there are not enough different priorities for all
- // threads.
- if (ts->use_multiple_priority_ == 1)
- {
- ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
- number_of_priorities = 0;
-
- while (priority_iterator.more ())
- {
- number_of_priorities ++;
- priority_iterator.next ();
- }
-
- // 1 priority is exclusive for the high priority client.
- number_of_priorities --;
-
- // if utilization thread is present, reduce in 1 the available
- // priorities.
- if (ts->use_utilization_test_ == 1)
- number_of_priorities --;
+ this->high_priority_));
+
+ if (this->high_priority_client_->activate (THR_BOUND | ACE_SCHED_FIFO,
+ 1,
+ 0,
+ this->high_priority_,
+ -1,
+ 0,
+ 0,
+ 0,
+ 0) == -1)
+ // (ACE_thread_t*)this->task_id_) == -1)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "%p; priority is %d\n",
+ "activate failed",
+ this->high_priority_),
+ -1);
- // Drop the priority, so that the priority of clients will
- // increase with increasing client number.
- for (j = 0; j < number_of_low_priority_client; j++)
- priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
+ return 0;
+}
- // If the lowest priority of the "low priority clients" is the
- // minimum, and we are running the utilization thread, increment
- // the priority, since we don't want the utlization thread and a
- // "low priority thread" to have the same priority.
- if (priority == ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD)
- && ts->use_utilization_test_ == 1)
- priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
-
- // Granularity of the assignment of the priorities. Some OSs
- // have fewer levels of priorities than we have threads in our
- // test, so with this mechanism we assign priorities to groups
- // of threads when there are more threads than priorities.
- grain = number_of_low_priority_client / number_of_priorities;
- counter = 0;
-
- if (grain <= 0)
- grain = 1;
- }
- else
- // Drop the priority one level.
- priority = ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- priority,
- ACE_SCOPE_THREAD);
+int
+Client_i::activate_low_client (ACE_Thread_Manager *thread_manager)
+{
+ ACE_NEW_RETURN (this->low_priority_client_,
+ Client *[this->ts_->thread_count_],
+ -1);
+ // Hack to make sure we have something in this pointer, when
+ // thread_count == 1
+ this->low_priority_client_[0] = this->high_priority_client_;
+ this->num_low_priority_ = this->ts_->thread_count_ - 1;
+ // set the priority for the low priority threads.
+ this->init_low_priority ();
ACE_DEBUG ((LM_DEBUG,
"Creating %d clients at priority %d\n",
- ts->thread_count_ - 1,
- priority));
+ this->ts_->thread_count_ - 1,
+ this->low_priority_));
- for (i = number_of_low_priority_client; i > 0; i--)
+ for (u_int i = this->num_low_priority_; i > 0; i--)
{
- ACE_NEW_RETURN (low_priority_client [i - 1],
- Client (thread_manager, ts, i),
+ ACE_NEW_RETURN (this->low_priority_client_ [i - 1],
+ Client (thread_manager, this->ts_, i),
-1);
#if defined (VXWORKS)
@@ -459,150 +366,104 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
// Set a task_id string startiing with "@", so we are able to
// accurately count the number of context switches on VXWORKS
- sprintf (task_id, "@Low%u", i);
+ sprintf (this->task_id_, "@Low%u", i);
#endif /* VXWORKS */
ACE_DEBUG ((LM_DEBUG,
"Creating client with thread ID %d and priority %d\n",
i,
- priority));
+ this->low_priority_));
// The first thread starts at the lowest priority of all the low
// priority clients.
- if (low_priority_client[i - 1]->activate (THR_BOUND | ACE_SCHED_FIFO,
+ if (this->low_priority_client_[i - 1]->activate (THR_BOUND | ACE_SCHED_FIFO,
1,
0,
- priority, // These are constructor defaults.
+ this->low_priority_, // These are constructor defaults.
-1, // int grp_id = -1,
0, // ACE_Task_Base *task = 0,
0, // ACE_hthread_t thread_handles[] = 0,
0, // void *stack[] = 0,
0, // size_t stack_size[] = 0,
- (ACE_thread_t*)task_id) == -1)
+ (ACE_thread_t*)this->task_id_) == -1)
ACE_ERROR ((LM_ERROR,
"%p; priority is %d\n",
"activate failed",
- priority));
+ this->low_priority_));
- if (ts->use_multiple_priority_ == 1)
+ if (this->ts_->use_multiple_priority_ == 1)
{
- counter = (counter + 1) % grain;
- if (counter == 0 &&
+ this->counter_ = (this->counter_ + 1) % this->grain_;
+ if (this->counter_ == 0 &&
// Just so when we distribute the priorities among the
// threads, we make sure we don't go overboard.
- number_of_priorities * grain > number_of_low_priority_client - (i - 1))
+ this->num_priorities_ * this->grain_ > this->num_low_priority_ - (i - 1))
// Get the next higher priority.
- priority = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
- priority,
+ this->low_priority_ = ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
+ this->low_priority_,
ACE_SCOPE_THREAD);
}
} /* end of for () */
+  return 0;
+}
- if (ts->use_utilization_test_ == 1)
+int
+Client_i::activate_util_thread (void)
+{
+ ACE_NEW_RETURN (this->util_thread_,
+ Util_Thread (this->ts_, &this->util_thread_manager_),
+ -1);
+
+  // Time the utilization thread's "computation" to get %IdleCPU at the end of the test.
+ this->calc_util_time ();
+
+ if (this->ts_->use_utilization_test_ == 1)
// Activate the utilization thread only if specified. See
// description of this variable in header file.
{
- priority =
+ this->low_priority_ =
ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
ACE_SCOPE_THREAD);
ACE_DEBUG ((LM_DEBUG,
"Creating utilization thread with priority of %d\n",
- priority));
+ this->low_priority_));
// Activate the Utilization thread. It will wait until all
// threads have finished binding.
- util_thread.activate (THR_BOUND | ACE_SCHED_FIFO,
- 1,
- 0,
- priority);
+ this->util_thread_->activate (THR_BOUND | ACE_SCHED_FIFO,
+ 1,
+ 0,
+ this->low_priority_);
}
else
- util_thread.close ();
-
- // Wait for all the client threads to be initialized before going
- // any further.
- ts->barrier_->wait ();
-
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
- quantify_clear_data ();
-#endif /* NO_ACE_QUANTIFY */
-
-#if (defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)) && !defined (ACE_WIN32)
- ACE_Profile_Timer timer_for_context_switch;
- ACE_Profile_Timer::Rusage usage;
-
- if (ts->context_switch_test_ == 1)
- {
- timer_for_context_switch.start ();
- timer_for_context_switch.get_rusage (usage);
-# if defined (ACE_HAS_PRUSAGE_T)
- context_switch = usage.pr_vctx + usage.pr_ictx;
-# else /* ACE_HAS_PRUSAGE_T */
- context_switch = usage.ru_nvcsw + usage.ru_nivcsw;
-# endif /* ACE_HAS_GETRUSAGE */
- }
-#endif /* ACE_HAS_PRUSAGE_T || ACE_HAS_GETRUSAGE */
-
-#if defined (VXWORKS)
- if (ts->context_switch_test_ == 1)
- {
- ACE_DEBUG ((LM_DEBUG,
- "Adding the context switch hook!\n"));
- taskSwitchHookAdd ((FUNCPTR) &switchHook);
- }
-#endif /* VXWORKS */
-
- // Wait for all the client threads to exit (except the utilization
- // thread).
- thread_manager->wait ();
-
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
-#endif /* NO_ACE_QUANTIFY */
-
- ACE_DEBUG ((LM_DEBUG,
- "(%P|%t) >>>>>>> ending test on %D\n"));
-
- timer_.stop ();
- timer_.elapsed_time (delta_t);
-
- if (ts->use_utilization_test_ == 1)
- // Signal the utilization thread to finish with its work.. only
- // if utilization test was specified. See description of this
- // variable in header file.
- {
- util_thread.done_ = 1;
-
- // This will wait for the utilization thread to finish.
- util_thr_mgr.wait ();
- }
-
- ACE_DEBUG ((LM_DEBUG,
- "-------------------------- Stats -------------------------------\n"));
+ this->util_thread_->close ();
+ return 0;
+}
- if (ts->context_switch_test_ == 1)
+void
+Client_i::print_context_stats (void)
+{
+ if (this->ts_->context_switch_test_ == 1)
{
#if defined (ACE_HAS_PRUSAGE_T)
- timer_for_context_switch.stop ();
- timer_for_context_switch.get_rusage (usage);
+ this->timer_for_context_switch.stop ();
+ this->timer_for_context_switch.get_rusage (this->usage);
// Add up the voluntary context switches & involuntary context
// switches
- context_switch = usage.pr_vctx + usage.pr_ictx - context_switch;
+ this->context_switch_ = this->usage.pr_vctx + this->usage.pr_ictx - this->context_switch_;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
- usage.pr_vctx,
- usage.pr_ictx));
+ this->usage.pr_vctx,
+ this->usage.pr_ictx));
#elif defined (ACE_HAS_GETRUSAGE) && !defined (ACE_WIN32)
- timer_for_context_switch.stop ();
- timer_for_context_switch.get_rusage (usage);
+ this->timer_for_context_switch.stop ();
+ this->timer_for_context_switch.get_rusage (this->usage);
// Add up the voluntary context switches & involuntary context
// switches
- context_switch = usage.ru_nvcsw + usage.ru_nivcsw - context_switch;
+ this->context_switch_ = this->usage.ru_nvcsw + this->usage.ru_nivcsw - this->context_switch_;
ACE_DEBUG ((LM_DEBUG,
"Voluntary context switches=%d, Involuntary context switches=%d\n",
- usage.ru_nvcsw,
- usage.ru_nivcsw));
+ this->usage.ru_nvcsw,
+ this->usage.ru_nivcsw));
#elif defined (VXWORKS) /* ACE_HAS_GETRUSAGE */
taskSwitchHookDelete ((FUNCPTR)&switchHook);
ACE_DEBUG ((LM_DEBUG,
@@ -610,111 +471,266 @@ do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
ctx));
#endif /* ACE_HAS_PRUSAGE_T */
}
+}
+void
+Client_i::print_latency_stats (void)
+{
   // If running the utilization test, don't report latency or jitter.
- if (ts->use_utilization_test_ == 0)
+ if (this->ts_->use_utilization_test_ == 0)
{
#if defined (VXWORKS)
ACE_DEBUG ((LM_DEBUG, "Test done.\n"
"High priority client latency : %f msec, jitter: %f msec\n"
"Low priority client latency : %f msec, jitter: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter ()));
+ this->high_priority_client_->get_high_priority_latency (),
+ this->high_priority_client_->get_high_priority_jitter (),
+ this->low_priority_client_[0]->get_low_priority_latency (),
+ this->low_priority_client_[0]->get_low_priority_jitter ()));
       // Output the latency values to a file, tab separated, so they
       // can be imported into Excel to calculate jitter until we come
       // up with the sqrt() function.
- output_latency (ts);
+ output_latency (this->ts_);
#elif defined (CHORUS)
ACE_DEBUG ((LM_DEBUG,
"Test done.\n"
"High priority client latency : %u usec\n"
"Low priority client latency : %u usec\n",
- high_priority_client.get_high_priority_latency (),
- low_priority_client[0]->get_low_priority_latency () ));
+ this->high_priority_client_->get_high_priority_latency (),
+ this->low_priority_client_[0]->get_low_priority_latency () ));
       // Output the latency values to a file, tab separated, so they
       // can be imported into Excel to calculate jitter until we come
       // up with the sqrt() function.
- output_latency (ts);
+ output_latency (this->ts_);
#else /* !CHORUS */
ACE_DEBUG ((LM_DEBUG, "Test done.\n"
"High priority client latency : %f msec, jitter: %f msec\n"
"Low priority client latency : %f msec, jitter: %f msec\n",
- high_priority_client.get_high_priority_latency (),
- high_priority_client.get_high_priority_jitter (),
- low_priority_client[0]->get_low_priority_latency (),
- low_priority_client[0]->get_low_priority_jitter ()));
- // output_latency (ts);
+ this->high_priority_client_->get_high_priority_latency (),
+ this->high_priority_client_->get_high_priority_jitter (),
+ this->low_priority_client_[0]->get_low_priority_latency (),
+ this->low_priority_client_[0]->get_low_priority_jitter ()));
+ // output_latency (this->ts_);
#endif /* !VXWORKS && !CHORUS */
}
+}
- if (ts->use_utilization_test_ == 1)
+void
+Client_i::print_util_stats (void)
+{
+ if (this->ts_->use_utilization_test_ == 1)
{
- total_util_task_duration =
- util_task_duration * util_thread.get_number_of_computations ();
+ this->total_util_task_duration_ =
+ this->util_task_duration_ * this->util_thread_->get_number_of_computations ();
- total_latency = (delta_t.sec () *
+ this->total_latency_ = (this->delta_.sec () *
ACE_ONE_SECOND_IN_MSECS +
- (double)delta_t.usec () / ACE_ONE_SECOND_IN_MSECS);
+ (ACE_timer_t)this->delta_.usec () / ACE_ONE_SECOND_IN_MSECS);
- total_latency_high =
- total_latency - total_util_task_duration;
+ this->total_latency_high_ =
+ this->total_latency_ - this->total_util_task_duration_;
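+      // Worked example (editorial, hypothetical numbers): if the test
+      // ran for a total_latency_ of 10000 and the utilization thread's
+      // computations account for a total_util_task_duration_ of 8000
+      // in the same units, then total_latency_high_ is 2000, i.e. 20%
+      // ORB client CPU utilization and 80% idle time.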
// Calc and print the CPU percentage. I add 0.5 to round to the
// nearest integer before casting it to int.
ACE_DEBUG ((LM_DEBUG,
"\t%% ORB Client CPU utilization: %u %%\n"
"\t%% Idle time: %u %%\n\n",
- (u_int) (total_latency_high * 100 / total_latency + 0.5),
- (u_int) (total_util_task_duration * 100 / total_latency + 0.5) ));
+ (u_int) (this->total_latency_high_ * 100 / this->total_latency_ + 0.5),
+ (u_int) (this->total_util_task_duration_ * 100 / this->total_latency_ + 0.5) ));
ACE_DEBUG ((LM_DEBUG,
"(%t) UTILIZATION task performed \t%u computations\n"
"(%t) CLIENT task performed \t\t%u CORBA calls\n"
"(%t) Utilization test time is \t\t%f seconds\n"
"\t Ratio of computations to CORBA calls is %u.%u:1\n\n",
- util_thread.get_number_of_computations (),
- ts->loop_count_,
- ts->util_test_time_,
- util_thread.get_number_of_computations () / ts->loop_count_,
- (util_thread.get_number_of_computations () % ts->loop_count_) * 100 / ts->loop_count_
+ this->util_thread_->get_number_of_computations (),
+ this->ts_->loop_count_,
+ this->ts_->util_test_time_,
+ this->util_thread_->get_number_of_computations () / this->ts_->loop_count_,
+ (this->util_thread_->get_number_of_computations () % this->ts_->loop_count_) * 100 / this->ts_->loop_count_
));
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %u usecs\n",
- util_task_duration));
-#else
- ACE_DEBUG ((LM_DEBUG,
- "(%t) utilization computation time is %f msecs\n",
- util_task_duration));
-#endif /* ! ACE_LACKS_FLOATING_POINT */
+ "(%t) utilization computation time is %A usecs\n",
+ this->util_task_duration_));
+ }
+}
+
+void
+Client_i::print_priority_inversion_stats (void)
+{
+ this->print_context_stats ();
+ this->print_latency_stats ();
+ this->print_util_stats ();
+}
+
+int
+Client_i::start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager)
+{
+ char high_thread_args[BUFSIZ];
+
+ static char hostname[BUFSIZ];
+
+ if (ACE_OS::hostname (hostname, BUFSIZ) != 0)
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "%p\n",
+ "hostname"),
+ -1);
+ ACE_OS::sprintf (high_thread_args,
+ "-ORBport %d "
+ "-ORBhost %s "
+ "-ORBobjrefstyle URL "
+ "-ORBsndsock 32768 "
+ "-ORBrcvsock 32768 ",
+ ACE_DEFAULT_SERVER_PORT,
+ hostname);
+
+ Cubit_Task *high_priority_task;
+
+ ACE_NEW_RETURN (high_priority_task,
+ Cubit_Task ((const char *)high_thread_args,
+ (const char *)"internet",
+ (u_int) 1,
+ &thread_manager,
+ (u_int) 0), //task id 0.
+ -1);
+
+ this->high_priority_ = this->priority_.get_high_priority ();
+
+ ACE_DEBUG ((LM_DEBUG,
+ "Creating servant 0 with high priority %d\n",
+ this->high_priority_));
+
+ // Make the high priority task an active object.
+ if (high_priority_task->activate (THR_BOUND | ACE_SCHED_FIFO,
+ 1,
+ 0,
+ this->high_priority_) == -1)
+ {
+ ACE_ERROR ((LM_ERROR,
+ "(%P|%t) %p\n"
+ "\thigh_priority_task->activate failed"));
+ }
+
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Waiting for argument parsing\n"));
+ ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, GLOBALS::instance ()->ready_mtx_,-1));
+ while (!GLOBALS::instance ()->ready_)
+ GLOBALS::instance ()->ready_cnd_.wait ();
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Argument parsing waiting done\n"));
+
+ GLOBALS::instance ()->barrier_->wait ();
+
+ this->ts_->one_ior_ = high_priority_task->get_servant_ior (0);
+
+ return 0;
+}
+
+int
+Client_i::do_priority_inversion_test (ACE_Thread_Manager *thread_manager)
+{
+  u_int j;
+  int result;
+
+ this->timer_.start ();
+#if defined (VXWORKS)
+ ctx = 0;
+ ACE_NEW_RETURN (this->task_id_,
+ char [32],
+ -1);
+#endif /* VXWORKS */
+ ACE_DEBUG ((LM_DEBUG,
+ "(%P|%t) <<<<<<< starting test on %D\n"));
+ GLOBALS::instance ()->num_of_objs = 1;
+ ACE_Thread_Manager server_thread_manager;
+ GLOBALS::instance ()->use_name_service = 0;
+ for (j = 0; j < this->ts_->argc_; j++)
+ if (ACE_OS::strcmp (this->ts_->argv_[j], "-u") == 0)
+ {
+ start_servant (this->ts_, server_thread_manager);
+ break;
+ }
+ // Create the clients.
+ result = this->activate_high_client (thread_manager);
+ if (result < 0)
+ return result;
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Waiting for argument parsing\n"));
+ ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, this->ts_->ready_mtx_,-1));
+ while (!this->ts_->ready_)
+ this->ts_->ready_cnd_.wait ();
+ ACE_DEBUG ((LM_DEBUG,
+ "(%t) Argument parsing waiting done\n"));
+ result = this->activate_low_client (thread_manager);
+ if (result < 0)
+ return result;
+ result = this->activate_util_thread ();
+ if (result < 0)
+ return result;
+ // Wait for all the client threads to be initialized before going
+ // any further.
+ this->ts_->barrier_->wait ();
+
+ STOP_QUANTIFY;
+ CLEAR_QUANTIFY;
+#if (defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)) && !defined (ACE_WIN32)
+
+ if (this->ts_->context_switch_test_ == 1)
+ {
+ this->timer_for_context_switch.start ();
+ this->timer_for_context_switch.get_rusage (this->usage);
+# if defined (ACE_HAS_PRUSAGE_T)
+ this->context_switch_ = this->usage.pr_vctx + this->usage.pr_ictx;
+# else /* ACE_HAS_PRUSAGE_T */
+ this->context_switch_ = this->usage.ru_nvcsw + this->usage.ru_nivcsw;
+# endif /* ACE_HAS_GETRUSAGE */
}
+#endif /* ACE_HAS_PRUSAGE_T || ACE_HAS_GETRUSAGE */
#if defined (VXWORKS)
- delete task_id;
+ if (this->ts_->context_switch_test_ == 1)
+ {
+ ACE_DEBUG ((LM_DEBUG,
+ "Adding the context switch hook!\n"));
+ taskSwitchHookAdd ((FUNCPTR) &switchHook);
+ }
#endif /* VXWORKS */
- // Delete the low priority task array
- for (i = number_of_low_priority_client; i > 0; i--)
- delete low_priority_client [i - 1];
+ // Wait for all the client threads to exit (except the utilization
+ // thread).
+ thread_manager->wait ();
+ STOP_QUANTIFY;
+ ACE_DEBUG ((LM_DEBUG,
+ "(%P|%t) >>>>>>> ending test on %D\n"));
+ this->timer_.stop ();
+ this->timer_.elapsed_time (this->delta_);
+
+ if (this->ts_->use_utilization_test_ == 1)
+    // Signal the utilization thread to finish with its work, but only
+    // if the utilization test was specified. See the description of
+    // this variable in the header file.
+ {
+ this->util_thread_->done_ = 1;
- delete [] low_priority_client;
+ // This will wait for the utilization thread to finish.
+ this->util_thread_manager_.wait ();
+ }
+
+ ACE_DEBUG ((LM_DEBUG,
+ "-------------------------- Stats -------------------------------\n"));
+
+ this->print_priority_inversion_stats ();
return 0;
}
-// @@ Naga, can you please either (1) make this a static or (2) move
-// it into a class?!
int
-do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
- Task_State *ts)
+Client_i::do_thread_per_rate_test (ACE_Thread_Manager *thread_manager)
{
- Client CB_20Hz_client (thread_manager, ts, CB_20HZ_CONSUMER);
- Client CB_10Hz_client (thread_manager, ts, CB_10HZ_CONSUMER);
- Client CB_5Hz_client (thread_manager, ts, CB_5HZ_CONSUMER);
- Client CB_1Hz_client (thread_manager, ts, CB_1HZ_CONSUMER);
+ Client CB_20Hz_client (thread_manager, this->ts_, CB_20HZ_CONSUMER);
+ Client CB_10Hz_client (thread_manager, this->ts_, CB_10HZ_CONSUMER);
+ Client CB_5Hz_client (thread_manager, this->ts_, CB_5HZ_CONSUMER);
+ Client CB_1Hz_client (thread_manager, this->ts_, CB_1HZ_CONSUMER);
ACE_Sched_Priority priority;
@@ -743,9 +759,9 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
ACE_DEBUG ((LM_DEBUG,
"(%t) Waiting for argument parsing\n"));
- ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, ts->ready_mtx_,-1));
- while (!ts->ready_)
- ts->ready_cnd_.wait ();
+ ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ready_mon, this->ts_->ready_mtx_,-1));
+ while (!this->ts_->ready_)
+ this->ts_->ready_cnd_.wait ();
ACE_DEBUG ((LM_DEBUG,
"(%t) Argument parsing waiting done\n"));
@@ -795,29 +811,16 @@ do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
// Wait for all the threads to exit.
thread_manager->wait ();
-#if defined (ACE_LACKS_FLOATING_POINT)
ACE_DEBUG ((LM_DEBUG,
"Test done.\n"
- "20Hz client latency : %u usec, jitter: %u usec\n"
- "10Hz client latency : %u usec, jitter: %u usec\n"
- "5Hz client latency : %u usec, jitter: %u usec\n"
- "1Hz client latency : %u usec, jitter: %u usec\n",
+ "20Hz client latency : %A usec, jitter: %A usec\n"
+ "10Hz client latency : %A usec, jitter: %A usec\n"
+ "5Hz client latency : %A usec, jitter: %A usec\n"
+ "1Hz client latency : %A usec, jitter: %A usec\n",
CB_20Hz_client.get_latency (0), CB_20Hz_client.get_jitter (0),
CB_10Hz_client.get_latency (1), CB_10Hz_client.get_jitter (1),
CB_5Hz_client.get_latency (2), CB_5Hz_client.get_jitter (2),
CB_1Hz_client.get_latency (3), CB_1Hz_client.get_jitter (3) ));
-#else
- ACE_DEBUG ((LM_DEBUG,
- "Test done.\n"
- "20Hz client latency : %f msec, jitter: %f msec\n"
- "10Hz client latency : %f msec, jitter: %f msec\n"
- "5Hz client latency : %f msec, jitter: %f msec\n"
- "1Hz client latency : %f msec, jitter: %f msec\n",
- CB_20Hz_client.get_latency (0), CB_20Hz_client.get_jitter (0),
- CB_10Hz_client.get_latency (1), CB_10Hz_client.get_jitter (1),
- CB_5Hz_client.get_latency (2), CB_5Hz_client.get_jitter (2),
- CB_1Hz_client.get_latency (3), CB_1Hz_client.get_jitter (3) ));
-#endif /* ! ACE_LACKS_FLOATING_POINT */
return 0;
}
@@ -838,92 +841,15 @@ int
main (int argc, char *argv[])
{
#endif /* VXWORKS */
+ int result;
+ Client_i client;
-#if defined (ACE_HAS_THREADS)
-#if defined (FORCE_ARGS)
- int argc = 4;
- char *argv[] = {"client",
- "-s",
- "-f",
- "ior.txt"};
-#endif /* defined (FORCE_ARGS) */
-
- // Enable FIFO scheduling, e.g., RT scheduling class on Solaris.
- if (ACE_OS::sched_params (
- ACE_Sched_Params (
- ACE_SCHED_FIFO,
-#if defined (__Lynx__)
- 30,
-#elif defined (VXWORKS) /* ! __Lynx__ */
- 6,
-#elif defined (ACE_WIN32)
- ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD),
-#else
- ACE_THR_PRI_FIFO_DEF + 25,
-#endif /* ! __Lynx__ */
- ACE_SCOPE_PROCESS)) != 0)
- {
- if (ACE_OS::last_error () == EPERM)
- ACE_DEBUG ((LM_MAX, "preempt: user is not superuser, "
- "so remain in time-sharing class\n"));
- else
- ACE_ERROR_RETURN ((LM_ERROR, "%n: ACE_OS::sched_params failed\n%a"),
- -1);
- }
-
- ACE_High_Res_Timer timer_;
- ACE_Time_Value delta_t;
-
-#if 0 // this is a debug section that will be removed soon. 1/6/98
- ACE_DEBUG ((LM_MAX, "<<<<<Delay of 5 seconds>>>>>\n"));
-
- timer_.start ();
-
- const ACE_Time_Value delay (5L, 0L);
- ACE_OS::sleep (delay);
-
- timer_.stop ();
- timer_.elapsed_time (delta_t);
-
- ACE_DEBUG ((LM_DEBUG, "5secs= %u secs, %u usecs\n", delta_t.sec (), delta_t.usec ()));
-#endif
-
- initialize ();
-ACE_DEBUG ((LM_DEBUG, "argv[1]=%s\n", argv[1]));
- Task_State ts (argc, argv);
-
- // preliminary argument processing
- for (int i=0 ; i< argc; i++)
- {
- if ((ACE_OS::strcmp (argv[i],"-r") == 0))
- ts.thread_per_rate_ = 1;
- else if ((ACE_OS::strcmp (argv[i],"-t") == 0) && (i-1 < argc))
- ts.thread_count_ = ACE_OS::atoi (argv[i+1]);
- }
-
-#if defined (CHORUS)
- // start the pccTimer for chorus classix
- int pTime;
-
- // Initialize the PCC timer Chip
- pccTimerInit();
-
- if(pccTimer(PCC2_TIMER1_START,&pTime) != K_OK)
- {
- printf("pccTimer has a pending benchmark\n");
- }
-#endif /* CHORUS */
-
- // Create a separate manager for the client. This allows the use
- // of its wait () method on VxWorks, without interfering with the
- // server's (global) thread manager.
- ACE_Thread_Manager client_thread_manager;
-
- if (ts.thread_per_rate_ == 0)
- do_priority_inversion_test (&client_thread_manager, &ts);
- else
- do_thread_per_rate_test (&client_thread_manager, &ts);
+  result = client.init (argc, argv);
+ if (result < 0)
+ return result;
+
+ // run the tests.
+ client.run ();
#if defined (CHORUS)
if (pccTimer (PCC2_TIMER1_STOP, &pTime) != K_OK)
@@ -936,12 +862,6 @@ ACE_DEBUG ((LM_DEBUG, "argv[1]=%s\n", argv[1]));
int status;
ACE_OS::thr_exit (&status);
#endif /* CHORUS */
-
-#else /* !ACE_HAS_THREADS */
- ACE_DEBUG ((LM_DEBUG,
- "Test not run. This platform doesn't seem to have threads.\n"));
-#endif /* ACE_HAS_THREADS */
-
return 0;
}
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/client.h b/TAO/tests/Cubit/TAO/MT_Cubit/client.h
index 5f1f00c1626..e103cd37243 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/client.h
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/client.h
@@ -22,6 +22,7 @@
#include "cubitC.h"
#include "Task_Client.h"
#include "Util_Thread.h"
+#include "Timer.h"
#if defined (CHORUS)
#include "pccTimer.h"
@@ -33,23 +34,133 @@ extern "C" STATUS vmeDrv (void);
extern "C" STATUS vmeDevCreate (char *);
#endif /* defined (VME_DRIVER) */
-// class Client
-// {
-// public:
-// Client (void);
-// //constructor.
-// int do_priority_inversion_test (ACE_Thread_Manager *thread_manager,
-// Task_State *ts);
-// int do_thread_per_rate_test (ACE_Thread_Manager *thread_manager,
-// Task_State *ts);
-// int start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager);
-
-// void output_latency (Task_State *ts);
-
-// #if defined (VXWORKS)
-// void output_taskinfo (void);
-// #endif /* VXWORKS */
-// int initialize (void);
-// };
+class Client_i
+ :public virtual MT_Priority
+{
+public:
+ Client_i (void);
+  // constructor.
+
+ ~Client_i (void);
+ // destructor.
+
+ int init (int argc,char **argv);
+ // initialize the state of Client_i.
+
+ void run (void);
+ // run the tests.
+
+ int do_priority_inversion_test (ACE_Thread_Manager *thread_manager);
+
+ int do_thread_per_rate_test (ACE_Thread_Manager *thread_manager);
+
+ int start_servant (Task_State *ts, ACE_Thread_Manager &thread_manager);
+
+ void output_latency (Task_State *ts);
+
+#if defined (VXWORKS)
+ void output_taskinfo (void);
+#endif /* VXWORKS */
+private:
+ void init_low_priority (void);
+ // sets the priority to be used for the low priority clients.
+
+ void calc_util_time (void);
+ // calculate the time for one util computation.
+
+ int activate_high_client (ACE_Thread_Manager *thread_manager);
+ // activates the high priority client.
+
+ int activate_low_client (ACE_Thread_Manager *thread_manager);
+ // activates the low priority client.
+
+ int activate_util_thread (void);
+ // activates the utilization thread.
+
+ void print_priority_inversion_stats (void);
+ // prints the results of the tests.
+
+ void print_context_stats (void);
+ // prints the context switch results.
+
+ void print_util_stats (void);
+ // prints the utilization test results.
+
+ void print_latency_stats (void);
+ // prints the latency and jitter results.
+
+ Client *high_priority_client_;
+ // pointer to the high priority client object.
+
+ Client **low_priority_client_;
+ // array to hold pointers to the low priority tasks.
+
+ ACE_High_Res_Timer timer_;
+ // Timer for timing the tests.
+
+ MT_Priority priority_;
+ // priority helper object.
+
+ Util_Thread *util_thread_;
+ // Utilization thread.
+
+ ACE_Thread_Manager util_thread_manager_;
+ // Utilization thread manager.
+
+ ACE_timer_t util_task_duration_;
+ // time for one computation of utilization thread.
+
+ Task_State *ts_;
+ // pointer to task state.
+
+ ACE_Sched_Priority high_priority_;
+ // priority used for the high priority client.
+
+ ACE_Sched_Priority low_priority_;
+ // priority used by the low priority clients.
+
+ u_int num_low_priority_;
+ // number of low priority clients
+
+ u_int num_priorities_;
+ // number of priorities used.
+
+ u_int grain_;
+ // Granularity of the assignment of the priorities. Some OSs
+ // have fewer levels of priorities than we have threads in our
+ // test, so with this mechanism we assign priorities to groups
+ // of threads when there are more threads than priorities.
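+  // For example (editorial illustration): with 20 low priority client
+  // threads and only 5 distinct priority levels, the grain is 4 and
+  // each priority level is shared by a group of 4 threads.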
+
+ u_int counter_;
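+  // Running count of the threads assigned within the current grain_;
+  // when it wraps around to 0, the next higher priority is used.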
+
+ char *task_id_;
+ // Set a task_id string starting with "@", so we are able to
+ // accurately count the number of context switches.
+
+ ACE_Time_Value delta_;
+ // elapsed time for the latency tests.
+
+ int argc_;
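+  // Number of command-line arguments passed to init ().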
+
+ char **argv_;
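+  // Command-line arguments passed to init ().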
+
+ ACE_Thread_Manager client_thread_manager_;
+ // Create a separate manager for the client. This allows the use
+ // of its wait () method on VxWorks, without interfering with the
+ // server's (global) thread manager.
+
+ ACE_timer_t total_latency_;
+ ACE_timer_t total_latency_high_;
+ ACE_timer_t total_util_task_duration_;
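+  // Total elapsed test time, the portion attributed to the CORBA
+  // clients, and the portion spent in the utilization thread,
+  // respectively.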
+
+ u_int context_switch_;
+ // Stores the total number of context switches incurred by the
+ // program while making CORBA requests
+
+#if (defined (ACE_HAS_PRUSAGE_T) || defined (ACE_HAS_GETRUSAGE)) && !defined (ACE_WIN32)
+ ACE_Profile_Timer timer_for_context_switch;
+ ACE_Profile_Timer::Rusage usage;
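+  // Profile timer and rusage snapshot used to count the voluntary and
+  // involuntary context switches incurred during the test.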
+#endif
+};
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
index 23b572f64c0..493a65e0b60 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/cubit_i.cpp
@@ -58,9 +58,5 @@ void Cubit_i::shutdown (CORBA::Environment &)
{
ACE_DEBUG ((LM_DEBUG,
"(%t) Calling orb ()->shutdown ()\n"));
-
- // @@ Naga, can you please revise this so that it doesn't use
- // TAO-specific features? Please see how Irfan fixed IDL_Cubit's
- // shutdown () so that it wasn't TAO-specific!
this->orb_->shutdown ();
}
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp b/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp
index c5d43e3c868..230d7c0e666 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/server.cpp
@@ -33,6 +33,17 @@ char *force_argv[]=
"ior.txt"
};
+Server::Server (void)
+ :argc_ (0),
+ argv_ (0),
+ cubits_ (0),
+ high_priority_task_ (0),
+ low_priority_tasks_ (0),
+ high_argv_ (0),
+ low_argv_ (0)
+{
+}
+
int
Server::initialize (int argc, char **argv)
{
@@ -62,39 +73,24 @@ Server::initialize (int argc, char **argv)
"%n: ACE_OS::sched_params failed\n%a"),
-1);
}
+#else
+ ACE_ERROR_RETURN ((LM_ERROR,
+ "Test will not run. This platform doesn't seem to have threads.\n"),
+ -1);
+#endif /* ACE_HAS_THREADS */
+
this->argc_ = argc;
this->argv_ = argv;
-#if defined (VXWORKS)
- // @@ Naga, can you please factor this code into a separate file?!
-#if defined (VME_DRIVER)
- STATUS status = vmeDrv ();
-
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDrv()\n"));
-
- status = vmeDevCreate ("/vme");
-
- if (status != OK)
- ACE_DEBUG ((LM_DEBUG,
- "ERROR on call to vmeDevCreate()\n"));
-#endif /* defined (VME_DRIVER) */
-
-#if defined (FORCE_ARGS)
- int argc = 4;
- char *argv[] =
- {
- "server",
- "-s",
- "-f",
- "ior.txt"
- };
-#endif /* defined (FORCE_ARGS) */
-#endif /* defined (VXWORKS) */
-
- // Make sure we've got plenty of socket handles. This call will
- // use the default maximum.
+ VX_VME_INIT;
+
+#if defined (VXWORKS) && defined (FORCE_ARGS)
+ this->argc_ = 4;
+ this->argv_ = force_argv;
+#endif /* VXWORKS && FORCE_ARGS */
+
+ // Make sure we've got plenty of socket handles. This call will
+ // use the default maximum.
ACE::set_handle_limit ();
return 0;
}
@@ -127,77 +123,24 @@ Server::prelim_args_process (void)
}
void
-Server::init_high_priority (void)
-{
- // @@ Naga, here's another place where we write the same code again.
- // Please make sure that this gets factored out into a macro or an
- // inline function!
-#if defined (VXWORKS)
- this->high_priority_ = ACE_THR_PRI_FIFO_DEF;
-#elif defined (ACE_WIN32)
- this->high_priority_ =
- ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
-#else
- // @@ Naga/Sergio, why is there a "25" here? This seems like to
- // much of a "magic" number. Can you make this more "abstract?"
- this->high_priority_ = ACE_THR_PRI_FIFO_DEF + 25;
-#endif /* VXWORKS */
-
- ACE_DEBUG ((LM_DEBUG,
- "Creating servant 0 with high priority %d\n",
- this->high_priority_));
-
-}
-
-void
Server::init_low_priority (void)
{
- u_int j;
- this->num_low_priority_ =
- GLOBALS::instance ()->num_of_objs - 1;
-
ACE_Sched_Priority prev_priority = this->high_priority_;
// Drop the priority
if (GLOBALS::instance ()->thread_per_rate == 1
|| GLOBALS::instance ()->use_multiple_priority == 1)
- {
- this->num_priorities_ = 0;
-
- for (ACE_Sched_Priority_Iterator priority_iterator (ACE_SCHED_FIFO,
- ACE_SCOPE_THREAD);
- priority_iterator.more ();
- priority_iterator.next ())
- this->num_priorities_ ++;
- // 1 priority is exclusive for the high priority client.
- this->num_priorities_ --;
- // Drop the priority, so that the priority of clients will
- // increase with increasing client number.
- for (j = 0;
- j < this->num_low_priority_;
- j++)
- {
- this->low_priority_ =
- ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- prev_priority,
- ACE_SCOPE_THREAD);
- prev_priority = this->low_priority_;
- }
- // Granularity of the assignment of the priorities. Some OSs
- // have fewer levels of priorities than we have threads in our
- // test, so with this mechanism we assign priorities to groups
- // of threads when there are more threads than priorities.
- this->grain_ = this->num_low_priority_ / this->num_priorities_;
- this->counter_ = 0;
-
- if (this->grain_ <= 0)
- this->grain_ = 1;
- }
+ this->low_priority_ =
+ this->priority_.get_low_priority (this->num_low_priority_,
+ prev_priority,
+ 1);
else
this->low_priority_ =
- ACE_Sched_Params::previous_priority (ACE_SCHED_FIFO,
- prev_priority,
- ACE_SCOPE_THREAD);
+ this->priority_.get_low_priority (this->num_low_priority_,
+ prev_priority,
+ 0);
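+  // Editorial note: judging from the inlined logic this replaces, a
+  // final argument of 1 appears to ask MT_Priority to spread the low
+  // priority servants across multiple priority levels (starting from
+  // the lowest), while 0 places them all one level below the high
+  // priority servant.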
+ this->num_priorities_ = this->priority_.number_of_priorities ();
+ this->grain_ = this->priority_.grain ();
+ this->counter_ = 0;
}
// Write the ior's to a file so the client can read them.
@@ -384,7 +327,11 @@ Server::start_servants (ACE_Thread_Manager *serv_thr_mgr)
this->prelim_args_process ();
// Find the priority for the high priority servant.
- this->init_high_priority ();
+ this->high_priority_ = this->priority_.get_high_priority ();
+
+ ACE_DEBUG ((LM_DEBUG,
+ "Creating servant 0 with high priority %d\n",
+ this->high_priority_));
// activate the high priority servant task
if (this->activate_high_servant (serv_thr_mgr) < 0)
@@ -392,6 +339,9 @@ Server::start_servants (ACE_Thread_Manager *serv_thr_mgr)
"Failure in activating high priority servant\n"),
-1);
+ this->num_low_priority_ =
+ GLOBALS::instance ()->num_of_objs - 1;
+
// initialize the priority of the low priority servants.
this->init_low_priority ();
@@ -437,32 +387,20 @@ main (int argc, char *argv[])
// Create the daemon thread in its own <ACE_Thread_Manager>.
ACE_Thread_Manager servant_thread_manager;
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
- quantify_clear_data ();
- quantify_start_recording_data();
-#endif /* NO_ACE_QUANTIFY */
+ STOP_QUANTIFY;
+ CLEAR_QUANTIFY;
+ START_QUANTIFY;
if (server.start_servants (&servant_thread_manager) != 0)
ACE_ERROR_RETURN ((LM_ERROR,
"Error creating the servants\n"),
1);
-
ACE_DEBUG ((LM_DEBUG,
"Wait for all the threads to exit\n"));
-
// Wait for all the threads to exit.
servant_thread_manager.wait ();
// ACE_Thread_Manager::instance ()->wait ();
-
-#if defined (NO_ACE_QUANTIFY)
- quantify_stop_recording_data();
-#endif /* NO_ACE_QUANTIFY */
-
-#else
- ACE_DEBUG ((LM_DEBUG,
- "Test not run. This platform doesn't seem to have threads.\n"));
-#endif /* ACE_HAS_THREADS */
+ STOP_QUANTIFY;
return 0;
}
diff --git a/TAO/tests/Cubit/TAO/MT_Cubit/server.h b/TAO/tests/Cubit/TAO/MT_Cubit/server.h
index d49f144c866..97d5e6ae1ce 100644
--- a/TAO/tests/Cubit/TAO/MT_Cubit/server.h
+++ b/TAO/tests/Cubit/TAO/MT_Cubit/server.h
@@ -127,6 +127,7 @@ private:
};
class Server
+ :public virtual MT_Priority
{
// = TITLE
// A multithreaded cubit server class.
@@ -135,7 +136,9 @@ class Server
// cubit server. To use this ,call initialize and then
// start_servants method.
public:
+ Server (void);
// default constructor
+
int initialize (int argc, char **argv);
// initialize the server state.
@@ -146,9 +149,6 @@ private:
void prelim_args_process (void);
// preliminary argument processing code.
- void init_high_priority (void);
- // sets the priority of the high priority servant.
-
void init_low_priority (void);
// sets the priority to be used for the low priority servants.
@@ -181,7 +181,7 @@ private:
ACE_Sched_Priority low_priority_;
// priority used by the low priority servants.
-
+
u_int num_low_priority_;
// number of low priority servants
@@ -201,6 +201,9 @@ private:
ACE_ARGV *low_argv_;
// argv passed to the low priority servants.
+
+ MT_Priority priority_;
+ // priority helper object.
};
#endif /* SERVER_H */