-rw-r--r--  TAO/ChangeLog-98c                                    39
-rw-r--r--  TAO/orbsvcs/Naming_Service/Naming_Service.cpp        37
-rw-r--r--  TAO/orbsvcs/Naming_Service/Naming_Service.h          15
-rw-r--r--  TAO/orbsvcs/orbsvcs/Scheduler_Factory.cpp            23
-rw-r--r--  TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.cpp       274
-rw-r--r--  TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.h          19
-rw-r--r--  TAO/orbsvcs/tests/EC_Multiple/Makefile               41
-rw-r--r--  TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime1.h   25
-rw-r--r--  TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime2.h   25
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/gen_latency            35
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/gen_overhead          114
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/gen_utilization        71
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/latency.pl              8
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/run_latency            53
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/run_overhead          113
-rwxr-xr-x  TAO/orbsvcs/tests/EC_Multiple/run_utilization        82
16 files changed, 754 insertions, 220 deletions
diff --git a/TAO/ChangeLog-98c b/TAO/ChangeLog-98c
index bb5422ba862..29a3769b6fc 100644
--- a/TAO/ChangeLog-98c
+++ b/TAO/ChangeLog-98c
@@ -1,3 +1,42 @@
+Wed Apr 29 12:26:12 1998 Carlos O'Ryan <coryan@cs.wustl.edu>
+
+ * orbsvcs/tests/EC_Multiple/Makefile:
+ * orbsvcs/tests/EC_Multiple/EC_Multiple.h:
+ * orbsvcs/tests/EC_Multiple/Scheduler_Runtime1.h:
+ * orbsvcs/tests/EC_Multiple/Scheduler_Runtime2.h:
+ * orbsvcs/tests/EC_Multiple/EC_Multiple.cpp:
+ Added two precomputed schedules that are selected when the
+ argument to -s is either RUNTIME1 or RUNTIME2, but it is still
+ possible to have a global scheduler (-g) or to compute new
+ schedules (-s <anyname>).
+ The amount of work performed in the high priority threads can be
+ controlled using the -w option.
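    (For illustration only: a minimal invocation selecting one of the
    precomputed schedules might look roughly like the following sketch;
    the port, event types, and counts are placeholders borrowed from the
    run_* scripts further down in this change.

        ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 -w 10 \
                      -a 1 -b 2 -c 2 -m 1000 -i 50000

    Passing any other name to -s computes a new schedule, and -g uses a
    global scheduler instead.)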
+
+ * orbsvcs/tests/EC_Multiple/latency.pl:
+ * orbsvcs/tests/EC_Multiple/run_utilization:
+ * orbsvcs/tests/EC_Multiple/run_overhead:
+ * orbsvcs/tests/EC_Multiple/run_latency:
+ * orbsvcs/tests/EC_Multiple/gen_utilization:
+ * orbsvcs/tests/EC_Multiple/gen_overhead:
+ * orbsvcs/tests/EC_Multiple/gen_latency:
+ These scripts execute the test with different configurations
+ designed to measure latency, overhead, utilization, etc.
+ The run_* scripts execute the test; the gen_* scripts filter the
+ output and generate Encapsulated PostScript and JPEG files with
+ the results.
+ I've tried to make the file naming conventions consistent so it is
+ easier to add new scripts or tests.
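    (Sketch of the intended workflow, using the file names produced by
    the scripts added below:

        ./run_latency    # run the test; leaves LATENCY1.log and LATENCY2.log
        ./gen_latency    # filter the logs into LTC.*.data and render
                         # LTC.LCL.{eps,jpg} and LTC.RMT.{eps,jpg}
    )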
+
+ * orbsvcs/Naming_Service/Naming_Service.h:
+ * orbsvcs/Naming_Service/Naming_Service.cpp:
+ Added an option (-p) to dump the process ID into a file; this is
+ useful when running the service from a script that has to shut
+ it down.
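    (For example, a driver script could use the new option roughly the
    way run_latency below does:

        ../../Naming_Service/Naming_Service -ORBport 20000 \
            -o NameService.ior -p NameService.pid &
        # ... run the test against the Naming Service ...
        kill `cat NameService.pid`
    )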
+
+ * orbsvcs/orbsvcs/Scheduler_Factory.cpp:
+ Fixed some memory management problems, but the whole class has to
+ be revised to be more compliant.
+
Wed Apr 29 07:08:18 1998 David L. Levine <levine@cs.wustl.edu>
* tests/Cubit/TAO/MT_Cubit/client.cpp (do_priority_inversion_test):
diff --git a/TAO/orbsvcs/Naming_Service/Naming_Service.cpp b/TAO/orbsvcs/Naming_Service/Naming_Service.cpp
index 68f8b0d0edb..015959d055f 100644
--- a/TAO/orbsvcs/Naming_Service/Naming_Service.cpp
+++ b/TAO/orbsvcs/Naming_Service/Naming_Service.cpp
@@ -21,24 +21,26 @@
// Default Constructor.
Naming_Service::Naming_Service (void)
- :ior_output_file_ (0)
+ : ior_output_file_ (0),
+ pid_file_name_ (0)
{
}
// Constructor taking command-line arguments
Naming_Service::Naming_Service (int argc,
- char** argv)
- :ior_output_file_ (0)
+ char* argv[])
+ : ior_output_file_ (0),
+ pid_file_name_ (0)
{
- this->init (argc,argv);
+ this->init (argc, argv);
}
int
Naming_Service::parse_args (int argc,
- char **argv)
+ char *argv[])
{
- ACE_Get_Opt get_opts (argc,argv,"o:");
+ ACE_Get_Opt get_opts (argc,argv,"o:p:");
int c;
while ((c = get_opts ()) != -1)
@@ -51,11 +53,15 @@ Naming_Service::parse_args (int argc,
"Unable to open %s for writing: %p\n",
get_opts.optarg), -1);
break;
+ case 'p':
+ this->pid_file_name_ = get_opts.optarg;
+ break;
case '?':
default:
ACE_ERROR_RETURN ((LM_ERROR,
- "usage: %s"
- " [-o] <ior_output_file>"
+ "usage: %s "
+ "-o <ior_output_file> "
+ "-p <pid_file_name> "
"\n",
argv [0]),
-1);
@@ -66,7 +72,7 @@ Naming_Service::parse_args (int argc,
// Initialize the state of the Naming_Service object
int
Naming_Service::init (int argc,
- char** argv)
+ char* argv[])
{
int result;
@@ -110,6 +116,16 @@ Naming_Service::init (int argc,
str.in ());
ACE_OS::fclose (this->ior_output_file_);
}
+
+ if (this->pid_file_name_ != 0)
+ {
+ FILE* pidf = ACE_OS::fopen (this->pid_file_name_, "w");
+ if (pidf != 0)
+ {
+ ACE_OS::fprintf (pidf, "%d\n", ACE_OS::getpid ());
+ ACE_OS::fclose (pidf);
+ }
+ }
return 0;
}
@@ -127,9 +143,8 @@ Naming_Service::~Naming_Service (void)
{
}
-
int
-main (int argc, char ** argv)
+main (int argc, char* argv[])
{
int init_result;
diff --git a/TAO/orbsvcs/Naming_Service/Naming_Service.h b/TAO/orbsvcs/Naming_Service/Naming_Service.h
index c3414273de0..c64444ca8a9 100644
--- a/TAO/orbsvcs/Naming_Service/Naming_Service.h
+++ b/TAO/orbsvcs/Naming_Service/Naming_Service.h
@@ -26,6 +26,7 @@ class Naming_Service
// =TITLE
// Defines a class that encapsulates the implementation of a
// naming service.
+ //
// =DESCRIPTION
// This class makes use of the TAO_Naming_Server and
// TAO_ORB_Manager class to implement the Naming_Service.
@@ -34,14 +35,12 @@ public:
Naming_Service (void);
// Default Constructor.
- Naming_Service (int argc,
- char** argv);
+ Naming_Service (int argc, char* argv[]);
// Constructor taking the command-line arguments.
int
- init (int argc,
- char** argv);
+ init (int argc, char* argv[]);
// Initialize the Naming Service with the arguments.
int
@@ -52,14 +51,20 @@ public:
// Destructor.
private:
- int parse_args (int argc,char **argv);
+ int parse_args (int argc,char *argv[]);
// parses the arguments.
+
TAO_ORB_Manager orb_manager_;
// The ORB manager.
+
TAO_Naming_Server my_naming_server_;
// Naming Server instance.
+
FILE *ior_output_file_;
// File to output the Naming Service IOR.
+
+ const char *pid_file_name_;
+ // File to output the process id.
};
#endif /* _NAMING_SERVICE_H */
diff --git a/TAO/orbsvcs/orbsvcs/Scheduler_Factory.cpp b/TAO/orbsvcs/orbsvcs/Scheduler_Factory.cpp
index f1194cb7657..579f6f8a979 100644
--- a/TAO/orbsvcs/orbsvcs/Scheduler_Factory.cpp
+++ b/TAO/orbsvcs/orbsvcs/Scheduler_Factory.cpp
@@ -109,18 +109,15 @@ ACE_Scheduler_Factory::use_config (CosNaming::NamingContext_ptr naming,
TAO_TRY
{
CosNaming::Name schedule_name (1);
- schedule_name[0].id = CORBA::string_dup (name);
schedule_name.length (1);
- CORBA::Object_ptr objref =
+ schedule_name[0].id = CORBA::string_dup (name);
+ CORBA::Object_var objref =
naming->resolve (schedule_name, TAO_TRY_ENV);
TAO_CHECK_ENV;
server_ =
RtecScheduler::Scheduler::_narrow(objref, TAO_TRY_ENV);
TAO_CHECK_ENV;
-
- RtecScheduler::Scheduler::_duplicate (server_);
- TAO_CHECK_ENV;
}
TAO_CATCHANY
{
@@ -196,16 +193,22 @@ int ACE_Scheduler_Factory::dump_schedule
ACE_OS::fprintf(file, ",\n");
}
const RtecScheduler::RT_Info& info = infos[i];
+ // @@ TODO Eventually the TimeT structure will be a 64-bit
+ // unsigned int; we will have to change this dump method then.
ACE_OS::fprintf (file,
-"{ \"%s\", %d, %f, %f, %f, %d, %d, %f, %d, %d, %d, %d }",
+"{ \"%s\", %d, {%d, %d}, {%d, %d}, {%d, %d}, %d, %d, {%d, %d}, %d, %d, %d, %d }",
(const char*)info.entry_point,
info.handle,
- info.worst_case_execution_time,
- info.typical_execution_time,
- info.cached_execution_time,
+ info.worst_case_execution_time.low,
+ info.worst_case_execution_time.high,
+ info.typical_execution_time.low,
+ info.typical_execution_time.high,
+ info.cached_execution_time.low,
+ info.cached_execution_time.high,
info.period,
info.importance,
- info.quantum,
+ info.quantum.low,
+ info.quantum.high,
info.threads,
info.priority,
info.static_subpriority,
diff --git a/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.cpp b/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.cpp
index 95475c76a45..241f021487b 100644
--- a/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.cpp
+++ b/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.cpp
@@ -24,6 +24,9 @@
#include "orbsvcs/Event/Event_Channel.h"
#include "EC_Multiple.h"
+#include "Scheduler_Runtime1.h"
+#include "Scheduler_Runtime2.h"
+
Test_ECG::Test_ECG (void)
: consumer_ (this),
supplier_ (this),
@@ -38,14 +41,14 @@ Test_ECG::Test_ECG (void)
event_a_ (0),
event_b_ (0),
event_c_ (0),
- lcl_cnt_ (0),
- rmt_cnt_ (0),
- scavenger_start_ (0),
- scavenger_end_ (0),
+ lcl_count_ (0),
+ rmt_count_ (0),
scavenger_barrier_ (2),
- scavenger_cnt_ (0),
+ scavenger_count_ (0),
scavenger_priority_ (0),
- scheduling_class_ (ACE_SCHED_FIFO)
+ push_count_ (1),
+ schedule_file_ (0),
+ pid_file_name_ (0)
{
}
@@ -173,10 +176,13 @@ Test_ECG::run (int argc, char* argv[])
" local Scheduler name = <%s>\n"
" remote Scheduler name = <%s>\n"
" global scheduler = <%d>\n"
- " scavenger count = <%d>\n"
+ " scavenger work = <%d>\n"
+ " push work = <%d>\n"
" short circuit EC = <%d>\n"
" interval between events = <%d> (usecs)\n"
" message count = <%d>\n"
+ " schedule_file = <%s>\n"
+ " pid file name = <%s>\n"
" event A = <%d>\n"
" event B = <%d>\n"
" event C = <%d>\n",
@@ -185,26 +191,35 @@ Test_ECG::run (int argc, char* argv[])
this->lcl_sch_name_,
this->rmt_sch_name_?this->rmt_sch_name_:"nil",
this->global_scheduler_,
- this->scavenger_cnt_,
+ this->scavenger_count_,
+ this->push_count_,
this->short_circuit_,
this->interval_,
this->message_count_,
+ this->schedule_file_?this->schedule_file_:"nil",
+ this->pid_file_name_?this->pid_file_name_:"nil",
this->event_a_,
this->event_b_,
this->event_c_) );
print_priority_info ("Test_ECG::run (Main)");
+ if (this->pid_file_name_ != 0)
+ {
+ FILE* pid = ACE_OS::fopen (this->pid_file_name_, "w");
+ if (pid != 0)
+ {
+ ACE_OS::fprintf (pid, "%d\n", ACE_OS::getpid ());
+ ACE_OS::fclose (pid);
+ }
+ }
+
this->scavenger_priority_ =
- ACE_Sched_Params::priority_min (this->scheduling_class_);
- int next_priority =
- ACE_Sched_Params::next_priority (this->scheduling_class_,
- this->scavenger_priority_,
- ACE_SCOPE_PROCESS);
+ ACE_Sched_Params::priority_min (ACE_SCHED_FIFO);
// Enable FIFO scheduling, e.g., RT scheduling class on Solaris.
- if (ACE_OS::sched_params (ACE_Sched_Params (this->scheduling_class_,
- next_priority,
+ if (ACE_OS::sched_params (ACE_Sched_Params (ACE_SCHED_FIFO,
+ this->scavenger_priority_,
ACE_SCOPE_PROCESS)) != 0)
{
if (ACE_OS::last_error () == EPERM)
@@ -216,6 +231,10 @@ Test_ECG::run (int argc, char* argv[])
"%s: ACE_OS::sched_params failed\n", argv[0]));
}
+ int next_priority =
+ ACE_Sched_Params::next_priority (ACE_SCHED_FIFO,
+ this->scavenger_priority_,
+ ACE_SCOPE_THREAD);
if (ACE_OS::thr_setprio (next_priority) == -1)
{
ACE_ERROR ((LM_ERROR, "(%P|%t) main thr_setprio failed\n"));
@@ -223,9 +242,9 @@ Test_ECG::run (int argc, char* argv[])
print_priority_info ("Test_ECG::run (Main)");
- if (this->scavenger_cnt_ != 0)
+ if (this->scavenger_count_ != 0)
{
- if (this->activate (THR_BOUND|this->scheduling_class_,
+ if (this->activate (THR_BOUND|ACE_SCHED_FIFO,
1, 0, this->scavenger_priority_) == -1)
ACE_ERROR_RETURN ((LM_ERROR,
" (%P|%t) Cannot activate scavenger.\n"),
@@ -272,12 +291,12 @@ Test_ECG::run_short_circuit (CORBA::ORB_ptr orb,
ACE_Reactor* reactor = TAO_ORB_Core_instance ()->reactor ();
ACE_Time_Value tv (this->interval_ / ACE_ONE_SECOND_IN_USECS,
(this->interval_ % ACE_ONE_SECOND_IN_USECS));
- reactor->schedule_timer (this, 0, tv, tv);
+ reactor->schedule_timer (this, 0, tv);
this->supplier_id_ = 0;
ACE_DEBUG ((LM_DEBUG, "running short circuit test\n"));
- if (this->scavenger_cnt_ != 0)
+ if (this->scavenger_count_ != 0)
{
if (this->scavenger_barrier_.wait () == -1)
return -1;
@@ -334,19 +353,36 @@ Test_ECG::run_ec (CORBA::ORB_ptr orb,
}
else
{
- scheduler_impl =
- auto_ptr<ACE_Config_Scheduler>(new ACE_Config_Scheduler);
- if (scheduler_impl.get () == 0)
- return -1;
-
- RtecScheduler::Scheduler_var scheduler =
- scheduler_impl->_this (TAO_TRY_ENV);
- TAO_CHECK_ENV;
-
+ int config = 0;
+ RtecScheduler::Scheduler_var scheduler;
+ if (ACE_OS::strcmp (this->lcl_sch_name_, "RUNTIME1") == 0)
+ {
+ ACE_Scheduler_Factory::use_runtime
+ (sizeof(runtime_infos_1)/sizeof(runtime_infos_1[0]),
+ runtime_infos_1);
+ scheduler = RtecScheduler::Scheduler::_duplicate (ACE_Scheduler_Factory::server ());
+ }
+ else if (ACE_OS::strcmp (this->lcl_sch_name_, "RUNTIME2") == 0)
+ {
+ ACE_Scheduler_Factory::use_runtime
+ (sizeof(runtime_infos_2)/sizeof(runtime_infos_2[0]),
+ runtime_infos_2);
+ scheduler = RtecScheduler::Scheduler::_duplicate (ACE_Scheduler_Factory::server ());
+ }
+ else
+ {
+ scheduler_impl =
+ auto_ptr<ACE_Config_Scheduler>(new ACE_Config_Scheduler);
+ if (scheduler_impl.get () == 0)
+ return -1;
+ scheduler =
+ scheduler_impl->_this (TAO_TRY_ENV);
+ TAO_CHECK_ENV;
+ config = 1;
+ }
CORBA::String_var str =
orb->object_to_string (scheduler.in (), TAO_TRY_ENV);
TAO_CHECK_ENV;
-
ACE_DEBUG ((LM_DEBUG, "The (local) scheduler IOR is <%s>\n",
str.in ()));
@@ -357,9 +393,12 @@ Test_ECG::run_ec (CORBA::ORB_ptr orb,
naming_context->bind (schedule_name, scheduler.in (), TAO_TRY_ENV);
TAO_CHECK_ENV;
- if (ACE_Scheduler_Factory::use_config (naming_context.in (),
- this->lcl_sch_name_) == -1)
- return -1;
+ if (config == 1)
+ {
+ if (ACE_Scheduler_Factory::use_config (naming_context.in (),
+ this->lcl_sch_name_) == -1)
+ return -1;
+ }
}
if (ACE_Scheduler_Factory::use_config (naming_context.in (),
@@ -459,7 +498,7 @@ Test_ECG::run_ec (CORBA::ORB_ptr orb,
ACE_DEBUG ((LM_DEBUG, "connected consumer\n"));
- if (this->scavenger_cnt_ != 0)
+ if (this->scavenger_count_ != 0)
{
if (this->scavenger_barrier_.wait () == -1)
return -1;
@@ -487,6 +526,42 @@ Test_ECG::run_ec (CORBA::ORB_ptr orb,
"(%P|%t) waiting for scavenger tasks\n"), 1);
this->dump_results ();
+
+ if (this->schedule_file_ != 0)
+ {
+ RtecScheduler::RT_Info_Set_var infos;
+
+#if defined (__SUNPRO_CC)
+ // Sun C++ 4.2 warns with the code below:
+ // Warning (Anachronism): Temporary used for non-const
+ // reference, now obsolete.
+ // Note: Type "CC -migration" for more on anachronisms.
+ // Warning (Anachronism): The copy constructor for argument
+ // infos of type RtecScheduler::RT_Info_Set_out should take
+ // const RtecScheduler::RT_Info_Set_out&.
+ // But, this code is not CORBA conformant, because users should
+ // not define instances of _out types.
+
+ RtecScheduler::RT_Info_Set_out infos_out (infos);
+ ACE_Scheduler_Factory::server ()->compute_scheduling
+ (ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD),
+ ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD),
+ infos_out, TAO_TRY_ENV);
+#else /* ! __SUNPRO_CC */
+ ACE_Scheduler_Factory::server ()->compute_scheduling
+ (ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD),
+ ACE_Sched_Params::priority_max (ACE_SCHED_FIFO,
+ ACE_SCOPE_THREAD),
+ infos.out (), TAO_TRY_ENV);
+#endif /* ! __SUNPRO_CC */
+
+ TAO_CHECK_ENV;
+ ACE_Scheduler_Factory::dump_schedule (infos.in (),
+ this->schedule_file_);
+ }
}
TAO_CATCHANY
{
@@ -513,11 +588,11 @@ Test_ECG::svc (void)
if (this->scavenger_barrier_.wait () == -1)
return -1;
- this->scavenger_start_ = ACE_OS::gethrtime ();
+ this->scavenger_timer_.start ();
// ACE_DEBUG ((LM_DEBUG, "(%P|%t) starting scavenger\n"));
for (int i = 0;
- i < this->scavenger_cnt_;
+ i < this->scavenger_count_;
++i)
{
u_long n = 1279UL;
@@ -529,7 +604,7 @@ Test_ECG::svc (void)
i));
#endif
}
- this->scavenger_end_ = ACE_OS::gethrtime ();
+ this->scavenger_timer_.stop ();
ACE_DEBUG ((LM_DEBUG, "(%P|%t) scavenger finished its work\n"));
return 0;
}
@@ -568,15 +643,20 @@ Test_ECG::connect_supplier (RtecEventChannelAdmin::EventChannel_ptr local_ec,
server->create (buf, TAO_TRY_ENV);
TAO_CHECK_ENV;
+ // The worst case execution time is far less than 2
+ // milliseconds, but that is a safe estimate....
+ ACE_Time_Value tv (0, 2000);
+ TimeBase::TimeT time;
+ ORBSVCS_Time::Time_Value_to_TimeT (time, tv);
server->set (rt_info,
RtecScheduler::VERY_HIGH_CRITICALITY,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- this->interval_ * 10, // @@ Make it parametric
+ time, time, time,
+ // @@ Make it parametric; the number below is the
+ // maximum rate in the EC....
+ 25000 * 10,
RtecScheduler::VERY_LOW_IMPORTANCE,
- ORBSVCS_Time::zero,
- 1,
+ time,
+ 1,
RtecScheduler::OPERATION,
TAO_TRY_ENV);
TAO_CHECK_ENV;
@@ -636,15 +716,18 @@ Test_ECG::connect_consumer (RtecEventChannelAdmin::EventChannel_ptr local_ec,
server->create (buf, TAO_TRY_ENV);
TAO_CHECK_ENV;
+ // The worst case execution time is far less than 2
+ // milliseconds, but that is a safe estimate....
+ ACE_Time_Value tv (0, 2000);
+ TimeBase::TimeT time;
+ ORBSVCS_Time::Time_Value_to_TimeT (time, tv);
server->set (rt_info,
RtecScheduler::VERY_HIGH_CRITICALITY,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- this->interval_ * 10, // @@ Make it parametric
+ time, time, time,
+ 25000 * 10, // @@ Make it parametric
RtecScheduler::VERY_LOW_IMPORTANCE,
- ORBSVCS_Time::zero,
- 1,
+ time,
+ 0,
RtecScheduler::OPERATION,
TAO_TRY_ENV);
TAO_CHECK_ENV;
@@ -702,24 +785,36 @@ Test_ECG::connect_ecg (RtecEventChannelAdmin::EventChannel_ptr local_ec,
RtecScheduler::Scheduler_ptr local_sch =
ACE_Scheduler_Factory::server ();
+ // ECG name.
+ char ecg_name[BUFSIZ];
+ ACE_OS::strcpy (ecg_name, "ecg_");
+ ACE_OS::strcat (ecg_name, this->lcl_ec_name_);
+
// Generate its ConsumerQOS
+
+ // We could use the same name on the local and remote scheduler,
+ // but that fails when using a global scheduler.
char rmt[BUFSIZ];
- ACE_OS::strcpy (rmt, "ecp@");
+ ACE_OS::strcpy (rmt, ecg_name);
+ ACE_OS::strcat (rmt, "@");
ACE_OS::strcat (rmt, this->rmt_ec_name_);
RtecScheduler::handle_t rmt_info =
remote_sch->create (rmt, TAO_TRY_ENV);
TAO_CHECK_ENV;
+ // The worst case execution time is far less than 500 usecs, but
+ // that is a safe estimate....
+ ACE_Time_Value tv (0, 500);
+ TimeBase::TimeT time;
+ ORBSVCS_Time::Time_Value_to_TimeT (time, tv);
remote_sch->set (rmt_info,
RtecScheduler::VERY_HIGH_CRITICALITY,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- this->interval_ * 10,
+ time, time, time,
+ 25000 * 10, // @@ Make it parametric
RtecScheduler::VERY_LOW_IMPORTANCE,
- ORBSVCS_Time::zero,
- 1,
+ time,
+ 0,
RtecScheduler::OPERATION,
TAO_TRY_ENV);
TAO_CHECK_ENV;
@@ -731,8 +826,12 @@ Test_ECG::connect_ecg (RtecEventChannelAdmin::EventChannel_ptr local_ec,
consumer_qos.insert_type (ACE_ES_EVENT_SHUTDOWN, rmt_info);
// Generate its SupplierQOS
+
+ // We could use the same name on the local and remote scheduler,
+ // but that fails when using a global scheduler.
char lcl[BUFSIZ];
- ACE_OS::strcpy (lcl, "ecp@");
+ ACE_OS::strcpy (lcl, ecg_name);
+ ACE_OS::strcat (lcl, "@");
ACE_OS::strcat (lcl, this->lcl_ec_name_);
RtecScheduler::handle_t lcl_info =
@@ -741,12 +840,10 @@ Test_ECG::connect_ecg (RtecEventChannelAdmin::EventChannel_ptr local_ec,
local_sch->set (lcl_info,
RtecScheduler::VERY_HIGH_CRITICALITY,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- ORBSVCS_Time::zero,
- this->interval_ * 10,
+ time, time, time,
+ 25000 * 10,
RtecScheduler::VERY_LOW_IMPORTANCE,
- ORBSVCS_Time::zero,
+ time,
1,
RtecScheduler::OPERATION,
TAO_TRY_ENV);
@@ -865,24 +962,27 @@ Test_ECG::push (const RtecEventComm::EventSet &events,
ACE_hrtime_t nsec = r - s;
if (this->supplier_id_ == e.source_)
{
- this->lcl_time_[this->lcl_cnt_] = nsec;
- this->lcl_cnt_++;
+ this->lcl_time_[this->lcl_count_] = nsec;
+ this->lcl_count_++;
// ACE_DEBUG ((LM_DEBUG, "Latency[LOCAL]: %d\n",
// nsec));
}
else
{
- this->rmt_time_[this->rmt_cnt_] = nsec;
- this->rmt_cnt_++;
+ this->rmt_time_[this->rmt_count_] = nsec;
+ this->rmt_count_++;
// ACE_DEBUG ((LM_DEBUG, "Latency[REMOTE]: %d\n",
// nsec));
}
- // Eat a little CPU so the Utilization test can measure the
- // consumed time....
- /* takes about 40.2 usecs on a 167 MHz Ultra2 */
- u_long n = 1279UL;
- ACE::is_prime (n, 2, n / 2);
+ for (int j = 0; j < this->push_count_; ++j)
+ {
+ // Eat a little CPU so the Utilization test can measure the
+ // consumed time....
+ /* takes about 40.2 usecs on a 167 MHz Ultra2 */
+ u_long n = 1279UL;
+ ACE::is_prime (n, 2, n / 2);
+ }
}
}
this->push_timer_.stop_incr ();
@@ -968,23 +1068,25 @@ void
Test_ECG::dump_results (void)
{
int i;
- for (i = 0; i < this->lcl_cnt_; ++i)
+ for (i = 0; i < this->lcl_count_; ++i)
{
double usec = this->lcl_time_[i] / 1000.0;
ACE_DEBUG ((LM_DEBUG, "Latency[LCL]: %.3f\n", usec));
}
- for (i = 0; i < this->rmt_cnt_; ++i)
+ for (i = 0; i < this->rmt_count_; ++i)
{
double usec = this->rmt_time_[i] / 1000.0;
ACE_DEBUG ((LM_DEBUG, "Latency[RMT]: %.3f\n", usec));
}
- if (this->scavenger_cnt_ != 0)
+
+ ACE_Time_Value tv;
+ if (this->scavenger_count_ != 0)
{
- double usec = (this->scavenger_end_ - this->scavenger_start_) / 1000.0;
+ this->scavenger_timer_.elapsed_time (tv);
+ double usec = tv.sec () * ACE_ONE_SECOND_IN_USECS + tv.usec ();
ACE_DEBUG ((LM_DEBUG, "Scavenger time: %.3f\n", usec));
}
- ACE_Time_Value tv;
this->push_timer_.elapsed_time_incr (tv);
double usec = tv.sec () * ACE_ONE_SECOND_IN_USECS + tv.usec ();
ACE_DEBUG ((LM_DEBUG, "Push time: %.3f\n", usec));
@@ -993,7 +1095,7 @@ Test_ECG::dump_results (void)
int
Test_ECG::parse_args (int argc, char *argv [])
{
- ACE_Get_Opt get_opt (argc, argv, "tgxl:r:s:o:i:m:u:a:b:c:");
+ ACE_Get_Opt get_opt (argc, argv, "tgxw:p:d:l:r:s:o:i:m:u:a:b:c:");
int opt;
while ((opt = get_opt ()) != EOF)
@@ -1012,18 +1114,24 @@ Test_ECG::parse_args (int argc, char *argv [])
case 'o':
this->rmt_sch_name_ = get_opt.optarg;
break;
- case 't':
- this->scheduling_class_ = ACE_SCHED_OTHER;
- break;
case 'g':
this->global_scheduler_ = 1;
break;
case 'u':
- this->scavenger_cnt_ = ACE_OS::atoi (get_opt.optarg);
+ this->scavenger_count_ = ACE_OS::atoi (get_opt.optarg);
break;
case 'x':
this->short_circuit_ = 1;
break;
+ case 'w':
+ this->push_count_ = ACE_OS::atoi (get_opt.optarg);
+ break;
+ case 'p':
+ this->pid_file_name_ = get_opt.optarg;
+ break;
+ case 'd':
+ this->schedule_file_ = get_opt.optarg;
+ break;
case 'i':
this->interval_ = ACE_OS::atoi (get_opt.optarg);
break;
@@ -1053,8 +1161,8 @@ Test_ECG::parse_args (int argc, char *argv [])
"-s <scheduling service name> "
"-o <remote scheduling service name> "
"-x (short circuit EC) "
- "-t (run in real-time class) "
"-u <utilization test iterations> "
+ "-w <'work' iterations per push> "
"<-a event_type_a> "
"<-b event_type_b> "
"<-c event_type_c> "
@@ -1077,11 +1185,11 @@ Test_ECG::parse_args (int argc, char *argv [])
this->message_count_ = Test_ECG::DEFAULT_EVENT_COUNT;
}
- if (this->scavenger_cnt_ < 0)
+ if (this->scavenger_count_ < 0)
{
ACE_DEBUG ((LM_DEBUG,
"%s: scavenger count < 0, test disabled\n"));
- this->scavenger_cnt_ = 0;
+ this->scavenger_count_ = 0;
}
if (this->event_a_ <= 0
diff --git a/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.h b/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.h
index 593c1a2d237..2d5732262f0 100644
--- a/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.h
+++ b/TAO/orbsvcs/tests/EC_Multiple/EC_Multiple.h
@@ -173,19 +173,18 @@ private:
// different, etc.)
ACE_hrtime_t lcl_time_[Test_ECG::MAX_EVENTS];
- int lcl_cnt_;
+ int lcl_count_;
ACE_hrtime_t rmt_time_[Test_ECG::MAX_EVENTS];
- int rmt_cnt_;
+ int rmt_count_;
// Store the measurements for local and remote events..
- ACE_hrtime_t scavenger_start_;
- ACE_hrtime_t scavenger_end_;
+ ACE_High_Res_Timer scavenger_timer_;
// Measure the time it takes to run the scavenger thread.
ACE_Barrier scavenger_barrier_;
// The scavenger thread should not start until the EC is running.
- int scavenger_cnt_;
+ int scavenger_count_;
// The number of iterations to run in the scavenger thread, if 0
// then the utilization thread is not started (this is good to
// measure just latency).
@@ -197,7 +196,15 @@ private:
ACE_High_Res_Timer push_timer_;
// Measure the time spent in pushes...
- int scheduling_class_;
+ int push_count_;
+ // Number of iterations of ACE::is_prime() in the push() method.
+
+ const char* schedule_file_;
+ // Ask the schedule to compute and dump its schedule after the test
+ // execution.
+
+ const char* pid_file_name_;
+ // The name of a file where the process stores its pid
};
#endif /* EC_MULTIPLE_H */
diff --git a/TAO/orbsvcs/tests/EC_Multiple/Makefile b/TAO/orbsvcs/tests/EC_Multiple/Makefile
index 85e0fb827c6..089775de69c 100644
--- a/TAO/orbsvcs/tests/EC_Multiple/Makefile
+++ b/TAO/orbsvcs/tests/EC_Multiple/Makefile
@@ -1,18 +1,21 @@
# $Id$
BIN = EC_Multiple
-
BUILD = $(BIN)
+SRC = $(BIN:%=%$(VAR).cpp)
+LDLIBS= -lorbsvcs -lTAO
-EC_MULTIPLE_SRCS= \
- EC_Multiple.cpp
-
-LSRC= \
- $(EC_MULTIPLE_SRCS) \
+ifndef TAO_ROOT
+TAO_ROOT = $(ACE_ROOT)/TAO
+endif
-EC_MULTIPLE_OBJS = $(EC_MULTIPLE_SRCS:.cpp=.o)
+CPPFLAGS += -I$(TAO_ROOT)/orbsvcs \
+ -I$(TAO_ROOT)
-LDLIBS= -lorbsvcs -lTAO
+ifdef quantify
+ CCFLAGS += -Dquantify
+ CPPFLAGS += -I/pkg/purify/quantify-2.1-solaris2
+endif # quantify
#----------------------------------------------------------------------------
# Include macros and targets
@@ -23,34 +26,14 @@ include $(ACE_ROOT)/include/makeinclude/macros.GNU
include $(ACE_ROOT)/include/makeinclude/rules.common.GNU
include $(ACE_ROOT)/include/makeinclude/rules.nonested.GNU
include $(ACE_ROOT)/include/makeinclude/rules.local.GNU
-
-ifdef quantify
- CCFLAGS += -Dquantify
- CPPFLAGS += -I/pkg/purify/quantify-2.1-solaris2
-endif # quantify
+include $(ACE_ROOT)/include/makeinclude/rules.bin.GNU
#### Local rules and variables...
-ifndef TAO_ROOT
-TAO_ROOT = $(ACE_ROOT)/TAO
-endif
-TSS_ORB_FLAG = #-DTAO_HAS_TSS_ORBCORE
-DCFLAGS = -g
-LDFLAGS += -L$(TAO_ROOT)/orbsvcs/orbsvcs -L$(TAO_ROOT)/tao
-CPPFLAGS += -I$(TAO_ROOT)/orbsvcs -I$(TAO_ROOT) -I$(TAO_ROOT)/tao/compat $(TSS_ORB_FLAG)#-H
-
-# Leave the scheduler output out if this is a config run.
-ifeq ($(runtime),1)
-EC_MULTIPLE_CONFIG_OBJS=EC_Multiple_Scheduler_Runtime.o
-endif # runtime
-
ifeq ($(probe),1)
CCFLAGS += -DACE_ENABLE_TIMEPROBES
endif # probe
-EC_Multiple: $(addprefix $(VDIR),$(EC_MULTIPLE_OBJS) $(EC_MULTIPLE_CONFIG_OBJS))
- $(LINK.cc) $(LDFLAGS) -o $@ $^ $(VLDLIBS) $(POSTLINK)
-
#----------------------------------------------------------------------------
# Dependencies
#----------------------------------------------------------------------------
diff --git a/TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime1.h b/TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime1.h
new file mode 100644
index 00000000000..495591cca4f
--- /dev/null
+++ b/TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime1.h
@@ -0,0 +1,25 @@
+// This file was automatically generated by Scheduler_Factory
+// before editing the file please consider generating it again
+
+// $Id$
+
+#include "orbsvcs/Scheduler_Factory.h"
+
+static ACE_Scheduler_Factory::POD_RT_Info runtime_infos_1[] = {
+{ "Reactor_Task-25000.us", 1, {0, 0}, {0, 0}, {0, 0}, 250000, 0, {0, 0}, 1, 5, 0, 0 },
+{ "Reactor_Task-50000.us", 2, {0, 0}, {0, 0}, {0, 0}, 500000, 0, {0, 0}, 1, 4, 0, 0 },
+{ "Reactor_Task-100000.us", 3, {0, 0}, {0, 0}, {0, 0}, 1000000, 0, {0, 0}, 1, 3, 0, 0 },
+{ "Reactor_Task-200000.us", 4, {0, 0}, {0, 0}, {0, 0}, 2000000, 0, {0, 0}, 1, 2, 0, 0 },
+{ "Reactor_Task-1000000.us", 5, {0, 0}, {0, 0}, {0, 0}, 10000000, 0, {0, 0}, 1, 1, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-25000.us", 6, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 5, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-50000.us", 7, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 4, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-100000.us", 8, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 3, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-200000.us", 9, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 2, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-1000000.us", 10, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 1, 0, 0 },
+{ "supplier@EC1", 11, {20000, 0}, {20000, 0}, {20000, 0}, 250000, 0, {20000, 0}, 1, 5, 0, 0 },
+{ "ecg_EC2@EC1", 12, {5000, 0}, {5000, 0}, {5000, 0}, 250000, 0, {5000, 0}, 0, 5, 0, 0 },
+{ "ecg_EC1@EC1", 13, {5000, 0}, {5000, 0}, {5000, 0}, 250000, 0, {5000, 0}, 1, 5, 0, 0 },
+{ "consumer@EC1", 14, {20000, 0}, {20000, 0}, {20000, 0}, 250000, 0, {20000, 0}, 0, 5, 0, 0 }
+};
+
+// EOF
diff --git a/TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime2.h b/TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime2.h
new file mode 100644
index 00000000000..b0e8a914f78
--- /dev/null
+++ b/TAO/orbsvcs/tests/EC_Multiple/Scheduler_Runtime2.h
@@ -0,0 +1,25 @@
+// This file was automatically generated by Scheduler_Factory
+// before editing the file please consider generating it again
+
+// $Id$
+
+#include "orbsvcs/Scheduler_Factory.h"
+
+static ACE_Scheduler_Factory::POD_RT_Info runtime_infos_2[] = {
+{ "Reactor_Task-25000.us", 1, {0, 0}, {0, 0}, {0, 0}, 250000, 0, {0, 0}, 1, 5, 0, 0 },
+{ "Reactor_Task-50000.us", 2, {0, 0}, {0, 0}, {0, 0}, 500000, 0, {0, 0}, 1, 4, 0, 0 },
+{ "Reactor_Task-100000.us", 3, {0, 0}, {0, 0}, {0, 0}, 1000000, 0, {0, 0}, 1, 3, 0, 0 },
+{ "Reactor_Task-200000.us", 4, {0, 0}, {0, 0}, {0, 0}, 2000000, 0, {0, 0}, 1, 2, 0, 0 },
+{ "Reactor_Task-1000000.us", 5, {0, 0}, {0, 0}, {0, 0}, 10000000, 0, {0, 0}, 1, 1, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-25000.us", 6, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 5, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-50000.us", 7, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 4, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-100000.us", 8, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 3, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-200000.us", 9, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 2, 0, 0 },
+{ "ACE_ES_Dispatch_Queue-1000000.us", 10, {0, 0}, {0, 0}, {0, 0}, 0, 0, {0, 0}, 1, 1, 0, 0 },
+{ "supplier@EC2", 11, {20000, 0}, {20000, 0}, {20000, 0}, 250000, 0, {20000, 0}, 1, 5, 0, 0 },
+{ "ecg_EC2@EC2", 12, {5000, 0}, {5000, 0}, {5000, 0}, 250000, 0, {5000, 0}, 1, 5, 0, 0 },
+{ "ecg_EC1@EC2", 13, {5000, 0}, {5000, 0}, {5000, 0}, 250000, 0, {5000, 0}, 0, 5, 0, 0 },
+{ "consumer@EC2", 14, {20000, 0}, {20000, 0}, {20000, 0}, 250000, 0, {20000, 0}, 0, 5, 0, 0 }
+};
+
+// EOF
diff --git a/TAO/orbsvcs/tests/EC_Multiple/gen_latency b/TAO/orbsvcs/tests/EC_Multiple/gen_latency
new file mode 100755
index 00000000000..17231e02ff8
--- /dev/null
+++ b/TAO/orbsvcs/tests/EC_Multiple/gen_latency
@@ -0,0 +1,35 @@
+#!/bin/sh
+#
+# $Id$
+#
+
+./latency.pl -k LCL -r 10 LATENCY1.log LATENCY2.log >LTC.LCL.log
+tail +3 LTC.LCL.log | sort -n > LTC.LCL.data
+./latency.pl -k RMT -r 1 LATENCY1.log LATENCY2.log >LTC.RMT.log
+tail +3 LTC.RMT.log | sort -n > LTC.RMT.data
+
+gnuplot <<_EOF_
+set grid xtics ytics
+set xlabel "Time (usecs)"
+set ylabel "Relative frequency"
+
+set terminal postscript eps color
+set output "LTC.LCL.eps"
+plot 'LTC.LCL.data' w i
+set terminal x11
+plot 'LTC.LCL.data' w i
+pause 2
+
+set terminal postscript eps
+set output "LTC.RMT.eps"
+plot 'LTC.RMT.data' w i
+set terminal x11
+plot 'LTC.RMT.data' w i
+pause 2
+
+_EOF_
+
+for i in LTC.LCL LTC.RMT; do
+ gs -sDEVICE=jpeg -g640x480 -r110x110 -sNOPAUSE \
+ -sOutputFile="${i}.jpg" ${i}.eps quit.ps
+done
diff --git a/TAO/orbsvcs/tests/EC_Multiple/gen_overhead b/TAO/orbsvcs/tests/EC_Multiple/gen_overhead
new file mode 100755
index 00000000000..3f6cf50acd5
--- /dev/null
+++ b/TAO/orbsvcs/tests/EC_Multiple/gen_overhead
@@ -0,0 +1,114 @@
+#!/bin/sh
+#
+# $Id$
+#
+
+#TESTS_SOURCES="X LCL RMT1 RMT2 RPT.X RPT.LCL RPT.RMT1 RPT.RMT2"
+TESTS_SOURCES="RPT.X RPT.LCL RPT.RMT1 RPT.RMT2"
+
+#TESTS="$TEST_SOURCES RPT.RMT RMT"
+TESTS="$TEST_SOURCES RPT.RMT"
+
+for i in $TESTS_SOURCES; do
+ grep "Scavenger time" OVH.${i}.*.log |
+ sed -e "s/^OVH\.${i}\.//" -e 's/\.log:Scavenger time://' |
+ sort -n > OVH.${i}.scav.data
+ grep "Push time" OVH.${i}.*.log |
+ sed -e "s/^OVH\.${i}\.//" -e 's/\.log:Push time://' |
+ sort -n > OVH.${i}.push.data
+# | awk '{printf ("%d %.3f\n", $1, $2/$1);}'
+done
+
+paste OVH.RMT1.scav.data OVH.RMT2.scav.data |
+ awk '{printf("%d %.3f\n", $1, ($2 + $4)/2);}' > OVH.RMT.scav.data
+paste OVH.RMT1.push.data OVH.RMT2.push.data |
+ awk '{printf("%d %.3f\n", $1, ($2 + $4)/2);}' > OVH.RMT.push.data
+
+paste OVH.RPT.RMT1.scav.data OVH.RPT.RMT2.scav.data |
+ awk '{printf("%d %.3f\n", $1, ($2 + $4)/2);}' > OVH.RPT.RMT.scav.data
+paste OVH.RPT.RMT1.push.data OVH.RPT.RMT2.push.data |
+ awk '{printf("%d %.3f\n", $1, ($2 + $4)/2);}' > OVH.RPT.RMT.push.data
+
+for i in X LCL RMT RMT1 RMT2; do
+ paste OVH.${i}.scav.data OVH.${i}.push.data |
+ awk '{
+ if (NR == 1) {
+ b = $2;
+ } else {
+ printf ("%d %.3f\n", $1, ($2 - $4 - b) / $1);
+ }}' > OVH.${i}.over.data
+done
+
+for i in RPT.X RPT.LCL RPT.RMT RPT.RMT1 RPT.RMT2; do
+ paste OVH.${i}.scav.data OVH.${i}.push.data |
+ awk '{
+ printf ("%d %.3f\n", $1, ($2 - $4));
+ }' > OVH.${i}.over.data
+done
+
+for i in $TESTS; do
+ case $i in
+ X) LABEL="Number of Messages [short circuit test]"
+ ;;
+ LCL) LABEL="Number of Messages [local EC test]"
+ ;;
+ RMT) LABEL="Number of Messages [remote EC test]"
+ ;;
+ RMT1) LABEL="Number of Messages [remote EC test 1]"
+ ;;
+ RMT2) LABEL="Number of Messages [remote EC test 2]"
+ ;;
+ RPT.X) LABEL="Test Number [short circuit test]"
+ ;;
+ RPT.LCL) LABEL="Test Number [local EC test]"
+ ;;
+ RPT.RMT) LABEL="Test Number [remote EC test]"
+ ;;
+ RPT.RMT1) LABEL="Test Number [remote EC test 1]"
+ ;;
+ RPT.RMT2) LABEL="Test Number [remote EC test 2]"
+ ;;
+
+ *) LABEL="Test Number [unknown test]"
+ ;;
+ esac
+
+ gnuplot <<_EOF_
+set grid xtics ytics
+set xlabel "$LABEL"
+
+set terminal postscript eps color
+set ylabel "Time in scavenger (usecs)"
+set output "OVH.${i}.scav.eps"
+plot 'OVH.${i}.scav.data' w l
+set terminal x11
+plot 'OVH.${i}.scav.data' w l
+pause 2
+
+set terminal postscript eps
+set ylabel "Time in push (usecs)"
+set output "OVH.${i}.push.eps"
+plot 'OVH.${i}.push.data' w l
+set terminal x11
+plot 'OVH.${i}.push.data' w l
+pause 2
+
+set terminal postscript eps
+set ylabel "Overhead (usecs)"
+set output "OVH.${i}.over.eps"
+plot 'OVH.${i}.over.data' w l
+set terminal x11
+plot 'OVH.${i}.over.data' w l
+pause 2
+_EOF_
+done
+
+for i in scav push over; do
+ for j in $TESTS; do
+ gs -sDEVICE=jpeg -g640x480 -r110x110 -sNOPAUSE \
+ -sOutputFile="OVH.${j}.${i}.jpg" OVH.${j}.${i}.eps quit.ps
+ done
+done
+
+exit 0
+
diff --git a/TAO/orbsvcs/tests/EC_Multiple/gen_utilization b/TAO/orbsvcs/tests/EC_Multiple/gen_utilization
new file mode 100755
index 00000000000..b873f567395
--- /dev/null
+++ b/TAO/orbsvcs/tests/EC_Multiple/gen_utilization
@@ -0,0 +1,71 @@
+#!/bin/sh
+#
+# $Id$
+#
+
+TESTS_SOURCES="X LCL"
+TESTS="$TESTS_SOURCES"
+
+for i in $TESTS_SOURCES; do
+ grep "Scavenger time" UTL.${i}.*.log |
+ sed -e "s/^UTL\.${i}\.//" -e 's/\.log:Scavenger time://' |
+ sort -n | awk '{printf ("%d %.3f\n", $1, $2/$1);}'> UTL.${i}.scav.data
+ grep "Push time" UTL.${i}.*.log |
+ sed -e "s/^UTL\.${i}\.//" -e 's/\.log:Push time://' |
+ sort -n | awk '{printf ("%d %.3f\n", $1, $2/$1);}'> UTL.${i}.push.data
+done
+
+paste UTL.RMT1.scav.data UTL.RMT2.scav.data |
+ awk '{printf("%d %.3f\n", $1, ($2 + $4)/2);}' > UTL.RMT.scav.data
+paste UTL.RMT1.push.data UTL.RMT2.push.data |
+ awk '{printf("%d %.3f\n", $1, ($2 + $4)/2);}' > UTL.RMT.push.data
+
+for i in $TESTS; do
+ case $i in
+ X) LABEL="Number of Messages [short circuit test]"
+ ;;
+ LCL) LABEL="Number of Messages [local EC test]"
+ ;;
+ RMT) LABEL="Number of Messages [remote EC test]"
+ ;;
+ RMT1) LABEL="Number of Messages [remote EC test 1]"
+ ;;
+ RMT2) LABEL="Number of Messages [remote EC test 2]"
+ ;;
+
+ *) LABEL="Number of Messages [unknown test]"
+ ;;
+ esac
+
+ gnuplot <<_EOF_
+set grid xtics ytics
+set xlabel "$LABEL"
+
+set terminal postscript eps color
+set ylabel "Time in scavenger (usecs)"
+set output "UTL.${i}.scav.eps"
+plot 'UTL.${i}.scav.data' w l
+set terminal x11
+plot 'UTL.${i}.scav.data' w l
+pause 2
+
+set terminal postscript eps
+set ylabel "Time in push (usecs)"
+set output "UTL.${i}.push.eps"
+plot 'UTL.${i}.push.data' w l
+set terminal x11
+plot 'UTL.${i}.push.data' w l
+pause 2
+
+_EOF_
+done
+
+for i in push scav; do
+ for j in $TESTS; do
+ gs -sDEVICE=jpeg -g640x480 -r110x110 -sNOPAUSE \
+ -sOutputFile="UTL.${j}.${i}.jpg" UTL.${j}.${i}.eps quit.ps
+ done
+done
+
+exit 0
+
diff --git a/TAO/orbsvcs/tests/EC_Multiple/latency.pl b/TAO/orbsvcs/tests/EC_Multiple/latency.pl
index 5cc2bcdba5e..017bbb76bab 100755
--- a/TAO/orbsvcs/tests/EC_Multiple/latency.pl
+++ b/TAO/orbsvcs/tests/EC_Multiple/latency.pl
@@ -55,14 +55,16 @@ while (<>) {
}
print "Latency results for $opt_k:\n";
+$s2 = $sum2 - ($sum * $sum) / $n;
+$sigma = int(sqrt ( $s2 / ($n - 1) ));
print "Minimum: $min,",
" Maximum: $max,",
" Average: ", int($sum / $n),
- " Deviation: ",
- int(sqrt (($sum2/$n - ($sum/$n)*($sum/$n)))), "\n";
+ " Deviation: ", $sigma,
+ "\n";
while ( ($key,$value) = each %histo ) {
$t = ($key / $opt_r);
- print $t, " ", $value / $n, "\n";
+ print $t, " ", 100 * $value / $n, "\n";
}
diff --git a/TAO/orbsvcs/tests/EC_Multiple/run_latency b/TAO/orbsvcs/tests/EC_Multiple/run_latency
index 5853441fc71..5b2e35c3d33 100755
--- a/TAO/orbsvcs/tests/EC_Multiple/run_latency
+++ b/TAO/orbsvcs/tests/EC_Multiple/run_latency
@@ -8,46 +8,23 @@ MSG_COUNT=1000
MSG_INTERVAL=50000
# The interval between the messages.
-../../Naming_Service/Naming_Service -ORBport 20000 -o NameService.ioro &
+/bin/rm -f NameService.ior NameService.pid EC1.pid EC2.pid EC.pid
+
+../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid &
sleep 2
NameService=`cat NameService.ior`
export NameService
-./EC_Multiple -ORBport 20010 -l EC1 -r EC2 -d -s SS1 -a 1 -b 2 -c 3 \
- -m $MSG_COUNT -i $MSG_INTERVAL > LATENCY1.log 2>&1 &
-./EC_Multiple -ORBport 20020 -l EC2 -r EC1 -d -s SS2 -a 1 -b 3 -c 2 \
- -m $MSG_COUNT -i $MSG_INTERVAL > LATENCY2.log 2>&1 &
-wait
-
-kill %1
-wait
-
-./latency.pl -k LCL -r 100 LATENCY1.log LATENCY2.log >local.log
-tail +3 local.log | sort -n > local.data
-./latency.pl -k RMT -r 1000 LATENCY1.log LATENCY2.log >remote.log
-tail +3 remote.log | sort -n > remote.data
-
-gnuplot <<_EOF_
-set grid xtics ytics
-set xlabel "Time (usecs)"
-set ylabel "Relative frequency"
-
-set terminal postscript eps color
-set output "local.eps"
-plot 'local.data' w i
-set terminal x11
-plot 'local.data' w i
-pause 2
-
-set terminal postscript eps
-set output "remote.eps"
-plot 'remote.data' w i
-set terminal x11
-plot 'remote.data' w i
-pause 2
-
-_EOF_
-
-gs -sDEVICE=jpeg -g640x480 -r110x110 -sNOPAUSE -sOutputFile="local.jpg" local.eps quit.ps
-gs -sDEVICE=jpeg -g640x480 -r110x110 -sNOPAUSE -sOutputFile="remote.jpg" remote.eps quit.ps
+HOST=`hostname`
+./EC_Multiple -ORBport 20010 -ORBhost $HOST -ORBpreconnect ${HOST}:20020 \
+ -l EC1 -r EC2 -s RUNTIME1 -o RUNTIME2 \
+ -a 1 -b 2 -c 3 -p EC1.pid -m $MSG_COUNT -i $MSG_INTERVAL > LATENCY1.log 2>&1 &
+./EC_Multiple -ORBport 20020 -ORBhost $HOST -ORBpreconnect ${HOST}:20010 \
+ -l EC2 -r EC1 -s RUNTIME2 -o RUNTIME1 \
+ -a 4 -b 3 -c 2 -p EC2.pid -m $MSG_COUNT -i $MSG_INTERVAL > LATENCY2.log 2>&1 &
+sleep 2
+wait `cat EC1.pid`
+wait `cat EC2.pid`
+kill `cat NameService.pid`
diff --git a/TAO/orbsvcs/tests/EC_Multiple/run_overhead b/TAO/orbsvcs/tests/EC_Multiple/run_overhead
new file mode 100755
index 00000000000..f9f1dcfd893
--- /dev/null
+++ b/TAO/orbsvcs/tests/EC_Multiple/run_overhead
@@ -0,0 +1,113 @@
+#!/bin/sh
+#
+# $Id$
+#
+
+MSG_INTERVAL=5000 # 50000
+# The interval between the messages, in usecs
+
+UTL_COUNT=50000 # 500000
+# The number of iterations in the scavenger thread; each iteration is
+# (roughly) 20 usecs (On a Sparc Ultra 30); and the number of
+# iterations must be high enough so all the messages are sent while
+# the scavenger is still running.
+
+MSG_COUNTS="1 50 100 150 200 250 300 350 400 450 500 550 600 650 700 750 800 850 900 950 1000"
+# The number of messages sent on each test...
+
+RPT_ITER="01 02 03 04 05 06 07 08 09 10"
+# The iterations for the final test.
+
+RPT_MSGS=1000
+# The number of messages in the final test.
+
+/bin/rm -f NameService.ior NameService.pid EC1.pid EC2.pid EC.pid
+
+for i in $RPT_ITER; do
+ echo Short circuit RPT test $i
+ sleep 1
+ ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 \
+ -a 1 -b 2 -c 2 -p EC1.pid -m $RPT_MSGS -u $UTL_COUNT \
+ -i $MSG_INTERVAL -x > OVH.RPT.X.${i}.log 2>&1
+
+ echo Local RPT EC test $i
+ ../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid >/dev/null 2>&1 &
+ sleep 2
+ NameService=`cat NameService.ior`
+ export NameService
+ ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 \
+ -a 1 -b 2 -c 2 -p EC1.pid -m $RPT_MSGS -u $UTL_COUNT \
+ -i $MSG_INTERVAL > OVH.RPT.LCL.${i}.log 2>&1
+ kill `cat NameService.pid`
+
+ echo Remote RPT EC test $i
+ ../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid >/dev/null 2>&1 &
+ sleep 2
+ NameService=`cat NameService.ior`
+ export NameService
+ ./EC_Multiple -ORBport 20010 -l EC1 -r EC2 -s RUNTIME1 -o RUNTIME2 \
+ -a 1 -b 2 -c 3 -p EC1.pid -m $RPT_MSGS -u $UTL_COUNT \
+ -i $MSG_INTERVAL > OVH.RPT.RMT1.${i}.log 2>&1 &
+ ./EC_Multiple -ORBport 20020 -l EC2 -r EC1 -s RUNTIME2 -o RUNTIME1 \
+ -a 4 -b 3 -c 2 -p EC2.pid -m $RPT_MSGS -u $UTL_COUNT \
+ -i $MSG_INTERVAL > OVH.RPT.RMT2.${i}.log 2>&1 &
+ sleep 2
+ wait `cat EC1.pid`
+ wait `cat EC2.pid`
+ kill `cat NameService.pid`
+ wait
+done
+
+exit 0
+
+# These tests show that the overhead is linear in the number of
+# messages...
+
+# Generate the baseline data, i.e. shortcircuit the EC.
+
+for i in $MSG_COUNTS; do
+ echo Short circuit test $i
+ sleep 1
+ ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 \
+ -a 1 -b 2 -c 2 -m $i -u $UTL_COUNT \
+ -i $MSG_INTERVAL -x > OVH.X.${i}.log 2>&1
+done
+
+# Generate the local data, i.e. what is the overhead of using the local EC.
+for i in $MSG_COUNTS; do
+ echo Local EC test $i
+ ../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid >/dev/null 2>&1 &
+ sleep 2
+ NameService=`cat NameService.ior`
+ export NameService
+ ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 \
+ -a 1 -b 2 -c 2 -m $i -u $UTL_COUNT \
+ -i $MSG_INTERVAL -p EC1.pid > OVH.LCL.${i}.log 2>&1
+ kill `cat NameService.pid`
+done
+
+# Generate the remote data, this test is much slower since the latency
+# can be as high as 2 msec
+for i in $MSG_COUNTS; do
+ echo Remote EC test $i
+ ../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid >/dev/null 2>&1 &
+ sleep 2
+ NameService=`cat NameService.ior`
+ export NameService
+ ./EC_Multiple -ORBport 20010 -l EC1 -r EC2 -s RUNTIME1 -o RUNTIME2 \
+ -a 1 -b 2 -c 3 -p EC1.pid -m $i -u $UTL_COUNT \
+ -i $MSG_INTERVAL > OVH.RMT1.${i}.log 2>&1 &
+ ./EC_Multiple -ORBport 20020 -l EC2 -r EC1 -s RUNTIME2 -o RUNTIME1 \
+ -a 4 -b 3 -c 2 -p EC2.pid -m $i -u $UTL_COUNT \
+ -i $MSG_INTERVAL > OVH.RMT2.${i}.log 2>&1 &
+ sleep 2
+ wait `cat EC1.pid`
+ wait `cat EC2.pid`
+ kill `cat NameService.pid`
+ wait
+done
+
diff --git a/TAO/orbsvcs/tests/EC_Multiple/run_utilization b/TAO/orbsvcs/tests/EC_Multiple/run_utilization
index 3eccd4202c3..20b13d0a661 100755
--- a/TAO/orbsvcs/tests/EC_Multiple/run_utilization
+++ b/TAO/orbsvcs/tests/EC_Multiple/run_utilization
@@ -5,47 +5,59 @@
MSG_INTERVAL=5000
# The interval between the messages, in usecs
-UTL_COUNT=80000
-# Each iteration is (roughly) 20 usecs (On a Sparc Ultra 30).
-MSG_COUNTS="1 10 50 100 200 300 400 500 600 700 800 900 1000 1200 1400 1600 1800 2000"
-# Generate the baseline data, i.e. shortcircuit the EC.
+UTL_COUNT=2500
+# The number of iterations in the scavenger thread; each iteration is
+# (roughly) 20 usecs (On a Sparc Ultra 30); and the number of
+# iterations must be high enough so all the messages are sent while
+# the scavenger is still running.
-for i in $MSG_COUNTS; do
- ./EC_Multiple -ORBport 20010 -l EC1 -d -s SS1 -a 1 -b 2 -c 2 \
- -m $i -u $UTL_COUNT -i $MSG_INTERVAL -x > UTL.X.${i}.log 2>&1
-done
+PUSH_COUNTS="5000 10000 100000"
+# The number of messages sent on each test...
-# Generate the local data, i.e. what is the overhead of using the local EC.
-for i in $MSG_COUNTS; do
- ../../Naming_Service/Naming_Service -ORBport 20000 >/dev/null 2>&1 & sleep 1
- ./EC_Multiple -ORBport 20010 -l EC1 -d -s SS1 -a 1 -b 2 -c 2 \
- -m $i -u $UTL_COUNT -i $MSG_INTERVAL > UTL.LCL.${i}.log 2>&1
- kill %1
- wait
-done
+MSG_COUNT=50
+
+/bin/rm -f NameService.ior NameService.pid EC1.pid EC2.pid EC.pid
+
+# Generate the baseline data, i.e. shortcircuit the EC.
+
+for i in $PUSH_COUNTS; do
+ echo Local EC test $i
+ ../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid >/dev/null 2>&1 &
+ sleep 2
+ NameService=`cat NameService.ior`
+ export NameService
+ ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 \
+ -a 1 -b 2 -c 2 -m $MSG_COUNT -u $UTL_COUNT -w $i \
+ -i $MSG_INTERVAL -p EC1.pid > UTL.LCL.${i}.log 2>&1
+ kill `cat NameService.pid`
-# Generate the remote data, this test is much slower since the latency
-# can be as high as 2 msec
-for i in $MSG_COUNTS; do
- ../../Naming_Service/Naming_Service -ORBport 20000 >/dev/null 2>&1 & sleep 1
- ./EC_Multiple -ORBport 20010 -l EC1 -r EC2 -d -s SS1 -a 1 -b 2 -c 3 \
- -m $i -u $UTL_COUNT -i $MSG_INTERVAL > UTL.RMT1.${i}.log 2>&1 &
- ./EC_Multiple -ORBport 20020 -l EC2 -r EC1 -d -s SS2 -a 1 -b 3 -c 2 \
- -m $i -u $UTL_COUNT -i $MSG_INTERVAL > UTL.RMT2.${i}.log 2>&1 &
- wait
- kill %1
- wait
done
exit 0
-grep "Scavenger time" bar.*.log |
- sed -e 's/^bar\.//' -e 's/\.log:Scavenger time://' |
- sort -n > bar.scavenger.data
-grep "Push time" bar.*.log |
- sed -e 's/^bar\.//' -e 's/\.log:Push time://' |
- sort -n > bar.push.data
-
-grep Scavenger UTL.X.*.log | sed -e 's/UTL\.X\.//' -e 's/\.log:/ /' | sort -n
+ echo Short circuit test $i
+ sleep 1
+ ./EC_Multiple -ORBport 20010 -l EC1 -s RUNTIME1 \
+ -a 1 -b 2 -c 2 -m $MSG_COUNT -u $UTL_COUNT -w $i \
+ -i $MSG_INTERVAL -x > UTL.X.${i}.log 2>&1
+
+ echo Remote EC test $i
+ ../../Naming_Service/Naming_Service -ORBport 20000 \
+ -o NameService.ior -p NameService.pid >/dev/null 2>&1 &
+ sleep 2
+ NameService=`cat NameService.ior`
+ export NameService
+ ./EC_Multiple -ORBport 20010 -l EC1 -r EC2 -s RUNTIME1 -o RUNTIME2 \
+ -a 1 -b 2 -c 3 -p EC1.pid -m $MSG_COUNT -u $UTL_COUNT -w $i \
+ -i $MSG_INTERVAL > UTL.RMT1.${i}.log 2>&1 &
+ ./EC_Multiple -ORBport 20020 -l EC2 -r EC1 -s RUNTIME2 -o RUNTIME1 \
+ -a 4 -b 3 -c 2 -p EC2.pid -m $MSG_COUNT -u $UTL_COUNT -w $i \
+ -i $MSG_INTERVAL > UTL.RMT2.${i}.log 2>&1 &
+ sleep 2
+ wait `cat EC1.pid`
+ wait `cat EC2.pid`
+ kill `cat NameService.pid`
+ wait