author      nw1 <nw1@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>   1997-07-12 09:19:37 +0000
committer   nw1 <nw1@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>   1997-07-12 09:19:37 +0000
commit      3e2dad8be6f430fdbe855726acbd3d2aa5072475 (patch)
tree        d7e10091ebd22fb03471bb41e1374ade6a580528 /tests/Message_Block_Test.cpp
parent      81450f37ba50a725e2963ed2f478e882a00947da (diff)
download    ATCD-3e2dad8be6f430fdbe855726acbd3d2aa5072475.tar.gz
The test now runs twice: once with the default C++ new/delete memory
allocation and once with a cached memory allocation strategy.
Diffstat (limited to 'tests/Message_Block_Test.cpp')
-rw-r--r--  tests/Message_Block_Test.cpp | 152
1 file changed, 132 insertions(+), 20 deletions(-)
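
For context, the allocator is threaded through the ACE_Message_Block constructor that takes (size, type, cont, data, allocator, locking strategy, priority), as the produce() hunks below show. The following is a minimal sketch of that call pattern; the helper name make_block and the fixed 128-byte size are illustrative, not part of this commit:

#include "ace/Malloc.h"
#include "ace/Message_Block.h"

// Sketch only: build a message block whose buffer is obtained from the
// supplied allocation strategy; a 0 allocator falls back to new/delete.
static ACE_Message_Block *
make_block (ACE_Allocator *alloc_strategy)
{
  ACE_Message_Block *mb = 0;
  ACE_NEW_RETURN (mb,
                  ACE_Message_Block (128,                        // size
                                     ACE_Message_Block::MB_DATA, // type
                                     0,                          // cont
                                     0,                          // data
                                     alloc_strategy,             // allocator
                                     0,                          // locking strategy
                                     0),                         // priority
                  0);
  return mb;
}
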
diff --git a/tests/Message_Block_Test.cpp b/tests/Message_Block_Test.cpp
index d5eb464e491..52c538158e9 100644
--- a/tests/Message_Block_Test.cpp
+++ b/tests/Message_Block_Test.cpp
@@ -20,9 +20,14 @@
#include "ace/Task.h"
#include "ace/Service_Config.h"
-#include "ace/Task.h"
+#include "ace/Malloc.h"
+#include "ace/Profile_Timer.h"
+#include "ace/Free_List.h"
#include "test_config.h"
+#define ACE_ALLOC_STRATEGY_NO 2
+// Number of memory allocation strategies used in this test
+
#if defined (ACE_HAS_THREADS)
// Number of iterations to run the test.
@@ -32,6 +37,93 @@ static ACE_Lock_Adapter<ACE_Thread_Mutex> lock_adapter_;
// Serialize access to <ACE_Message_Block> reference count, which will
// be decremented from multiple threads.
+template <class TYPE>
+class Mem_Pool_Node
+// Mem_Pool_Node keeps unused memory chunks on a free list.  The
+// free-list link is stored inside the unused memory itself, so each
+// chunk must be longer than sizeof (void *).  Keeping the link in
+// place avoids wasting additional memory on a separate bookkeeping
+// structure.
+{
+public:
+ TYPE *addr () { return &this->obj_; }
+ // return the address of free memory
+
+ Mem_Pool_Node<TYPE> *get_next () { return this->next_; }
+ // get the next Mem_Pool_Node
+
+ void set_next (Mem_Pool_Node<TYPE> * ptr) { this->next_ = ptr; }
+ // set the next Mem_Pool_Node
+
+private:
+ union
+ {
+ TYPE obj_;
+ Mem_Pool_Node<TYPE> *next_;
+ } ;
+};
+
+template <class TYPE, class LOCK>
+class Cached_Memory_Pool_Allocator : public ACE_New_Allocator
+{
+public:
+ Cached_Memory_Pool_Allocator (size_t n_chunks);
+  // Create a cached memory pool with <n_chunks> chunks,
+  // each of sizeof (TYPE) bytes.
+
+ ~Cached_Memory_Pool_Allocator ();
+  // Clean everything up.
+
+ void* malloc (size_t);
+ // get a chunk of memory
+
+ void free (void *);
+ // return a chunk of memory
+
+private:
+ TYPE *pool_;
+
+ ACE_Locked_Simple_Free_List<Mem_Pool_Node<TYPE>, LOCK> free_list_;
+};
+
+template <class TYPE, class LOCK>
+Cached_Memory_Pool_Allocator<TYPE, LOCK>::Cached_Memory_Pool_Allocator (size_t n_chunks)
+ : pool_ (0)
+{
+ ACE_NEW (this->pool_, TYPE [n_chunks]);
+  // Note: errno may be lost because this runs inside the ctor.
+
+ for (size_t c = 0 ; c < n_chunks ; c ++)
+ this->free_list_.add (new (&this->pool_ [c]) Mem_Pool_Node<TYPE> ());
+  // Put each node on the free list using placement new; the new
+  // above performs no real memory allocation.
+}
+
+template <class TYPE, class LOCK>
+Cached_Memory_Pool_Allocator<TYPE, LOCK>::~Cached_Memory_Pool_Allocator ()
+{
+ delete [] this->pool_;
+}
+
+template <class TYPE, class LOCK> void *
+Cached_Memory_Pool_Allocator<TYPE, LOCK>::malloc (size_t nbytes)
+{
+  // Check whether the requested size fits within our fixed chunk size.
+  if (sizeof (TYPE) < nbytes)
+    return NULL;
+
+  // Pop the next free chunk off the free list.
+  return this->free_list_.remove ()->addr ();
+  // The addr() call is not strictly necessary, given the way
+  // Mem_Pool_Node's internal union is laid out.
+}
+
+template <class TYPE, class LOCK> void
+Cached_Memory_Pool_Allocator<TYPE, LOCK>::free (void * ptr)
+{
+ this->free_list_.add ((Mem_Pool_Node<TYPE> *) ptr) ;
+}
+
class Worker_Task : public ACE_Task<ACE_MT_SYNCH>
{
public:
@@ -70,7 +162,7 @@ Worker_Task::put (ACE_Message_Block *mb, ACE_Time_Value *tv)
int
Worker_Task::svc (void)
-{
+{
ACE_NEW_THREAD;
// The <ACE_Task::svc_run()> method automatically adds us to the
// <ACE_Service_Config>'s <ACE_Thread_Manager> when the thread
@@ -164,7 +256,7 @@ Worker_Task::svc (void)
return 0;
}
-Worker_Task::Worker_Task (void)
+Worker_Task::Worker_Task ()
{
// Make us an Active Object.
if (this->activate (THR_NEW_LWP) == -1)
@@ -172,7 +264,7 @@ Worker_Task::Worker_Task (void)
}
static int
-produce (Worker_Task &worker_task)
+produce (Worker_Task &worker_task, ACE_Allocator *alloc_strategy)
{
ACE_Message_Block *mb;
@@ -190,7 +282,7 @@ produce (Worker_Task &worker_task)
ACE_Message_Block::MB_DATA, // type
0, // cont
0, // data
- 0, // allocator
+ alloc_strategy, // allocator
&lock_adapter_, // locking strategy
0), // priority
-1);
@@ -211,7 +303,7 @@ produce (Worker_Task &worker_task)
ACE_NEW_RETURN (mb,
ACE_Message_Block (0, ACE_Message_Block::MB_DATA,
- 0, 0, 0, &lock_adapter_),
+ 0, 0, alloc_strategy, &lock_adapter_),
-1);
if (worker_task.put (mb) == -1)
@@ -225,6 +317,8 @@ produce (Worker_Task &worker_task)
#endif /* ACE_HAS_THREADS */
+typedef char memory_chunk[ACE_MALLOC_ALIGN * 5];
+
int
main (int, char *[])
{
@@ -234,23 +328,41 @@ main (int, char *[])
ACE_DEBUG ((LM_DEBUG, "(%t) threads = %d\n", n_threads));
- // Create the worker tasks.
- Worker_Task worker_task[ACE_MAX_THREADS];
+ Cached_Memory_Pool_Allocator<memory_chunk, ACE_Thread_Mutex> mem_allocator (48);
+ ACE_Profile_Timer ptime;
+ ACE_Profile_Timer::ACE_Elapsed_Time et[ACE_ALLOC_STRATEGY_NO];
+ ACE_Allocator *alloc_strategy [] = { NULL, &mem_allocator };
+ char *alloc_name[] = { "Default", "Cached Memory" };
- // Link all the tasks together into a simple pipeline.
- for (int i = 1; i < ACE_MAX_THREADS; i++)
- worker_task[i - 1].next (&worker_task[i]);
-
- // Generate messages and pass them through the pipeline.
- produce (worker_task[0]);
-
- // Wait for all the threads to reach their exit point.
-
- ACE_DEBUG ((LM_DEBUG, "(%t) waiting for worker tasks to finish...\n"));
+ for (int i = 0; i < ACE_ALLOC_STRATEGY_NO; i++)
+ {
+ ACE_DEBUG ((LM_DEBUG, "(%t) Start Message_Block_Test using %s allocation strategy\n", alloc_name[i]));
+ // Create the worker tasks.
+ Worker_Task worker_task[ACE_MAX_THREADS] ;
+
+ // Link all the tasks together into a simple pipeline.
+ for (int j = 1; j < ACE_MAX_THREADS; j++)
+ worker_task[j - 1].next (&worker_task[j]);
+
+ ptime.start ();
+ // Generate messages and pass them through the pipeline.
+ produce (worker_task[0], alloc_strategy [i]);
+
+ // Wait for all the threads to reach their exit point.
+
+ ACE_DEBUG ((LM_DEBUG, "(%t) waiting for worker tasks to finish...\n"));
+
+ ACE_Service_Config::thr_mgr ()->wait ();
+ ptime.stop ();
+ ptime.elapsed_time (et[i]);
+
+ ACE_DEBUG ((LM_DEBUG, "(%t) destroying worker tasks\n"));
+ }
- ACE_Service_Config::thr_mgr ()->wait ();
+ for (i = 0; i < ACE_ALLOC_STRATEGY_NO; i++)
+ ACE_DEBUG ((LM_DEBUG, "Elapsed time using %s allocation strategy: %f sec\n", alloc_name [i], et[i].real_time));
- ACE_DEBUG ((LM_DEBUG, "(%t) destroying worker tasks and exiting...\n"));
+ ACE_DEBUG ((LM_DEBUG, "(%t) Exiting...\n"));
#else
ACE_ERROR ((LM_ERROR, "threads not supported on this platform\n"));
#endif /* ACE_HAS_THREADS */
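
The core idea of the new Cached_Memory_Pool_Allocator, threading the free list through the unused chunks themselves, can be shown without any ACE machinery. The following is a simplified, single-threaded sketch; the class and member names are illustrative, while the real test adds locking through ACE_Locked_Simple_Free_List and derives from ACE_New_Allocator:

#include <cstddef>

template <class TYPE>
class Simple_Cached_Pool
{
private:
  // Each unused chunk doubles as a free-list node: while a chunk is
  // free, its first bytes hold the link to the next free chunk.
  union Node
  {
    char storage_[sizeof (TYPE)];  // payload bytes while the chunk is in use
    Node *next_;                   // free-list link while the chunk is free
  };

public:
  explicit Simple_Cached_Pool (std::size_t n_chunks)
    : pool_ (new Node[n_chunks]),
      head_ (0)
  {
    // Thread every chunk onto the free list up front.
    for (std::size_t i = 0; i < n_chunks; ++i)
      this->release (&this->pool_[i]);
  }

  ~Simple_Cached_Pool () { delete [] this->pool_; }

  // Hand out one chunk, or 0 if the request does not fit or the pool is empty.
  void *acquire (std::size_t nbytes)
  {
    if (nbytes > sizeof (TYPE) || this->head_ == 0)
      return 0;
    Node *node = this->head_;
    this->head_ = node->next_;
    return node;                   // the node's own storage is the chunk
  }

  // Take a chunk back and reuse its bytes to hold the free-list link.
  void release (void *chunk)
  {
    Node *node = static_cast<Node *> (chunk);
    node->next_ = this->head_;
    this->head_ = node;
  }

private:
  Node *pool_;   // one contiguous allocation holding every chunk
  Node *head_;   // head of the free list embedded in the chunks
};

With the test's memory_chunk typedef, the equivalent of the 48-chunk pool created in main() would be Simple_Cached_Pool<memory_chunk> pool (48), with acquire() and release() standing in for the allocator's malloc() and free().
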
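
The timing comparison in main() follows the usual ACE_Profile_Timer start/stop/elapsed_time sequence. Condensed into a sketch, with the measured_work callback standing in for the produce()-and-wait section of the test (that parameter is illustrative):

#include "ace/Profile_Timer.h"
#include "ace/Log_Msg.h"

// Sketch only: time one pass of the test and report wall-clock seconds.
void
time_one_pass (void (*measured_work) (void))
{
  ACE_Profile_Timer timer;
  ACE_Profile_Timer::ACE_Elapsed_Time et;

  timer.start ();
  (*measured_work) ();   // e.g. produce() followed by thr_mgr ()->wait ()
  timer.stop ();

  timer.elapsed_time (et);
  ACE_DEBUG ((LM_DEBUG, "Elapsed real time: %f sec\n", et.real_time));
}
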