author     chris <chris@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>   2002-09-03 23:09:26 +0000
committer  chris <chris@ae88bc3d-4319-0410-8dbf-d08b4c9d3795>   2002-09-03 23:09:26 +0000
commit     8cddc9a06545cb99612875b7c9508fffeb5b6d03 (patch)
tree       e763aff3e70097db4ef86735f3ad4a9651bfaa5f
parent     ba28132d4d2c430fabf8caf83676e56401271071 (diff)
download   ATCD-8cddc9a06545cb99612875b7c9508fffeb5b6d03.tar.gz
ChangeLogTag:Tue Sep 3 15:54:33 2002 Christopher Kohlhoff <chris@kohlhoff.com>
-rw-r--r--   ChangeLog                   38
-rw-r--r--   ChangeLogs/ChangeLog-03a    38
-rw-r--r--   ace/Atomic_Op.cpp          192
-rw-r--r--   ace/Atomic_Op.h            201
-rw-r--r--   ace/Atomic_Op.i            246
-rw-r--r--   ace/Atomic_Op_T.cpp         79
-rw-r--r--   ace/Atomic_Op_T.h          254
-rw-r--r--   ace/Atomic_Op_T.i          260
-rw-r--r--   ace/Makefile.ace             2
-rw-r--r--   ace/Makefile.bor             2
-rw-r--r--   ace/OS.cpp                  38
-rw-r--r--   ace/OS.h                     6
-rw-r--r--   ace/Object_Manager.cpp       5
-rw-r--r--   ace/ace.mpc                  2
-rw-r--r--   tests/Atomic_Op_Test.cpp   142
15 files changed, 1169 insertions, 336 deletions
diff --git a/ChangeLog b/ChangeLog
index 01098fb61fc..a117080733f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,41 @@
+Tue Sep 3 15:54:33 2002 Christopher Kohlhoff <chris@kohlhoff.com>
+
+ * ace/OS.h:
+ * ace/OS.cpp:
+
+ Added new functions ACE_OS::num_processors() and
+ ACE_OS::num_processors_online() for determining the number of CPUs
+ available on a system.
+
+ * ace/Atomic_Op.cpp:
+ * ace/Atomic_Op.h:
+ * ace/Atomic_Op.i:
+ * ace/Atomic_Op_T.cpp:
+ * ace/Atomic_Op_T.h:
+ * ace/Atomic_Op_T.i:
+ * ace/Object_Manager.cpp:
+ * ace/Makefile.bor:
+ * ace/Makefile.ace:
+ * ace/ace.mpc:
+ * tests/Atomic_Op_Test.cpp:
+
+ Added a new fast, lightweight ACE_Atomic_Op template specialization
+ for x86 platforms that uses the architecture's atomic integer
+ primitives. It currently supports Win32 using MSVC6 or BCB, and g++
+ for (in theory) all x86 operating systems. It selects the
+ appropriate implementation for single- or multi-CPU systems at
+ runtime, based on the return value of the ACE_OS::num_processors
+ function added above.
+
+ Note that Atomic_Op.cpp is now a real .cpp file and not a template
+ file, and so project files need to be updated accordingly. The
+ template definitions have been moved to the new Atomic_Op_T.* files.
+
+ Note also that the ACE_Atomic_Op::mutex() operation has been
+ deprecated (and is not supported for the new specialization). If
+ you need this functionality, consider using the ACE_Atomic_Op_Ex
+ template instead.
+
Tue Sep 3 15:40:39 2002 Ossama Othman <ossama@uci.edu>
* tests/XtReactor_Test.cpp (sock_callback):
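The runtime selection described in the new ChangeLog entry above can be pictured with a minimal standalone sketch (not ACE code): cpu_count () stands in for ACE_OS::num_processors (), and the two increment routines stand in for the single-CPU (plain xadd) and multi-CPU ("lock ; xadd") variants added by the patch, here both expressed with a GCC __sync builtin for portability. A function pointer is bound once at startup and every later operation goes through it.

    #include <unistd.h>

    // Hypothetical stand-in for ACE_OS::num_processors ().
    static long cpu_count (void)
    {
      return ::sysconf (_SC_NPROCESSORS_CONF);
    }

    static long single_cpu_increment (volatile long *v)
    {
      return __sync_add_and_fetch (v, 1);   // patch uses xadd without "lock"
    }

    static long multi_cpu_increment (volatile long *v)
    {
      return __sync_add_and_fetch (v, 1);   // patch uses "lock ; xadd"
    }

    static long (*increment_fn) (volatile long *) = 0;

    static void init_functions (void)
    {
      // Bind the pointer once, based on the number of CPUs.
      increment_fn = (cpu_count () == 1
                      ? single_cpu_increment
                      : multi_cpu_increment);
    }

    int main ()
    {
      init_functions ();
      volatile long counter = 0;
      return (*increment_fn) (&counter) == 1 ? 0 : 1;
    }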
diff --git a/ChangeLogs/ChangeLog-03a b/ChangeLogs/ChangeLog-03a
index 01098fb61fc..a117080733f 100644
--- a/ChangeLogs/ChangeLog-03a
+++ b/ChangeLogs/ChangeLog-03a
@@ -1,3 +1,41 @@
+Tue Sep 3 15:54:33 2002 Christopher Kohlhoff <chris@kohlhoff.com>
+
+ * ace/OS.h:
+ * ace/OS.cpp:
+
+ Added new functions ACE_OS::num_processors() and
+ ACE_OS::num_processors_online() for determining the number of CPUs
+ available on a system.
+
+ * ace/Atomic_Op.cpp:
+ * ace/Atomic_Op.h:
+ * ace/Atomic_Op.i:
+ * ace/Atomic_Op_T.cpp:
+ * ace/Atomic_Op_T.h:
+ * ace/Atomic_Op_T.i:
+ * ace/Object_Manager.cpp:
+ * ace/Makefile.bor:
+ * ace/Makefile.ace:
+ * ace/ace.mpc:
+ * tests/Atomic_Op_Test.cpp:
+
+ Added a new fast, lightweight ACE_Atomic_Op template specialization
+ for x86 platforms that uses the architecture's atomic integer
+ primitives. It currently supports Win32 using MSVC6 or BCB, and g++
+ for (in theory) all x86 operating systems. It selects the
+ appropriate implementation for single- or multi-CPU systems at
+ runtime, based on the return value of the ACE_OS::num_processors
+ function added above.
+
+ Note that Atomic_Op.cpp is now a real .cpp file and not a template
+ file, and so project files need to be updated accordingly. The
+ template definitions have been moved to the new Atomic_Op_T.* files.
+
+ Note also that the ACE_Atomic_Op::mutex() operation has been
+ deprecated (and is not supported for the new specialization). If
+ you need this functionality, consider using the ACE_Atomic_Op_Ex
+ template instead.
+
Tue Sep 3 15:40:39 2002 Ossama Othman <ossama@uci.edu>
* tests/XtReactor_Test.cpp (sock_callback):
diff --git a/ace/Atomic_Op.cpp b/ace/Atomic_Op.cpp
index df82902efcb..8261ec2f6d8 100644
--- a/ace/Atomic_Op.cpp
+++ b/ace/Atomic_Op.cpp
@@ -1,83 +1,177 @@
-#ifndef ACE_ATOMIC_OP_C
-#define ACE_ATOMIC_OP_C
+// $Id$
#include "ace/Atomic_Op.h"
+#include "ace/OS.h"
-#if !defined (ACE_LACKS_PRAGMA_ONCE)
-# pragma once
-#endif /* ACE_LACKS_PRAGMA_ONCE */
+ACE_RCSID(ace, Atomic_Op, "$Id$")
#if !defined (__ACE_INLINE__)
-// On non-Win32 platforms, this code will be treated as normal code.
-#if !defined (ACE_WIN32)
#include "ace/Atomic_Op.i"
-#endif /* !ACE_WIN32 */
#endif /* __ACE_INLINE__ */
+#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)
-ACE_ALLOC_HOOK_DEFINE(ACE_Atomic_Op_Ex)
-ACE_ALLOC_HOOK_DEFINE(ACE_Atomic_Op)
-
-ACE_RCSID(ace, Atomic_Op, "$Id$")
+long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::increment_fn_) (volatile long *) = 0;
+long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::decrement_fn_) (volatile long *) = 0;
+long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_add_fn_) (volatile long *, long) = 0;
-// *************************************************
-template <class ACE_LOCK, class TYPE> ACE_LOCK &
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::mutex (void)
+void
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::init_functions (void)
{
- // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::mutex");
- return this->mutex_;
+ if (ACE_OS::num_processors () == 1)
+ {
+ increment_fn_ = single_cpu_increment;
+ decrement_fn_ = single_cpu_decrement;
+ exchange_add_fn_ = single_cpu_exchange_add;
+ }
+ else
+ {
+ increment_fn_ = multi_cpu_increment;
+ decrement_fn_ = multi_cpu_decrement;
+ exchange_add_fn_ = multi_cpu_exchange_add;
+ }
}
-template <class ACE_LOCK, class TYPE> void
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::dump (void) const
+void
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::dump (void) const
{
- // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::dump");
ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
- this->mutex_.dump ();
ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
}
-template <class ACE_LOCK, class TYPE>
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex
- (ACE_LOCK &mtx)
- : mutex_ (mtx),
- value_ (0)
+#if defined (_MSC_VER)
+// Disable "no return value" warning, as we will be putting
+// the return values directly into the EAX register.
+#pragma warning (push)
+#pragma warning (disable: 4035)
+#endif /* _MSC_VER */
+
+long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::single_cpu_increment (volatile long *value)
{
- // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex");
+#if defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+ long tmp = 1;
+ unsigned long addr = ACE_reinterpret_cast (unsigned long, value);
+ asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
+ return tmp + 1;
+#else /* __GNUC__ && ACE_HAS_PENTIUM */
+ ACE_UNUSED_ARG (value);
+ ACE_NOTSUP_RETURN (-1);
+#endif /* __GNUC__ && ACE_HAS_PENTIUM */
}
-template <class ACE_LOCK, class TYPE>
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex
- (ACE_LOCK &mtx, const TYPE &c)
- : mutex_ (mtx),
- value_ (c)
+long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::single_cpu_decrement (volatile long *value)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex");
+#if defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+ long tmp = -1;
+ unsigned long addr = ACE_reinterpret_cast (unsigned long, value);
+ asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
+ return tmp - 1;
+#else /* __GNUC__ && ACE_HAS_PENTIUM */
+ ACE_UNUSED_ARG (value);
+ ACE_NOTSUP_RETURN (-1);
+#endif /* __GNUC__ && ACE_HAS_PENTIUM */
}
-// ****************************************************************
+long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::single_cpu_exchange_add (volatile long *value,
+ long rhs)
+{
+#if defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+ unsigned long addr = ACE_reinterpret_cast (unsigned long, value);
+ asm( "xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
+ return rhs;
+#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
+# if defined (_MSC_VER)
+ __asm
+ {
+ mov eax, rhs
+ mov edx, value
+ xadd [edx], eax
+ }
+ // Return value is already in EAX register.
+# elif defined (__BORLANDC__)
+ _EAX = rhs;
+ _EDX = ACE_reinterpret_cast (unsigned long, value);
+ __emit__(0x0F, 0xC1, 0x02); // xadd [edx], eax
+ // Return value is already in EAX register.
+# else /* _MSC_VER */
+ ACE_UNUSED_ARG (value);
+ ACE_UNUSED_ARG (rhs);
+ ACE_NOTSUP_RETURN (-1);
+# endif /* _MSC_VER */
+#else /* __GNUC__ && ACE_HAS_PENTIUM */
+ ACE_UNUSED_ARG (value);
+ ACE_UNUSED_ARG (rhs);
+ ACE_NOTSUP_RETURN (-1);
+#endif /* __GNUC__ && ACE_HAS_PENTIUM */
+}
-template <class ACE_LOCK, class TYPE>
-ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op (void)
- : ACE_Atomic_Op_Ex < ACE_LOCK,TYPE > (this->own_mutex_)
+long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::multi_cpu_increment (volatile long *value)
{
- // ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op");
+#if defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+ long tmp = 1;
+ unsigned long addr = ACE_reinterpret_cast (unsigned long, value);
+ asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
+ return tmp + 1;
+#else /* __GNUC__ && ACE_HAS_PENTIUM */
+ ACE_UNUSED_ARG (value);
+ ACE_NOTSUP_RETURN (-1);
+#endif /* __GNUC__ && ACE_HAS_PENTIUM */
}
-template <class ACE_LOCK, class TYPE>
-ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op (const TYPE &c)
- : ACE_Atomic_Op_Ex < ACE_LOCK,TYPE > (this->own_mutex_, c)
+long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::multi_cpu_decrement (volatile long *value)
{
- // ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op");
+#if defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+ long tmp = -1;
+ unsigned long addr = ACE_reinterpret_cast (unsigned long, value);
+ asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
+ return tmp - 1;
+#else /* __GNUC__ && ACE_HAS_PENTIUM */
+ ACE_UNUSED_ARG (value);
+ ACE_NOTSUP_RETURN (-1);
+#endif /* __GNUC__ && ACE_HAS_PENTIUM */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE
-ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op
- (const ACE_Atomic_Op<ACE_LOCK, TYPE> &rhs)
- : ACE_Atomic_Op_Ex < ACE_LOCK,TYPE >
- ( this->own_mutex_, rhs.value() )
+long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::multi_cpu_exchange_add (volatile long *value,
+ long rhs)
{
-// ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op");
+#if defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+ unsigned long addr = ACE_reinterpret_cast (unsigned long, value);
+ asm( "lock ; xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
+ return rhs;
+#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
+# if defined (_MSC_VER)
+ __asm
+ {
+ mov eax, rhs
+ mov edx, value
+ lock xadd [edx], eax
+ }
+ // Return value is already in EAX register.
+# elif defined (__BORLANDC__)
+ _EAX = rhs;
+ _EDX = ACE_reinterpret_cast (unsigned long, value);
+ __emit__(0xF0, 0x0F, 0xC1, 0x02); // lock xadd [edx], eax
+ // Return value is already in EAX register.
+# else /* _MSC_VER */
+ ACE_UNUSED_ARG (value);
+ ACE_UNUSED_ARG (rhs);
+ ACE_NOTSUP_RETURN (-1);
+# endif /* _MSC_VER */
+#else /* __GNUC__ && ACE_HAS_PENTIUM */
+ ACE_UNUSED_ARG (value);
+ ACE_UNUSED_ARG (rhs);
+ ACE_NOTSUP_RETURN (-1);
+#endif /* __GNUC__ && ACE_HAS_PENTIUM */
}
-#endif /*ACE_ATOMIC_OP */
+#if defined (_MSC_VER)
+#pragma warning (pop)
+#endif /* _MSC_VER */
+
+#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
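The "tmp + 1" / "tmp - 1" adjustments in the routines above follow from xadd's convention: the instruction leaves the destination's old value in the source operand, so the new value is the old value plus the addend, and the "lock" prefix is what makes the multi-CPU variants safe. The same convention can be checked with GCC's __sync_fetch_and_add builtin (an illustration only, not part of the patch):

    #include <cassert>

    int main ()
    {
      volatile long value = 5;

      // Like xadd, __sync_fetch_and_add returns the destination's *old* value...
      long old = __sync_fetch_and_add (&value, 1);
      assert (old == 5);

      // ...so the new value is old + addend, which is what
      // single_cpu_increment () returns as "tmp + 1".
      assert (value == 6);
      return 0;
    }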
diff --git a/ace/Atomic_Op.h b/ace/Atomic_Op.h
index 53da8f91d3e..1cc92325faf 100644
--- a/ace/Atomic_Op.h
+++ b/ace/Atomic_Op.h
@@ -23,172 +23,135 @@
#include "ace/Synch.h"
+// Include the templates here.
+#include "ace/Atomic_Op_T.h"
+
+// Determine whether builtin atomic op support is
+// available on this platform.
+#if defined (ACE_HAS_THREADS)
+# if defined (WIN32)
+# if defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
+# define ACE_HAS_BUILTIN_ATOMIC_OP
+# else /* ACE_HAS_INTERLOCKED_EXCHANGEADD */
+ // Inline assembly emulation of InterlockedExchangeAdd
+ // is currently only implemented for MSVC and Borland.
+# if defined (_MSC_VER) || defined (__BORLANDC__)
+# define ACE_HAS_BUILTIN_ATOMIC_OP
+# endif /* _MSC_VER || __BORLANDC__ */
+# endif /* ACE_HAS_INTERLOCKED_EXCHANGEADD */
+# elif defined (__GNUC__) && defined (ACE_HAS_PENTIUM)
+# define ACE_HAS_BUILTIN_ATOMIC_OP
+# endif /* WIN32 */
+#endif /* ACE_HAS_THREADS */
+
+#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)
+ACE_TEMPLATE_SPECIALIZATION
/**
- * @class ACE_Atomic_Op_Ex
+ * @class ACE_Atomic_Op<ACE_Thread_Mutex, long>
*
- * @brief Transparently parameterizes synchronization into basic
- * arithmetic operations.
- *
- * This class is described in an article in the July/August 1994
- * issue of the C++ Report magazine. It implements a
- * templatized version of the Decorator pattern from the GoF book.
+ * @brief Specialization of ACE_Atomic_Op for platforms that
+ * support atomic integer operations.
*/
-template <class ACE_LOCK, class TYPE>
-class ACE_Atomic_Op_Ex
+class ACE_Export ACE_Atomic_Op<ACE_Thread_Mutex, long>
{
public:
- // = Initialization methods.
-
/// Initialize <value_> to 0.
- ACE_Atomic_Op_Ex (ACE_LOCK &mtx);
+ ACE_Atomic_Op (void);
/// Initialize <value_> to c.
- ACE_Atomic_Op_Ex (ACE_LOCK &mtx, const TYPE &c);
+ ACE_Atomic_Op (long c);
- // = Accessors.
+ /// Manage copying...
+ ACE_Atomic_Op (const ACE_Atomic_Op<ACE_Thread_Mutex, long> &c);
/// Atomically pre-increment <value_>.
- TYPE operator++ (void);
+ long operator++ (void);
/// Atomically post-increment <value_>.
- TYPE operator++ (int);
+ long operator++ (int);
- /// Atomically increment <value_> by i.
- TYPE operator+= (const TYPE &i);
+ /// Atomically increment <value_> by rhs.
+ long operator+= (long rhs);
/// Atomically pre-decrement <value_>.
- TYPE operator-- (void);
+ long operator-- (void);
/// Atomically post-decrement <value_>.
- TYPE operator-- (int);
+ long operator-- (int);
- /// Atomically decrement <value_> by i.
- TYPE operator-= (const TYPE &i);
+ /// Atomically decrement <value_> by rhs.
+ long operator-= (long rhs);
- /// Atomically compare <value_> with i.
- int operator== (const TYPE &i) const;
+ /// Atomically compare <value_> with rhs.
+ int operator== (long rhs) const;
- /// Atomically compare <value_> with i.
- int operator!= (const TYPE &i) const;
+ /// Atomically compare <value_> with rhs.
+ int operator!= (long rhs) const;
- /// Atomically check if <value_> greater than or equal to i.
- int operator>= (const TYPE &i) const;
+ /// Atomically check if <value_> greater than or equal to rhs.
+ int operator>= (long rhs) const;
- /// Atomically check if <value_> greater than i.
- int operator> (const TYPE &rhs) const;
+ /// Atomically check if <value_> greater than rhs.
+ int operator> (long rhs) const;
- /// Atomically check if <value_> less than or equal to i.
- int operator<= (const TYPE &rhs) const;
+ /// Atomically check if <value_> less than or equal to rhs.
+ int operator<= (long rhs) const;
- /// Atomically check if <value_> less than i.
- int operator< (const TYPE &rhs) const;
+ /// Atomically check if <value_> less than rhs.
+ int operator< (long rhs) const;
- /// Atomically assign i to <value_>.
- void operator= (const TYPE &i);
+ /// Atomically assign rhs to <value_>.
+ void operator= (long rhs);
/// Atomically assign <rhs> to <value_>.
- void operator= (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs);
+ void operator= (const ACE_Atomic_Op<ACE_Thread_Mutex, long> &rhs);
/// Explicitly return <value_>.
- TYPE value (void) const;
+ long value (void) const;
/// Dump the state of an object.
void dump (void) const;
+ /// Explicitly return <value_> (by reference).
+ volatile long &value_i (void);
+
// ACE_ALLOC_HOOK_DECLARE;
// Declare the dynamic allocation hooks.
- /// Manage copying...
- ACE_Atomic_Op_Ex (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &);
-
- /**
- * Returns a reference to the underlying <ACE_LOCK>. This makes it
- * possible to acquire the lock explicitly, which can be useful in
- * some cases if you instantiate the <ACE_Atomic_Op> with an
- * <ACE_Recursive_Mutex> or <ACE_Process_Mutex>. NOTE: the right
- * name would be lock_, but HP/C++ will choke on that!
- */
- ACE_LOCK &mutex (void);
-
- /**
- * Explicitly return <value_> (by reference). This gives the user
- * full, unrestricted access to the underlying value. This method
- * will usually be used in conjunction with explicit access to the
- * lock. Use with care ;-)
- */
- TYPE &value_i (void);
+ /// Used during ACE object manager initialization to optimize the fast
+ /// atomic op implementation according to the number of CPUs.
+ static void init_functions (void);
private:
- /// Type of synchronization mechanism.
- ACE_LOCK &mutex_;
+ // This function cannot be supported by this template specialization.
+ // If you need access to an underlying lock, use the ACE_Atomic_Op_Ex
+ // template instead.
+ ACE_UNIMPLEMENTED_FUNC (ACE_Thread_Mutex &mutex (void))
/// Current object decorated by the atomic op.
- TYPE value_;
-};
-
-template <class ACE_LOCK, class TYPE>
-class ACE_Atomic_Op : public ACE_Atomic_Op_Ex <ACE_LOCK, TYPE>
-{
-public:
- /// Initialize <value_> to 0.
- ACE_Atomic_Op (void);
-
- /// Initialize <value_> to c.
- ACE_Atomic_Op (const TYPE &c);
-
- /// Manage copying...
- ACE_Atomic_Op (const ACE_Atomic_Op<ACE_LOCK, TYPE> &);
-
- /// Atomically assign i to <value_>.
- void operator= (const TYPE &i);
-
- /// Atomically assign <rhs> to <value_>.
- void operator= (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs);
-
- /// Atomically assign <rhs> to <value_>.
- void operator= (const ACE_Atomic_Op<ACE_LOCK, TYPE> &rhs);
-
-private:
- /// Type of synchronization mechanism.
- ACE_LOCK own_mutex_;
+ volatile long value_;
+
+ // Single-cpu atomic op implementations.
+ static long single_cpu_increment (volatile long *value);
+ static long single_cpu_decrement (volatile long *value);
+ static long single_cpu_exchange_add (volatile long *value, long rhs);
+
+ // Multi-cpu atomic op implementations.
+ static long multi_cpu_increment (volatile long *value);
+ static long multi_cpu_decrement (volatile long *value);
+ static long multi_cpu_exchange_add (volatile long *value, long rhs);
+
+ // Pointers to selected atomic op implementations.
+ static long (*increment_fn_) (volatile long *);
+ static long (*decrement_fn_) (volatile long *);
+ static long (*exchange_add_fn_) (volatile long *, long);
};
+#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
#if defined (__ACE_INLINE__)
-// On non-Win32 platforms, this code will be inlined
-#if !defined (ACE_WIN32)
#include "ace/Atomic_Op.i"
-#endif /* !ACE_WIN32 */
#endif /* __ACE_INLINE__ */
-#if defined (ACE_TEMPLATES_REQUIRE_SOURCE)
-
-#include "Atomic_Op.cpp"
-// On Win32 platforms, this code will be included as template source
-// code and will not be inlined. Therefore, we first turn off
-// ACE_INLINE, set it to be nothing, include the code, and then turn
-// ACE_INLINE back to its original setting. All this nonsense is
-// necessary, since the generic template code that needs to be
-// specialized cannot be inlined, else the compiler will ignore the
-// specialization code. Also, the specialization code *must* be
-// inlined or the compiler will ignore the specializations.
-#if defined (ACE_WIN32)
-#undef ACE_INLINE
-#define ACE_INLINE
-#include "ace/Atomic_Op.i"
-#undef ACE_INLINE
-#if defined (__ACE_INLINE__)
-#define ACE_INLINE inline
-#else
-#define ACE_INLINE
-#endif /* __ACE_INLINE__ */
-#endif /* ACE_WIN32 */
-#endif /* ACE_TEMPLATES_REQUIRE_SOURCE */
-
-
-#if defined (ACE_TEMPLATES_REQUIRE_PRAGMA)
-#pragma implementation ("Atomic_Op.cpp")
-#endif /* ACE_TEMPLATES_REQUIRE_PRAGMA */
-
#include "ace/post.h"
#endif /*ACE_ATOMIC_OP_H*/
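When the logic above defines ACE_HAS_BUILTIN_ATOMIC_OP, the specialization is picked up transparently by existing code. A typical use looks like this (a sketch assuming an ACE build environment):

    #include "ace/Atomic_Op.h"

    // On x86 builds that define ACE_HAS_BUILTIN_ATOMIC_OP, updates to this
    // counter go through xadd / Interlocked* primitives instead of taking
    // an ACE_Thread_Mutex on every operation.
    static ACE_Atomic_Op<ACE_Thread_Mutex, long> request_count (0);

    void handle_request (void)
    {
      ++request_count;                  // atomic pre-increment
    }

    long requests_so_far (void)
    {
      return request_count.value ();    // snapshot of the current value
    }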
diff --git a/ace/Atomic_Op.i b/ace/Atomic_Op.i
index f4aa14da51d..a13caa5e178 100644
--- a/ace/Atomic_Op.i
+++ b/ace/Atomic_Op.i
@@ -1,225 +1,145 @@
// -*- C++ -*-
// $Id$
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++ (void)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
- return ++this->value_;
-}
-
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator+= (const TYPE &i)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator+=");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
- return this->value_ += i;
-}
-
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-- (void)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator--");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
- return --this->value_;
-}
-
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-= (const TYPE &i)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-=");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
- return this->value_ -= i;
-}
-
-template <class ACE_LOCK, class TYPE> ACE_INLINE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs)
- : mutex_ (rhs.mutex_)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex");
- *this = rhs; // Invoke the assignment operator.
-}
-
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++ (int)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
- return this->value_++;
-}
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-- (int)
-{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator--");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
- return this->value_--;
-}
+#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)
-template <class ACE_LOCK, class TYPE> ACE_INLINE int
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator== (const TYPE &i) const
+ACE_INLINE
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::ACE_Atomic_Op (void)
+ : value_ (0)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator==");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
- return this->value_ == i;
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE int
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator!= (const TYPE &i) const
+ACE_INLINE
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::ACE_Atomic_Op (long c)
+ : value_ (c)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator!=");
- return !(*this == i);
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE int
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator>= (const TYPE &i) const
+ACE_INLINE
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::ACE_Atomic_Op (const ACE_Atomic_Op<ACE_Thread_Mutex, long> &rhs)
+ : value_ (rhs.value_)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator>=");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
- return this->value_ >= i;
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE int
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator> (const TYPE &rhs) const
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator++ (void)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator>");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
- return this->value_ > rhs;
+#if defined (WIN32)
+ return ::InterlockedIncrement (ACE_const_cast (long *, &this->value_));
+#else /* WIN32 */
+ return (*increment_fn_) (&this->value_);
+#endif /* WIN32 */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE int
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator<= (const TYPE &rhs) const
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator++ (int)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator<=");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
- return this->value_ <= rhs;
+#if defined (WIN32)
+ return ::InterlockedIncrement (ACE_const_cast (long *, &this->value_)) - 1;
+#else /* WIN32 */
+ return (*increment_fn_) (&this->value_) - 1;
+#endif /* WIN32 */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE int
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator< (const TYPE &rhs) const
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator-- (void)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator<");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
- return this->value_ < rhs;
+#if defined (WIN32)
+ return ::InterlockedDecrement (ACE_const_cast (long *, &this->value_));
+#else /* WIN32 */
+ return (*decrement_fn_) (&this->value_);
+#endif /* WIN32 */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE void
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator= (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs)
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator-- (int)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator=");
- if (&rhs == this)
- return; // Avoid deadlock...
- ACE_GUARD (ACE_LOCK, ace_mon, this->mutex_);
- // This will call ACE_Atomic_Op_Ex::TYPE(), which will ensure the value
- // of <rhs> is acquired atomically.
-
- this->value_ = rhs.value ();
+#if defined (WIN32)
+ return ::InterlockedDecrement (ACE_const_cast (long *, &this->value_)) + 1;
+#else /* WIN32 */
+ return (*decrement_fn_) (&this->value_) + 1;
+#endif /* WIN32 */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::value (void) const
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator+= (long rhs)
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::value");
- ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, this->value_);
- return this->value_;
+#if defined (WIN32) && defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
+ return ::InterlockedExchangeAdd (ACE_const_cast (long *, &this->value_), rhs) + rhs;
+#else /* WIN32 && ACE_HAS_INTERLOCKED_EXCHANGEADD */
+ return (*exchange_add_fn_) (&this->value_, rhs) + rhs;
+#endif /* WIN32 && ACE_HAS_INTERLOCKED_EXCHANGEADD */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE &
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::value_i (void)
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator-= (long rhs)
{
- // Explicitly return <value_> (by reference). This gives the user
- // full, unrestricted access to the underlying value. This method
- // will usually be used in conjunction with explicit access to the
- // lock. Use with care ;-)
- return this->value_;
+#if defined (WIN32) && defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
+ return ::InterlockedExchangeAdd (ACE_const_cast (long *, &this->value_), -rhs) - rhs;
+#else /* WIN32 && ACE_HAS_INTERLOCKED_EXCHANGEADD */
+ return (*exchange_add_fn_) (&this->value_, -rhs) - rhs;
+#endif /* WIN32 && ACE_HAS_INTERLOCKED_EXCHANGEADD */
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE void
-ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator= (const TYPE &i)
+ACE_INLINE int
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator== (long rhs) const
{
-// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator=");
- ACE_GUARD (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_);
- this->value_ = i;
+ return (this->value_ == rhs);
}
-//
-// ACE_Atomic_Op inline functions
-//
-
-template <class ACE_LOCK, class TYPE> ACE_INLINE void
-ACE_Atomic_Op<ACE_LOCK, TYPE>::operator= (const TYPE &i)
+ACE_INLINE int
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator!= (long rhs) const
{
- ACE_Atomic_Op_Ex <ACE_LOCK,TYPE> ::operator= (i);
+ return (this->value_ != rhs);
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE void
-ACE_Atomic_Op<ACE_LOCK, TYPE>::operator= (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs)
+ACE_INLINE int
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator>= (long rhs) const
{
- ACE_Atomic_Op_Ex <ACE_LOCK,TYPE> ::operator= (rhs);
+ return (this->value_ >= rhs);
}
-template <class ACE_LOCK, class TYPE> ACE_INLINE void
-ACE_Atomic_Op<ACE_LOCK, TYPE>::operator= (const ACE_Atomic_Op<ACE_LOCK, TYPE> &rhs)
+ACE_INLINE int
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator> (long rhs) const
{
- ACE_Atomic_Op_Ex <ACE_LOCK,TYPE> ::operator= (rhs);
+ return (this->value_ > rhs);
}
-// These specializations have been added to ACE_Atomic_Op_Ex to make the
-// implementation faster on Win32 that has OS support for doing this
-// quickly through methods like InterlockedIncrement and
-// InterlockedDecrement
-
-#if defined (ACE_WIN32)
-
-// FUZZ: disable check_for_inline
-
-ACE_TEMPLATE_METHOD_SPECIALIZATION
-inline long
-ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>::operator++ (void)
+ACE_INLINE int
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator<= (long rhs) const
{
- return ::InterlockedIncrement (&this->value_);
+ return (this->value_ <= rhs);
}
-ACE_TEMPLATE_METHOD_SPECIALIZATION
-inline long
-ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>::operator-- (void)
+ACE_INLINE int
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator< (long rhs) const
{
- return ::InterlockedDecrement (&this->value_);
+ return (this->value_ < rhs);
}
-ACE_TEMPLATE_METHOD_SPECIALIZATION
-inline void
-ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>::operator= (const long &i)
+ACE_INLINE void
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator= (long rhs)
{
- ::InterlockedExchange (&this->value_,
- i);
+ this->value_ = rhs;
}
-ACE_TEMPLATE_METHOD_SPECIALIZATION
-inline void
-ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>::operator= (const ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long> &rhs)
+ACE_INLINE void
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::operator= (const ACE_Atomic_Op<ACE_Thread_Mutex, long> &rhs)
{
- ::InterlockedExchange (&this->value_,
- rhs.value ());
+ this->value_ = rhs.value_;
}
-#if defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
-
-ACE_TEMPLATE_METHOD_SPECIALIZATION
-inline long
-ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>::operator+= (const long &i)
+ACE_INLINE long
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::value (void) const
{
- return ::InterlockedExchangeAdd (&this->value_, i);
+ return this->value_;
}
-ACE_TEMPLATE_METHOD_SPECIALIZATION
-inline long
-ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>::operator-= (const long &i)
+ACE_INLINE volatile long &
+ACE_Atomic_Op<ACE_Thread_Mutex, long>::value_i (void)
{
- return ::InterlockedExchangeAdd (&this->value_, -i);
+ return this->value_;
}
-#endif /* ACE_HAS_INTERLOCKED_EXCHANGEADD */
-
-#endif /* ACE_WIN32 */
+#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
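Two return conventions meet in the inline operators above: InterlockedIncrement/Decrement and the increment_fn_/decrement_fn_ routines return the new value, while InterlockedExchangeAdd and exchange_add_fn_ return the old one. That is why post-increment subtracts one and operator+= adds rhs back. The arithmetic, restated with GCC builtins (an illustration only, not the patch's code):

    #include <cassert>

    int main ()
    {
      volatile long v = 10;

      // "New value" convention (InterlockedIncrement style):
      long new_val = __sync_add_and_fetch (&v, 1);   // v is now 11
      long post    = new_val - 1;                    // what operator++(int) returns
      assert (post == 10 && v == 11);

      // "Old value" convention (InterlockedExchangeAdd style):
      long old_val = __sync_fetch_and_add (&v, 5);   // v is now 16
      long sum     = old_val + 5;                    // what operator+= returns
      assert (sum == 16 && v == 16);
      return 0;
    }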
diff --git a/ace/Atomic_Op_T.cpp b/ace/Atomic_Op_T.cpp
new file mode 100644
index 00000000000..d2bf65c73c5
--- /dev/null
+++ b/ace/Atomic_Op_T.cpp
@@ -0,0 +1,79 @@
+#ifndef ACE_ATOMIC_OP_T_C
+#define ACE_ATOMIC_OP_T_C
+
+#include "ace/Atomic_Op_T.h"
+
+#if !defined (ACE_LACKS_PRAGMA_ONCE)
+# pragma once
+#endif /* ACE_LACKS_PRAGMA_ONCE */
+
+#if !defined (__ACE_INLINE__)
+#include "ace/Atomic_Op_T.i"
+#endif /* __ACE_INLINE__ */
+
+
+ACE_ALLOC_HOOK_DEFINE(ACE_Atomic_Op_Ex)
+ACE_ALLOC_HOOK_DEFINE(ACE_Atomic_Op)
+
+ACE_RCSID(ace, Atomic_Op_T, "$Id$")
+
+// *************************************************
+template <class ACE_LOCK, class TYPE> ACE_LOCK &
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::mutex (void)
+{
+ // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::mutex");
+ return this->mutex_;
+}
+
+template <class ACE_LOCK, class TYPE> void
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::dump (void) const
+{
+ // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::dump");
+ ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
+ this->mutex_.dump ();
+ ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
+}
+
+template <class ACE_LOCK, class TYPE>
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex
+ (ACE_LOCK &mtx)
+ : mutex_ (mtx),
+ value_ (0)
+{
+ // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex");
+}
+
+template <class ACE_LOCK, class TYPE>
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex
+ (ACE_LOCK &mtx, const TYPE &c)
+ : mutex_ (mtx),
+ value_ (c)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex");
+}
+
+// ****************************************************************
+
+template <class ACE_LOCK, class TYPE>
+ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op (void)
+ : impl_ (this->own_mutex_)
+{
+ // ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op");
+}
+
+template <class ACE_LOCK, class TYPE>
+ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op (const TYPE &c)
+ : impl_ (this->own_mutex_, c)
+{
+ // ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op");
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op
+ (const ACE_Atomic_Op<ACE_LOCK, TYPE> &rhs)
+ : impl_ (this->own_mutex_, rhs.value ())
+{
+// ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op");
+}
+
+#endif /* ACE_ATOMIC_OP_T_C */
diff --git a/ace/Atomic_Op_T.h b/ace/Atomic_Op_T.h
new file mode 100644
index 00000000000..47f9c9bce28
--- /dev/null
+++ b/ace/Atomic_Op_T.h
@@ -0,0 +1,254 @@
+/* -*- C++ -*- */
+
+//=============================================================================
+/**
+ * @file Atomic_Op_T.h
+ *
+ * $Id$
+ *
+ * @author Douglas C. Schmidt <schmidt@uci.edu>
+ */
+//=============================================================================
+
+#ifndef ACE_ATOMIC_OP_T_H
+#define ACE_ATOMIC_OP_T_H
+#include "ace/pre.h"
+
+#include "ace/config-all.h"
+
+#if !defined (ACE_LACKS_PRAGMA_ONCE)
+# pragma once
+#endif /* ACE_LACKS_PRAGMA_ONCE */
+
+#include "ace/Synch.h"
+
+
+/**
+ * @class ACE_Atomic_Op_Ex
+ *
+ * @brief Transparently parameterizes synchronization into basic
+ * arithmetic operations.
+ *
+ * This class is described in an article in the July/August 1994
+ * issue of the C++ Report magazine. It implements a
+ * templatized version of the Decorator pattern from the GoF book.
+ *
+ * ACE_Atomic_Op_Ex objects must be constructed with a reference
+ * to an existing lock. A single lock can be shared between
+ * multiple ACE_Atomic_Op_Ex objects. If you do not require this
+ * ability consider using the ACE_Atomic_Op class instead, which
+ * may be able to take advantage of platform-specific
+ * optimisations to provide atomic operations without requiring a
+ * lock.
+ */
+template <class ACE_LOCK, class TYPE>
+class ACE_Atomic_Op_Ex
+{
+public:
+ // = Initialization methods.
+
+ /// Initialize <value_> to 0.
+ ACE_Atomic_Op_Ex (ACE_LOCK &mtx);
+
+ /// Initialize <value_> to c.
+ ACE_Atomic_Op_Ex (ACE_LOCK &mtx, const TYPE &c);
+
+ // = Accessors.
+
+ /// Atomically pre-increment <value_>.
+ TYPE operator++ (void);
+
+ /// Atomically post-increment <value_>.
+ TYPE operator++ (int);
+
+ /// Atomically increment <value_> by rhs.
+ TYPE operator+= (const TYPE &rhs);
+
+ /// Atomically pre-decrement <value_>.
+ TYPE operator-- (void);
+
+ /// Atomically post-decrement <value_>.
+ TYPE operator-- (int);
+
+ /// Atomically decrement <value_> by rhs.
+ TYPE operator-= (const TYPE &rhs);
+
+ /// Atomically compare <value_> with rhs.
+ int operator== (const TYPE &rhs) const;
+
+ /// Atomically compare <value_> with rhs.
+ int operator!= (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> greater than or equal to rhs.
+ int operator>= (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> greater than rhs.
+ int operator> (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> less than or equal to rhs.
+ int operator<= (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> less than rhs.
+ int operator< (const TYPE &rhs) const;
+
+ /// Atomically assign rhs to <value_>.
+ void operator= (const TYPE &rhs);
+
+ /// Atomically assign <rhs> to <value_>.
+ void operator= (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs);
+
+ /// Explicitly return <value_>.
+ TYPE value (void) const;
+
+ /// Dump the state of an object.
+ void dump (void) const;
+
+ // ACE_ALLOC_HOOK_DECLARE;
+ // Declare the dynamic allocation hooks.
+
+ /// Manage copying...
+ ACE_Atomic_Op_Ex (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &);
+
+ /**
+ * Returns a reference to the underlying <ACE_LOCK>. This makes it
+ * possible to acquire the lock explicitly, which can be useful in
+ * some cases if you instantiate the <ACE_Atomic_Op_Ex> with an
+ * <ACE_Recursive_Mutex> or <ACE_Process_Mutex>. NOTE: the right
+ * name would be lock_, but HP/C++ will choke on that!
+ */
+ ACE_LOCK &mutex (void);
+
+ /**
+ * Explicitly return <value_> (by reference). This gives the user
+ * full, unrestricted access to the underlying value. This method
+ * will usually be used in conjunction with explicit access to the
+ * lock. Use with care ;-)
+ */
+ TYPE &value_i (void);
+
+private:
+ /// Type of synchronization mechanism.
+ ACE_LOCK &mutex_;
+
+ /// Current object decorated by the atomic op.
+ TYPE value_;
+};
+
+/**
+ * @class ACE_Atomic_Op
+ *
+ * @brief Transparently parameterizes synchronization into basic
+ * arithmetic operations.
+ *
+ * This class is described in an article in the July/August 1994
+ * issue of the C++ Report magazine. It implements a
+ * templatized version of the Decorator pattern from the GoF book.
+ *
+ * Certain platforms may provide a template specialization for
+ * ACE_Atomic_Op <ACE_Thread_Mutex, long> that provides optimized
+ * atomic integer operations without actually requiring a mutex.
+ */
+template <class ACE_LOCK, class TYPE>
+class ACE_Atomic_Op
+{
+public:
+ /// Initialize <value_> to 0.
+ ACE_Atomic_Op (void);
+
+ /// Initialize <value_> to c.
+ ACE_Atomic_Op (const TYPE &c);
+
+ /// Manage copying...
+ ACE_Atomic_Op (const ACE_Atomic_Op<ACE_LOCK, TYPE> &c);
+
+ /// Atomically assign rhs to <value_>.
+ void operator= (const TYPE &rhs);
+
+ /// Atomically assign <rhs> to <value_>.
+ void operator= (const ACE_Atomic_Op<ACE_LOCK, TYPE> &rhs);
+
+ /// Atomically pre-increment <value_>.
+ TYPE operator++ (void);
+
+ /// Atomically post-increment <value_>.
+ TYPE operator++ (int);
+
+ /// Atomically increment <value_> by rhs.
+ TYPE operator+= (const TYPE &rhs);
+
+ /// Atomically pre-decrement <value_>.
+ TYPE operator-- (void);
+
+ /// Atomically post-decrement <value_>.
+ TYPE operator-- (int);
+
+ /// Atomically decrement <value_> by rhs.
+ TYPE operator-= (const TYPE &rhs);
+
+ /// Atomically compare <value_> with rhs.
+ int operator== (const TYPE &rhs) const;
+
+ /// Atomically compare <value_> with rhs.
+ int operator!= (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> greater than or equal to rhs.
+ int operator>= (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> greater than rhs.
+ int operator> (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> less than or equal to rhs.
+ int operator<= (const TYPE &rhs) const;
+
+ /// Atomically check if <value_> less than rhs.
+ int operator< (const TYPE &rhs) const;
+
+ /// Explicitly return <value_>.
+ TYPE value (void) const;
+
+ /// Dump the state of an object.
+ void dump (void) const;
+
+ /**
+ * Returns a reference to the underlying <ACE_LOCK>. This makes it
+ * possible to acquire the lock explicitly, which can be useful in
+ * some cases if you instantiate the <ACE_Atomic_Op> with an
+ * <ACE_Recursive_Mutex> or <ACE_Process_Mutex>.
+ *
+ * NOTE: This member function is deprecated and so may go away in
+ * the future. If you need access to the underlying mutex, consider
+ * using the ACE_Atomic_Op_Ex template instead.
+ */
+ ACE_LOCK &mutex (void);
+
+ /**
+ * Explicitly return <value_> (by reference). This gives the user
+ * full, unrestricted access to the underlying value. This method
+ * will usually be used in conjunction with explicit access to the
+ * lock. Use with care ;-)
+ */
+ TYPE &value_i (void);
+
+private:
+ /// Type of synchronization mechanism.
+ ACE_LOCK own_mutex_;
+
+ /// Underlying atomic op implementation.
+ ACE_Atomic_Op_Ex <ACE_LOCK, TYPE> impl_;
+};
+
+
+#if defined (__ACE_INLINE__)
+#include "ace/Atomic_Op_T.i"
+#endif /* __ACE_INLINE__ */
+
+#if defined (ACE_TEMPLATES_REQUIRE_SOURCE)
+#include "Atomic_Op_T.cpp"
+#endif /* ACE_TEMPLATES_REQUIRE_SOURCE */
+
+#if defined (ACE_TEMPLATES_REQUIRE_PRAGMA)
+#pragma implementation ("Atomic_Op_T.cpp")
+#endif /* ACE_TEMPLATES_REQUIRE_PRAGMA */
+
+#include "ace/post.h"
+#endif /*ACE_ATOMIC_OP_T_H*/
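As the class comments above spell out, ACE_Atomic_Op_Ex borrows its lock by reference, so several counters can deliberately share one mutex, while ACE_Atomic_Op owns its lock (own_mutex_) and delegates everything to an internal ACE_Atomic_Op_Ex (impl_). A sketch of the shared-lock usage (assuming an ACE build environment):

    #include "ace/Atomic_Op_T.h"
    #include "ace/Synch.h"

    // One mutex serves both counters rather than each allocating its own.
    ACE_Thread_Mutex stats_lock;

    ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long> bytes_in  (stats_lock);
    ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long> bytes_out (stats_lock);

    void record_exchange (long in, long out)
    {
      bytes_in += in;     // each operation acquires stats_lock internally
      bytes_out += out;
    }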
diff --git a/ace/Atomic_Op_T.i b/ace/Atomic_Op_T.i
new file mode 100644
index 00000000000..7486d582db4
--- /dev/null
+++ b/ace/Atomic_Op_T.i
@@ -0,0 +1,260 @@
+// -*- C++ -*-
+// $Id$
+
+//
+// ACE_Atomic_Op_Ex inline functions
+//
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++ (void)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
+ return ++this->value_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator+= (const TYPE &rhs)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator+=");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
+ return this->value_ += rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-- (void)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator--");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
+ return --this->value_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-= (const TYPE &rhs)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-=");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
+ return this->value_ -= rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs)
+ : mutex_ (rhs.mutex_)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex");
+ *this = rhs; // Invoke the assignment operator.
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++ (int)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator++");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
+ return this->value_++;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator-- (int)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator--");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, this->mutex_, this->value_);
+ return this->value_--;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator== (const TYPE &rhs) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator==");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
+ return this->value_ == rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator!= (const TYPE &rhs) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator!=");
+ return !(*this == rhs);
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator>= (const TYPE &rhs) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator>=");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
+ return this->value_ >= rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator> (const TYPE &rhs) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator>");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
+ return this->value_ > rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator<= (const TYPE &rhs) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator<=");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
+ return this->value_ <= rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator< (const TYPE &rhs) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator<");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, 0);
+ return this->value_ < rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE void
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator= (const ACE_Atomic_Op_Ex<ACE_LOCK, TYPE> &rhs)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator=");
+ if (&rhs == this)
+ return; // Avoid deadlock...
+ ACE_GUARD (ACE_LOCK, ace_mon, this->mutex_);
+ // This will call ACE_Atomic_Op_Ex::TYPE(), which will ensure the value
+ // of <rhs> is acquired atomically.
+
+ this->value_ = rhs.value ();
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::value (void) const
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::value");
+ ACE_GUARD_RETURN (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_, this->value_);
+ return this->value_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE &
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::value_i (void)
+{
+ // Explicitly return <value_> (by reference). This gives the user
+ // full, unrestricted access to the underlying value. This method
+ // will usually be used in conjunction with explicit access to the
+ // lock. Use with care ;-)
+ return this->value_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE void
+ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator= (const TYPE &rhs)
+{
+// ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::operator=");
+ ACE_GUARD (ACE_LOCK, ace_mon, (ACE_LOCK &) this->mutex_);
+ this->value_ = rhs;
+}
+
+//
+// ACE_Atomic_Op inline functions
+//
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE void
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator= (const TYPE &i)
+{
+ this->impl_ = i;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE void
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator= (const ACE_Atomic_Op<ACE_LOCK, TYPE> &rhs)
+{
+ this->impl_ = rhs.impl_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator++ (void)
+{
+ return ++this->impl_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator++ (int)
+{
+ return this->impl_++;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator+= (const TYPE &rhs)
+{
+ return this->impl_ += rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator-- (void)
+{
+ return --this->impl_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator-- (int)
+{
+ return this->impl_--;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator-= (const TYPE &rhs)
+{
+ return this->impl_ -= rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator== (const TYPE &rhs) const
+{
+ return this->impl_ == rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator!= (const TYPE &rhs) const
+{
+ return this->impl_ != rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator>= (const TYPE &rhs) const
+{
+ return this->impl_ >= rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator> (const TYPE &rhs) const
+{
+ return this->impl_ > rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator<= (const TYPE &rhs) const
+{
+ return this->impl_ <= rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE int
+ACE_Atomic_Op<ACE_LOCK, TYPE>::operator< (const TYPE &rhs) const
+{
+ return this->impl_ < rhs;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE
+ACE_Atomic_Op<ACE_LOCK, TYPE>::value (void) const
+{
+ return this->impl_.value ();
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE void
+ACE_Atomic_Op<ACE_LOCK, TYPE>::dump (void) const
+{
+ return this->impl_.dump ();
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE ACE_LOCK &
+ACE_Atomic_Op<ACE_LOCK, TYPE>::mutex (void)
+{
+ return this->own_mutex_;
+}
+
+template <class ACE_LOCK, class TYPE> ACE_INLINE TYPE &
+ACE_Atomic_Op<ACE_LOCK, TYPE>::value_i (void)
+{
+ return this->impl_.value_i ();
+}
diff --git a/ace/Makefile.ace b/ace/Makefile.ace
index 5c087f94b4b..e546cefb2e3 100644
--- a/ace/Makefile.ace
+++ b/ace/Makefile.ace
@@ -74,6 +74,7 @@ LOGGING_FILES = \
Trace
THREADS_FILES = \
Activation_Queue \
+ Atomic_Op \
Process \
Process_Manager \
Synch \
@@ -259,6 +260,7 @@ TEMPLATE_FILES = \
Unbounded_Queue \
Asynch_Acceptor \
Asynch_Connector \
+ Atomic_Op_T \
Auto_IncDec_T \
Auto_Ptr \
Based_Pointer_T \
diff --git a/ace/Makefile.bor b/ace/Makefile.bor
index 05dd3d793b4..8008811bca8 100644
--- a/ace/Makefile.bor
+++ b/ace/Makefile.bor
@@ -35,6 +35,7 @@ OBJFILES = \
$(OBJDIR)\ATM_Params.obj \
$(OBJDIR)\ATM_QoS.obj \
$(OBJDIR)\ATM_Stream.obj \
+ $(OBJDIR)\Atomic_Op.obj \
$(OBJDIR)\Based_Pointer_Repository.obj \
$(OBJDIR)\Base_Thread_Adapter.obj \
$(OBJDIR)\Basic_Types.obj \
@@ -255,7 +256,6 @@ INCLUDES = \
Asynch_Acceptor.cpp \
Asynch_Connector.cpp \
Array_Base.cpp \
- Atomic_Op.cpp \
Node.cpp \
Unbounded_Set.cpp \
Unbounded_Queue.cpp \
diff --git a/ace/OS.cpp b/ace/OS.cpp
index 369fa166aa4..ae4b6b8ecaf 100644
--- a/ace/OS.cpp
+++ b/ace/OS.cpp
@@ -7454,3 +7454,41 @@ ACE_OS::connect (ACE_HANDLE handle,
# endif /* ACE_HAS_WINSOCK2 */
}
#endif // ACE_HAS_WINCE
+
+long
+ACE_OS::num_processors (void)
+{
+ ACE_OS_TRACE ("ACE_OS::num_processors");
+
+#if defined (ACE_WIN32) || defined (ACE_WIN64)
+ SYSTEM_INFO sys_info;
+ ::GetSystemInfo (&sys_info);
+ return sys_info.dwNumberOfProcessors;
+#elif defined (linux) || defined (sun)
+ return ::sysconf (_SC_NPROCESSORS_CONF);
+#else
+ ACE_NOTSUP_RETURN (-1);
+#endif
+}
+
+long
+ACE_OS::num_processors_online (void)
+{
+ ACE_OS_TRACE ("ACE_OS::num_processors_online");
+
+#if defined (ACE_WIN32) || defined (ACE_WIN64)
+ SYSTEM_INFO sys_info;
+ ::GetSystemInfo (&sys_info);
+ return sys_info.dwNumberOfProcessors;
+#elif defined (linux) || defined (sun)
+ return ::sysconf (_SC_NPROCESSORS_ONLN);
+#elif defined (hpux)
+ struct pst_dynamic psd;
+ if (::pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) != -1)
+ return psd.psd_proc_cnt;
+ else
+ return -1;
+#else
+ ACE_NOTSUP_RETURN (-1);
+#endif
+}
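A quick check of the two new calls (a sketch assuming the usual ACE logging macros). Note that on Win32 both functions report dwNumberOfProcessors, so the configured and online counts coincide there; the distinction only matters on the sysconf/pstat platforms above.

    #include "ace/OS.h"
    #include "ace/Log_Msg.h"

    int ACE_TMAIN (int, ACE_TCHAR *[])
    {
      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("CPUs configured: %d, CPUs online: %d\n"),
                  (int) ACE_OS::num_processors (),
                  (int) ACE_OS::num_processors_online ()));
      return 0;
    }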
diff --git a/ace/OS.h b/ace/OS.h
index 7a3ecf768a1..177834b7f3e 100644
--- a/ace/OS.h
+++ b/ace/OS.h
@@ -5874,6 +5874,12 @@ public:
# endif /* ACE_LACKS_NATIVE_STRPTIME */
#endif /* ACE_HAS_STRPTIME */
+ /// Get the number of CPUs configured in the machine.
+ static long num_processors (void);
+
+ /// Get the number of CPUs currently online.
+ static long num_processors_online (void);
+
private:
#if defined (ACE_LACKS_WRITEV)
diff --git a/ace/Object_Manager.cpp b/ace/Object_Manager.cpp
index 500bcde38b9..54316097327 100644
--- a/ace/Object_Manager.cpp
+++ b/ace/Object_Manager.cpp
@@ -16,6 +16,7 @@
#include "ace/Malloc.h"
#include "ace/Signal.h"
#include "ace/Framework_Component.h"
+#include "ace/Atomic_Op.h"
#if !defined (__ACE_INLINE__)
# include "ace/Object_Manager.i"
@@ -184,6 +185,10 @@ ACE_Object_Manager::init (void)
// and register with it for chained fini ().
ACE_OS_Object_Manager::instance ()->next_ = this;
+# if defined (ACE_HAS_BUILTIN_ATOMIC_OP)
+ ACE_Atomic_Op<ACE_Thread_Mutex, long>::init_functions ();
+# endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
+
# if !defined (ACE_LACKS_ACE_SVCCONF)
// Construct the ACE_Service_Config's signal handler.
ACE_NEW_RETURN (ace_service_config_sig_handler_,
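The hook above binds the increment/decrement/exchange-add function pointers exactly once, while the ACE_Object_Manager comes up and before any ACE_Atomic_Op<ACE_Thread_Mutex, long> is exercised. A sketch of the intended ordering, assuming ACE::init ()/ACE::fini () from ace/ACE.h (in many ACE configurations the Object Manager is started automatically before main (), so the explicit call is optional):

    #include "ace/ACE.h"
    #include "ace/Atomic_Op.h"

    int main (int, char *[])
    {
      // Bringing up the Object Manager runs init_functions () (when
      // ACE_HAS_BUILTIN_ATOMIC_OP is defined) before the counter is used.
      ACE::init ();

      ACE_Atomic_Op<ACE_Thread_Mutex, long> counter (0);
      ++counter;

      long final_value = counter.value ();
      ACE::fini ();
      return final_value == 1 ? 0 : 1;
    }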
diff --git a/ace/ace.mpc b/ace/ace.mpc
index 41ee94f9bb8..13d2de77a3a 100644
--- a/ace/ace.mpc
+++ b/ace/ace.mpc
@@ -82,6 +82,7 @@ project : acelib, core {
Threads {
Activation_Queue.cpp
+ Atomic_Op.cpp
Process.cpp
Process_Manager.cpp
Synch.cpp
@@ -286,6 +287,7 @@ project : acelib, core {
Acceptor.cpp
Active_Map_Manager_T.cpp
Array_Base.cpp
+ Atomic_Op_T.cpp
Node.cpp
Unbounded_Set.cpp
Unbounded_Queue.cpp
diff --git a/tests/Atomic_Op_Test.cpp b/tests/Atomic_Op_Test.cpp
index e2dd6dffff6..1e691ec40e6 100644
--- a/tests/Atomic_Op_Test.cpp
+++ b/tests/Atomic_Op_Test.cpp
@@ -26,6 +26,8 @@ ACE_RCSID(tests, Atomic_Op_Test, "$Id$")
#if defined (ACE_HAS_THREADS)
+enum { TEST_ITERATIONS = 1000000 };
+
int
ACE_TMAIN (int, ACE_TCHAR *[])
{
@@ -35,31 +37,163 @@ ACE_TMAIN (int, ACE_TCHAR *[])
ACE_ASSERT (foo == 5);
- ++foo;
+ long result = ++foo;
+ ACE_ASSERT (foo == 6);
+ ACE_ASSERT (result == 6);
+
+ result = --foo;
+ ACE_ASSERT (foo == 5);
+ ACE_ASSERT (result == 5);
+
+ result = foo++;
ACE_ASSERT (foo == 6);
+ ACE_ASSERT (result == 5);
- --foo;
+ result = foo--;
ACE_ASSERT (foo == 5);
+ ACE_ASSERT (result == 6);
- foo += 10;
+ result = foo += 10;
ACE_ASSERT (foo == 15);
+ ACE_ASSERT (result == 15);
- foo -= 10;
+ result = foo -= 10;
ACE_ASSERT (foo == 5);
+ ACE_ASSERT (result == 5);
foo = 5L;
ACE_ASSERT (foo == 5);
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <long> increment %D\n")));
+ int i;
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ ++foo;
+ ++foo;
+ ++foo;
+ ++foo;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <long> increment %D\n")));
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <long> decrement %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ --foo;
+ --foo;
+ --foo;
+ --foo;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <long> decrement %D\n")));
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <long> addition %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ foo += 5;
+ foo += 5;
+ foo += 5;
+ foo += 5;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <long> addition %D\n")));
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <long> subtraction %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ foo -= 5;
+ foo -= 5;
+ foo -= 5;
+ foo -= 5;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <long> subtraction %D\n")));
+
+ ACE_Atomic_Op <ACE_Thread_Mutex, int> bar (5);
+
+ ACE_ASSERT (bar == 5);
+
+ result = ++bar;
+ ACE_ASSERT (bar == 6);
+ ACE_ASSERT (result == 6);
+
+ result = --bar;
+ ACE_ASSERT (bar == 5);
+ ACE_ASSERT (result == 5);
+
+ result = bar++;
+ ACE_ASSERT (bar == 6);
+ ACE_ASSERT (result == 5);
+
+ result = bar--;
+ ACE_ASSERT (bar == 5);
+ ACE_ASSERT (result == 6);
+
+ result = bar += 10;
+ ACE_ASSERT (bar == 15);
+ ACE_ASSERT (result == 15);
+
+ result = bar -= 10;
+ ACE_ASSERT (bar == 5);
+ ACE_ASSERT (result == 5);
+
+ bar = 5L;
+ ACE_ASSERT (bar == 5);
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <int> increment %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ ++bar;
+ ++bar;
+ ++bar;
+ ++bar;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <int> increment %D\n")));
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <int> decrement %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ --bar;
+ --bar;
+ --bar;
+ --bar;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <int> decrement %D\n")));
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <int> addition %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ bar += 5;
+ bar += 5;
+ bar += 5;
+ bar += 5;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <int> addition %D\n")));
+
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Starting <int> subtraction %D\n")));
+ for (i = 0; i < TEST_ITERATIONS; ++i)
+ {
+ bar -= 5;
+ bar -= 5;
+ bar -= 5;
+ bar -= 5;
+ }
+ ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Ending <int> subtraction %D\n")));
+
ACE_END_TEST;
return 0;
}
#if defined (ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION)
+#if !defined (ACE_HAS_BUILTIN_ATOMIC_OP)
template class ACE_Atomic_Op<ACE_Thread_Mutex, long>;
template class ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>;
+#endif /* !ACE_HAS_BUILTIN_ATOMIC_OP */
+template class ACE_Atomic_Op<ACE_Thread_Mutex, int>;
+template class ACE_Atomic_Op_Ex<ACE_Thread_Mutex, int>;
#elif defined (ACE_HAS_TEMPLATE_INSTANTIATION_PRAGMA)
+#if !defined (ACE_HAS_BUILTIN_ATOMIC_OP)
#pragma instantiate ACE_Atomic_Op<ACE_Thread_Mutex, long>
#pragma instantiate ACE_Atomic_Op_Ex<ACE_Thread_Mutex, long>
+#endif /* !ACE_HAS_BUILTIN_ATOMIC_OP */
+#pragma instantiate ACE_Atomic_Op<ACE_Thread_Mutex, int>
+#pragma instantiate ACE_Atomic_Op_Ex<ACE_Thread_Mutex, int>
#endif /* ACE_HAS_EXPLICIT_TEMPLATE_INSTANTIATION */
#else