author    Aliaksey Kandratsenka <alk@tut.by>  2014-01-04 13:54:24 -0800
committer Aliaksey Kandratsenka <alk@tut.by>  2014-01-04 13:59:56 -0800
commit    1458ee2239e0791567e69112931dc17eb0456cf8 (patch)
tree      b09fb9168c4f5ea93f187473ee81f5d2c138027c
parent    6630b24e27c6a62727fe73aaae21dcc7364b8fee (diff)
download  gperftools-1458ee2239e0791567e69112931dc17eb0456cf8.tar.gz
issue-596: removed unused AtomicIncrement operation
There's no need for us to attempt to maintain Google's atomic ops code in the era of C++11.
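For context: C++11's <atomic> header provides this operation portably, which is why the per-architecture increment assembly is no longer worth carrying. A minimal sketch (not part of this commit; the helper names are illustrative, not gperftools APIs) of what the removed Barrier_AtomicIncrement / NoBarrier_AtomicIncrement correspond to:

// Illustrative mapping of the removed operations onto C++11 <atomic>.
// These helpers are hypothetical and are not added anywhere in gperftools.
#include <atomic>
#include <cstdint>

// Barrier_AtomicIncrement: fully ordered increment, returns the new value.
inline int32_t barrier_atomic_increment(std::atomic<int32_t>* ptr,
                                        int32_t increment) {
  // fetch_add returns the old value, so add the increment back on.
  return ptr->fetch_add(increment, std::memory_order_seq_cst) + increment;
}

// NoBarrier_AtomicIncrement: atomic, but no ordering guarantees.
inline int32_t no_barrier_atomic_increment(std::atomic<int32_t>* ptr,
                                           int32_t increment) {
  return ptr->fetch_add(increment, std::memory_order_relaxed) + increment;
}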
-rw-r--r--  src/base/atomicops-internals-arm-generic.h  |  34
-rw-r--r--  src/base/atomicops-internals-arm-v6plus.h   |  82
-rw-r--r--  src/base/atomicops-internals-linuxppc.h     |  20
-rw-r--r--  src/base/atomicops-internals-macosx.h       |  20
-rw-r--r--  src/base/atomicops-internals-mips.h         |  70
-rw-r--r--  src/base/atomicops-internals-windows.h      |  63
-rw-r--r--  src/base/atomicops-internals-x86.h          |  67
-rw-r--r--  src/base/atomicops.h                        |  20
-rw-r--r--  src/tests/atomicops_unittest.cc             | 114
9 files changed, 1 insertion(+), 489 deletions(-)
diff --git a/src/base/atomicops-internals-arm-generic.h b/src/base/atomicops-internals-arm-generic.h
index 66d45df..d0f9413 100644
--- a/src/base/atomicops-internals-arm-generic.h
+++ b/src/base/atomicops-internals-arm-generic.h
@@ -33,7 +33,7 @@
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+// LinuxKernelCmpxchg is from Google Gears.
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
@@ -102,26 +102,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
@@ -201,18 +181,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- NotImplementedFatalError("NoBarrier_AtomicIncrement");
- return 0;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- NotImplementedFatalError("Barrier_AtomicIncrement");
- return 0;
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
diff --git a/src/base/atomicops-internals-arm-v6plus.h b/src/base/atomicops-internals-arm-v6plus.h
index 4026706..cd1b698 100644
--- a/src/base/atomicops-internals-arm-v6plus.h
+++ b/src/base/atomicops-internals-arm-v6plus.h
@@ -117,39 +117,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 tmp, res;
- __asm__ __volatile__(
- "1:\n"
- "ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]\n"
- "teq %0, #0\n"
- "bne 1b"
- : "=&r" (tmp), "=&r"(res)
- : "r" (ptr), "r"(increment)
- : "cc", "memory");
- return res;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 tmp, res;
- __asm__ __volatile__(
- "1:\n"
- "ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "dmb\n"
- "strex %0, %1, [%2]\n"
- "teq %0, #0\n"
- "bne 1b"
- : "=&r" (tmp), "=&r"(res)
- : "r" (ptr), "r"(increment)
- : "cc", "memory");
- return res;
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
@@ -252,43 +219,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- int store_failed;
- Atomic64 res;
- __asm__ __volatile__(
- "1:\n"
- "ldrexd %1, [%2]\n"
- "adds %Q1, %Q1, %Q3\n"
- "adc %R1, %R1, %R3\n"
- "strexd %0, %1, [%2]\n"
- "teq %0, #0\n"
- "bne 1b"
- : "=&r" (store_failed), "=&r"(res)
- : "r" (ptr), "r"(increment)
- : "cc", "memory");
- return res;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- int store_failed;
- Atomic64 res;
- __asm__ __volatile__(
- "1:\n"
- "ldrexd %1, [%2]\n"
- "adds %Q1, %Q1, %Q3\n"
- "adc %R1, %R1, %R3\n"
- "dmb\n"
- "strexd %0, %1, [%2]\n"
- "teq %0, #0\n"
- "bne 1b"
- : "=&r" (store_failed), "=&r"(res)
- : "r" (ptr), "r"(increment)
- : "cc", "memory");
- return res;
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
int store_failed;
Atomic64 dummy;
@@ -347,18 +277,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
return 0;
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- NotImplementedFatalError("NoBarrier_AtomicIncrement");
- return 0;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- NotImplementedFatalError("Barrier_AtomicIncrement");
- return 0;
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
diff --git a/src/base/atomicops-internals-linuxppc.h b/src/base/atomicops-internals-linuxppc.h
index dfc4964..b52fdf0 100644
--- a/src/base/atomicops-internals-linuxppc.h
+++ b/src/base/atomicops-internals-linuxppc.h
@@ -184,16 +184,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
return old_value;
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
@@ -335,16 +325,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
return old_value;
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
diff --git a/src/base/atomicops-internals-macosx.h b/src/base/atomicops-internals-macosx.h
index 2d6f405..b5130d4 100644
--- a/src/base/atomicops-internals-macosx.h
+++ b/src/base/atomicops-internals-macosx.h
@@ -148,16 +148,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
return Acquire_AtomicExchange(ptr, new_value);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
@@ -248,16 +238,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
return Acquire_AtomicExchange(ptr, new_value);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
diff --git a/src/base/atomicops-internals-mips.h b/src/base/atomicops-internals-mips.h
index 938612d..4bfd7f6 100644
--- a/src/base/atomicops-internals-mips.h
+++ b/src/base/atomicops-internals-mips.h
@@ -112,47 +112,11 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
return old;
}
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment)
-{
- Atomic32 temp, temp2;
- __asm__ volatile(
- ".set push \n"
- ".set noreorder \n"
-
- "1: \n"
- "ll %0, %2 \n" // temp = *ptr
- "addu %1, %0, %3 \n" // temp2 = temp + increment
- "sc %1, %2 \n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b \n" // start again on atomic error
- "addu %1, %0, %3 \n" // temp2 = temp + increment
-
- ".set pop \n"
- : "=&r" (temp), "=&r" (temp2),
- "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
- : "memory"
- );
- // temp2 now holds the final value.
- return temp2;
-}
-
inline void MemoryBarrier()
{
__asm__ volatile("sync" : : : "memory");
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment)
-{
- MemoryBarrier();
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
@@ -283,31 +247,6 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return old;
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment)
-{
- Atomic64 temp, temp2;
- __asm__ volatile(
- ".set push \n"
- ".set noreorder \n"
-
- "1: \n"
- "lld %0, %2 \n" // temp = *ptr
- "daddu %1, %0, %3 \n" // temp2 = temp + increment
- "scd %1, %2 \n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b \n" // start again on atomic error
- "daddu %1, %0, %3 \n" // temp2 = temp + increment
-
- ".set pop \n"
- : "=&r" (temp), "=&r" (temp2),
- "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
- : "memory"
- );
- // temp2 now holds the final value.
- return temp2;
-}
-
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value)
{
@@ -316,15 +255,6 @@ inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
return old_value;
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment)
-{
- MemoryBarrier();
- Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value)
diff --git a/src/base/atomicops-internals-windows.h b/src/base/atomicops-internals-windows.h
index 3efb527..93ced87 100644
--- a/src/base/atomicops-internals-windows.h
+++ b/src/base/atomicops-internals-windows.h
@@ -150,18 +150,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return FastInterlockedExchangeAdd(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(increment)) + increment;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
} // namespace base::subtle
} // namespace base
@@ -306,18 +294,6 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return reinterpret_cast<Atomic64>(result);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return FastInterlockedExchangeAdd64(
- reinterpret_cast<volatile LONGLONG*>(ptr),
- static_cast<LONGLONG>(increment)) + increment;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
@@ -407,45 +383,6 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
#endif
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
-#if 0 // Not implemented
- Atomic64 temp = increment;
- __asm__ __volatile__(
- "0:\n\t"
- "movl (%3), %%ebx\n\t" // Move 64-bit increment into
- "movl 4(%3), %%ecx\n\t" // ecx:ebx
- "movl (%2), %%eax\n\t" // Read contents of ptr into
- "movl 4(%2), %%edx\n\t" // edx:eax
- "add %%eax, %%ebx\n\t" // sum => ecx:ebx
- "adc %%edx, %%ecx\n\t" // edx:eax still has old *ptr
- "lock; cmpxchg8b (%2)\n\t"// Attempt cmpxchg; if *ptr
- "jnz 0b\n\t" // is no longer edx:eax, loop
- : "=A"(temp), "+m"(*ptr)
- : "D" (ptr), "S" (&increment)
- : "memory", "%ebx", "%ecx");
- // temp now contains the previous value of *ptr
- return temp + increment;
-#else
- NotImplementedFatalError("NoBarrier_AtomicIncrement");
- return 0;
-#endif
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
-#if 0 // Not implemented
- Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return new_val;
-#else
- NotImplementedFatalError("Barrier_AtomicIncrement");
- return 0;
-#endif
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptrValue, Atomic64 value)
{
__asm {
diff --git a/src/base/atomicops-internals-x86.h b/src/base/atomicops-internals-x86.h
index f194823..723ad28 100644
--- a/src/base/atomicops-internals-x86.h
+++ b/src/base/atomicops-internals-x86.h
@@ -105,29 +105,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
@@ -243,29 +220,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
return NoBarrier_AtomicExchange(ptr, new_value);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
@@ -378,27 +332,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
return NoBarrier_AtomicExchange(ptr, new_val);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 old_val, new_val;
-
- do {
- old_val = *ptr;
- new_val = old_val + increment;
- } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
-
- return old_val + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return new_val;
-}
-
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
"movq %%mm0, %0\n\t" // moves (ptr could be read-only)
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index d2ae550..f3cf1ad 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -174,21 +174,6 @@ inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory
-// barriers.
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
@@ -266,9 +251,6 @@ Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
-Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
-Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
@@ -289,8 +271,6 @@ Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
diff --git a/src/tests/atomicops_unittest.cc b/src/tests/atomicops_unittest.cc
index 3de4ea4..aa82a6b 100644
--- a/src/tests/atomicops_unittest.cc
+++ b/src/tests/atomicops_unittest.cc
@@ -38,76 +38,6 @@
#define GG_ULONGLONG(x) static_cast<uint64>(x)
-template <class AtomicType>
-static void TestAtomicIncrement(AtomicType (*atomic_increment_func)
- (volatile AtomicType*, AtomicType)) {
- // For now, we just test single threaded execution
-
- // use a guard value to make sure the atomic_increment_func doesn't go
- // outside the expected address bounds. This is in particular to
- // test that some future change to the asm code doesn't cause the
- // 32-bit atomic_increment_func doesn't do the wrong thing on 64-bit
- // machines.
- struct {
- AtomicType prev_word;
- AtomicType count;
- AtomicType next_word;
- } s;
-
- AtomicType prev_word_value, next_word_value;
- memset(&prev_word_value, 0xFF, sizeof(AtomicType));
- memset(&next_word_value, 0xEE, sizeof(AtomicType));
-
- s.prev_word = prev_word_value;
- s.count = 0;
- s.next_word = next_word_value;
-
- ASSERT_EQ(1, (*atomic_increment_func)(&s.count, 1));
- ASSERT_EQ(1, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(3, (*atomic_increment_func)(&s.count, 2));
- ASSERT_EQ(3, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(6, (*atomic_increment_func)(&s.count, 3));
- ASSERT_EQ(6, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(3, (*atomic_increment_func)(&s.count, -3));
- ASSERT_EQ(3, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(1, (*atomic_increment_func)(&s.count, -2));
- ASSERT_EQ(1, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(0, (*atomic_increment_func)(&s.count, -1));
- ASSERT_EQ(0, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(-1, (*atomic_increment_func)(&s.count, -1));
- ASSERT_EQ(-1, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(-5, (*atomic_increment_func)(&s.count, -4));
- ASSERT_EQ(-5, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-
- ASSERT_EQ(0, (*atomic_increment_func)(&s.count, 5));
- ASSERT_EQ(0, s.count);
- ASSERT_EQ(prev_word_value, s.prev_word);
- ASSERT_EQ(next_word_value, s.next_word);
-}
-
#define NUM_BITS(T) (sizeof(T) * 8)
@@ -160,21 +90,6 @@ static void TestAtomicExchange(AtomicType (*atomic_exchange_func)
}
-template <class AtomicType>
-static void TestAtomicIncrementBounds(AtomicType (*atomic_increment_func)
- (volatile AtomicType*, AtomicType)) {
- // Test increment at the half-width boundary of the atomic type.
- // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
- AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
- AtomicType value = test_val - 1;
- AtomicType new_value = (*atomic_increment_func)(&value, 1);
- ASSERT_EQ(test_val, value);
- ASSERT_EQ(value, new_value);
-
- (*atomic_increment_func)(&value, -1);
- ASSERT_EQ(test_val - 1, value);
-}
-
// This is a simple sanity check that values are correct. Not testing
// atomicity
template <class AtomicType>
@@ -235,42 +150,13 @@ static void TestAtomicOps() {
TestAtomicExchange<AtomicType>(base::subtle::Acquire_AtomicExchange);
TestAtomicExchange<AtomicType>(base::subtle::Release_AtomicExchange);
- TestAtomicIncrementBounds<AtomicType>(
- base::subtle::NoBarrier_AtomicIncrement);
- TestAtomicIncrementBounds<AtomicType>(
- base::subtle::Barrier_AtomicIncrement);
-
TestStore<AtomicType>();
TestLoad<AtomicType>();
}
int main(int argc, char** argv) {
- TestAtomicIncrement<AtomicWord>(base::subtle::NoBarrier_AtomicIncrement);
- TestAtomicIncrement<AtomicWord>(base::subtle::Barrier_AtomicIncrement);
- TestAtomicIncrement<Atomic32>(base::subtle::NoBarrier_AtomicIncrement);
- TestAtomicIncrement<Atomic32>(base::subtle::Barrier_AtomicIncrement);
-
TestAtomicOps<AtomicWord>();
TestAtomicOps<Atomic32>();
-
- // I've commented the Atomic64 tests out for now, because Atomic64
- // doesn't work on x86 systems that are not compiled to support mmx
- // registers. Since I want this project to be as portable as
- // possible -- that is, not to assume we've compiled for mmx or even
- // that the processor supports it -- and we don't actually use
- // Atomic64 anywhere, I've commented it out of the test for now.
- // (Luckily, if we ever do use Atomic64 by accident, we'll get told
- // via a compiler error rather than some obscure runtime failure, so
- // this course of action is safe.)
- // If we ever *do* want to enable this, try adding -msse (or -mmmx?)
- // to the CXXFLAGS in Makefile.am.
-#if 0 and defined(BASE_HAS_ATOMIC64)
- TestAtomicIncrement<base::subtle::Atomic64>(
- base::subtle::NoBarrier_AtomicIncrement);
- TestAtomicIncrement<base::subtle::Atomic64>(
- base::subtle::Barrier_AtomicIncrement);
-#endif
-
printf("PASS\n");
return 0;
}