diff options
author | Venkatesh Srinivas <venkateshs@chromium.org> | 2021-02-08 12:32:23 -0800 |
---|---|---|
committer | Aliaksey Kandratsenka <alkondratenko@gmail.com> | 2021-02-14 11:23:42 -0800 |
commit | 290b123c01f6578391b20bb31849f26b02401112 (patch) | |
tree | 5bae851de8920ef2c0b9599e0cf795ba3c0c5214 | |
parent | 3b1c60cc4ea737ff151977dbcd164c5e2eb859fb (diff) | |
download | gperftools-290b123c01f6578391b20bb31849f26b02401112.tar.gz |
atomicops: Remove Acquire_Store / Release_Load
gperftools' internal atomicops library included atomic
Release_Load and Acquire_Store operations; those operations
were unused and expressed ordering constraints that are not
expressible in the C++ standard memory model.
Remove them to make a transition to C++11 atomics easier
and to avoid confusing uses of them.
-rw-r--r-- | src/base/atomicops-internals-arm-generic.h | 19 | ||||
-rw-r--r-- | src/base/atomicops-internals-arm-v6plus.h | 20 | ||||
-rw-r--r-- | src/base/atomicops-internals-gcc.h | 20 | ||||
-rw-r--r-- | src/base/atomicops-internals-linuxppc.h | 32 | ||||
-rw-r--r-- | src/base/atomicops-internals-macosx.h | 29 | ||||
-rw-r--r-- | src/base/atomicops-internals-mips.h | 24 | ||||
-rw-r--r-- | src/base/atomicops-internals-windows.h | 29 | ||||
-rw-r--r-- | src/base/atomicops-internals-x86.h | 38 | ||||
-rw-r--r-- | src/base/atomicops.h | 36 | ||||
-rw-r--r-- | src/tests/atomicops_unittest.cc | 10 |
10 files changed, 0 insertions, 257 deletions
diff --git a/src/base/atomicops-internals-arm-generic.h b/src/base/atomicops-internals-arm-generic.h index d0f9413..cfa6143 100644 --- a/src/base/atomicops-internals-arm-generic.h +++ b/src/base/atomicops-internals-arm-generic.h @@ -122,11 +122,6 @@ inline void MemoryBarrier() { pLinuxKernelMemoryBarrier(); } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; @@ -142,11 +137,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - // 64-bit versions are not implemented yet. @@ -185,10 +175,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { NotImplementedFatalError("NoBarrier_Store"); } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NotImplementedFatalError("Acquire_Store64"); -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { NotImplementedFatalError("Release_Store"); } @@ -203,11 +189,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return 0; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - NotImplementedFatalError("Atomic64 Release_Load"); - return 0; -} - inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { diff --git a/src/base/atomicops-internals-arm-v6plus.h b/src/base/atomicops-internals-arm-v6plus.h index 35f1048..af2920a 100644 --- a/src/base/atomicops-internals-arm-v6plus.h +++ b/src/base/atomicops-internals-arm-v6plus.h @@ -136,11 +136,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = 
value; @@ -156,11 +151,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - // 64-bit versions are only available if LDREXD and STREXD instructions // are available. #ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD @@ -288,11 +278,6 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { #endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NoBarrier_Store(ptr, value); - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); NoBarrier_Store(ptr, value); @@ -304,11 +289,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return NoBarrier_Load(ptr); -} - inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { diff --git a/src/base/atomicops-internals-gcc.h b/src/base/atomicops-internals-gcc.h index f8d2786..0dcf03e 100644 --- a/src/base/atomicops-internals-gcc.h +++ b/src/base/atomicops-internals-gcc.h @@ -99,11 +99,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; @@ -119,11 +114,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - // 64-bit versions inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, @@ -172,11 +162,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - 
*ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; @@ -192,11 +177,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - } // namespace base::subtle } // namespace base diff --git a/src/base/atomicops-internals-linuxppc.h b/src/base/atomicops-internals-linuxppc.h index b52fdf0..73aa156 100644 --- a/src/base/atomicops-internals-linuxppc.h +++ b/src/base/atomicops-internals-linuxppc.h @@ -359,14 +359,6 @@ inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { - *ptr = value; - // This can't be _lwsync(); we need to order the immediately - // preceding stores against any load that may follow, but lwsync - // doesn't guarantee that. - _sync(); -} - inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { _lwsync(); *ptr = value; @@ -382,14 +374,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { - // This can't be _lwsync(); we need to order the immediately - // preceding stores against any load that may follow, but lwsync - // doesn't guarantee that. - _sync(); - return *ptr; -} - #ifdef __PPC64__ // 64-bit Versions. @@ -398,14 +382,6 @@ inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { - *ptr = value; - // This can't be _lwsync(); we need to order the immediately - // preceding stores against any load that may follow, but lwsync - // doesn't guarantee that. 
- _sync(); -} - inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { _lwsync(); *ptr = value; @@ -421,14 +397,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { - // This can't be _lwsync(); we need to order the immediately - // preceding stores against any load that may follow, but lwsync - // doesn't guarantee that. - _sync(); - return *ptr; -} - #endif } // namespace base::subtle diff --git a/src/base/atomicops-internals-macosx.h b/src/base/atomicops-internals-macosx.h index b5130d4..c21f606 100644 --- a/src/base/atomicops-internals-macosx.h +++ b/src/base/atomicops-internals-macosx.h @@ -172,11 +172,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; @@ -192,11 +187,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { - MemoryBarrier(); - return *ptr; -} - // 64-bit version inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, @@ -268,11 +258,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; @@ -288,11 +273,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { - MemoryBarrier(); - return *ptr; -} - #else // 64-bit implementation on 32-bit platform @@ -342,11 +322,6 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { #endif -inline void Acquire_Store(volatile Atomic64 
*ptr, Atomic64 value) { - NoBarrier_Store(ptr, value); - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { MemoryBarrier(); NoBarrier_Store(ptr, value); @@ -358,10 +333,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { - MemoryBarrier(); - return NoBarrier_Load(ptr); -} #endif // __LP64__ } // namespace base::subtle diff --git a/src/base/atomicops-internals-mips.h b/src/base/atomicops-internals-mips.h index 4bfd7f6..58e0f14 100644 --- a/src/base/atomicops-internals-mips.h +++ b/src/base/atomicops-internals-mips.h @@ -161,12 +161,6 @@ inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, return NoBarrier_AtomicExchange(ptr, new_value); } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) -{ - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); @@ -185,12 +179,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) return value; } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) -{ - MemoryBarrier(); - return *ptr; -} - #if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64) typedef int64_t Atomic64; @@ -285,12 +273,6 @@ inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, return NoBarrier_AtomicExchange(ptr, new_value); } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) -{ - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); @@ -309,12 +291,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) -{ - MemoryBarrier(); - return *ptr; -} - #endif } // namespace base::subtle diff --git a/src/base/atomicops-internals-windows.h b/src/base/atomicops-internals-windows.h index 93ced87..d124383 100644 --- 
a/src/base/atomicops-internals-windows.h +++ b/src/base/atomicops-internals-windows.h @@ -188,10 +188,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - Acquire_AtomicExchange(ptr, value); -} - inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; // works w/o barrier for current Intel chips as of June 2005 // See comments in Atomic64 version of Release_Store() below. @@ -206,11 +202,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - // 64-bit operations #if defined(_WIN64) || defined(__MINGW64__) @@ -298,11 +289,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier in this implementation -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; // works w/o barrier for current Intel chips as of June 2005 @@ -323,11 +309,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - #else // defined(_WIN64) || defined(__MINGW64__) // 64-bit low-level operations on 32-bit platform @@ -393,11 +374,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptrValue, Atomic64 value) } } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NoBarrier_AtomicExchange(ptr, value); - // acts as a barrier in this implementation -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { NoBarrier_Store(ptr, value); } @@ -419,11 +395,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { 
- MemoryBarrier(); - return NoBarrier_Load(ptr); -} - #endif // defined(_WIN64) || defined(__MINGW64__) diff --git a/src/base/atomicops-internals-x86.h b/src/base/atomicops-internals-x86.h index e441ac7..4eadacb 100644 --- a/src/base/atomicops-internals-x86.h +++ b/src/base/atomicops-internals-x86.h @@ -128,11 +128,6 @@ inline void MemoryBarrier() { __asm__ __volatile__("mfence" : : : "memory"); } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - *ptr = value; - MemoryBarrier(); -} - #else inline void MemoryBarrier() { @@ -144,14 +139,6 @@ inline void MemoryBarrier() { } } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { - *ptr = value; - __asm__ __volatile__("mfence" : : : "memory"); - } else { - Acquire_AtomicExchange(ptr, value); - } -} #endif inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { @@ -171,11 +158,6 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return value; } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - MemoryBarrier(); - return *ptr; -} - #if defined(__x86_64__) // 64-bit low-level operations on 64-bit platform. @@ -216,11 +198,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - *ptr = value; - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { ATOMICOPS_COMPILER_BARRIER(); @@ -254,11 +231,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return *ptr; -} - #else // defined(__x86_64__) // 64-bit low-level operations on 32-bit platform. 
@@ -333,11 +305,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"); } -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - NoBarrier_Store(ptr, value); - MemoryBarrier(); -} - inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { ATOMICOPS_COMPILER_BARRIER(); NoBarrier_Store(ptr, value); @@ -363,11 +330,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { return value; } -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - MemoryBarrier(); - return NoBarrier_Load(ptr); -} - #endif // defined(__x86_64__) inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, diff --git a/src/base/atomicops.h b/src/base/atomicops.h index dac95be..2f0bc5b 100644 --- a/src/base/atomicops.h +++ b/src/base/atomicops.h @@ -205,11 +205,6 @@ inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); } -inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { - return base::subtle::Acquire_Store( - reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); -} - inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { return base::subtle::Release_Store( reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); @@ -225,11 +220,6 @@ inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); } -inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { - return base::subtle::Release_Load( - reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); -} - } // namespace base::subtle } // namespace base #endif // AtomicWordCastType @@ -268,11 +258,9 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value); void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value); -void Acquire_Store(volatile Atomic32* ptr, Atomic32 value); void 
Release_Store(volatile Atomic32* ptr, Atomic32 value); Atomic32 NoBarrier_Load(volatile const Atomic32* ptr); Atomic32 Acquire_Load(volatile const Atomic32* ptr); -Atomic32 Release_Load(volatile const Atomic32* ptr); // Corresponding operations on Atomic64 Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, @@ -289,11 +277,9 @@ Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value); void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value); -void Acquire_Store(volatile Atomic64* ptr, Atomic64 value); void Release_Store(volatile Atomic64* ptr, Atomic64 value); Atomic64 NoBarrier_Load(volatile const Atomic64* ptr); Atomic64 Acquire_Load(volatile const Atomic64* ptr); -Atomic64 Release_Load(volatile const Atomic64* ptr); } // namespace base::subtle } // namespace base @@ -321,10 +307,6 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); } -inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { - return base::subtle::Acquire_Store(ptr, value); -} - inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { return base::subtle::Release_Store(ptr, value); } @@ -332,10 +314,6 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { return base::subtle::Acquire_Load(ptr); } - -inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { - return base::subtle::Release_Load(ptr); -} #endif // AtomicWordCastType // 32-bit Acquire/Release operations to be deprecated. 
@@ -350,18 +328,12 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 new_value) { return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); } -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - base::subtle::Acquire_Store(ptr, value); -} inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { return base::subtle::Release_Store(ptr, value); } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return base::subtle::Acquire_Load(ptr); } -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - return base::subtle::Release_Load(ptr); -} #ifdef BASE_HAS_ATOMIC64 @@ -377,10 +349,6 @@ inline base::subtle::Atomic64 Release_CompareAndSwap( base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) { return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); } -inline void Acquire_Store( - volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) { - base::subtle::Acquire_Store(ptr, value); -} inline void Release_Store( volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) { return base::subtle::Release_Store(ptr, value); @@ -389,10 +357,6 @@ inline base::subtle::Atomic64 Acquire_Load( volatile const base::subtle::Atomic64* ptr) { return base::subtle::Acquire_Load(ptr); } -inline base::subtle::Atomic64 Release_Load( - volatile const base::subtle::Atomic64* ptr) { - return base::subtle::Release_Load(ptr); -} #endif // BASE_HAS_ATOMIC64 diff --git a/src/tests/atomicops_unittest.cc b/src/tests/atomicops_unittest.cc index aa82a6b..76aae2e 100644 --- a/src/tests/atomicops_unittest.cc +++ b/src/tests/atomicops_unittest.cc @@ -104,11 +104,6 @@ static void TestStore() { base::subtle::NoBarrier_Store(&value, kVal2); ASSERT_EQ(kVal2, value); - base::subtle::Acquire_Store(&value, kVal1); - ASSERT_EQ(kVal1, value); - base::subtle::Acquire_Store(&value, kVal2); - ASSERT_EQ(kVal2, value); - base::subtle::Release_Store(&value, kVal1); ASSERT_EQ(kVal1, value); 
base::subtle::Release_Store(&value, kVal2); @@ -133,11 +128,6 @@ static void TestLoad() { ASSERT_EQ(kVal1, base::subtle::Acquire_Load(&value)); value = kVal2; ASSERT_EQ(kVal2, base::subtle::Acquire_Load(&value)); - - value = kVal1; - ASSERT_EQ(kVal1, base::subtle::Release_Load(&value)); - value = kVal2; - ASSERT_EQ(kVal2, base::subtle::Release_Load(&value)); } template <class AtomicType> |