author    amacleod <amacleod@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-11-10 20:38:33 +0000
committer amacleod <amacleod@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-11-10 20:38:33 +0000
commit    10b744a37ff1abbecf121987f694e0a704b77b95 (patch)
tree      4138d94b9a849ba000843310e148fa8a6d8d6c8a /libstdc++-v3
parent    5335e796aebf8ae6b08f8a8c664759c706e10653 (diff)
download  gcc-10b744a37ff1abbecf121987f694e0a704b77b95.tar.gz
PR middle-end/51038
libstdc++-v3
	* include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
	(atomic_signal_fence): Call built-in.
	(test_and_set, clear): Call new atomic built-ins.

gcc
	* builtins.c (expand_builtin_atomic_clear): New.  Expand atomic_clear.
	(expand_builtin_atomic_test_and_set): New.  Expand atomic test_and_set.
	(expand_builtin): Add cases for test_and_set and clear.
	* sync-builtins.def (BUILT_IN_ATOMIC_TEST_AND_SET): New.
	(BUILT_IN_ATOMIC_CLEAR): New.

testsuite
	* gcc.dg/atomic-invalid.c: Add test for invalid __atomic_clear models.
	* gcc.dg/atomic-flag.c: New.  Test __atomic_test_and_set and
	__atomic_clear.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@181271 138bc75d-0d04-0410-961f-82ee72b054a4
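As a user-level illustration of the two built-ins introduced by this commit, here is a minimal sketch (not part of the patch) of a spinlock written directly against __atomic_test_and_set and __atomic_clear; the lock_flag variable and the spin_lock/spin_unlock names are illustrative only:

/* Minimal sketch, not from the patch: a spinlock on the new built-ins,
   mirroring what atomic_flag::test_and_set and clear now expand to.  */
static bool lock_flag;

void
spin_lock ()
{
  /* __atomic_test_and_set returns the previous value; spin until it
     was clear.  Acquire ordering pairs with the release in unlock.  */
  while (__atomic_test_and_set (&lock_flag, __ATOMIC_ACQUIRE))
    ;
}

void
spin_unlock ()
{
  /* __atomic_clear only accepts relaxed, release or seq_cst orders,
     matching the __glibcxx_assert checks in atomic_flag::clear.  */
  __atomic_clear (&lock_flag, __ATOMIC_RELEASE);
}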
Diffstat (limited to 'libstdc++-v3')
-rw-r--r--  libstdc++-v3/ChangeLog                     7
-rw-r--r--  libstdc++-v3/include/bits/atomic_base.h   64
2 files changed, 21 insertions, 50 deletions
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index c25dd282d05..826ca4482dc 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,10 @@
+2011-11-10 Andrew MacLeod <amacleod@redhat.com>
+
+ PR middle-end/51038
+ * include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
+ (atomic_signal_fence): Call built-in.
+ (test_and_set, clear): Call new atomic built-ins.
+
2011-11-09 Jonathan Wakely <jwakely.gcc@gmail.com>
* include/bits/allocator.h (__shrink_to_fit_aux::_S_do_it): Create
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 5327c1bfa21..f0336611d3f 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -68,11 +68,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __mo2;
}
- void
- atomic_thread_fence(memory_order __m) noexcept;
+ inline void
+ atomic_thread_fence(memory_order __m) noexcept
+ {
+ __atomic_thread_fence (__m);
+ }
- void
- atomic_signal_fence(memory_order __m) noexcept;
+ inline void
+ atomic_signal_fence(memory_order __m) noexcept
+ {
+ __atomic_thread_fence (__m);
+ }
/// kill_dependency
template<typename _Tp>
@@ -261,35 +267,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
test_and_set(memory_order __m = memory_order_seq_cst) noexcept
{
- /* The standard *requires* this to be lock free. If exchange is not
- always lock free, the resort to the old test_and_set. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- return __atomic_exchange_n(&_M_i, 1, __m);
- else
- {
- /* Sync test and set is only guaranteed to be acquire. */
- if (__m == memory_order_seq_cst || __m == memory_order_release
- || __m == memory_order_acq_rel)
- atomic_thread_fence (__m);
- return __sync_lock_test_and_set (&_M_i, 1);
- }
+ return __atomic_test_and_set (&_M_i, __m);
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
{
- /* The standard *requires* this to be lock free. If exchange is not
- always lock free, the resort to the old test_and_set. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- return __atomic_exchange_n(&_M_i, 1, __m);
- else
- {
- /* Sync test and set is only guaranteed to be acquire. */
- if (__m == memory_order_seq_cst || __m == memory_order_release
- || __m == memory_order_acq_rel)
- atomic_thread_fence (__m);
- return __sync_lock_test_and_set (&_M_i, 1);
- }
+ return __atomic_test_and_set (&_M_i, __m);
}
void
@@ -299,17 +283,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
- /* The standard *requires* this to be lock free. If store is not always
- lock free, the resort to the old style __sync_lock_release. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- __atomic_store_n(&_M_i, 0, __m);
- else
- {
- __sync_lock_release (&_M_i, 0);
- /* __sync_lock_release is only guaranteed to be a release barrier. */
- if (__m == memory_order_seq_cst)
- atomic_thread_fence (__m);
- }
+ __atomic_clear (&_M_i, __m);
}
void
@@ -319,17 +293,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
- /* The standard *requires* this to be lock free. If store is not always
- lock free, the resort to the old style __sync_lock_release. */
- if (__atomic_always_lock_free (sizeof (_M_i), 0))
- __atomic_store_n(&_M_i, 0, __m);
- else
- {
- __sync_lock_release (&_M_i, 0);
- /* __sync_lock_release is only guaranteed to be a release barrier. */
- if (__m == memory_order_seq_cst)
- atomic_thread_fence (__m);
- }
+ __atomic_clear (&_M_i, __m);
}
};
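For reference, a minimal sketch of how the reworked std::atomic_flag members are used from client code; the flag variable and the critical_section function are illustrative only, and the memory orders chosen satisfy the __glibcxx_assert checks in clear() above:

#include <atomic>

std::atomic_flag flag = ATOMIC_FLAG_INIT;

void
critical_section ()
{
  /* test_and_set now expands to __atomic_test_and_set (&_M_i, __m).  */
  while (flag.test_and_set (std::memory_order_acquire))
    ;  /* spin until the previous value was clear */

  /* ... work protected by the flag ...  */

  /* clear now expands to __atomic_clear (&_M_i, __m); acquire and
     acq_rel orders are rejected by the asserts above.  */
  flag.clear (std::memory_order_release);
}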