Diffstat (limited to 'libstdc++-v3/include/bits/shared_ptr_base.h')
 libstdc++-v3/include/bits/shared_ptr_base.h | 196 ++++++++++++++++++++++++++
 1 file changed, 196 insertions(+), 0 deletions(-)
diff --git a/libstdc++-v3/include/bits/shared_ptr_base.h b/libstdc++-v3/include/bits/shared_ptr_base.h
index 183c241e29b..4a30ea4da6a 100644
--- a/libstdc++-v3/include/bits/shared_ptr_base.h
+++ b/libstdc++-v3/include/bits/shared_ptr_base.h
@@ -51,6 +51,202 @@
_GLIBCXX_BEGIN_NAMESPACE(std)
+ /**
+ * @brief Exception possibly thrown by @c shared_ptr.
+ * @ingroup exceptions
+ */
+ class bad_weak_ptr : public std::exception
+ {
+ public:
+ virtual char const*
+ what() const throw()
+ { return "std::bad_weak_ptr"; }
+ };
+
+ // Substitute for bad_weak_ptr object in the case of -fno-exceptions.
+ inline void
+ __throw_bad_weak_ptr()
+ {
+#if __EXCEPTIONS
+ throw bad_weak_ptr();
+#else
+ __builtin_abort();
+#endif
+ }
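
The only caller of __throw_bad_weak_ptr() in this header is _M_add_ref_lock() further down, which is what runs when a shared_ptr is constructed from a weak_ptr whose object has already been released. A minimal illustration using only the public API (standard behavior, not code from this patch):

    #include <iostream>
    #include <memory>

    int main()
    {
      std::weak_ptr<int> wp;
      {
        auto sp = std::make_shared<int>(42);
        wp = sp;                        // object alive, use count == 1
      }                                 // use count drops to 0 here
      try {
        std::shared_ptr<int> sp2(wp);   // ends up in _M_add_ref_lock()
      } catch (const std::bad_weak_ptr& e) {
        std::cout << e.what() << '\n';  // prints "std::bad_weak_ptr"
      }
    }

Under -fno-exceptions the same construction calls __builtin_abort() instead, per the #else branch above.
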
+
+ using __gnu_cxx::_Lock_policy;
+ using __gnu_cxx::__default_lock_policy;
+ using __gnu_cxx::_S_single;
+ using __gnu_cxx::_S_mutex;
+ using __gnu_cxx::_S_atomic;
+
+ // Empty helper class except when the template argument is _S_mutex.
+ template<_Lock_policy _Lp>
+ class _Mutex_base
+ {
+ protected:
+ // The atomic policy uses fully-fenced builtins, single doesn't care.
+ enum { _S_need_barriers = 0 };
+ };
+
+ template<>
+ class _Mutex_base<_S_mutex>
+ : public __gnu_cxx::__mutex
+ {
+ protected:
+ // This policy is used when atomic builtins are not available.
+ // The replacement atomic operations might not have the necessary
+ // memory barriers.
+ enum { _S_need_barriers = 1 };
+ };
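
The point of specializing only _S_mutex is cost: _Sp_counted_base below inherits from _Mutex_base<_Lp>, so it carries a __gnu_cxx::__mutex only under the mutex policy, while the _S_single and _S_atomic bases are empty and, via the empty-base optimization, add no size. A sketch of the same technique with stand-in names (_Policy, _Base and _Counter are illustrative, not part of the patch):

    #include <mutex>

    enum _Policy { _P_single, _P_atomic, _P_mutex };

    // Empty by default; an empty base contributes no storage to the
    // derived class.
    template<_Policy _Pp>
    struct _Base
    { enum { _S_need_barriers = 0 }; };

    // Only the mutex policy actually carries a lock.
    template<>
    struct _Base<_P_mutex> : std::mutex
    { enum { _S_need_barriers = 1 }; };

    template<_Policy _Pp>
    struct _Counter : _Base<_Pp>
    { long _M_count; };

    static_assert(sizeof(_Counter<_P_single>) == sizeof(long),
                  "empty base adds no storage");

_S_need_barriers is an enum rather than a data member for the same reason: it is a compile-time constant that the `if` in _M_release() folds away, and it occupies no space in the object.
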
+
+ template<_Lock_policy _Lp = __default_lock_policy>
+ class _Sp_counted_base
+ : public _Mutex_base<_Lp>
+ {
+ public:
+ _Sp_counted_base()
+ : _M_use_count(1), _M_weak_count(1) { }
+
+ virtual
+ ~_Sp_counted_base() // nothrow
+ { }
+
+ // Called when _M_use_count drops to zero, to release the resources
+ // managed by *this.
+ virtual void
+ _M_dispose() = 0; // nothrow
+
+ // Called when _M_weak_count drops to zero.
+ virtual void
+ _M_destroy() // nothrow
+ { delete this; }
+
+ virtual void*
+ _M_get_deleter(const std::type_info&) = 0;
+
+ void
+ _M_add_ref_copy()
+ { __gnu_cxx::__atomic_add_dispatch(&_M_use_count, 1); }
+
+ void
+ _M_add_ref_lock();
+
+ void
+ _M_release() // nothrow
+ {
+ // Be race-detector-friendly. For more info see bits/c++config.
+ _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
+ if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
+ {
+ _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
+ _M_dispose();
+ // There must be a memory barrier between dispose() and destroy()
+ // to ensure that the effects of dispose() are observed in the
+ // thread that runs destroy().
+ // See http://gcc.gnu.org/ml/libstdc++/2005-11/msg00136.html
+ if (_Mutex_base<_Lp>::_S_need_barriers)
+ {
+ _GLIBCXX_READ_MEM_BARRIER;
+ _GLIBCXX_WRITE_MEM_BARRIER;
+ }
+
+ // Be race-detector-friendly. For more info see bits/c++config.
+ _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
+ if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count,
+ -1) == 1)
+ {
+ _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
+ _M_destroy();
+ }
+ }
+ }
+
+ void
+ _M_weak_add_ref() // nothrow
+ { __gnu_cxx::__atomic_add_dispatch(&_M_weak_count, 1); }
+
+ void
+ _M_weak_release() // nothrow
+ {
+ // Be race-detector-friendly. For more info see bits/c++config.
+ _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
+ if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count, -1) == 1)
+ {
+ _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
+ if (_Mutex_base<_Lp>::_S_need_barriers)
+ {
+ // See _M_release(),
+ // destroy() must observe results of dispose()
+ _GLIBCXX_READ_MEM_BARRIER;
+ _GLIBCXX_WRITE_MEM_BARRIER;
+ }
+ _M_destroy();
+ }
+ }
+
+ long
+ _M_get_use_count() const // nothrow
+ {
+ // No memory barrier is used here so there is no synchronization
+ // with other threads.
+ return const_cast<const volatile _Atomic_word&>(_M_use_count);
+ }
+
+ private:
+ _Sp_counted_base(_Sp_counted_base const&);
+ _Sp_counted_base& operator=(_Sp_counted_base const&);
+
+ _Atomic_word _M_use_count; // #shared
+ _Atomic_word _M_weak_count; // #weak + (#shared != 0)
+ };
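
The comments on the two counters are the key invariant: _M_weak_count counts the weak_ptrs plus one extra reference held collectively by all shared_ptrs. _M_release() drops that collective reference when the last shared_ptr goes away, so disposing of the managed object (_M_dispose()) and destroying the control block itself (_M_destroy()) are decoupled. The effect is observable through the public API (a sketch; the comments map each step to the members above):

    #include <cassert>
    #include <memory>

    int main()
    {
      auto sp = std::make_shared<int>(1); // _M_use_count 1, _M_weak_count 1
      std::weak_ptr<int> wp = sp;         // _M_use_count 1, _M_weak_count 2
      assert(sp.use_count() == 1);        // reads _M_get_use_count()

      sp.reset();           // _M_release(): use count 0 -> _M_dispose(),
                            // weak count 2 -> 1, control block survives
      assert(wp.expired()); // the int is gone, the counts are not
      wp.reset();           // _M_weak_release(): weak count 1 -> 0,
                            // _M_destroy() frees the control block
    }
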
+
+ template<>
+ inline void
+ _Sp_counted_base<_S_single>::
+ _M_add_ref_lock()
+ {
+ if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
+ {
+ _M_use_count = 0;
+ __throw_bad_weak_ptr();
+ }
+ }
+
+ template<>
+ inline void
+ _Sp_counted_base<_S_mutex>::
+ _M_add_ref_lock()
+ {
+ __gnu_cxx::__scoped_lock sentry(*this);
+ if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
+ {
+ _M_use_count = 0;
+ __throw_bad_weak_ptr();
+ }
+ }
+
+ template<>
+ inline void
+ _Sp_counted_base<_S_atomic>::
+ _M_add_ref_lock()
+ {
+ // Perform lock-free add-if-not-zero operation.
+ _Atomic_word __count;
+ do
+ {
+ __count = _M_use_count;
+ if (__count == 0)
+ __throw_bad_weak_ptr();
+
+ // Replace the current counter value with the old value + 1, as
+ // long as it's not changed meanwhile.
+ }
+ while (!__sync_bool_compare_and_swap(&_M_use_count, __count,
+ __count + 1));
+ }
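
All three specializations implement the same operation, increment-if-not-zero, at different costs: the single-threaded one needs no protection at all, the mutex one takes the object's lock so only one thread at a time performs the increment-and-check, and the atomic one loops on a compare-and-swap. The shape of that last loop, rewritten against std::atomic instead of the __sync builtin (a sketch, not the library's code):

    #include <atomic>

    // Increment 'count' unless it is zero; returns false if it was zero.
    // Mirrors _Sp_counted_base<_S_atomic>::_M_add_ref_lock() above.
    bool
    add_ref_if_not_zero(std::atomic<long>& count)
    {
      long old = count.load();
      do
        {
          if (old == 0)
            return false;   // too late: the object was already disposed
          // On failure compare_exchange_weak reloads 'old' with the
          // current value, so the loop retries against fresh data.
        }
      while (!count.compare_exchange_weak(old, old + 1));
      return true;
    }

The weak form of the compare-and-swap is fine here because the operation already sits in a retry loop.
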
+
+
// Forward declarations.
template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
class __shared_ptr;