author     Monty <monty@mariadb.org>    2020-05-19 14:07:34 +0300
committer  Monty <monty@mariadb.org>    2020-05-19 14:07:34 +0300
commit     fa0397849a002a0417085aaea289b0c95c96520a (patch)
tree       35b97b7fe96a0c87adf897bc2b2302e7793e957a /include
parent     f7079d295ba50b900ec158ba0c54dbafd7fde29e (diff)
Move C++ code from my_atomic.h to my_atomic_wrapper.h
This is because the C++ code breaks code that includes my_atomic.h from within an extern "C" block, which is the case with ha_s3.cc.
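For context, a minimal sketch of the failure mode this commit avoids (the file below is hypothetical, not taken from ha_s3.cc): C++ templates and std::atomic cannot be given C linkage, so a translation unit that wraps the include in extern "C" stops compiling once my_atomic.h pulls in a class template.

/* sketch.cc -- hypothetical illustration, not part of the commit */
extern "C" {
#include "my_atomic.h"   /* before this commit: under __cplusplus this also
                            pulled in <atomic> and the Atomic_relaxed template */
}
/* typical diagnostic, wording varies by compiler:
   error: template with C linkage */

After the move, my_atomic.h stays plain C and C++ users include my_atomic_wrapper.h outside any extern "C" block.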
Diffstat (limited to 'include')
-rw-r--r--  include/my_atomic.h          |  45 -
-rw-r--r--  include/my_atomic_wrapper.h  |  58 +
2 files changed, 58 insertions(+), 45 deletions(-)
diff --git a/include/my_atomic.h b/include/my_atomic.h
index 88f6746ba3d..81da9e35cf9 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -169,49 +169,4 @@
#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
my_atomic_casptr((P), (E), (D))
#endif
-
-#ifdef __cplusplus
-#include <atomic>
-/**
- A wrapper for std::atomic, defaulting to std::memory_order_relaxed.
-
- When it comes to atomic loads or stores at std::memory_order_relaxed
- on IA-32 or AMD64, this wrapper is only introducing some constraints
- to the C++ compiler, to prevent some optimizations of loads or
- stores.
-
- On POWER and ARM, atomic loads and stores involve different instructions
- from normal loads and stores and will thus incur some overhead.
-
- Because atomic read-modify-write operations will always incur
- overhead, we intentionally do not define
- operator++(), operator--(), operator+=(), operator-=(), or similar,
- to make the overhead stand out in the users of this code.
-*/
-template <typename Type> class Atomic_relaxed
-{
- std::atomic<Type> m;
-public:
- Atomic_relaxed(const Atomic_relaxed<Type> &rhs)
- { m.store(rhs, std::memory_order_relaxed); }
- Atomic_relaxed(Type val) : m(val) {}
- Atomic_relaxed() {}
-
- operator Type() const { return m.load(std::memory_order_relaxed); }
- Type operator=(const Type val)
- { m.store(val, std::memory_order_relaxed); return val; }
- Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
- Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
- { return m.fetch_add(i, o); }
- Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
- { return m.fetch_sub(i, o); }
- bool compare_exchange_strong(Type& i1, const Type i2,
- std::memory_order o1= std::memory_order_relaxed,
- std::memory_order o2= std::memory_order_relaxed)
- { return m.compare_exchange_strong(i1, i2, o1, o2); }
- Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
- { return m.exchange(i, o); }
-};
-#endif /* __cplusplus */
-
#endif /* MY_ATOMIC_INCLUDED */
diff --git a/include/my_atomic_wrapper.h b/include/my_atomic_wrapper.h
new file mode 100644
index 00000000000..61db886d53f
--- /dev/null
+++ b/include/my_atomic_wrapper.h
@@ -0,0 +1,58 @@
+/* Copyright (c) 2020, MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+#ifdef __cplusplus
+#include <atomic>
+/**
+ A wrapper for std::atomic, defaulting to std::memory_order_relaxed.
+
+ When it comes to atomic loads or stores at std::memory_order_relaxed
+ on IA-32 or AMD64, this wrapper is only introducing some constraints
+ to the C++ compiler, to prevent some optimizations of loads or
+ stores.
+
+ On POWER and ARM, atomic loads and stores involve different instructions
+ from normal loads and stores and will thus incur some overhead.
+
+ Because atomic read-modify-write operations will always incur
+ overhead, we intentionally do not define
+ operator++(), operator--(), operator+=(), operator-=(), or similar,
+ to make the overhead stand out in the users of this code.
+*/
+template <typename Type> class Atomic_relaxed
+{
+ std::atomic<Type> m;
+public:
+ Atomic_relaxed(const Atomic_relaxed<Type> &rhs)
+ { m.store(rhs, std::memory_order_relaxed); }
+ Atomic_relaxed(Type val) : m(val) {}
+ Atomic_relaxed() {}
+
+ operator Type() const { return m.load(std::memory_order_relaxed); }
+ Type operator=(const Type val)
+ { m.store(val, std::memory_order_relaxed); return val; }
+ Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
+ Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
+ { return m.fetch_add(i, o); }
+ Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
+ { return m.fetch_sub(i, o); }
+ bool compare_exchange_strong(Type& i1, const Type i2,
+ std::memory_order o1= std::memory_order_relaxed,
+ std::memory_order o2= std::memory_order_relaxed)
+ { return m.compare_exchange_strong(i1, i2, o1, o2); }
+ Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
+ { return m.exchange(i, o); }
+};
+#endif /* __cplusplus */
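For readers of the new header, a minimal usage sketch (the counter and function names are made up for illustration). It shows the relaxed load performed by the conversion operator and the explicit fetch_add()/fetch_sub() calls that callers must write out, since the class deliberately defines no increment or decrement operators.

/* usage_sketch.cc -- hypothetical illustration, not part of the commit */
#include <cstdint>
#include "my_atomic_wrapper.h"

static Atomic_relaxed<uint32_t> pending_requests{0};   /* hypothetical counter */

void on_request_start()
{
  /* the read-modify-write is spelled out; there is no operator++,
     so the cost of the atomic RMW stays visible at the call site */
  pending_requests.fetch_add(1);
}

void on_request_end()
{
  pending_requests.fetch_sub(1);
}

uint32_t current_load()
{
  /* the conversion operator performs a std::memory_order_relaxed load */
  return pending_requests;
}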