summaryrefslogtreecommitdiff
path: root/storage/perfschema/pfs_atomic.h
diff options
context:
space:
mode:
authorMarc Alff <marc.alff@sun.com>2010-01-11 18:47:27 -0700
committerMarc Alff <marc.alff@sun.com>2010-01-11 18:47:27 -0700
commite0e0f9e3d46917fe9b611fc9769e64032c267446 (patch)
treec111d4c62b1e1eb7a74ec68860756418e29cb61e /storage/perfschema/pfs_atomic.h
parent3d915225611a921fad03934e58bf281b48fc15b0 (diff)
downloadmariadb-git-e0e0f9e3d46917fe9b611fc9769e64032c267446.tar.gz
WL#2360 Performance schema
Part V: performance schema implementation
Diffstat (limited to 'storage/perfschema/pfs_atomic.h')
-rw-r--r--storage/perfschema/pfs_atomic.h148
1 file changed, 148 insertions, 0 deletions
diff --git a/storage/perfschema/pfs_atomic.h b/storage/perfschema/pfs_atomic.h
new file mode 100644
index 00000000000..7833c5f1c7a
--- /dev/null
+++ b/storage/perfschema/pfs_atomic.h
@@ -0,0 +1,148 @@
+/* Copyright (C) 2009 Sun Microsystems, Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef PFS_ATOMIC_H
+#define PFS_ATOMIC_H
+
+/**
+ @file storage/perfschema/pfs_atomic.h
+ Atomic operations (declarations).
+*/
+
+#include <my_atomic.h>
+
+/** Helper for atomic operations. */
+class PFS_atomic
+{
+public:
+ static void init();
+ static void cleanup();
+
+ /** Atomic load. */
+ static inline int32 load_32(volatile int32 *ptr)
+ {
+ int32 result;
+ rdlock(ptr);
+ result= my_atomic_load32(ptr);
+ rdunlock(ptr);
+ return result;
+ }
+
+ /** Atomic load. */
+ static inline uint32 load_u32(volatile uint32 *ptr)
+ {
+ uint32 result;
+ rdlock(ptr);
+ result= (uint32) my_atomic_load32((int32*) ptr);
+ rdunlock(ptr);
+ return result;
+ }
+
+ /** Atomic store. */
+ static inline void store_32(volatile int32 *ptr, int32 value)
+ {
+ wrlock(ptr);
+ my_atomic_store32(ptr, value);
+ wrunlock(ptr);
+ }
+
+ /** Atomic store. */
+ static inline void store_u32(volatile uint32 *ptr, uint32 value)
+ {
+ wrlock(ptr);
+ my_atomic_store32((int32*) ptr, (int32) value);
+ wrunlock(ptr);
+ }
+
+ /** Atomic add. */
+ static inline int32 add_32(volatile int32 *ptr, int32 value)
+ {
+ int32 result;
+ wrlock(ptr);
+ result= my_atomic_add32(ptr, value);
+ wrunlock(ptr);
+ return result;
+ }
+
+ /** Atomic add. */
+ static inline uint32 add_u32(volatile uint32 *ptr, uint32 value)
+ {
+ uint32 result;
+ wrlock(ptr);
+ result= (uint32) my_atomic_add32((int32*) ptr, (int32) value);
+ wrunlock(ptr);
+ return result;
+ }
+
+ /** Atomic compare and swap. */
+ static inline bool cas_32(volatile int32 *ptr, int32 *old_value,
+ int32 new_value)
+ {
+ bool result;
+ wrlock(ptr);
+ result= my_atomic_cas32(ptr, old_value, new_value);
+ wrunlock(ptr);
+ return result;
+ }
+
+ /** Atomic compare and swap. */
+ static inline bool cas_u32(volatile uint32 *ptr, uint32 *old_value,
+ uint32 new_value)
+ {
+ bool result;
+ wrlock(ptr);
+ result= my_atomic_cas32((int32*) ptr, (int32*) old_value,
+ (uint32) new_value);
+ wrunlock(ptr);
+ return result;
+ }
+
+private:
+ static my_atomic_rwlock_t m_rwlock_array[256];
+
+ static inline my_atomic_rwlock_t *get_rwlock(volatile void *ptr)
+ {
+ /*
+ Divide an address by 8 to remove alignment,
+ modulo 256 to fall in the array.
+ */
+ uint index= (((intptr) ptr) >> 3) & 0xFF;
+ my_atomic_rwlock_t *result= &m_rwlock_array[index];
+ return result;
+ }
+
+ static inline void rdlock(volatile void *ptr)
+ {
+ my_atomic_rwlock_rdlock(get_rwlock(ptr));
+ }
+
+ static inline void wrlock(volatile void *ptr)
+ {
+ my_atomic_rwlock_wrlock(get_rwlock(ptr));
+ }
+
+ static inline void rdunlock(volatile void *ptr)
+ {
+ my_atomic_rwlock_rdunlock(get_rwlock(ptr));
+ }
+
+ static inline void wrunlock(volatile void *ptr)
+ {
+ my_atomic_rwlock_wrunlock(get_rwlock(ptr));
+ }
+};
+
+#endif
+