summaryrefslogtreecommitdiff
path: root/include/atomic
diff options
context:
space:
mode:
authorunknown <serg@serg.mylan>2006-05-31 18:44:09 +0200
committerunknown <serg@serg.mylan>2006-05-31 18:44:09 +0200
commit8b4581778036c95072438630884ad810b512815a (patch)
tree363277285d9756e50642fa9f691b934925d856f0 /include/atomic
parenteb67ecef898433461ac9251541188fd9e1e945d4 (diff)
downloadmariadb-git-8b4581778036c95072438630884ad810b512815a.tar.gz
WL#2595 - atomic operations
BitKeeper/etc/ignore: Added mysys/test_atomic to the ignore list
Diffstat (limited to 'include/atomic')
-rw-r--r--include/atomic/nolock.h169
-rw-r--r--include/atomic/rwlock.h161
-rw-r--r--include/atomic/x86-gcc.h56
-rw-r--r--include/atomic/x86-msvc.h85
4 files changed, 471 insertions, 0 deletions
diff --git a/include/atomic/nolock.h b/include/atomic/nolock.h
new file mode 100644
index 00000000000..cf21a94c7de
--- /dev/null
+++ b/include/atomic/nolock.h
@@ -0,0 +1,169 @@
+/* Copyright (C) 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+  Native (lock-free) implementations exist only for 32-bit x86 here:
+  inline asm for gcc, _asm blocks for MSVC.  LOCK is the bus-lock
+  instruction prefix used by those asm bodies; MY_ATOMIC_MODE_DUMMY
+  removes it, deliberately producing a non-atomic build (useful only
+  to check that test_atomic can detect broken implementations).
+*/
+#if defined(__i386__) || defined(_M_IX86)
+#ifdef MY_ATOMIC_MODE_DUMMY
+# define LOCK ""
+#else
+# define LOCK "lock "
+#endif
+#ifdef __GNUC__
+#include "x86-gcc.h"
+#elif defined(_MSC_VER)
+#include "x86-msvc.h"
+#endif
+#endif
+
+/*
+  This section is active only when a platform header above supplied
+  asm "body" macros (make_atomic_add_body8 serves as the marker).
+  The generator macros below expand those bodies into the low-level
+  _my_atomic_*() family; the public my_atomic_*() names are mapped
+  onto them at the end, ignoring the lock argument L, since no
+  locking is needed in this mode.
+*/
+#ifdef make_atomic_add_body8
+
+#ifdef HAVE_INLINE
+
+/* fetch-and-add: the asm body leaves the old value of a->val in v */
+#define make_atomic_add(S) \
+static inline uint ## S _my_atomic_add ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v) \
+{ \
+ make_atomic_add_body ## S; \
+ return v; \
+}
+
+/* atomic exchange: the asm body leaves the old value of a->val in v */
+#define make_atomic_swap(S) \
+static inline uint ## S _my_atomic_swap ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v) \
+{ \
+ make_atomic_swap_body ## S; \
+ return v; \
+}
+
+/*
+  compare-and-swap: returns non-zero on success; on failure the asm
+  body updates *cmp with the current value of a->val.  Return type is
+  plain uint (a boolean), not uint ## S.
+*/
+#define make_atomic_cas(S) \
+static inline uint _my_atomic_cas ## S(my_atomic_ ## S ## _t *a,\
+ uint ## S *cmp, uint ## S set) \
+{ \
+ uint8 ret; \
+ make_atomic_cas_body ## S; \
+ return ret; \
+}
+
+/* atomic load (the platform bodies also act as a memory barrier) */
+#define make_atomic_load(S) \
+static inline uint ## S _my_atomic_load ## S( \
+ my_atomic_ ## S ## _t *a) \
+{ \
+ uint ## S ret; \
+ make_atomic_load_body ## S; \
+ return ret; \
+}
+
+/* atomic store (the platform bodies also act as a memory barrier) */
+#define make_atomic_store(S) \
+static inline void _my_atomic_store ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v) \
+{ \
+ make_atomic_store_body ## S; \
+}
+
+#else /* no inline functions */
+
+/*
+  same family as above, but extern declarations only; the definitions
+  are compiled once in a separate translation unit
+*/
+#define make_atomic_add(S) \
+extern uint ## S _my_atomic_add ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v);
+
+#define make_atomic_swap(S) \
+extern uint ## S _my_atomic_swap ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v);
+
+#define make_atomic_cas(S) \
+extern uint _my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
+ uint ## S *cmp, uint ## S set);
+
+#define make_atomic_load(S) \
+extern uint ## S _my_atomic_load ## S( \
+ my_atomic_ ## S ## _t *a);
+
+#define make_atomic_store(S) \
+extern void _my_atomic_store ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v);
+
+#endif
+
+/* instantiate for 8/16/32 bits (no 64-bit support in this mode) */
+make_atomic_add( 8)
+make_atomic_add(16)
+make_atomic_add(32)
+
+make_atomic_cas( 8)
+make_atomic_cas(16)
+make_atomic_cas(32)
+
+make_atomic_load( 8)
+make_atomic_load(16)
+make_atomic_load(32)
+
+make_atomic_store( 8)
+make_atomic_store(16)
+make_atomic_store(32)
+
+make_atomic_swap( 8)
+make_atomic_swap(16)
+make_atomic_swap(32)
+
+/* the generator macros and platform bodies are single-use -- drop them */
+#undef make_atomic_add_body8
+#undef make_atomic_cas_body8
+#undef make_atomic_load_body8
+#undef make_atomic_store_body8
+#undef make_atomic_swap_body8
+#undef make_atomic_add_body16
+#undef make_atomic_cas_body16
+#undef make_atomic_load_body16
+#undef make_atomic_store_body16
+#undef make_atomic_swap_body16
+#undef make_atomic_add_body32
+#undef make_atomic_cas_body32
+#undef make_atomic_load_body32
+#undef make_atomic_store_body32
+#undef make_atomic_swap_body32
+#undef make_atomic_add
+#undef make_atomic_cas
+#undef make_atomic_load
+#undef make_atomic_store
+#undef make_atomic_swap
+
+/*
+  public API: the trailing L (lock) argument is accepted for source
+  compatibility with the rwlock-based fallback, but ignored here
+*/
+#define my_atomic_add8(a,v,L) _my_atomic_add8(a,v)
+#define my_atomic_add16(a,v,L) _my_atomic_add16(a,v)
+#define my_atomic_add32(a,v,L) _my_atomic_add32(a,v)
+
+#define my_atomic_cas8(a,c,v,L) _my_atomic_cas8(a,c,v)
+#define my_atomic_cas16(a,c,v,L) _my_atomic_cas16(a,c,v)
+#define my_atomic_cas32(a,c,v,L) _my_atomic_cas32(a,c,v)
+
+#define my_atomic_load8(a,L) _my_atomic_load8(a)
+#define my_atomic_load16(a,L) _my_atomic_load16(a)
+#define my_atomic_load32(a,L) _my_atomic_load32(a)
+
+#define my_atomic_store8(a,v,L) _my_atomic_store8(a,v)
+#define my_atomic_store16(a,v,L) _my_atomic_store16(a,v)
+#define my_atomic_store32(a,v,L) _my_atomic_store32(a,v)
+
+#define my_atomic_swap8(a,v,L) _my_atomic_swap8(a,v)
+#define my_atomic_swap16(a,v,L) _my_atomic_swap16(a,v)
+#define my_atomic_swap32(a,v,L) _my_atomic_swap32(a,v)
+
+/*
+  lock objects are no-ops in this mode; my_atomic_rwlock_t expands so
+  that "my_atomic_rwlock_t name;" becomes a harmless "typedef int name;"
+*/
+#define my_atomic_rwlock_t typedef int
+#define my_atomic_rwlock_destroy(name)
+#define my_atomic_rwlock_init(name)
+#define my_atomic_rwlock_rdlock(name)
+#define my_atomic_rwlock_wrlock(name)
+#define my_atomic_rwlock_rdunlock(name)
+#define my_atomic_rwlock_wrunlock(name)
+
+#endif
+
diff --git a/include/atomic/rwlock.h b/include/atomic/rwlock.h
new file mode 100644
index 00000000000..ca5be29ab9b
--- /dev/null
+++ b/include/atomic/rwlock.h
@@ -0,0 +1,161 @@
+/* Copyright (C) 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* fallback mode: every atomic op is protected by a pthread rwlock */
+typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
+
+#ifdef MY_ATOMIC_EXTRA_DEBUG
+/*
+  sanity check used by the operations below: an atomic variable must
+  always be used with the same lock.  The first lock seen is recorded
+  in a->rw; later calls assert it has not changed.  (The else binds
+  to the inner if, which is intended here.)
+*/
+#define CHECK_RW if (rw) if (a->rw) assert(rw == a->rw); else a->rw=rw;
+#else
+#define CHECK_RW
+#endif
+
+#ifdef MY_ATOMIC_MODE_DUMMY
+/*
+  the following can never be enabled by ./configure; one needs to put
+  a #define in a source file to trigger the following warning.  The
+  resulting code will be broken -- it only makes sense to do this to
+  see how test_atomic detects broken implementations (another way is
+  to run a UP build on an SMP box).
+*/
+#warning MY_ATOMIC_MODE_DUMMY and MY_ATOMIC_MODE_RWLOCKS are incompatible
+#define my_atomic_rwlock_destroy(name)
+#define my_atomic_rwlock_init(name)
+#define my_atomic_rwlock_rdlock(name)
+#define my_atomic_rwlock_wrlock(name)
+#define my_atomic_rwlock_rdunlock(name)
+#define my_atomic_rwlock_wrunlock(name)
+#else
+#define my_atomic_rwlock_destroy(name) pthread_rwlock_destroy(& (name)->rw)
+#define my_atomic_rwlock_init(name) pthread_rwlock_init(& (name)->rw, 0)
+#define my_atomic_rwlock_rdlock(name) pthread_rwlock_rdlock(& (name)->rw)
+#define my_atomic_rwlock_wrlock(name) pthread_rwlock_wrlock(& (name)->rw)
+#define my_atomic_rwlock_rdunlock(name) pthread_rwlock_unlock(& (name)->rw)
+#define my_atomic_rwlock_wrunlock(name) pthread_rwlock_unlock(& (name)->rw)
+#endif
+
+#ifdef HAVE_INLINE
+
+/*
+  Each operation validates the lock (CHECK_RW), takes it if one was
+  given (rw may be NULL for variables not shared between threads),
+  performs the plain-C equivalent, and releases the lock.
+*/
+
+/* fetch-and-add: returns the previous value of a->val */
+#define make_atomic_add(S) \
+static inline uint ## S my_atomic_add ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw) \
+{ \
+ uint ## S ret; \
+ CHECK_RW; \
+ if (rw) my_atomic_rwlock_wrlock(rw); \
+ ret= a->val; \
+ a->val+= v; \
+ if (rw) my_atomic_rwlock_wrunlock(rw); \
+ return ret; \
+}
+
+/* atomic exchange: returns the previous value of a->val */
+#define make_atomic_swap(S) \
+static inline uint ## S my_atomic_swap ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw) \
+{ \
+ uint ## S ret; \
+ CHECK_RW; \
+ if (rw) my_atomic_rwlock_wrlock(rw); \
+ ret= a->val; \
+ a->val= v; \
+ if (rw) my_atomic_rwlock_wrunlock(rw); \
+ return ret; \
+}
+
+/*
+  compare-and-swap: boolean result; on failure *cmp is updated with
+  the current value of a->val
+*/
+#define make_atomic_cas(S) \
+static inline uint my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
+ uint ## S *cmp, uint ## S set, my_atomic_rwlock_t *rw) \
+{ \
+ uint ret; \
+ CHECK_RW; \
+ if (rw) my_atomic_rwlock_wrlock(rw); \
+ if (ret= (a->val == *cmp)) a->val= set; else *cmp=a->val; \
+ if (rw) my_atomic_rwlock_wrunlock(rw); \
+ return ret; \
+}
+
+#define make_atomic_load(S) \
+static inline uint ## S my_atomic_load ## S( \
+ my_atomic_ ## S ## _t *a, my_atomic_rwlock_t *rw) \
+{ \
+ uint ## S ret; \
+ CHECK_RW; \
+ if (rw) my_atomic_rwlock_wrlock(rw); \
+ ret= a->val; \
+ if (rw) my_atomic_rwlock_wrunlock(rw); \
+ return ret; \
+}
+
+/*
+  NOTE(review): store takes the READ lock while every other operation
+  (including load) takes the WRITE lock -- presumably intentional:
+  concurrent plain stores need not exclude each other, and rdlock
+  still excludes the read-modify-write ops holding wrlock.  Confirm
+  before changing.
+*/
+#define make_atomic_store(S) \
+static inline void my_atomic_store ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw) \
+{ \
+ CHECK_RW; \
+ if (rw) my_atomic_rwlock_rdlock(rw); \
+ (a)->val= (v); \
+ if (rw) my_atomic_rwlock_rdunlock(rw); \
+}
+
+#else /* no inline functions */
+
+/*
+  same family as above, but extern declarations only; the definitions
+  are compiled once in a separate translation unit
+*/
+#define make_atomic_add(S) \
+extern uint ## S my_atomic_add ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw);
+
+#define make_atomic_swap(S) \
+extern uint ## S my_atomic_swap ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw);
+
+#define make_atomic_cas(S) \
+extern uint my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
+ uint ## S *cmp, uint ## S set, my_atomic_rwlock_t *rw);
+
+#define make_atomic_load(S) \
+extern uint ## S my_atomic_load ## S( \
+ my_atomic_ ## S ## _t *a, my_atomic_rwlock_t *rw);
+
+#define make_atomic_store(S) \
+extern void my_atomic_store ## S( \
+ my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw);
+
+#endif
+
+/* instantiate for all sizes; unlike nolock.h, 64-bit is available here */
+make_atomic_add( 8)
+make_atomic_add(16)
+make_atomic_add(32)
+make_atomic_add(64)
+make_atomic_cas( 8)
+make_atomic_cas(16)
+make_atomic_cas(32)
+make_atomic_cas(64)
+make_atomic_load( 8)
+make_atomic_load(16)
+make_atomic_load(32)
+make_atomic_load(64)
+make_atomic_store( 8)
+make_atomic_store(16)
+make_atomic_store(32)
+make_atomic_store(64)
+make_atomic_swap( 8)
+make_atomic_swap(16)
+make_atomic_swap(32)
+make_atomic_swap(64)
+/* the generator macros are single-use -- drop them */
+#undef make_atomic_add
+#undef make_atomic_cas
+#undef make_atomic_load
+#undef make_atomic_store
+#undef make_atomic_swap
+#undef CHECK_RW
+
+
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
new file mode 100644
index 00000000000..b06d80d37e5
--- /dev/null
+++ b/include/atomic/x86-gcc.h
@@ -0,0 +1,56 @@
+/* Copyright (C) 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ XXX 64-bit atomic operations can be implemented using
+ cmpxchg8b, if necessary
+*/
+
+/*
+  The "body8" macros implement the 8-bit operations; at the bottom of
+  the file the very same bodies are reused for 16 and 32 bits -- gcc
+  derives the operand width from the types of v/ret/set bound to the
+  register constraints.
+*/
+
+/* LOCK xadd: atomically a->val += v, old value of a->val left in v */
+#define make_atomic_add_body8 \
+ asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (a->val))
+/* xchg with a memory operand is implicitly locked on x86 */
+#define make_atomic_swap_body8 \
+ asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (a->val))
+/* cmpxchg compares a->val with the accumulator (*cmp); setz -> ret */
+#define make_atomic_cas_body8 \
+ asm volatile (LOCK "cmpxchg %3, %0; setz %2;" \
+ : "+m" (a->val), "+a" (*cmp), "=q" (ret): "r" (set))
+
+#ifdef MY_ATOMIC_MODE_DUMMY
+#define make_atomic_load_body8 ret=a->val
+#define make_atomic_store_body8 a->val=v
+#else
+/*
+  Aligned reads/writes of these sizes are already atomic on x86,
+  but LOCK'ed/implicitly-locked instructions are used anyway to
+  force memory barriers
+*/
+#define make_atomic_load_body8 \
+ ret=0; \
+ asm volatile (LOCK "cmpxchg %2, %0" \
+ : "+m" (a->val), "+a" (ret): "r" (ret))
+#define make_atomic_store_body8 \
+ asm volatile ("xchg %0, %1;" : "+m" (a->val) : "r" (v))
+#endif
+
+/* the same bodies serve all widths (see the note at the top) */
+#define make_atomic_add_body16 make_atomic_add_body8
+#define make_atomic_add_body32 make_atomic_add_body8
+#define make_atomic_cas_body16 make_atomic_cas_body8
+#define make_atomic_cas_body32 make_atomic_cas_body8
+#define make_atomic_load_body16 make_atomic_load_body8
+#define make_atomic_load_body32 make_atomic_load_body8
+#define make_atomic_store_body16 make_atomic_store_body8
+#define make_atomic_store_body32 make_atomic_store_body8
+#define make_atomic_swap_body16 make_atomic_swap_body8
+#define make_atomic_swap_body32 make_atomic_swap_body8
+
diff --git a/include/atomic/x86-msvc.h b/include/atomic/x86-msvc.h
new file mode 100644
index 00000000000..19645551196
--- /dev/null
+++ b/include/atomic/x86-msvc.h
@@ -0,0 +1,85 @@
+/* Copyright (C) 2006 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ XXX 64-bit atomic operations can be implemented using
+ cmpxchg8b, if necessary
+*/
+
+// Would it be better to use intrinsics ?
+// (InterlockedCompareExchange, InterlockedCompareExchange16
+// InterlockedExchangeAdd, InterlockedExchange)
+
+/*
+  Generator macros parameterized by register name(s); the bottom of
+  the file instantiates them with al/ax/eax (and bl/bx/ebx) to obtain
+  the 8/16/32-bit bodies.  LOCK here is the bare instruction prefix
+  (see nolock.h), not a string as in the gcc variant.
+*/
+
+/* LOCK xadd: old value of a->val is returned in v */
+#define make_atomic_add_body(REG) \
+ _asm { \
+ _asm mov REG, v \
+ _asm LOCK xadd a->val, REG \
+ _asm movzx v, REG \
+ }
+/* cmpxchg with the accumulator; setz/movzx turn ZF into boolean ret */
+#define make_atomic_cas_body(AREG,REG2) \
+ _asm { \
+ _asm mov AREG, *cmp \
+ _asm mov REG2, set \
+ _asm LOCK cmpxchg a->val, REG2 \
+ _asm mov *cmp, AREG \
+ _asm setz al \
+ _asm movzx ret, al \
+ }
+/* xchg with a memory operand is implicitly locked on x86 */
+#define make_atomic_swap_body(REG) \
+ _asm { \
+ _asm mov REG, v \
+ _asm xchg a->val, REG \
+ _asm mov v, REG \
+ }
+
+#ifdef MY_ATOMIC_MODE_DUMMY
+#define make_atomic_load_body(AREG,REG) ret=a->val
+#define make_atomic_store_body(REG) a->val=v
+#else
+/*
+  Aligned reads/writes of these sizes are already atomic on x86,
+  but LOCK'ed/implicitly-locked instructions are used anyway to
+  force memory barriers
+*/
+#define make_atomic_load_body(AREG,REG2) \
+ _asm { \
+ _asm mov AREG, 0 \
+ _asm mov REG2, AREG \
+ _asm LOCK cmpxchg a->val, REG2 \
+ _asm mov ret, AREG \
+ }
+#define make_atomic_store_body(REG) \
+ _asm { \
+ _asm mov REG, v \
+ _asm xchg a->val, REG \
+ }
+#endif
+
+/* instantiate per width with matching register names */
+#define make_atomic_add_body8 make_atomic_add_body(al)
+#define make_atomic_add_body16 make_atomic_add_body(ax)
+#define make_atomic_add_body32 make_atomic_add_body(eax)
+#define make_atomic_cas_body8 make_atomic_cas_body(al, bl)
+#define make_atomic_cas_body16 make_atomic_cas_body(ax, bx)
+#define make_atomic_cas_body32 make_atomic_cas_body(eax, ebx)
+#define make_atomic_load_body8 make_atomic_load_body(al, bl)
+#define make_atomic_load_body16 make_atomic_load_body(ax, bx)
+#define make_atomic_load_body32 make_atomic_load_body(eax, ebx)
+#define make_atomic_store_body8 make_atomic_store_body(al)
+#define make_atomic_store_body16 make_atomic_store_body(ax)
+#define make_atomic_store_body32 make_atomic_store_body(eax)
+#define make_atomic_swap_body8 make_atomic_swap_body(al)
+#define make_atomic_swap_body16 make_atomic_swap_body(ax)
+#define make_atomic_swap_body32 make_atomic_swap_body(eax)
+