author     Mikael Ronstrom <mikael@mysql.com>  2009-10-09 14:21:29 +0200
committer  Mikael Ronstrom <mikael@mysql.com>  2009-10-09 14:21:29 +0200
commit     a4785fc4a2abeb3f8fbc253b78f558540b949482 (patch)
tree       b01f63d310ec7286d7af99b941db923a3e02efb2 /include/my_atomic.h
parent     ee696e4163354b85c60c68cb791005fa127abcea (diff)
download   mariadb-git-a4785fc4a2abeb3f8fbc253b78f558540b949482.tar.gz
Moved atomics from 6.0 codebase to prepare for using atomic increments in places where LOCK_thread_count previously was used
Diffstat (limited to 'include/my_atomic.h')
-rw-r--r--  include/my_atomic.h  |  273
1 file changed, 209 insertions(+), 64 deletions(-)
diff --git a/include/my_atomic.h b/include/my_atomic.h
index 03f7d081981..f62d3e32b42 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -1,3 +1,6 @@
+#ifndef MY_ATOMIC_INCLUDED
+#define MY_ATOMIC_INCLUDED
+
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -13,9 +16,51 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/*
+ This header defines five atomic operations:
+
+ my_atomic_add#(&var, what)
+ add 'what' to *var, and return the old value of *var
+
+ my_atomic_fas#(&var, what)
+ 'Fetch And Store'
+ store 'what' in *var, and return the old value of *var
+
+ my_atomic_cas#(&var, &old, new)
+ 'Compare And Swap'
+ if *var is equal to *old, then store 'new' in *var, and return TRUE
+ otherwise store *var in *old, and return FALSE
+
+ my_atomic_load#(&var)
+ return *var
+
+ my_atomic_store#(&var, what)
+ store 'what' in *var
+
+ '#' is substituted by a size suffix - 8, 16, 32, or ptr
+ (e.g. my_atomic_add8, my_atomic_fas32, my_atomic_casptr).
+
+  NOTE: These operations are not always atomic, so they must always be
+ enclosed in my_atomic_rwlock_rdlock(lock)/my_atomic_rwlock_rdunlock(lock)
+ or my_atomic_rwlock_wrlock(lock)/my_atomic_rwlock_wrunlock(lock).
+  Hint: if a code block makes intensive use of atomic ops, it makes sense
+  to take/release the rwlock once for the whole block, not for every statement.
+
+ On architectures where these operations are really atomic, rwlocks will
+ be optimized away.
+  8- and 16-bit atomics aren't implemented for Windows (see generic-msvc.h),
+  but can be added if necessary.
+*/
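For illustration, a minimal usage sketch of the contract described above. The
lock object and counter names are hypothetical, and my_atomic_rwlock_t is
assumed to come from the atomic backend headers; on platforms with truly
atomic operations the lock calls compile away to nothing.

  /* Hypothetical caller following the documented locking convention. */
  static my_atomic_rwlock_t counter_lock;
  static int32 volatile thread_counter;

  static void increment_thread_counter(void)
  {
    my_atomic_rwlock_wrlock(&counter_lock);      /* no-op with real atomics */
    (void) my_atomic_add32(&thread_counter, 1);  /* returns the old value   */
    my_atomic_rwlock_wrunlock(&counter_lock);
  }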
+
#ifndef my_atomic_rwlock_init
#define intptr void *
+/**
+ On most platforms we implement 8-bit, 16-bit, 32-bit and "pointer"
+ operations. Thus the symbol below is defined by default; platforms
+ where we leave out 8-bit or 16-bit operations should undefine it.
+*/
+#define MY_ATOMIC_HAS_8_16 1
#ifndef MY_ATOMIC_MODE_RWLOCKS
/*
@@ -24,124 +69,223 @@
#include "atomic/nolock.h"
#endif
-#ifndef MY_ATOMIC_NOLOCK
-/*
- * Have to use rw-locks for atomic ops
- */
+#ifndef make_atomic_cas_body
+/* nolock.h was not able to generate even a CAS function, fall back */
#include "atomic/rwlock.h"
-#endif
-
-#ifndef MY_ATOMICS_MADE
-
+#else
+/* define missing functions by using the already generated ones */
#ifndef make_atomic_add_body
-#define make_atomic_add_body(S) \
+#define make_atomic_add_body(S) \
int ## S tmp=*a; \
while (!my_atomic_cas ## S(a, &tmp, tmp+v)); \
v=tmp;
#endif
+#ifndef make_atomic_fas_body
+#define make_atomic_fas_body(S) \
+ int ## S tmp=*a; \
+ while (!my_atomic_cas ## S(a, &tmp, v)); \
+ v=tmp;
+#endif
+#ifndef make_atomic_load_body
+#define make_atomic_load_body(S) \
+ ret= 0; /* avoid compiler warning */ \
+ (void)(my_atomic_cas ## S(a, &ret, ret));
+#endif
+#ifndef make_atomic_store_body
+#define make_atomic_store_body(S) \
+ (void)(my_atomic_fas ## S (a, v));
+#endif
+#endif
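To make the fallback above concrete, here is a hand-expanded sketch of the
CAS-based add body for the 32-bit case. The function name is hypothetical;
the real my_atomic_add32 is generated by the macros further down.

  /* Sketch: fetch-and-add emulated with compare-and-swap. */
  static int32 sketch_add32(int32 volatile *a, int32 v)
  {
    int32 tmp= *a;                             /* current guess of *a      */
    while (!my_atomic_cas32(a, &tmp, tmp + v))
      ;                                        /* failed CAS refreshes tmp */
    return tmp;                                /* the old value of *a      */
  }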
+
+/*
+  transparent_union doesn't work in g++ (compiler bug?).
+
+ Darwin's gcc doesn't want to put pointers in a transparent_union
+ when built with -arch ppc64. Complains:
+ warning: 'transparent_union' attribute ignored
+*/
+#if defined(__GNUC__) && !defined(__cplusplus) && \
+ ! (defined(__APPLE__) && defined(_ARCH_PPC64))
+/*
+ we want to be able to use my_atomic_xxx functions with
+ both signed and unsigned integers. But gcc will issue a warning
+ "passing arg N of `my_atomic_XXX' as [un]signed due to prototype"
+ if the signedness of the argument doesn't match the prototype, or
+ "pointer targets in passing argument N of my_atomic_XXX differ in signedness"
+ if int* is used where uint* is expected (or vice versa).
+ Let's shut these warnings up
+*/
+#define make_transparent_unions(S) \
+ typedef union { \
+ int ## S i; \
+ uint ## S u; \
+ } U_ ## S __attribute__ ((transparent_union)); \
+ typedef union { \
+ int ## S volatile *i; \
+ uint ## S volatile *u; \
+ } Uv_ ## S __attribute__ ((transparent_union));
+#define uintptr intptr
+make_transparent_unions(8)
+make_transparent_unions(16)
+make_transparent_unions(32)
+make_transparent_unions(ptr)
+#undef uintptr
+#undef make_transparent_unions
+#define a U_a.i
+#define cmp U_cmp.i
+#define v U_v.i
+#define set U_set.i
+#else
+#define U_8 int8
+#define U_16 int16
+#define U_32 int32
+#define U_ptr intptr
+#define Uv_8 int8
+#define Uv_16 int16
+#define Uv_32 int32
+#define Uv_ptr intptr
+#define U_a volatile *a
+#define U_cmp *cmp
+#define U_v v
+#define U_set set
+#endif /* __GNUC__ transparent_union magic */
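The practical effect of the union trick, shown as a hypothetical sketch: in
gcc C mode both calls below compile without signedness warnings because the
transparent unions accept either pointer type; with the plain typedefs from
the #else branch, the second call would draw a warning.

  /* Hypothetical callers mixing signed and unsigned counters. */
  static int32  volatile signed_hits;
  static uint32 volatile unsigned_hits;

  static void bump_both(void)
  {
    (void) my_atomic_add32(&signed_hits, 1);    /* matches the int32* member  */
    (void) my_atomic_add32(&unsigned_hits, 1);  /* matches the uint32* member */
  }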
#ifdef HAVE_INLINE
-#define make_atomic_add(S) \
-STATIC_INLINE int ## S my_atomic_add ## S( \
- int ## S volatile *a, int ## S v) \
-{ \
- make_atomic_add_body(S); \
- return v; \
+#define make_atomic_cas(S) \
+STATIC_INLINE int my_atomic_cas ## S(Uv_ ## S U_a, \
+ Uv_ ## S U_cmp, U_ ## S U_set) \
+{ \
+ int8 ret; \
+ make_atomic_cas_body(S); \
+ return ret; \
}
-#define make_atomic_swap(S) \
-STATIC_INLINE int ## S my_atomic_swap ## S( \
- int ## S volatile *a, int ## S v) \
-{ \
- make_atomic_swap_body(S); \
- return v; \
+#define make_atomic_add(S) \
+STATIC_INLINE int ## S my_atomic_add ## S( \
+ Uv_ ## S U_a, U_ ## S U_v) \
+{ \
+ make_atomic_add_body(S); \
+ return v; \
}
-#define make_atomic_cas(S) \
-STATIC_INLINE int my_atomic_cas ## S(int ## S volatile *a, \
- int ## S *cmp, int ## S set) \
-{ \
- int8 ret; \
- make_atomic_cas_body(S); \
- return ret; \
+#define make_atomic_fas(S) \
+STATIC_INLINE int ## S my_atomic_fas ## S( \
+ Uv_ ## S U_a, U_ ## S U_v) \
+{ \
+ make_atomic_fas_body(S); \
+ return v; \
}
-#define make_atomic_load(S) \
-STATIC_INLINE int ## S my_atomic_load ## S(int ## S volatile *a) \
-{ \
- int ## S ret; \
- make_atomic_load_body(S); \
- return ret; \
+#define make_atomic_load(S) \
+STATIC_INLINE int ## S my_atomic_load ## S(Uv_ ## S U_a) \
+{ \
+ int ## S ret; \
+ make_atomic_load_body(S); \
+ return ret; \
}
-#define make_atomic_store(S) \
-STATIC_INLINE void my_atomic_store ## S( \
- int ## S volatile *a, int ## S v) \
-{ \
- make_atomic_store_body(S); \
+#define make_atomic_store(S) \
+STATIC_INLINE void my_atomic_store ## S( \
+ Uv_ ## S U_a, U_ ## S U_v) \
+{ \
+ make_atomic_store_body(S); \
}
#else /* no inline functions */
-#define make_atomic_add(S) \
-extern int ## S my_atomic_add ## S(int ## S volatile *a, int ## S v);
+#define make_atomic_add(S) \
+extern int ## S my_atomic_add ## S(Uv_ ## S U_a, U_ ## S U_v);
-#define make_atomic_swap(S) \
-extern int ## S my_atomic_swap ## S(int ## S volatile *a, int ## S v);
+#define make_atomic_fas(S) \
+extern int ## S my_atomic_fas ## S(Uv_ ## S U_a, U_ ## S U_v);
-#define make_atomic_cas(S) \
-extern int my_atomic_cas ## S(int ## S volatile *a, int ## S *cmp, int ## S set);
+#define make_atomic_cas(S) \
+extern int my_atomic_cas ## S(Uv_ ## S U_a, Uv_ ## S U_cmp, U_ ## S U_set);
-#define make_atomic_load(S) \
-extern int ## S my_atomic_load ## S(int ## S volatile *a);
+#define make_atomic_load(S) \
+extern int ## S my_atomic_load ## S(Uv_ ## S U_a);
-#define make_atomic_store(S) \
-extern void my_atomic_store ## S(int ## S volatile *a, int ## S v);
+#define make_atomic_store(S) \
+extern void my_atomic_store ## S(Uv_ ## S U_a, U_ ## S U_v);
#endif /* HAVE_INLINE */
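For orientation, a sketch of roughly what make_atomic_add(32) expands to when
HAVE_INLINE is set and the plain (non-union) typedefs are in effect; the body
itself comes from the platform header or from the CAS fallback above. This is
an approximation, not verbatim preprocessor output.

  /* Approximate expansion of make_atomic_add(32), non-union case. */
  STATIC_INLINE int32 my_atomic_add32(int32 volatile *a, int32 v)
  {
    make_atomic_add_body(32);   /* leaves the old value of *a in v */
    return v;
  }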
-make_atomic_cas( 8)
+#ifdef MY_ATOMIC_HAS_8_16
+make_atomic_cas(8)
make_atomic_cas(16)
+#endif
make_atomic_cas(32)
make_atomic_cas(ptr)
-make_atomic_add( 8)
+#ifdef MY_ATOMIC_HAS_8_16
+make_atomic_add(8)
make_atomic_add(16)
+#endif
make_atomic_add(32)
-make_atomic_load( 8)
+#ifdef MY_ATOMIC_HAS_8_16
+make_atomic_load(8)
make_atomic_load(16)
+#endif
make_atomic_load(32)
make_atomic_load(ptr)
-make_atomic_store( 8)
+#ifdef MY_ATOMIC_HAS_8_16
+make_atomic_fas(8)
+make_atomic_fas(16)
+#endif
+make_atomic_fas(32)
+make_atomic_fas(ptr)
+
+#ifdef MY_ATOMIC_HAS_8_16
+make_atomic_store(8)
make_atomic_store(16)
+#endif
make_atomic_store(32)
make_atomic_store(ptr)
-make_atomic_swap( 8)
-make_atomic_swap(16)
-make_atomic_swap(32)
-make_atomic_swap(ptr)
+#ifdef _atomic_h_cleanup_
+#include _atomic_h_cleanup_
+#undef _atomic_h_cleanup_
+#endif
+#undef U_8
+#undef U_16
+#undef U_32
+#undef U_ptr
+#undef Uv_8
+#undef Uv_16
+#undef Uv_32
+#undef Uv_ptr
+#undef a
+#undef cmp
+#undef v
+#undef set
+#undef U_a
+#undef U_cmp
+#undef U_v
+#undef U_set
#undef make_atomic_add
#undef make_atomic_cas
#undef make_atomic_load
#undef make_atomic_store
-#undef make_atomic_swap
+#undef make_atomic_fas
#undef make_atomic_add_body
#undef make_atomic_cas_body
#undef make_atomic_load_body
#undef make_atomic_store_body
-#undef make_atomic_swap_body
+#undef make_atomic_fas_body
#undef intptr
-#endif /* MY_ATOMICS_MADE */
-
-#ifdef _atomic_h_cleanup_
-#include _atomic_h_cleanup_
-#undef _atomic_h_cleanup_
+/*
+  The macro below defines (as an expression) the code that
+  will be run in spin-loops. Intel manuals recommend having PAUSE there.
+ It is expected to be defined in include/atomic/ *.h files
+*/
+#ifndef LF_BACKOFF
+#define LF_BACKOFF (1)
#endif
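A sketch of how LF_BACKOFF is meant to be used: spin loops evaluate it between
retries, and an architecture-specific header may redefine it to issue a PAUSE
hint, while the default (1) is a harmless expression. The function below is
hypothetical.

  /* Hypothetical spin-wait using LF_BACKOFF between retries. */
  static void spin_until_nonzero(int32 volatile *flag)
  {
    while (!my_atomic_load32(flag))
      (void) LF_BACKOFF;        /* 1 by default, or a PAUSE hint */
  }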
#define MY_ATOMIC_OK 0
@@ -150,3 +294,4 @@ extern int my_atomic_initialize();
#endif
+#endif /* MY_ATOMIC_INCLUDED */