author     Mikael Ronstrom <mikael@mysql.com>    2009-10-12 11:00:39 +0200
committer  Mikael Ronstrom <mikael@mysql.com>    2009-10-12 11:00:39 +0200
commit     bae553cfcd9ad8b9df8b5305736f6810fbaed43b (patch)
tree       b9c258d33b8f1dd330ba73287a648ffbd20488d7 /include/atomic
parent     a4785fc4a2abeb3f8fbc253b78f558540b949482 (diff)
download   mariadb-git-bae553cfcd9ad8b9df8b5305736f6810fbaed43b.tar.gz
Backported my_atomic from the 6.0 codebase and added support for 64-bit atomics, enabling removal of LOCK_thread_count from the per-query path. dispatch_command() and the close of a query, both executed for every query, no longer take LOCK_thread_count and use atomic increments/decrements instead.
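To illustrate the shape of that change: where the per-query path previously serialized a shared counter on LOCK_thread_count, it can now use the my_atomic API touched below. A rough sketch with illustrative names (the server-side code is not part of this diff):

    /* Before: every query takes a global mutex just to bump a counter. */
    pthread_mutex_lock(&LOCK_thread_count);
    thread_running++;
    pthread_mutex_unlock(&LOCK_thread_count);

    /* After: an atomic increment. On platforms with native atomics the
       my_atomic_rwlock_*() wrappers compile away to nothing; in the
       fallback mode they take a plain mutex. */
    my_atomic_rwlock_wrlock(&thread_running_lock);
    my_atomic_add32(&thread_running, 1);
    my_atomic_rwlock_wrunlock(&thread_running_lock);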
Diffstat (limited to 'include/atomic')
-rw-r--r--  include/atomic/rwlock.h   31
-rw-r--r--  include/atomic/x86-gcc.h  32
2 files changed, 49 insertions(+), 14 deletions(-)
diff --git a/include/atomic/rwlock.h b/include/atomic/rwlock.h
index 18b77e93d80..29e22fcb3d5 100644
--- a/include/atomic/rwlock.h
+++ b/include/atomic/rwlock.h
@@ -1,3 +1,6 @@
+#ifndef ATOMIC_RWLOCK_INCLUDED
+#define ATOMIC_RWLOCK_INCLUDED
+
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -13,7 +16,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
+typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
+#define MY_ATOMIC_MODE_RWLOCKS 1
#ifdef MY_ATOMIC_MODE_DUMMY
/*
@@ -31,18 +35,27 @@ typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
#define my_atomic_rwlock_wrunlock(name)
#define MY_ATOMIC_MODE "dummy (non-atomic)"
#else
-#define my_atomic_rwlock_destroy(name) pthread_rwlock_destroy(& (name)->rw)
-#define my_atomic_rwlock_init(name) pthread_rwlock_init(& (name)->rw, 0)
-#define my_atomic_rwlock_rdlock(name) pthread_rwlock_rdlock(& (name)->rw)
-#define my_atomic_rwlock_wrlock(name) pthread_rwlock_wrlock(& (name)->rw)
-#define my_atomic_rwlock_rdunlock(name) pthread_rwlock_unlock(& (name)->rw)
-#define my_atomic_rwlock_wrunlock(name) pthread_rwlock_unlock(& (name)->rw)
-#define MY_ATOMIC_MODE "rwlocks"
+/*
+  We use read-write lock macros here but map them to mutex locks, which
+  are faster. Since the API is semantically rich, the underlying
+  implementation can still be changed later if necessary.
+*/
+#define my_atomic_rwlock_destroy(name) pthread_mutex_destroy(& (name)->rw)
+#define my_atomic_rwlock_init(name) pthread_mutex_init(& (name)->rw, 0)
+#define my_atomic_rwlock_rdlock(name) pthread_mutex_lock(& (name)->rw)
+#define my_atomic_rwlock_wrlock(name) pthread_mutex_lock(& (name)->rw)
+#define my_atomic_rwlock_rdunlock(name) pthread_mutex_unlock(& (name)->rw)
+#define my_atomic_rwlock_wrunlock(name) pthread_mutex_unlock(& (name)->rw)
+#define MY_ATOMIC_MODE "mutex"
+#ifndef MY_ATOMIC_MODE_RWLOCKS
+#define MY_ATOMIC_MODE_RWLOCKS 1
+#endif
#endif
#define make_atomic_add_body(S) int ## S sav; sav= *a; *a+= v; v=sav;
-#define make_atomic_swap_body(S) int ## S sav; sav= *a; *a= v; v=sav;
+#define make_atomic_fas_body(S) int ## S sav; sav= *a; *a= v; v=sav;
#define make_atomic_cas_body(S) if ((ret= (*a == *cmp))) *a= set; else *cmp=*a;
#define make_atomic_load_body(S) ret= *a;
#define make_atomic_store_body(S) *a= v;
+#endif /* ATOMIC_RWLOCK_INCLUDED */
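In this fallback mode the body macros above are stitched by my_atomic.h into ordinary functions; atomicity comes from the caller bracketing each call with the my_atomic_rwlock macros. A minimal sketch of what the 32-bit add expands to, assuming the generator in my_atomic.h (simplified, not the literal preprocessor output):

    static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
    {
      int32 sav;   /* body from make_atomic_add_body(32) */
      sav= *a;
      *a+= v;
      v= sav;
      return v;    /* the value *a held before the add */
    }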
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
index c11483b4083..ba7e7eb572f 100644
--- a/include/atomic/x86-gcc.h
+++ b/include/atomic/x86-gcc.h
@@ -42,15 +42,37 @@
#endif
#ifndef MY_ATOMIC_NO_XADD
-#define make_atomic_add_body(S) \
- asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_add_body(S) make_atomic_add_body ## S
+#define make_atomic_cas_body(S) make_atomic_cas_body ## S
#endif
-#define make_atomic_fas_body(S) \
- asm volatile ("xchg %0, %1;" : "+q" (v) , "+m" (*a))
-#define make_atomic_cas_body(S) \
+
+#define make_atomic_add_body32 \
+ asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
+
+#define make_atomic_cas_body32 \
asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \
: "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
+#define make_atomic_cas_bodyptr make_atomic_cas_body32
+
+#ifdef __x86_64__
+#define make_atomic_add_body64 make_atomic_add_body32
+#define make_atomic_cas_body64 make_atomic_cas_body32
+#else
+#define make_atomic_add_body64 \
+ int64 tmp=*a; \
+ while (!my_atomic_cas64(a, &tmp, tmp+v)); \
+ v=tmp;
+#define make_atomic_cas_body64 \
+ int32 ebx=(set & 0xFFFFFFFF), ecx=(set >> 32); \
+ asm volatile (LOCK_prefix "; cmpxchg8b %0; setz %2;" \
+ : "+m" (*a), "+A" (*cmp), "=q" (ret) \
+ :"b" (ebx), "c" (ecx))
+#endif
+
+#define make_atomic_fas_body(S) \
+ asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
+
#ifdef MY_ATOMIC_MODE_DUMMY
#define make_atomic_load_body(S) ret=*a
#define make_atomic_store_body(S) *a=v
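The non-x86_64 branch above implements the 64-bit add as a compare-and-swap retry loop: load the current value, try to install tmp+v, and retry with the freshly observed value whenever another thread got there first. The same idea written with modern GCC __atomic builtins rather than the header's macros (a sketch for clarity, not code from this commit):

    #include <stdint.h>

    /* Atomically add v to *a; returns the previous value of *a,
       matching the semantics of my_atomic_add64(). */
    static int64_t atomic_add64_sketch(volatile int64_t *a, int64_t v)
    {
      int64_t tmp= __atomic_load_n(a, __ATOMIC_RELAXED);
      /* On failure the builtin reloads *a into tmp, so each retry
         uses the latest observed value. */
      while (!__atomic_compare_exchange_n(a, &tmp, tmp + v, 0,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST))
        ;
      return tmp;
    }

The underlying CAS on 32-bit x86 is cmpxchg8b: it compares edx:eax (the "+A" constraint) against the 8-byte memory operand and, on mismatch, loads the current memory value back into edx:eax, which is exactly what the retry loop needs.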