author      unknown <serg@janus.mylan>    2006-10-13 11:37:27 +0200
committer   unknown <serg@janus.mylan>    2006-10-13 11:37:27 +0200
commit      c2872bafde6d6ec2444c293f7a8aa397eb1dbb59 (patch)
tree        bb08304c63c5526b2e85d0437c621af8d05148e6 /include
parent      d551a55a1b236097e3912c66a91a17dea1600d7e (diff)
download    mariadb-git-c2872bafde6d6ec2444c293f7a8aa397eb1dbb59.tar.gz
push for trnman review
(lockmanager still fails unit tests)
BitKeeper/deleted/.del-Makefile.am~4375ae3d4de2bdf0:
Delete: unittest/maria/Makefile.am
configure.in:
silence configure warnings, don't generate unittest/maria/Makefile
include/atomic/nolock.h:
s/LOCK/LOCK_prefix/
include/atomic/x86-gcc.h:
s/LOCK/LOCK_prefix/
include/atomic/x86-msvc.h:
s/LOCK/LOCK_prefix/
include/lf.h:
pin asserts, renames
include/my_atomic.h:
move cleanup
include/my_bit.h:
s/uint/uint32/
mysys/lf_dynarray.c:
style fixes, split for() in two, remove if()s
mysys/lf_hash.c:
renames, minor fixes
mysys/my_atomic.c:
run-time assert -> compile-time assert (see the first sketch after this file list)
storage/maria/Makefile.am:
lockman here
storage/maria/unittest/Makefile.am:
new unit tests
storage/maria/unittest/trnman-t.c:
lots of changes
storage/maria/lockman.c:
many changes:
second meaning of "blocker"
portability: s/gettimeofday/my_getsystime/
move mutex/cond out of LOCK_OWNER - it creates a race condition
that will be fixed in a separate changeset
increment lm->count for every element, not only for distinct ones -
because we cannot decrease it for distinct elements only :(
storage/maria/lockman.h:
move mutex/cond out of LOCK_OWNER
storage/maria/trnman.c:
move mutex/cond out of LOCK_OWNER
atomic-ops to access short_trid_to_trn[] (see the second sketch after this file list)
storage/maria/trnman.h:
move mutex/cond out of LOCK_OWNER
storage/maria/unittest/lockman-t.c:
unit stress test
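
Two short sketches follow for readers curious about the techniques named above; they are illustrations written for this page, not code from the changeset itself.

First, the run-time assert -> compile-time assert change in mysys/my_atomic.c. The classic C idiom (the macro name below is illustrative; the patch's exact spelling may differ) declares an array whose size goes negative when the condition is false, so a violated invariant breaks the build instead of firing at run time:

    /* A minimal sketch, assuming X is a compile-time constant expression. */
    #define compile_time_assert(X)                                          \
      do {                                                                  \
        char compile_time_assert[(X) ? 1 : -1];                             \
        (void) compile_time_assert; /* silence unused-variable warnings */  \
      } while (0)

    int main(void)
    {
      compile_time_assert(sizeof(long) >= sizeof(int));   /* compiles */
      /* compile_time_assert(sizeof(char) > 1);   -- build would fail */
      return 0;
    }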
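Second, what "atomic-ops to access short_trid_to_trn[]" in trnman.c could look like. my_atomic_casptr() is the pointer-CAS primitive from the include/my_atomic.h shown in the diff below; the TRN stand-in, the array size, and the function itself are simplified assumptions for illustration:

    #include <stdint.h>
    #include <my_atomic.h>               /* my_atomic_casptr() */

    typedef struct st_trn TRN;           /* stand-in for the real TRN */
    static TRN * volatile short_trid_to_trn[1 << 16];

    /*
      Claim slot i for trn without taking a mutex: the CAS succeeds only
      if the slot is still NULL, so two transactions can never be handed
      the same short trid.
    */
    static int trn_claim_short_trid(uint16_t i, TRN *trn)
    {
      void *expected= NULL;
      return my_atomic_casptr((void * volatile *) &short_trid_to_trn[i],
                              &expected, trn);
    }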
Diffstat (limited to 'include')
-rw-r--r--   include/atomic/nolock.h    |  4
-rw-r--r--   include/atomic/x86-gcc.h   | 18
-rw-r--r--   include/atomic/x86-msvc.h  | 10
-rw-r--r--   include/lf.h               | 14
-rw-r--r--   include/my_atomic.h        | 10
-rw-r--r--   include/my_bit.h           |  4
6 files changed, 31 insertions, 29 deletions
diff --git a/include/atomic/nolock.h b/include/atomic/nolock.h
index 21f41484ac9..a696e008f03 100644
--- a/include/atomic/nolock.h
+++ b/include/atomic/nolock.h
@@ -17,9 +17,9 @@
 #if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__)
 
 # ifdef MY_ATOMIC_MODE_DUMMY
-# define LOCK ""
+# define LOCK_prefix ""
 # else
-# define LOCK "lock"
+# define LOCK_prefix "lock"
 # endif
 
 # ifdef __GNUC__
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
index 3f0a82a9400..0be8fdf9244 100644
--- a/include/atomic/x86-gcc.h
+++ b/include/atomic/x86-gcc.h
@@ -22,15 +22,15 @@
 #ifdef __x86_64__
 # ifdef MY_ATOMIC_NO_XADD
-# define MY_ATOMIC_MODE "gcc-amd64" LOCK "-no-xadd"
+# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix "-no-xadd"
 # else
-# define MY_ATOMIC_MODE "gcc-amd64" LOCK
+# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix
 # endif
 #else
 # ifdef MY_ATOMIC_NO_XADD
-# define MY_ATOMIC_MODE "gcc-x86" LOCK "-no-xadd"
+# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix "-no-xadd"
 # else
-# define MY_ATOMIC_MODE "gcc-x86" LOCK
+# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix
 # endif
 #endif
@@ -41,12 +41,12 @@
 #ifndef MY_ATOMIC_NO_XADD
 #define make_atomic_add_body(S) \
-  asm volatile (LOCK "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
+  asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
 #endif
 #define make_atomic_swap_body(S) \
-  asm volatile ("; xchg %0, %1;" : "+r" (v) , "+m" (*a))
+  asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
 #define make_atomic_cas_body(S) \
-  asm volatile (LOCK "; cmpxchg %3, %0; setz %2;" \
+  asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \
       : "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
@@ -55,11 +55,11 @@
 #else
 /*
   Actually 32-bit reads/writes are always atomic on x86
-  But we add LOCK here anyway to force memory barriers
+  But we add LOCK_prefix here anyway to force memory barriers
 */
 #define make_atomic_load_body(S) \
   ret=0; \
-  asm volatile (LOCK "; cmpxchg %2, %0" \
+  asm volatile (LOCK_prefix "; cmpxchg %2, %0" \
       : "+m" (*a), "+a" (ret): "r" (ret))
 #define make_atomic_store_body(S) \
   asm volatile ("; xchg %0, %1;" : "+m" (*a), "+r" (v))
diff --git a/include/atomic/x86-msvc.h b/include/atomic/x86-msvc.h
index d4024a854fb..8f3e55aaed7 100644
--- a/include/atomic/x86-msvc.h
+++ b/include/atomic/x86-msvc.h
@@ -26,19 +26,19 @@
 #ifndef _atomic_h_cleanup_
 #define _atomic_h_cleanup_ "atomic/x86-msvc.h"
 
-#define MY_ATOMIC_MODE "msvc-x86" LOCK
+#define MY_ATOMIC_MODE "msvc-x86" LOCK_prefix
 
 #define make_atomic_add_body(S) \
   _asm { \
     _asm mov reg_ ## S, v \
-    _asm LOCK xadd *a, reg_ ## S \
+    _asm LOCK_prefix xadd *a, reg_ ## S \
     _asm movzx v, reg_ ## S \
   }
 #define make_atomic_cas_body(S) \
   _asm { \
     _asm mov areg_ ## S, *cmp \
     _asm mov reg2_ ## S, set \
-    _asm LOCK cmpxchg *a, reg2_ ## S \
+    _asm LOCK_prefix cmpxchg *a, reg2_ ## S \
     _asm mov *cmp, areg_ ## S \
     _asm setz al \
     _asm movzx ret, al \
@@ -56,13 +56,13 @@
 #else
 /*
   Actually 32-bit reads/writes are always atomic on x86
-  But we add LOCK here anyway to force memory barriers
+  But we add LOCK_prefix here anyway to force memory barriers
 */
 #define make_atomic_load_body(S) \
   _asm { \
     _asm mov areg_ ## S, 0 \
     _asm mov reg2_ ## S, areg_ ## S \
-    _asm LOCK cmpxchg *a, reg2_ ## S \
+    _asm LOCK_prefix cmpxchg *a, reg2_ ## S \
     _asm mov ret, areg_ ## S \
   }
 #define make_atomic_store_body(S) \
diff --git a/include/lf.h b/include/lf.h
index 6a5047f6052..4c6765b2d40 100644
--- a/include/lf.h
+++ b/include/lf.h
@@ -88,8 +88,8 @@ nolock_wrap(lf_dynarray_iterate, int,
 /*
   pin manager for memory allocator
 */
-#define LF_PINBOX_PINS 3
-#define LF_PURGATORY_SIZE 11
+#define LF_PINBOX_PINS 4
+#define LF_PURGATORY_SIZE 10
 
 typedef void lf_pinbox_free_func(void *, void *);
@@ -112,9 +112,9 @@ typedef struct {
         -sizeof(void *)*(LF_PINBOX_PINS+LF_PURGATORY_SIZE+1)];
 } LF_PINS;
 
-#define lf_lock_by_pins(PINS) \
+#define lf_rwlock_by_pins(PINS) \
   my_atomic_rwlock_wrlock(&(PINS)->pinbox->pinstack.lock)
-#define lf_unlock_by_pins(PINS) \
+#define lf_rwunlock_by_pins(PINS) \
   my_atomic_rwlock_wrunlock(&(PINS)->pinbox->pinstack.lock)
 
 /*
@@ -139,11 +139,13 @@ typedef struct {
 #define _lf_unpin(PINS, PIN) _lf_pin(PINS, PIN, NULL)
 #define lf_pin(PINS, PIN, ADDR) \
   do { \
-    lf_lock_by_pins(PINS); \
+    lf_rwlock_by_pins(PINS); \
     _lf_pin(PINS, PIN, ADDR); \
-    lf_unlock_by_pins(PINS); \
+    lf_rwunlock_by_pins(PINS); \
   } while (0)
 #define lf_unpin(PINS, PIN) lf_pin(PINS, PIN, NULL)
+#define _lf_assert_pin(PINS, PIN) assert((PINS)->pin[PIN] != 0)
+#define _lf_assert_unpin(PINS, PIN) assert((PINS)->pin[PIN]==0)
 
 void lf_pinbox_init(LF_PINBOX *pinbox, lf_pinbox_free_func *free_func,
                     void * free_func_arg);
diff --git a/include/my_atomic.h b/include/my_atomic.h
index d3e4e0055d3..921b55e68a2 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -118,6 +118,11 @@ make_atomic_swap(16)
 make_atomic_swap(32)
 make_atomic_swap(ptr)
 
+#ifdef _atomic_h_cleanup_
+#include _atomic_h_cleanup_
+#undef _atomic_h_cleanup_
+#endif
+
 #undef make_atomic_add
 #undef make_atomic_cas
 #undef make_atomic_load
@@ -130,11 +135,6 @@ make_atomic_swap(ptr)
 #undef make_atomic_swap_body
 #undef intptr
 
-#ifdef _atomic_h_cleanup_
-#include _atomic_h_cleanup_
-#undef _atomic_h_cleanup_
-#endif
-
 #ifndef LF_BACKOFF
 #define LF_BACKOFF (1)
 #endif
diff --git a/include/my_bit.h b/include/my_bit.h
index 71bbe2d4ba3..58e8bb39683 100644
--- a/include/my_bit.h
+++ b/include/my_bit.h
@@ -88,7 +88,7 @@ STATIC_INLINE uint32 my_clear_highest_bit(uint32 v)
   return v & w;
 }
 
-STATIC_INLINE uint32 my_reverse_bits(uint key)
+STATIC_INLINE uint32 my_reverse_bits(uint32 key)
 {
   return
     (_my_bits_reverse_table[ key & 255] << 24) |
@@ -101,7 +101,7 @@ STATIC_INLINE uint32 my_reverse_bits(uint key)
 extern uint my_bit_log2(ulong value);
 extern uint32 my_round_up_to_next_power(uint32 v);
 uint32 my_clear_highest_bit(uint32 v);
-uint32 my_reverse_bits(uint key);
+uint32 my_reverse_bits(uint32 key);
 extern uint my_count_bits(ulonglong v);
 extern uint my_count_bits_ushort(ushort v);
 #endif
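
A note on the LOCK_prefix lines above: an aligned 32-bit read is already atomic on x86, but a lock-prefixed instruction additionally acts as a full memory barrier, which is why make_atomic_load_body is implemented as a locked cmpxchg. Here is a stand-alone sketch of the same trick, outside the make_atomic_*_body macro machinery (a rendering of the pattern, not the header's actual code):

    #include <stdint.h>

    /*
      Read *a with full-barrier semantics, mirroring make_atomic_load_body:
      with eax == 0 and the source register == 0, cmpxchg leaves *a alone
      when *a is 0 and loads *a into eax when it is not -- either way eax
      ends up holding the current value of *a.
    */
    static inline int32_t atomic_load_full_barrier(volatile int32_t *a)
    {
      int32_t ret= 0;
      __asm__ __volatile__ ("lock; cmpxchg %2, %0"
                            : "+m" (*a), "+a" (ret)
                            : "r" (ret));
      return ret;
    }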
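Similarly, a usage sketch for the pin machinery in lf.h, showing where the new _lf_assert_pin check pays off. Only the LF_PINS type and the lf_* macros come from the header; the node type, the list head, and the pin-validate-retry loop are illustrative assumptions:

    #include <lf.h>        /* LF_PINS, lf_pin, lf_unpin, _lf_assert_pin */

    typedef struct st_node { struct st_node * volatile next; int key; } NODE;

    /*
      Read the first node's key under the protection of pin 0, so that a
      concurrent deleter cannot reuse the node's memory while we look at it.
    */
    int first_key(NODE * volatile *head, LF_PINS *pins)
    {
      NODE *node;
      int key;
      do {
        node= *head;
        if (!node)
          return -1;               /* empty list */
        lf_pin(pins, 0, node);     /* publish: "I am using this node" */
      } while (node != *head);     /* head moved? pin the new one     */
      _lf_assert_pin(pins, 0);     /* the new debug check: pin is set */
      key= node->key;
      lf_unpin(pins, 0);
      return key;
    }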