author | Marc Alff <marc.alff@sun.com> | 2009-12-17 02:12:02 -0700
committer | Marc Alff <marc.alff@sun.com> | 2009-12-17 02:12:02 -0700
commit | 4e8d1c6bf30abfd45a993b058ff2a33d4671b73d (patch)
tree | 4b84506f98fd3e9ba8a57b531c2609eb5bd11fbe
parent | 0d99234dba5dc64bc4f369361f8fc0a669b8dff3 (diff)
parent | a2a4c70c1bfd077a1281604bdd8368dd32ab6e83 (diff)
download | mariadb-git-4e8d1c6bf30abfd45a993b058ff2a33d4671b73d.tar.gz
Merge mysql-next-mr (revno 2939) --> mysql-next-mr-marc
-rwxr-xr-x | BUILD/build_mccge.sh | 6
-rw-r--r-- | configure.in | 64
-rw-r--r-- | include/atomic/generic-msvc.h | 44
-rw-r--r-- | include/atomic/x86-gcc.h | 84
-rw-r--r-- | include/hash.h | 11
-rw-r--r-- | include/my_atomic.h | 21
-rw-r--r-- | include/my_global.h | 2
-rw-r--r-- | mysys/hash.c | 49
-rw-r--r-- | mysys/ptr_cmp.c | 24
-rw-r--r-- | sql/event_scheduler.cc | 9
-rw-r--r-- | sql/log_event.cc | 13
-rw-r--r-- | sql/mysql_priv.h | 56
-rw-r--r-- | sql/mysqld.cc | 9
-rw-r--r-- | sql/net_serv.cc | 2
-rw-r--r-- | sql/slave.cc | 17
-rw-r--r-- | sql/sp_head.cc | 6
-rw-r--r-- | sql/sql_base.cc | 9
-rw-r--r-- | sql/sql_class.cc | 20
-rw-r--r-- | sql/sql_class.h | 5
-rw-r--r-- | sql/sql_cursor.cc | 2
-rw-r--r-- | sql/sql_parse.cc | 54
-rw-r--r-- | unittest/mysys/my_atomic-t.c | 39
22 files changed, 443 insertions, 103 deletions
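Most of the churn below in sql/ and the atomic headers serves a single change: the global query id and the running-thread counter stop hiding behind LOCK_thread_count and become atomic counters, accessed through new inlines (next_query_id(), get_query_id(), inc_thread_running(), dec_thread_running()) in sql/mysql_priv.h on top of the new 64-bit my_atomic support. The following is a minimal standalone sketch of that counter pattern, not the server code itself: C11 atomics stand in for the tree's my_atomic_add64/my_atomic_add32 wrappers (an assumption for illustration; the real inlines additionally bracket each call with my_atomic_rwlock_wrlock/_wrunlock, which compile away where native atomics exist).

```c
/* Sketch of the atomic-counter pattern introduced in sql/mysql_priv.h. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_llong global_query_id = 1;  /* mirrors query_id_t global_query_id */
static atomic_int   thread_running  = 0;  /* mirrors int32 thread_running */

static long long next_query_id(void)
{
  /* fetch-and-add returns the previous value, so add 1 for the new id,
     matching the (id+1) in the new inline. */
  return atomic_fetch_add(&global_query_id, 1) + 1;
}

static int inc_thread_running(void)
{
  return atomic_fetch_add(&thread_running, 1) + 1;
}

static int dec_thread_running(void)
{
  return atomic_fetch_add(&thread_running, -1) - 1;
}

int main(void)
{
  inc_thread_running();
  printf("query id %lld, %d thread(s) running\n",
         next_query_id(), atomic_load(&thread_running));
  dec_thread_running();
  return 0;
}
```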
diff --git a/BUILD/build_mccge.sh b/BUILD/build_mccge.sh index 3345ac3dcb5..8ca31b2d119 100755 --- a/BUILD/build_mccge.sh +++ b/BUILD/build_mccge.sh @@ -1303,7 +1303,11 @@ set_linux_configs() compiler_flags="$compiler_flags -m32" fi if test "x$fast_flag" != "xno" ; then - compiler_flags="$compiler_flags -O2" + if test "x$fast_flag" = "xyes" ; then + compiler_flags="$compiler_flags -O3" + else + compiler_flags="$compiler_flags -O2" + fi else compiler_flags="$compiler_flags -O0" fi diff --git a/configure.in b/configure.in index 30c22c74b7b..284fbf5ea49 100644 --- a/configure.in +++ b/configure.in @@ -880,9 +880,73 @@ AC_CHECK_DECLS(MHA_MAPSIZE_VA, #include <sys/mman.h> ] ) +fi + +dnl Use of ALARMs to wakeup on timeout on sockets +dnl +dnl This feature makes use of a mutex and is a scalability hog we +dnl try to avoid using. However we need support for SO_SNDTIMEO and +dnl SO_RCVTIMEO socket options for this to work. So we will check +dnl if this feature is supported by a simple AC_RUN_IFELSE macro. However +dnl on some OS's there is support for setting those variables but +dnl they are silently ignored. For those OS's we will not attempt +dnl o use SO_SNDTIMEO and SO_RCVTIMEO even if it is said to work. +dnl See Bug#29093 for the problem with SO_SND/RCVTIMEO on HP/UX. +dnl To use alarm is simple, simply avoid setting anything. + + +AC_CACHE_CHECK([whether SO_SNDTIMEO and SO_RCVTIMEO work], + [mysql_cv_socket_timeout], + [AC_RUN_IFELSE( + [AC_LANG_PROGRAM([[ + #include <sys/types.h> + #include <sys/socket.h> + #include <sys/time.h> + ]],[[ + int fd = socket(AF_INET, SOCK_STREAM, 0); + struct timeval tv; + int ret= 0; + tv.tv_sec= 2; + tv.tv_usec= 0; + ret|= setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)); + ret|= setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)); + return !!ret; + ]])], + [mysql_cv_socket_timeout=yes], + [mysql_cv_socket_timeout=no], + [mysql_cv_socket_timeout=no + AC_MSG_WARN([Socket timeout options disabled due to cross-compiling])]) + ]) + +use_alarm=yes + +if test "$mysql_cv_socket_timeout" = yes; then + case $SYSTEM_TYPE in + dnl We trust the result from the following systems + *solaris*) use_alarm=no ;; + *freebsd*) use_alarm=no ;; + *darwin*) use_alarm=no ;; + *) + dnl We trust the result from Linux also + if test "$TARGET_LINUX" = "true"; then + use_alarm=no + fi + dnl We trust no one else for the moment + dnl (Windows is hardcoded to not use alarms) + ;; + esac +fi +AC_ARG_WITH(alarm, + AS_HELP_STRING([--with-alarm], [Use alarm to implement socket timeout.]), + [use_alarm=$withval], []) +AC_MSG_CHECKING(whether to use alarms to implement socket timeout) +if test "$use_alarm" = no ; then + AC_DEFINE([NO_ALARM], [1], [No need to use alarm for socket timeout]) + AC_DEFINE([SIGNAL_WITH_VIO_CLOSE], [1], [Need to use vio close for kill connection]) fi +AC_MSG_RESULT($use_alarm) #-------------------------------------------------------------------- # Check for IPv6 support diff --git a/include/atomic/generic-msvc.h b/include/atomic/generic-msvc.h index f1e1b0e88c9..a84cde6b2c3 100644 --- a/include/atomic/generic-msvc.h +++ b/include/atomic/generic-msvc.h @@ -23,6 +23,7 @@ */ #undef MY_ATOMIC_HAS_8_16 +#include <windows.h> /* x86 compilers (both VS2003 or VS2005) never use instrinsics, but generate function calls to kernel32 instead, even in the optimized build. 
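The configure.in hunk above uses AC_RUN_IFELSE to probe whether SO_SNDTIMEO/SO_RCVTIMEO are actually honoured; when they are, and the platform is one of the trusted ones, NO_ALARM and SIGNAL_WITH_VIO_CLOSE are defined so the server can rely on per-socket timeouts instead of the mutex-heavy alarm mechanism. A minimal standalone version of that probe — essentially the embedded test program plus a diagnostic printf, not part of the patch itself — looks like this:

```c
/* Standalone sketch of the SO_SNDTIMEO/SO_RCVTIMEO configure probe. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };  /* the probe's example value */
  int ret = 0;

  if (fd < 0)
    return 1;
  /* If both options are accepted, the new configure logic disables the
     alarm-based timeout path on the platforms it trusts. */
  ret |= setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
  ret |= setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
  printf("SO_SNDTIMEO/SO_RCVTIMEO %s\n", ret ? "rejected" : "accepted");
  close(fd);
  return !!ret;
}
```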
@@ -36,19 +37,17 @@ #else C_MODE_START /*Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics*/ -LONG _InterlockedExchange (LONG volatile *Target,LONG Value); LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp); -LONG _InterlockedExchangeAdd (LONG volatile *Addend, LONG Value); +LONGLONG _InterlockedCompareExchange64 (LONGLONG volatile *Target, + LONGLONG Value, LONGLONG Comp); C_MODE_END -#pragma intrinsic(_InterlockedExchangeAdd) #pragma intrinsic(_InterlockedCompareExchange) -#pragma intrinsic(_InterlockedExchange) +#pragma intrinsic(_InterlockedCompareExchange64) #endif -#define InterlockedExchange _InterlockedExchange -#define InterlockedExchangeAdd _InterlockedExchangeAdd #define InterlockedCompareExchange _InterlockedCompareExchange +#define InterlockedCompareExchange64 _InterlockedCompareExchange64 /* No need to do something special for InterlockedCompareExchangePointer as it is a #define to InterlockedCompareExchange. The same applies to @@ -57,23 +56,39 @@ C_MODE_END #endif /*_M_IX86*/ #define MY_ATOMIC_MODE "msvc-intrinsics" -#define IL_EXCHG_ADD32(X,Y) InterlockedExchangeAdd((volatile LONG *)(X),(Y)) -#define IL_COMP_EXCHG32(X,Y,Z) InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z)) +/* Implement using CAS on WIN32 */ +#define IL_COMP_EXCHG32(X,Y,Z) \ + InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z)) +#define IL_COMP_EXCHG64(X,Y,Z) \ + InterlockedCompareExchange64((volatile LONGLONG *)(X), \ + (LONGLONG)(Y),(LONGLONG)(Z)) #define IL_COMP_EXCHGptr InterlockedCompareExchangePointer -#define IL_EXCHG32(X,Y) InterlockedExchange((volatile LONG *)(X),(Y)) -#define IL_EXCHGptr InterlockedExchangePointer -#define make_atomic_add_body(S) \ - v= IL_EXCHG_ADD ## S (a, v) + #define make_atomic_cas_body(S) \ int ## S initial_cmp= *cmp; \ int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \ if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a; + +#ifndef _M_IX86 +/* Use full set of optimised functions on WIN64 */ +#define IL_EXCHG_ADD32(X,Y) \ + InterlockedExchangeAdd((volatile LONG *)(X),(Y)) +#define IL_EXCHG_ADD64(X,Y) \ + InterlockedExchangeAdd64((volatile LONGLONG *)(X),(LONGLONG)(Y)) +#define IL_EXCHG32(X,Y) \ + InterlockedExchange((volatile LONG *)(X),(Y)) +#define IL_EXCHG64(X,Y) \ + InterlockedExchange64((volatile LONGLONG *)(X),(LONGLONG)(Y)) +#define IL_EXCHGptr InterlockedExchangePointer + +#define make_atomic_add_body(S) \ + v= IL_EXCHG_ADD ## S (a, v) #define make_atomic_swap_body(S) \ v= IL_EXCHG ## S (a, v) #define make_atomic_load_body(S) \ ret= 0; /* avoid compiler warning */ \ ret= IL_COMP_EXCHG ## S (a, ret, ret); - +#endif /* my_yield_processor (equivalent of x86 PAUSE instruction) should be used to improve performance on hyperthreaded CPUs. Intel recommends to use it in @@ -108,9 +123,12 @@ static __inline int my_yield_processor() #else /* cleanup */ #undef IL_EXCHG_ADD32 +#undef IL_EXCHG_ADD64 #undef IL_COMP_EXCHG32 +#undef IL_COMP_EXCHG64 #undef IL_COMP_EXCHGptr #undef IL_EXCHG32 +#undef IL_EXCHG64 #undef IL_EXCHGptr #endif diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h index 59090bc26b7..32839e0a67d 100644 --- a/include/atomic/x86-gcc.h +++ b/include/atomic/x86-gcc.h @@ -22,6 +22,12 @@ architectures support double-word (128-bit) cas. */ +/* + No special support of 8 and 16 bit operations are implemented here + currently. 
+*/ +#undef MY_ATOMIC_HAS_8_AND_16 + #ifdef __x86_64__ # ifdef MY_ATOMIC_NO_XADD # define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix "-no-xadd" @@ -42,29 +48,79 @@ #endif #ifndef MY_ATOMIC_NO_XADD -#define make_atomic_add_body(S) \ - asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a)) +#define make_atomic_add_body(S) make_atomic_add_body ## S +#define make_atomic_cas_body(S) make_atomic_cas_body ## S #endif -#define make_atomic_fas_body(S) \ - asm volatile ("xchg %0, %1;" : "+q" (v) , "+m" (*a)) -#define make_atomic_cas_body(S) \ + +#define make_atomic_add_body32 \ + asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a)) + +#define make_atomic_cas_body32 \ asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \ : "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set)) -#ifdef MY_ATOMIC_MODE_DUMMY -#define make_atomic_load_body(S) ret=*a -#define make_atomic_store_body(S) *a=v -#else +#ifdef __x86_64__ +#define make_atomic_add_body64 make_atomic_add_body32 +#define make_atomic_cas_body64 make_atomic_cas_body32 + +#define make_atomic_fas_body(S) \ + asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a)) + /* Actually 32-bit reads/writes are always atomic on x86 But we add LOCK_prefix here anyway to force memory barriers */ -#define make_atomic_load_body(S) \ - ret=0; \ - asm volatile (LOCK_prefix "; cmpxchg %2, %0" \ - : "+m" (*a), "+a" (ret): "r" (ret)) -#define make_atomic_store_body(S) \ +#define make_atomic_load_body(S) \ + ret=0; \ + asm volatile (LOCK_prefix "; cmpxchg %2, %0" \ + : "+m" (*a), "+a" (ret): "r" (ret)) +#define make_atomic_store_body(S) \ asm volatile ("; xchg %0, %1;" : "+m" (*a), "+r" (v)) + +#else +/* + Use default implementations of 64-bit operations since we solved + the 64-bit problem on 32-bit platforms for CAS, no need to solve it + once more for ADD, LOAD, STORE and FAS as well. + Since we already added add32 support, we need to define add64 + here, but we haven't defined fas, load and store at all, so + we can fallback on default implementations. +*/ +#define make_atomic_add_body64 \ + int64 tmp=*a; \ + while (!my_atomic_cas64(a, &tmp, tmp+v)); \ + v=tmp; + +/* + On some platforms (e.g. Mac OS X and Solaris) the ebx register + is held as a pointer to the global offset table. Thus we're not + allowed to use the b-register on those platforms when compiling + PIC code, to avoid this we push ebx and pop ebx and add a movl + instruction to avoid having ebx in the interface of the assembler + instruction. + + cmpxchg8b works on both 32-bit platforms and 64-bit platforms but + the code here is only used on 32-bit platforms, on 64-bit + platforms the much simpler make_atomic_cas_body32 will work + fine. 
+*/ +#define make_atomic_cas_body64 \ + int32 ebx=(set & 0xFFFFFFFF), ecx=(set >> 32); \ + asm volatile ("push %%ebx; movl %3, %%ebx;" \ + LOCK_prefix "; cmpxchg8b %0; setz %2; pop %%ebx"\ + : "+m" (*a), "+A" (*cmp), "=q" (ret) \ + :"m" (ebx), "c" (ecx)) #endif +/* + The implementation of make_atomic_cas_body32 is adaptable to + the OS word size, so on 64-bit platforms it will automatically + adapt to 64-bits and so it will work also on 64-bit platforms +*/ +#define make_atomic_cas_bodyptr make_atomic_cas_body32 + +#ifdef MY_ATOMIC_MODE_DUMMY +#define make_atomic_load_body(S) ret=*a +#define make_atomic_store_body(S) *a=v +#endif #endif /* ATOMIC_X86_GCC_INCLUDED */ diff --git a/include/hash.h b/include/hash.h index d870e17c341..515c764214d 100644 --- a/include/hash.h +++ b/include/hash.h @@ -30,6 +30,7 @@ extern "C" { /* flags for hash_init */ #define HASH_UNIQUE 1 /* hash_insert fails on duplicate key */ +typedef uint my_hash_value_type; typedef uchar *(*my_hash_get_key)(const uchar *,size_t*,my_bool); typedef void (*my_hash_free_key)(void *); @@ -60,8 +61,18 @@ void my_hash_free(HASH *tree); void my_hash_reset(HASH *hash); uchar *my_hash_element(HASH *hash, ulong idx); uchar *my_hash_search(const HASH *info, const uchar *key, size_t length); +uchar *my_hash_search_using_hash_value(const HASH *info, + my_hash_value_type hash_value, + const uchar *key, size_t length); +my_hash_value_type my_calc_hash(const HASH *info, + const uchar *key, size_t length); uchar *my_hash_first(const HASH *info, const uchar *key, size_t length, HASH_SEARCH_STATE *state); +uchar *my_hash_first_from_hash_value(const HASH *info, + my_hash_value_type hash_value, + const uchar *key, + size_t length, + HASH_SEARCH_STATE *state); uchar *my_hash_next(const HASH *info, const uchar *key, size_t length, HASH_SEARCH_STATE *state); my_bool my_hash_insert(HASH *info, const uchar *data); diff --git a/include/my_atomic.h b/include/my_atomic.h index 85cf87165fb..23c3dc749ab 100644 --- a/include/my_atomic.h +++ b/include/my_atomic.h @@ -37,7 +37,7 @@ my_atomic_store#(&var, what) store 'what' in *var - '#' is substituted by a size suffix - 8, 16, 32, or ptr + '#' is substituted by a size suffix - 8, 16, 32, 64, or ptr (e.g. my_atomic_add8, my_atomic_fas32, my_atomic_casptr). NOTE This operations are not always atomic, so they always must be @@ -49,18 +49,17 @@ On architectures where these operations are really atomic, rwlocks will be optimized away. 8- and 16-bit atomics aren't implemented for windows (see generic-msvc.h), - but can be added, if necessary. + but can be added, if necessary. */ #ifndef my_atomic_rwlock_init #define intptr void * /** - On most platforms we implement 8-bit, 16-bit, 32-bit and "pointer" - operations. Thus the symbol below is defined by default; platforms - where we leave out 8-bit or 16-bit operations should undefine it. + Currently we don't support 8-bit and 16-bit operations. + It can be added later if needed. 
*/ -#define MY_ATOMIC_HAS_8_16 1 +#undef MY_ATOMIC_HAS_8_16 #ifndef MY_ATOMIC_MODE_RWLOCKS /* @@ -129,6 +128,7 @@ make_transparent_unions(8) make_transparent_unions(16) make_transparent_unions(32) +make_transparent_unions(64) make_transparent_unions(ptr) #undef uintptr #undef make_transparent_unions @@ -140,10 +140,12 @@ make_transparent_unions(ptr) #define U_8 int8 #define U_16 int16 #define U_32 int32 +#define U_64 int64 #define U_ptr intptr #define Uv_8 int8 #define Uv_16 int16 #define Uv_32 int32 +#define Uv_64 int64 #define Uv_ptr intptr #define U_a volatile *a #define U_cmp *cmp @@ -217,6 +219,7 @@ make_atomic_cas(8) make_atomic_cas(16) #endif make_atomic_cas(32) +make_atomic_cas(64) make_atomic_cas(ptr) #ifdef MY_ATOMIC_HAS_8_16 @@ -224,12 +227,14 @@ make_atomic_add(8) make_atomic_add(16) #endif make_atomic_add(32) +make_atomic_add(64) #ifdef MY_ATOMIC_HAS_8_16 make_atomic_load(8) make_atomic_load(16) #endif make_atomic_load(32) +make_atomic_load(64) make_atomic_load(ptr) #ifdef MY_ATOMIC_HAS_8_16 @@ -237,6 +242,7 @@ make_atomic_fas(8) make_atomic_fas(16) #endif make_atomic_fas(32) +make_atomic_fas(64) make_atomic_fas(ptr) #ifdef MY_ATOMIC_HAS_8_16 @@ -244,6 +250,7 @@ make_atomic_store(8) make_atomic_store(16) #endif make_atomic_store(32) +make_atomic_store(64) make_atomic_store(ptr) #ifdef _atomic_h_cleanup_ @@ -254,10 +261,12 @@ make_atomic_store(ptr) #undef U_8 #undef U_16 #undef U_32 +#undef U_64 #undef U_ptr #undef Uv_8 #undef Uv_16 #undef Uv_32 +#undef Uv_64 #undef Uv_ptr #undef a #undef cmp diff --git a/include/my_global.h b/include/my_global.h index 976253945aa..81ad947c6b6 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -876,6 +876,8 @@ typedef SOCKET_SIZE_TYPE size_socket; #endif #endif /* defined (HAVE_LONG_LONG) && !defined(ULONGLONG_MAX)*/ +#define INT_MIN64 (~0x7FFFFFFFFFFFFFFFLL) +#define INT_MAX64 0x7FFFFFFFFFFFFFFFLL #define INT_MIN32 (~0x7FFFFFFFL) #define INT_MAX32 0x7FFFFFFFL #define UINT_MAX32 0xFFFFFFFFL diff --git a/mysys/hash.c b/mysys/hash.c index 6996b045d1c..5fa804ce7ce 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -33,16 +33,18 @@ typedef struct st_hash_info { uchar *data; /* data for current entry */ } HASH_LINK; -static uint my_hash_mask(size_t hashnr, size_t buffmax, size_t maxlength); +static uint my_hash_mask(my_hash_value_type hashnr, + size_t buffmax, size_t maxlength); static void movelink(HASH_LINK *array,uint pos,uint next_link,uint newlink); static int hashcmp(const HASH *hash, HASH_LINK *pos, const uchar *key, size_t length); -static uint calc_hash(const HASH *hash, const uchar *key, size_t length) +static my_hash_value_type calc_hash(const HASH *hash, + const uchar *key, size_t length) { ulong nr1=1, nr2=4; hash->charset->coll->hash_sort(hash->charset,(uchar*) key,length,&nr1,&nr2); - return nr1; + return (my_hash_value_type)nr1; } /** @@ -179,7 +181,8 @@ my_hash_key(const HASH *hash, const uchar *record, size_t *length, /* Calculate pos according to keys */ -static uint my_hash_mask(size_t hashnr, size_t buffmax, size_t maxlength) +static uint my_hash_mask(my_hash_value_type hashnr, size_t buffmax, + size_t maxlength) { if ((hashnr & (buffmax-1)) < maxlength) return (hashnr & (buffmax-1)); return (hashnr & ((buffmax >> 1) -1)); @@ -200,7 +203,7 @@ static #if !defined(__USLC__) && !defined(__sgi) inline #endif -unsigned int rec_hashnr(HASH *hash,const uchar *record) +my_hash_value_type rec_hashnr(HASH *hash,const uchar *record) { size_t length; uchar *key= (uchar*) my_hash_key(hash, record, &length, 0); @@ -214,6 +217,21 @@ 
uchar* my_hash_search(const HASH *hash, const uchar *key, size_t length) return my_hash_first(hash, key, length, &state); } +uchar* my_hash_search_using_hash_value(const HASH *hash, + my_hash_value_type hash_value, + const uchar *key, + size_t length) +{ + HASH_SEARCH_STATE state; + return my_hash_first_from_hash_value(hash, hash_value, + key, length, &state); +} + +my_hash_value_type my_calc_hash(const HASH *hash, + const uchar *key, size_t length) +{ + return calc_hash(hash, key, length ? length : hash->key_length); +} /* Search after a record based on a key @@ -224,14 +242,25 @@ uchar* my_hash_search(const HASH *hash, const uchar *key, size_t length) uchar* my_hash_first(const HASH *hash, const uchar *key, size_t length, HASH_SEARCH_STATE *current_record) { + return my_hash_first_from_hash_value(hash, + calc_hash(hash, key, length ? length : hash->key_length), + key, length, current_record); +} + +uchar* my_hash_first_from_hash_value(const HASH *hash, + my_hash_value_type hash_value, + const uchar *key, + size_t length, + HASH_SEARCH_STATE *current_record) +{ HASH_LINK *pos; uint flag,idx; - DBUG_ENTER("my_hash_first"); + DBUG_ENTER("my_hash_first_from_hash_value"); flag=1; if (hash->records) { - idx= my_hash_mask(calc_hash(hash, key, length ? length : hash->key_length), + idx= my_hash_mask(hash_value, hash->blength, hash->records); do { @@ -331,7 +360,8 @@ static int hashcmp(const HASH *hash, HASH_LINK *pos, const uchar *key, my_bool my_hash_insert(HASH *info, const uchar *record) { int flag; - size_t idx,halfbuff,hash_nr,first_index; + size_t idx,halfbuff,first_index; + my_hash_value_type hash_nr; uchar *UNINIT_VAR(ptr_to_rec),*UNINIT_VAR(ptr_to_rec2); HASH_LINK *data,*empty,*UNINIT_VAR(gpos),*UNINIT_VAR(gpos2),*pos; @@ -467,7 +497,8 @@ my_bool my_hash_insert(HASH *info, const uchar *record) my_bool my_hash_delete(HASH *hash, uchar *record) { - uint blength,pos2,pos_hashnr,lastpos_hashnr,idx,empty_index; + uint blength,pos2,idx,empty_index; + my_hash_value_type pos_hashnr, lastpos_hashnr; HASH_LINK *data,*lastpos,*gpos,*pos,*pos3,*empty; DBUG_ENTER("my_hash_delete"); if (!hash->records) diff --git a/mysys/ptr_cmp.c b/mysys/ptr_cmp.c index 24ab6a1ea9c..2005e3eb2b7 100644 --- a/mysys/ptr_cmp.c +++ b/mysys/ptr_cmp.c @@ -22,16 +22,39 @@ #include "mysys_priv.h" #include <myisampack.h> +#ifdef TARGET_OS_SOLARIS +/* + * On Solaris, memcmp() is normally faster than the unrolled ptr_compare_N + * functions, as memcmp() is usually a platform-specific implementation + * written in assembler, provided in /usr/lib/libc/libc_hwcap*.so.1. + * This implementation is also usually faster than the built-in memcmp + * supplied by GCC, so it is recommended to build with "-fno-builtin-memcmp" + * in CFLAGS if building with GCC on Solaris. 
+ */ + +#include <string.h> + +static int native_compare(size_t *length, unsigned char **a, unsigned char **b) +{ + return memcmp(*a, *b, *length); +} + +#else /* TARGET_OS_SOLARIS */ + static int ptr_compare(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_0(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_1(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_2(size_t *compare_length, uchar **a, uchar **b); static int ptr_compare_3(size_t *compare_length, uchar **a, uchar **b); +#endif /* TARGET_OS_SOLARIS */ /* Get a pointer to a optimal byte-compare function for a given size */ qsort2_cmp get_ptr_compare (size_t size) { +#ifdef TARGET_OS_SOLARIS + return (qsort2_cmp) native_compare; +#else if (size < 4) return (qsort2_cmp) ptr_compare; switch (size & 3) { @@ -41,6 +64,7 @@ qsort2_cmp get_ptr_compare (size_t size) case 3: return (qsort2_cmp) ptr_compare_3; } return 0; /* Impossible */ +#endif /* TARGET_OS_SOLARIS */ } diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index d9ca161f260..31bb3d39b85 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -132,9 +132,8 @@ post_init_event_thread(THD *thd) pthread_mutex_lock(&LOCK_thread_count); threads.append(thd); thread_count++; - thread_running++; + inc_thread_running(); pthread_mutex_unlock(&LOCK_thread_count); - return FALSE; } @@ -156,7 +155,7 @@ deinit_event_thread(THD *thd) DBUG_PRINT("exit", ("Event thread finishing")); pthread_mutex_lock(&LOCK_thread_count); thread_count--; - thread_running--; + dec_thread_running(); delete thd; pthread_cond_broadcast(&COND_thread_count); pthread_mutex_unlock(&LOCK_thread_count); @@ -417,7 +416,7 @@ Event_scheduler::start() net_end(&new_thd->net); pthread_mutex_lock(&LOCK_thread_count); thread_count--; - thread_running--; + dec_thread_running(); delete new_thd; pthread_cond_broadcast(&COND_thread_count); pthread_mutex_unlock(&LOCK_thread_count); @@ -550,7 +549,7 @@ error: net_end(&new_thd->net); pthread_mutex_lock(&LOCK_thread_count); thread_count--; - thread_running--; + dec_thread_running(); delete new_thd; pthread_cond_broadcast(&COND_thread_count); pthread_mutex_unlock(&LOCK_thread_count); diff --git a/sql/log_event.cc b/sql/log_event.cc index cc2b4667115..10b4ecd3f7b 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3055,10 +3055,7 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli, rpl_filter->db_ok(thd->db)) { thd->set_time((time_t)when); - thd->set_query((char*)query_arg, q_len_arg); - pthread_mutex_lock(&LOCK_thread_count); - thd->query_id = next_query_id(); - pthread_mutex_unlock(&LOCK_thread_count); + thd->set_query_and_id((char*)query_arg, q_len_arg, next_query_id()); thd->variables.pseudo_thread_id= thread_id; // for temp tables DBUG_PRINT("query",("%s", thd->query())); @@ -4580,9 +4577,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli, if (rpl_filter->db_ok(thd->db)) { thd->set_time((time_t)when); - pthread_mutex_lock(&LOCK_thread_count); - thd->query_id = next_query_id(); - pthread_mutex_unlock(&LOCK_thread_count); + thd->set_query_id(next_query_id()); thd->warning_info->opt_clear_warning_info(thd->query_id); TABLE_LIST tables; @@ -8071,9 +8066,7 @@ int Table_map_log_event::do_apply_event(Relay_log_info const *rli) DBUG_ASSERT(rli->sql_thd == thd); /* Step the query id to mark what columns that are actually used. 
*/ - pthread_mutex_lock(&LOCK_thread_count); - thd->query_id= next_query_id(); - pthread_mutex_unlock(&LOCK_thread_count); + thd->set_query_id(next_query_id()); if (!(memory= my_multi_malloc(MYF(MY_WME), &table_list, (uint) sizeof(RPL_TABLE_LIST), diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 46fab53039f..3ddaf114673 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -53,6 +53,7 @@ #include "sql_array.h" #include "sql_plugin.h" #include "scheduler.h" +#include <my_atomic.h> #include <mysql/psi/mysql_file.h> #ifndef __WIN__ #include <netdb.h> @@ -89,11 +90,60 @@ typedef ulong nesting_map; /* Used for flags of nesting constructs */ typedef ulonglong nested_join_map; /* query_id */ -typedef ulonglong query_id_t; +typedef int64 query_id_t; extern query_id_t global_query_id; +extern int32 thread_running; +extern my_atomic_rwlock_t global_query_id_lock; +extern my_atomic_rwlock_t thread_running_lock; /* increment query_id and return it. */ -inline query_id_t next_query_id() { return global_query_id++; } +inline query_id_t next_query_id() +{ + query_id_t id; + my_atomic_rwlock_wrlock(&global_query_id_lock); + id= my_atomic_add64(&global_query_id, 1); + my_atomic_rwlock_wrunlock(&global_query_id_lock); + return (id+1); +} + +inline query_id_t get_query_id() +{ + query_id_t id; + my_atomic_rwlock_wrlock(&global_query_id_lock); + id= my_atomic_load64(&global_query_id); + my_atomic_rwlock_wrunlock(&global_query_id_lock); + return id; +} + +inline int32 +inc_thread_running() +{ + int32 num_thread_running; + my_atomic_rwlock_wrlock(&thread_running_lock); + num_thread_running= my_atomic_add32(&thread_running, 1); + my_atomic_rwlock_wrunlock(&thread_running_lock); + return (num_thread_running+1); +} + +inline int32 +dec_thread_running() +{ + int32 num_thread_running; + my_atomic_rwlock_wrlock(&thread_running_lock); + num_thread_running= my_atomic_add32(&thread_running, -1); + my_atomic_rwlock_wrunlock(&thread_running_lock); + return (num_thread_running-1); +} + +inline int32 +get_thread_running() +{ + int32 num_thread_running; + my_atomic_rwlock_wrlock(&thread_running_lock); + num_thread_running= my_atomic_load32(&thread_running); + my_atomic_rwlock_wrunlock(&thread_running_lock); + return num_thread_running; +} /* useful constants */ extern MYSQL_PLUGIN_IMPORT const key_map key_map_empty; @@ -1955,7 +2005,7 @@ extern bool opt_ignore_builtin_innodb; extern my_bool opt_character_set_client_handshake; extern bool volatile abort_loop, shutdown_in_progress; extern bool in_bootstrap; -extern uint volatile thread_count, thread_running, global_read_lock; +extern uint volatile thread_count, global_read_lock; extern uint connection_count; extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types; extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 88b7b5931f7..e8d2b10034a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -524,7 +524,8 @@ uint mysqld_port_timeout; uint delay_key_write_options, protocol_version; uint lower_case_table_names; uint tc_heuristic_recover= 0; -uint volatile thread_count, thread_running; +uint volatile thread_count; +int32 thread_running; ulonglong thd_startup_options; ulong back_log, connect_timeout, concurrency, server_id; ulong table_cache_size, table_def_size; @@ -540,6 +541,8 @@ ulonglong max_binlog_cache_size=0; ulong query_cache_size=0; ulong refresh_version; /* Increments on each reload */ query_id_t global_query_id; +my_atomic_rwlock_t global_query_id_lock; +my_atomic_rwlock_t 
thread_running_lock; ulong aborted_threads, aborted_connects; ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use; @@ -1372,6 +1375,8 @@ void clean_up(bool print_message) DBUG_PRINT("quit", ("Error messages freed")); /* Tell main we are ready */ logger.cleanup_end(); + my_atomic_rwlock_destroy(&global_query_id_lock); + my_atomic_rwlock_destroy(&thread_running_lock); (void) pthread_mutex_lock(&LOCK_thread_count); DBUG_PRINT("quit", ("got thread count lock")); ready_to_exit=1; @@ -7789,6 +7794,8 @@ static int mysql_init_variables(void) what_to_log= ~ (1L << (uint) COM_TIME); refresh_version= 1L; /* Increments on each reload */ global_query_id= thread_id= 1L; + my_atomic_rwlock_init(&global_query_id_lock); + my_atomic_rwlock_init(&thread_running_lock); strmov(server_version, MYSQL_SERVER_VERSION); myisam_recover_options_str= sql_mode_str= "OFF"; myisam_stats_method_str= "nulls_unequal"; diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 5cf3597c638..d54ff1d2779 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -71,8 +71,10 @@ #if defined(__WIN__) || !defined(MYSQL_SERVER) /* The following is because alarms doesn't work on windows. */ +#ifndef NO_ALARM #define NO_ALARM #endif +#endif #ifndef NO_ALARM #include "my_pthread.h" diff --git a/sql/slave.cc b/sql/slave.cc index 6d78d44f6f2..6876734d746 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1206,6 +1206,8 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) mi->clock_diff_with_master= (long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10)); } + else if (check_io_slave_killed(mi->io_thd, mi, NULL)) + goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), @@ -1258,7 +1260,9 @@ not always make sense; please check the manual before using it)."; } else if (mysql_errno(mysql)) { - if (is_network_error(mysql_errno(mysql))) + if (check_io_slave_killed(mi->io_thd, mi, NULL)) + goto slave_killed_err; + else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Get master SERVER_ID failed with error: %s", mysql_error(mysql)); @@ -1329,6 +1333,8 @@ be equal for the Statement-format replication to work"; goto err; } } + else if (check_io_slave_killed(mi->io_thd, mi, NULL)) + goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), @@ -1390,6 +1396,8 @@ be equal for the Statement-format replication to work"; goto err; } } + else if (check_io_slave_killed(mi->io_thd, mi, NULL)) + goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), @@ -1453,6 +1461,11 @@ network_err: if (master_res) mysql_free_result(master_res); DBUG_RETURN(2); + +slave_killed_err: + if (master_res) + mysql_free_result(master_res); + DBUG_RETURN(2); } static bool wait_for_relay_log_space(Relay_log_info* rli) @@ -2678,7 +2691,7 @@ connected: if (ret == 1) /* Fatal error */ goto err; - + if (ret == 2) { if (check_io_slave_killed(mi->io_thd, mi, "Slave I/O thread killed" diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 542db282700..3f7d812384c 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1335,7 +1335,7 @@ sp_head::execute(THD *thd) /* To avoid wiping out thd->change_list on old_change_list destruction */ old_change_list.empty(); thd->lex= old_lex; - thd->query_id= old_query_id; + thd->set_query_id(old_query_id); 
DBUG_ASSERT(!thd->derived_tables); thd->derived_tables= old_derived_tables; thd->variables.sql_mode= save_sql_mode; @@ -2745,9 +2745,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, */ thd->lex= m_lex; - pthread_mutex_lock(&LOCK_thread_count); - thd->query_id= next_query_id(); - pthread_mutex_unlock(&LOCK_thread_count); + thd->set_query_id(next_query_id()); if (thd->prelocked_mode == NON_PRELOCKED) { diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 7df66a5af16..ee264ec3de6 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2555,6 +2555,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, char key[MAX_DBKEY_LENGTH]; uint key_length; char *alias= table_list->alias; + my_hash_value_type hash_value; HASH_SEARCH_STATE state; DBUG_ENTER("open_table"); @@ -2741,6 +2742,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, on disk. */ + hash_value= my_calc_hash(&open_cache, (uchar*) key, key_length); mysql_mutex_lock(&LOCK_open); /* @@ -2783,8 +2785,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, an implicit "pending locks queue" - see wait_for_locked_table_names for details. */ - for (table= (TABLE*) my_hash_first(&open_cache, (uchar*) key, key_length, - &state); + for (table= (TABLE*) my_hash_first_from_hash_value(&open_cache, + hash_value, + (uchar*) key, + key_length, + &state); table && table->in_use ; table= (TABLE*) my_hash_next(&open_cache, (uchar*) key, key_length, &state)) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index cded82c2e87..019c22d9dd2 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3273,6 +3273,26 @@ void THD::set_query(char *query_arg, uint32 query_length_arg) pthread_mutex_unlock(&LOCK_thd_data); } +/** Assign a new value to thd->query and thd->query_id. */ + +void THD::set_query_and_id(char *query_arg, uint32 query_length_arg, + query_id_t new_query_id) +{ + pthread_mutex_lock(&LOCK_thd_data); + set_query_inner(query_arg, query_length_arg); + query_id= new_query_id; + pthread_mutex_unlock(&LOCK_thd_data); +} + +/** Assign a new value to thd->query_id. */ + +void THD::set_query_id(query_id_t new_query_id) +{ + pthread_mutex_lock(&LOCK_thd_data); + query_id= new_query_id; + pthread_mutex_unlock(&LOCK_thd_data); +} + /** Mark transaction to rollback and mark error as fatal to a sub-statement. diff --git a/sql/sql_class.h b/sql/sql_class.h index b518ac84d8f..4156f2afa8e 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2319,10 +2319,13 @@ public: virtual void set_statement(Statement *stmt); /** - Assign a new value to thd->query. + Assign a new value to thd->query and thd->query_id. Protected with LOCK_thd_data mutex. */ void set_query(char *query_arg, uint32 query_length_arg); + void set_query_and_id(char *query_arg, uint32 query_length_arg, + query_id_t new_query_id); + void set_query_id(query_id_t new_query_id); private: /** The current internal error handler for this thread, or NULL. 
*/ Internal_error_handler *m_internal_handler; diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index ffc3fafe55f..533b47e61da 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -438,7 +438,7 @@ Sensitive_cursor::fetch(ulong num_rows) thd->derived_tables= derived_tables; thd->open_tables= open_tables; thd->lock= lock; - thd->query_id= query_id; + thd->set_query_id(query_id); thd->change_list= change_list; /* save references to memory allocated during fetch */ thd->set_n_backup_active_arena(this, &backup_arena); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 30af6dc93b4..0774e373908 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -482,7 +482,7 @@ static void handle_bootstrap_impl(THD *thd) query= (char *) thd->memdup_w_gap(buff, length + 1, thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE); - thd->set_query(query, length); + thd->set_query_and_id(query, length, next_query_id()); DBUG_PRINT("query",("%-.4096s",thd->query())); #if defined(ENABLED_PROFILING) thd->profiling.start_new_query(); @@ -493,7 +493,6 @@ static void handle_bootstrap_impl(THD *thd) We don't need to obtain LOCK_thread_count here because in bootstrap mode we have only one thread. */ - thd->query_id=next_query_id(); thd->set_time(); mysql_parse(thd, thd->query(), length, & found_semicolon); close_thread_tables(thd); // Free tables @@ -915,29 +914,29 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->enable_slow_log= TRUE; thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */ thd->set_time(); - pthread_mutex_lock(&LOCK_thread_count); - thd->query_id= global_query_id; - - switch( command ) { - /* Ignore these statements. */ - case COM_STATISTICS: - case COM_PING: - break; - /* Only increase id on these statements but don't count them. */ - case COM_STMT_PREPARE: - case COM_STMT_CLOSE: - case COM_STMT_RESET: - next_query_id(); - break; - /* Increase id and count all other statements. */ - default: - statistic_increment(thd->status_var.questions, &LOCK_status); - next_query_id(); + { + query_id_t query_id; + switch( command ) { + /* Ignore these statements. */ + case COM_STATISTICS: + case COM_PING: + query_id= get_query_id(); + break; + /* Only increase id on these statements but don't count them. */ + case COM_STMT_PREPARE: + case COM_STMT_CLOSE: + case COM_STMT_RESET: + query_id= next_query_id() - 1; + break; + /* Increase id and count all other statements. */ + default: + statistic_increment(thd->status_var.questions, &LOCK_status); + query_id= next_query_id() - 1; + } + thd->set_query_id(query_id); } - - thread_running++; + inc_thread_running(); /* TODO: set thd->lex->sql_command to SQLCOM_END here */ - pthread_mutex_unlock(&LOCK_thread_count); /** Clear the set of flags that are expected to be cleared at the @@ -1165,16 +1164,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->security_ctx->priv_user, (char *) thd->security_ctx->host_or_ip); - thd->set_query(beginning_of_next_stmt, length); - pthread_mutex_lock(&LOCK_thread_count); + thd->set_query_and_id(beginning_of_next_stmt, length, next_query_id()); /* Count each statement from the client. */ statistic_increment(thd->status_var.questions, &LOCK_status); - thd->query_id= next_query_id(); thd->set_time(); /* Reset the query start time. 
*/ /* TODO: set thd->lex->sql_command to SQLCOM_END here */ - pthread_mutex_unlock(&LOCK_thread_count); mysql_parse(thd, beginning_of_next_stmt, length, &end_of_stmt); } @@ -1488,9 +1484,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd_proc_info(thd, "cleaning up"); thd->set_query(NULL, 0); thd->command=COM_SLEEP; - pthread_mutex_lock(&LOCK_thread_count); // For process list - thread_running--; - pthread_mutex_unlock(&LOCK_thread_count); + dec_thread_running(); thd_proc_info(thd, 0); thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); diff --git a/unittest/mysys/my_atomic-t.c b/unittest/mysys/my_atomic-t.c index 1a558e8cb73..9853d3cf964 100644 --- a/unittest/mysys/my_atomic-t.c +++ b/unittest/mysys/my_atomic-t.c @@ -48,6 +48,34 @@ pthread_handler_t test_atomic_add(void *arg) return 0; } +volatile int64 a64; +/* add and sub a random number in a loop. Must get 0 at the end */ +pthread_handler_t test_atomic_add64(void *arg) +{ + int m= (*(int *)arg)/2; + GCC_BUG_WORKAROUND int64 x; + for (x= ((int64)(intptr)(&m)); m ; m--) + { + x= (x*m+0xfdecba987654321LL) & INT_MAX64; + my_atomic_rwlock_wrlock(&rwl); + my_atomic_add64(&a64, x); + my_atomic_rwlock_wrunlock(&rwl); + + my_atomic_rwlock_wrlock(&rwl); + my_atomic_add64(&a64, -x); + my_atomic_rwlock_wrunlock(&rwl); + } + pthread_mutex_lock(&mutex); + if (!--running_threads) + { + bad= (a64 != 0); + pthread_cond_signal(&cond); + } + pthread_mutex_unlock(&mutex); + return 0; +} + + /* 1. generate thread number 0..N-1 from b32 2. add it to bad @@ -128,7 +156,7 @@ pthread_handler_t test_atomic_cas(void *arg) void do_tests() { - plan(4); + plan(6); bad= my_atomic_initialize(); ok(!bad, "my_atomic_initialize() returned %d", bad); @@ -142,5 +170,14 @@ void do_tests() b32= c32= 0; test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES); + { + int64 b=0x1000200030004000LL; + a64=0; + my_atomic_add64(&a64, b); + ok(a64==b, "add64"); + } + a64=0; + test_concurrently("my_atomic_add64", test_atomic_add64, THREADS, CYCLES); + my_atomic_rwlock_destroy(&rwl); } |
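For reference, the 32-bit path in the include/atomic/x86-gcc.h hunk builds the new 64-bit add out of the cmpxchg8b-based 64-bit CAS (make_atomic_add_body64). Below is a standalone sketch of that retry loop, with GCC's __sync_val_compare_and_swap standing in for my_atomic_cas64 (an assumption made purely for illustration, not the macro from the patch):

```c
/* CAS retry loop equivalent to make_atomic_add_body64 on 32-bit x86. */
#include <stdint.h>
#include <stdio.h>

static int64_t add64_via_cas(volatile int64_t *a, int64_t v)
{
  int64_t tmp = *a;
  int64_t seen;

  /* Retry until the value we read is still the value in memory; at that
     point the swap to tmp + v has been applied atomically. */
  while ((seen = __sync_val_compare_and_swap(a, tmp, tmp + v)) != tmp)
    tmp = seen;
  return tmp;  /* old value, like my_atomic_add64() */
}

int main(void)
{
  volatile int64_t counter = 5;
  int64_t old = add64_via_cas(&counter, 7);
  printf("old=%lld new=%lld\n", (long long)old, (long long)counter);
  return 0;
}
```

The new test in unittest/mysys/my_atomic-t.c stresses exactly this path: each thread adds and then subtracts the same pseudo-random values, so a64 can only return to zero at the end if every 64-bit add is truly atomic.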