author     unknown <joreland@mysql.com>    2004-08-24 21:07:08 +0200
committer  unknown <joreland@mysql.com>    2004-08-24 21:07:08 +0200
commit     1e91f015caeeec47c65b754d19ac956d79ded31d (patch)
tree       b97a7c4b8467dab0c313ce080b2c9a3b7cfb9e80
parent     f7e31ee94c822b1d2be6d479d4fade6304243754 (diff)
parent     6b20f46abc65f8b3dd851df71d04b89cb0d3f208 (diff)
download   mariadb-git-1e91f015caeeec47c65b754d19ac956d79ded31d.tar.gz

Merge mysql.com:/home/jonas/src/mysql-4.1
into mysql.com:/home/jonas/src/mysql-4.1-ndb

ndb/src/mgmsrv/Services.cpp: Auto merged
configure.in: Auto merged
acinclude.m4: Auto merged
-rw-r--r--  acinclude.m4                              |  24
-rw-r--r--  client/mysql.cc                           |   6
-rw-r--r--  client/mysqladmin.c                       |   2
-rw-r--r--  client/mysqlbinlog.cc                     |   3
-rw-r--r--  client/mysqlcheck.c                       |   2
-rw-r--r--  client/mysqldump.c                        |   3
-rw-r--r--  client/mysqlimport.c                      |   2
-rw-r--r--  client/mysqlshow.c                        |   2
-rw-r--r--  cmd-line-utils/Makefile.am                |  17
-rw-r--r--  configure.in                              |  19
-rw-r--r--  mysql-test/r/mysql_protocols.result       |   9
-rw-r--r--  mysql-test/t/mysql_protocols.test         |  10
-rw-r--r--  mysys/my_handler.c                        |   4
-rw-r--r--  ndb/src/kernel/blocks/grep/Grep.hpp       |  15
-rw-r--r--  ndb/src/kernel/blocks/grep/GrepInit.cpp   |   2
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp  |  10
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp               |   4
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp               |   2
-rw-r--r--  ndb/src/mgmsrv/Services.cpp               |   2
-rw-r--r--  ndb/src/ndbapi/NdbEventOperation.cpp      |   2
-rw-r--r--  ndb/src/ndbapi/NdbScanFilter.cpp          |   3
-rw-r--r--  ndb/src/ndbapi/TransporterFacade.hpp      |   2
-rw-r--r--  ndb/test/ndbapi/testIndex.cpp             |   2
-rw-r--r--  sql-common/client.c                       |   5
-rw-r--r--  sql/ha_ndbcluster.cc                      |   3
-rw-r--r--  sql/item.cc                               |   2
-rw-r--r--  sql/lock.cc                               |   8
-rw-r--r--  sql/mysqld.cc                             | 216
-rw-r--r--  sql/sql_class.cc                          |  19
-rw-r--r--  sql/sql_select.cc                         |   2
-rw-r--r--  sql/sql_table.cc                          |  14
31 files changed, 254 insertions(+), 162 deletions(-)
diff --git a/acinclude.m4 b/acinclude.m4
index d19942ce4e4..809e5e768e5 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -1,10 +1,33 @@
# Local macros for automake & autoconf
+
+AC_DEFUN(MYSQL_CHECK_READLINE_DECLARES_HIST_ENTRY,[
+ AC_CACHE_CHECK([HIST_ENTRY is declared in readline/readline.h], mysql_cv_hist_entry_declared,
+ AC_TRY_COMPILE(
+ [
+ #include "stdio.h"
+ #undef __P // readline-4.2 declares own __P
+ #include "readline/readline.h"
+ ],
+ [
+ HIST_ENTRY entry;
+ ],
+ [
+ mysql_cv_hist_entry_declared=yes
+ AC_DEFINE_UNQUOTED(HAVE_HIST_ENTRY, [1],
+ [HIST_ENTRY is defined in the outer libeditreadline])
+ ],
+ [mysql_cv_libedit_interface=no]
+ )
+ )
+])
+
AC_DEFUN(MYSQL_CHECK_LIBEDIT_INTERFACE,[
AC_CACHE_CHECK([libedit variant of rl_completion_entry_function], mysql_cv_libedit_interface,
AC_TRY_COMPILE(
[
#include "stdio.h"
+ #undef __P // readline-4.2 declares own __P
#include "readline/readline.h"
],
[
@@ -26,6 +49,7 @@ AC_DEFUN(MYSQL_CHECK_NEW_RL_INTERFACE,[
AC_TRY_COMPILE(
[
#include "stdio.h"
+ #undef __P // readline-4.2 declares own __P
#include "readline/readline.h"
],
[
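
(Annotation, not part of the patch.) The probe added above only defines HAVE_HIST_ENTRY when readline/readline.h already declares HIST_ENTRY; a minimal sketch of how client code is expected to consume that symbol, mirroring the client/mysql.cc hunk further down:

    /* Minimal consumer-side sketch: assumes the autoconf-generated config
       header has already been included so HAVE_HIST_ENTRY is visible.      */
    #if !defined(HAVE_HIST_ENTRY)
    /* Real readline does not expose the struct, so declare a compatible one. */
    typedef struct _hist_entry {
      const char *line;
      const char *data;
    } HIST_ENTRY;
    #endif
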
diff --git a/client/mysql.cc b/client/mysql.cc
index 015c168cea7..0fb6184e78a 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -84,6 +84,7 @@ extern "C" {
#if defined( __WIN__) || defined(OS2)
#include <conio.h>
#elif !defined(__NETWARE__)
+#undef __P // readline-4.2 declares own __P
#include <readline/readline.h>
#define HAVE_READLINE
#endif
@@ -294,7 +295,7 @@ static const char *server_default_groups[]=
HIST_ENTRY is defined for libedit, but not for the real readline
Need to redefine it for real readline to find it
*/
-#if !defined(USE_LIBEDIT_INTERFACE)
+#if !defined(HAVE_HIST_ENTRY)
typedef struct _hist_entry {
const char *line;
const char *data;
@@ -753,8 +754,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
opt_nopager= 1;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) ==
- ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqladmin.c b/client/mysqladmin.c
index aaed101a83e..a3bb0fea180 100644
--- a/client/mysqladmin.c
+++ b/client/mysqladmin.c
@@ -249,7 +249,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 97746a52b39..5f9a499bd31 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -633,8 +633,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) ==
- ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index 78e82e670f8..47512b2a277 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -271,7 +271,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case 'V': print_version(); exit(0);
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqldump.c b/client/mysqldump.c
index dfac9ea0e7c..8fcf1bb1781 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -582,8 +582,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
case (int) OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol= find_type(argument, &sql_protocol_typelib, 0))
- == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index ccf7fd9880d..86f373d75fe 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -203,7 +203,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
#endif
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index d9e2a1fa92a..05108bd03c8 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -268,7 +268,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
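
(Annotation, not part of the patch.) The --protocol hunks above, and the matching sql-common/client.c hunk further down, all replace the same broken guard. A self-contained sketch of the difference, using a mocked lookup that assumes the typelib convention of returning 0 for an unknown name (it never returns ~(ulong) 0):

    #include <cstdio>

    /* Hypothetical stand-in for find_type(); the real sql_protocol_typelib
       lookup is not used here.  0 means "name not found".                   */
    static unsigned long mock_find_type(const char *name)
    {
      (void) name;
      return 0;                        /* pretend "NullS" was not recognised */
    }

    int main()
    {
      unsigned long opt_protocol;

      /* Old guard: 0 never equals ~(ulong) 0, so unknown names slipped through. */
      bool old_rejects = ((opt_protocol = mock_find_type("NullS")) == ~0UL);

      /* New guard: a zero result (no match) is rejected, as Bug #4998 wants. */
      bool new_rejects = ((opt_protocol = mock_find_type("NullS")) <= 0);

      std::printf("old guard rejects: %d, new guard rejects: %d\n",
                  (int) old_rejects, (int) new_rejects);
      return 0;
    }
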
diff --git a/cmd-line-utils/Makefile.am b/cmd-line-utils/Makefile.am
index 7214d1231f9..88aaedde06d 100644
--- a/cmd-line-utils/Makefile.am
+++ b/cmd-line-utils/Makefile.am
@@ -1,3 +1,20 @@
+# Copyright (C) 2004 MySQL AB
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Library General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Library General Public License for more details.
+#
+# You should have received a copy of the GNU Library General Public
+# License along with this library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA
+
## Process this file with automake to create Makefile.in
SUBDIRS= @readline_basedir@
diff --git a/configure.in b/configure.in
index 9a902d17305..a4b38778363 100644
--- a/configure.in
+++ b/configure.in
@@ -2301,6 +2301,20 @@ AC_ARG_WITH(libedit,
[ with_libedit=undefined ]
)
+#
+# We support next variants of compilation:
+# --with-readline
+# | yes | no | undefined
+# --with-libedit | | |
+# ---------------+----------------+------+----------------------------------
+# yes | ERROR! | use libedit from mysql sources
+# ---------------+----------------+------+----------------------------------
+# no | use readline | use system readline or external libedit
+# | from mysql | according to results of m4 tests
+# ---------------+ sources (if it + +----------------------------------
+# undefined | is presented) | | use libedit from mysql sources
+
+
compile_readline="no"
compile_libedit="no"
@@ -2328,6 +2342,7 @@ then
readline_link="\$(top_builddir)/cmd-line-utils/libedit/liblibedit.a"
readline_h_ln_cmd="\$(LN) -s \$(top_builddir)/cmd-line-utils/libedit/readline readline"
compile_libedit=yes
+ AC_DEFINE_UNQUOTED(HAVE_HIST_ENTRY)
AC_DEFINE_UNQUOTED(USE_LIBEDIT_INTERFACE, 1)
elif test "$with_readline" = "yes"
then
@@ -2339,8 +2354,12 @@ then
compile_readline=yes
AC_DEFINE_UNQUOTED(USE_NEW_READLINE_INTERFACE, 1)
else
+ AC_LANG_SAVE
+ AC_LANG_CPLUSPLUS
MYSQL_CHECK_LIBEDIT_INTERFACE
MYSQL_CHECK_NEW_RL_INTERFACE
+ MYSQL_CHECK_READLINE_DECLARES_HIST_ENTRY
+ AC_LANG_RESTORE
if [test "$mysql_cv_new_rl_interface" = "yes"] || [test "$mysql_cv_libedit_interface" = "no"]
then
readline_link="-lreadline"
diff --git a/mysql-test/r/mysql_protocols.result b/mysql-test/r/mysql_protocols.result
new file mode 100644
index 00000000000..272e3bda6f0
--- /dev/null
+++ b/mysql-test/r/mysql_protocols.result
@@ -0,0 +1,9 @@
+<default>
+ ok
+TCP
+ ok
+SOCKET
+ ok
+ERROR 2047: Wrong or unknown protocol
+ERROR 2047: Wrong or unknown protocol
+Unknown option to protocol: NullS
diff --git a/mysql-test/t/mysql_protocols.test b/mysql-test/t/mysql_protocols.test
new file mode 100644
index 00000000000..942ba2722d8
--- /dev/null
+++ b/mysql-test/t/mysql_protocols.test
@@ -0,0 +1,10 @@
+
+# test for Bug #4998 "--protocol doesn't reject bad values"
+
+--exec echo "select ' ok' as '<default>'" | $MYSQL
+--exec echo "select ' ok' as 'TCP'" | $MYSQL --protocol=TCP
+--exec echo "select ' ok' as 'SOCKET'" | $MYSQL --protocol=SOCKET
+--exec echo "select ' ok' as 'PIPE'" | $MYSQL --protocol=PIPE 2>&1
+--exec echo "select ' ok' as 'MEMORY'" | $MYSQL --protocol=MEMORY 2>&1
+--exec echo "select ' ok' as 'NullS'" | $MYSQL --protocol=NullS 2>&1
+
diff --git a/mysys/my_handler.c b/mysys/my_handler.c
index 6003808df25..360a7666e94 100644
--- a/mysys/my_handler.c
+++ b/mysys/my_handler.c
@@ -158,7 +158,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a,
(flag=mi_compare_text(keyseg->charset,a,a_length,b,b_length,
(my_bool) ((nextflag & SEARCH_PREFIX) &&
next_key_length <= 0),
- !(nextflag & SEARCH_PREFIX))))
+ (my_bool)!(nextflag & SEARCH_PREFIX))))
return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag);
a+=a_length;
b+=b_length;
@@ -171,7 +171,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a,
(flag= mi_compare_text(keyseg->charset, a, a_length, b, b_length,
(my_bool) ((nextflag & SEARCH_PREFIX) &&
next_key_length <= 0),
- !(nextflag & SEARCH_PREFIX))))
+ (my_bool)!(nextflag & SEARCH_PREFIX))))
return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag);
a=end;
b+=length;
diff --git a/ndb/src/kernel/blocks/grep/Grep.hpp b/ndb/src/kernel/blocks/grep/Grep.hpp
index ba8f5780522..eeabac36966 100644
--- a/ndb/src/kernel/blocks/grep/Grep.hpp
+++ b/ndb/src/kernel/blocks/grep/Grep.hpp
@@ -148,7 +148,7 @@ private:
*/
class Grep : public SimulatedBlock //GrepParticipant
{
- //BLOCK_DEFINES(Grep);
+ BLOCK_DEFINES(Grep);
public:
Grep(const Configuration & conf);
@@ -519,19 +519,6 @@ public:
typedef void (Grep::* ExecSignalLocal1) (Signal* signal);
typedef void (Grep::PSCoord::* ExecSignalLocal2) (Signal* signal);
typedef void (Grep::PSPart::* ExecSignalLocal4) (Signal* signal);
-
- void
- addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal1 f, bool force = false){
- addRecSignalImpl(gsn, (ExecFunction)f, force);
- }
- void
- addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal2 f, bool force = false){
- addRecSignalImpl(gsn, (ExecFunction)f, force);
- }
- void
- addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal4 f, bool force = false){
- addRecSignalImpl(gsn, (ExecFunction)f, force);
- }
};
diff --git a/ndb/src/kernel/blocks/grep/GrepInit.cpp b/ndb/src/kernel/blocks/grep/GrepInit.cpp
index 70bf6678754..cfb454a1f9b 100644
--- a/ndb/src/kernel/blocks/grep/GrepInit.cpp
+++ b/ndb/src/kernel/blocks/grep/GrepInit.cpp
@@ -132,7 +132,7 @@ Grep::~Grep()
{
}
-//BLOCK_FUNCTIONS(Grep);
+BLOCK_FUNCTIONS(Grep);
Grep::PSPart::PSPart(Grep * sb) :
BlockComponent(sb),
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index 7175952aed0..141a0be0eff 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -655,12 +655,20 @@ CommandInterpreter::executeShow(char* parameters)
mgm_nodes= 0;
for(i=0; i < state->no_of_nodes; i++) {
+ if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB &&
+ state->node_states[i].version != 0){
+ master_id= state->node_states[i].dynamic_id;
+ break;
+ }
+ }
+
+ for(i=0; i < state->no_of_nodes; i++) {
switch(state->node_states[i].node_type) {
case NDB_MGM_NODE_TYPE_API:
api_nodes++;
break;
case NDB_MGM_NODE_TYPE_NDB:
- if (state->node_states[i].dynamic_id > master_id)
+ if (state->node_states[i].dynamic_id < master_id)
master_id= state->node_states[i].dynamic_id;
ndb_nodes++;
break;
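
(Annotation, not part of the patch.) The SHOW output fix above seeds master_id from the first started NDB node and then keeps the smallest dynamic_id (the comparison flips from > to <). A standalone sketch of that selection with simplified stand-in types, not the real ndb_mgm structures:

    #include <cstdio>

    struct NodeState                 /* simplified stand-in for ndb_mgm_node_state */
    {
      bool     is_ndb_node;
      unsigned version;              /* 0: node has not started/reported a version */
      unsigned dynamic_id;
    };

    static unsigned pick_master(const NodeState *nodes, int count)
    {
      unsigned master_id = 0;
      /* Seed with the first started NDB node (first loop in the hunk). */
      for (int i = 0; i < count; i++)
      {
        if (nodes[i].is_ndb_node && nodes[i].version != 0)
        {
          master_id = nodes[i].dynamic_id;
          break;
        }
      }
      /* Then keep the minimum over all NDB nodes (second loop, '>' -> '<'). */
      for (int i = 0; i < count; i++)
      {
        if (nodes[i].is_ndb_node && nodes[i].dynamic_id < master_id)
          master_id = nodes[i].dynamic_id;
      }
      return master_id;
    }

    int main()
    {
      NodeState nodes[] = { {true, 1, 7}, {false, 1, 2}, {true, 1, 3} };
      std::printf("master dynamic_id: %u\n", pick_master(nodes, 3));  /* prints 3 */
      return 0;
    }
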
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 587d5a7572d..0936ec234cf 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -2304,7 +2304,7 @@ bool
MgmtSrvr::alloc_node_id(NodeId * nodeId,
enum ndb_mgm_node_type type,
struct sockaddr *client_addr,
- socklen_t *client_addr_len)
+ SOCKET_SIZE_TYPE *client_addr_len)
{
Guard g(&f_node_id_mutex);
#if 0
@@ -2885,4 +2885,6 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
}
template class Vector<SigMatch>;
+#if __SUNPRO_CC != 0x560
template bool SignalQueue::waitFor<SigMatch>(Vector<SigMatch>&, SigMatch*&, NdbApiSignal*&, unsigned);
+#endif
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 661dcdfb784..b26eaeb4ab9 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -467,7 +467,7 @@ public:
*/
bool getNextNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type) const ;
bool alloc_node_id(NodeId * _nodeId, enum ndb_mgm_node_type type,
- struct sockaddr *client_addr, socklen_t *client_addr_len);
+ struct sockaddr *client_addr, SOCKET_SIZE_TYPE *client_addr_len);
/**
*
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index d51e54f688d..0f54a15c20c 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -402,7 +402,7 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
}
struct sockaddr addr;
- socklen_t addrlen= sizeof(addr);
+ SOCKET_SIZE_TYPE addrlen= sizeof(addr);
int r = getpeername(m_socket, &addr, &addrlen);
if (r != 0 ) {
m_output->println(cmd);
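
(Annotation, not part of the patch.) The socklen_t -> SOCKET_SIZE_TYPE changes here and in MgmtSrvr.* are a portability shim: not every supported platform declares the getpeername()/accept() length argument as socklen_t. A hedged sketch of the idea; the typedef below is a stand-in, the real definition lives in portability headers this patch does not show:

    #include <sys/socket.h>

    /* Stand-in only: assume a modern POSIX system where socklen_t exists.
       On platforms without it, the build would pick int (or size_t) instead. */
    typedef socklen_t SOCKET_SIZE_TYPE;

    static int peer_of(int fd, struct sockaddr *addr)
    {
      SOCKET_SIZE_TYPE addrlen = sizeof(*addr);  /* mirrors the Services.cpp hunk */
      return getpeername(fd, addr, &addrlen);
    }
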
diff --git a/ndb/src/ndbapi/NdbEventOperation.cpp b/ndb/src/ndbapi/NdbEventOperation.cpp
index ebdebaffd61..506a6c8d86d 100644
--- a/ndb/src/ndbapi/NdbEventOperation.cpp
+++ b/ndb/src/ndbapi/NdbEventOperation.cpp
@@ -37,7 +37,7 @@
NdbEventOperation::NdbEventOperation(Ndb *theNdb,
const char* eventName,
- const int bufferLength)
+ int bufferLength)
: m_impl(* new NdbEventOperationImpl(*this,theNdb,
eventName,
bufferLength))
diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp
index eace1a0acf5..3813ab139de 100644
--- a/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -778,7 +778,8 @@ main(void){
#endif
template class Vector<NdbScanFilterImpl::State>;
+#if __SUNPRO_CC != 0x560
template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint32);
template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint64);
-
+#endif
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp
index 60ea3625524..14da4b11aa1 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/ndb/src/ndbapi/TransporterFacade.hpp
@@ -161,7 +161,9 @@ private:
/**
* Block number handling
*/
+public:
static const unsigned MAX_NO_THREADS = 4711;
+private:
struct ThreadData {
static const Uint32 ACTIVE = (1 << 16) | 1;
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index a0844cee8f8..6ebbfd8b680 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -386,6 +386,7 @@ sync_down(NDBT_Context* ctx){
if(threads){
ctx->decProperty("PauseThreads");
}
+ return 0;
}
int
@@ -397,6 +398,7 @@ sync_up_and_wait(NDBT_Context* ctx){
if(threads){
ndbout_c("wait completed");
}
+ return 0;
}
int
diff --git a/sql-common/client.c b/sql-common/client.c
index 1941e6bc517..04d4bc06102 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -1058,9 +1058,8 @@ void mysql_read_default_options(struct st_mysql_options *options,
options->max_allowed_packet= atoi(opt_arg);
break;
case 28: /* protocol */
- if ((options->protocol = find_type(opt_arg,
- &sql_protocol_typelib,0))
- == ~(ulong) 0)
+ if ((options->protocol= find_type(opt_arg,
+ &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", opt_arg);
exit(1);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index cc6b9016bfb..1a11f0d3073 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1423,7 +1423,8 @@ int ha_ndbcluster::write_row(byte *record)
{
Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
DBUG_PRINT("info",
- ("Trying to set next auto increment value to %u", next_val));
+ ("Trying to set next auto increment value to %lu",
+ (ulong) next_val));
if (m_ndb->setAutoIncrementValue((NDBTAB *) m_table, next_val, true))
DBUG_PRINT("info",
("Setting next auto increment value to %u", next_val));
diff --git a/sql/item.cc b/sql/item.cc
index 11d618748b3..2c98aad2074 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -919,7 +919,7 @@ double Item_param::val()
This works for example when user says SELECT ?+0.0 and supplies
time value for the placeholder.
*/
- return (double) TIME_to_ulonglong(&value.time);
+ return ulonglong2double(TIME_to_ulonglong(&value.time));
case NULL_VALUE:
return 0.0;
default:
diff --git a/sql/lock.cc b/sql/lock.cc
index fa199ce7454..fab0a61e506 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -787,7 +787,7 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commi
LINT_INIT(old_message);
(void) pthread_mutex_lock(&LOCK_open);
- if (need_exit_cond= must_wait)
+ if ((need_exit_cond= must_wait))
{
if (thd->global_read_lock) // This thread had the read locks
{
@@ -805,7 +805,11 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commi
}
if (!abort_on_refresh && !result)
protect_against_global_read_lock++;
- if (unlikely(need_exit_cond)) // global read locks are rare
+ /*
+ The following is only true in case of a global read locks (which is rare)
+ and if old_message is set
+ */
+ if (unlikely(need_exit_cond))
thd->exit_cond(old_message);
else
pthread_mutex_unlock(&LOCK_open);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 8dfca2bb684..9b40768f0da 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4562,6 +4562,10 @@ replicating a LOAD DATA INFILE command.",
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG,
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0},
+ /* QQ: The following should be removed soon! (bdb_max_lock preferred) */
+ {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
+ (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
+ REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
{"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
@@ -4570,15 +4574,16 @@ replicating a LOAD DATA INFILE command.",
"The maximum number of locks you can have active on a BDB table.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
- /* QQ: The following should be removed soon! */
- {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
- (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
- REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
#endif /* HAVE_BERKELEY_DB */
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024L, IO_SIZE, ~0L, 0, IO_SIZE, 0},
+ {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
+ "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
+ (gptr*) &global_system_variables.bulk_insert_buff_size,
+ (gptr*) &max_system_variables.bulk_insert_buff_size,
+ 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0},
{"connect_timeout", OPT_CONNECT_TIMEOUT,
"The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.",
(gptr*) &connect_timeout, (gptr*) &connect_timeout,
@@ -4589,18 +4594,38 @@ replicating a LOAD DATA INFILE command.",
(gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb,
0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0},
#endif
- {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
- "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
- (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
- GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
+ { "date_format", OPT_DATE_FORMAT,
+ "The DATE format (For future).",
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "datetime_format", OPT_DATETIME_FORMAT,
+ "The DATETIME/TIMESTAMP format (for future).",
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "default_week_format", OPT_DEFAULT_WEEK_FORMAT,
+ "The default week format used by WEEK() functions.",
+ (gptr*) &global_system_variables.default_week_format,
+ (gptr*) &max_system_variables.default_week_format,
+ 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
{"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT,
"After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.",
(gptr*) &delayed_insert_limit, (gptr*) &delayed_insert_limit, 0, GET_ULONG,
REQUIRED_ARG, DELAYED_LIMIT, 1, ~0L, 0, 1, 0},
+ {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
+ "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
+ (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
+ GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
{ "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE,
"What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.",
(gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG,
REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0},
+ {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
+ "Binary logs will be rotated after expire-log-days days ",
+ (gptr*) &expire_logs_days,
+ (gptr*) &expire_logs_days, 0, GET_ULONG,
+ REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
{ "flush_time", OPT_FLUSH_TIME,
"A dedicated thread is created to flush all tables at the given interval.",
(gptr*) &flush_time, (gptr*) &flush_time, 0, GET_ULONG, REQUIRED_ARG,
@@ -4609,14 +4634,14 @@ replicating a LOAD DATA INFILE command.",
"List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE)",
0, 0, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- { "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
- "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
- (gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG,
- REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
{ "ft_max_word_len", OPT_FT_MAX_WORD_LEN,
"The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
(gptr*) &ft_max_word_len, (gptr*) &ft_max_word_len, 0, GET_ULONG,
REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0},
+ { "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
+ "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
+ (gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG,
+ REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
{ "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT,
"Number of best matches to use for query expansion",
(gptr*) &ft_query_expansion_limit, (gptr*) &ft_query_expansion_limit, 0, GET_ULONG,
@@ -4631,48 +4656,52 @@ replicating a LOAD DATA INFILE command.",
(gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0},
#ifdef HAVE_INNOBASE_DB
- {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS,
- "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
- (gptr*) &innobase_mirrored_log_groups,
- (gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10,
- 0, 1, 0},
- {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
- "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
- (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group,
- 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
- {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
- "Size of each log file in a log group in megabytes.",
- (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0,
- GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0},
- {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
- "The size of the buffer which InnoDB uses to write log to the log files on disk.",
- (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0,
- GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0},
- {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
- "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
- (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
- GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0},
- {"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
- "If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
- (gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
- GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
{"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
"Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
(gptr*) &innobase_additional_mem_pool_size,
(gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG,
1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0},
+ {"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
+ "If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
+ (gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
+ GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
+ {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
+ "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
+ (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
+ GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0},
{"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS,
"Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads,
(gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0,
1, 0},
- {"innodb_open_files", OPT_INNODB_OPEN_FILES,
- "How many files at the maximum InnoDB keeps open at the same time.",
- (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
- GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
+ {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
+ "Helps to save your data in case the disk image of the database becomes corrupt.",
+ (gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0,
+ GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
{"innodb_lock_wait_timeout", OPT_INNODB_LOCK_WAIT_TIMEOUT,
"Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.",
(gptr*) &innobase_lock_wait_timeout, (gptr*) &innobase_lock_wait_timeout,
0, GET_LONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
+ {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
+ "The size of the buffer which InnoDB uses to write log to the log files on disk.",
+ (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0,
+ GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0},
+ {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
+ "Size of each log file in a log group in megabytes.",
+ (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0,
+ GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0},
+ {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
+ "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
+ (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group,
+ 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
+ {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS,
+ "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
+ (gptr*) &innobase_mirrored_log_groups,
+ (gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10,
+ 0, 1, 0},
+ {"innodb_open_files", OPT_INNODB_OPEN_FILES,
+ "How many files at the maximum InnoDB keeps open at the same time.",
+ (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
+ GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
#ifdef HAVE_REPLICATION
/*
Disabled for the 4.1.3 release. Disabling just this paragraph of code is
@@ -4697,10 +4726,6 @@ replicating a LOAD DATA INFILE command.",
"Helps in performance tuning in heavily concurrent environments.",
(gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency,
0, GET_LONG, REQUIRED_ARG, 8, 1, 1000, 0, 1, 0},
- {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
- "Helps to save your data in case the disk image of the database becomes corrupt.",
- (gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0,
- GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
#endif /* HAVE_INNOBASE_DB */
{"interactive_timeout", OPT_INTERACTIVE_TIMEOUT,
"The number of seconds the server waits for activity on an interactive connection before closing it.",
@@ -4720,6 +4745,12 @@ replicating a LOAD DATA INFILE command.",
0, (GET_ULL | GET_ASK_ADDR),
REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
IO_SIZE, 0},
+ {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
+ "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
+ (gptr*) &dflt_key_cache_var.param_age_threshold,
+ (gptr*) 0,
+ 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
+ 300, 100, ~0L, 0, 100, 0},
{"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
"The default size of key cache blocks",
(gptr*) &dflt_key_cache_var.param_block_size,
@@ -4732,12 +4763,6 @@ replicating a LOAD DATA INFILE command.",
(gptr*) 0,
0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100,
1, 100, 0, 1, 0},
- {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
- "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
- (gptr*) &dflt_key_cache_var.param_age_threshold,
- (gptr*) 0,
- 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
- 300, 100, ~0L, 0, 100, 0},
{"long_query_time", OPT_LONG_QUERY_TIME,
"Log all queries that have taken more than long_query_time seconds to execute to file.",
(gptr*) &global_system_variables.long_query_time,
@@ -4768,14 +4793,14 @@ value. Will also apply to relay logs if max_relay_log_size is 0. \
The minimum value for this variable is 4096.",
(gptr*) &max_binlog_size, (gptr*) &max_binlog_size, 0, GET_ULONG,
REQUIRED_ARG, 1024*1024L*1024L, IO_SIZE, 1024*1024L*1024L, 0, IO_SIZE, 0},
- {"max_connections", OPT_MAX_CONNECTIONS,
- "The number of simultaneous clients allowed.", (gptr*) &max_connections,
- (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
- 0},
{"max_connect_errors", OPT_MAX_CONNECT_ERRORS,
"If there is more than this number of interrupted connections from a host this host will be blocked from further connections.",
(gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG,
REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0},
+ {"max_connections", OPT_MAX_CONNECTIONS,
+ "The number of simultaneous clients allowed.", (gptr*) &max_connections,
+ (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
+ 0},
{"max_delayed_threads", OPT_MAX_DELAYED_THREADS,
"Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero, which means INSERT DELAYED is not used.",
(gptr*) &global_system_variables.max_insert_delayed_threads,
@@ -4828,11 +4853,6 @@ The minimum value for this variable is 4096.",
"After this many write locks, allow some read locks to run in between.",
(gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG,
REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0},
- {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
- "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
- (gptr*) &global_system_variables.bulk_insert_buff_size,
- (gptr*) &max_system_variables.bulk_insert_buff_size,
- 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0},
{"myisam_block_size", OPT_MYISAM_BLOCK_SIZE,
"Block size to be used for MyISAM index pages.",
(gptr*) &opt_myisam_block_size,
@@ -4871,16 +4891,16 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.net_buffer_length,
(gptr*) &max_system_variables.net_buffer_length, 0, GET_ULONG,
REQUIRED_ARG, 16384, 1024, 1024*1024L, 0, 1024, 0},
- {"net_retry_count", OPT_NET_RETRY_COUNT,
- "If a read on a communication port is interrupted, retry this many times before giving up.",
- (gptr*) &global_system_variables.net_retry_count,
- (gptr*) &max_system_variables.net_retry_count,0,
- GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0},
{"net_read_timeout", OPT_NET_READ_TIMEOUT,
"Number of seconds to wait for more data from a connection before aborting the read.",
(gptr*) &global_system_variables.net_read_timeout,
(gptr*) &max_system_variables.net_read_timeout, 0, GET_ULONG,
REQUIRED_ARG, NET_READ_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
+ {"net_retry_count", OPT_NET_RETRY_COUNT,
+ "If a read on a communication port is interrupted, retry this many times before giving up.",
+ (gptr*) &global_system_variables.net_retry_count,
+ (gptr*) &max_system_variables.net_retry_count,0,
+ GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0},
{"net_write_timeout", OPT_NET_WRITE_TIMEOUT,
"Number of seconds to wait for a block to be written to a connection before aborting the write.",
(gptr*) &global_system_variables.net_write_timeout,
@@ -4932,11 +4952,21 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.query_prealloc_size,
(gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0},
+ {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
+ "Allocation block size for storing ranges during optimization",
+ (gptr*) &global_system_variables.range_alloc_block_size,
+ (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
+ REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
{"read_buffer_size", OPT_RECORD_BUFFER,
"Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.",
(gptr*) &global_system_variables.read_buff_size,
(gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG,
128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0},
+ {"read_only", OPT_READONLY,
+ "Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege",
+ (gptr*) &opt_readonly,
+ (gptr*) &opt_readonly,
+ 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
{"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER,
"When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks. If not set, then it's set to the value of record_buffer.",
(gptr*) &global_system_variables.read_rnd_buff_size,
@@ -4969,16 +4999,6 @@ The minimum value for this variable is 4096.",
(gptr*) &slave_net_timeout, (gptr*) &slave_net_timeout, 0,
GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
#endif /* HAVE_REPLICATION */
- {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
- "Allocation block size for storing ranges during optimization",
- (gptr*) &global_system_variables.range_alloc_block_size,
- (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
- REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
- {"read-only", OPT_READONLY,
- "Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege",
- (gptr*) &opt_readonly,
- (gptr*) &opt_readonly,
- 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
{"slow_launch_time", OPT_SLOW_LAUNCH_TIME,
"If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented.",
(gptr*) &slow_launch_time, (gptr*) &slow_launch_time, 0, GET_ULONG,
@@ -5008,23 +5028,28 @@ The minimum value for this variable is 4096.",
"The number of open tables for all threads.", (gptr*) &table_cache_size,
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
0, 1, 0},
- {"thread_concurrency", OPT_THREAD_CONCURRENCY,
- "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.",
- (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
- DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
{"thread_cache_size", OPT_THREAD_CACHE_SIZE,
"How many threads we should keep in a cache for reuse.",
(gptr*) &thread_cache_size, (gptr*) &thread_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, 16384, 0, 1, 0},
+ {"thread_concurrency", OPT_THREAD_CONCURRENCY,
+ "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.",
+ (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
+ DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
+ {"thread_stack", OPT_THREAD_STACK,
+ "The stack size for each thread.", (gptr*) &thread_stack,
+ (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
+ 1024L*128L, ~0L, 0, 1024, 0},
+ { "time_format", OPT_TIME_FORMAT,
+ "The TIME format (for future).",
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"tmp_table_size", OPT_TMP_TABLE_SIZE,
"If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.",
(gptr*) &global_system_variables.tmp_table_size,
(gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024*1024L, 1024, ~0L, 0, 1, 0},
- {"thread_stack", OPT_THREAD_STACK,
- "The stack size for each thread.", (gptr*) &thread_stack,
- (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
- 1024L*128L, ~0L, 0, 1024, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
"Allocation block size for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_alloc_block_size,
@@ -5041,31 +5066,6 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.net_wait_timeout, 0, GET_ULONG,
REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT),
0, 1, 0},
- {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
- "Binary logs will be rotated after expire-log-days days ",
- (gptr*) &expire_logs_days,
- (gptr*) &expire_logs_days, 0, GET_ULONG,
- REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
- { "default-week-format", OPT_DEFAULT_WEEK_FORMAT,
- "The default week format used by WEEK() functions.",
- (gptr*) &global_system_variables.default_week_format,
- (gptr*) &max_system_variables.default_week_format,
- 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
- { "date-format", OPT_DATE_FORMAT,
- "The DATE format (For future).",
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- { "datetime-format", OPT_DATETIME_FORMAT,
- "The DATETIME/TIMESTAMP format (for future).",
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- { "time-format", OPT_TIME_FORMAT,
- "The TIME format (for future).",
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ff7dc805119..aecb2ef6522 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -155,11 +155,13 @@ bool foreign_key_prefix(Key *a, Key *b)
** Thread specific functions
****************************************************************************/
-THD::THD():user_time(0), current_arena(this), is_fatal_error(0),
- last_insert_id_used(0),
- insert_id_used(0), rand_used(0), time_zone_used(0),
- in_lock_tables(0), global_read_lock(0), bootstrap(0)
+THD::THD()
+ :user_time(0), global_read_lock(0), is_fatal_error(0),
+ last_insert_id_used(0),
+ insert_id_used(0), rand_used(0), time_zone_used(0),
+ in_lock_tables(0), bootstrap(0)
{
+ current_arena= this;
host= user= priv_user= db= ip=0;
host_or_ip= "connecting host";
locked=some_tables_deleted=no_errors=password= 0;
@@ -439,10 +441,13 @@ void THD::awake(bool prepare_to_die)
it is the true value but maybe current_mutex is not yet non-zero (we're
in the middle of enter_cond() and there is a "memory order
inversion"). So we test the mutex too to not lock 0.
+
Note that there is a small chance we fail to kill. If victim has locked
- current_mutex, and hasn't entered enter_cond(), then we don't know it's
- going to wait on cond. Then victim goes into its cond "forever" (until
- we issue a second KILL). True we have set its thd->killed but it may not
+ current_mutex, but hasn't yet entered enter_cond() (which means that
+ current_cond and current_mutex are 0), then the victim will not get
+ a signal and it may wait "forever" on the cond (until
+ we issue a second KILL or the status it's waiting for happens).
+ It's true that we have set its thd->killed but it may not
see it immediately and so may have time to reach the cond_wait().
*/
if (mysys_var->current_cond && mysys_var->current_mutex)
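
(Annotation, not part of the patch.) The rewritten comment above explains why a KILL can be missed: the signal is only sent once both current_mutex and current_cond have been published by enter_cond(). A compact pthread sketch of that guarded-wakeup pattern, with simplified stand-ins for the mysys_var fields:

    #include <pthread.h>

    struct ThreadWaitState             /* simplified stand-in for st_my_thread_var */
    {
      pthread_mutex_t  lock;           /* protects the two pointers below          */
      pthread_mutex_t *current_mutex;  /* set by enter_cond(), cleared by exit_cond() */
      pthread_cond_t  *current_cond;
      volatile bool    killed;
    };

    /* Killer side: mark the victim and wake it only if it has published a
       condition to wait on.  If enter_cond() has not run yet, both pointers
       are 0 and no signal is sent -- the "may wait until a second KILL"
       window the comment describes.                                          */
    static void awake(ThreadWaitState *victim)
    {
      victim->killed = true;
      pthread_mutex_lock(&victim->lock);
      if (victim->current_cond && victim->current_mutex)
      {
        pthread_mutex_lock(victim->current_mutex);   /* serialise with the waiter */
        pthread_cond_broadcast(victim->current_cond);
        pthread_mutex_unlock(victim->current_mutex);
      }
      pthread_mutex_unlock(&victim->lock);
    }
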
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 279a56b9e58..630a520066a 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -8014,7 +8014,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array,
Item *itemptr=*order->item;
if (itemptr->type() == Item::INT_ITEM)
{ /* Order by position */
- uint count= itemptr->val_int();
+ uint count= (uint) itemptr->val_int();
if (!count || count > fields.elements)
{
my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 03777daa9b0..408f3408346 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3281,7 +3281,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
ha_rows *deleted)
{
int error;
- Copy_field *copy,*copy_end, *next_field= 0;
+ Copy_field *copy,*copy_end;
ulong found_count,delete_count;
THD *thd= current_thd;
uint length;
@@ -3291,6 +3291,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
List<Item> fields;
List<Item> all_fields;
ha_rows examined_rows;
+ bool auto_increment_field_copied= 0;
DBUG_ENTER("copy_data_between_tables");
if (!(copy= new Copy_field[to->fields]))
@@ -3309,7 +3310,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (def->field)
{
if (*ptr == to->next_number_field)
- next_field= copy_end;
+ auto_increment_field_copied= TRUE;
(copy_end++)->set(*ptr,def->field,0);
}
@@ -3368,11 +3369,14 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
thd->row_count++;
if (to->next_number_field)
- to->next_number_field->reset();
- for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++)
{
- if (copy_ptr == next_field)
+ if (auto_increment_field_copied)
to->auto_increment_field_not_null= TRUE;
+ else
+ to->next_number_field->reset();
+ }
+ for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++)
+ {
copy_ptr->do_copy(copy_ptr);
}
if ((error=to->file->write_row((byte*) to->record[0])))
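
(Annotation, not part of the patch.) The sql_table.cc change replaces the per-row Copy_field pointer comparison with a flag computed once before the copy loop. A small sketch of the resulting per-row decision, with simplified stand-ins for the TABLE members:

    struct Target                        /* simplified stand-in for TABLE            */
    {
      bool has_next_number_field;        /* the table has an auto-increment column   */
      bool auto_increment_field_not_null;
      void reset_next_number_field() {}  /* stands in for next_number_field->reset() */
    };

    /* Per row: if the auto-increment column is among the copied fields, keep
       the copied value; otherwise reset it so a new value is generated.       */
    static void prepare_auto_increment(Target *to, bool auto_increment_field_copied)
    {
      if (to->has_next_number_field)
      {
        if (auto_increment_field_copied)
          to->auto_increment_field_not_null = true;
        else
          to->reset_next_number_field();
      }
    }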