author | serg@serg.mysql.com <> | 2001-03-13 10:44:04 +0100
---|---|---
committer | serg@serg.mysql.com <> | 2001-03-13 10:44:04 +0100
commit | 5f6cbe3d94f5413cdd8f76cb14b21792060a196f (patch) |
tree | b6c5e089698a36e1518b4f3a25e12a9f9a803768 |
parent | a9344b422963e392ff6fdbae47e3fbbee99164d1 (diff) |
parent | 61e33ebe17ca83097d48aa70002062c50f62a1cf (diff) |
download | mariadb-git-5f6cbe3d94f5413cdd8f76cb14b21792060a196f.tar.gz |

Merge

47 files changed, 549 insertions, 128 deletions
diff --git a/.bzrignore b/.bzrignore index 9eae1ed2545..e9e207dc35c 100644 --- a/.bzrignore +++ b/.bzrignore @@ -197,3 +197,4 @@ bdb/build_win32/db.h bdb/dist/configure bdb/dist/tags bdb/build_unix/* +sql/.gdbinit diff --git a/BUILD/compile-pentium b/BUILD/compile-pentium index 4ece8b1f321..aa013c85d30 100755 --- a/BUILD/compile-pentium +++ b/BUILD/compile-pentium @@ -7,11 +7,6 @@ extra_flags="$pentium_cflags $fast_cflags" extra_configs="$pentium_configs" strip=yes -# Use the optimized version if it exists -if test -d /usr/local/BerkeleyDB-opt/ -then - extra_configs="$extra_configs --with-berkeley-db=/usr/local/BerkeleyDB-opt/" -fi -extra_configs="$extra_configs --with-innobase" +extra_configs="$extra_configs --with-berkeley-db --with-innobase" . "$path/FINISH.sh" diff --git a/BUILD/compile-pentium-debug b/BUILD/compile-pentium-debug index 2c5e867471b..2800ace97c5 100755 --- a/BUILD/compile-pentium-debug +++ b/BUILD/compile-pentium-debug @@ -8,11 +8,6 @@ c_warnings="$c_warnings $debug_extra_warnings" cxx_warnings="$cxx_warnings $debug_extra_warnings" extra_configs="$pentium_configs $debug_configs" -# Use the debug version if it exists -if test -d /usr/local/BerkeleyDB-dbug/ -then - extra_configs="$extra_configs --with-berkeley-db=/usr/local/BerkeleyDB-dbug/" -fi -extra_configs="$extra_configs --with-innobase" +extra_configs="$extra_configs --with-berkeley-db --with-innobase" . "$path/FINISH.sh" diff --git a/Docs/manual.texi b/Docs/manual.texi index a032c4f8e6d..006091668fd 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -595,7 +595,7 @@ Replication in MySQL * Replication Options:: Replication Options in my.cnf * Replication SQL:: SQL Commands related to replication * Replication FAQ:: Frequently Asked Questions about replication -* Troubleshooting Replication:: Troubleshooting Replication. Troubleshooting Replication. Troubleshooting Replication +* Troubleshooting Replication:: Troubleshooting Replication. Troubleshooting Replication. Troubleshooting Replication. Troubleshooting Replication Getting Maximum Performance from MySQL @@ -895,6 +895,8 @@ Changes in release 4.0.x (Development; Alpha) Changes in release 3.23.x (Stable) +* News-3.23.35:: Changes in release 3.23.35 +* News-3.23.34a:: Changes in release 3.23.34a * News-3.23.34:: Changes in release 3.23.34 * News-3.23.33:: Changes in release 3.23.33 * News-3.23.32:: Changes in release 3.23.32 @@ -1173,7 +1175,7 @@ see @ref{General-SQL}. For books that focus more specifically on @strong{MySQL}, the most popular Open Source SQL database, is provided by @strong{MySQL AB}. @strong{MySQL AB} is a commercial company that -builds is business providing services around the @strong{MySQL} database. +builds its business providing services around the @strong{MySQL} database. @xref{What is MySQL AB}. @table @asis @@ -4486,6 +4488,11 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}. 
@uref{http://mysql.paco.net.ua, WWW} @uref{ftp://mysql.paco.net.ua/, FTP} +@item +@c mizi@alkar.net (Alexander Ryumshin) +@image{Flags/ukraine} Ukraine [ISP Alkar Teleport/Dnepropetrovsk] @ +@uref{http://mysql.dp.ua/, WWW} + @end itemize @strong{North America:} @@ -5679,7 +5686,7 @@ To install the HP-UX tar.gz distribution, you must have a copy of GNU @cindex installing, source distribution -@cindex source distribtuion, installing +@cindex source distribution, installing @cindex installation overview @node Installing source, Installing source tree, Installing binary, Installing @section Installing a MySQL Source Distribution @@ -6165,9 +6172,15 @@ Download @strong{BitKeeper} from @item Follow the instructions to install it. @item -Once @strong{BitKeeper} is installed, -@code{bk clone bk://work.mysql.com:7000 mysql} - the initial download -may take a while, depending on the speed of your connection. +Once @strong{BitKeeper} is installed, if you want to clone 3.23 branch, +@code{bk clone bk://work.mysql.com:7000 mysql}, and +@code{bk clone bk://work.mysql.com:7001 mysql-4.0} for 4.0 branch + +- the initial download +may take a while, depending on the speed of your connection. If you have the +most +recent beta version of @strong{BitKeeper}, replace @code{clone} with +@code{oclone}. @item You will need GNU autoconf/automake, libtool, and m4 to do the next stage. If you get some strange error during the first stage, check that you really @@ -6193,7 +6206,7 @@ you @code{./configure} with different values for @code{prefix}, @code{tcp-port}, and @code{unix-socket-path}. @item Play hard with your new installation and try to make the new features -crash. Report bugs to @email{bugs@@lists.mysql.com}. As always, make +crash. Start by running @code{make test}. Report bugs to @email{bugs@@lists.mysql.com}. As always, make sure you have a full test case for the bug that we can run. @item If you have gotten to the @code{make} stage and it does not compile, @@ -6204,7 +6217,9 @@ if you execute @code{aclocal} and get @code{command not found}, or a similar problem, do not report it - make sure all the needed tools are installed and your @code{PATH} variable is set correctly. @item -After the initial @code{bk clone}, do @code{bk pull} to get the updates. +After the initial @code{bk clone}, do @code{bk pull} to get the updates. If +you are using the most recent beta version of BitKeeper, you should use +@code{bk opull} instead. @item You can examine change history of the tree with all the diffs with @code{bk sccstool}. If you see some funny diffs or code that you have a @@ -6618,6 +6633,7 @@ To install the @strong{MySQL} @code{DBD} module with ActiveState Perl on Windows, you should do the following: @itemize @bullet +@item Get activestate perl from @uref{http://www.activestate.com/Products/ActivePerl/index.html} and install it. @item Open a DOS shell. @item If required, set the HTTP_proxy variable. For example, you might try: @code{set HTTP_proxy=my.proxy.com:3128} @@ -22064,6 +22080,11 @@ or mysql> UPDATE mysql.user SET password=PASSWORD("newpass") where user="bob' and host="%.loc.gov"; @end example +@item SQL_ANSI_MODE = 0 | 1 +@cindex ANSI mode, SQL_ANSI_MODE +If set to @code{1}, the connection will be in ANSI mode, as described in +@ref{ANSI mode}. 
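The SQL_ANSI_MODE entry above is new in this change set; the matching parser work appears further down in sql/lex.h, sql/sql_lex.cc and sql/sql_yacc.yy, where setting the option remaps the double-quote character to the identifier-quote state for the current thread. A minimal usage sketch, assuming a 3.23.35 server and an invented table name:

    SET SQL_ANSI_MODE=1;
    -- In ANSI mode the double quote acts as an identifier quote (like `),
    -- so "my column" below names a column rather than a string literal:
    SELECT "my column" FROM example_table;
    SET SQL_ANSI_MODE=0;  -- return this connection to the default quoting rules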
+ @item SQL_AUTO_IS_NULL = 0 | 1 If set to @code{1} (default) then one can find the last inserted row for a table with an auto_increment row with the following construct: @@ -23587,7 +23608,7 @@ The @code{GEMINI} table type is developed and supported by NuSphere Corporation (@uref{http://www.nusphere.com}). It features row-level locking, transaction support (@code{COMMIT} and @code{ROLLBACK}), and automatic crash recovery. -@code{GEMINI} tables will be included in the @strong{MySQL} 4.0 source +@code{GEMINI} tables will be included in the @strong{MySQL} 3.23.35 source distribution. @node GEMINI start, GEMINI features, GEMINI overview, GEMINI @@ -23665,10 +23686,10 @@ NuSphere is working on removing these limitations. @subsection INNOBASE Tables overview Innobase is included in the @strong{MySQL} source distribution starting -from 3.23.34 and will be activated in the @strong{MySQL}-max binary. +from 3.23.34a and will be activated in the @strong{MySQL}-max binary. If you have downloaded a binary version of @strong{MySQL} that includes -support for Berkeley DB, simply follow the instructions for +support for Innobase, simply follow the instructions for installing a binary version of @strong{MySQL}. @xref{Installing binary}. To compile @strong{MySQL} with Innobase support, download @strong{MySQL} @@ -23677,7 +23698,7 @@ To compile @strong{MySQL} with Innobase support, download @strong{MySQL} @example cd /path/to/source/of/mysql-3.23.34 -./configure --with-berkeley-db +./configure --with-innobase @end example Innobase provides MySQL with a transaction safe table handler with @@ -26615,7 +26636,7 @@ tables}. * Replication Options:: Replication Options in my.cnf * Replication SQL:: SQL Commands related to replication * Replication FAQ:: Frequently Asked Questions about replication -* Troubleshooting Replication:: Troubleshooting Replication. Troubleshooting Replication. Troubleshooting Replication +* Troubleshooting Replication:: Troubleshooting Replication. Troubleshooting Replication. Troubleshooting Replication. Troubleshooting Replication @end menu @node Replication Intro, Replication Implementation, Replication, Replication @@ -33693,7 +33714,7 @@ If you find out something is wrong, please only send the relevant rows send the whole MyODBC or ODBC log file! If you are unable to find out what's wrong, the last option is to -make an archive (tar or zip) that contains a MyODBC log file, the ODBC +make an archive (tar or zip) that contains a MyODBC trace file, the ODBC log file, and a README file that explains the problem. You can send this to @uref{ftp://support.mysql.com/pub/mysql/secret}. Only we at MySQL AB will have access to the files you upload, and we will be very discrete @@ -40245,6 +40266,12 @@ If a test fails totally, you should check the logs file in the @item If you have compiled @strong{MySQL} with debugging you can try to debug this with the @code{--gdb} and @code{--debug} options to @code{mysql-test-run}. +@xref{Making trace files}. + +If you have not compiled @strong{MySQL} for debugging you should probably +do that. Just specify the @code{--with-debug} options to @code{configure}! +@xref{Installing source}. + @end itemize @page @@ -41105,8 +41132,8 @@ An open source client for exploring databases and executing SQL. Supports A query tool for @strong{MySQL} and PostgreSQL. @item @uref{http://dbman.linux.cz/,dbMan} A query tool written in Perl. Uses DBI and Tk. 
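The SQL_AUTO_IS_NULL item near the top of this hunk refers to the construct for finding the last inserted row of an auto_increment table. A hedged sketch of that construct, with invented table and column names (the default setting of 1 enables the behaviour, setting it to 0 turns it off):

    CREATE TABLE items (id INT NOT NULL AUTO_INCREMENT, name CHAR(20), PRIMARY KEY (id));
    INSERT INTO items (name) VALUES ('first');
    INSERT INTO items (name) VALUES ('second');
    -- With SQL_AUTO_IS_NULL=1 (the default), this returns the row that got
    -- the most recent AUTO_INCREMENT value on this connection ('second'):
    SELECT * FROM items WHERE id IS NULL;
    SET SQL_AUTO_IS_NULL=0;  -- disable the special IS NULL interpretation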
-@item @uref{http://www.mysql.com/Downloads/Win32/Msc110.EXE, Mascon 2000.1.10.48} -@item @uref{http://www.mysql.com/Downloads/Win32/FrMsc110.EXE, Free Mascon 2000.1.10.47} +@item @uref{http://www.mysql.com/Downloads/Win32/Msc201.EXE, Mascon 2.1.15} +@item @uref{http://www.mysql.com/Downloads/Win32/FrMsc201.EXE, Free Mascon 2.1.14} Mascon is a powerful Win32 GUI for the administering MySQL server databases. Mascon's features include visual table design, connections to multiple servers, data and blob editing of tables, security setting, SQL @@ -42040,6 +42067,9 @@ users uses this code as the rest of the code and because of this we are not yet 100 % confident in this code. @menu + +* News-3.23.35:: Changes in release 3.23.35 +* News-3.23.34a:: Changes in release 3.23.34a * News-3.23.34:: Changes in release 3.23.34 * News-3.23.33:: Changes in release 3.23.33 * News-3.23.32:: Changes in release 3.23.32 @@ -42077,7 +42107,23 @@ not yet 100 % confident in this code. * News-3.23.0:: Changes in release 3.23.0 @end menu -@node News-3.23.34, News-3.23.33, News-3.23.x, News-3.23.x +@node News-3.23.35, News-3.23.34a, News-3.23.x, News-3.23.x +@appendixsubsec Changes in release 3.23.35 +@itemize @bullet +@item +Added SQL_ANSI_MODE. You can now switch to ANSI mode for only your +connection by running @code{SET SQL_ANSI_MODE=1}, and you can turn +ANSI mode off with @code{SET SQL_ANSI_MODE=0}. +@end itemize + +@node News-3.23.34a, News-3.23.34, News-3.23.35, News-3.23.x +@appendixsubsec Changes in release 3.23.34a +@itemize @bullet +@item +Add extra files to distribution to allow one to compile Innobase. +@end itemize + +@node News-3.23.34, News-3.23.33, News-3.23.34a, News-3.23.x @appendixsubsec Changes in release 3.23.34 @itemize @bullet @item @@ -42124,6 +42170,8 @@ Fixed bug in bi-directonal replication. Fixed bug in @code{BDB} tables when using index on multi-part key where a key part may be @code{NULL}. @item +Fixed @code{MAX()} optimization on sub-key for @code{BDB} tables. +@item Fixed problem with 'garbage results' when using @code{BDB} tables and @code{BLOB} or @code{TEXT} fields when joining many tables. @item @@ -47037,6 +47085,12 @@ in some cases the @code{PROCEDURE} will not transform the columns. @item Creation of a table of type @code{MERGE} doesn't check if the underlying tables are of compatible types. +@item +@strong{MySQL} can't yet handle @code{NaN}, @code{-Inf} and @code{Inf} +values in double. Using these will cause problems when trying to export +and import data. We should as a intermediate solution change @code{NaN} to +@code{NULL} (if possible) and @code{-Inf} and @code{Inf} to the +Minimum respective maximum possible @code{double} value. @end itemize The following are known bugs in earlier versions of @strong{MySQL}: @@ -47177,6 +47231,11 @@ characters in database, table and column names. @item Add a portable interface over @code{gethostbyaddr_r()} so that we can change @code{ip_to_hostname()} to not block other threads while doing DNS lookups. +@item +Add @code{record_in_range()} method to @code{MERGE} tables to be +able to choose the right index when there is many to choose from. We should +also extend the info interface to get the key distribution for each index, +of @code{analyze} is run on all sub tables. @end itemize @node TODO future, TODO sometime, TODO MySQL 4.0, TODO @@ -47668,7 +47727,7 @@ send mail to @email{mysql@@lists.mysql.com} and ask for help. 
Please use the If you can cause the @code{mysqld} server to crash quickly, you can try to create a trace file of this: -Start the @code{mysqld} server with a trace log in @file{/tmp/mysql.trace}. +Start the @code{mysqld} server with a trace log in @file{/tmp/mysqld.trace}. The log file will get very @emph{BIG}. @code{mysqld --debug --log} diff --git a/client/mysql.cc b/client/mysql.cc index d140e524094..8935e459f68 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -39,7 +39,7 @@ #include "my_readline.h" #include <signal.h> -const char *VER="11.12"; +const char *VER="11.13"; /* Don't try to make a nice table if the data is too big */ #define MAX_COLUMN_LENGTH 1024 @@ -1518,6 +1518,22 @@ com_ego(String *buffer,char *line) return result; } +static void +print_field_types(MYSQL_RES *result) +{ + MYSQL_FIELD *field; + while ((field = mysql_fetch_field(result))) + { + tee_fprintf(PAGER,"%s '%s' %d %d %d %d %d\n", + field->name, + field->table ? "" : field->table, + (int) field->type, + field->length, field->max_length, + field->flags, field->decimals); + } + tee_puts("", PAGER); +} + static void print_table_data(MYSQL_RES *result) @@ -1528,6 +1544,11 @@ print_table_data(MYSQL_RES *result) bool *num_flag; num_flag=(bool*) my_alloca(sizeof(bool)*mysql_num_fields(result)); + if (info_flag) + { + print_field_types(result); + mysql_field_seek(result,0); + } separator.copy("+",1); while ((field = mysql_fetch_field(result))) { diff --git a/client/mysqladmin.c b/client/mysqladmin.c index a8bda38d8cc..b09f799c683 100644 --- a/client/mysqladmin.c +++ b/client/mysqladmin.c @@ -28,9 +28,9 @@ #include <my_pthread.h> /* because of signal() */ #endif -#define ADMIN_VERSION "8.17" +#define ADMIN_VERSION "8.18" #define MAX_MYSQL_VAR 64 -#define MAX_TIME_TO_WAIT 3600 /* Wait for shutdown */ +#define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */ #define MAX_TRUNC_LENGTH 3 char truncated_var_names[MAX_MYSQL_VAR][MAX_TRUNC_LENGTH]; @@ -40,7 +40,7 @@ static int interval=0; static my_bool option_force=0,interrupted=0,new_line=0, opt_compress=0, opt_relative=0, opt_verbose=0, opt_vertical=0; static uint tcp_port = 0, option_wait = 0, option_silent=0; -static ulong opt_connect_timeout; +static ulong opt_connect_timeout, opt_shutdown_timeout; static my_string unix_port=0; /* When using extended-status relatively, ex_val_max_len is the estimated @@ -134,6 +134,8 @@ static struct option long_options[] = { CHANGEABLE_VAR changeable_vars[] = { { "connect_timeout", (long*) &opt_connect_timeout, 0, 0, 3600*12, 0, 1}, + { "shutdown_timeout", (long*) &opt_shutdown_timeout, SHUTDOWN_DEF_TIMEOUT, 0, + 3600*12, 0, 1}, { 0, 0, 0, 0, 0, 0, 0} }; @@ -148,6 +150,7 @@ int main(int argc,char *argv[]) MY_INIT(argv[0]); mysql_init(&mysql); load_defaults("my",load_default_groups,&argc,&argv); + set_all_changeable_vars( changeable_vars ); while ((c=getopt_long(argc,argv,"h:i:p::u:#::P:sS:Ct:fq?vVw::WrEO:", long_options, &option_index)) != EOF) @@ -1125,7 +1128,7 @@ static void wait_pidfile(char *pidfile) uint count=0; system_filename(buff,pidfile); - while ((fd = open(buff, O_RDONLY)) >= 0 && count++ < MAX_TIME_TO_WAIT) + while ((fd = open(buff, O_RDONLY)) >= 0 && count++ < opt_shutdown_timeout) { close(fd); sleep(1); diff --git a/configure.in b/configure.in index 908cfcd67a9..05392da9a61 100644 --- a/configure.in +++ b/configure.in @@ -4,7 +4,7 @@ dnl Process this file with autoconf to produce a configure script. AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! 
-AM_INIT_AUTOMAKE(mysql, 3.23.34) +AM_INIT_AUTOMAKE(mysql, 3.23.34a) AM_CONFIG_HEADER(config.h) PROTOCOL_VERSION=10 diff --git a/innobase/mem/Makefile.am b/innobase/mem/Makefile.am index 89076f76f3b..84f642e4469 100644 --- a/innobase/mem/Makefile.am +++ b/innobase/mem/Makefile.am @@ -21,4 +21,6 @@ libs_LIBRARIES = libmem.a libmem_a_SOURCES = mem0mem.c mem0pool.c +EXTRA_DIST = mem0dbg.c + EXTRA_PROGRAMS = diff --git a/innobase/pars/Makefile.am b/innobase/pars/Makefile.am index d39430862a7..e5611f9dfc6 100644 --- a/innobase/pars/Makefile.am +++ b/innobase/pars/Makefile.am @@ -19,6 +19,8 @@ include ../include/Makefile.i libs_LIBRARIES = libpars.a +noinst_HEADERS = pars0grm.h + libpars_a_SOURCES = pars0grm.c lexyy.c pars0opt.c pars0pars.c pars0sym.c EXTRA_PROGRAMS = diff --git a/mysql-test/README b/mysql-test/README index 6ad59ea6a8b..c5dc3e219de 100644 --- a/mysql-test/README +++ b/mysql-test/README @@ -6,7 +6,10 @@ actually have a co-existing MySQL installation - the tests will not conflict with it. All tests must pass. If one or more of them fail on your system, please -report the details to bugs@lists.mysql.com +read the following manual section of how to report the problem: + +http://www.mysql.com/doc/M/y/MySQL_test_suite.html + You can create your own test cases. To create a test case: diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index fb247000be7..1b7d1c26f30 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -450,14 +450,14 @@ stop_slave () { if [ x$SLAVE_RUNNING = x1 ] then - $MYSQLADMIN --no-defaults --socket=$SLAVE_MYSOCK -u root shutdown + $MYSQLADMIN --no-defaults --socket=$SLAVE_MYSOCK -u root -O shutdown_timeout=10 shutdown if [ $? != 0 ] && [ -f $SLAVE_MYPID ] then # try harder! $ECHO "slave not cooperating with mysqladmin, will try manual kill" kill `$CAT $SLAVE_MYPID` sleep $SLEEP_TIME if [ -f $SLAVE_MYPID ] ; then - $ECHO "slave refused to die, resorting to SIGKILL murder" + $ECHO "slave refused to die. Sending SIGKILL" kill -9 `$CAT $SLAVE_MYPID` $RM -f $SLAVE_MYPID else @@ -472,14 +472,14 @@ stop_master () { if [ x$MASTER_RUNNING = x1 ] then - $MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK -u root shutdown + $MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK -u root -O shutdown_timeout=10 shutdown if [ $? != 0 ] && [ -f $MASTER_MYPID ] then # try harder! $ECHO "master not cooperating with mysqladmin, will try manual kill" kill `$CAT $MASTER_MYPID` sleep $SLEEP_TIME if [ -f $MASTER_MYPID ] ; then - $ECHO "master refused to die, resorting to SIGKILL murder" + $ECHO "master refused to die. 
Sending SIGKILL" kill -9 `$CAT $MASTER_MYPID` $RM -f $MASTER_MYPID else diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index 42d14e1c34f..2e760ae5b75 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -490,3 +490,5 @@ a 2 a b a 1 a 2 +MIN(B) MAX(b) +1 1 diff --git a/mysql-test/r/innobase.result b/mysql-test/r/innobase.result index 577bfcbf5b2..6c45bfd810d 100644 --- a/mysql-test/r/innobase.result +++ b/mysql-test/r/innobase.result @@ -443,3 +443,5 @@ i j 1 2 i j 1 2 +MIN(B) MAX(b) +1 1 diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index 900260217d5..564491fc520 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -664,3 +664,17 @@ SELECT * FROM t1 WHERE a='a' AND b=2; SELECT * FROM t1 WHERE a='a' AND b in (2); SELECT * FROM t1 WHERE a='a' AND b in (1,2); drop table t1; + +# +# Test min-max optimization +# + +CREATE TABLE t1 ( + a int3 unsigned NOT NULL, + b int1 unsigned NOT NULL, + UNIQUE (a, b) +) TYPE = BDB; + +INSERT INTO t1 VALUES (1, 1); +SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; +drop table t1; diff --git a/mysql-test/t/delayed.test b/mysql-test/t/delayed.test index 4bc1afa7612..6fbd99e3283 100644 --- a/mysql-test/t/delayed.test +++ b/mysql-test/t/delayed.test @@ -11,6 +11,7 @@ insert delayed into t1 set a = 4; insert delayed into t1 set a = 5, tmsp = 19711006010203; insert delayed into t1 (a, tmsp) values (6, 19711006010203); insert delayed into t1 (a, tmsp) values (7, NULL); +--sleep 1 insert into t1 set a = 8,tmsp=19711006010203; select * from t1 where tmsp=0; select * from t1 where tmsp=19711006010203; @@ -27,5 +28,6 @@ insert delayed into t1 values (null,"c"); insert delayed into t1 values (3,"d"),(null,"e"); --error 1136 insert delayed into t1 values (3,"this will give an","error"); +--sleep 2 select * from t1; drop table t1; diff --git a/mysql-test/t/innobase.test b/mysql-test/t/innobase.test index bca10751c13..4fa8d07bd52 100644 --- a/mysql-test/t/innobase.test +++ b/mysql-test/t/innobase.test @@ -394,3 +394,17 @@ select * from t1 where i=1 and j=2; create index ax1 on t1 (i,j); select * from t1 where i=1 and j=2; drop table t1; + +# +# Test min-max optimization +# + +CREATE TABLE t1 ( + a int3 unsigned NOT NULL, + b int1 unsigned NOT NULL, + UNIQUE (a, b) +) TYPE = innobase; + +INSERT INTO t1 VALUES (1, 1); +SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; +drop table t1; diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test index 86d66e6b540..4e5cee0d0ff 100644 --- a/mysql-test/t/order_by.test +++ b/mysql-test/t/order_by.test @@ -165,3 +165,43 @@ insert into t3 (ID, DateOfAction) values (87, '1999-07-19'), (89, '1999-07-19 select t3.DateOfAction, t1.TransactionID from t1 join t2 join t3 where t2.ID = t1.TransactionID and t3.ID = t2.GroupID order by t3.DateOfAction, t1.TransactionID; select t3.DateOfAction, t1.TransactionID from t1 join t2 join t3 where t2.ID = t1.TransactionID and t3.ID = t2.GroupID order by t1.TransactionID,t3.DateOfAction; drop table t1,t2,t3; + +#bug reported by Wouter de Jong + +drop table if exists members; +CREATE TABLE members ( + member_id int(11) NOT NULL auto_increment, + inschrijf_datum varchar(20) NOT NULL default '', + lastchange_datum varchar(20) NOT NULL default '', + nickname varchar(20) NOT NULL default '', + password varchar(8) NOT NULL default '', + voornaam varchar(30) NOT NULL default '', + tussenvoegsels varchar(10) NOT NULL default '', + achternaam varchar(50) NOT NULL default '', + straat varchar(100) NOT NULL default '', + postcode varchar(10) NOT NULL 
default '', + wijk varchar(40) NOT NULL default '', + plaats varchar(50) NOT NULL default '', + telefoon varchar(10) NOT NULL default '', + geboortedatum date NOT NULL default '0000-00-00', + geslacht varchar(5) NOT NULL default '', + email varchar(80) NOT NULL default '', + uin varchar(15) NOT NULL default '', + homepage varchar(100) NOT NULL default '', + internet varchar(15) NOT NULL default '', + scherk varchar(30) NOT NULL default '', + favo_boek varchar(50) NOT NULL default '', + favo_tijdschrift varchar(50) NOT NULL default '', + favo_tv varchar(50) NOT NULL default '', + favo_eten varchar(50) NOT NULL default '', + favo_muziek varchar(30) NOT NULL default '', + info text NOT NULL, + ipnr varchar(30) NOT NULL default '', + PRIMARY KEY (member_id) +) TYPE=MyISAM PACK_KEYS=1; + +insert into members (member_id) values (1),(2),(3); +select member_id, nickname, voornaam FROM members +ORDER by lastchange_datum DESC LIMIT 2; +drop table members; + diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 98e31cfe1ef..25a429f764a 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -282,7 +282,7 @@ fi echo "Installing all prepared tables" if eval "$execdir/mysqld $defaults --bootstrap --skip-grant-tables \ - --basedir=$basedir --datadir=$ldata --skip-innobase --skip-gemeni --skip-bdb $args" << END_OF_DATA + --basedir=$basedir --datadir=$ldata --skip-innobase --skip-gemini --skip-bdb $args" << END_OF_DATA use mysql; $c_d $i_d diff --git a/sql-bench/TODO b/sql-bench/TODO new file mode 100644 index 00000000000..8a103e89199 --- /dev/null +++ b/sql-bench/TODO @@ -0,0 +1,21 @@ +When comparing with ms-sql: + +Check how to get MySQL faster mysql ms-sql + +count_distinct (2000) | 89.00| 39.00| +count_distinct_big (120) | 324.00| 121.00| +count_distinct_group (1000) | 158.00| 107.00| +count_distinct_group_on_key (1000) | 49.00| 17.00| +count_distinct_group_on_key_parts (1| 157.00| 108.00| +order_by_big (10) | 197.00| 89.00| +order_by_big_key (10) | 170.00| 82.00| +order_by_big_key2 (10) | 163.00| 73.00| +order_by_big_key_desc (10) | 172.00| 84.00| +order_by_big_key_diff (10) | 193.00| 89.00| +order_by_big_key_prefix (10) | 165.00| 72.00| + + +Why is the following slow on NT: + NT Linux +update_of_primary_key_many_keys (256| 560.00| 65.00| + diff --git a/sql-bench/bench-init.pl.sh b/sql-bench/bench-init.pl.sh index d18d2c79ced..165b15a0ede 100644 --- a/sql-bench/bench-init.pl.sh +++ b/sql-bench/bench-init.pl.sh @@ -31,7 +31,7 @@ # $server Object for current server # $limits Hash reference to limits for benchmark -$benchmark_version="2.11a"; +$benchmark_version="2.12"; use Getopt::Long; require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n"; diff --git a/sql-bench/crash-me.sh b/sql-bench/crash-me.sh index f5d25a26ffb..014962b1c6d 100644 --- a/sql-bench/crash-me.sh +++ b/sql-bench/crash-me.sh @@ -47,6 +47,7 @@ require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n"; $opt_server="mysql"; $opt_host="localhost"; $opt_database="test"; $opt_dir="limits"; +$opt_user=$opt_password=""; $opt_debug=$opt_help=$opt_Information=$opt_restart=$opt_force=$opt_quick=0; $opt_log_all_queries=$opt_fix_limit_file=$opt_batch_mode=0; $opt_db_start_cmd=""; # the db server start command diff --git a/sql-bench/run-all-tests.sh b/sql-bench/run-all-tests.sh index cbcafce3117..31d48c837df 100644 --- a/sql-bench/run-all-tests.sh +++ b/sql-bench/run-all-tests.sh @@ -36,11 +36,6 @@ use DBI; $opt_silent=1; # Don't write header -$prog_args=""; -foreach $arg 
(@ARGV) -{ - $prog_args.="'" . $arg . "' "; -} chomp($pwd = `pwd`); $pwd = "." if ($pwd eq ''); require "$pwd/bench-init.pl" || die "Can't read Configuration file: $!\n"; @@ -50,6 +45,20 @@ $machine=machine(); $redirect= !($machine =~ /windows/i || $machine =~ "^NT\s") ? "2>&1" : ""; $dir= ($pwd =~ /\\/) ? '\\' : '/'; # directory symbol for shell +$prog_args=""; +foreach $arg (@ARGV) +{ + if ($redirect) + { + $prog_args.="'" . $arg . "' "; + } + else + { + # Windows/NT can't handle ' around arguments + $prog_args.=$arg . " "; + } +} + $prog_count=$errors=0; if ($opt_cmp) { diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh index 8ede6022cfc..bfd9d7ce758 100644 --- a/sql-bench/server-cfg.sh +++ b/sql-bench/server-cfg.sh @@ -194,6 +194,11 @@ sub new { $limits{'working_blobs'} = 0; # HEAP tables can't handle BLOB's } + if (defined($main::opt_create_options) && + $main::opt_create_options =~ /type=innobase/i) + { + $limits{'max_text_size'} = 8000; # Limit in Innobase + } return $self; } diff --git a/sql-bench/test-select.sh b/sql-bench/test-select.sh index e75ee26fff8..3c7efe3c5c2 100644 --- a/sql-bench/test-select.sh +++ b/sql-bench/test-select.sh @@ -130,6 +130,37 @@ if ($opt_lock_tables) select_test: +if ($limits->{'group_functions'}) +{ + my ($tmp); $tmp=1000; + print "Test if the database has a query cache\n"; + + # First ensure that the table is read into memory + fetch_all_rows($dbh,"select sum(idn+$tmp),sum(rev_idn+$tmp) from bench1"); + + $loop_time=new Benchmark; + for ($tests=0 ; $tests < $opt_loop_count ; $tests++) + { + fetch_all_rows($dbh,"select sum(idn+$tests),sum(rev_idn+$tests) from bench1"); + } + $end_time=new Benchmark; + print "Time for select_query_cache ($opt_loop_count): " . + timestr(timediff($end_time, $loop_time),"all") . "\n\n"; + + # If the database has a query cache, the following loop should be much + # slower than the previous loop + + $loop_time=new Benchmark; + for ($tests=0 ; $tests < $opt_loop_count ; $tests++) + { + fetch_all_rows($dbh,"select sum(idn+$tests),sum(rev_idn+$tests) from bench1"); + } + $end_time=new Benchmark; + print "Time for select_query_cache2 ($opt_loop_count): " . + timestr(timediff($end_time, $loop_time),"all") . "\n\n"; +} + + print "Testing big selects on the table\n"; $loop_time=new Benchmark; $rows=0; @@ -288,8 +319,21 @@ if ($limits->{'group_distinct_functions'}) $rows=$estimated=$count=0; for ($i=0 ; $i < $opt_medium_loop_count ; $i++) { - $count+=2; + $count++; $rows+=fetch_all_rows($dbh,"select count(distinct region) from bench1"); + $end_time=new Benchmark; + last if ($estimated=predict_query_time($loop_time,$end_time,\$count,$i+1, + $opt_medium_loop_count)); + } + print_time($estimated); + print " for count_distinct_key_prefix ($count:$rows): " . + timestr(timediff($end_time, $loop_time),"all") . "\n"; + + $loop_time=new Benchmark; + $rows=$estimated=$count=0; + for ($i=0 ; $i < $opt_medium_loop_count ; $i++) + { + $count++; $rows+=fetch_all_rows($dbh,"select count(distinct grp) from bench1"); $end_time=new Benchmark; last if ($estimated=predict_query_time($loop_time,$end_time,\$count,$i+1, @@ -304,6 +348,20 @@ if ($limits->{'group_distinct_functions'}) for ($i=0 ; $i < $opt_medium_loop_count ; $i++) { $count++; + $rows+=fetch_all_rows($dbh,"select count(distinct grp),count(distinct rev_idn) from bench1"); + $end_time=new Benchmark; + last if ($estimated=predict_query_time($loop_time,$end_time,\$count,$i+1, + $opt_medium_loop_count)); + } + print_time($estimated); + print " for count_distinct_2 ($count:$rows): " . 
+ timestr(timediff($end_time, $loop_time),"all") . "\n"; + + $loop_time=new Benchmark; + $rows=$estimated=$count=0; + for ($i=0 ; $i < $opt_medium_loop_count ; $i++) + { + $count++; $rows+=fetch_all_rows($dbh,"select region,count(distinct idn) from bench1 group by region"); $end_time=new Benchmark; last if ($estimated=predict_query_time($loop_time,$end_time,\$count,$i+1, diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc index 78ffb266366..ca105257f19 100644 --- a/sql/gen_lex_hash.cc +++ b/sql/gen_lex_hash.cc @@ -254,7 +254,7 @@ you have to change 'main' to print out the new function\n"); if (write_warning) fprintf (stderr,"Fatal error when generating hash for symbols\n\ Didn't find suitable values for perfect hashing:\n\ -You have to edit gen_lex_hase.cc to generate a new hashing function.\n\ +You have to edit gen_lex_hash.cc to generate a new hashing function.\n\ You can try running gen_lex_hash with --search to find a suitable value\n\ Symbol array size = %d\n",function_mod); return -1; @@ -472,8 +472,10 @@ int main(int argc,char **argv) int error; MY_INIT(argv[0]); - start_value=5315771L; best_t1=6916833L; best_t2=3813748L; best_type=3; /* mode=5839 add=5 type: 0 */ - if (get_options(argc,(char **) argv)) + start_value=4934807L; best_t1=5181754L; best_t2=1469522L; best_type=0; + /* mode=4999 add=7 type: 0 */ + + if (get_options(argc,(char **) argv)) exit(1); make_max_length_table(); diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 3f6b2629e16..10ff4dcc260 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -1429,7 +1429,12 @@ int ha_berkeley::index_read(byte * buf, const byte * key, pack_key(&last_key, active_index, key_buff, key, key_len); /* Store for compare */ memcpy(key_buff2, key_buff, (key_len=last_key.size)); - key_info->handler.bdb_return_if_eq= -1; + /* + If HA_READ_AFTER_KEY is set, return next key, else return first + matching key. + */ + key_info->handler.bdb_return_if_eq= (find_flag == HA_READ_AFTER_KEY ? 
+ 1 : -1); error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE), (char*) buf, active_index, &row, (DBT*) 0, 0); key_info->handler.bdb_return_if_eq= 0; diff --git a/sql/ha_innobase.h b/sql/ha_innobase.h index 73991be208d..7ce22e70dcb 100644 --- a/sql/ha_innobase.h +++ b/sql/ha_innobase.h @@ -72,16 +72,18 @@ class ha_innobase: public handler /* Init values for the class: */ public: ha_innobase(TABLE *table): handler(table), - int_option_flag(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | - HA_REC_NOT_IN_SEQ | - HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | - HA_HAVE_KEY_READ_ONLY | HA_READ_NOT_EXACT_KEY | - HA_LONGLONG_KEYS | HA_NULL_KEY | - HA_NOT_EXACT_COUNT | - HA_NO_WRITE_DELAYED | - HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE), - last_dup_key((uint) -1), - start_of_scan(0) + int_option_flag(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | + HA_REC_NOT_IN_SEQ | + HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | + HA_HAVE_KEY_READ_ONLY | HA_READ_NOT_EXACT_KEY | + HA_LONGLONG_KEYS | HA_NULL_KEY | + HA_NOT_EXACT_COUNT | + HA_NO_WRITE_DELAYED | + HA_PRIMARY_KEY_IN_READ_INDEX | + HA_DROP_BEFORE_CREATE | + HA_NOT_READ_AFTER_KEY), + last_dup_key((uint) -1), + start_of_scan(0) { } ~ha_innobase() {} diff --git a/sql/ha_isam.h b/sql/ha_isam.h index c8305c655ef..b255e8ba87f 100644 --- a/sql/ha_isam.h +++ b/sql/ha_isam.h @@ -33,7 +33,8 @@ class ha_isam: public handler int_option_flag(HA_READ_NEXT+HA_READ_PREV+HA_READ_RND_SAME+ HA_KEYPOS_TO_RNDPOS+ HA_READ_ORDER+ HA_LASTKEY_ORDER+ HA_HAVE_KEY_READ_ONLY+HA_READ_NOT_EXACT_KEY+ - HA_LONGLONG_KEYS+HA_KEY_READ_WRONG_STR + HA_DUPP_POS) + HA_LONGLONG_KEYS+HA_KEY_READ_WRONG_STR + HA_DUPP_POS + + HA_NOT_DELETE_WITH_CACHE) {} ~ha_isam() {} const char *table_type() const { return "ISAM"; } diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index c4393ea8c5d..af35c0269c4 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -484,7 +484,8 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) } break; } - if (!error && start_records != file->state->records) + if (!error && start_records != file->state->records && + !(check_opt->flags & T_VERY_SILENT)) { char llbuff[22],llbuff2[22]; sql_print_error("Warning: Found %s of %s rows when repairing '%s'", diff --git a/sql/handler.h b/sql/handler.h index 8cecd1fe171..1c8a83ac9ed 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -70,6 +70,8 @@ #define HA_NO_WRITE_DELAYED (HA_NOT_EXACT_COUNT*2) #define HA_PRIMARY_KEY_IN_READ_INDEX (HA_NO_WRITE_DELAYED*2) #define HA_DROP_BEFORE_CREATE (HA_PRIMARY_KEY_IN_READ_INDEX*2) +#define HA_NOT_READ_AFTER_KEY (HA_DROP_BEFORE_CREATE*2) +#define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2) /* Parameters for open() (in register form->filestat) */ /* HA_GET_INFO does a implicit HA_ABORT_IF_LOCKED */ diff --git a/sql/lex.h b/sql/lex.h index 6f030aa524d..6d676a39755 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -270,6 +270,7 @@ static SYMBOL symbols[] = { { "SLAVE", SYM(SLAVE),0,0}, { "SMALLINT", SYM(SMALLINT),0,0}, { "SONAME", SYM(UDF_SONAME_SYM),0,0}, + { "SQL_ANSI_MODE", SYM(SQL_ANSI_MODE),0,0}, { "SQL_AUTO_IS_NULL", SYM(SQL_AUTO_IS_NULL),0,0}, { "SQL_BIG_RESULT", SYM(SQL_BIG_RESULT),0,0}, { "SQL_BIG_SELECTS", SYM(SQL_BIG_SELECTS),0,0}, diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index b9fd954486b..1f41e0a0d2e 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -477,7 +477,7 @@ extern uint32 server_id; extern char mysql_data_home[2],server_version[SERVER_VERSION_LENGTH], max_sort_char, mysql_real_data_home[]; extern my_string mysql_unix_port,mysql_tmpdir; -extern 
const char *first_keyword, *localhost; +extern const char *first_keyword, *localhost, *delayed_user; extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables, created_tmp_tables, created_tmp_disk_tables, aborted_threads,aborted_connects, @@ -539,6 +539,8 @@ extern struct show_var_st init_vars[]; extern struct show_var_st status_vars[]; extern enum db_type default_table_type; +extern uchar global_state_map[256]; + #ifndef __WIN__ extern pthread_t signal_thread; #endif @@ -595,7 +597,7 @@ void change_byte(byte *,uint,char,char); void unireg_abort(int exit_code); void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form, SQL_SELECT *select, - bool use_record_cache, bool print_errors); + int use_record_cache, bool print_errors); void end_read_record(READ_RECORD *info); ha_rows filesort(TABLE **form,struct st_sort_field *sortorder, uint s_length, SQL_SELECT *select, ha_rows special,ha_rows max_rows); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 22e44b2b325..2f672c6f409 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -243,6 +243,7 @@ volatile ulong cached_thread_count=0; my_string master_user = (char*) "test", master_password = 0, master_host=0, master_info_file = (char*) "master.info"; const char *localhost=LOCAL_HOST; +const char *delayed_user="DELAYED"; uint master_port = MYSQL_PORT, master_connect_retry = 60; ulong max_tmp_tables,max_heap_table_size; @@ -2364,7 +2365,7 @@ pthread_handler_decl(handle_connections_namedpipes,arg) continue; } /* host name is unknown */ - thd->host = my_strdup("localhost",MYF(0)); /* Host is unknown */ + thd->host = my_strdup(localhost,MYF(0)); /* Host is unknown */ create_new_thread(thd); } @@ -2715,6 +2716,14 @@ struct show_var_st init_vars[]= { {"have_raid", (char*) &have_raid, SHOW_HAVE}, {"have_ssl", (char*) &have_ssl, SHOW_HAVE}, {"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR}, +#ifdef HAVE_INNOBASE_DB + {"innobase_data_file_path", innobase_data_file_path, SHOW_CHAR}, + {"innobase_data_home_dir", innobase_data_home_dir, SHOW_CHAR}, + {"innobase_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_MY_BOOL}, + {"innobase_log_arch_dir", innobase_log_arch_dir, SHOW_CHAR}, + {"innobase_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL}, + {"innobase_log_group_home_dir", innobase_log_group_home_dir, SHOW_CHAR}, +#endif {"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG}, {"join_buffer_size", (char*) &join_buff_size, SHOW_LONG}, {"key_buffer_size", (char*) &keybuff_size, SHOW_LONG}, @@ -2960,12 +2969,19 @@ static void usage(void) --bdb-tmpdir=directory Berkeley DB tempfile name\n\ --skip-bdb Don't use berkeley db (will save memory)\n\ "); -#endif +#endif /* HAVE_BERKELEY_DB */ #ifdef HAVE_INNOBASE_DB puts("\ - --skip-innobase Don't use innobase (will save memory)\n\ + --innobase_data_home_dir=dir The common part for innobase table spaces\n + --innobase_data_file_path=dir Path to individual files and their sizes\n + --innobase_flush_log_at_trx_commit[=#] + Set to 0 if you don't want to flush logs\n\ + --innobase_log_arch_dir=dir Where full logs should be archived\n\ + --innobase_log_archive[=#] Set to 1 if you want to have logs archived\n\ + --innobase_log_group_home_dir=dir Path to Innobase log files. 
+ --skip-innobase Don't use innobase (will save memory)\n\ "); -#endif +#endif /* HAVE_INNOBASE_DB */ print_defaults("my",load_default_groups); puts(""); diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index d56bf68db62..df49d52d54a 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -141,6 +141,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) break; } TABLE *table=((Item_field*) expr)->field->table; + if ((table->file->option_flag() & HA_NOT_READ_AFTER_KEY)) + { + const_result=0; + break; + } bool error=table->file->index_init((uint) ref.key); if (!ref.key_length) diff --git a/sql/records.cc b/sql/records.cc index 89eae81fe27..e6f76e7fec6 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -31,7 +31,7 @@ static int rr_cmp(uchar *a,uchar *b); void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, SQL_SELECT *select, - bool use_record_cache, bool print_error) + int use_record_cache, bool print_error) { IO_CACHE *tempfile; DBUG_ENTER("init_read_record"); @@ -97,9 +97,11 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, info->read_record=rr_sequential; table->file->rnd_init(); /* We can use record cache if we don't update dynamic length tables */ - if (use_record_cache || + if (use_record_cache > 0 || (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY || - !(table->db_options_in_use & HA_OPTION_PACK_RECORD)) + !(table->db_options_in_use & HA_OPTION_PACK_RECORD) || + (use_record_cache < 0 && + !(table->file->option_flag() & HA_NOT_DELETE_WITH_CACHE))) VOID(table->file->extra(HA_EXTRA_CACHE)); // Cache reads } DBUG_VOID_RETURN; diff --git a/sql/slave.cc b/sql/slave.cc index e0220a28454..37e11d43573 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -36,6 +36,7 @@ bool wild_do_table_inited = 0, wild_ignore_table_inited = 0; bool table_rules_on = 0; uint32 slave_skip_counter = 0; static TABLE* save_temporary_tables = 0; +THD* slave_thd = 0; // when slave thread exits, we need to remember the temporary tables so we // can re-use them on slave start @@ -1157,7 +1158,7 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused))) // needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff my_thread_init(); - thd = new THD; // note that contructor of THD uses DBUG_ ! + slave_thd = thd = new THD; // note that contructor of THD uses DBUG_ ! 
thd->set_time(); DBUG_ENTER("handle_slave"); @@ -1347,6 +1348,7 @@ position %s", pthread_cond_broadcast(&COND_slave_stopped); // tell the world we are done pthread_mutex_unlock(&LOCK_slave); net_end(&thd->net); // destructor will not free it, because we are weird + slave_thd = 0; delete thd; my_thread_end(); #ifndef DBUG_OFF @@ -1376,8 +1378,13 @@ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi) } if(!slave_was_killed) - mysql_log.write(thd, COM_CONNECT_OUT, "%s@%s:%d", + { + mysql_log.write(thd, COM_CONNECT_OUT, "%s@%s:%d", mi->user, mi->host, mi->port); +#ifdef STOP_IO_WITH_FD_CLOSE + thd->set_active_fd(vio_fd(mysql->net.vio)); +#endif + } return slave_was_killed; } @@ -1404,11 +1411,16 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi) } if(!slave_was_killed) - sql_print_error("Slave: reconnected to master '%s@%s:%d',\ + { + sql_print_error("Slave: reconnected to master '%s@%s:%d',\ replication resumed in log '%s' at position %s", glob_mi.user, glob_mi.host, glob_mi.port, RPL_LOG_NAME, llstr(glob_mi.pos,llbuff)); +#ifdef STOP_IO_WITH_FD_CLOSE + thd->set_active_fd(vio_fd(mysql->net.vio)); +#endif + } return slave_was_killed; } diff --git a/sql/slave.h b/sql/slave.h index e667cac52eb..048cb3f0100 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -99,6 +99,7 @@ extern uint32 slave_skip_counter; // have caused errors, and have been manually applied by DBA already extern pthread_t slave_real_id; +extern THD* slave_thd; extern MASTER_INFO glob_mi; extern HASH replicate_do_table, replicate_ignore_table; extern DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index abe7fad822c..1bced49be57 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -84,6 +84,10 @@ THD::THD():user_time(0),fatal_error(0),last_insert_id_used(0), query_start_used=0; query_length=col_access=0; query_error=0; +#ifdef STOP_IO_WITH_FD_CLOSE + active_fd = -1; + pthread_mutex_init(&active_fd_lock, NULL); +#endif server_id = ::server_id; server_status=SERVER_STATUS_AUTOCOMMIT; next_insert_id=last_insert_id=0; @@ -176,11 +180,15 @@ THD::~THD() if (host != localhost) // If not pointer to constant safeFree(host); - safeFree(user); + if (user != delayed_user) + safeFree(user); safeFree(db); safeFree(ip); free_root(&mem_root,MYF(0)); mysys_var=0; // Safety (shouldn't be needed) +#ifdef STOP_IO_WITH_FD_CLOSE + pthread_mutex_destroy(&active_fd_lock); +#endif DBUG_VOID_RETURN; } diff --git a/sql/sql_class.h b/sql/sql_class.h index f9720a3774a..3fd166ebc5a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -21,6 +21,8 @@ #pragma interface /* gcc class implementation */ #endif +#include <thr_alarm.h> + class Query_log_event; class Load_log_event; @@ -256,6 +258,10 @@ public: #ifndef __WIN__ sigset_t signals,block_signals; #endif +#ifdef STOP_IO_WITH_FD_CLOSE + int active_fd; + pthread_mutex_t active_fd_lock; +#endif ulonglong next_insert_id,last_insert_id,current_insert_id; ha_rows select_limit,offset_limit,default_select_limit,cuted_fields, max_join_size,sent_row_count; @@ -280,10 +286,58 @@ public: ulong slave_proxy_id; // in slave thread we need to know in behalf of which // thread the query is being run to replicate temp tables properly - + + // thread-specific state map for lex parser + uchar state_map[256]; + THD(); ~THD(); bool store_globals(); +#ifdef STOP_IO_WITH_FD_CLOSE + inline void set_active_fd(int fd) + { + pthread_mutex_lock(&active_fd_lock); + active_fd = fd; + pthread_mutex_unlock(&active_fd_lock); + } + inline void 
clear_active_fd() + { + pthread_mutex_lock(&active_fd_lock); + active_fd = -1; + pthread_mutex_unlock(&active_fd_lock); + } + inline void close_active_fd() + { + pthread_mutex_lock(&active_fd_lock); + if(active_fd >= 0) + { + my_close(active_fd, MYF(MY_WME)); + active_fd = -1; + } + pthread_mutex_unlock(&active_fd_lock); + } +#endif + inline void prepare_to_die() + { + thr_alarm_kill(real_id); + killed = 1; +#ifdef STOP_IO_WITH_FD_CLOSE + close_active_fd(); +#endif + if (mysys_var) + { + pthread_mutex_lock(&mysys_var->mutex); + if (!system_thread) // Don't abort locks + mysys_var->abort=1; + if (mysys_var->current_mutex) + { + pthread_mutex_lock(mysys_var->current_mutex); + pthread_cond_broadcast(mysys_var->current_cond); + pthread_mutex_unlock(mysys_var->current_mutex); + } + pthread_mutex_unlock(&mysys_var->mutex); + } + } inline const char* enter_cond(pthread_cond_t *cond, pthread_mutex_t* mutex, const char* msg) { diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e1196341bef..c20a656c547 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -192,7 +192,7 @@ int mysql_delete(THD *thd,TABLE_LIST *table_list,COND *conds,ha_rows limit, (void) table->file->extra(HA_EXTRA_NO_READCHECK); if (options & OPTION_QUICK) (void) table->file->extra(HA_EXTRA_QUICK); - init_read_record(&info,thd,table,select,1,1); + init_read_record(&info,thd,table,select,-1,1); ulong deleted=0L; thd->proc_info="updating"; while (!(error=info.read_record(&info)) && !thd->killed) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index c44391d9ce0..e92c5255ef8 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -489,7 +489,7 @@ public: table(0),tables_in_use(0),stacked_inserts(0), status(0), dead(0), group_count(0) { - thd.user=thd.priv_user=(char*) ""; + thd.user=thd.priv_user=(char*) delayed_user; thd.host=(char*) localhost; thd.current_tablenr=0; thd.version=refresh_version; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index ca36cb9f205..f0357b99d70 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -75,7 +75,7 @@ inline int lex_casecmp(const char *s, const char *t, uint len) #include "lex_hash.h" -static uchar state_map[256]; +uchar global_state_map[256]; void lex_init(void) @@ -89,42 +89,49 @@ void lex_init(void) VOID(pthread_key_create(&THR_LEX,NULL)); - /* Fill state_map with states to get a faster parser */ + /* Fill global_state_map with states to get a faster parser */ for (i=0; i < 256 ; i++) { if (isalpha(i)) - state_map[i]=(uchar) STATE_IDENT; + global_state_map[i]=(uchar) STATE_IDENT; else if (isdigit(i)) - state_map[i]=(uchar) STATE_NUMBER_IDENT; + global_state_map[i]=(uchar) STATE_NUMBER_IDENT; #if defined(USE_MB) && defined(USE_MB_IDENT) else if (use_mb(default_charset_info) && my_ismbhead(default_charset_info, i)) - state_map[i]=(uchar) STATE_IDENT; + global_state_map[i]=(uchar) STATE_IDENT; #endif else if (!isgraph(i)) - state_map[i]=(uchar) STATE_SKIP; + global_state_map[i]=(uchar) STATE_SKIP; else - state_map[i]=(uchar) STATE_CHAR; + global_state_map[i]=(uchar) STATE_CHAR; } - state_map[(uchar)'_']=state_map[(uchar)'$']=(uchar) STATE_IDENT; - state_map[(uchar)'\'']=state_map[(uchar)'"']=(uchar) STATE_STRING; - state_map[(uchar)'-']=state_map[(uchar)'+']=(uchar) STATE_SIGNED_NUMBER; - state_map[(uchar)'.']=(uchar) STATE_REAL_OR_POINT; - state_map[(uchar)'>']=state_map[(uchar)'=']=state_map[(uchar)'!']= (uchar) STATE_CMP_OP; - state_map[(uchar)'<']= (uchar) STATE_LONG_CMP_OP; - state_map[(uchar)'&']=state_map[(uchar)'|']=(uchar) STATE_BOOL; - state_map[(uchar)'#']=(uchar) 
STATE_COMMENT; - state_map[(uchar)';']=(uchar) STATE_COLON; - state_map[(uchar)':']=(uchar) STATE_SET_VAR; - state_map[0]=(uchar) STATE_EOL; - state_map[(uchar)'\\']= (uchar) STATE_ESCAPE; - state_map[(uchar)'/']= (uchar) STATE_LONG_COMMENT; - state_map[(uchar)'*']= (uchar) STATE_END_LONG_COMMENT; - state_map[(uchar)'@']= (uchar) STATE_USER_END; - state_map[(uchar) '`']= (uchar) STATE_USER_VARIABLE_DELIMITER; + global_state_map[(uchar)'_']= + global_state_map[(uchar)'$']=(uchar) STATE_IDENT; + global_state_map[(uchar)'\'']= + global_state_map[(uchar)'"']=(uchar) STATE_STRING; + global_state_map[(uchar)'-']= + global_state_map[(uchar)'+']=(uchar) STATE_SIGNED_NUMBER; + global_state_map[(uchar)'.']=(uchar) STATE_REAL_OR_POINT; + global_state_map[(uchar)'>']= + global_state_map[(uchar)'=']= + global_state_map[(uchar)'!']= (uchar) STATE_CMP_OP; + global_state_map[(uchar)'<']= (uchar) STATE_LONG_CMP_OP; + global_state_map[(uchar)'&']=global_state_map[(uchar)'|']=(uchar) STATE_BOOL; + global_state_map[(uchar)'#']=(uchar) STATE_COMMENT; + global_state_map[(uchar)';']=(uchar) STATE_COLON; + global_state_map[(uchar)':']=(uchar) STATE_SET_VAR; + global_state_map[0]=(uchar) STATE_EOL; + global_state_map[(uchar)'\\']= (uchar) STATE_ESCAPE; + global_state_map[(uchar)'/']= (uchar) STATE_LONG_COMMENT; + global_state_map[(uchar)'*']= (uchar) STATE_END_LONG_COMMENT; + global_state_map[(uchar)'@']= (uchar) STATE_USER_END; + global_state_map[(uchar) '`']= (uchar) STATE_USER_VARIABLE_DELIMITER; + if (thd_startup_options & OPTION_ANSI_MODE) { - state_map[(uchar) '"'] = STATE_USER_VARIABLE_DELIMITER; + global_state_map[(uchar) '"'] = STATE_USER_VARIABLE_DELIMITER; } + DBUG_VOID_RETURN; } @@ -250,7 +257,8 @@ static char *get_text(LEX *lex) str=lex->tok_start+1; end=lex->ptr-1; - start=(uchar*) sql_alloc((uint) (end-str)+1); + if (!(start=(uchar*) sql_alloc((uint) (end-str)+1))) + return (char*) ""; // Sql_alloc has set error flag if (!found_escape) { lex->yytoklen=(uint) (end-str); @@ -417,6 +425,7 @@ int yylex(void *arg) uint length; enum lex_states state,prev_state; LEX *lex=current_lex; + uchar *state_map = lex->thd->state_map; YYSTYPE *yylval=(YYSTYPE*) arg; lex->yylval=yylval; // The global state diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 31f59c5b850..6ddb3b45864 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -512,6 +512,10 @@ pthread_handler_decl(handle_one_connection,arg) return 0; } + // copy global state map into thread + for(int x=0; x < 256; x++) + thd->state_map[x] = global_state_map[x]; + do { int error; @@ -2778,22 +2782,8 @@ void kill_one_thread(THD *thd, ulong id) if ((thd->master_access & PROCESS_ACL) || !strcmp(thd->user,tmp->user)) { - thr_alarm_kill(tmp->real_id); - tmp->killed=1; + tmp->prepare_to_die(); error=0; - if (tmp->mysys_var) - { - pthread_mutex_lock(&tmp->mysys_var->mutex); - if (!tmp->system_thread) // Don't abort locks - tmp->mysys_var->abort=1; - if (tmp->mysys_var->current_mutex) - { - pthread_mutex_lock(tmp->mysys_var->current_mutex); - pthread_cond_broadcast(tmp->mysys_var->current_cond); - pthread_mutex_unlock(tmp->mysys_var->current_mutex); - } - pthread_mutex_unlock(&tmp->mysys_var->mutex); - } } else error=ER_KILL_DENIED_ERROR; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index e354bb65713..7922ad0eb6a 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -572,6 +572,9 @@ int stop_slave(THD* thd, bool net_report ) { abort_slave = 1; thr_alarm_kill(slave_real_id); +#ifdef STOP_IO_WITH_FD_CLOSE + slave_thd->close_active_fd(); +#endif // do not abort 
the slave in the middle of a query, so we do not set // thd->killed for the slave thread thd->proc_info = "waiting for slave to die"; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 4d6a2e79fc3..ed109127e30 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1027,6 +1027,7 @@ int mysql_backup_table(THD* thd, TABLE_LIST* table_list) "backup", TL_READ, 0, 0, 0, &handler::backup)); } + int mysql_restore_table(THD* thd, TABLE_LIST* table_list) { DBUG_ENTER("mysql_restore_table"); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index fa44e7799fa..9a0badda099 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -394,6 +394,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize); %token END %token THEN_SYM +%token SQL_ANSI_MODE %token SQL_BIG_TABLES %token SQL_BIG_SELECTS %token SQL_SELECT_LIMIT @@ -2603,6 +2604,18 @@ option_value: else Lex->options|= OPTION_NOT_AUTO_COMMIT; } + | SQL_ANSI_MODE equal NUM + { + if(atoi($3.str) == 0) + { + Lex->options &= ~(OPTION_ANSI_MODE); + Lex->thd->state_map[(uchar) '"'] = STATE_STRING; + } else { + Lex->options |= OPTION_ANSI_MODE; + Lex->thd->state_map[(uchar) '"'] = STATE_USER_VARIABLE_DELIMITER; + } + } + | SQL_SELECT_LIMIT equal ULONG_NUM { Lex->select_limit= $3; @@ -2697,16 +2710,20 @@ text_or_password: } set_option: - SQL_BIG_TABLES { $$= OPTION_BIG_TABLES; } + SQL_BIG_TABLES { $$= OPTION_BIG_TABLES; } | SQL_BIG_SELECTS { $$= OPTION_BIG_SELECTS; } | SQL_LOG_OFF { $$= OPTION_LOG_OFF; } | SQL_LOG_UPDATE { - $$= (opt_sql_bin_update)? OPTION_UPDATE_LOG|OPTION_BIN_LOG: OPTION_UPDATE_LOG ; + $$= (opt_sql_bin_update)? + OPTION_UPDATE_LOG|OPTION_BIN_LOG: + OPTION_UPDATE_LOG ; } | SQL_LOG_BIN { - $$= (opt_sql_bin_update)? OPTION_UPDATE_LOG|OPTION_BIN_LOG: OPTION_BIN_LOG ; + $$= (opt_sql_bin_update)? 
+ OPTION_UPDATE_LOG|OPTION_BIN_LOG: + OPTION_BIN_LOG ; } | SQL_WARNINGS { $$= OPTION_WARNINGS; } | SQL_LOW_PRIORITY_UPDATES { $$= OPTION_LOW_PRIORITY_UPDATES; } diff --git a/tests/fork3_test.pl b/tests/fork3_test.pl index 0ede221a7f4..032f1bf15e9 100755 --- a/tests/fork3_test.pl +++ b/tests/fork3_test.pl @@ -1,9 +1,9 @@ #!/usr/bin/perl -w # -# This is a test with uses 3 processes to insert, delete and select +# This is a test with uses 4 processes to insert, delete , check and select # -$opt_loop_count=100000; # Change this to make test harder/easier +$opt_loop_count=200000; # Change this to make test harder/easier ##################### Standard benchmark inits ############################## @@ -21,8 +21,8 @@ GetOptions("host=s","db=s","loop-count=i","skip-create","skip-in","skip-delete", "verbose","fast-insert","lock-tables","debug","fast","force") || die "Aborted"; $opt_verbose=$opt_debug=$opt_lock_tables=$opt_fast_insert=$opt_fast=$opt_skip_in=$opt_force=undef; # Ignore warnings from these -print "Testing 3 multiple connections to a server with 1 insert, 1 delete\n"; -print "and 1 select connections.\n"; +print "Testing 4 multiple connections to a server with 1 insert, 1 delete\n"; +print "1 select and one repair/check connection.\n"; $firsttable = "bench_f1"; @@ -51,6 +51,7 @@ $|= 1; # Autoflush test_insert() if (($pid=fork()) == 0); $work{$pid}="insert"; test_delete() if (($pid=fork()) == 0); $work{$pid}="delete"; test_select() if (($pid=fork()) == 0); $work{$pid}="select1"; +repair_and_check() if (($pid=fork()) == 0); $work{$pid}="repair/check"; $errors=0; while (($pid=wait()) != -1) @@ -148,3 +149,40 @@ sub test_select print "Test_select: ok\n"; exit(0); } + +sub repair_and_check +{ + my ($dbh,$row,$found1,$last_found1,$i,$type, $table); + $found1=0; $last_found1= -1; + + $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host", + $opt_user, $opt_password, + { PrintError => 0}) || die $DBI::errstr; + + for ($i=0; $found1 != $last_found1 ; $i++) + { + $type=($i & 2) ? "repair" : "check"; + $table=$firsttable; + $last_found1=$found1; + $sth=$dbh->prepare("$type table $table") || die "Got error on prepare: $dbh->errstr\n"; + $sth->execute || die $dbh->errstr; + + while (($row=$sth->fetchrow_arrayref)) + { + if ($row->[3] ne "OK") + { + print "Got error " . $row->[3] . " when doing $type on $table\n"; + exit(1); + } + } + $sth=$dbh->prepare("select count(*) from $table") || die "Got error on prepare: $dbh->errstr\n"; + $sth->execute || die $dbh->errstr; + @row = $sth->fetchrow_array(); + $found1= $row[0]; + $sth->finish; + sleep(3); + } + $dbh->disconnect; $dbh=0; + print "check/repair: Did $i repair/checks\n"; + exit(0); +} |
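The new test cases in mysql-test/t/bdb.test and mysql-test/t/innobase.test above exercise the min-max change recorded in the 3.23.34 changelog ("Fixed MAX() optimization on sub-key for BDB tables") together with the new HA_NOT_READ_AFTER_KEY flag checked in sql/opt_sum.cc, which makes the optimizer skip the key-only MIN/MAX shortcut for handlers that set it (Innobase in this patch). A hedged sketch of the query shape involved, with an invented table name:

    CREATE TABLE pairs (
      a INT UNSIGNED NOT NULL,
      b INT UNSIGNED NOT NULL,
      UNIQUE (a, b)
    ) TYPE = BDB;              -- or TYPE = innobase, as in the new tests

    INSERT INTO pairs VALUES (1, 1);
    -- MIN()/MAX() on the second key part with a constant first part can be
    -- answered from the index; after this change both return 1 here.
    SELECT MIN(b), MAX(b) FROM pairs WHERE a = 1;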