123 files changed, 3140 insertions(+), 1315 deletions(-)
diff --git a/.bzrignore b/.bzrignore index 05395e62b57..0b2aeb2a92c 100644 --- a/.bzrignore +++ b/.bzrignore @@ -59,9 +59,17 @@ PENDING/* TAGS aclocal.m4 bdb/README +bdb/btree/btree_auto.c bdb/build_unix/* bdb/build_vxworks/db.h +bdb/build_vxworks/db_int.h bdb/build_win32/db.h +bdb/build_win32/db_int.h +bdb/build_win32/include.tcl +bdb/build_win32/libdb.rc +bdb/db/crdel_auto.c +bdb/db/db_auto.c +bdb/dist/config.hin bdb/dist/configure bdb/dist/tags bdb/dist/template/rec_btree @@ -94,6 +102,7 @@ bdb/txn/txn_auto.c client/insert_test client/mysql client/mysqladmin +client/mysqlcheck client/mysqldump client/mysqlimport client/mysqlshow @@ -122,6 +131,8 @@ heap/hp_test2 include/my_config.h include/my_global.h include/mysql_version.h +innobase/ib_config.h +innobase/ib_config.h.in isam/isamchk isam/isamlog isam/pack_isam @@ -137,11 +148,82 @@ libmysql_r/acconfig.h libmysql_r/conf_to_src libmysql_r/my_static.h libmysql_r/mysys_priv.h +libmysqld/convert.cc +libmysqld/derror.cc +libmysqld/errmsg.c +libmysqld/field.cc +libmysqld/field_conv.cc +libmysqld/filesort.cc +libmysqld/get_password.c +libmysqld/ha_berkeley.cc +libmysqld/ha_heap.cc +libmysqld/ha_isam.cc +libmysqld/ha_isammrg.cc +libmysqld/ha_myisam.cc +libmysqld/ha_myisammrg.cc +libmysqld/handler.cc +libmysqld/hostname.cc +libmysqld/init.cc +libmysqld/item.cc +libmysqld/item_buff.cc +libmysqld/item_cmpfunc.cc +libmysqld/item_create.cc +libmysqld/item_func.cc +libmysqld/item_strfunc.cc +libmysqld/item_sum.cc +libmysqld/item_timefunc.cc +libmysqld/item_uniq.cc +libmysqld/key.cc +libmysqld/lock.cc +libmysqld/log.cc +libmysqld/log_event.cc +libmysqld/md5.c +libmysqld/mini_client.cc +libmysqld/net_pkg.cc +libmysqld/net_serv.cc +libmysqld/opt_ft.cc +libmysqld/opt_range.cc +libmysqld/opt_sum.cc +libmysqld/password.c +libmysqld/procedure.cc +libmysqld/records.cc +libmysqld/slave.cc +libmysqld/sql_acl.cc +libmysqld/sql_analyse.cc +libmysqld/sql_base.cc +libmysqld/sql_cache.cc +libmysqld/sql_class.cc +libmysqld/sql_crypt.cc +libmysqld/sql_db.cc +libmysqld/sql_delete.cc +libmysqld/sql_handler.cc +libmysqld/sql_insert.cc +libmysqld/sql_lex.cc +libmysqld/sql_list.cc +libmysqld/sql_load.cc +libmysqld/sql_manager.cc +libmysqld/sql_map.cc +libmysqld/sql_parse.cc +libmysqld/sql_rename.cc +libmysqld/sql_repl.cc +libmysqld/sql_select.cc +libmysqld/sql_show.cc +libmysqld/sql_string.cc +libmysqld/sql_table.cc +libmysqld/sql_test.cc +libmysqld/sql_udf.cc +libmysqld/sql_update.cc +libmysqld/sql_yacc.cc +libmysqld/table.cc +libmysqld/thr_malloc.cc +libmysqld/time.cc +libmysqld/unireg.cc libtool linked_client_sources linked_include_sources linked_libmysql_r_sources linked_libmysql_sources +linked_libmysqld_sources linked_server_sources myisam/ft_dump myisam/ft_eval @@ -159,6 +241,7 @@ mysql-test/mysql-test-run mysql-test/r/*.reject mysql-test/share/mysql mysql-test/var/* +mysql.proj mysqld.S mysqld.sym mysys/test_charset @@ -188,6 +271,7 @@ scripts/mysqldumpslow scripts/mysqlhotcopy scripts/safe_mysqld sql-bench/Results-linux/ATIS-mysql_bdb-Linux_2.2.14_my_SMP_i686 +sql-bench/bench-count-distinct sql-bench/bench-init.pl sql-bench/compare-results sql-bench/copy-db @@ -236,83 +320,3 @@ support-files/mysql.server support-files/mysql.spec tags tmp/* -bdb/btree/btree_auto.c -bdb/build_vxworks/db_int.h -bdb/build_win32/db_int.h -bdb/build_win32/include.tcl -bdb/build_win32/libdb.rc -bdb/db/crdel_auto.c -bdb/db/db_auto.c -bdb/dist/config.hin -libmysqld/convert.cc -libmysqld/derror.cc -libmysqld/errmsg.c -libmysqld/field.cc -libmysqld/field_conv.cc -libmysqld/filesort.cc 
-libmysqld/get_password.c -libmysqld/ha_berkeley.cc -libmysqld/ha_heap.cc -libmysqld/ha_isam.cc -libmysqld/ha_isammrg.cc -libmysqld/ha_myisam.cc -libmysqld/ha_myisammrg.cc -libmysqld/handler.cc -libmysqld/hostname.cc -libmysqld/init.cc -libmysqld/item.cc -libmysqld/item_buff.cc -libmysqld/item_cmpfunc.cc -libmysqld/item_create.cc -libmysqld/item_func.cc -libmysqld/item_strfunc.cc -libmysqld/item_sum.cc -libmysqld/item_timefunc.cc -libmysqld/item_uniq.cc -libmysqld/key.cc -libmysqld/lock.cc -libmysqld/log.cc -libmysqld/log_event.cc -libmysqld/md5.c -libmysqld/mini_client.cc -libmysqld/net_pkg.cc -libmysqld/net_serv.cc -libmysqld/opt_ft.cc -libmysqld/opt_range.cc -libmysqld/opt_sum.cc -libmysqld/password.c -libmysqld/procedure.cc -libmysqld/records.cc -libmysqld/slave.cc -libmysqld/sql_acl.cc -libmysqld/sql_analyse.cc -libmysqld/sql_base.cc -libmysqld/sql_cache.cc -libmysqld/sql_class.cc -libmysqld/sql_crypt.cc -libmysqld/sql_db.cc -libmysqld/sql_delete.cc -libmysqld/sql_insert.cc -libmysqld/sql_lex.cc -libmysqld/sql_list.cc -libmysqld/sql_load.cc -libmysqld/sql_manager.cc -libmysqld/sql_map.cc -libmysqld/sql_parse.cc -libmysqld/sql_rename.cc -libmysqld/sql_repl.cc -libmysqld/sql_select.cc -libmysqld/sql_show.cc -libmysqld/sql_string.cc -libmysqld/sql_table.cc -libmysqld/sql_test.cc -libmysqld/sql_udf.cc -libmysqld/sql_update.cc -libmysqld/sql_yacc.cc -libmysqld/table.cc -libmysqld/thr_malloc.cc -libmysqld/time.cc -libmysqld/unireg.cc -linked_libmysqld_sources -sql-bench/bench-count-distinct -libmysqld/sql_handler.cc diff --git a/BUILD/compile-ia64-O0 b/BUILD/compile-ia64-O0 deleted file mode 100644 index d07067289b4..00000000000 --- a/BUILD/compile-ia64-O0 +++ /dev/null @@ -1,12 +0,0 @@ -make -k clean -/bin/rm -f */.deps/*.P config.cache - -aclocal; autoheader; aclocal; automake; autoconf - -CFLAGS="-O0 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O0 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex -make -strip sql/mysqld - - - - diff --git a/BUILD/compile-ia64-O0-sgicc b/BUILD/compile-ia64-O0-sgicc deleted file mode 100644 index c5e14eab033..00000000000 --- a/BUILD/compile-ia64-O0-sgicc +++ /dev/null @@ -1,12 +0,0 @@ -make -k clean -/bin/rm -f */.deps/*.P config.cache - -aclocal; autoheader; aclocal; automake; autoconf - -CC=sgicc CFLAGS="-O0" CXX=sgicc CXXFLAGS="-O0" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex -make -strip sql/mysqld - - - - diff --git a/BUILD/compile-ia64-O2 b/BUILD/compile-ia64-O2 deleted file mode 100644 index 140d34b2466..00000000000 --- a/BUILD/compile-ia64-O2 +++ /dev/null @@ -1,12 +0,0 @@ -make -k clean -/bin/rm -f */.deps/*.P config.cache - -aclocal; autoheader; aclocal; automake; autoconf - -CFLAGS="-O2 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O2 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex -make -strip sql/mysqld - - - - diff --git a/BUILD/compile-ia64-O2-sgicc b/BUILD/compile-ia64-O2-sgicc deleted file mode 100644 index 64b2ff17beb..00000000000 --- a/BUILD/compile-ia64-O2-sgicc +++ /dev/null @@ -1,12 +0,0 @@ -make -k clean -/bin/rm -f */.deps/*.P config.cache - -aclocal; autoheader; aclocal; automake; autoconf - -CC=sgicc CFLAGS="-O2" CXX=sgicc CXXFLAGS="-O2" 
./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex -make -strip sql/mysqld - - - - diff --git a/BUILD/compile-ia64-O6 b/BUILD/compile-ia64-O6 deleted file mode 100644 index 8792c0e1479..00000000000 --- a/BUILD/compile-ia64-O6 +++ /dev/null @@ -1,8 +0,0 @@ -make -k clean -/bin/rm -f */.deps/*.P config.cache - -aclocal; autoheader; aclocal; automake; autoconf - -CFLAGS="-O6 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O6 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex -make -strip sql/mysqld diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index ba242021e19..b01ea787227 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -1,5 +1,8 @@ heikki@donna.mysql.fi +jani@hynda.mysql.fi jcole@abel.spaceapes.com +jcole@main.burghcom.com +jcole@tetra.spaceapes.com monty@donna.mysql.fi monty@work.mysql.com paul@central.snake.net diff --git a/Docs/manual.texi b/Docs/manual.texi index 8166b0ce35f..cf1df129f29 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -69,7 +69,7 @@ END-INFO-DIR-ENTRY @sp 10 @center @titlefont{@strong{MySQL} Reference Manual} @sp 10 -@center Copyright @copyright{} 1997-2001 TcX AB, Detron HB and MySQL Finland AB +@center Copyright @copyright{} 1997-2001 MySQL AB @c blank page after title page makes page 1 be a page front. @c also makes the back of the title page blank. @page @@ -532,10 +532,20 @@ GEMINI Tables InnoDB Tables -* InnoDB overview:: +* InnoDB overview:: InnoDB tables overview * InnoDB start:: InnoDB startup options -* Using InnoDB tables:: Using InnoDB tables -* InnoDB restrictions:: Some restrictions on @code{InnoDB} tables: +* Creating an InnoDB database:: Creating an InnoDB database. +* Using InnoDB tables:: Creating InnoDB tables +* Adding and removing:: Adding and removing InnoDB data and log files +* Backing up:: Backing up and recovering an InnoDB database +* Moving:: Moving an InnoDB database to another machine +* InnoDB transaction model:: InnoDB transaction model. +* Implementation:: Implementation of multiversioning +* Table and index:: Table and index structures +* File space management:: File space management and disk i/o +* Error handling:: Error handling +* InnoDB restrictions:: Some restrictions on InnoDB tables +* InnoDB contact information:: InnoDB contact information. MySQL Tutorial @@ -658,7 +668,7 @@ Speed of Queries that Access or Update Data MySQL Utilites * Programs:: What do the executables do? -* mysqld-max:: +* mysqld-max:: mysqld-max, An extended mysqld server * safe_mysqld:: safe_mysqld, the wrapper around mysqld * mysqld_multi:: Program for managing multiple @strong{MySQL} servers * mysql:: The command line tool @@ -751,6 +761,7 @@ Problems and Common Errors * No matching rows:: Solving problems with no matching rows * ALTER TABLE problems:: Problems with @code{ALTER TABLE}. * Change column order:: How to change the order of columns in a table +* Temporary table problems:: Some Common Errors When Using MySQL @@ -918,6 +929,7 @@ Changes in release 4.0.x (Development; Alpha) Changes in release 3.23.x (Stable) +* News-3.23.38:: Changes in release 3.23.38 * News-3.23.37:: Changes in release 3.23.37 * News-3.23.36:: Changes in release 3.23.36 * News-3.23.35:: Changes in release 3.23.35 @@ -2122,7 +2134,7 @@ because of bugs in @strong{MySQL}. 
@cindex retrieving, data @cindex data, ISAM table handler -@item The MyISAM table handler --- Gamma +@item The MyISAM table handler --- Stable This is new in @strong{MySQL} Version 3.23. It's largely based on the ISAM table code but has a lot of new and very useful features. @@ -2213,7 +2225,7 @@ The Berkeley DB code is very stable, but we are still improving the interface between @strong{MySQL} and BDB tables, so it will take some time before this is as tested as the other table types. -@item Innodb Tables -- Alpha +@item InnoDB Tables -- Alpha This is a very recent addition to @code{MySQL} and is not very tested yet. @item Automatic recovery of MyISAM tables - Beta @@ -2384,7 +2396,7 @@ Apart from the following links, you can find and download a lot of Information about the German MySQL mailing list. @item @uref{http://www2.rent-a-database.de/mysql/} -@strong{MySQL} manual in German. +@strong{MySQL} handbook in German. @item @uref{http://www.bitmover.com:8888//home/bk/mysql} Web access to the @strong{MySQL} BitKeeper repository. @@ -2432,6 +2444,9 @@ New Client libraries for the Mac OS Classic (Macintosh). @item @uref{http://www.lilback.com/macsql/} Client libraries for Mac OS Classic (Macintosh). + +@item @uref{http://sixk.maniasys.com/index_en.html} +MySQL for Amiga @end table @subheading Perl-related Links @@ -2706,7 +2721,7 @@ Popular iODBC Driver Manager (libiodbc) now available as Open Source. @item @uref{http://users.ids.net/~bjepson/freeODBC/} The FreeODBC Pages. -@item @uref{http:/http://genix.net/unixODBC/} +@item @uref{http://genix.net/unixODBC/} The unixODBC Project goals are to develop and promote unixODBC to be the definitive standard for ODBC on the Linux platform. This is to include GUI support for KDE. @@ -2763,7 +2778,7 @@ environment. @item @uref{http://www.wix.com/mysql-hosting/} Registry of Web providers who support @strong{MySQL}. -@item @uref{http://www.softagency.co.jp/mysql/index.en.phtml} +@item @uref{http://www.softagency.co.jp/mysql/index.en.html} Links about using @strong{MySQL} in Japan/Asia. @item @uref{http://abattoir.cc.ndsu.nodak.edu/~nem/mysql/udf/} @@ -2883,11 +2898,6 @@ same (or similar) query. Uses PHP and @strong{MySQL}. @item @uref{http://www.stopbit.com/} Stopbit - A technology news site using @strong{MySQL} and PHP. -@c Added 990604 -@c EMAIL: ah@dybdahl.dk -@item @uref{http://www.jokes2000.com/scripts/} -Example scripts at Jokes2000. - @item @uref{http://www.linuxsupportline.com/~kalendar/} KDE based calendar manager - The calendar manager has both single user (file based) and multi-user (@strong{MySQL} database) support. @@ -3092,8 +3102,23 @@ unsubscribe from the @code{myodbc} list, send a message to @email{myodbc-subscribe@@lists.mysql.com} or @email{myodbc-unsubscribe@@lists.mysql.com}. -There is also a german mailing list. You can find information about this -at: @uref{http://www.4t2.com/mysql}. +The following table shows some @strong{MySQL} mailing in other languages than +English. Note that these are not operated by @strong{MySQL AB}, so we can't +guarantee the quality on these. + +@table @code +@item @email{mysql-france-subscribe@@yahoogroups.com, A French mailing list} +@item @email{list@@tinc.net, A Korean mailing list} +Email @code{subscribe mysql your@@email.address} to this list. +@item @email{mysql-de-request@@lists.4t2.com, A German mailing list} +Email @code{subscribe mysql-de your@@email.address} to this list. +You can find information about this mailing list at +@uref{http://www.4t2.com/mysql}. 
+@item @email{mysql-br-request@@listas.linkway.com.br, A Portugese mailing list} +Email @code{subscribe mysql-br your@@email.address} to this list. +@item @email{mysql-alta@@elistas.net, A Spanish mailing list} +Email @code{subscribe mysql your@@email.address} to this list. +@end table @cindex net etiquette @cindex mailing lists, archive location @@ -5000,6 +5025,8 @@ sucessfully on the following operating system/thread package combinations: @item AIX 4.x with native threads. @xref{IBM-AIX}. @item +Amiga. +@item BSDI 2.x with the included MIT-pthreads package. @xref{BSDI}. @item BSDI 3.0, 3.1 and 4.x with native threads. @xref{BSDI}. @@ -5909,12 +5936,15 @@ A reasonable @code{tar} to unpack the distribution. GNU @code{tar} is known to work. Sun @code{tar} is known to have problems. @item -A working ANSI C++ compiler. @code{gcc} >= 2.8.1, @code{egcs} >= -1.0.2, SGI C++, and SunPro C++ are some of the compilers that are known to -work. @code{libg++} is not needed when using @code{gcc}. @code{gcc} -2.7.x has a bug that makes it impossible to compile some perfectly legal -C++ files, such as @file{sql/sql_base.cc}. If you only have @code{gcc} 2.7.x, -you must upgrade your @code{gcc} to be able to compile @strong{MySQL}. +A working ANSI C++ compiler. @code{gcc} >= 2.95.2, @code{egcs} >= 1.0.2 +or @code{egcs 2.91.66}, SGI C++, and SunPro C++ are some of the +compilers that are known to work. @code{libg++} is not needed when +using @code{gcc}. @code{gcc} 2.7.x has a bug that makes it impossible +to compile some perfectly legal C++ files, such as +@file{sql/sql_base.cc}. If you only have @code{gcc} 2.7.x, you must +upgrade your @code{gcc} to be able to compile @strong{MySQL}. @code{gcc} +2.8.1 is also known to have problems on some platforms so it should be +avoided if there exists a new compiler for the platform.. @code{gcc} >= 2.95.2 is recommended when compiling @strong{MySQL} Version 3.23.x. @@ -7716,7 +7746,7 @@ For the source distribution of @code{glibc} 2.0.7, a patch that is easy to apply and is tested with @strong{MySQL} may be found at: @example -@uref{http://www.mysql.com/Download/Linux/glibc-2.0.7-total-patch.tar.gz} +@uref{http://www.mysql.com/Downloads/Linux/glibc-2.0.7-total-patch.tar.gz} @end example If you experience crashes like these when you build @strong{MySQL}, you can @@ -8506,8 +8536,8 @@ We recommend the following @code{configure} line with @code{egcs} and @code{gcc 2.95} on AIX: @example -CC="gcc -pipe -mcpu=power2 -Wa,-many" \ -CXX="gcc -pipe -mcpu=power2 -Wa,-many" \ +CC="gcc -pipe -mcpu=power -Wa,-many" \ +CXX="gcc -pipe -mcpu=power -Wa,-many" \ CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti" \ ./configure --prefix=/usr/local/mysql --with-low-memory @end example @@ -8519,6 +8549,21 @@ available. We don't know if the @code{-fno-exceptions} is required with option generates faster code, we recommend that you should always use this option with @code{egcs / gcc}. +If you get a problem with assembler code try changing the -mcpu=xxx to +match your cpu. Typically power2, power, or powerpc may need to be used, +alternatively you might need to use 604 or 604e. I'm not positive but I +would think using "power" would likely be safe most of the time, even on +a power2 machine. + +If you don't know what your cpu is then do a "uname -m", this will give +you back a string that looks like "000514676700", with a format of +xxyyyyyymmss where xx and ss are always 0's, yyyyyy is a unique system +id and mm is the id of the CPU Planar. 
A chart of these values can be +found at +@uref{http://www.rs6000.ibm.com/doc_link/en_US/a_doc_lib/cmds/aixcmds5/uname.htm}. +This will give you a machine type and a machine model you can use to +determine what type of cpu you have. + If you have problems with signals (@strong{MySQL} dies unexpectedly under high load) you may have found an OS bug with threads and signals. In this case you can tell @strong{MySQL} not to use signals by @@ -8539,6 +8584,29 @@ On some versions of AIX, linking with @code{libbind.a} makes @code{getservbyname} core dump. This is an AIX bug and should be reported to IBM. +For AIX 4.2.1 and gcc you have to do the following changes. + +After configuring, edit @file{config.h} and @file{include/my_config.h} +and change the line that says + +@example +#define HAVE_SNPRINTF 1 +@end example + +to + +@example +#undef HAVE_SNPRINTF +@end example + +And finally, in @file{mysqld.cc} you need to add a prototype for initgoups. + +@example +#ifdef _AIX41 +extern "C" int initgroups(const char *,int); +#endif +@end example + @node HP-UX 10.20, HP-UX 11.x, IBM-AIX, Source install system issues @subsection HP-UX Version 10.20 Notes @@ -8666,6 +8734,16 @@ The optimization flags used by @strong{MySQL} (-O3) are not recognized by HP's compilers. I did not change the flags. @end itemize +If you get the following error from @code{configure} + +@example +checking for cc option to accept ANSI C... no +configure: error: MySQL requires a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual. +@end example + +Check that you don't have the path to the K&R compiler before the path +to the HP-UX C and C++ compiler. + @node Mac OS X, BEOS, HP-UX 11.x, Source install system issues @subsection Mac OS X Notes @@ -9939,7 +10017,7 @@ yourself with the different BDB specific startup options. @xref{BDB start}. If you are using Gemini tables, refer to the Gemini-specific startup options. @xref{GEMINI start}. -If you are using Innodb tables, refer to the Innodb-specific startup +If you are using InnoDB tables, refer to the InnoDB-specific startup options. @xref{InnoDB start}. @node Automatic start, Command-line options, Starting server, Post-installation @@ -10048,6 +10126,10 @@ Chroot mysqld daemon during startup. Recommended security measure. It will somewhat limit @code{LOAD DATA INFILE} and @code{SELECT ... INTO OUTFILE} though. +@item --core-file +Write a core file if @code{mysqld} dies. For some systems you must also +specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld}. + @item -h, --datadir=path Path to the database root. @@ -10202,6 +10284,10 @@ gives everyone @emph{full access} to all databases! (You can tell a running server to start using the grant tables again by executing @code{mysqladmin flush-privileges} or @code{mysqladmin reload}.) +@item --skip-host-cache +Never use host name cache for faster name-ip resolution, but query DNS server +on every connect instead. @xref{DNS}. + @item --skip-locking Don't use system locking. To use @code{isamchk} or @code{myisamchk} you must shut down the server. @xref{Stability}. Note that in @strong{MySQL} Version @@ -10217,14 +10303,14 @@ Don't listen for TCP/IP connections at all. All interaction with @code{mysqld} must be made via Unix sockets. This option is highly recommended for systems where only local requests are allowed. @xref{DNS}. -@item --skip-host-cache -Never use host name cache for faster name-ip resolution, but query DNS server -on every connect instead. @xref{DNS}. 
- @item --skip-new Don't use new, possible wrong routines. Implies @code{--skip-delay-key-write}. This will also set default table type to @code{ISAM}. @xref{ISAM}. +@item --skip-stack-trace +Don't write stack traces. This option is useful when you are running +@code{mysqld} under a debugger. @xref{Debugging server}. + @item --skip-safemalloc If @strong{MySQL} is configured with @code{--with-debug=full}, all programs will check the memory for overruns for every memory allocation and memory @@ -11925,10 +12011,11 @@ communication. All other information is transferred as text that can be read by anyone who is able to watch the connection. If you are concerned about this, you can use the compressed protocol (in @strong{MySQL} Version 3.22 and above) -to make things much harder. To make things even more secure you should -use @code{ssh} (see @uref{http://www.cs.hut.fi/ssh}). With this, you -can get an encrypted TCP/IP connection between a @strong{MySQL} server -and a @strong{MySQL} client. +to make things much harder. To make things even more secure you should use +@code{ssh}. You can find an open source ssh client at +@uref{http://www.openssh.org}, and a commercial ssh client at +@uref{http://www.ssh.com}. With this, you can get an encrypted TCP/IP +connection between a @strong{MySQL} server and a @strong{MySQL} client. To make a @strong{MySQL} system secure, you should strongly consider the following suggestions: @@ -18168,6 +18255,11 @@ per-connection basis. It will not be changed by another client. It will not even be changed if you update another @code{AUTO_INCREMENT} column with a non-magic value (that is, a value that is not @code{NULL} and not @code{0}). +If you insert many rows at the same time with an insert statement, +@code{LAST_INSERT_ID()} returns the value for the first inserted row. +The reason for this is so that you it makes it possible to easily reproduce +the same @code{INSERT} statement against some other server. + @cindex sequence emulation If @code{expr} is given as an argument to @code{LAST_INSERT_ID()} in an @code{UPDATE} clause, then the value of the argument is returned as a @@ -18671,9 +18763,10 @@ When you insert a value of @code{NULL} (recommended) or @code{0} into an @xref{mysql_insert_id, , @code{mysql_insert_id()}}. If you delete the row containing the maximum value for an -@code{AUTO_INCREMENT} column, the value will be reused with an ISAM -table but not with a @code{MyISAM} table. If you delete all rows in the -table with @code{DELETE FROM table_name} (without a @code{WHERE}) in +@code{AUTO_INCREMENT} column, the value will be reused with an +@code{ISAM}, @code{BDB} or @code{INNODB} table but not with a +@code{MyISAM} table. If you delete all rows in the table with +@code{DELETE FROM table_name} (without a @code{WHERE}) in @code{AUTOCOMMIT} mode, the sequence starts over for both table types. @strong{NOTE:} There can be only one @code{AUTO_INCREMENT} column per @@ -19683,7 +19776,7 @@ SELECT [STRAIGHT_JOIN] [SQL_SMALL_RESULT] [SQL_BIG_RESULT] [SQL_BUFFER_RESULT] [ORDER BY @{unsigned_integer | col_name | formula@} [ASC | DESC] ,...] [LIMIT [offset,] rows] [PROCEDURE procedure_name] - [FOR UPDATE | IN SHARE MODE]] + [FOR UPDATE | LOCK IN SHARE MODE]] @end example @c help end @@ -20425,7 +20518,8 @@ like you could do this, but that was a bug that has been corrected. 
@section @code{LOAD DATA INFILE} Syntax @example -LOAD DATA [LOW_PRIORITY] [LOCAL] INFILE 'file_name.txt' [REPLACE | IGNORE] +LOAD DATA [LOW_PRIORITY | CONCURRENT] [LOCAL] INFILE 'file_name.txt' + [REPLACE | IGNORE] INTO TABLE tbl_name [FIELDS [TERMINATED BY '\t'] @@ -20453,6 +20547,12 @@ If you specify the keyword @code{LOW_PRIORITY}, execution of the @code{LOAD DATA} statement is delayed until no other clients are reading from the table. +If you specify the keyword @code{CONCURRENT} with a @code{MyISAM} table, +then other threads can retrieve data from the table while @code{LOAD +DATA} is executing. Using this option will of course affect the +performance of @code{LOAD DATA} a bit even if no other thread is using +the table at the same time. + Using @code{LOCAL} will be a bit slower than letting the server access the files directly, because the contents of the file must travel from the client host to the server host. On the other hand, you do not need the @@ -21682,7 +21782,7 @@ if @code{--skip-bdb} is used. @code{YES} if @code{mysqld} supports Gemini tables. @code{DISABLED} if @code{--skip-gemini} is used. @item @code{have_innodb} -@code{YES} if @code{mysqld} supports Innodb tables. @code{DISABLED} +@code{YES} if @code{mysqld} supports InnoDB tables. @code{DISABLED} if @code{--skip-innodb} is used. @item @code{have_raid} @code{YES} if @code{mysqld} supports the @code{RAID} option. @@ -22473,11 +22573,11 @@ non-transactional table will not change. If you are using @code{BEGIN} or @code{SET AUTOCOMMIT=0}, you should use the @strong{MySQL} binary log for backups instead of the -old update log; The transaction is stored in the binary log -in one chunk, during @code{COMMIT}, the to ensure and @code{ROLLBACK}:ed -transactions are not stored. @xref{Binary log}. +older update log. Transactions are stored in the binary log +in one chunk, upon @code{COMMIT}, to ensure that transactions which are +rolled back are not stored. @xref{Binary log}. -The following commands automatically ends an transaction (as if you had done +The following commands automatically end a transaction (as if you had done a @code{COMMIT} before executing the command): @multitable @columnfractions .33 .33 .33 @@ -22531,6 +22631,9 @@ locks while the thread is waiting for the @code{WRITE} lock. You should only use @code{LOW_PRIORITY WRITE} locks if you are sure that there will eventually be a time when no threads will have a @code{READ} lock. +@code{LOCK TABLES} and @code{UNLOCK TABLES} both commits any active +transactions. + When you use @code{LOCK TABLES}, you must lock all tables that you are going to use and you must use the same alias that you are going to use in your queries! If you are using a table multiple times in a query @@ -22767,7 +22870,7 @@ You can set the default isolation level for @code{mysqld} with @findex GRANT @findex REVOKE -@node GRANT, HANDLER, SET TRANSACTION, Reference +@node GRANT, CREATE INDEX, SET TRANSACTION, Reference @section @code{GRANT} and @code{REVOKE} Syntax @example @@ -23330,7 +23433,14 @@ the table type, the index and data will be stored in other files. The default table type in @strong{MySQL} is @code{MyISAM}. If you are trying to use a table type that is not compiled-in or activated, -@strong{MySQL} will instead create a table of type @code{MyISAM}. +@strong{MySQL} will instead create a table of type @code{MyISAM}. 
This +is a very useful feature when you want to copy tables between different +SQL servers that supports different table types (like copying tables to +a slave that is optimized for speed by not having transactional tables). +This automatic table changing can however also be very confusing for new +@strong{MySQL} users. We plan to fix this by introducing warnings in +@strong{MySQL} 4.0 and giving a warning when a table type is automaticly +changed. You can convert tables between different types with the @code{ALTER TABLE} statement. @xref{ALTER TABLE, , @code{ALTER TABLE}}. @@ -23430,7 +23540,7 @@ Internal handling of one @code{AUTO_INCREMENT} column. @code{MyISAM} will automatically update this on @code{INSERT/UPDATE}. The @code{AUTO_INCREMENT} value can be reset with @code{myisamchk}. This will make @code{AUTO_INCREMENT} columns faster (at least 10 %) and old -numbers will not be reused as with the old ISAM. Note that when an +numbers will not be reused as with the old @code{ISAM}. Note that when an @code{AUTO_INCREMENT} is defined on the end of a multi-part-key the old behavior is still present. @item @@ -23799,7 +23909,7 @@ is not signaled to the other servers. @section MERGE Tables @code{MERGE} tables are new in @strong{MySQL} Version 3.23.25. The code -is still in beta, but should stabilize soon! +is still in gamma, but should be resonable stable. A @code{MERGE} table is a collection of identical @code{MyISAM} tables that can be used as one. You can only @code{SELECT}, @code{DELETE}, and @@ -23812,8 +23922,8 @@ will only clear the mapping for the table, not delete everything in the mapped tables. (We plan to fix this in 4.0). With identical tables we mean that all tables are created with identical -column information. You can't put a MERGE over tables where the columns -are packed differently or doesn't have exactly the same columns. +column and key information. You can't put a MERGE over tables where the +columns are packed differently or doesn't have exactly the same columns. Some of the tables can however be compressed with @code{myisampack}. @xref{myisampack}. @@ -23848,8 +23958,10 @@ More efficient repairs. It's easier to repair the individual files that are mapped to a @code{MERGE} file than trying to repair a real big file. @item Instant mapping of many files as one. A @code{MERGE} table uses the -index of the individual tables. It doesn't need an index of its one. -This makes @code{MERGE} table collections VERY fast to make or remap. +index of the individual tables. It doesn't need to maintain an index of +its one. This makes @code{MERGE} table collections VERY fast to make or +remap. Note that you must specify the key definitions when you create +a @code{MERGE} table!. @item If you have a set of tables that you join to a big table on demand or batch, you should instead create a @code{MERGE} table on them on demand. 
@@ -24062,62 +24174,62 @@ SUM_OVER_ALL_KEYS(max_length_of_key + sizeof(char*) * 2) @section BDB or Berkeley_DB Tables @menu -* BDB overview:: -* BDB install:: -* BDB start:: -* BDB characteristic:: -* BDB TODO:: -* BDB portability:: -* BDB errors:: +* BDB overview:: Overview of BDB Tables +* BDB install:: Installing BDB +* BDB start:: BDB startup options +* BDB characteristic:: Some characteristic of @code{BDB} tables: +* BDB TODO:: Some things we need to fix for BDB in the near future: +* BDB portability:: Operating systems supported by @strong{BDB} +* BDB errors:: Errors You May Get When Using BDB Tables @end menu @node BDB overview, BDB install, BDB, BDB -@subsection Overview over BDB tables +@subsection Overview of BDB Tables -BDB tables are included in the @strong{MySQL} source distribution -starting from 3.23.34 and will be activated in the @strong{MySQL}-max +Support for BDB tables is included in the @strong{MySQL} source distribution +starting from Version 3.23.34 and is activated in the @strong{MySQL}-Max binary. -Berkeley DB (@uref{http://www.sleepycat.com}) has provided -@strong{MySQL} with a transaction-safe table handler. This will survive -crashes and also provides @code{COMMIT} and @code{ROLLBACK} on -transactions. The @strong{MySQL} source distribution comes with a BDB -distribution that has a couple of small patches to make it work more -smoothly with @strong{MySQL}. You can't use a not-patched @code{BDB} -version with @strong{MySQL}. +BerkeleyDB, available at @uref{http://www.sleepycat.com/} has provided +@strong{MySQL} with a transactional table handler. By using BerkeleyDB +tables, your tables may have a greater chance of surviving crashes, and also +provides @code{COMMIT} and @code{ROLLBACK} on transactions. The +@strong{MySQL} source distribution comes with a BDB distribution that has a +couple of small patches to make it work more smoothly with @strong{MySQL}. +You can't use a non-patched @code{BDB} version with @strong{MySQL}. -We at MySQL AB are working in close cooperating with Sleepycat to -keep the quality of the @strong{MySQL} - BDB interface high. +We at @strong{MySQL AB} are working in close cooperation with Sleepycat to +keep the quality of the @strong{MySQL}/BDB interface high. When it comes to supporting BDB tables, we are committed to help our users to locate the problem and help creating a reproducable test case for any problems involving BDB tables. Any such test case will be forwarded to Sleepycat who in turn will help us find and fix the -problem. As this is a two stage operating, any problems with BDB tables -may take a little longer for us to fix than for other table handlers, -but as the Berkeley code itself has been used by many other applications -than @strong{MySQL} we don't envision any big problems with this. -@xref{Table handler support}. +problem. As this is a two stage operation, any problems with BDB tables +may take a little longer for us to fix than for other table handlers. +However, as the BerkeleyDB code itself has been used by many other +applications than @strong{MySQL}, we don't envision any big problems with +this. @xref{Table handler support}. @node BDB install, BDB start, BDB overview, BDB @subsection Installing BDB If you have downloaded a binary version of @strong{MySQL} that includes -support for Berkeley DB, simply follow the instructions for -installing a binary version of @strong{MySQL}. @xref{Installing binary}. -@xref{mysqld-max}. 
+support for BerkeleyDB, simply follow the instructions for installing a +binary version of @strong{MySQL}. +@xref{Installing binary}. @xref{mysqld-max}. To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL} -3.23.34 or newer and configure @code{MySQL} with the -@code{--with-berkeley-db} option. @xref{Installing source}. +Version 3.23.34 or newer and configure @code{MySQL} with the +@code{--with-berkeley-db} option. @xref{Installing source}. @example cd /path/to/source/of/mysql-3.23.34 ./configure --with-berkeley-db @end example -Please refer to the manual provided by @code{BDB} distribution for -more/updated information. +Please refer to the manual provided with the @code{BDB} distribution for +more updated information. Even though Berkeley DB is in itself very tested and reliable, the @strong{MySQL} interface is still considered beta quality. @@ -24425,24 +24537,24 @@ limited by @code{gemini_connection_limit}. The default is 100 users. NuSphere is working on removing these limitations. -@node InnoDB, , GEMINI, Table types +@node InnoDB, , GEMINI, Table types @section InnoDB Tables @menu -* InnoDB overview:: InnoDB tables overview -* InnoDB start:: InnoDB startup options -* Creating an InnoDB database:: Creating an InnoDB database -* Using InnoDB tables:: Creating InnoDB tables -* Adding and removing:: Adding and removing InnoDB data and log files -* Backing up:: Backing up and recovering an InnoDB database -* Moving:: Moving an InnoDB database to another machine -* InnoDB transaction model:: InnoDB transaction model -* Implementation:: Implementation of multiversioning -* Table and index:: Table and index structures -* File space management:: File space management and disk i/o -* Error handling:: Error handling -* InnoDB restrictions:: Some restrictions on InnoDB tables -* InnoDB contact information:: InnoDB contact information +* InnoDB overview:: InnoDB tables overview +* InnoDB start:: InnoDB startup options +* Creating an InnoDB database:: Creating an InnoDB database. +* Using InnoDB tables:: Creating InnoDB tables +* Adding and removing:: Adding and removing InnoDB data and log files +* Backing up:: Backing up and recovering an InnoDB database +* Moving:: Moving an InnoDB database to another machine +* InnoDB transaction model:: InnoDB transaction model. +* Implementation:: Implementation of multiversioning +* Table and index:: Table and index structures +* File space management:: File space management and disk i/o +* Error handling:: Error handling +* InnoDB restrictions:: Some restrictions on InnoDB tables +* InnoDB contact information:: InnoDB contact information. @end menu @node InnoDB overview, InnoDB start, InnoDB, InnoDB @@ -24452,55 +24564,56 @@ InnoDB tables are included in the @strong{MySQL} source distribution starting from 3.23.34a and are activated in the @strong{MySQL -max} binary. -If you have downloaded a binary version of MySQL that includes -support for InnoDB, simply follow the instructions for -installing a binary version of MySQL. -See section 4.6 'Installing a MySQL Binary Distribution'. +If you have downloaded a binary version of @strong{MySQL} that includes +support for InnoDB (mysqld-max), simply follow the instructions for +installing a binary version of @strong{MySQL}. @xref{Installing binary}. +@xref{mysqld-max}. -To compile MySQL with InnoDB support, download MySQL-3.23.34a or newer -and configure @code{MySQL} with the -@code{--with-innobase} option. Starting from MySQL-3.23.37 the option -is @code{--with-innodb}. 
See section -4.7 'Installing a MySQL Source Distribution'. +To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer +and configure @code{MySQL} with the @code{--with-innodb} option. +@xref{Installing source}. @example cd /path/to/source/of/mysql-3.23.37 ./configure --with-innodb @end example -InnoDB provides MySQL with a transaction safe table handler with +InnoDB provides @strong{MySQL} with a transaction safe table handler with commit, rollback, and crash recovery capabilities. InnoDB does locking on row level, and also provides an Oracle-style consistent non-locking read in @code{SELECTS}, which increases transaction concurrency. There is not need for lock escalation in InnoDB, because row level locks in InnoDB fit in very small space. -Technically, InnoDB is a database backend placed under MySQL. InnoDB +Technically, InnoDB is a database backend placed under @strong{MySQL}. InnoDB has its own buffer pool for caching data and indexes in main memory. InnoDB stores its tables and indexes in a tablespace, which may consist of several files. This is different from, for example, @code{MyISAM} tables where each table is stored as a separate file. InnoDB is distributed under the GNU GPL License Version 2 (of June 1991). -In the source distribution of MySQL, InnoDB appears as a subdirectory. +In the source distribution of @strong{MySQL}, InnoDB appears as a subdirectory. -@node InnoDB start +@node InnoDB start, Creating an InnoDB database, InnoDB overview, InnoDB @subsection InnoDB startup options -Beginning from MySQL-3.23.37 the prefix of the options is changed +Beginning from @strong{MySQL}-3.23.37 the prefix of the options is changed from @code{innobase_...} to @code{innodb_...}. -To use InnoDB tables you must specify configuration parameters -in the MySQL configuration file in the @code{[mysqld]} section of -the configuration file @file{my.cnf}. -Suppose you have a Windows NT machine with 128 MB RAM and a -single 10 GB hard disk. -Below is an example of possible configuration parameters in @file{my.cnf} for -InnoDB: +To use InnoDB tables you @strong{MUST} specify configuration parameters +in the @strong{MySQL} configuration file in the @code{[mysqld]} section of +the configuration file @file{my.cnf}. @xref{Option files}. + +The only required parameter to use InnoDB is @code{innodb_data_file_path}, +but you should set others if you want to get a better performance. + +Suppose you have a Windows NT machine with 128 MB RAM and a single 10 GB +hard disk. Below is an example of possible configuration parameters in +@file{my.cnf} for InnoDB: @example -innodb_data_home_dir = c:\ibdata innodb_data_file_path = ibdata1:2000M;ibdata2:2000M +innodb_data_home_dir = c:\ibdata set-variable = innodb_mirrored_log_groups=1 innodb_log_group_home_dir = c:\iblogs set-variable = innodb_log_files_in_group=3 @@ -24522,8 +24635,8 @@ Below is an example of possible configuration parameters in @file{my.cnf} for InnoDB: @example -innodb_data_home_dir = / innodb_data_file_path = ibdata/ibdata1:2000M;dr2/ibdata/ibdata2:2000M +innodb_data_home_dir = / set-variable = innodb_mirrored_log_groups=1 innodb_log_group_home_dir = /dr3 set-variable = innodb_log_files_in_group=3 @@ -24593,7 +24706,7 @@ log archiving. The value of this parameter should currently be set the same as @code{innodb_log_group_home_dir}. @item @code{innodb_log_archive} @tab This value should currently be set to 0. 
As recovery from a backup is -done by MySQL using its own log files, there is currently no need to +done by @strong{MySQL} using its own log files, there is currently no need to archive InnoDB log files. @item @code{innodb_buffer_pool_size} @tab The size of the memory buffer InnoDB uses to cache data and indexes of @@ -24608,7 +24721,7 @@ and other internal data structures. A sensible value for this might be 2M, but the more tables you have in your application the more you will need to allocate here. If InnoDB runs out of memory in this pool, it will start to allocate memory from the operating system, and write -warning messages to the MySQL error log. +warning messages to the @strong{MySQL} error log. @item @code{innodb_file_io_threads} @tab Number of file i/o threads in InnoDB. Normally, this should be 4, but on Windows NT disk i/o may benefit from a larger number. @@ -24622,18 +24735,18 @@ InnoDB cannot notice. In cases like this the timeout is useful to resolve the situation. @end multitable -@node Creating an InnoDB database +@node Creating an InnoDB database, Using InnoDB tables, InnoDB start, InnoDB @subsection Creating an InnoDB database -Suppose you have installed MySQL and have edited @file{my.cnf} so that +Suppose you have installed @strong{MySQL} and have edited @file{my.cnf} so that it contains the necessary InnoDB configuration parameters. -Before starting MySQL you should check that the directories you have +Before starting @strong{MySQL} you should check that the directories you have specified for InnoDB data files and log files exist and that you have access rights to those directories. InnoDB cannot create directories, only files. Check also you have enough disk space for the data and log files. -When you now start MySQL, InnoDB will start creating your data files +When you now start @strong{MySQL}, InnoDB will start creating your data files and log files. InnoDB will print something like the following: @example @@ -24658,9 +24771,9 @@ InnoDB: Started mysqld: ready for connections @end example -A new InnoDB database has now been created. You can connect to the MySQL -server with the usual MySQL client programs like @code{mysql}. -When you shut down the MySQL server with @file{mysqladmin shutdown}, +A new InnoDB database has now been created. You can connect to the @strong{MySQL} +server with the usual @strong{MySQL} client programs like @code{mysql}. +When you shut down the @strong{MySQL} server with @file{mysqladmin shutdown}, InnoDB output will be like the following: @example @@ -24675,7 +24788,7 @@ will see the files created. The log directory will also contain a small file named @file{ib_arch_log_0000000000}. That file resulted from the database creation, after which InnoDB switched off log archiving. -When MySQL is again started, the output will be like the following: +When @strong{MySQL} is again started, the output will be like the following: @example ~/mysqlm/sql > mysqld @@ -24685,17 +24798,17 @@ mysqld: ready for connections @subsubsection If something goes wrong in database creation -If something goes wrong in an InnoDB database creation, you should delete -all files created by InnoDB. This means all data files, all log files, -the small archived log file, and in the case you already did create -some InnoDB tables, delete also the corresponding @file{.frm} -files for these tables from the MySQL database directories. Then you can -try the InnoDB database creation again. 
+If something goes wrong in an InnoDB database creation, you should +delete all files created by InnoDB. This means all data files, all log +files, the small archived log file, and in the case you already did +create some InnoDB tables, delete also the corresponding @file{.frm} +files for these tables from the @strong{MySQL} database +directories. Then you can try the InnoDB database creation again. -@node Using InnoDB tables +@node Using InnoDB tables, Adding and removing, Creating an InnoDB database, InnoDB @subsection Creating InnoDB tables -Suppose you have started the MySQL client with the command +Suppose you have started the @strong{MySQL} client with the command @code{mysql test}. To create a table in the InnoDB format you must specify @code{TYPE = InnoDB} in the table creation SQL command: @@ -24706,15 +24819,15 @@ CREATE TABLE CUSTOMER (A INT, B CHAR (20), INDEX (A)) TYPE = InnoDB; This SQL command will create a table and an index on column @code{A} into the InnoDB tablespace consisting of the data files you specified -in @file{my.cnf}. In addition MySQL will create a file -@file{CUSTOMER.frm} to the MySQL database directory @file{test}. +in @file{my.cnf}. In addition @strong{MySQL} will create a file +@file{CUSTOMER.frm} to the @strong{MySQL} database directory @file{test}. Internally, InnoDB will add to its own data dictionary an entry for table @code{'test/CUSTOMER'}. Thus you can create a table -of the same name @code{CUSTOMER} in another database of MySQL, and +of the same name @code{CUSTOMER} in another database of @strong{MySQL}, and the table names will not collide inside InnoDB. You can query the amount of free space in the InnoDB tablespace -by issuing the table status command of MySQL for any table you have +by issuing the table status command of @strong{MySQL} for any table you have created with @code{TYPE = InnoDB}. Then the amount of free space in the tablespace appears in the table comment section in the output of @code{SHOW}. An example: @@ -24732,16 +24845,16 @@ You must drop the tables individually. Also take care not to delete or add @file{.frm} files to your InnoDB database manually: use @code{CREATE TABLE} and @code{DROP TABLE} commands. InnoDB has its own internal data dictionary, and you will get problems -if the MySQL @file{.frm} files are out of 'sync' with the InnoDB +if the @strong{MySQL} @file{.frm} files are out of 'sync' with the InnoDB internal data dictionary. -@node Adding and removing +@node Adding and removing, Backing up, Using InnoDB tables, InnoDB @subsection Adding and removing InnoDB data and log files You cannot increase the size of an InnoDB data file. To add more into your tablespace you have to add a new data file. To do this you have to -shut down your MySQL database, edit the @file{my.cnf} file, adding a -new file to @code{innodb_data_file_path}, and then start MySQL +shut down your @strong{MySQL} database, edit the @file{my.cnf} file, adding a +new file to @code{innodb_data_file_path}, and then start @strong{MySQL} again. Currently you cannot remove a data file from InnoDB. To decrease the @@ -24750,14 +24863,14 @@ all your tables, create a new database, and import your tables to the new database. If you want to change the number or the size of your InnoDB log files, -you have to shut down MySQL and make sure that it shuts down without errors. +you have to shut down @strong{MySQL} and make sure that it shuts down without errors. 
Then copy the old log files into a safe place just in case something went wrong in the shutdown and you will need them to recover the database. Delete then the old log files from the log file directory, -edit @file{my.cnf}, and start MySQL again. InnoDB will tell +edit @file{my.cnf}, and start @strong{MySQL} again. InnoDB will tell you at the startup that it is creating new log files. -@node Backing up +@node Backing up, Moving, Adding and removing, InnoDB @subsection Backing up and recovering an InnoDB database The key to safe database management is taking regular backups. @@ -24765,7 +24878,7 @@ To take a 'binary' backup of your database you have to do the following: @itemize @bullet @item -Shut down your MySQL database and make sure it shuts down without errors. +Shut down your @strong{MySQL} database and make sure it shuts down without errors. @item Copy all your data files into a safe place. @item @@ -24796,12 +24909,12 @@ dumps. Then you can take the binary backup, and you will then have a consistent snapshot of your database in two formats. To be able to recover your InnoDB database to the present from the -binary backup described above, you have to run your MySQL database -with the general logging and log archiving of MySQL switched on. Here -by the general logging we mean the logging mechanism of the MySQL server +binary backup described above, you have to run your @strong{MySQL} database +with the general logging and log archiving of @strong{MySQL} switched on. Here +by the general logging we mean the logging mechanism of the @strong{MySQL} server which is independent of InnoDB logs. -To recover from a crash of your MySQL server process, the only thing +To recover from a crash of your @strong{MySQL} server process, the only thing you have to do is to restart it. InnoDB will automatically check the logs and perform a roll-forward of the database to the present. InnoDB will automatically roll back uncommitted transactions which were @@ -24836,7 +24949,7 @@ mysqld: ready for connections If your database gets corrupted or your disk fails, you have to do the recovery from a backup. In the case of corruption, you should first find a backup which is not corrupted. From a backup do the recovery -from the general log files of MySQL according to instructions in the +from the general log files of @strong{MySQL} according to instructions in the MySQL manual. @subsubsection Checkpoints @@ -24869,7 +24982,7 @@ the total size of the log files as big as the buffer pool or even bigger. The drawback in big log files is that crash recovery can last longer because there will be more log to apply to the database. -@node Moving +@node Moving, InnoDB transaction model, Backing up, InnoDB @subsection Moving an InnoDB database to another machine InnoDB data and log files are binary-compatible on all platforms @@ -24889,7 +25002,7 @@ the big rollback segment the big import transaction will generate. Do the commit only after importing a whole table or a segment of a table. -@node InnoDB transaction model +@node InnoDB transaction model, Implementation, Moving, InnoDB @subsection InnoDB transaction model In the InnoDB transaction model the goal has been to combine the best @@ -24902,7 +25015,7 @@ to lock every row in the database, or any random subset of the rows, without InnoDB running out of memory. In InnoDB all user activity happens inside transactions. 
If the -auto commit mode is used in MySQL, then each SQL statement +auto commit mode is used in @strong{MySQL}, then each SQL statement will form a single transaction. If the auto commit mode is switched off, then we can think that a user always has a transaction open. If he issues @@ -24953,10 +25066,10 @@ happen that meanwhile some other user has deleted the parent row from the table @code{PARENT}, and you are not aware of that. The solution is to perform the @code{SELECT} in a locking -mode, @code{IN SHARE MODE}. +mode, @code{LOCK IN SHARE MODE}. @example -SELECT * FROM PARENT WHERE NAME = 'Jones' IN SHARE MODE; +SELECT * FROM PARENT WHERE NAME = 'Jones' LOCK IN SHARE MODE; @end example Performing a read in share mode means that we read the latest @@ -25055,7 +25168,7 @@ table. @code{SELECT ... FROM ...} : this is a consistent read, reading a snapshot of the database and setting no locks. @item -@code{SELECT ... FROM ... IN SHARE MODE} : sets shared next-key locks +@code{SELECT ... FROM ... LOCK IN SHARE MODE} : sets shared next-key locks on all index records the read encounters. @item @code{SELECT ... FROM ... FOR UPDATE} : sets exclusive next-key locks @@ -25070,7 +25183,7 @@ on the duplicate index record. @code{INSERT INTO T SELECT ... FROM S WHERE ...} sets an exclusive (non-next-key) lock on each row inserted into @code{T}. Does the search on @code{S} as a consistent read, but sets shared next-key -locks on @code{S} if the MySQL logging is on. InnoDB has to set +locks on @code{S} if the @strong{MySQL} logging is on. InnoDB has to set locks in the latter case because in roll-forward recovery from a backup every SQL statement has to be executed in exactly the same way as it was done originally. @@ -25090,10 +25203,10 @@ lock on every record the search encounters. lock on every record the search encounters. @item @code{LOCK TABLES ... } : sets table locks. In the implementation -the MySQL layer of code sets these locks. The automatic deadlock detection +the @strong{MySQL} layer of code sets these locks. The automatic deadlock detection of InnoDB cannot detect deadlocks where such table locks are involved: see the next section below. See also section 13 'InnoDB restrictions' -about the following: since MySQL does know about row level locks, +about the following: since @strong{MySQL} does know about row level locks, it is possible that you get a table lock on a table where another user currently has row level locks. But that does not put transaction integerity into danger. @@ -25104,7 +25217,7 @@ locks. But that does not put transaction integerity into danger. InnoDB automatically detects a deadlock of transactions and rolls back the transaction whose lock request was the last one to build a deadlock, that is, a cycle in the waits-for graph of transactions. -InnoDB cannot detect deadlocks where a lock set by a MySQL +InnoDB cannot detect deadlocks where a lock set by a @strong{MySQL} @code{LOCK TABLES} statement is involved, or if a lock set in another table handler than InnoDB is involved. You have to resolve these situations using @code{innodb_lock_wait_timeout} set in @@ -25117,7 +25230,7 @@ set by the SQL statement may be preserved. This is because InnoDB stores row locks in a format where it cannot afterwards know which was set by which SQL statement. 
-@node Implementation +@node Implementation, Table and index, InnoDB transaction model, InnoDB @subsection Implementation of multiversioning Since InnoDB is a multiversioned database, it must keep information @@ -25166,7 +25279,7 @@ its index records from the database. This removal operation is called a purge, and it is quite fast, usually taking the same order of time as the SQL statement which did the deletion. -@node Table and index +@node Table and index, File space management, Implementation, InnoDB @subsection Table and index structures Every InnoDB table has a special index called the clustered index @@ -25275,11 +25388,11 @@ Each secondary index record contains also all the fields defined for the clustered index key. @item A record contains also a pointer to each field of the record. -If the total length of the fields in a record is < 256 bytes, then +If the total length of the fields in a record is < 128 bytes, then the pointer is 1 byte, else 2 bytes. @end itemize -@node File space management +@node File space management, Error handling, Table and index, InnoDB @subsection File space management and disk i/o @subsubsection Disk i/o @@ -25358,7 +25471,7 @@ but remember that deleted rows can be physically removed only in a purge operation after they are no longer needed in transaction rollback or consistent read. -@node Error handling +@node Error handling, InnoDB restrictions, File space management, InnoDB @subsection Error handling The error handling in InnoDB is not always the same as @@ -25371,7 +25484,7 @@ The following list specifies the error handling of InnoDB. @itemize @bullet @item If you run out of file space in the tablespace, -you will get the MySQL @code{'Table is full'} error +you will get the @strong{MySQL} @code{'Table is full'} error and InnoDB rolls back the SQL statement. @item A transaction deadlock or a timeout in a lock wait will give @@ -25386,7 +25499,7 @@ statement. @item A 'row too long' error rolls back the SQL statement. @item -Other errors are mostly detected by the MySQL layer of code, and +Other errors are mostly detected by the @strong{MySQL} layer of code, and they roll back the corresponding SQL statement. @end itemize @@ -25394,19 +25507,20 @@ they roll back the corresponding SQL statement. @subsection Some restrictions on InnoDB tables @itemize @bullet -@item You cannot create an index on a prefix of a column: +@item +If you try to create an unique index on a prefix of a column you will get an +error: @example -@code{CREATE TABLE T (A CHAR(20), B INT, INDEX T_IND (A(5))) TYPE = InnoDB; -} +CREATE TABLE T (A CHAR(20), B INT, UNIQUE (A(5))) TYPE = InnoDB; @end example -The above will not work. For a MyISAM table the above would create an index -where only the first 5 characters from column @code{A} are stored. +If you create a non unique index on a prefix of a column, InnoDB will +create an index over the whole column. @item @code{INSERT DELAYED} is not supported for InnoDB tables. @item -The MySQL @code{LOCK TABLES} operation does not know of InnoDB +The @strong{MySQL} @code{LOCK TABLES} operation does not know of InnoDB row level locks set in already completed SQL statements: this means that you can get a table lock on a table even if there still exist transactions of other users which have row level locks on the same table. Thus @@ -25423,7 +25537,7 @@ A table cannot contain more than 1000 columns. @item @code{DELETE FROM TABLE} does not regenerate the table but instead deletes all rows, one by one, which is not that fast. 
In future versions -of MySQL you can use @code{TRUNCATE} which is fast. +of @strong{MySQL} you can use @code{TRUNCATE} which is fast. @item Before dropping a database with InnoDB tables one has to drop the individual InnoDB tables first. @@ -25444,7 +25558,7 @@ The maximum tablespace size is 4 billion database pages. This is also the maximum size for a table. @end itemize -@node InnoDB contact information, , InnoDB restrictions, InnoDB +@node InnoDB contact information, , InnoDB restrictions, InnoDB @subsection InnoDB contact information Contact information of Innobase Oy, producer of the InnoDB engine: @@ -29273,7 +29387,7 @@ have been assigned a low semantical value in @strong{a particular dataset}. * Fulltext TODO:: @end menu -@node Fulltext Fine-tuning, Fulltext Features to Appear in MySQL 4.0, , Fulltext Search +@node Fulltext Fine-tuning, Fulltext Features to Appear in MySQL 4.0, Fulltext Search, Fulltext Search @section Fine-tuning MySQL Full-text Search Unfortunately, full-text search has no user-tunable parameters yet, @@ -31501,7 +31615,7 @@ We can find the result from crash-me on a lot of different databases at @menu * Programs:: What do the executables do? -* mysqld-max:: +* mysqld-max:: mysqld-max, An extended mysqld server * safe_mysqld:: safe_mysqld, the wrapper around mysqld * mysqld_multi:: Program for managing multiple @strong{MySQL} servers * mysql:: The command line tool @@ -31652,12 +31766,47 @@ the following configure options: @multitable @columnfractions .3 .7 @item @strong{Option} @tab @strong{Comment} -@item --with-server-suffix=-max @tab Add a suffix to the @code{mysqld} version string. +@item --with-server-suffix=-Max @tab Add a suffix to the @code{mysqld} version string. @item --with-bdb @tab Support for Berkeley DB (BDB) tables @item --with-innodb @tab Support for InnoDB tables. @item CFLAGS=-DUSE_SYMDIR @tab Symbolic links support for Windows. @end multitable +Note that as Berkeley DB and InnoDB are not available for all platforms, +some of the @code{Max} binaries may not have support for both of these. +You can check which table types are supported by doing the following +query: + +@example +mysql> show variables like "have_%"; ++---------------+-------+ +| Variable_name | Value | ++---------------+-------+ +| have_bdb | YES | +| have_gemini | NO | +| have_innodb | NO | +| have_isam | YES | +| have_raid | YES | +| have_ssl | NO | ++---------------+-------+ +@end example + +The meaning of the values are: + +@multitable @columnfractions .3 .7 +@item @strong{Value} @tab @strong{Meaning}. +@item YES @tab The option is activated and usable. +@item NO @tab @strong{MySQL} is not compiled with support for this option. +@item DISABLED @tab The xxxx option is disabled because one started @code{mysqld} with @code{--skip-xxxx} or because one didn't start @code{mysqld} with all needed options to enable the option. In this case the @code{hostname.err} file should contain a reason for why the option is disabled. +@end multitable + +@strong{NOTE}: To be able to create InnoDB tables you @strong{MUST} edit +your startup options to include at least the @code{innodb_data_file_path} +option. @xref{InnoDB start}. + +To get better performance for BDB tables, you should add some configuration +options for these too. @xref{BDB start}. + @code{safe_mysqld} will automaticly try to start any @code{mysqld} binary with the @code{-max} prefix. This makes it very easy to test out a another @code{mysqld} binary in an existing installation. 
Just @@ -31665,9 +31814,26 @@ run @code{configure} with the options you want and then install the new @code{mysqld} binary as @code{mysqld-max} in the same directory where your old @code{mysqld} binary is. @xref{safe_mysqld}. -The @code{mysqld-max} RPM uses this @code{safe_mysqld} feature. It just -installs the @code{mysqld-max} executable and @code{safe_mysqld} will -automaticly use this when @code{mysqld} is restarted. +The @code{mysqld-max} RPM uses the above mentioned @code{safe_mysqld} +feature. It just installs the @code{mysqld-max} executable and +@code{safe_mysqld} will automaticly use this executable when +@code{safe_mysqld} is restarted. + +The following table shows which table types our standard @strong{MySQL-Max} +binaries includes: + +@multitable @columnfractions .4 .3 .3 +@item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB} +@item AIX 4.3 @tab N @tab Y +@item HPUX 11.0 @tab N @tab Y +@item Linux-Alpha @tab N @tab Y +@item Linux-Intel @tab Y @tab Y +@item Linux-Ia64 @tab N @tab Y +@item Solaris-intel @tab N @tab Y +@item Solaris-sparc @tab Y @tab Y +@item SCO OSR5 @tab Y @tab Y +@item UnixWare @tab Y @tab Y +@end multitable @cindex tools, safe_mysqld @cindex scripts @@ -32194,9 +32360,9 @@ the @code{mysql} variables that affect your queries. @cindex @code{safe-mode} command A useful startup option for beginners (introduced in @strong{MySQL} -Version 3.23.11) is @code{--safe-mode} (or @code{--i-am-a-dummy} for +Version 3.23.11) is @code{--safe-updates} (or @code{--i-am-a-dummy} for users that has at some time done a @code{DELETE FROM table_name} but -forgot the @code{WHERE} clause. When using this option, @code{mysql} +forgot the @code{WHERE} clause). When using this option, @code{mysql} sends the following command to the @strong{MySQL} server when opening the connection: @@ -32521,6 +32687,10 @@ used.) @item -q, --quick Don't buffer query, dump directly to stdout. Uses @code{mysql_use_result()} to do this. +@item -r, --result-file=... +Direct output to a given file. This option should be used in MSDOS, +because it prevents new line '\n' from being converted to '\n\r' (new +line + carriage return). @item -S /path/to/socket, --socket=/path/to/socket The socket file to use when connecting to @code{localhost} (which is the default host). @@ -34198,9 +34368,8 @@ Record file is crashed @item Got error ### from table handler -To get more information about the error you can do @code{perror -###}. Here is the most common errors that indicates a problem with the -table: +To get more information about the error you can run @code{perror ###}. Here +is the most common errors that indicates a problem with the table: @example shell> perror 126 127 132 134 135 136 141 144 145 @@ -34218,22 +34387,13 @@ shell> perror 126 127 132 134 135 136 141 144 145 Note that error 135, no more room in record file, is not an error that can be fixed by a simple repair. In this case you have to do: -@itemize @bullet -@item -@code{CREATE TABLE ...} for the table with proper @code{MAX_ROWS} and -@code{AVG_ROW_LENGTH} values. @xref{CREATE TABLE}. -@item -Copy the data over from the old table with @code{INSERT INTO new_table -SELECT * from old_table}. -@item -Rename the old table to the new table: -@code{RENAME old_table to tmp_table, new_table to old_table} -@item -Delete the old table: @code{DROP TABLE tmp_table}. -@end itemize +@example +ALTER TABLE table MAX_ROWS=xxx AVG_ROW_LENGTH=yyy; +@end example + @end itemize -In these cases, you must repair your tables. 
@code{myisamchk}
+In the other cases, you must repair your tables. @code{myisamchk}
can usually detect and fix most things that go wrong.
The repair process involves up to four stages, described below. Before you
@@ -34243,12 +34403,12 @@ that @code{mysqld} runs as (and to you, because you need to access the files you are checking). If it turns out you need to modify files, they must also be writable by you.
-If you are using @strong{MySQL} Version 3.23.16 and above, you can (and should) use the
-@code{CHECK} and @code{REPAIR} commands to check and repair @code{MyISAM}
-tables. @xref{CHECK TABLE}. @xref{REPAIR TABLE}.
+If you are using @strong{MySQL} Version 3.23.16 and above, you can (and
+should) use the @code{CHECK} and @code{REPAIR} commands to check and repair
+@code{MyISAM} tables. @xref{CHECK TABLE}. @xref{REPAIR TABLE}.
The manual section about table maintenance includes the options to
-@code{isamchk}/@code{myisamchk}. @xref{Table maintenance}.
+@code{isamchk}/@code{myisamchk}. @xref{Table maintenance}.
The following section is for the cases where the above command fails or if you want to use the extended features that @code{isamchk}/@code{myisamchk} provides.
@@ -34510,12 +34670,13 @@ functions. Consult this file to see how UDF calling conventions work.
For mysqld to be able to use UDF functions, you should configure MySQL with @code{--with-mysqld-ldflags=-rdynamic}. The reason is that on
-many platforms you can load a dynamic library (with @code{dlopen()})
-from a static linked program, which you would get if you are using
-@code{--with-mysqld-ldflags=-all-static} If you want to use an UDF that
-needs to access symbols from mysqld (like the @code{methaphone} example
-in @file{sql/udf_example.cc} that uses @code{default_charset_info}), you must
-link the program with @code{-rdynamic}. (see @code{man dlopen}).
+many platforms (including Linux) you can load a dynamic library (with
+@code{dlopen()}) from a static linked program, which you would get if
+you are using @code{--with-mysqld-ldflags=-all-static}. If you want to
+use a UDF that needs to access symbols from mysqld (like the
+@code{metaphone} example in @file{sql/udf_example.cc} that uses
+@code{default_charset_info}), you must link the program with
+@code{-rdynamic}. (see @code{man dlopen}).
For each function that you want to use in SQL statements, you should define corresponding C (or C++) functions. In the discussion below, the name
@@ -34932,12 +35093,13 @@ one that has been loaded with @code{CREATE FUNCTION} and not removed with
@node Adding native function, , Adding UDF, Adding functions
@section Adding a New Native Function
-The procedure for adding a new native function is described below. Note that
-you cannot add native functions to a binary distribution because the procedure
-involves modifying @strong{MySQL} source code. You must compile
-@strong{MySQL} yourself from a source distribution. Also note that if you
-migrate to another version of @strong{MySQL} (for example, when a new version is
-released), you will need to repeat the procedure with the new version.
+The procedure for adding a new native function is described below. Note
+that you cannot add native functions to a binary distribution because
+the procedure involves modifying @strong{MySQL} source code. You must
+compile @strong{MySQL} yourself from a source distribution. Also note
+that if you migrate to another version of @strong{MySQL} (for example,
+when a new version is released), you will need to repeat the procedure
+with the new version.
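A rebuild from your own source tree typically looks like the following sketch (the options shown are illustrative only; the @code{--with-mysqld-ldflags=-rdynamic} flag is needed only if you also use the UDF mechanism described above):

@example
shell> ./configure --with-mysqld-ldflags=-rdynamic
shell> make
shell> make install
@end example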
To add a new native @strong{MySQL} function, follow these steps:
@@ -35608,6 +35770,8 @@ INSERT INTO foo (auto,text) VALUES(NULL,'text');
INSERT INTO foo2 (id,text) VALUES(LAST_INSERT_ID(),'text');
@end example
+@xref{Getting unique ID}.
+
For the benefit of some ODBC applications (at least Delphi and Access), the following query can be used to find a newly inserted row:
@example
@@ -35768,6 +35932,7 @@ pre-allocated MYSQL struct.
* No matching rows:: Solving problems with no matching rows
* ALTER TABLE problems:: Problems with @code{ALTER TABLE}.
* Change column order:: How to change the order of columns in a table
+* Temporary table problems:: Problems with temporary tables
@end menu
This chapter lists some common problems and error messages that users have
@@ -36165,6 +36330,10 @@ server closes the connection after 8 hours if nothing has happened. You can change the time limit by setting the @code{wait_timeout} variable when you start mysqld.
+Another common reason to receive the @code{MySQL server has gone away} error
+is that you have issued a ``close'' on your @strong{MySQL} connection
+and then tried to run a query on the closed connection.
+
You can check that the @strong{MySQL} server hasn't died by executing @code{mysqladmin version} and examining the uptime.
@@ -36697,6 +36866,17 @@ thread that is waiting on the disk-full condition will allow the other threads to continue.
@end itemize
+Exceptions to the above behavior are when you use @code{REPAIR} or
+@code{OPTIMIZE}, or when the indexes are created in a batch after a
+@code{LOAD DATA INFILE} or after an @code{ALTER TABLE} statement.
+
+All of the above commands may use big temporary files that, left to
+themselves, would cause big problems for the rest of the system. If
+@strong{MySQL} runs out of disk space while doing any of the above operations,
+it will remove the big temporary files and mark the table as crashed
+(except for @code{ALTER TABLE}, in which case the old table will be left
+unchanged).
+
@node Multiple sql commands, Temporary files, Full disk, Problems
@section How to Run SQL Commands from a Text File
@@ -37314,7 +37494,7 @@ simple rename should get your data back.
@cindex columns, changing
@cindex changing, column order
@cindex tables, changing column order
-@node Change column order, , ALTER TABLE problems, Problems
+@node Change column order, Temporary table problems, ALTER TABLE problems, Problems
@section How To Change the Order of Columns in a Table
The whole point of SQL is to abstract the application from the data
@@ -37353,6 +37533,32 @@ Drop or rename @code{old_table}.
@code{ALTER TABLE new_table RENAME old_table}.
@end enumerate
+@cindex temporary tables, problems
+@node Temporary table problems, , Change column order, Problems
+@section TEMPORARY TABLE problems
+
+The following is a list of the limitations of @code{TEMPORARY TABLES}.
+
+@itemize @bullet
+@item
+A temporary table can only be of type @code{HEAP}, @code{ISAM} or
+@code{MyISAM}.
+@item
+You can't use temporary tables more than once in the same query.
+For example, the following doesn't work:
+
+@example
+select * from temporary_table, temporary_table as t2;
+@end example
+
+We plan to fix the above in 4.0.
+@item
+You can't use @code{RENAME} on a @code{TEMPORARY} table.
+Note that @code{ALTER TABLE org_name RENAME new_name} works!
+
+We plan to fix the above in 4.0.
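In the meantime you can use the @code{ALTER TABLE} form as a workaround; for example (the table names are only illustrative):

@example
ALTER TABLE org_name RENAME new_name;
@end example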
+@end itemize + @cindex problems, solving @cindex solving, problems @cindex databases, replicating @@ -41867,7 +42073,7 @@ This is a relatively low traffic list, in comparison with * MySQL test suite:: MySQL test suite @end menu -@node MySQL threads, MySQL test suite, , MySQL internals +@node MySQL threads, MySQL test suite, MySQL internals, MySQL internals @section MySQL Threads The @strong{MySQL} server creates the following threads: @@ -42218,7 +42424,6 @@ more than one way to compute} @item @uref{http://www.yaboo.dk/, Yaboo - Yet Another BOOkmarker} -@item @uref{http://www.yahoosuck.com, Yahoosuck} @item @uref{http://www.ozsearch.com.au, OzSearch Internet Guide} @@ -42231,7 +42436,7 @@ more than one way to compute} @itemize @bullet -@item @uref{http:www.spylog.ru/, SpyLOG ; A very popular Web counter site} +@item @uref{http://www.spylog.ru/, SpyLOG ; A very popular Web counter site} @item @uref{http://www.tucows.com/, TuCows Network; Free Software archive} @@ -42253,8 +42458,6 @@ more than one way to compute} @item @uref{http://www.game-developer.com/,The Game Development Search Engine} -@item @uref{http://www.i-run.com/html/cookbook.html,My-Recipe.com; Cookbook at i-run.com} - @item @uref{www.theinnkeeper.com, The Innkeeper Vacation Guides} @item @uref{http://www.macgamedatabase.com/, The Mac Game Database uses PHP and MySQL} @@ -42386,7 +42589,7 @@ the @strong{MySQL} database @itemize @bullet @c @item @uref{http://www.wh200th.com, White House 200th Anniversary site} -@item @uref{http://war.jgaa.com:8080/support/index.php3, Jgaa's Internet - Official Support Site} +@item @uref{http://support.jgaa.com/, Jgaa's Internet - Official Support Site} @item @uref{http://io.incluso.com, Ionline - online publication:} @strong{MySQL}, PHP, Java, Web programming, DB development @@ -42450,10 +42653,6 @@ Ecommerce site that is selling computers. @appendixsec Programming -@itemize @bullet -@item @uref{http://www.perl.org/cpan-testers, The Perl CPAN Testers results page} -@end itemize - @cindex web pages, miscellaneous @appendixsec Uncategorized Pages @@ -42873,6 +43072,15 @@ of several databases simultaneously. By Innovative-IT Development AB. @item @uref{http://www.mysql.com/downloads/gui-clients.html, MySQLGUI} The @strong{MySQL} GUI client homepage. By Sinisa at @strong{MySQL AB}. +@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_navigator_0.9.0.tar.gz, MySQL navigator 0.9} +MySQL Navigator is MySQL database server GUI client program. The purpose +of MySQL Navigator is to provide a useful client interface to MySQL +database servers, whilst supporting multiple operating systems and +languages. You can currently import/export database, enter queries, get +result sets, edit scripts, run scripts, add, alter, and delete users, +and retrieve client and server information. Uses QT 2.2. GPL +@uref{http://sql.kldp.org/mysql, Home page for MySQL Navigator}. + @item @uref{http://www.mysql.com/Downloads/Win32/secman.zip, MySQL Security GUI} A user and security management GUI for @strong{MySQL} on Windows. By Martin Jeremic. @@ -42923,6 +43131,8 @@ import-/export-files. (Freeware). By Ansgar Becker. @item @uref{http://www.mysql.com/Downloads/Win32/W9xstop.zip,Utility from Artronic to stop MySQL on win9x}. +@item @uref{http://bardo.hyperlink.cz/mysqlmon,a light weight GUI client for Windows}. + @item @uref{http://dbtools.vila.bol.com.br/, Dbtools} A tool to manage @strong{MySQL} databases. Currently only for Windows. 
Some features: @@ -42951,8 +43161,8 @@ An open source client for exploring databases and executing SQL. Supports A query tool for @strong{MySQL} and PostgreSQL. @item @uref{http://dbman.linux.cz/,dbMan} A query tool written in Perl. Uses DBI and Tk. -@item @uref{http://www.mysql.com/Downloads/Win32/Msc201.EXE, Mascon 2.1.15} -@item @uref{http://www.mysql.com/Downloads/Win32/FrMsc201.EXE, Free Mascon 2.1.14} +@item @uref{http://www.mysql.com/Downloads/Win32/Msc201.EXE, Mascon 202} +@item @uref{http://www.mysql.com/Downloads/Win32/FrMsc202.EXE, Free Mascon 202} Mascon is a powerful Win32 GUI for the administering @strong{MySQL} server databases. Mascon's features include visual table design, connections to multiple servers, data and blob editing of tables, security setting, SQL @@ -43041,6 +43251,10 @@ Apache module to include HTML from @strong{MySQL} queries into your pages, and run update queries. Originally written to implement a simple fast low-overhead banner-rotation system. By Sasha Pachev. +@item @uref{http://htcheck.sourceforge.net, htCheck} - URL checker with +MySQL backend. Spidered URLs can later be queried using SQL to retrieve +various kinds of information, eg. broken links. Written by Gabriele Bartolini. + @item @uref{http://www.odbsoft.com/cook/sources.htm} This package has various functions for generating html code from a SQL table structure and for generating SQL statements (Select, Insert, @@ -43161,7 +43375,7 @@ An authentication module for the Cyrus IMAP server. By Aaron Newsome. @appendixsec Converters @itemize @bullet -item @uref{http://www.mysql.com/Downloads/Contrib/mssql2mysql.txt, mssql2mysql.txt} +@item @uref{http://www.mysql.com/Downloads/Contrib/mssql2mysql.txt, mssql2mysql.txt} Converter from MS-SQL to MySQL. By Michael Kofler. @uref{http://www.kofler.cc/mysql/mssql2mysql.html, mssql2mysql home page}. @@ -43881,6 +44095,9 @@ Allow @code{SELECT expression LIMIT ...}. Added @code{IDENTITY} as a synonym for @code{AUTO_INCREMENT} (like Sybase). @item Added @code{ORDER BY} syntax to @code{UPDATE} and @code{DELETE}. +@item +Optimized queries of type: +@code{SELECT DISTINCT * from table_name ORDER by key_part1 LIMIT #} @end itemize @node News-3.23.x, News-3.22.x, News-4.0.x, News @@ -43896,7 +44113,7 @@ A new ISAM library which is tuned for SQL and supports large files. @item @strong{BerkeleyDB} or @strong{BDB} Uses the Berkeley DB library from Sleepycat Software to implement transaction-safe tables. -@item @strong{Innodb} +@item @strong{InnoDB} A transaction-safe table handler that supports row level locking, and many Oracle-like features. @c change "three" to "four" above when uncommenting this @@ -43927,6 +44144,7 @@ users uses this code as the rest of the code and because of this we are not yet 100% confident in this code. @menu +* News-3.23.38:: Changes in release 3.23.38 * News-3.23.37:: Changes in release 3.23.37 * News-3.23.36:: Changes in release 3.23.36 * News-3.23.35:: Changes in release 3.23.35 @@ -43968,12 +44186,58 @@ not yet 100% confident in this code. * News-3.23.0:: Changes in release 3.23.0 @end menu -@node News-3.23.37, News-3.23.36, News-3.23.x, News-3.23.x +@node News-3.23.38, News-3.23.37, News-3.23.x, News-3.23.x +@appendixsubsec Changes in release 3.23.38 +@itemize @bullet +@item +Fixed a bug in @code{REPLACE()} when using the ujis character set. +@item +Applied Sleepycat BDB patches 3.2.9.1 and 3.2.9.2. +@item +Added option @code{--skip-stack-trace} to @code{mysqld}. +@item +@code{CREATE TEMPORARY} now works with @code{InnoDB} tables. 
+@item +@code{InnoDB} now promotes sub keys to whole keys. +@item +Added option @code{CONCURRENT} to @code{LOAD DATA}. +@item +Better error message when slave @code{max_allowed_packet} is to low to +read a very long log event from the master +@item +Fixed bug when too many rows where removed when using +@code{SELECT DISTINCT ... HAVING}. +@item +@code{SHOW CREATE TABLE} now returns @code{TEMPORARY} for temporary tables. +@item +Added @code{Rows_examined} to slow query log. +@item +Fixed problems with function returning empty string when using +together with a group functions and a @code{WHERE} that didn't match any rows. +@item +New program @code{mysqlcheck}. +@item +Added database name to output for administrative commands like @code{CHECK}, +@code{REPAIR}, @code{OPTIMIZE}. +@item +Lots of portability fixes for InnoDB. +@item +Changed optimizer so that queries like +@code{SELECT * FROM table_name,table_name2 ... ORDER BY key_part1 LIMIT #} +will use index on @code{key_part1} instead of @code{filesort}. +@item +Fixed bug when doing +@code{LOCK TABLE to_table WRITE,...; INSERT INTO to_table... SELECT ...} +when @code{to_table} was empty. +@item +Fixed bug with @code{LOCK TABLE} and BDB tables. +@end itemize + +@node News-3.23.37, News-3.23.36, News-3.23.38, News-3.23.x @appendixsubsec Changes in release 3.23.37 @itemize @bullet @item -Added variables @code{ft_min_word_len}, @code{ft_max_word_len}, and -@code{ft_max_word_len_for_sort}. +Fixed a bug when using @code{MATCH} in @code{HAVING} clause. @item Fixed a bug when using @code{HEAP} tables with @code{LIKE}. @item @@ -43990,35 +44254,35 @@ Fixed bug when using indexes on @code{CHAR(255) NULL} columns. Slave thread will now be started even if @code{master-host} is not set, as long as @code{server-id} is set and valid @code{master.info} is present @item -Partial updates ( terminated with kill) are now logged with a special error +Partial updates (terminated with kill) are now logged with a special error code to the binary log. Slave will refuse to execute them if the error code indicates the update was terminated abnormally, and will have to be recovered with @code{SET SQL_SLAVE_SKIP_COUNTER=1; SLAVE START} after a manual sanity -check/correction of data integrity +check/correction of data integrity. @item Fixed bug that erroneously logged a drop of internal temporary table - on thread termination to the binary log - bug affected replication +on thread termination to the binary log - bug affected replication. @item Fixed a bug in @code{REGEXP()} on 64-bit machines. @item @code{UPDATE} and @code{DELETE} with @code{WHERE unique_key_part IS NULL} didn't update/delete all rows. @item -Disabled @code{INSERT DELAYED} for tables that supports transactions. +Disabled @code{INSERT DELAYED} for tables that support transactions. @item Fixed bug when using date functions on @code{TEXT}/@code{BLOB} column with wrong date format. @item -UDF's now also works on windows. (Patch by Ralph Mason) +UDFs now also work on Windows. (Patch by Ralph Mason) @item Fixed bug in @code{ALTER TABLE} and @code{LOAD DATA INFILE} that disabled -key-sorting. These command should now be faster in most cases. +key-sorting. These commands should now be faster in most cases. @item Fixed performance bug where reopened tables (tables that had been waiting for @code{FLUSH} or @code{REPAIR}) would not use indexes for the next query. @item -Fixed problem with @code{ALTER TABLE} to Innobase tables on Freebsd. +Fixed problem with @code{ALTER TABLE} to Innobase tables on FreeBSD. 
@item
Added @code{mysqld} variables @code{myisam_max_sort_file_size} and
@code{myisam_max_extra_sort_file_size}.
@item
Added @code{--skip-safemalloc} option to @code{mysqld}.
@@ -44037,8 +44301,9 @@
@appendixsubsec Changes in release 3.23.36
@itemize @bullet
@item
-Fixed a bug that allowed you to use database names with @code{.}. This
-fixes a serious security issue when @code{mysqld} is run as root.
+Fixed a bug that allowed you to use database names containing a @samp{.}
+character. This fixes a serious security issue when @code{mysqld} is run
+as root.
@item
Fixed bug when thread creation failed (could happen when doing a LOT of connections in a short time).
@@ -48994,12 +49259,16 @@ mysql> UPDATE tbl_name SET KEY=KEY+1,KEY=KEY+1;
will update @code{KEY} with @code{2} instead of with @code{1}.
@item
You can't use temporary tables more than once in the same query.
+For example, the following doesn't work:
@example
select * from temporary_table, temporary_table as t2;
@end example
@item
+@code{RENAME} doesn't work with @code{TEMPORARY} tables.
+
+@item
The optimizer may handle @code{DISTINCT} differently if you are using 'hidden' columns in a join or not. In a join, hidden columns are counted as part of the result (even if they are not shown) while in
@@ -49227,6 +49496,8 @@ Add @code{record_in_range()} method to @code{MERGE} tables to be able to
choose the right index when there are many to choose from. We should also extend the info interface to get the key distribution for each index, if @code{analyze} is run on all sub tables.
+@item
+@code{SET SQL_DEFAULT_TABLE_TYPE=[MyISAM | INNODB | BDB | GEMINI | HEAP]}.
@end itemize
@node TODO future, TODO sometime, TODO MySQL 4.0, TODO
@@ -49239,6 +49510,25 @@
Fail safe replication.
@item
Subqueries.
@code{select id from t where grp in (select grp from g where u > 100)}
@item
+Derived tables.
+@example
+select a.col1, b.col2 from (select max(col1) as col1 from root_table) a,
+other_table b where a.col1=b.col1
+@end example
+
+This could be done by automatically creating temporary tables for the
+derived tables for the duration of the query.
+@item
+Add @code{PREPARE} of statements and sending of parameters to @code{mysqld}.
+@item
+Extend the server/client protocol to support warnings.
+@item
+Add options to the server/client protocol to get progress notes
+for long-running commands.
+@item
+Add database and real table name (in case of alias) to the MYSQL_FIELD
+structure.
+@item
Don't allow more than a defined number of threads to run MyISAM recover at the same time.
@item
@@ -49765,6 +50055,9 @@ With some older @code{gdb} versions on Linux you must use @code{run --one-thread} if you want to be able to debug @code{mysqld} threads. In this case you can only have one thread active at a time.
+When running @code{mysqld} under gdb, you should disable the stack trace
+with @code{--skip-stack-trace} to be able to catch segfaults within gdb.
+
It's very hard to debug @strong{MySQL} under @code{gdb} if you do a lot of new connections the whole time as @code{gdb} doesn't free the memory for old threads.
You can avoid this problem by starting @code{mysqld} with diff --git a/acconfig.h b/acconfig.h index f8dd1b52c0b..db9e2d70d0b 100644 --- a/acconfig.h +++ b/acconfig.h @@ -117,6 +117,12 @@ /* pthread_attr_setscope */ #undef HAVE_PTHREAD_ATTR_SETSCOPE +/* pthread_yield that doesn't take any arguments */ +#undef HAVE_PTHREAD_YIELD_ZERO_ARG + +/* pthread_yield function with one argument */ +#undef HAVE_PTHREAD_YIELD_ONE_ARG + /* POSIX readdir_r */ #undef HAVE_READDIR_R diff --git a/acinclude.m4 b/acinclude.m4 index 412bfd55470..2f07a29023f 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -196,6 +196,42 @@ then fi ]) + +AC_DEFUN(MYSQL_PTHREAD_YIELD, +[AC_CACHE_CHECK([if pthread_yield takes zero arguments], ac_cv_pthread_yield_zero_arg, +[AC_TRY_LINK([#define _GNU_SOURCE +#include <pthread.h> +#ifdef __cplusplus +extern "C" +#endif +], +[ + pthread_yield(); +], ac_cv_pthread_yield_zero_arg=yes, ac_cv_pthread_yield_zero_arg=yeso)]) +if test "$ac_cv_pthread_yield_zero_arg" = "yes" +then + AC_DEFINE(HAVE_PTHREAD_YIELD_ZERO_ARG) +fi +] +[AC_CACHE_CHECK([if pthread_yield takes 1 argument], ac_cv_pthread_yield_one_arg, +[AC_TRY_LINK([#define _GNU_SOURCE +#include <pthread.h> +#ifdef __cplusplus +extern "C" +#endif +], +[ + pthread_yield(0); +], ac_cv_pthread_yield_one_arg=yes, ac_cv_pthread_yield_one_arg=no)]) +if test "$ac_cv_pthread_yield_one_arg" = "yes" +then + AC_DEFINE(HAVE_PTHREAD_YIELD_ONE_ARG) +fi +] +) + + + #---END: AC_DEFUN(MYSQL_CHECK_FP_EXCEPT, @@ -514,7 +550,8 @@ AC_DEFUN(MYSQL_STACK_DIRECTION, AC_DEFUN(MYSQL_FUNC_ALLOCA, [ -# Since we have heard that alloca fails on IRIX never define it on a SGI machine +# Since we have heard that alloca fails on IRIX never define it on a +# SGI machine if test ! "$host_vendor" = "sgi" then AC_REQUIRE_CPP()dnl Set CPP; we run AC_EGREP_CPP conditionally. @@ -941,6 +978,7 @@ dnl circular references. ../innobase/odbc/libodbc.a\ ../innobase/srv/libsrv.a\ ../innobase/que/libque.a\ + ../innobase/srv/libsrv.a\ ../innobase/dict/libdict.a\ ../innobase/ibuf/libibuf.a\ ../innobase/row/librow.a\ diff --git a/bdb/include/log.h b/bdb/include/log.h index 1cac0492252..81ecb4174a6 100644 --- a/bdb/include/log.h +++ b/bdb/include/log.h @@ -198,6 +198,7 @@ struct __fname { */ typedef enum { DB_LV_INCOMPLETE, + DB_LV_NONEXISTENT, DB_LV_NORMAL, DB_LV_OLD_READABLE, DB_LV_OLD_UNREADABLE diff --git a/bdb/log/log.c b/bdb/log/log.c index 69af1624824..8ddb7bcaf7d 100644 --- a/bdb/log/log.c +++ b/bdb/log/log.c @@ -309,13 +309,13 @@ __log_find(dblp, find_first, valp, statusp) int find_first, *valp; logfile_validity *statusp; { - logfile_validity clv_status, status; + logfile_validity logval_status, status; u_int32_t clv, logval; int cnt, fcnt, ret; const char *dir; char **names, *p, *q, savech; - clv_status = status = DB_LV_NORMAL; + logval_status = status = DB_LV_NONEXISTENT; /* Return a value of 0 as the log file number on failure. */ *valp = 0; @@ -385,10 +385,14 @@ __log_find(dblp, find_first, valp, statusp) * as a valid log file. */ break; + case DB_LV_NONEXISTENT: + /* Should never happen. 
*/ + DB_ASSERT(0); + break; case DB_LV_NORMAL: case DB_LV_OLD_READABLE: logval = clv; - clv_status = status; + logval_status = status; break; case DB_LV_OLD_UNREADABLE: /* @@ -410,7 +414,7 @@ __log_find(dblp, find_first, valp, statusp) */ if (!find_first) { logval = clv; - clv_status = status; + logval_status = status; } break; } @@ -420,7 +424,7 @@ __log_find(dblp, find_first, valp, statusp) err: __os_dirfree(names, fcnt); __os_freestr(p); - *statusp = clv_status; + *statusp = logval_status; return (ret); } diff --git a/bdb/log/log_rec.c b/bdb/log/log_rec.c index ad6d9f7ead2..493dd06d4c6 100644 --- a/bdb/log/log_rec.c +++ b/bdb/log/log_rec.c @@ -430,7 +430,7 @@ __log_add_logid(dbenv, logp, dbp, ndx) TAILQ_INIT(&logp->dbentry[i].dblist); else TAILQ_REINSERT_HEAD( - &logp->dbentry[i].dblist, dbp, links); + &logp->dbentry[i].dblist, dbtmp, links); } /* Initialize the new entries. */ diff --git a/client/Makefile.am b/client/Makefile.am index 77f6cb72ff1..24221dcab74 100644 --- a/client/Makefile.am +++ b/client/Makefile.am @@ -21,13 +21,14 @@ INCLUDES = -I$(srcdir)/../include \ -I.. LIBS = @CLIENT_LIBS@ LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysql/libmysqlclient.la -bin_PROGRAMS = mysql mysqladmin mysqlshow mysqldump mysqlimport mysqltest +bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow mysqldump mysqlimport mysqltest noinst_PROGRAMS = insert_test select_test thread_test noinst_HEADERS = sql_string.h completion_hash.h my_readline.h mysql_SOURCES = mysql.cc readline.cc sql_string.cc completion_hash.cc mysql_LDADD = @readline_link@ @TERMCAP_LIB@ $(LDADD) $(CXXLDFLAGS) mysql_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) mysqladmin_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) +mysqlcheck_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) mysqlshow_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) mysqldump_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) mysqlimport_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) diff --git a/client/mysqladmin.c b/client/mysqladmin.c index bda86c881e3..1e6bf3c5219 100644 --- a/client/mysqladmin.c +++ b/client/mysqladmin.c @@ -28,7 +28,7 @@ #include <my_pthread.h> /* because of signal() */ #endif -#define ADMIN_VERSION "8.19" +#define ADMIN_VERSION "8.20" #define MAX_MYSQL_VAR 64 #define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */ #define MAX_TRUNC_LENGTH 3 @@ -417,19 +417,13 @@ static my_bool execute_commands(MYSQL *mysql,int argc, char **argv) } case ADMIN_DROP: { - char buff[FN_REFLEN+20]; if (argc < 2) { my_printf_error(0,"Too few arguments to drop",MYF(ME_BELL)); return 1; } - sprintf(buff,"drop database `%.*s`",FN_REFLEN,argv[1]); - if (mysql_query(mysql,buff)) - { - my_printf_error(0,"DROP DATABASE failed; error: '%-.200s'", - MYF(ME_BELL), mysql_error(mysql)); + if (drop_db(mysql,argv[1])) return 1; - } argc--; argv++; break; } @@ -867,7 +861,8 @@ static int drop_db(MYSQL *mysql, const char *db) { puts("Dropping the database is potentially a very bad thing to do."); puts("Any data stored in the database will be destroyed.\n"); - printf("Do you really want to drop the '%s' database [y/N]\n",db); + printf("Do you really want to drop the '%s' database [y/N] ",db); + fflush(stdout); VOID(fgets(buf,sizeof(buf)-1,stdin)); if ((*buf != 'y') && (*buf != 'Y')) { @@ -878,7 +873,7 @@ static int drop_db(MYSQL *mysql, const char *db) sprintf(name_buff,"drop database %.*s",FN_REFLEN,db); if (mysql_query(mysql,name_buff)) { - my_printf_error(0,"drop of '%s' failed;\nerror: '%s'",MYF(ME_BELL), + my_printf_error(0,"DROP DATABASE %s failed;\nerror: 
'%s'",MYF(ME_BELL), db,mysql_error(mysql)); return 1; } diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c new file mode 100644 index 00000000000..3d4d4597ef5 --- /dev/null +++ b/client/mysqlcheck.c @@ -0,0 +1,685 @@ +/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* By Jani Tolonen, 2001-04-20, MySQL Development Team */ + +#define CHECK_VERSION "1.01" + +#include <global.h> +#include <my_sys.h> +#include <m_string.h> +#include <m_ctype.h> + +#include "mysql.h" +#include "mysql_version.h" +#include "mysqld_error.h" +#include <getopt.h> +#include "sslopt-vars.h" + +#include <m_string.h> + +/* Exit codes */ + +#define EX_USAGE 1 +#define EX_MYSQLERR 2 + +static MYSQL mysql_connection, *sock = 0; +static my_bool opt_alldbs = 0, opt_check_only_changed = 0, opt_extended = 0, + opt_compress = 0, opt_databases = 0, opt_fast = 0, + opt_medium_check = 0, opt_quick = 0, opt_all_in_1 = 0, + opt_silent = 0, opt_auto_repair = 0, ignore_errors = 0; +static uint verbose = 0, opt_mysql_port=0; +static my_string opt_mysql_unix_port = 0; +static char *opt_password = 0, *current_user = 0, *default_charset = 0, + *current_host = 0; +static int first_error = 0; +DYNAMIC_ARRAY tables4repair; + +enum operations {DO_CHECK, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE}; + +enum options {OPT_CHARSETS_DIR=256, OPT_COMPRESS, OPT_DEFAULT_CHARSET, + OPT_TABLES, OPT_AUTO_REPAIR}; + +static struct option long_options[] = +{ + {"all-databases", no_argument, 0, 'A'}, + {"all-in-1", no_argument, 0, '1'}, + {"auto-repair", no_argument, 0, OPT_AUTO_REPAIR}, + {"analyze", no_argument, 0, 'a'}, + {"character-sets-dir", required_argument, 0, OPT_CHARSETS_DIR}, + {"check", no_argument, 0, 'c'}, + {"check-only-changed", no_argument, 0, 'C'}, + {"compress", no_argument, 0, OPT_COMPRESS}, + {"databases", no_argument, 0, 'B'}, + {"debug", optional_argument, 0, '#'}, + {"default-character-set", required_argument, 0, OPT_DEFAULT_CHARSET}, + {"fast", no_argument, 0, 'F'}, + {"force", no_argument, 0, 'f'}, + {"extended", no_argument, 0, 'e'}, + {"help", no_argument, 0, '?'}, + {"host", required_argument, 0, 'h'}, + {"medium-check", no_argument, 0, 'm'}, + {"optimize", no_argument, 0, 'o'}, + {"password", optional_argument, 0, 'p'}, +#ifdef __WIN__ + {"pipe", no_argument, 0, 'W'}, +#endif + {"port", required_argument, 0, 'P'}, + {"quick", no_argument, 0, 'q'}, + {"repair", no_argument, 0, 'r'}, + {"silent", no_argument, 0, 's'}, + {"socket", required_argument, 0, 'S'}, +#include "sslopt-longopts.h" + {"tables", no_argument, 0, OPT_TABLES}, +#ifndef DONT_ALLOW_USER_CHANGE + {"user", required_argument, 0, 'u'}, +#endif + {"verbose", no_argument, 0, 'v'}, + {"version", no_argument, 0, 'V'}, + {0, 0, 0, 0} +}; + +static const char *load_default_groups[] = { "mysqlcheck", "client", 0 }; + + +static void print_version(void); +static 
void usage(void); +static int get_options(int *argc, char ***argv); +static int process_all_databases(); +static int process_databases(char **db_names); +static int process_selected_tables(char *db, char **table_names, int tables); +static int process_all_tables_in_db(char *database); +static int use_db(char *database); +static int handle_request_for_tables(char *tables, uint length); +static int dbConnect(char *host, char *user,char *passwd); +static void dbDisconnect(char *host); +static void DBerror(MYSQL *mysql, const char *when); +static void safe_exit(int error); +static void print_result(); +int what_to_do = 0; + +static void print_version(void) +{ + printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, CHECK_VERSION, + MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); +} /* print_version */ + + +static void usage(void) +{ + print_version(); + puts("By Jani Tolonen, 2001-04-20, MySQL Development Team\n"); + puts("This software comes with ABSOLUTELY NO WARRANTY. This is free"); + puts("software and you are welcome to modify and redistribute it"); + puts("under the GPL license.\n"); + puts("This program can be used to CHECK (-c,-m,-C), REPAIR (-r), ANALYZE (-a)"); + puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be"); + puts("used same time. It works on MyISAM and in some cases on BDB tables."); + puts("Please consult the MySQL manual for latest information about the"); + puts("above. The options -c,-r,-a and -o are exclusive to each other, which"); + puts("means that the last option will be used, if several was specified.\n"); + puts("The option -c will be used by default, if none was specified. You"); + puts("can change the default behavior by making a symbolic link, or"); + puts("copying this file somewhere with another name, the alternatives are:"); + puts("mysqlrepair: The default option will be -r"); + puts("mysqlanalyze: The default option will be -a"); + puts("mysqloptimize: The default option will be -o\n"); + printf("Usage: %s [OPTIONS] database [tables]\n", my_progname); + printf("OR %s [OPTIONS] --databases DB1 [DB2 DB3...]\n", + my_progname); + printf("OR %s [OPTIONS] --all-databases\n", my_progname); + printf("\ + -A, --all-databases Check all the databases. This will be same as\n\ + --databases with all databases selected\n\ + -1, --all-in-1 Instead of making one query for each table, execute\n\ + all queries in 1 query separately for each database.\n\ + Table names will be in a comma separeted list.\n\ + -a, --analyze Analyze given tables.\n\ + --auto-repair If a checked table is corrupted, automatically fix\n\ + it. Repairing will be done after all tables have\n\ + been checked, if corrupted ones were found.\n\ + -#, --debug=... Output debug log. Often this is 'd:t:o,filename'\n\ + --character-sets-dir=...\n\ + Directory where character sets are\n\ + -c, --check Check table for errors\n\ + -C, --check-only-changed\n\ + Check only tables that have changed since last check\n\ + or haven't been closed properly.\n\ + --compress Use compression in server/client protocol.\n\ + -?, --help Display this help message and exit.\n\ + -B, --databases To check several databases. Note the difference in\n\ + usage; In this case no tables are given. 
All name\n\ + arguments are regarded as databasenames.\n\ + --default-character-set=...\n\ + Set the default character set\n\ + -F, --fast Check only tables that hasn't been closed properly\n\ + -f, --force Continue even if we get an sql-error.\n\ + -e, --extended If you are using this option with CHECK TABLE,\n\ + it will ensure that the table is 100 percent\n\ + consistent, but will take a long time.\n\n"); +printf("\ + If you are using this option with REPAIR TABLE,\n\ + it will run an extended repair on the table, which\n\ + may not only take a long time to execute, but\n\ + may produce a lot of garbage rows also!\n\ + -h, --host=... Connect to host.\n\ + -m, --medium-check Faster than extended-check, but only finds 99.99 percent\n\ + of all errors. Should be good enough for most cases.\n\ + -o, --optimize Optimize table\n\ + -p, --password[=...] Password to use when connecting to server.\n\ + If password is not given it's solicited on the tty.\n"); +#ifdef __WIN__ + puts("-W, --pipe Use named pipes to connect to server"); +#endif + printf("\ + -P, --port=... Port number to use for connection.\n\ + -q, --quick If you are using this option with CHECK TABLE, it\n\ + prevents the check from scanning the rows to check\n\ + for wrong links. This is the fastest check.\n\n\ + If you are using this option with REPAIR TABLE, it\n\ + will try to repair only the index tree. This is\n\ + the fastest repair method for a table.\n\ + -r, --repair Can fix almost anything except unique keys that aren't\n\ + unique.\n\ + -s, --silent Print only error messages.\n\ + -S, --socket=... Socket file to use for connection.\n\ + --tables Overrides option --databases (-B).\n"); +#include "sslopt-usage.h" +#ifndef DONT_ALLOW_USER_CHANGE + printf("\ + -u, --user=# User for login if not current user.\n"); +#endif + printf("\ + -v, --verbose Print info about the various stages.\n\ + -V, --version Output version information and exit.\n"); + print_defaults("my", load_default_groups); +} /* usage */ + + +static int get_options(int *argc, char ***argv) +{ + int c, option_index; + my_bool tty_password = 0; + + if (*argc == 1) + { + usage(); + exit(0); + } + + load_defaults("my", load_default_groups, argc, argv); + while ((c = getopt_long(*argc, *argv, "#::p::h:u:P:S:BaAcCdeFfmqorsvVw:?I1", + long_options, &option_index)) != EOF) + { + switch(c) { + case 'a': + what_to_do = DO_ANALYZE; + break; + case '1': + opt_all_in_1 = 1; + break; + case 'A': + opt_alldbs = 1; + break; + case OPT_AUTO_REPAIR: + opt_auto_repair = 1; + break; + case OPT_DEFAULT_CHARSET: + default_charset = optarg; + break; + case OPT_CHARSETS_DIR: + charsets_dir = optarg; + break; + case 'c': + what_to_do = DO_CHECK; + break; + case 'C': + what_to_do = DO_CHECK; + opt_check_only_changed = 1; + break; + case 'e': + opt_extended = 1; + break; + case OPT_COMPRESS: + opt_compress = 1; + break; + case 'B': + opt_databases = 1; + break; + case 'F': + opt_fast = 1; + break; + case 'f': + ignore_errors = 1; + break; + case 'I': /* Fall through */ + case '?': + usage(); + exit(0); + case 'h': + my_free(current_host, MYF(MY_ALLOW_ZERO_PTR)); + current_host = my_strdup(optarg, MYF(MY_WME)); + break; + case 'm': + what_to_do = DO_CHECK; + opt_medium_check = 1; + break; + case 'o': + what_to_do = DO_OPTIMIZE; + break; +#ifndef DONT_ALLOW_USER_CHANGE + case 'u': + current_user = optarg; + break; +#endif + case 'p': + if (optarg) + { + char *start = optarg; + my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR)); + opt_password = my_strdup(optarg, MYF(MY_FAE)); + while 
(*optarg) *optarg++= 'x'; /* Destroy argument */ + if (*start) + start[1] = 0; /* Cut length of argument */ + } + else + tty_password = 1; + break; + case 'P': + opt_mysql_port = (unsigned int) atoi(optarg); + break; + case 'q': + opt_quick = 1; + break; + case 'r': + what_to_do = DO_REPAIR; + break; + case 'S': + opt_mysql_unix_port = optarg; + break; + case 's': + opt_silent = 1; + break; + case 'W': +#ifdef __WIN__ + opt_mysql_unix_port = MYSQL_NAMEDPIPE; +#endif + break; + case '#': + DBUG_PUSH(optarg ? optarg : "d:t:o"); + break; + case OPT_TABLES: + opt_databases = 0; + break; + case 'v': + verbose++; + break; + case 'V': print_version(); exit(0); + default: + fprintf(stderr, "%s: Illegal option character '%c'\n", my_progname, + opterr); +#include "sslopt-case.h" + } + } + if (!what_to_do) + { + int pnlen = strlen(my_progname); + + if (pnlen < 6) // name too short + what_to_do = DO_CHECK; + else if (!strcmp("repair", my_progname + pnlen - 6)) + what_to_do = DO_REPAIR; + else if (!strcmp("analyze", my_progname + pnlen - 7)) + what_to_do = DO_ANALYZE; + else if (!strcmp("optimize", my_progname + pnlen - 8)) + what_to_do = DO_OPTIMIZE; + else + what_to_do = DO_CHECK; + } + if (default_charset) + { + if (set_default_charset_by_name(default_charset, MYF(MY_WME))) + exit(1); + } + (*argc) -= optind; + (*argv) += optind; + if (*argc > 0 && opt_alldbs) + { + printf("You should give only options, no arguments at all, with option\n"); + printf("--all-databases. Please see %s --help for more information.\n", + my_progname); + return 1; + } + if (*argc < 1 && !opt_alldbs) + { + printf("You forgot to give the arguments! Please see %s --help\n", + my_progname); + printf("for more information.\n"); + return 1; + } + if (tty_password) + opt_password = get_tty_password(NullS); + return(0); +} /* get_options */ + + +static int process_all_databases() +{ + MYSQL_ROW row; + MYSQL_RES *tableres; + int result = 0; + + if (mysql_query(sock, "SHOW DATABASES") || + !(tableres = mysql_store_result(sock))) + { + my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s", + MYF(0), mysql_error(sock)); + return 1; + } + while ((row = mysql_fetch_row(tableres))) + { + if (process_all_tables_in_db(row[0])) + result = 1; + } + return result; +} +/* process_all_databases */ + + +static int process_databases(char **db_names) +{ + int result = 0; + for ( ; *db_names ; db_names++) + { + if (process_all_tables_in_db(*db_names)) + result = 1; + } + return result; +} /* process_databases */ + + +static int process_selected_tables(char *db, char **table_names, int tables) +{ + if (use_db(db)) + return 1; + if (opt_all_in_1) + { + char *table_names_comma_sep, *end; + int i, tot_length = 0; + + for (i = 0; i < tables; i++) + tot_length += strlen(*(table_names + i)) + 1; + + if (!(table_names_comma_sep = (char *) + my_malloc((sizeof(char) * tot_length) + 1, MYF(MY_WME)))) + return 1; + + for (end = table_names_comma_sep + 1; tables > 0; + tables--, table_names++) + { + end = strmov(end, *table_names); + *end++= ','; + } + *--end = 0; + handle_request_for_tables(table_names_comma_sep + 1, tot_length - 1); + my_free(table_names_comma_sep, MYF(0)); + } + else + for (; tables > 0; tables--, table_names++) + handle_request_for_tables(*table_names, strlen(*table_names)); + return 0; +} /* process_selected_tables */ + + +static int process_all_tables_in_db(char *database) +{ + MYSQL_RES *res; + MYSQL_ROW row; + + LINT_INIT(res); + if (use_db(database)) + return 1; + if (!(mysql_query(sock, "SHOW TABLES") || + (res = 
mysql_store_result(sock)))) + return 1; + + if (opt_all_in_1) + { + char *tables, *end; + uint tot_length = 0; + + while ((row = mysql_fetch_row(res))) + tot_length += strlen(row[0]) + 1; + mysql_data_seek(res, 0); + + if (!(tables=(char *) my_malloc(sizeof(char)*tot_length+1, MYF(MY_WME)))) + { + mysql_free_result(res); + return 1; + } + for (end = tables + 1; (row = mysql_fetch_row(res)) ;) + { + end = strmov(end, row[0]); + *end++= ','; + } + *--end = 0; + if (tot_length) + handle_request_for_tables(tables + 1, tot_length - 1); + my_free(tables, MYF(0)); + } + else + { + while ((row = mysql_fetch_row(res))) + handle_request_for_tables(row[0], strlen(row[0])); + } + mysql_free_result(res); + return 0; +} /* process_all_tables_in_db */ + + +static int use_db(char *database) +{ + if (mysql_select_db(sock, database)) + { + DBerror(sock, "when selecting the database"); + return 1; + } + return 0; +} /* use_db */ + + +static int handle_request_for_tables(char *tables, uint length) +{ + char *query, *end, options[100]; + const char *op = 0; + + options[0] = 0; + switch (what_to_do) { + case DO_CHECK: + op = "CHECK"; + end = options; + if (opt_quick) end = strmov(end, "QUICK"); + if (opt_fast) end = strmov(end, "FAST"); + if (opt_medium_check) end = strmov(end, "MEDIUM"); /* Default */ + if (opt_extended) end = strmov(end, "EXTENDED"); + if (opt_check_only_changed) end = strmov(end, "CHANGED"); + break; + case DO_REPAIR: + op = "REPAIR"; + end = options; + if (opt_quick) end = strmov(end, "QUICK"); + if (opt_extended) end = strmov(end, "EXTENDED"); + break; + case DO_ANALYZE: + op = "ANALYZE"; + break; + case DO_OPTIMIZE: + op = "OPTIMIZE"; + break; + } + + if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME)))) + return 1; + sprintf(query, "%s TABLE %s %s", op, options, tables); + if (mysql_query(sock, query)) + { + sprintf(options, "when executing '%s TABLE'", op); + DBerror(sock, options); + return 1; + } + print_result(); + my_free(query, MYF(0)); + return 0; +} + + +static void print_result() +{ + MYSQL_RES *res; + MYSQL_ROW row; + char prev[NAME_LEN*2+2]; + int i; + + res = mysql_use_result(sock); + prev[0] = '\0'; + for (i = 0; (row = mysql_fetch_row(res)); i++) + { + int changed = strcmp(prev, row[0]); + int status = !strcmp(row[2], "status"); + if (opt_silent && status) + continue; + if (status && changed) + printf("%-50s %s", row[0], row[3]); + else if (!status && changed) + { + printf("%s\n%-9s: %s", row[0], row[2], row[3]); + if (what_to_do != DO_REPAIR && opt_auto_repair) + insert_dynamic(&tables4repair, row[0]); + } + else + printf("%-9s: %s", row[2], row[3]); + strmov(prev, row[0]); + putchar('\n'); + } + mysql_free_result(res); +} + + +static int dbConnect(char *host, char *user, char *passwd) +{ + DBUG_ENTER("dbConnect"); + if (verbose) + { + fprintf(stderr, "# Connecting to %s...\n", host ? host : "localhost"); + } + mysql_init(&mysql_connection); + if (opt_compress) + mysql_options(&mysql_connection, MYSQL_OPT_COMPRESS, NullS); +#ifdef HAVE_OPENSSL + if (opt_use_ssl) + mysql_ssl_set(&mysql_connection, opt_ssl_key, opt_ssl_cert, opt_ssl_ca, + opt_ssl_capath); +#endif + if (!(sock = mysql_real_connect(&mysql_connection, host, user, passwd, + NULL, opt_mysql_port, opt_mysql_unix_port, 0))) + { + DBerror(&mysql_connection, "when trying to connect"); + return 1; + } + return 0; +} /* dbConnect */ + + +static void dbDisconnect(char *host) +{ + if (verbose) + fprintf(stderr, "# Disconnecting from %s...\n", host ? 
host : "localhost"); + mysql_close(sock); +} /* dbDisconnect */ + + +static void DBerror(MYSQL *mysql, const char *when) +{ + DBUG_ENTER("DBerror"); + my_printf_error(0,"Got error: %d: %s %s", MYF(0), + mysql_errno(mysql), mysql_error(mysql), when); + safe_exit(EX_MYSQLERR); + DBUG_VOID_RETURN; +} /* DBerror */ + + +static void safe_exit(int error) +{ + if (!first_error) + first_error= error; + if (ignore_errors) + return; + if (sock) + mysql_close(sock); + exit(error); +} + + +int main(int argc, char **argv) +{ + MY_INIT(argv[0]); + /* + ** Check out the args + */ + if (get_options(&argc, &argv)) + { + my_end(0); + exit(EX_USAGE); + } + if (dbConnect(current_host, current_user, opt_password)) + exit(EX_MYSQLERR); + + if (opt_auto_repair && + init_dynamic_array(&tables4repair, sizeof(char)*(NAME_LEN*2+2),16,64)) + { + first_error = 1; + goto end; + } + + if (opt_alldbs) + process_all_databases(); + /* Only one database and selected table(s) */ + else if (argc > 1 && !opt_databases) + process_selected_tables(*argv, (argv + 1), (argc - 1)); + /* One or more databases, all tables */ + else + process_databases(argv); + if (opt_auto_repair) + { + uint i; + + if (!opt_silent && tables4repair.elements) + puts("\nRepairing tables"); + what_to_do = DO_REPAIR; + for (i = 0; i < tables4repair.elements ; i++) + { + char *name= (char*) dynamic_array_ptr(&tables4repair, i); + handle_request_for_tables(name, strlen(name)); + } + } + end: + dbDisconnect(current_host); + if (opt_auto_repair) + delete_dynamic(&tables4repair); + my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR)); + my_end(0); + return(first_error!=0); +} /* main */ diff --git a/client/mysqldump.c b/client/mysqldump.c index ce6c64aa00e..4893c13a0a0 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -37,7 +37,7 @@ ** Tõnu Samuel <tonu@please.do.not.remove.this.spam.ee> **/ -#define DUMP_VERSION "8.13" +#define DUMP_VERSION "8.14" #include <global.h> #include <my_sys.h> @@ -73,7 +73,7 @@ static my_bool verbose=0,tFlag=0,cFlag=0,dFlag=0,quick=0, extended_insert = 0, lock_tables=0,ignore_errors=0,flush_logs=0,replace=0, ignore=0,opt_drop=0,opt_keywords=0,opt_lock=0,opt_compress=0, opt_delayed=0,create_options=0,opt_quoted=0,opt_databases=0, - opt_alldbs=0,opt_create_db=0,opt_first_slave=0; + opt_alldbs=0,opt_create_db=0,opt_first_slave=0; static MYSQL mysql_connection,*sock=0; static char insert_pat[12 * 1024],*opt_password=0,*current_user=0, *current_host=0,*path=0,*fields_terminated=0, @@ -85,6 +85,7 @@ static int first_error=0; extern ulong net_buffer_length; static DYNAMIC_STRING extended_row; #include "sslopt-vars.h" +FILE *result_file; enum options {OPT_FTB=256, OPT_LTB, OPT_ENC, OPT_O_ENC, OPT_ESC, OPT_KEYWORDS, OPT_LOCKS, OPT_DROP, OPT_OPTIMIZE, OPT_DELAYED, OPT_TABLES, @@ -127,6 +128,7 @@ static struct option long_options[] = {"port", required_argument, 0, 'P'}, {"quick", no_argument, 0, 'q'}, {"quote-names", no_argument, 0, 'Q'}, + {"result-file", required_argument, 0, 'r'}, {"set-variable", required_argument, 0, 'O'}, {"socket", required_argument, 0, 'S'}, #include "sslopt-longopts.h" @@ -227,6 +229,10 @@ puts("\ -P, --port=... Port number to use for connection.\n\ -q, --quick Don't buffer query, dump directly to stdout.\n\ -Q, --quote-names Quote table and column names with `\n\ + -r, --result-file=... Direct output to a given file. This option should be\n\ + used in MSDOS, because it prevents new line '\\n'\n\ + from being converted to '\\n\\r' (newline + carriage\n\ + return).\n\ -S, --socket=... 
Socket file to use for connection.\n\ --tables Overrides option --databases (-B).\n"); #include "sslopt-usage.h" @@ -284,9 +290,11 @@ static int get_options(int *argc,char ***argv) int c,option_index; my_bool tty_password=0; + result_file=stdout; load_defaults("my",load_default_groups,argc,argv); set_all_changeable_vars(changeable_vars); - while ((c=getopt_long(*argc,*argv,"#::p::h:u:O:P:S:T:EBaAcCdefFlnqtvVw:?Ix", + while ((c=getopt_long(*argc,*argv, + "#::p::h:u:O:P:r:S:T:EBaAcCdefFlnqtvVw:?Ix", long_options, &option_index)) != EOF) { switch(c) { @@ -346,6 +354,11 @@ static int get_options(int *argc,char ***argv) case 'P': opt_mysql_port= (unsigned int) atoi(optarg); break; + case 'r': + if (!(result_file = my_fopen(optarg, O_WRONLY | O_BINARY, + MYF(MY_WME)))) + exit(1); + break; case 'S': opt_mysql_unix_port= optarg; break; @@ -589,7 +602,7 @@ static uint getTableStructure(char *table, char* db) char *strpos, *table_name; const char *delayed; char name_buff[NAME_LEN+3],table_buff[NAME_LEN+3]; - FILE *sql_file = stdout; + FILE *sql_file = result_file; DBUG_ENTER("getTableStructure"); delayed= opt_delayed ? " DELAYED " : ""; @@ -625,8 +638,8 @@ static uint getTableStructure(char *table, char* db) O_WRONLY, MYF(MY_WME)); if (!sql_file) /* If file couldn't be opened */ { - safe_exit(EX_MYSQLERR); - DBUG_RETURN(0); + safe_exit(EX_MYSQLERR); + DBUG_RETURN(0); } write_heder(sql_file, db); } @@ -724,9 +737,9 @@ static uint getTableStructure(char *table, char* db) if (init) { if (!tFlag) - fputs(",\n",sql_file); + fputs(",\n",sql_file); if (cFlag) - strpos=strmov(strpos,", "); + strpos=strmov(strpos,", "); } init=1; if (cFlag) @@ -734,20 +747,20 @@ static uint getTableStructure(char *table, char* db) if (!tFlag) { if (opt_keywords) - fprintf(sql_file, " %s.%s %s", table_name, - quote_name(row[SHOW_FIELDNAME],name_buff), row[SHOW_TYPE]); + fprintf(sql_file, " %s.%s %s", table_name, + quote_name(row[SHOW_FIELDNAME],name_buff), row[SHOW_TYPE]); else - fprintf(sql_file, " %s %s", quote_name(row[SHOW_FIELDNAME],name_buff), - row[SHOW_TYPE]); + fprintf(sql_file, " %s %s", quote_name(row[SHOW_FIELDNAME], + name_buff), row[SHOW_TYPE]); if (row[SHOW_DEFAULT]) { - fputs(" DEFAULT ", sql_file); - unescape(sql_file,row[SHOW_DEFAULT],lengths[SHOW_DEFAULT]); + fputs(" DEFAULT ", sql_file); + unescape(sql_file,row[SHOW_DEFAULT],lengths[SHOW_DEFAULT]); } if (!row[SHOW_NULL][0]) - fputs(" NOT NULL", sql_file); + fputs(" NOT NULL", sql_file); if (row[SHOW_EXTRA][0]) - fprintf(sql_file, " %s",row[SHOW_EXTRA]); + fprintf(sql_file, " %s",row[SHOW_EXTRA]); } } numFields = (uint) mysql_num_rows(tableRes); @@ -761,9 +774,9 @@ static uint getTableStructure(char *table, char* db) if (mysql_query(sock, buff)) { fprintf(stderr, "%s: Can't get keys for table '%s' (%s)\n", - my_progname, table, mysql_error(sock)); + my_progname, table, mysql_error(sock)); if (sql_file != stdout) - my_fclose(sql_file, MYF(MY_WME)); + my_fclose(sql_file, MYF(MY_WME)); safe_exit(EX_MYSQLERR); DBUG_RETURN(0); } @@ -776,16 +789,16 @@ static uint getTableStructure(char *table, char* db) { if (atoi(row[3]) == 1) { - keynr++; - #ifdef FORCE_PRIMARY_KEY - if (atoi(row[1]) == 0 && primary_key == INT_MAX) - primary_key=keynr; - #endif - if (!strcmp(row[2],"PRIMARY")) - { - primary_key=keynr; - break; - } + keynr++; +#ifdef FORCE_PRIMARY_KEY + if (atoi(row[1]) == 0 && primary_key == INT_MAX) + primary_key=keynr; +#endif + if (!strcmp(row[2],"PRIMARY")) + { + primary_key=keynr; + break; + } } } mysql_data_seek(tableRes,0); @@ -794,21 +807,21 @@ static 
uint getTableStructure(char *table, char* db) { if (atoi(row[3]) == 1) { - if (keynr++) - putc(')', sql_file); - if (atoi(row[1])) /* Test if duplicate key */ - /* Duplicate allowed */ - fprintf(sql_file, ",\n KEY %s (",quote_name(row[2],name_buff)); - else if (keynr == primary_key) - fputs(",\n PRIMARY KEY (",sql_file); /* First UNIQUE is primary */ - else - fprintf(sql_file, ",\n UNIQUE %s (",quote_name(row[2],name_buff)); + if (keynr++) + putc(')', sql_file); + if (atoi(row[1])) /* Test if duplicate key */ + /* Duplicate allowed */ + fprintf(sql_file, ",\n KEY %s (",quote_name(row[2],name_buff)); + else if (keynr == primary_key) + fputs(",\n PRIMARY KEY (",sql_file); /* First UNIQUE is primary */ + else + fprintf(sql_file, ",\n UNIQUE %s (",quote_name(row[2],name_buff)); } else - putc(',', sql_file); + putc(',', sql_file); fputs(quote_name(row[4],name_buff), sql_file); if (row[7]) - fprintf(sql_file, " (%s)",row[7]); /* Sub key */ + fprintf(sql_file, " (%s)",row[7]); /* Sub key */ } if (keynr) putc(')', sql_file); @@ -820,28 +833,28 @@ static uint getTableStructure(char *table, char* db) sprintf(buff,"show table status like '%s'",table); if (mysql_query(sock, buff)) { - if (mysql_errno(sock) != ER_PARSE_ERROR) - { /* If old MySQL version */ - if (verbose) - fprintf(stderr, - "# Warning: Couldn't get status information for table '%s' (%s)\n", - table,mysql_error(sock)); - } + if (mysql_errno(sock) != ER_PARSE_ERROR) + { /* If old MySQL version */ + if (verbose) + fprintf(stderr, + "# Warning: Couldn't get status information for table '%s' (%s)\n", + table,mysql_error(sock)); + } } else if (!(tableRes=mysql_store_result(sock)) || - !(row=mysql_fetch_row(tableRes))) + !(row=mysql_fetch_row(tableRes))) { - fprintf(stderr, - "Error: Couldn't read status information for table '%s' (%s)\n", - table,mysql_error(sock)); + fprintf(stderr, + "Error: Couldn't read status information for table '%s' (%s)\n", + table,mysql_error(sock)); } else { - fputs("/*!",sql_file); - print_value(sql_file,tableRes,row,"type=","Type",0); - print_value(sql_file,tableRes,row,"","Create_options",0); - print_value(sql_file,tableRes,row,"comment=","Comment",1); - fputs(" */",sql_file); + fputs("/*!",sql_file); + print_value(sql_file,tableRes,row,"type=","Type",0); + print_value(sql_file,tableRes,row,"","Create_options",0); + print_value(sql_file,tableRes,row,"comment=","Comment",1); + fputs(" */",sql_file); } mysql_free_result(tableRes); /* Is always safe to free */ } @@ -960,14 +973,14 @@ static void dumpTable(uint numFields, char *table) } else { - printf("\n#\n# Dumping data for table '%s'\n", table); + fprintf(result_file,"\n#\n# Dumping data for table '%s'\n", table); sprintf(query, "SELECT * FROM %s", quote_name(table,table_buff)); if (where) { - printf("# WHERE: %s\n",where); + fprintf(result_file,"# WHERE: %s\n",where); strxmov(strend(query), " WHERE ",where,NullS); } - puts("#\n"); + fputs("#\n\n", result_file); if (mysql_query(sock, query)) { @@ -994,7 +1007,8 @@ static void dumpTable(uint numFields, char *table) } if (opt_lock) - printf("LOCK TABLES %s WRITE;\n", quote_name(table,table_buff)); + fprintf(result_file,"LOCK TABLES %s WRITE;\n", + quote_name(table,table_buff)); total_length=net_buffer_length; /* Force row break */ row_break=0; @@ -1007,7 +1021,7 @@ static void dumpTable(uint numFields, char *table) ulong *lengths=mysql_fetch_lengths(res); rownr++; if (!extended_insert) - fputs(insert_pat,stdout); + fputs(insert_pat,result_file); mysql_field_seek(res,0); for (i = 0; i < mysql_num_fields(res); i++) 
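The mysqldump hunks above and below all follow one pattern: every direct write to stdout (printf, puts, putchar) is routed through the new result_file handle, so that -r/--result-file can redirect the whole dump, and the file is opened with O_BINARY so that MSDOS does not turn '\n' into '\r\n'. A minimal, self-contained sketch of that pattern follows; it is not the real mysqldump code, and the names (out, emit_row) are purely illustrative.

/*
  Hedged sketch only: route all dump output through one FILE* ('out');
  it defaults to stdout and is swapped for a file opened in binary
  mode when a result file is requested, as get_options() does above.
*/
#include <stdio.h>

static FILE *out;                        /* where the dump is written */

static void emit_row(const char *values)
{
  fprintf(out, "INSERT INTO t1 VALUES (%s);\n", values);
}

int main(int argc, char **argv)
{
  out = stdout;                          /* default: dump to stdout */
  if (argc > 1 && !(out = fopen(argv[1], "wb")))  /* "wb" ~ O_WRONLY|O_BINARY */
    return 1;                            /* could not open result file */
  emit_row("1,'a'");
  emit_row("2,'b'");
  fflush(out);
  if (out != stdout)
    fclose(out);
  return 0;
}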
@@ -1061,17 +1075,17 @@ static void dumpTable(uint numFields, char *table) else { if (i) - putchar(','); + fputc(',',result_file); if (row[i]) { if (!IS_NUM_FIELD(field)) - unescape(stdout, row[i], lengths[i]); + unescape(result_file, row[i], lengths[i]); else - fputs(row[i],stdout); + fputs(row[i],result_file); } else { - fputs("NULL",stdout); + fputs("NULL",result_file); } } } @@ -1084,27 +1098,25 @@ static void dumpTable(uint numFields, char *table) if (total_length + row_length < net_buffer_length) { total_length += row_length; - putchar(','); /* Always row break */ - fputs(extended_row.str,stdout); + fputc(',',result_file); /* Always row break */ + fputs(extended_row.str,result_file); } else { if (row_break) - puts(";"); + fputs(";\n", result_file); row_break=1; /* This is first row */ - fputs(insert_pat,stdout); - fputs(extended_row.str,stdout); + fputs(insert_pat,result_file); + fputs(extended_row.str,result_file); total_length = row_length+init_length; } } else - { - puts(");"); - } + fputs(");\n", result_file); } if (extended_insert && row_break) - puts(";"); /* If not empty table */ - fflush(stdout); + fputs(";\n", result_file); /* If not empty table */ + fflush(result_file); if (mysql_errno(sock)) { sprintf(query,"%s: Error %d: %s when dumping table '%s' at row: %ld\n", @@ -1118,7 +1130,7 @@ static void dumpTable(uint numFields, char *table) return; } if (opt_lock) - puts("UNLOCK TABLES;"); + fputs("UNLOCK TABLES;\n", result_file); mysql_free_result(res); } } /* dumpTable */ @@ -1194,10 +1206,11 @@ static int init_dumping(char *database) { if (opt_databases || opt_alldbs) { - printf("\n#\n# Current Database: %s\n#\n", database); + fprintf(result_file,"\n#\n# Current Database: %s\n#\n", database); if (!opt_create_db) - printf("\nCREATE DATABASE /*!32312 IF NOT EXISTS*/ %s;\n", database); - printf("\nUSE %s;\n", database); + fprintf(result_file,"\nCREATE DATABASE /*!32312 IF NOT EXISTS*/ %s;\n", + database); + fprintf(result_file,"\nUSE %s;\n", database); } } if (extended_insert) @@ -1329,7 +1342,7 @@ int main(int argc, char **argv) if (dbConnect(current_host, current_user, opt_password)) exit(EX_MYSQLERR); if (!path) - write_heder(stdout, *argv); + write_heder(result_file, *argv); if (opt_first_slave) { @@ -1365,7 +1378,9 @@ int main(int argc, char **argv) } } dbDisconnect(current_host); - puts(""); + fputs("\n", result_file); + if (result_file != stdout) + my_fclose(result_file, MYF(0)); my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR)); if (extended_insert) dynstr_free(&extended_row); diff --git a/client/mysqltest.c b/client/mysqltest.c index 09138f93df6..e1ca5638340 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -569,7 +569,7 @@ int eval_expr(VAR* v, const char* p, const char** p_end) else { v->str_val = (char*)p; - v->str_val_len = (p_end && *p_end) ? *p_end - p : strlen(p); + v->str_val_len = (p_end && *p_end) ? 
(int) (*p_end - p) : (int) strlen(p); v->int_val=atoi(p); v->int_dirty=0; return 0; @@ -1758,6 +1758,7 @@ static void init_var_hash() die("Variable hash initialization failed"); var_from_env("MASTER_MYPORT", "9306"); var_from_env("SLAVE_MYPORT", "9307"); + var_from_env("MYSQL_TEST_DIR", ""); } int main(int argc, char** argv) diff --git a/configure.in b/configure.in index cae90eeefe8..853086c8012 100644 --- a/configure.in +++ b/configure.in @@ -119,6 +119,17 @@ AC_PROG_AWK AC_PROG_CC AC_PROG_CXX AC_PROG_CPP + +# Fix for sgi gcc / sgiCC which tries to emulate gcc +if test "$CC" = "sgicc" +then + ac_cv_prog_gcc="no" +fi +if test "$CXX" = "sgi++" +then + GXX="no" +fi + if test "$ac_cv_prog_gcc" = "yes" then AS="$CC -c" @@ -1248,7 +1259,7 @@ fi AC_SUBST(COMPILATION_COMMENT) AC_MSG_CHECKING("need of special linking flags") -if test "$IS_LINUX" = "true" -a "$all_is_static" != "yes" +if test "$IS_LINUX" = "true" -a "$ac_cv_prog_gcc" = "yes" -a "$all_is_static" != "yes" then LDFLAGS="$LDFLAGS -rdynamic" AC_MSG_RESULT("-rdynamic") @@ -1326,10 +1337,11 @@ MYSQL_CXX_BOOL MYSQL_CHECK_LONGLONG_TO_FLOAT if test "$ac_cv_conv_longlong_to_float" != "yes" then - AC_MSG_ERROR([Your compiler can't convert a longlong value to a float! + AC_MSG_ERROR([Your compiler cannot convert a longlong value to a float! If you are using gcc 2.8.# you should upgrade to egcs 1.0.3 or newer and try again]); fi +MYSQL_PTHREAD_YIELD ###################################################################### # For readline-4.0 (We simply move the mimimum amount of stuff from @@ -1386,7 +1398,7 @@ AC_CHECK_FUNCS(alarm bmove \ sigset sigthreadmask pthread_sigmask pthread_setprio pthread_setprio_np \ pthread_setschedparam pthread_attr_setprio pthread_attr_setschedparam \ pthread_attr_create pthread_getsequence_np pthread_attr_setstacksize \ - pthread_condattr_create rwlock_init pthread_rwlock_rdlock pthread_yield\ + pthread_condattr_create rwlock_init pthread_rwlock_rdlock \ fchmod getpass getpassphrase initgroups mlockall) # Sanity check: We chould not have any fseeko symbol unless @@ -2103,7 +2115,7 @@ AC_OUTPUT(Makefile extra/Makefile mysys/Makefile isam/Makefile \ merge/Makefile dbug/Makefile scripts/Makefile \ include/Makefile sql-bench/Makefile \ tests/Makefile Docs/Makefile support-files/Makefile \ - mysql-test/Makefile fs/Makefile \ + mysql-test/Makefile \ include/mysql_version.h , , [ test -z "$CONFIG_HEADERS" || echo timestamp > stamp-h diff --git a/extra/resolve_stack_dump.c b/extra/resolve_stack_dump.c index 8976a7698b8..bda23a41efd 100644 --- a/extra/resolve_stack_dump.c +++ b/extra/resolve_stack_dump.c @@ -303,7 +303,7 @@ static void do_resolve() uchar* addr = (uchar*)read_addr(&p); if(resolve_addr(addr, &se)) fprintf(fp_out, "%p %s + %d\n", addr, se.symbol, - addr - se.addr); + (int) (addr - se.addr)); else fprintf(fp_out, "%p (?)\n", addr); diff --git a/include/global.h b/include/global.h index 2c9157630f4..a11600a96fc 100644 --- a/include/global.h +++ b/include/global.h @@ -332,7 +332,8 @@ typedef int File; /* File descriptor */ typedef int my_socket; /* File descriptor for sockets */ #define INVALID_SOCKET -1 #endif -typedef RETSIGTYPE sig_handler; /* Function to handle signals */ +/* Type for fuctions that handles signals */ +#define sig_handler RETSIGTYPE typedef void (*sig_return)();/* Returns type from signal */ #if defined(__GNUC__) && !defined(_lint) typedef char pchar; /* Mixed prototypes can take char */ diff --git a/include/m_ctype.h b/include/m_ctype.h index 438b7b34c9a..645c07b79ae 100644 --- 
a/include/m_ctype.h +++ b/include/m_ctype.h @@ -66,6 +66,7 @@ extern CHARSET_INFO compiled_charsets[]; #endif /* Don't include std ctype.h when this is included */ #define _CTYPE_H +#define _CTYPE_H_ #define _CTYPE_INCLUDED #define __CTYPE_INCLUDED #define _CTYPE_USING /* Don't put names in global namespace. */ diff --git a/include/myisam.h b/include/myisam.h index 8139faaa14e..8e68a3f75c9 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -333,9 +333,9 @@ typedef struct st_mi_check_param ulonglong unique_count[MI_MAX_KEY_SEG+1]; ha_checksum key_crc[MI_MAX_POSSIBLE_KEY]; ulong rec_per_key_part[MI_MAX_KEY_SEG*MI_MAX_POSSIBLE_KEY]; - void* thd; - char* table_name; - char* op_name; + void *thd; + char *db_name,*table_name; + char *op_name; } MI_CHECK; diff --git a/include/mysql.h b/include/mysql.h index 350ce860a2f..b5d918a98af 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -20,6 +20,14 @@ #ifndef _mysql_h #define _mysql_h +#ifdef __CYGWIN__ /* CYGWIN implements a UNIX API */ +#undef WIN +#undef _WIN +#undef _WIN32 +#undef _WIN64 +#undef __WIN__ +#endif + #ifndef MYSQL_SERVER #ifdef __cplusplus extern "C" { diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c index 34fbc5b9f98..0046a3761a6 100644 --- a/innobase/buf/buf0buf.c +++ b/innobase/buf/buf0buf.c @@ -204,7 +204,28 @@ ulint buf_dbg_counter = 0; /* This is used to insert validation ibool buf_debug_prints = FALSE; /* If this is set TRUE, the program prints info whenever read-ahead or flush occurs */ - + +/************************************************************************ +Calculates a page checksum which is stored to the page when it is written +to a file. Note that we must be careful to calculate the same value +on 32-bit and 64-bit architectures. */ + +ulint +buf_calc_page_checksum( +/*===================*/ + /* out: checksum */ + byte* page) /* in: buffer page */ +{ + ulint checksum; + + checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN); + + ut_fold_binary(page + FIL_PAGE_DATA, UNIV_PAGE_SIZE - FIL_PAGE_DATA + - FIL_PAGE_END_LSN); + checksum = checksum & 0xFFFFFFFF; + + return(checksum); +} + /************************************************************************ Initializes a buffer control block when the buf_pool is created. 
*/ static @@ -1171,12 +1192,36 @@ buf_page_io_complete( dulint id; dict_index_t* index; ulint io_type; + ulint checksum; ut_ad(block); io_type = block->io_fix; if (io_type == BUF_IO_READ) { + checksum = buf_calc_page_checksum(block->frame); + + /* From version 3.23.38 up we store the page checksum + to the 4 upper bytes of the page end lsn field */ + + if ((mach_read_from_4(block->frame + FIL_PAGE_LSN + 4) + != mach_read_from_4(block->frame + UNIV_PAGE_SIZE + - FIL_PAGE_END_LSN + 4)) + || (checksum != mach_read_from_4(block->frame + + UNIV_PAGE_SIZE + - FIL_PAGE_END_LSN) + && mach_read_from_4(block->frame + FIL_PAGE_LSN) + != mach_read_from_4(block->frame + + UNIV_PAGE_SIZE + - FIL_PAGE_END_LSN))) { + fprintf(stderr, + "InnoDB: Database page corruption or a failed\n" + "InnoDB: file read of page %lu.\n", block->offset); + fprintf(stderr, + "InnoDB: You may have to recover from a backup.\n"); + exit(1); + } + if (recv_recovery_is_on()) { recv_recover_page(TRUE, block->frame, block->space, block->offset); @@ -1208,17 +1253,8 @@ buf_page_io_complete( ut_ad(buf_pool->n_pend_reads > 0); buf_pool->n_pend_reads--; buf_pool->n_pages_read++; -/* - if (0 != ut_dulint_cmp( - mach_read_from_8(block->frame + FIL_PAGE_LSN), - mach_read_from_8(block->frame + UNIV_PAGE_SIZE - - FIL_PAGE_END_LSN))) { - printf("DB error: file page corrupted!\n"); - ut_error; - } -*/ rw_lock_x_unlock_gen(&(block->lock), BUF_IO_READ); rw_lock_x_unlock_gen(&(block->read_lock), BUF_IO_READ); diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c index 443256cca34..90bdde1ebc6 100644 --- a/innobase/buf/buf0flu.c +++ b/innobase/buf/buf0flu.c @@ -222,6 +222,12 @@ buf_flush_write_block_low( mach_write_to_8(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN, block->newest_modification); + /* We overwrite the first 4 bytes of the end lsn field to store + a page checksum */ + + mach_write_to_4(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN, + buf_calc_page_checksum(block->frame)); + fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER, FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE, (void*)block->frame, (void*)block); diff --git a/innobase/buf/buf0rea.c b/innobase/buf/buf0rea.c index 13e9ed0476b..644dd226a0e 100644 --- a/innobase/buf/buf0rea.c +++ b/innobase/buf/buf0rea.c @@ -73,11 +73,13 @@ buf_read_page_low( sync = TRUE; } #endif - if (trx_sys_hdr_page(space, offset)) { + if (ibuf_bitmap_page(offset) || trx_sys_hdr_page(space, offset)) { /* Trx sys header is so low in the latching order that we play safe and do not leave the i/o-completion to an asynchronous - i/o-thread: */ + i/o-thread. Ibuf bitmap pages must always be read with + syncronous i/o, to make sure they do not get involved in + thread deadlocks. 
*/ sync = TRUE; } diff --git a/innobase/configure.in b/innobase/configure.in index 0bcc53cc05b..2ed456ff0b1 100644 --- a/innobase/configure.in +++ b/innobase/configure.in @@ -4,18 +4,94 @@ AC_CANONICAL_SYSTEM AM_MAINTAINER_MODE AM_CONFIG_HEADER(ib_config.h) AM_INIT_AUTOMAKE(ib, 0.90) + +# This is need before AC_PROG_CC +# + +if test "x${CFLAGS-}" = x ; then + cflags_is_set=no +else + cflags_is_set=yes +fi + +if test "x${CPPFLAGS-}" = x ; then + cppflags_is_set=no +else + cppflags_is_set=yes +fi + +if test "x${LDFLAGS-}" = x ; then + ldflags_is_set=no +else + ldflags_is_set=yes +fi + +# The following hack should ensure that configure doesn't add optimizing +# or debugging flags to CFLAGS or CXXFLAGS +CFLAGS="$CFLAGS " +CXXFLAGS="$CXXFLAGS " + AC_PROG_CC AC_PROG_RANLIB AC_PROG_INSTALL AC_CHECK_HEADERS(aio.h sched.h) AC_CHECK_SIZEOF(int, 4) AC_CHECK_FUNCS(sched_yield) -AC_C_INLINE +#AC_C_INLINE Already checked in MySQL AC_C_BIGENDIAN +# Build optimized or debug version ? +# First check for gcc and g++ +if test "$ac_cv_prog_gcc" = "yes" +then + DEBUG_CFLAGS="-g" + DEBUG_OPTIMIZE_CC="-O" + OPTIMIZE_CFLAGS="$MAX_C_OPTIMIZE" +else + DEBUG_CFLAGS="-g" + DEBUG_OPTIMIZE_CC="" + OPTIMIZE_CFLAGS="-O" +fi +if test "$ac_cv_prog_cxx_g" = "yes" +then + DEBUG_CXXFLAGS="-g" + DEBUG_OPTIMIZE_CXX="-O" + OPTIMIZE_CXXFLAGS="-O3" +else + DEBUG_CXXFLAGS="-g" + DEBUG_OPTIMIZE_CXX="" + OPTIMIZE_CXXFLAGS="-O" +fi +AC_ARG_WITH(debug, + [ --without-debug Build a production version without debugging code], + [with_debug=$withval], + [with_debug=no]) +if test "$with_debug" = "yes" +then + # Medium debug. + CFLAGS="$DEBUG_CFLAGS $DEBUG_OPTIMIZE_CC -DDBUG_ON -DSAFE_MUTEX $CFLAGS" + CXXFLAGS="$DEBUG_CXXFLAGS $DEBUG_OPTIMIZE_CXX -DSAFE_MUTEX $CXXFLAGS" +elif test "$with_debug" = "full" +then + # Full debug. Very slow in some cases + CFLAGS="$DEBUG_CFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC $CFLAGS" + CXXFLAGS="$DEBUG_CXXFLAGS -DSAFE_MUTEX -DSAFEMALLOC $CXXFLAGS" +else + # Optimized version. No debug + CFLAGS="$OPTIMIZE_CFLAGS -DDBUG_OFF $CFLAGS -DDEBUG_OFF" + CXXFLAGS="$OPTIMIZE_CXXFLAGS -DDBUG_OFF $CXXFLAGS -DDEBUG_OFF" +fi + case "$target_os" in - hp*) AC_DEFINE(UNIV_MUST_NOT_INLINE, 1, - No inlining because gcc broken on HP-UX);; + hp*) + CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; + irix*) + CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; + osf*) + CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; + sysv5uw7*) + # Problem when linking on SCO + CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; esac AC_OUTPUT(Makefile os/Makefile ut/Makefile btr/Makefile diff --git a/innobase/include/Makefile.i b/innobase/include/Makefile.i index 91dd9892bdf..8c7e9910f26 100644 --- a/innobase/include/Makefile.i +++ b/innobase/include/Makefile.i @@ -4,7 +4,5 @@ libsdir = ../libs INCLUDES = -I../../include -I../include -CFLAGS= -g -O2 -DDEBUG_OFF - # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h index 09883fbb037..5e90f5952fc 100644 --- a/innobase/include/buf0buf.h +++ b/innobase/include/buf0buf.h @@ -342,6 +342,16 @@ buf_frame_get_modify_clock( /*=======================*/ /* out: value */ buf_frame_t* frame); /* in: pointer to a frame */ +/************************************************************************ +Calculates a page checksum which is stored to the page when it is written +to a file. Note that we must be careful to calculate the same value +on 32-bit and 64-bit architectures. 
*/ + +ulint +buf_calc_page_checksum( +/*===================*/ + /* out: checksum */ + byte* page); /* in: buffer page */ /************************************************************************** Gets the page number of a pointer pointing within a buffer frame containing a file page. */ diff --git a/innobase/include/que0que.h b/innobase/include/que0que.h index bd21a9801aa..4cbd888ba1d 100644 --- a/innobase/include/que0que.h +++ b/innobase/include/que0que.h @@ -117,7 +117,6 @@ que_thr_stop( /************************************************************************** Moves a thread from another state to the QUE_THR_RUNNING state. Increments the n_active_thrs counters of the query graph and transaction. */ -UNIV_INLINE void que_thr_move_to_run_state_for_mysql( /*================================*/ @@ -126,7 +125,6 @@ que_thr_move_to_run_state_for_mysql( /************************************************************************** A patch for MySQL used to 'stop' a dummy query thread used in MySQL select, when there is no error or lock wait. */ -UNIV_INLINE void que_thr_stop_for_mysql_no_error( /*============================*/ diff --git a/innobase/include/que0que.ic b/innobase/include/que0que.ic index e19198aad0e..ae4ed10560f 100644 --- a/innobase/include/que0que.ic +++ b/innobase/include/que0que.ic @@ -256,49 +256,3 @@ que_graph_is_select( return(FALSE); } - -/************************************************************************** -Moves a thread from another state to the QUE_THR_RUNNING state. Increments -the n_active_thrs counters of the query graph and transaction if thr was -not active. */ -UNIV_INLINE -void -que_thr_move_to_run_state_for_mysql( -/*================================*/ - que_thr_t* thr, /* in: an query thread */ - trx_t* trx) /* in: transaction */ -{ - if (!thr->is_active) { - - (thr->graph)->n_active_thrs++; - - trx->n_active_thrs++; - - thr->is_active = TRUE; - - ut_ad((thr->graph)->n_active_thrs == 1); - ut_ad(trx->n_active_thrs == 1); - } - - thr->state = QUE_THR_RUNNING; -} - -/************************************************************************** -A patch for MySQL used to 'stop' a dummy query thread used in MySQL -select, when there is no error or lock wait. */ -UNIV_INLINE -void -que_thr_stop_for_mysql_no_error( -/*============================*/ - que_thr_t* thr, /* in: query thread */ - trx_t* trx) /* in: transaction */ -{ - ut_ad(thr->state == QUE_THR_RUNNING); - - thr->state = QUE_THR_COMPLETED; - - thr->is_active = FALSE; - (thr->graph)->n_active_thrs--; - - trx->n_active_thrs--; -} diff --git a/innobase/include/sync0sync.h b/innobase/include/sync0sync.h index f22cce17a1a..03dd45816aa 100644 --- a/innobase/include/sync0sync.h +++ b/innobase/include/sync0sync.h @@ -55,6 +55,7 @@ Calling this function is obligatory only if the memory buffer containing the mutex is freed. Removes a mutex object from the mutex list. The mutex is checked to be in the reset state. 
*/ +#undef mutex_free /* Fix for MacOS X */ void mutex_free( /*=======*/ diff --git a/innobase/include/univ.i b/innobase/include/univ.i index 5e74b7eb09b..fa5a8aef389 100644 --- a/innobase/include/univ.i +++ b/innobase/include/univ.i @@ -9,7 +9,7 @@ Created 1/20/1994 Heikki Tuuri #ifndef univ_i #define univ_i -#if (defined(_WIN32) || defined(_WIN64)) +#if (defined(_WIN32) || defined(_WIN64)) && !defined(MYSQL_SERVER) #define __WIN__ #include <windows.h> @@ -20,18 +20,28 @@ be defined: #define CRITICAL_SECTION ulint */ +#ifdef _NT_ +#define __NT__ +#endif + #else /* The Unix version */ +/* Most C compilers other than gcc do not know 'extern inline' */ +#if !defined(__GNUC__) && !defined(__WIN__) +#define UNIV_MUST_NOT_INLINE +#endif + /* Include two header files from MySQL to make the Unix flavor used in compiling more Posix-compatible. We assume that 'innobase' is a subdirectory of 'mysql'. */ #include <global.h> #include <my_pthread.h> +#ifndef __WIN__ /* Include <sys/stat.h> to get S_I... macros defined for os0file.c */ #include <sys/stat.h> - +#endif #undef PACKAGE #undef VERSION diff --git a/innobase/include/ut0dbg.h b/innobase/include/ut0dbg.h index a36b022e036..751609b244e 100644 --- a/innobase/include/ut0dbg.h +++ b/innobase/include/ut0dbg.h @@ -9,9 +9,9 @@ Created 1/30/1994 Heikki Tuuri #ifndef ut0dbg_h #define ut0dbg_h +#include "univ.i" #include <assert.h> #include <stdlib.h> -#include "univ.i" #include "os0thread.h" extern ulint ut_dbg_zero; /* This is used to eliminate diff --git a/innobase/include/ut0mem.h b/innobase/include/ut0mem.h index 4d266f34c17..fa46514fe16 100644 --- a/innobase/include/ut0mem.h +++ b/innobase/include/ut0mem.h @@ -9,9 +9,9 @@ Created 5/30/1994 Heikki Tuuri #ifndef ut0mem_h #define ut0mem_h +#include "univ.i" #include <string.h> #include <stdlib.h> -#include "univ.i" UNIV_INLINE void* diff --git a/innobase/include/ut0ut.h b/innobase/include/ut0ut.h index 05d4f455c58..f2c4781c167 100644 --- a/innobase/include/ut0ut.h +++ b/innobase/include/ut0ut.h @@ -9,10 +9,9 @@ Created 1/20/1994 Heikki Tuuri #ifndef ut0ut_h #define ut0ut_h -#include <time.h> -#include <ctype.h> - #include "univ.i" +#include <time.h> +#include <m_ctype.h> typedef time_t ib_time_t; diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c index 894ef9c3840..e93cd3f0364 100644 --- a/innobase/log/log0recv.c +++ b/innobase/log/log0recv.c @@ -882,12 +882,6 @@ recv_recover_page( recv = UT_LIST_GET_NEXT(rec_list, recv); } - /* If the following assert fails, the file page is incompletely - written, and a recovery from a backup is required */ - - ut_a(0 == ut_dulint_cmp(mach_read_from_8(page + FIL_PAGE_LSN), - mach_read_from_8(page + UNIV_PAGE_SIZE - - FIL_PAGE_END_LSN))); mutex_enter(&(recv_sys->mutex)); recv_addr->state = RECV_PROCESSED; diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c index 8e9b8482259..b3cb86a1178 100644 --- a/innobase/os/os0file.c +++ b/innobase/os/os0file.c @@ -15,9 +15,6 @@ Created 10/21/1995 Heikki Tuuri /* We assume in this case that the OS has standard Posix aio (at least SunOS 2.6, HP-UX 11i and AIX 4.3 have) */ -#undef __USE_FILE_OFFSET64 - -#include <aio.h> #endif /* We use these mutexes to protect lseek + file i/o operation, if the @@ -163,7 +160,6 @@ os_file_handle_error( os_file_t file, /* in: file pointer */ char* name) /* in: name of a file or NULL */ { - int input_char; ulint err; UT_NOT_USED(file); @@ -171,33 +167,19 @@ os_file_handle_error( err = os_file_get_last_error(); if (err == OS_FILE_DISK_FULL) { -ask_again: - printf("\n"); + 
fprintf(stderr, "\n"); if (name) { - printf( - "Innobase encountered a problem with file %s.\n", + fprintf(stderr, + "InnoDB: Encountered a problem with file %s.\n", name); } - printf("Disk is full. Try to clean the disk to free space\n"); - printf("before answering the following: How to continue?\n"); - printf("(Y == freed some space: try again)\n"); - printf("(N == crash the database: will restart it)?\n"); -ask_with_no_question: - input_char = getchar(); - - if (input_char == (int) 'N') { - ut_error; - - return(FALSE); - } else if (input_char == (int) 'Y') { + fprintf(stderr, + "InnoDB: Cannot continue operation.\n" + "InnoDB: Disk is full. Try to clean the disk to free space.\n" + "InnoDB: Delete possible created file and restart.\n"); - return(TRUE); - } else if (input_char == (int) '\n') { + exit(1); - goto ask_with_no_question; - } else { - goto ask_again; - } } else if (err == OS_FILE_AIO_RESOURCES_RESERVED) { return(TRUE); @@ -534,8 +516,10 @@ os_file_pread( ulint n, /* in: number of bytes to read */ ulint offset) /* in: offset from where to read */ { + off_t offs = (off_t)offset; + #ifdef HAVE_PREAD - return(pread(file, buf, n, (off_t) offset)); + return(pread(file, buf, n, offs)); #else ssize_t ret; ulint i; @@ -545,7 +529,7 @@ os_file_pread( os_mutex_enter(os_file_seek_mutexes[i]); - ret = lseek(file, (off_t) offset, 0); + ret = lseek(file, offs, 0); if (ret < 0) { os_mutex_exit(os_file_seek_mutexes[i]); @@ -573,10 +557,19 @@ os_file_pwrite( ulint n, /* in: number of bytes to write */ ulint offset) /* in: offset where to write */ { + ssize_t ret; + off_t offs = (off_t)offset; + #ifdef HAVE_PWRITE - return(pwrite(file, buf, n, (off_t) offset)); + ret = pwrite(file, buf, n, offs); + + /* Always do fsync to reduce the probability that when the OS crashes, + a database page is only partially physically written to disk. */ + + ut_a(TRUE == os_file_flush(file)); + + return(ret); #else - ssize_t ret; ulint i; /* Protect the seek / write operation with a mutex */ @@ -584,7 +577,7 @@ os_file_pwrite( os_mutex_enter(os_file_seek_mutexes[i]); - ret = lseek(file, (off_t) offset, 0); + ret = lseek(file, offs, 0); if (ret < 0) { os_mutex_exit(os_file_seek_mutexes[i]); @@ -594,6 +587,11 @@ os_file_pwrite( ret = write(file, buf, n); + /* Always do fsync to reduce the probability that when the OS crashes, + a database page is only partially physically written to disk. */ + + ut_a(TRUE == os_file_flush(file)); + os_mutex_exit(os_file_seek_mutexes[i]); return(ret); @@ -662,7 +660,6 @@ try_again: #else ibool retry; ssize_t ret; - ulint i; #if (UNIV_WORD_SIZE == 8) offset = offset + (offset_high << 32); @@ -670,15 +667,9 @@ try_again: UT_NOT_USED(offset_high); #endif try_again: - /* Protect the seek / read operation with a mutex */ - i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; - - os_mutex_enter(os_file_seek_mutexes[i]); - - ret = os_file_pread(file, buf, n, (off_t) offset); + ret = os_file_pread(file, buf, n, offset); if ((ulint)ret == n) { - os_mutex_exit(os_file_seek_mutexes[i]); return(TRUE); } @@ -747,9 +738,14 @@ try_again: } ret = WriteFile(file, buf, n, &len, NULL); + + /* Always do fsync to reduce the probability that when the OS crashes, + a database page is only partially physically written to disk. 
*/ + + ut_a(TRUE == os_file_flush(file)); os_mutex_exit(os_file_seek_mutexes[i]); - + if (ret && len == n) { return(TRUE); } @@ -763,7 +759,7 @@ try_again: UT_NOT_USED(offset_high); #endif try_again: - ret = os_file_pwrite(file, buf, n, (off_t) offset); + ret = os_file_pwrite(file, buf, n, offset); if ((ulint)ret == n) { return(TRUE); @@ -1344,6 +1340,10 @@ try_again: } } else if (mode == OS_AIO_IBUF) { ut_ad(type == OS_FILE_READ); + /* Reduce probability of deadlock bugs in connection with ibuf: + do not let the ibuf i/o handler sleep */ + + wake_later = FALSE; array = os_aio_ibuf_array; } else if (mode == OS_AIO_LOG) { @@ -1413,7 +1413,7 @@ try_again: return(TRUE); } - goto error_handling; + err = 1; /* Fall through the next if */ } #endif if (err == 0) { @@ -1511,6 +1511,10 @@ os_aio_windows_handle( if (ret && len == slot->len) { ret_val = TRUE; + + if (slot->type == OS_FILE_WRITE) { + ut_a(TRUE == os_file_flush(slot->file)); + } } else { err = GetLastError(); ut_error; @@ -1592,6 +1596,10 @@ os_aio_posix_handle( *message1 = slot->message1; *message2 = slot->message2; + if (slot->type == OS_FILE_WRITE) { + ut_a(TRUE == os_file_flush(slot->file)); + } + os_mutex_exit(array->mutex); os_aio_array_free_slot(array, slot); diff --git a/innobase/os/os0sync.c b/innobase/os/os0sync.c index 4c283431575..c5dd603100d 100644 --- a/innobase/os/os0sync.c +++ b/innobase/os/os0sync.c @@ -247,6 +247,7 @@ os_event_wait_time( return(OS_SYNC_TIME_EXCEEDED); } else { ut_error; + return(1000000); /* dummy value to eliminate compiler warn. */ } #else UT_NOT_USED(time); diff --git a/innobase/os/os0thread.c b/innobase/os/os0thread.c index 05e1e6201a4..11bff73608a 100644 --- a/innobase/os/os0thread.c +++ b/innobase/os/os0thread.c @@ -138,7 +138,11 @@ os_thread_yield(void) #if defined(__WIN__) Sleep(0); #elif (defined(HAVE_SCHED_YIELD) && defined(HAVE_SCHED_H)) - sched_yield(); + sched_yield(); +#elif defined(HAVE_PTHREAD_YIELD_ZERO_ARG) + pthread_yield(); +#elif defined(HAVE_PTHREAD_YIELD_ONE_ARG) + pthread_yield(0); #else os_thread_sleep(0); #endif diff --git a/innobase/pars/lexyy.c b/innobase/pars/lexyy.c index 6ba8ecfbcb1..64b8963028b 100644 --- a/innobase/pars/lexyy.c +++ b/innobase/pars/lexyy.c @@ -6,6 +6,7 @@ #define FLEX_SCANNER +#include "univ.i" #include <stdio.h> @@ -5850,7 +5851,6 @@ Created 12/14/1997 Heikki Tuuri *******************************************************/ #define YYSTYPE que_node_t* -#include "univ.i" #include "pars0pars.h" #include "pars0grm.h" #include "pars0sym.h" diff --git a/innobase/pars/pars0grm.c b/innobase/pars/pars0grm.c index e7317d1f030..7f629a8fe4c 100644 --- a/innobase/pars/pars0grm.c +++ b/innobase/pars/pars0grm.c @@ -97,9 +97,8 @@ que_node_t */ #define YYSTYPE que_node_t* #define alloca mem_alloc -#include <math.h> - #include "univ.i" +#include <math.h> #include "pars0pars.h" #include "mem0mem.h" #include "que0types.h" diff --git a/innobase/pars/pars0grm.y b/innobase/pars/pars0grm.y index a13aeaac1e2..ae8c5ab91ec 100644 --- a/innobase/pars/pars0grm.y +++ b/innobase/pars/pars0grm.y @@ -10,11 +10,11 @@ Created 12/14/1997 Heikki Tuuri /* The value of the semantic attribute is a pointer to a query tree node que_node_t */ #define YYSTYPE que_node_t* -#define alloca mem_alloc - -#include <math.h> #include "univ.i" +#undef alloca +#define alloca mem_alloc +#include <math.h> #include "pars0pars.h" #include "mem0mem.h" #include "que0types.h" diff --git a/innobase/que/que0que.c b/innobase/que/que0que.c index b2c7e3ceea8..ddf8c8ebc43 100644 --- a/innobase/que/que0que.c +++ 
b/innobase/que/que0que.c @@ -1068,6 +1068,51 @@ que_thr_stop_for_mysql( mutex_exit(&kernel_mutex); } + +/************************************************************************** +Moves a thread from another state to the QUE_THR_RUNNING state. Increments +the n_active_thrs counters of the query graph and transaction if thr was +not active. */ +void +que_thr_move_to_run_state_for_mysql( +/*================================*/ + que_thr_t* thr, /* in: an query thread */ + trx_t* trx) /* in: transaction */ +{ + if (!thr->is_active) { + + (thr->graph)->n_active_thrs++; + + trx->n_active_thrs++; + + thr->is_active = TRUE; + + ut_ad((thr->graph)->n_active_thrs == 1); + ut_ad(trx->n_active_thrs == 1); + } + + thr->state = QUE_THR_RUNNING; +} + +/************************************************************************** +A patch for MySQL used to 'stop' a dummy query thread used in MySQL +select, when there is no error or lock wait. */ +void +que_thr_stop_for_mysql_no_error( +/*============================*/ + que_thr_t* thr, /* in: query thread */ + trx_t* trx) /* in: transaction */ +{ + ut_ad(thr->state == QUE_THR_RUNNING); + + thr->state = QUE_THR_COMPLETED; + + thr->is_active = FALSE; + (thr->graph)->n_active_thrs--; + + trx->n_active_thrs--; +} + /************************************************************************** Prints info of an SQL query graph node. */ diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c index e6182257581..58e0d053947 100644 --- a/innobase/row/row0sel.c +++ b/innobase/row/row0sel.c @@ -48,6 +48,52 @@ to que_run_threads: this is to allow canceling runaway queries */ #define SEL_EXHAUSTED 1 #define SEL_RETRY 2 +/************************************************************************ +Returns TRUE if the user-defined column values in a secondary index record +are the same as the corresponding columns in the clustered index record. */ +static +ibool +row_sel_sec_rec_is_for_clust_rec( +/*=============================*/ + rec_t* sec_rec, + dict_index_t* sec_index, + rec_t* clust_rec, + dict_index_t* clust_index) +{ + dict_col_t* col; + byte* sec_field; + ulint sec_len; + byte* clust_field; + ulint clust_len; + ulint n; + ulint i; + + n = dict_index_get_n_ordering_defined_by_user(sec_index); + + for (i = 0; i < n; i++) { + col = dict_field_get_col( + dict_index_get_nth_field(sec_index, i)); + + clust_field = rec_get_nth_field(clust_rec, + dict_col_get_clust_pos(col), + &clust_len); + sec_field = rec_get_nth_field(sec_rec, i, &sec_len); + + if (sec_len != clust_len) { + + return(FALSE); + } + + if (sec_len != UNIV_SQL_NULL + && ut_memcmp(sec_field, clust_field, sec_len) != 0) { + + return(FALSE); + } + } + + return(TRUE); +} + /************************************************************************* Creates a select node struct. */ @@ -561,6 +607,8 @@ row_sel_get_clust_rec( /* This is a non-locking consistent read: if necessary, fetch a previous version of the record */ + old_vers = NULL; + if (!lock_clust_rec_cons_read_sees(clust_rec, index, node->read_view)) { @@ -579,6 +627,28 @@ row_sel_get_clust_rec( return(DB_SUCCESS); } } + + /* If we had to go to an earlier version of row or the + secondary index record is delete marked, then it may be that + the secondary index record corresponding to clust_rec + (or old_vers) is not rec; in that case we must ignore + such row because in our snapshot rec would not have existed. + Remember that from rec we cannot see directly which transaction + id corresponds to it: we have to go to the clustered index + record. 
A query where we want to fetch all rows where + the secondary index value is in some interval would return + a wrong result if we would not drop rows which we come to + visit through secondary index records that would not really + exist in our snapshot. */ + + if ((old_vers || rec_get_deleted_flag(rec)) + && !row_sel_sec_rec_is_for_clust_rec(rec, plan->index, + clust_rec, index)) { + clust_rec = NULL; + *out_rec = clust_rec; + + return(DB_SUCCESS); + } } /* Fetch the columns needed in test conditions */ @@ -2105,6 +2175,8 @@ row_sel_get_clust_rec_for_mysql( a previous version of the record */ trx = thr_get_trx(thr); + + old_vers = NULL; if (!lock_clust_rec_cons_read_sees(clust_rec, clust_index, trx->read_view)) { @@ -2121,6 +2193,25 @@ row_sel_get_clust_rec_for_mysql( clust_rec = old_vers; } + + /* If we had to go to an earlier version of row or the + secondary index record is delete marked, then it may be that + the secondary index record corresponding to clust_rec + (or old_vers) is not rec; in that case we must ignore + such row because in our snapshot rec would not have existed. + Remember that from rec we cannot see directly which transaction + id corrsponds to it: we have to go to the clustered index + record. A query where we want to fetch all rows where + the secondary index value is in some interval would return + a wrong result if we would not drop rows which we come to + visit through secondary index records that would not really + exist in our snapshot. */ + + if ((old_vers || rec_get_deleted_flag(rec)) + && !row_sel_sec_rec_is_for_clust_rec(rec, sec_index, + clust_rec, clust_index)) { + clust_rec = NULL; + } } *out_rec = clust_rec; @@ -2609,8 +2700,10 @@ rec_loop: goto next_rec; } - - rec = clust_rec; + + if (prebuilt->need_to_access_clustered) { + rec = clust_rec; + } } /* We found a qualifying row */ diff --git a/innobase/row/row0uins.c b/innobase/row/row0uins.c index 68115895dbb..c9330318ac0 100644 --- a/innobase/row/row0uins.c +++ b/innobase/row/row0uins.c @@ -250,9 +250,12 @@ row_undo_ins_parse_undo_rec( ut_ad(type == TRX_UNDO_INSERT_REC); node->rec_type = type; - /* NOTE that the table has to be explicitly released later */ node->table = dict_table_get_on_id(table_id, node->trx); + if (node->table == NULL) { + return; + } + clust_index = dict_table_get_first_index(node->table); ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref), @@ -280,9 +283,14 @@ row_undo_ins( row_undo_ins_parse_undo_rec(node, thr); - found = row_undo_search_clust_to_pcur(node, thr); + if (node->table == NULL) { + found = FALSE; + } else { + found = row_undo_search_clust_to_pcur(node, thr); + } if (!found) { + trx_undo_rec_release(node->trx, node->undo_no); return(DB_SUCCESS); } diff --git a/innobase/row/row0umod.c b/innobase/row/row0umod.c index 2aa223a6186..70cf0fe5a32 100644 --- a/innobase/row/row0umod.c +++ b/innobase/row/row0umod.c @@ -534,9 +534,16 @@ row_undo_mod_parse_undo_rec( &undo_no, &table_id); node->rec_type = type; - /* NOTE that the table has to be explicitly released later */ node->table = dict_table_get_on_id(table_id, thr_get_trx(thr)); + /* TODO: other fixes associated with DROP TABLE + rollback in the + same table by another user */ + + if (node->table == NULL) { + /* Table was dropped */ + return; + } + clust_index = dict_table_get_first_index(node->table); ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr, @@ -571,12 +578,18 @@ row_undo_mod( row_undo_mod_parse_undo_rec(node, thr); - found = row_undo_search_clust_to_pcur(node, thr); + if (node->table == 
NULL) { + found = FALSE; + } else { + + found = row_undo_search_clust_to_pcur(node, thr); + } if (!found) { /* It is already undone, or will be undone by another query - thread */ + thread, or table was dropped */ + trx_undo_rec_release(node->trx, node->undo_no); node->state = UNDO_NODE_FETCH_NEXT; return(DB_SUCCESS); diff --git a/isam/_dbug.c b/isam/_dbug.c index fd0a0b46562..d632d5931a5 100644 --- a/isam/_dbug.c +++ b/isam/_dbug.c @@ -85,7 +85,7 @@ void _nisam_print_key(FILE *stream, register N_KEYSEG *keyseg, const uchar *key) key=end; break; case HA_KEYTYPE_INT24: - VOID(fprintf(stream,"%ld",sint3korr(key))); + VOID(fprintf(stream,"%ld",(long) sint3korr(key))); key=end; break; case HA_KEYTYPE_UINT24: diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 55234fcfe13..9221812ea65 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -16,11 +16,11 @@ MA 02111-1307, USA */ #define DONT_USE_RAID +#include <global.h> #if defined(__WIN__) || defined(_WIN32) || defined(_WIN64) #include <winsock.h> #include <odbcinst.h> #endif -#include <global.h> #include <my_sys.h> #include <mysys_err.h> #include <m_string.h> diff --git a/myisam/mi_locking.c b/myisam/mi_locking.c index 057efb96185..e067e80fcf3 100644 --- a/myisam/mi_locking.c +++ b/myisam/mi_locking.c @@ -335,11 +335,10 @@ int _mi_readinfo(register MI_INFO *info, int lock_type, int check_keybuffer) int _mi_writeinfo(register MI_INFO *info, uint operation) { int error,olderror; - MYISAM_SHARE *share; + MYISAM_SHARE *share=info->s; DBUG_ENTER("_mi_writeinfo"); error=0; - share=info->s; if (share->r_locks == 0 && share->w_locks == 0) { olderror=my_errno; /* Remember last error */ @@ -368,7 +367,7 @@ int _mi_writeinfo(register MI_INFO *info, uint operation) { share->changed= 1; /* Mark keyfile changed */ } - DBUG_RETURN(error); + DBUG_RETURN(error); } /* _mi_writeinfo */ diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am index 6deb69b49df..d98c10a29a9 100644 --- a/mysql-test/Makefile.am +++ b/mysql-test/Makefile.am @@ -27,10 +27,10 @@ CLEANFILES = $(test_SCRIPTS) dist-hook: mkdir -p $(distdir)/t $(distdir)/r $(distdir)/include \ $(distdir)/std_data - $(INSTALL_DATA) $(srcdir)/t/*.test $(srcdir)/t/*.opt $(distdir)/t + $(INSTALL_DATA) $(srcdir)/t/*.test $(srcdir)/t/*.opt $(srcdir)/t/*.sh $(distdir)/t $(INSTALL_DATA) $(srcdir)/include/*.inc $(distdir)/include $(INSTALL_DATA) $(srcdir)/r/*.result $(srcdir)/r/*.require $(distdir)/r - $(INSTALL_DATA) $(srcdir)/std_data/*.dat $(distdir)/std_data + $(INSTALL_DATA) $(srcdir)/std_data/*.dat $(srcdir)/std_data/*.001 $(distdir)/std_data install-data-local: $(mkinstalldirs) \ diff --git a/mysql-test/include/have_default_master.inc b/mysql-test/include/have_default_master.inc deleted file mode 100644 index eff1414c16a..00000000000 --- a/mysql-test/include/have_default_master.inc +++ /dev/null @@ -1,3 +0,0 @@ --- require r/have_default_master.require -connection master; -show variables like "port"; diff --git a/mysql-test/include/master-slave.inc b/mysql-test/include/master-slave.inc index 69ab73db6b5..61077f898f6 100644 --- a/mysql-test/include/master-slave.inc +++ b/mysql-test/include/master-slave.inc @@ -9,5 +9,7 @@ connection master; reset master; connection slave; reset slave; +# Clean up old test tables +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; slave start; @r/slave-running.result show status like 'Slave_running'; diff --git a/mysql-test/install_test_db.sh b/mysql-test/install_test_db.sh index 6fd32d37cd0..049ac6b1cd7 100644 --- a/mysql-test/install_test_db.sh +++ 
b/mysql-test/install_test_db.sh @@ -25,8 +25,8 @@ then data=var/slave-data ldata=$fix_bin/var/slave-data else - data=var/lib - ldata=$fix_bin/var/lib + data=var/master-data + ldata=$fix_bin/var/master-data fi mdata=$data/mysql diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 3e299fb2f25..9216ae5a74f 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -198,7 +198,7 @@ done #-- MYRUN_DIR=$MYSQL_TEST_DIR/var/run -MASTER_MYDDIR="$MYSQL_TEST_DIR/var/lib" +MASTER_MYDDIR="$MYSQL_TEST_DIR/var/master-data" MASTER_MYSOCK="$MYSQL_TMP_DIR/mysql-master.sock" MASTER_MYPID="$MYRUN_DIR/mysqld.pid" MASTER_MYLOG="$MYSQL_TEST_DIR/var/log/mysqld.log" @@ -298,6 +298,8 @@ prompt_user () read unused } +# We can't use diff -u as this isn't portable + show_failed_diff () { reject_file=r/$1.reject @@ -306,7 +308,7 @@ show_failed_diff () then echo "Below are the diffs between actual and expected results:" echo "-------------------------------------------------------" - $DIFF -u $result_file $reject_file + $DIFF -c $result_file $reject_file echo "-------------------------------------------------------" echo "Please e-mail the above, along with the output of mysqlbug" echo "and any other relevant info to bugs@lists.mysql.com" @@ -377,6 +379,8 @@ mysql_install_db () { error "Could not install slave test DBs" exit 1 fi + # Give mysqld some time to die. + sleep $SLEEP_TIME return 0 } @@ -515,7 +519,7 @@ start_slave() --core \ --tmpdir=$MYSQL_TMP_DIR \ --language=english \ - --skip-innodb \ + --skip-innodb --skip-slave-start \ $SMALL_SERVER \ $EXTRA_SLAVE_OPT $EXTRA_SLAVE_MYSQLD_OPT" if [ x$DO_DDD = x1 ] diff --git a/mysql-test/r/backup.result b/mysql-test/r/backup.result index 5bfa1e9013e..2bbe15954dc 100644 --- a/mysql-test/r/backup.result +++ b/mysql-test/r/backup.result @@ -1,5 +1,5 @@ Table Op Msg_type Msg_text -t1 backup error Failed copying .frm file: errno = X +test.t1 backup error Failed copying .frm file: errno = X test.t1 backup status Operation failed Table Op Msg_type Msg_text test.t1 backup status OK diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index 2e760ae5b75..5e227313e4a 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -492,3 +492,20 @@ a 1 a 2 MIN(B) MAX(b) 1 1 +id +0 +1 +2 +id +0 +1 +2 +id +0 +1 +2 +id id3 +0 0 +1 1 +2 2 +100 2 diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index ced1a3cd178..2c4a5cecbb1 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -30,3 +30,7 @@ Documentation 0 Host communication 0 kkkkkkkkkkk lllllllllll 3 Test Procedures 0 +1+1 a count(*) +2 a 0 +1+1 a count(*) +2 a 0 diff --git a/mysql-test/r/have_default_master.require b/mysql-test/r/have_default_master.require deleted file mode 100644 index ca8342ffb08..00000000000 --- a/mysql-test/r/have_default_master.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -port 9306 diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index 12f193bbdd9..f030b7fa763 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -449,3 +449,29 @@ a 1 table type possible_keys key key_len ref rows Extra t1 range PRIMARY PRIMARY 4 NULL 1 where used +id +0 +1 +2 +id +0 +1 +2 +id +0 +1 +2 +id id3 +0 0 +1 1 +2 2 +100 2 +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) default NULL, + KEY `a` (`a`) +) TYPE=InnoDB +a +1 +2 +3 diff --git a/mysql-test/r/lock.result b/mysql-test/r/lock.result index 7b116326fc4..7b1be604024 100644 --- a/mysql-test/r/lock.result 
+++ b/mysql-test/r/lock.result @@ -1,2 +1,6 @@ dummy1 count(distinct id) NULL 1 +Table Op Msg_type Msg_text +test.t1 check status OK +Table Op Msg_type Msg_text +test.t2 check error Table 't2' was not locked with LOCK TABLES diff --git a/mysql-test/r/rpl000014.result b/mysql-test/r/rpl000014.result index d2cb8ee5436..a47c3c91c1d 100644 --- a/mysql-test/r/rpl000014.result +++ b/mysql-test/r/rpl000014.result @@ -1,13 +1,13 @@ File Position Binlog_do_db Binlog_ignore_db master-bin.001 73 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 1 master-bin.001 73 Yes 0 0 +127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 1 master-bin.001 73 No 0 0 +127.0.0.1 root 9999 1 master-bin.001 73 No 0 0 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 1 master-bin.001 73 Yes 0 0 +127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 1 master-bin.001 173 Yes 0 0 +127.0.0.1 root 9999 1 master-bin.001 173 Yes 0 0 File Position Binlog_do_db Binlog_ignore_db master-bin.001 73 n diff --git a/mysql-test/r/rpl000015.result b/mysql-test/r/rpl000015.result index 5899d76c82f..58487af27f8 100644 --- a/mysql-test/r/rpl000015.result +++ b/mysql-test/r/rpl000015.result @@ -3,11 +3,11 @@ master-bin.001 73 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter 0 0 0 No 0 0 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 test 3306 60 4 No 0 0 +127.0.0.1 test 9998 60 4 No 0 0 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 60 4 No 0 0 +127.0.0.1 root 9999 60 4 No 0 0 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 60 master-bin.001 73 Yes 0 0 +127.0.0.1 root 9999 60 master-bin.001 73 Yes 0 0 n 10 45 diff --git a/mysql-test/r/rpl000016.result b/mysql-test/r/rpl000016.result index da9dccae9f4..abe4275a124 100644 --- a/mysql-test/r/rpl000016.result +++ b/mysql-test/r/rpl000016.result @@ -1,5 +1,5 @@ Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 60 master-bin.001 216 Yes 0 0 +127.0.0.1 root 9999 60 master-bin.001 216 Yes 0 0 s Could not break slave Tried hard @@ -10,7 +10,7 @@ master-bin.003 Log_name master-bin.003 Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 60 master-bin.003 184 Yes 0 0 +127.0.0.1 root 9999 60 master-bin.003 184 Yes 0 0 m 34 65 @@ -25,6 +25,6 @@ master-bin.006 File Position Binlog_do_db Binlog_ignore_db master-bin.006 131 Master_Host Master_User Master_Port Connect_retry 
Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter -127.0.0.1 root 9306 60 master-bin.006 131 Yes 0 0 +127.0.0.1 root 9999 60 master-bin.006 131 Yes 0 0 count(*) 100 diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index 8c50570a31d..ce2e5d4f58d 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -67,3 +67,16 @@ t1 0 PRIMARY 1 f1 A 1 NULL NULL t1 0 PRIMARY 2 f2 A 3 NULL NULL t1 0 PRIMARY 3 f3 A 9 NULL NULL t1 0 PRIMARY 4 f4 A 18 NULL NULL +Table Create Table +t1 CREATE TEMPORARY TABLE `t1` ( + `a` int(11) NOT NULL default '0' +) TYPE=MyISAM +Table Create Table +t2 CREATE TEMPORARY TABLE `t2` ( + `a` int(11) NOT NULL default '0' +) TYPE=MyISAM +Table Create Table +t1 CREATE TABLE `t1` ( + `test_set` set('val1','val2','val3') NOT NULL default '', + `name` char(20) default 'O''Brien' +) TYPE=MyISAM COMMENT='it''s a table' diff --git a/mysql-test/r/shw000001.result b/mysql-test/r/shw000001.result deleted file mode 100644 index c8056c74f0b..00000000000 --- a/mysql-test/r/shw000001.result +++ /dev/null @@ -1,5 +0,0 @@ -Table Create Table -t1 CREATE TABLE `t1` ( - `test_set` set('val1','val2','val3') NOT NULL default '', - `name` char(20) default 'O''Brien' -) TYPE=MyISAM COMMENT='it''s a table' diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index 564491fc520..96296e238fd 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -678,3 +678,30 @@ CREATE TABLE t1 ( INSERT INTO t1 VALUES (1, 1); SELECT MIN(B),MAX(b) FROM t1 WHERE t1.a = 1; drop table t1; + +# +# Test problem with BDB and lock tables with duplicate write. +# + +create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) type=bdb; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +--error 690 +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +select id from t1; +select id from t1; +UNLOCK TABLES; +DROP TABLE t1; + +create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) type=bdb; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +begin; +--error 690 +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +select id from t1; +insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D'); +commit; +select id,id3 from t1; +UNLOCK TABLES; +DROP TABLE t1; diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index edd3c1fff7e..e75841dc6d0 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -2,6 +2,7 @@ # Test of group (Failed for Lars Hoss <lh@pbm.de>) # +drop table if exists t1,t2; CREATE TABLE t1 ( spID int(10) unsigned, userID int(10) unsigned, @@ -208,3 +209,14 @@ select value,description,bug_id from t2 left join t1 on t2.program=t1.product an select value,description,COUNT(bug_id) from t2 left join t1 on t2.program=t1.product and t2.value=t1.component where program="AAAAA" group by value; drop table t1,t2; + +# +# Problem with functions and group functions when no matching rows +# + +create table t1 (foo int); +insert into t1 values (1); +select 1+1, "a",count(*) from t1 where foo in (2); +insert into t1 values (1); +select 1+1,"a",count(*) from t1 where foo in (2); +drop table t1; diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test index b18091656b6..cef53ce8165 100644 --- a/mysql-test/t/innodb.test +++ 
b/mysql-test/t/innodb.test @@ -429,3 +429,50 @@ create table t1 (a int primary key,b int, c int, d int, e int, f int, g int, h insert into t1 values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); explain select * from t1 where a > 0 and a < 50; drop table t1; + +# +# Test lock tables +# + +create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) type=innodb; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +--error 690 +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +select id from t1; +select id from t1; +UNLOCK TABLES; +DROP TABLE t1; + +create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) type=innodb; +insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL'); +LOCK TABLES t1 WRITE; +begin; +--error 690 +insert into t1 values (99,1,2,'D'),(1,1,2,'D'); +select id from t1; +insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D'); +commit; +select id,id3 from t1; +UNLOCK TABLES; +DROP TABLE t1; + +# +# Test prefix key +# +--error 1089 +create table t1 (a char(20), unique (a(5))) type=innodb; +create table t1 (a char(20), index (a(5))) type=innodb; +show create table t1; +drop table t1; + +# +# Test using temporary table and auto_increment +# + +create temporary table t1 (a int not null auto_increment, primary key(a)) type=innodb; +insert into t1 values (NULL),(NULL),(NULL); +delete from t1 where a=3; +insert into t1 values (NULL); +select * from t1; +drop table t1; diff --git a/mysql-test/t/lock.test b/mysql-test/t/lock.test index 777129ec814..385713174d2 100644 --- a/mysql-test/t/lock.test +++ b/mysql-test/t/lock.test @@ -21,3 +21,35 @@ LOCK TABLE t1 WRITE,t2 write; insert into t2 SELECT * from t1; update t1 set id=1 where id=-1; drop table t1,t2; + + +# +# Check bug with INSERT ... SELECT with lock tables +# + +CREATE TABLE t1 ( + index1 smallint(6) default NULL, + nr smallint(6) default NULL, + KEY index1(index1) +) TYPE=MyISAM; + +CREATE TABLE t2 ( + nr smallint(6) default NULL, + name varchar(20) default NULL +) TYPE=MyISAM; + +INSERT INTO t2 VALUES (1,'item1'); +INSERT INTO t2 VALUES (2,'item2'); + +# problem begins here! 
+lock tables t1 write, t2 read; +insert into t1 select 1,nr from t2 where name='item1'; +insert into t1 select 2,nr from t2 where name='item2'; +unlock tables; +check table t1; + +# Check error message +lock tables t1 write; +check table t2; +unlock tables; +drop table t1,t2; diff --git a/mysql-test/t/rpl000014.test b/mysql-test/t/rpl000014.test index 34e160a760c..b501d63b10e 100644 --- a/mysql-test/t/rpl000014.test +++ b/mysql-test/t/rpl000014.test @@ -1,18 +1,21 @@ source include/master-slave.inc; -source include/have_default_master.inc; connection master; show master status; save_master_pos; connection slave; sync_with_master; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; change master to master_log_pos=73; slave stop; change master to master_log_pos=73; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; slave start; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; change master to master_log_pos=173; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; connection master; show master status; diff --git a/mysql-test/t/rpl000015.test b/mysql-test/t/rpl000015.test index 825d1317bbc..73a10bed7b3 100644 --- a/mysql-test/t/rpl000015.test +++ b/mysql-test/t/rpl000015.test @@ -1,6 +1,5 @@ connect (master,localhost,root,,test,0,mysql-master.sock); connect (slave,localhost,root,,test,0, mysql-slave.sock); -source include/have_default_master.inc; connection master; reset master; show master status; @@ -9,12 +8,15 @@ connection slave; reset slave; show slave status; change master to master_host='127.0.0.1'; +--replace_result 3306 9998 9306 9999 3334 9999 3335 9999 show slave status; eval change master to master_host='127.0.0.1',master_user='root', master_password='',master_port=$MASTER_MYPORT; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; slave start; sync_with_master; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; connection master; drop table if exists t1; diff --git a/mysql-test/t/rpl000016.test b/mysql-test/t/rpl000016.test index a1450089898..7b46bc75498 100644 --- a/mysql-test/t/rpl000016.test +++ b/mysql-test/t/rpl000016.test @@ -1,6 +1,5 @@ connect (master,localhost,root,,test,0,mysql-master.sock); connect (slave,localhost,root,,test,0,mysql-slave.sock); -source include/have_default_master.inc; system cat /dev/null > var/slave-data/master.info; system chmod 000 var/slave-data/master.info; connection slave; @@ -23,6 +22,7 @@ insert into t1 values('Could not break slave'),('Tried hard'); save_master_pos; connection slave; sync_with_master; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; select * from t1; connection master; @@ -68,6 +68,7 @@ insert into t2 values (65); save_master_pos; connection slave; sync_with_master; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; select * from t2; connection master; @@ -87,6 +88,7 @@ connection slave; slave stop; slave start; sync_with_master; +--replace_result 9306 9999 3334 9999 3335 9999 show slave status; # because of concurrent insert, the table may not be up to date # if we do not lock diff --git a/mysql-test/t/rpl000018-master.sh b/mysql-test/t/rpl000018-master.sh index 71f0f12d0c5..e570f106ec6 100755 --- a/mysql-test/t/rpl000018-master.sh +++ b/mysql-test/t/rpl000018-master.sh @@ -1,3 +1,3 @@ -rm -f $MYSQL_TEST_DIR/var/lib/master-bin.* -cp $MYSQL_TEST_DIR/std_data/master-bin.001 $MYSQL_TEST_DIR/var/lib/ -echo ./master-bin.001 > $MYSQL_TEST_DIR/var/lib/master-bin.index +rm -f 
$MYSQL_TEST_DIR/var/master-data/master-bin.* +cp $MYSQL_TEST_DIR/std_data/master-bin.001 $MYSQL_TEST_DIR/var/master-data/ +echo ./master-bin.001 > $MYSQL_TEST_DIR/var/master-data/master-bin.index diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index d4be1a6d25a..f4f58c8c885 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -48,3 +48,20 @@ show index from t1; repair table t1; show index from t1; drop table t1; + +# +# Test of SHOW CREATE +# + +create temporary table t1 (a int not null); +show create table t1; +alter table t1 rename t2; +show create table t2; +drop table t2; + +create table t1 ( + test_set set( 'val1', 'val2', 'val3' ) not null default '', + name char(20) default 'O''Brien' + ) comment = 'it\'s a table' ; +show create table t1 ; +drop table t1; diff --git a/mysql-test/t/shw000001.test b/mysql-test/t/shw000001.test deleted file mode 100644 index 6b24d8a44c7..00000000000 --- a/mysql-test/t/shw000001.test +++ /dev/null @@ -1,8 +0,0 @@ -use test; -drop table if exists t1; -create table t1 ( - test_set set( 'val1', 'val2', 'val3' ) not null default '', - name char(20) default 'O''Brien' - ) comment = 'it\'s a table' ; -show create table t1 ; -drop table t1; diff --git a/mysys/my_error.c b/mysys/my_error.c index 6887126e6ec..4aa946aa6c3 100644 --- a/mysys/my_error.c +++ b/mysys/my_error.c @@ -66,7 +66,7 @@ int my_error(int nr,myf MyFlags, ...) while (isdigit(*tpos) || *tpos == '.' || *tpos == '-') tpos++; if (*tpos == 'l') /* Skipp 'l' argument */ - *tpos++; + tpos++; if (*tpos == 's') /* String parameter */ { par = va_arg(ap, char *); diff --git a/mysys/raid.cc b/mysys/raid.cc index a92647d1d95..48aa5cdb134 100644 --- a/mysys/raid.cc +++ b/mysys/raid.cc @@ -788,7 +788,7 @@ Fstat(int fd, MY_STAT *stat_area, myf MyFlags ) DBUG_PRINT("enter",("fd: %d MyFlags: %d",fd,MyFlags)); uint i; int error=0; - MY_STAT my_stat; + MY_STAT status; stat_area->st_size=0; stat_area->st_mtime=0; stat_area->st_atime=0; @@ -796,12 +796,12 @@ Fstat(int fd, MY_STAT *stat_area, myf MyFlags ) for(i=0 ; i < _raid_chunks ; i++) { - if (my_fstat(_fd_vector[i],&my_stat,MyFlags)) + if (my_fstat(_fd_vector[i],&status,MyFlags)) error=1; - stat_area->st_size+=my_stat.st_size; - set_if_bigger(stat_area->st_mtime,my_stat.st_mtime); - set_if_bigger(stat_area->st_atime,my_stat.st_atime); - set_if_bigger(stat_area->st_ctime,my_stat.st_ctime); + stat_area->st_size+=status.st_size; + set_if_bigger(stat_area->st_mtime,status.st_mtime); + set_if_bigger(stat_area->st_atime,status.st_atime); + set_if_bigger(stat_area->st_ctime,status.st_ctime); } DBUG_RETURN(error); } diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 030d8b5c0d6..84dac59018b 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -62,9 +62,10 @@ do done for i in extra/comp_err extra/replace extra/perror extra/resolveip \ - extra/my_print_defaults isam/isamchk isam/pack_isam myisam/myisamchk \ + extra/my_print_defaults extra/resolve_stack_dump \ + isam/isamchk isam/pack_isam myisam/myisamchk \ myisam/myisampack sql/mysqld sql/mysqlbinlog \ - client/mysql sql/mysqld client/mysqlshow \ + client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \ client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \ client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \ client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest @@ -104,8 +105,8 @@ rm -f $BASE/share/mysql/Makefile* 
$BASE/share/mysql/*/*.OLD $CP mysql-test/mysql-test-run mysql-test/install_test_db $BASE/mysql-test/ $CP mysql-test/README $BASE/mysql-test/README $CP mysql-test/include/*.inc $BASE/mysql-test/include -$CP mysql-test/std_data/*.dat $BASE/mysql-test/std_data -$CP mysql-test/t/*.test mysql-test/t/*.opt $BASE/mysql-test/t +$CP mysql-test/std_data/*.dat mysql-test/std_data/*.001 $BASE/mysql-test/std_data +$CP mysql-test/t/*.test mysql-test/t/*.opt mysql-test/t/*.sh $BASE/mysql-test/t $CP mysql-test/r/*.result mysql-test/r/*.require $BASE/mysql-test/r $CP scripts/* $BASE/bin diff --git a/scripts/mysqlhotcopy.sh b/scripts/mysqlhotcopy.sh index da8c6fced53..1c26bf8e2d6 100644 --- a/scripts/mysqlhotcopy.sh +++ b/scripts/mysqlhotcopy.sh @@ -30,7 +30,7 @@ mysqlhotcopy - fast on-line hot-backup utility for local MySQL databases and tab mysqlhotcopy --method='scp -Bq -i /usr/home/foo/.ssh/identity' --user=root --password=secretpassword \ db_1./^nice_table/ user@some.system.dom:~/path/to/new_directory -WARNING: THIS IS VERY MUCH A FIRST-CUT ALPHA. Comments/patches welcome. +WARNING: THIS PROGRAM IS STILL IN BETA. Comments/patches welcome. =cut diff --git a/sql-bench/Results/ATIS-mysql-NT_4.0 b/sql-bench/Results/ATIS-mysql-NT_4.0 index 0b6f896a13b..413a5e512bf 100644 --- a/sql-bench/Results/ATIS-mysql-NT_4.0 +++ b/sql-bench/Results/ATIS-mysql-NT_4.0 @@ -1,20 +1,20 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 0:38:04 +Testing server 'MySQL 3.23.37' at 2001-04-19 13:49:16 ATIS table test
Creating tables
-Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_table (28): 0 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
-Time to insert (9768): 5 wallclock secs ( 0.95 usr 1.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert (9768): 6 wallclock secs ( 0.81 usr 1.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
-Time for select_simple_join (500): 3 wallclock secs ( 1.59 usr 0.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_join (100): 4 wallclock secs ( 1.39 usr 0.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key_prefix_join (100): 19 wallclock secs (12.20 usr 4.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_distinct (800): 17 wallclock secs ( 4.72 usr 1.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (2800): 20 wallclock secs ( 3.14 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_simple_join (500): 3 wallclock secs ( 1.52 usr 0.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_join (100): 4 wallclock secs ( 1.41 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key_prefix_join (100): 18 wallclock secs (12.05 usr 4.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_distinct (800): 17 wallclock secs ( 4.72 usr 1.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_group (2800): 21 wallclock secs ( 3.06 usr 1.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 68 wallclock secs (24.02 usr 9.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 69 wallclock secs (23.58 usr 10.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/RUN-mysql-NT_4.0 b/sql-bench/Results/RUN-mysql-NT_4.0
index 43480bf0fe7..70d6fb2dc0f 100644
--- a/sql-bench/Results/RUN-mysql-NT_4.0
+++ b/sql-bench/Results/RUN-mysql-NT_4.0
@@ -1,96 +1,103 @@
-Benchmark DBD suite: 2.11a
-Date of test: 2001-01-17 23:59:27
-Running tests on: Windows NT Version 4.0
-Arguments:
+Benchmark DBD suite: 2.12
+Date of test: 2001-04-19 13:10:13
+Running tests on: Windows NT Version 4.0
+Arguments:
Comments: 2x Pentium III XEON 450MHZ, 512M
-Limits from:
-Server version: MySQL 3.23.31 +Limits from: +Server version: MySQL 3.23.37 -alter-table: Total time: 2315 wallclock secs ( 0.78 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -ATIS: Total time: 68 wallclock secs (24.02 usr 9.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -big-tables: Total time: 80 wallclock secs (18.31 usr 19.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -connect: Total time: 183 wallclock secs (60.53 usr 49.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -create: Total time: 995 wallclock secs (13.64 usr 10.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -insert: Total time: 7164 wallclock secs (872.86 usr 441.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -select: Total time: 1297 wallclock secs (113.66 usr 43.80 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -wisconsin: Total time: 28 wallclock secs ( 7.94 usr 5.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +alter-table: Total time: 2342 wallclock secs ( 0.91 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +ATIS: Total time: 69 wallclock secs (23.58 usr 10.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +big-tables: Total time: 79 wallclock secs (17.44 usr 18.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +connect: Total time: 179 wallclock secs (58.89 usr 48.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +create: Total time: 897 wallclock secs (15.94 usr 10.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +insert: Total time: 6659 wallclock secs (1143.94 usr 544.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +select: Total time: 1556 wallclock secs (127.53 usr 47.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +wisconsin: Total time: 28 wallclock secs ( 7.95 usr 5.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU) All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
-alter_table_add 1225.00 0.47 0.25 0.00 992 -alter_table_drop 1039.00 0.19 0.11 0.00 496 -connect 33.00 11.47 9.97 0.00 10000 -connect+select_1_row 38.00 13.19 12.03 0.00 10000 -connect+select_simple 36.00 12.84 11.23 0.00 10000 -count 35.00 0.06 0.00 0.00 100 -count_distinct 80.00 0.94 0.31 0.00 2000 -count_distinct_big 214.00 23.03 7.88 0.00 120 -count_distinct_group 150.00 3.03 1.20 0.00 1000 -count_distinct_group_on_key 45.00 0.67 0.25 0.00 1000 -count_distinct_group_on_key_parts 150.00 3.31 0.97 0.00 1000 -count_group_on_key_parts 41.00 2.81 0.92 0.00 1000 -count_on_key 377.00 21.58 9.17 0.00 50100 -create+drop 142.00 3.30 2.89 0.00 10000 -create_MANY_tables 245.00 2.73 1.56 0.00 10000 -create_index 25.00 0.00 0.00 0.00 8 -create_key+drop 172.00 5.14 2.92 0.00 10000 -create_table 0.00 0.00 0.01 0.00 31 -delete_all 21.00 0.00 0.02 0.00 12 -delete_all_many_keys 1980.00 0.03 0.03 0.00 1 -delete_big 0.00 0.00 0.00 0.00 1
-delete_big_many_keys 1980.00 0.03 0.03 0.00 128 -delete_key 8.00 0.88 1.34 0.00 10000 -drop_index 25.00 0.00 0.00 0.00 8 -drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 187.00 1.08 1.38 0.00 10000 -insert 234.00 35.02 48.52 0.00 350768 -insert_duplicates 59.00 8.92 14.09 0.00 100000 -insert_key 1853.00 13.92 13.81 0.00 100000 -insert_many_fields 22.00 0.64 0.39 0.00 2000 -insert_select_1_key 8.00 0.00 0.00 0.00 1
-insert_select_2_keys 12.00 0.00 0.00 0.00 1 -min_max 18.00 0.05 0.02 0.00 60 -min_max_on_key 193.00 35.67 14.80 0.00 85000 -multiple_value_insert 12.00 2.50 0.19 0.00 100000 -order_by_big 104.00 64.25 25.28 0.00 10 -order_by_big_key 95.00 69.14 26.05 0.00 10 -order_by_big_key2 90.00 63.38 26.20 0.00 10 -order_by_big_key_desc 96.00 68.61 26.58 0.00 10 -order_by_big_key_diff 100.00 65.05 24.69 0.00 10 -order_by_big_key_prefix 89.00 63.73 25.53 0.00 10 -order_by_key2_diff 11.00 5.53 2.23 0.00 500 -order_by_key_prefix 6.00 3.11 1.09 0.00 500 -order_by_range 9.00 3.02 1.25 0.00 500 -outer_join 118.00 0.00 0.00 0.00 10 -outer_join_found 106.00 0.02 0.00 0.00 10 -outer_join_not_found 58.00 0.02 0.00 0.00 500 -outer_join_on_key 41.00 0.03 0.00 0.00 10 -select_1_row 5.00 1.14 1.80 0.00 10000 -select_2_rows 6.00 0.91 2.03 0.00 10000 -select_big 146.00 83.48 34.54 0.00 10080 -select_column+column 6.00 0.88 1.83 0.00 10000 -select_diff_key 122.00 0.45 0.08 0.00 500 -select_distinct 17.00 4.72 1.55 0.00 800 -select_group 56.00 3.17 1.01 0.00 2911 -select_group_when_MANY_tables 249.00 1.39 2.03 0.00 10000 -select_join 4.00 1.39 0.55 0.00 100 -select_key 194.00 90.98 38.86 0.00 200000 -select_key2 202.00 92.78 37.67 0.00 200000 -select_key_prefix 199.00 93.61 38.05 0.00 200000 -select_key_prefix_join 19.00 12.20 4.53 0.00 100 -select_many_fields 55.00 17.65 18.90 0.00 2000 -select_range 187.00 27.39 9.33 0.00 410 -select_range_key2 29.00 10.31 3.64 0.00 25010 -select_range_prefix 28.00 10.05 4.22 0.00 25010 -select_simple 4.00 0.88 1.61 0.00 10000 -select_simple_join 3.00 1.59 0.67 0.00 500 -update_big 60.00 0.00 0.00 0.00 10 -update_of_key 56.00 4.28 7.00 0.00 50000 -update_of_key_big 33.00 0.06 0.08 0.00 501 -update_of_primary_key_many_keys 580.00 0.03 0.05 0.00 256 -update_with_key 188.00 27.97 41.06 0.00 300000 -update_with_key_prefix 59.00 9.42 12.14 0.00 100000 -wisc_benchmark 9.00 5.44 1.66 0.00 114 -TOTALS 14098.00 1111.56 580.08 0.00 2046247 +alter_table_add 1246.00 0.52 0.22 0.00 992 +alter_table_drop 1043.00 0.27 0.06 0.00 496 +connect 33.00 12.13 9.89 0.00 10000 +connect+select_1_row 39.00 12.91 11.73 0.00 10000 +connect+select_simple 37.00 12.36 12.34 0.00 10000 +count 36.00 0.03 0.02 0.00 100 +count_distinct 48.00 0.56 0.20 0.00 1000 +count_distinct_2 52.00 0.45 0.27 0.00 1000 +count_distinct_big 205.00 22.49 8.11 0.00 120 +count_distinct_group 145.00 2.67 1.19 0.00 1000 +count_distinct_group_on_key 48.00 0.58 0.17 0.00 1000 +count_distinct_group_on_key_parts 145.00 3.02 0.94 0.00 1000 +count_distinct_key_prefix 39.00 0.52 0.17 0.00 1000 +count_group_on_key_parts 40.00 2.73 0.83 0.00 1000 +count_on_key 405.00 22.87 8.23 0.00 50100 +create+drop 134.00 3.78 2.89 0.00 10000 +create_MANY_tables 231.00 3.27 1.58 0.00 10000 +create_index 26.00 0.00 0.00 0.00 8 +create_key+drop 167.00 5.98 2.77 0.00 10000 +create_table 0.00 0.02 0.02 0.00 31 +delete_all 19.00 0.00 0.00 0.00 12 +delete_all_many_keys 1431.00 0.00 0.03 0.00 1 +delete_big 0.00 0.00 0.00 0.00 1 +delete_big_many_keys 1431.00 0.00 0.03 0.00 128 +delete_key 7.00 1.14 1.42 0.00 10000 +drop_index 27.00 0.00 0.00 0.00 8 +drop_table 0.00 0.00 0.00 0.00 28 +drop_table_when_MANY_tables 169.00 1.41 1.42 0.00 10000 +insert 235.00 33.44 47.68 0.00 350768 +insert_duplicates 59.00 9.02 12.91 0.00 100000 +insert_key 1440.00 13.86 11.92 0.00 100000 +insert_many_fields 22.00 0.64 0.45 0.00 2000 +insert_select_1_key 8.00 0.00 0.00 0.00 1 +insert_select_2_keys 12.00 0.00 0.00 0.00 1 +min_max 19.00 0.00 0.02 0.00 60 +min_max_on_key 196.00 37.78 15.60 
0.00 85000 +multiple_value_insert 9.00 2.53 0.19 0.00 100000 +order_by_big 101.00 61.84 25.81 0.00 10 +order_by_big_key 93.00 66.86 26.16 0.00 10 +order_by_big_key2 88.00 62.99 24.97 0.00 10 +order_by_big_key_desc 94.00 67.34 25.92 0.00 10 +order_by_big_key_diff 98.00 62.45 25.16 0.00 10 +order_by_big_key_prefix 88.00 62.72 25.19 0.00 10 +order_by_key2_diff 11.00 5.53 2.19 0.00 500 +order_by_key_prefix 6.00 2.94 1.08 0.00 500 +order_by_range 9.00 2.92 1.23 0.00 500 +outer_join 120.00 0.00 0.00 0.00 10 +outer_join_found 106.00 0.00 0.00 0.00 10 +outer_join_not_found 56.00 0.03 0.00 0.00 500 +outer_join_on_key 41.00 0.00 0.00 0.00 10 +select_1_row 5.00 1.23 1.69 0.00 10000 +select_2_rows 6.00 1.00 2.20 0.00 10000 +select_big 139.00 80.53 32.12 0.00 10080 +select_column+column 5.00 1.08 1.75 0.00 10000 +select_diff_key 127.00 0.67 0.05 0.00 500 +select_distinct 17.00 4.72 1.78 0.00 800 +select_group 59.00 3.11 1.34 0.00 2911 +select_group_when_MANY_tables 196.00 1.48 1.77 0.00 10000 +select_join 4.00 1.41 0.53 0.00 100 +select_key 196.00 103.61 37.28 0.00 200000 +select_key2 205.00 93.56 39.66 0.00 200000 +select_key2_return_key 198.00 90.06 35.53 0.00 200000 +select_key2_return_prim 203.00 91.61 35.25 0.00 200000 +select_key_prefix 201.00 93.56 39.13 0.00 200000 +select_key_prefix_join 18.00 12.05 4.75 0.00 100 +select_key_return_key 195.00 89.05 37.13 0.00 200000 +select_many_fields 54.00 16.80 18.40 0.00 2000 +select_query_cache 90.00 5.81 1.91 0.00 10000 +select_query_cache2 91.00 5.55 1.86 0.00 10000 +select_range 186.00 27.06 9.17 0.00 410 +select_range_key2 30.00 10.39 3.47 0.00 25010 +select_range_prefix 28.00 10.19 4.06 0.00 25010 +select_simple 4.00 0.80 1.48 0.00 10000 +select_simple_join 3.00 1.52 0.66 0.00 500 +update_big 66.00 0.00 0.00 0.00 10 +update_of_key 56.00 4.66 6.17 0.00 50000 +update_of_key_big 32.00 0.05 0.11 0.00 501 +update_of_primary_key_many_keys 447.00 0.03 0.00 0.00 256 +update_with_key 190.00 27.05 40.97 0.00 300000 +update_with_key_prefix 58.00 9.02 13.19 0.00 100000 +wisc_benchmark 9.00 5.77 1.45 0.00 114 +TOTALS 13232.00 1396.03 685.87 0.00 2667247 diff --git a/sql-bench/Results/alter-table-mysql-NT_4.0 b/sql-bench/Results/alter-table-mysql-NT_4.0 index 3a75bf1a366..98863586928 100644 --- a/sql-bench/Results/alter-table-mysql-NT_4.0 +++ b/sql-bench/Results/alter-table-mysql-NT_4.0 @@ -1,16 +1,16 @@ -Testing server 'MySQL 3.23.31' at 2001-01-17 23:59:28 +Testing server 'MySQL 3.23.37' at 2001-04-19 13:10:14 Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
-Time for insert (1000) 1 wallclock secs ( 0.13 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert (1000) 0 wallclock secs ( 0.13 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for alter_table_add (992): 1225 wallclock secs ( 0.47 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for alter_table_add (992): 1246 wallclock secs ( 0.52 usr 0.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for create_index (8): 25 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for create_index (8): 26 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for drop_index (8): 25 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for drop_index (8): 27 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for alter_table_drop (496): 1039 wallclock secs ( 0.19 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for alter_table_drop (496): 1043 wallclock secs ( 0.27 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 2315 wallclock secs ( 0.78 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 2342 wallclock secs ( 0.91 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/big-tables-mysql-NT_4.0 b/sql-bench/Results/big-tables-mysql-NT_4.0 index 8654e5711dd..7f3510f396c 100644 --- a/sql-bench/Results/big-tables-mysql-NT_4.0 +++ b/sql-bench/Results/big-tables-mysql-NT_4.0 @@ -1,19 +1,19 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 0:39:12 +Testing server 'MySQL 3.23.37' at 2001-04-19 13:50:25 Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
-Time to select_many_fields(1000): 20 wallclock secs ( 8.56 usr 9.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_many_fields(1000): 19 wallclock secs ( 8.02 usr 9.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
-Time to select_many_fields(1000): 35 wallclock secs ( 9.09 usr 9.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_many_fields(1000): 35 wallclock secs ( 8.78 usr 8.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
-Time to insert_many_fields(1000): 4 wallclock secs ( 0.47 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert_many_fields(1000): 3 wallclock secs ( 0.48 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
-Time to insert_many_fields(1000): 18 wallclock secs ( 0.17 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time to insert_many_fields(1000): 19 wallclock secs ( 0.16 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 80 wallclock secs (18.31 usr 19.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 79 wallclock secs (17.44 usr 18.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/connect-mysql-NT_4.0 b/sql-bench/Results/connect-mysql-NT_4.0 index cf366c0f547..3c3bfa1e112 100644 --- a/sql-bench/Results/connect-mysql-NT_4.0 +++ b/sql-bench/Results/connect-mysql-NT_4.0 @@ -1,30 +1,30 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 0:40:33 +Testing server 'MySQL 3.23.37' at 2001-04-19 13:51:45 Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
-Time to connect (10000): 33 wallclock secs (11.47 usr 9.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to connect (10000): 33 wallclock secs (12.13 usr 9.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
-Time for connect+select_simple (10000): 36 wallclock secs (12.84 usr 11.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for connect+select_simple (10000): 37 wallclock secs (12.36 usr 12.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
-Time for select_simple (10000): 4 wallclock secs ( 0.88 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_simple (10000): 4 wallclock secs ( 0.80 usr 1.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
-Time to connect+select_1_row (10000): 38 wallclock secs (13.19 usr 12.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to connect+select_1_row (10000): 39 wallclock secs (12.91 usr 11.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
-Time to select_1_row (10000): 5 wallclock secs ( 1.14 usr 1.80 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_1_row (10000): 5 wallclock secs ( 1.23 usr 1.69 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
-Time to select_2_rows (10000): 6 wallclock secs ( 0.91 usr 2.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_2_rows (10000): 6 wallclock secs ( 1.00 usr 2.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test select with aritmetic (+)
-Time for select_column+column (10000): 6 wallclock secs ( 0.88 usr 1.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_column+column (10000): 5 wallclock secs ( 1.08 usr 1.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (65000 bytes)
-Time to select_big (10000): 55 wallclock secs (19.23 usr 8.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time to select_big (10000): 50 wallclock secs (17.36 usr 6.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 183 wallclock secs (60.53 usr 49.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 179 wallclock secs (58.89 usr 48.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/create-mysql-NT_4.0 b/sql-bench/Results/create-mysql-NT_4.0 index 326148f9bbd..2191f28735a 100644 --- a/sql-bench/Results/create-mysql-NT_4.0 +++ b/sql-bench/Results/create-mysql-NT_4.0 @@ -1,18 +1,18 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 0:43:36 +Testing server 'MySQL 3.23.37' at 2001-04-19 13:54:45 Testing the speed of creating and droping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
-Time for create_MANY_tables (10000): 245 wallclock secs ( 2.73 usr 1.56 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_MANY_tables (10000): 231 wallclock secs ( 3.27 usr 1.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
-Time to select_group_when_MANY_tables (10000): 249 wallclock secs ( 1.39 usr 2.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_group_when_MANY_tables (10000): 196 wallclock secs ( 1.48 usr 1.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
-Time for drop_table_when_MANY_tables (10000): 187 wallclock secs ( 1.08 usr 1.38 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for drop_table_when_MANY_tables (10000): 169 wallclock secs ( 1.41 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
-Time for create+drop (10000): 142 wallclock secs ( 3.30 usr 2.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for create_key+drop (10000): 172 wallclock secs ( 5.14 usr 2.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 995 wallclock secs (13.64 usr 10.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for create+drop (10000): 134 wallclock secs ( 3.78 usr 2.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for create_key+drop (10000): 167 wallclock secs ( 5.98 usr 2.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 897 wallclock secs (15.94 usr 10.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/insert-mysql-NT_4.0 b/sql-bench/Results/insert-mysql-NT_4.0 index 1e5e47b994e..dddbf6d4c19 100644 --- a/sql-bench/Results/insert-mysql-NT_4.0 +++ b/sql-bench/Results/insert-mysql-NT_4.0 @@ -1,4 +1,4 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 1:00:12 +Testing server 'MySQL 3.23.37' at 2001-04-19 14:09:43 Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
@@ -8,55 +8,58 @@ Creating tables Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
-Time for insert (300000): 203 wallclock secs (30.27 usr 42.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert (300000): 203 wallclock secs (29.19 usr 40.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing insert of duplicates
-Time for insert_duplicates (100000): 59 wallclock secs ( 8.92 usr 14.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert_duplicates (100000): 59 wallclock secs ( 9.02 usr 12.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Retrieving data from the table
-Time for select_big (10:3000000): 90 wallclock secs (63.84 usr 25.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key (10:3000000): 95 wallclock secs (69.14 usr 26.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key_desc (10:3000000): 96 wallclock secs (68.61 usr 26.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key_prefix (10:3000000): 89 wallclock secs (63.73 usr 25.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key2 (10:3000000): 90 wallclock secs (63.38 usr 26.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big_key_diff (10:3000000): 100 wallclock secs (65.05 usr 24.69 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_big (10:3000000): 104 wallclock secs (64.25 usr 25.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_range (500:125750): 9 wallclock secs ( 3.02 usr 1.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key_prefix (500:125750): 6 wallclock secs ( 3.11 usr 1.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for order_by_key2_diff (500:250500): 11 wallclock secs ( 5.53 usr 2.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_diff_key (500:1000): 122 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range_prefix (5010:42084): 15 wallclock secs ( 4.52 usr 1.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range_key2 (5010:42084): 16 wallclock secs ( 4.67 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key_prefix (200000): 199 wallclock secs (93.61 usr 38.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key (200000): 194 wallclock secs (90.98 usr 38.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_key2 (200000): 202 wallclock secs (92.78 usr 37.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_big (10:3000000): 88 wallclock secs (62.84 usr 25.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_big_key (10:3000000): 93 wallclock secs (66.86 usr 26.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_big_key_desc (10:3000000): 94 wallclock secs (67.34 usr 25.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_big_key_prefix (10:3000000): 88 wallclock secs (62.72 usr 25.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_big_key2 (10:3000000): 88 wallclock secs (62.99 usr 24.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_big_key_diff (10:3000000): 98 wallclock secs (62.45 usr 25.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_big (10:3000000): 101 wallclock secs (61.84 usr 25.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_range (500:125750): 9 wallclock secs ( 2.92 usr 1.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_key_prefix (500:125750): 6 wallclock secs ( 2.94 usr 1.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for order_by_key2_diff (500:250500): 11 wallclock secs ( 5.53 usr 2.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_diff_key (500:1000): 127 wallclock secs ( 0.67 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_range_prefix (5010:42084): 15 wallclock secs ( 4.69 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_range_key2 (5010:42084): 16 wallclock secs ( 4.70 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_key_prefix (200000): 201 wallclock secs (93.56 usr 39.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_key (200000): 196 wallclock secs (103.61 usr 37.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_key_return_key 
(200000): 195 wallclock secs (89.05 usr 37.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_key2 (200000): 205 wallclock secs (93.56 usr 39.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_key2_return_key (200000): 198 wallclock secs (90.06 usr 35.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_key2_return_prim (200000): 203 wallclock secs (91.61 usr 35.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Test of compares with simple ranges
-Time for select_range_prefix (20000:43500): 13 wallclock secs ( 5.53 usr 2.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range_key2 (20000:43500): 13 wallclock secs ( 5.64 usr 2.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_group (111): 36 wallclock secs ( 0.03 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max_on_key (15000): 14 wallclock secs ( 6.42 usr 2.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max (60): 18 wallclock secs ( 0.05 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (100): 31 wallclock secs ( 0.06 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count (100): 35 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (20): 103 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_range_prefix (20000:43500): 13 wallclock secs ( 5.50 usr 2.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_range_key2 (20000:43500): 14 wallclock secs ( 5.69 usr 2.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_group (111): 38 wallclock secs ( 0.05 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for min_max_on_key (15000): 14 wallclock secs ( 6.08 usr 2.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for min_max (60): 19 wallclock secs ( 0.00 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_on_key (100): 37 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count (100): 36 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_big (20): 91 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing update of keys with functions
-Time for update_of_key (50000): 56 wallclock secs ( 4.28 usr 7.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for update_of_key_big (501): 33 wallclock secs ( 0.06 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for update_of_key (50000): 56 wallclock secs ( 4.66 usr 6.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for update_of_key_big (501): 32 wallclock secs ( 0.05 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing update with key
-Time for update_with_key (300000): 188 wallclock secs (27.97 usr 41.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for update_with_key_prefix (100000): 59 wallclock secs ( 9.42 usr 12.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for update_with_key (300000): 190 wallclock secs (27.05 usr 40.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for update_with_key_prefix (100000): 58 wallclock secs ( 9.02 usr 13.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing update of all rows
-Time for update_big (10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for update_big (10): 66 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing left outer join
-Time for outer_join_on_key (10:10): 41 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for outer_join (10:10): 118 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for outer_join_found (10:10): 106 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for outer_join_not_found (500:10): 58 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_on_key (10:10): 41 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join (10:10): 120 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_found (10:10): 106 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for outer_join_not_found (500:10): 56 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 8 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
@@ -64,24 +67,24 @@ Time for insert_select_2_keys (1): 12 wallclock secs ( 0.00 usr 0.00 sys + 0. Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing delete
-Time for delete_key (10000): 8 wallclock secs ( 0.88 usr 1.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for delete_all (12): 21 wallclock secs ( 0.00 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for delete_key (10000): 7 wallclock secs ( 1.14 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for delete_all (12): 19 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Insert into table with 16 keys and with a primary key with 16 parts
-Time for insert_key (100000): 1853 wallclock secs (13.92 usr 13.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for insert_key (100000): 1440 wallclock secs (13.86 usr 11.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing update of keys
-Time for update_of_primary_key_many_keys (256): 580 wallclock secs ( 0.03 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for update_of_primary_key_many_keys (256): 447 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Deleting rows from the table
-Time for delete_big_many_keys (128): 1980 wallclock secs ( 0.03 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for delete_big_many_keys (128): 1431 wallclock secs ( 0.00 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Deleting everything from table
-Time for delete_all_many_keys (1): 1980 wallclock secs ( 0.03 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for delete_all_many_keys (1): 1431 wallclock secs ( 0.00 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Inserting 100000 rows with multiple values
-Time for multiple_value_insert (100000): 12 wallclock secs ( 2.50 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for multiple_value_insert (100000): 9 wallclock secs ( 2.53 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 7164 wallclock secs (872.86 usr 441.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 6659 wallclock secs (1143.94 usr 544.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/select-mysql-NT_4.0 b/sql-bench/Results/select-mysql-NT_4.0 index a32f7063a25..d560ccddb99 100644 --- a/sql-bench/Results/select-mysql-NT_4.0 +++ b/sql-bench/Results/select-mysql-NT_4.0 @@ -1,23 +1,30 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 2:59:37 +Testing server 'MySQL 3.23.37' at 2001-04-19 16:00:44 Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
-Time to insert (10000): 7 wallclock secs ( 1.30 usr 1.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time to insert (10000): 7 wallclock secs ( 1.27 usr 1.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Test if the database has a query cache +Time for select_query_cache (10000): 90 wallclock secs ( 5.81 usr 1.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU) + +Time for select_query_cache2 (10000): 91 wallclock secs ( 5.55 usr 1.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing big selects on the table
-Time for select_big (70:17207): 1 wallclock secs ( 0.41 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for select_range (410:1057904): 187 wallclock secs (27.39 usr 9.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for min_max_on_key (70000): 179 wallclock secs (29.25 usr 12.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_on_key (50000): 346 wallclock secs (21.52 usr 9.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_big (70:17207): 1 wallclock secs ( 0.33 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for select_range (410:1057904): 186 wallclock secs (27.06 usr 9.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for min_max_on_key (70000): 182 wallclock secs (31.70 usr 12.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_on_key (50000): 368 wallclock secs (22.81 usr 8.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_group_on_key_parts (1000:100000): 41 wallclock secs ( 2.81 usr 0.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_group_on_key_parts (1000:100000): 40 wallclock secs ( 2.73 usr 0.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Testing count(distinct) on the table
-Time for count_distinct (2000:2000): 80 wallclock secs ( 0.94 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key (1000:6000): 45 wallclock secs ( 0.67 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group_on_key_parts (1000:100000): 150 wallclock secs ( 3.31 usr 0.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_group (1000:100000): 150 wallclock secs ( 3.03 usr 1.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Time for count_distinct_big (100:1000000): 111 wallclock secs (23.03 usr 7.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 1297 wallclock secs (113.66 usr 43.80 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_key_prefix (1000:1000): 39 wallclock secs ( 0.52 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct (1000:1000): 48 wallclock secs ( 0.56 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_2 (1000:1000): 52 wallclock secs ( 0.45 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_group_on_key (1000:6000): 48 wallclock secs ( 0.58 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_group_on_key_parts (1000:100000): 145 wallclock secs ( 3.02 usr 0.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_group (1000:100000): 145 wallclock secs ( 2.67 usr 1.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for count_distinct_big (100:1000000): 114 wallclock secs (22.47 usr 8.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 1556 wallclock secs (127.53 usr 47.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/Results/wisconsin-mysql-NT_4.0 b/sql-bench/Results/wisconsin-mysql-NT_4.0 index f4ed2847ff7..0ef69aa9272 100644 --- a/sql-bench/Results/wisconsin-mysql-NT_4.0 +++ b/sql-bench/Results/wisconsin-mysql-NT_4.0 @@ -1,14 +1,14 @@ -Testing server 'MySQL 3.23.31' at 2001-01-18 3:21:15 +Testing server 'MySQL 3.23.37' at 2001-04-19 16:26:40 Wisconsin benchmark test
-Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Inserting data
-Time to insert (31000): 19 wallclock secs ( 2.50 usr 3.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time to insert (31000): 19 wallclock secs ( 2.17 usr 4.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU) Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
-Time for wisc_benchmark (114): 9 wallclock secs ( 5.44 usr 1.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Time for wisc_benchmark (114): 9 wallclock secs ( 5.77 usr 1.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU) -Total time: 28 wallclock secs ( 7.94 usr 5.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU) +Total time: 28 wallclock secs ( 7.95 usr 5.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU) diff --git a/sql-bench/test-insert.sh b/sql-bench/test-insert.sh index 0cb65cc8c88..82ffcd83487 100644 --- a/sql-bench/test-insert.sh +++ b/sql-bench/test-insert.sh @@ -39,11 +39,6 @@ $opt_read_key_loop_count=$opt_loop_count; chomp($pwd = `pwd`); $pwd = "." if ($pwd eq ''); require "$pwd/bench-init.pl" || die "Can't read Configuration file: $!\n"; -if ($opt_loop_count < 256) -{ - $opt_loop_count=256; # Some tests must have some data to work! -} - if ($opt_small_test) { $opt_loop_count/=100; @@ -62,6 +57,13 @@ elsif ($opt_small_key_tables) $many_keys_loop_count/=10; } +if ($opt_loop_count < 100) +{ + $opt_loop_count=100; # Some tests must have some data to work! +} +$range_loop_count=min($opt_loop_count,$range_loop_count); + + print "Testing the speed of inserting data into 1 table and do some selects on it.\n"; print "The tests are done with a table that has $opt_loop_count rows.\n\n"; diff --git a/sql/filesort.cc b/sql/filesort.cc index e116e2b68e6..610fe2e966f 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -49,7 +49,7 @@ typedef struct st_sort_param { uint sort_length; /* Length of sortarg */ uint keys; /* Max antal nycklar / buffert */ uint ref_length; /* Length of record ref. */ - ha_rows max_rows; + ha_rows max_rows,examined_rows; TABLE *sort_form; /* For quicker make_sortkey */ SORT_FIELD *local_sortorder; SORT_FIELD *end; @@ -91,7 +91,8 @@ static uint sortlength(SORT_FIELD *sortorder,uint length); open a new file is opened */ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length, - SQL_SELECT *select, ha_rows special, ha_rows max_rows) + SQL_SELECT *select, ha_rows special, ha_rows max_rows, + ha_rows *examined_rows) { int error; uint memavl,old_memavl,maxbuffer,skr; @@ -113,6 +114,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length, param.ref_length= table[0]->file->ref_length; param.sort_length=sortlength(sortorder,s_length)+ param.ref_length; param.max_rows= max_rows; + param.examined_rows=0; if (select && select->quick) { @@ -259,7 +261,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length, my_error(ER_FILSORT_ABORT,MYF(ME_ERROR+ME_WAITTANG)); else statistic_add(filesort_rows, records, &LOCK_status); - + *examined_rows= param.examined_rows; #ifdef SKIPP_DBUG_IN_FILESORT DBUG_POP(); /* Ok to DBUG */ #endif @@ -367,6 +369,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, file->rnd_end(); DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ } + if (error == 0) + param->examined_rows++; if (error == 0 && (!select || select->skipp_record() == 0)) { if (idx == param->keys) diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 9dbd7b6c998..32af39e4a0d 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -41,10 +41,9 @@ from the updated tables. Testing of: - - LOCK TABLES - Mark tables that participate in a transaction so that they are not closed during the transaction. We need to test what happens if - MySQL closes a table that is updated by a not commit transaction. + MySQL closes a table that is updated by a not commited transaction. 
*/ @@ -572,6 +571,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) transaction=0; cursor=0; key_read=0; + block_size=8192; // Berkeley DB block size share->fixed_length_row=!(table->db_create_options & HA_OPTION_PACK_RECORD); get_status(); @@ -1701,12 +1701,35 @@ int ha_berkeley::external_lock(THD *thd, int lock_type) DBUG_PRINT("trans",("commiting non-updating transaction")); error=txn_commit((DB_TXN*) thd->transaction.stmt.bdb_tid,0); thd->transaction.stmt.bdb_tid=0; + transaction=0; } } } DBUG_RETURN(error); } + +/* + When using LOCK TABLE's external_lock is only called when the actual + TABLE LOCK is done. + Under LOCK TABLES, each used tables will force a call to start_stmt. +*/ + +int ha_berkeley::start_stmt(THD *thd) +{ + int error=0; + DBUG_ENTER("ha_berkeley::start_stmt"); + if (!thd->transaction.stmt.bdb_tid) + { + error=txn_begin(db_env, (DB_TXN*) thd->transaction.all.bdb_tid, + (DB_TXN**) &thd->transaction.stmt.bdb_tid, + 0); + transaction= (DB_TXN*) thd->transaction.stmt.bdb_tid; + } + DBUG_RETURN(error); +} + + /* The idea with handler::store_lock() is the following: diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index 9724d128b1f..9e657d72da1 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -136,6 +136,7 @@ class ha_berkeley: public handler int extra(enum ha_extra_function operation); int reset(void); int external_lock(THD *thd, int lock_type); + int start_stmt(THD *thd); void position(byte *record); int analyze(THD* thd,HA_CHECK_OPT* check_opt); int optimize(THD* thd, HA_CHECK_OPT* check_opt); diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc index f263f693103..8af9de0eaba 100644 --- a/sql/ha_innobase.cc +++ b/sql/ha_innobase.cc @@ -449,7 +449,7 @@ innobase_init(void) if (!innobase_data_file_path) { fprintf(stderr, - "Can't initialize InnoDB as 'innobase_data_file_path' is not set\n"); + "Can't initialize InnoDB as 'innodb_data_file_path' is not set\n"); innodb_skip=1; DBUG_RETURN(FALSE); // Continue without innobase } @@ -1868,7 +1868,7 @@ corresponding row to buf. 
*/ int ha_innobase::index_first( /*=====================*/ - /* out: 0, HA_ERR_KEY_NOT_FOUND, + /* out: 0, HA_ERR_END_OF_FILE, or error code */ mysql_byte* buf) /* in/out: buffer for the row */ { @@ -1879,6 +1879,12 @@ ha_innobase::index_first( error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); + /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */ + + if (error == HA_ERR_KEY_NOT_FOUND) { + error = HA_ERR_END_OF_FILE; + } + DBUG_RETURN(error); } @@ -1899,7 +1905,7 @@ ha_innobase::index_last( error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); - /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */ + /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */ if (error == HA_ERR_KEY_NOT_FOUND) { error = HA_ERR_END_OF_FILE; diff --git a/sql/ha_innobase.h b/sql/ha_innobase.h index e85d73bdae5..429e47523dd 100644 --- a/sql/ha_innobase.h +++ b/sql/ha_innobase.h @@ -83,14 +83,14 @@ class ha_innobase: public handler HA_NO_WRITE_DELAYED | HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE | - HA_NOT_READ_AFTER_KEY), + HA_NOT_READ_AFTER_KEY | HA_NO_PREFIX_CHAR_KEYS), last_dup_key((uint) -1), start_of_scan(0) { } ~ha_innobase() {} - const char* table_type() const { return("Innobase");} + const char* table_type() const { return("InnoDB");} const char** bas_ext() const; ulong option_flag() const { return int_option_flag; } uint max_record_length() const { return HA_MAX_REC_LENGTH; } diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 1a205e54b9d..6409ec5d019 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -50,10 +50,12 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, { THD* thd = (THD*)param->thd; String* packet = &thd->packet; - packet->length(0); + uint length; char msgbuf[MI_MAX_MSG_BUF]; - msgbuf[0] = 0; + char name[NAME_LEN*2+2]; + packet->length(0); + msgbuf[0] = 0; // healthy paranoia ? 
my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args); msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia @@ -70,9 +72,12 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME)); return; } - net_store_data(packet, param->table_name); + length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) - + name); + net_store_data(packet, name, length); net_store_data(packet, param->op_name); net_store_data(packet, msg_type); + net_store_data(packet, msgbuf); if (my_net_write(&thd->net, (char*)thd->packet.ptr(), thd->packet.length())) fprintf(stderr, @@ -245,6 +250,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) myisamchk_init(¶m); param.thd = thd; param.op_name = (char*)"check"; + param.db_name = table->table_cache_key; param.table_name = table->table_name; param.testflag = check_opt->flags | T_CHECK | T_SILENT; @@ -332,6 +338,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) myisamchk_init(¶m); param.thd = thd; param.op_name = (char*) "analyze"; + param.db_name = table->table_cache_key; param.table_name = table->table_name; param.testflag=(T_FAST | T_CHECK | T_SILENT | T_STATISTICS | T_DONT_CHECK_CHECKSUM); @@ -384,6 +391,7 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) myisamchk_init(¶m); param.thd = thd; param.op_name = (char*)"restore"; + param.db_name = table->table_cache_key; param.table_name = table->table_name; param.testflag = 0; mi_check_print_error(¶m,errmsg, errno ); @@ -438,6 +446,7 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) myisamchk_init(¶m); param.thd = thd; param.op_name = (char*)"backup"; + param.db_name = table->table_cache_key; param.table_name = table->table_name; param.testflag = 0; mi_check_print_error(¶m,errmsg, errno ); @@ -524,6 +533,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) ha_rows rows= file->state->records; DBUG_ENTER("ha_myisam::repair"); + param.db_name = table->table_cache_key; param.table_name = table->table_name; param.tmpfile_createflag = O_RDWR | O_TRUNC; param.using_global_keycache = 1; @@ -533,7 +543,8 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) VOID(fn_format(fixed_name,file->filename,"",MI_NAME_IEXT, 4+ (param.opt_follow_links ? 16 : 0))); - if (mi_lock_database(file,F_WRLCK)) + // Don't lock tables if we have used LOCK TABLE + if (!thd->locked_tables && mi_lock_database(file,F_WRLCK)) { mi_check_print_error(¶m,ER(ER_CANT_LOCK),my_errno); DBUG_RETURN(HA_ADMIN_FAILED); @@ -615,7 +626,8 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) update_state_info(¶m, file, 0); } thd->proc_info=old_proc_info; - mi_lock_database(file,F_UNLCK); + if (!thd->locked_tables) + mi_lock_database(file,F_UNLCK); DBUG_RETURN(error ? HA_ADMIN_FAILED : !optimize_done ? 
HA_ADMIN_ALREADY_DONE : HA_ADMIN_OK); } diff --git a/sql/handler.h b/sql/handler.h index 638529ab882..076bf783f80 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -73,6 +73,7 @@ #define HA_NOT_READ_AFTER_KEY (HA_DROP_BEFORE_CREATE*2) #define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2) #define HA_NO_TEMP_TABLES (HA_NOT_DELETE_WITH_CACHE*2) +#define HA_NO_PREFIX_CHAR_KEYS (HA_NO_TEMP_TABLES*2) /* Parameters for open() (in register form->filestat) */ /* HA_GET_INFO does a implicit HA_ABORT_IF_LOCKED */ @@ -265,6 +266,7 @@ public: virtual int extra(enum ha_extra_function operation)=0; virtual int reset()=0; virtual int external_lock(THD *thd, int lock_type)=0; + virtual int start_stmt(THD *thd) {return 0;} virtual int delete_all_rows(); virtual longlong get_auto_increment(); virtual void update_create_info(HA_CREATE_INFO *create_info) {} @@ -344,6 +346,7 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, bool update_create_info); int ha_delete_table(enum db_type db_type, const char *path); void ha_key_cache(void); +int ha_start_stmt(THD *thd); int ha_commit_trans(THD *thd, THD_TRANS *trans); int ha_rollback_trans(THD *thd, THD_TRANS *trans); int ha_autocommit_or_rollback(THD *thd, int error); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 82dcb0268b4..80f72c30e57 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -393,12 +393,14 @@ void Item_func_reverse::fix_length_and_dec() String *Item_func_replace::val_str(String *str) { String *res,*res2,*res3; - int offset=0; + int offset; uint from_length,to_length; bool alloced=0; #ifdef USE_MB const char *ptr,*end,*strend,*search,*search_end; register uint32 l; + bool binary_str = (args[0]->binary || args[1]->binary || + !use_mb(default_charset_info)); #endif null_value=0; @@ -415,7 +417,8 @@ String *Item_func_replace::val_str(String *str) if ((offset=res->strstr(*res2)) < 0) return res; #else - if (!use_mb(default_charset_info) && (offset=res->strstr(*res2)) < 0) + offset=0; + if (binary_str && (offset=res->strstr(*res2)) < 0) return res; #endif if (!(res3=args[2]->val_str(&tmp_value2))) @@ -424,7 +427,7 @@ String *Item_func_replace::val_str(String *str) to_length= res3->length(); #ifdef USE_MB - if (use_mb(default_charset_info)) + if (!binary_str) { search=res2->ptr(); search_end=search+from_length; @@ -449,6 +452,7 @@ redo: res=copy_if_not_alloced(str,res,res->length()+to_length); } res->replace((uint) offset,from_length,*res3); + offset+=(int) to_length; goto redo; } skipp: diff --git a/sql/lex.h b/sql/lex.h index 7f902787223..0e10ff4949c 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -89,6 +89,7 @@ static SYMBOL symbols[] = { { "COMMIT", SYM(COMMIT_SYM),0,0}, { "COMMITTED", SYM(COMMITTED_SYM),0,0}, { "COMPRESSED", SYM(COMPRESSED_SYM),0,0}, + { "CONCURRENT", SYM(CONCURRENT),0,0}, { "CONSTRAINT", SYM(CONSTRAINT),0,0}, { "CREATE", SYM(CREATE),0,0}, { "CROSS", SYM(CROSS),0,0}, diff --git a/sql/lock.cc b/sql/lock.cc index 915f1831245..23f81c9c164 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -346,7 +346,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, *write_lock_used=0; for (i=tables=lock_count=0 ; i < count ; i++) { - if (!table_ptr[i]->tmp_table) + if (table_ptr[i]->tmp_table != TMP_TABLE) { tables+=table_ptr[i]->file->lock_count(); lock_count++; @@ -366,7 +366,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, for (i=0 ; i < count ; i++) { TABLE *table; - if ((table=table_ptr[i])->tmp_table) + if ((table=table_ptr[i])->tmp_table == TMP_TABLE) 
continue; *to++=table; enum thr_lock_type lock_type= table->reginfo.lock_type; diff --git a/sql/log.cc b/sql/log.cc index 9601d162d28..4cd93261973 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -103,7 +103,7 @@ MYSQL_LOG::~MYSQL_LOG() void MYSQL_LOG::set_index_file_name(const char* index_file_name) { if (index_file_name) - fn_format(this->index_file_name,index_file_name,mysql_data_home,"-index", + fn_format(this->index_file_name,index_file_name,mysql_data_home,".index", 4); else this->index_file_name[0] = 0; @@ -129,6 +129,32 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name) return 0; } +bool MYSQL_LOG::open_index( int options) +{ + return (index_file < 0 && + (index_file = my_open(index_file_name, options | O_BINARY , + MYF(MY_WME))) < 0); +} + +void MYSQL_LOG::init(enum_log_type log_type_arg) +{ + log_type = log_type_arg; + if (!inited) + { + inited=1; + (void) pthread_mutex_init(&LOCK_log,MY_MUTEX_INIT_SLOW); + (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW); + } +} + +void MYSQL_LOG::close_index() +{ + if(index_file >= 0) + { + my_close(index_file, MYF(0)); + index_file = -1; + } +} void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, const char *new_name) @@ -137,17 +163,11 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, char buff[512]; File file= -1; bool do_magic; - - if (!inited) - { - inited=1; - (void) pthread_mutex_init(&LOCK_log,MY_MUTEX_INIT_SLOW); - (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW); - if (log_type_arg == LOG_BIN && *fn_ext(log_name)) + + if (!inited && log_type_arg == LOG_BIN && *fn_ext(log_name)) no_rotate = 1; - } + init(log_type_arg); - log_type=log_type_arg; if (!(name=my_strdup(log_name,MYF(MY_WME)))) goto err; if (new_name) @@ -208,10 +228,7 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, clean up if failed */ if ((do_magic && my_b_write(&log_file, (byte*) BINLOG_MAGIC, 4)) || - (index_file < 0 && - (index_file = my_open(index_file_name, - O_APPEND | O_BINARY | O_RDWR | O_CREAT, - MYF(MY_WME))) < 0)) + open_index(O_APPEND | O_RDWR | O_CREAT)) goto err; Start_log_event s; bool error; @@ -224,8 +241,7 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, pthread_mutex_unlock(&LOCK_index); if (error) { - my_close(index_file,MYF(0)); - index_file= -1; + close_index(); goto err; } } @@ -825,11 +841,12 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, { /* For slow query log */ if (my_b_printf(&log_file, - "# Time: %lu Lock_time: %lu Rows_sent: %lu\n", + "# Time: %lu Lock_time: %lu Rows_sent: %lu Rows_examined: %lu\n", (ulong) (current_time - query_start), (ulong) (thd->time_after_lock - query_start), - (ulong) thd->sent_row_count) == (uint) -1) - tmp_errno=errno; + (ulong) thd->sent_row_count, + (ulong) thd->examined_row_count) == (uint) -1) + tmp_errno=errno; } if (thd->db && strcmp(thd->db,db)) { // Database changed diff --git a/sql/log_event.cc b/sql/log_event.cc index d643952c5b0..ac985c266c8 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -280,7 +280,7 @@ void Log_event::print_timestamp(FILE* file, time_t* ts) } -void Start_log_event::print(FILE* file, bool short_form) +void Start_log_event::print(FILE* file, bool short_form, char* last_db) { if (short_form) return; @@ -293,7 +293,7 @@ void Start_log_event::print(FILE* file, bool short_form) fflush(file); } -void Stop_log_event::print(FILE* file, bool short_form) +void Stop_log_event::print(FILE* file, bool short_form, char* last_db) { if 
(short_form) return; @@ -303,7 +303,7 @@ void Stop_log_event::print(FILE* file, bool short_form) fflush(file); } -void Rotate_log_event::print(FILE* file, bool short_form) +void Rotate_log_event::print(FILE* file, bool short_form, char* last_db) { if (short_form) return; @@ -441,7 +441,7 @@ Query_log_event::Query_log_event(const char* buf, int event_len): *((char*)query+q_len) = 0; } -void Query_log_event::print(FILE* file, bool short_form) +void Query_log_event::print(FILE* file, bool short_form, char* last_db) { char buff[40],*end; // Enough for SET TIMESTAMP if (!short_form) @@ -451,7 +451,15 @@ void Query_log_event::print(FILE* file, bool short_form) (ulong) thread_id, (ulong) exec_time, error_code); } - if (db && db[0]) + bool same_db = 0; + + if(db && last_db) + { + if(!(same_db = !memcmp(last_db, db, db_len + 1))) + memcpy(last_db, db, db_len + 1); + } + + if (db && db[0] && !same_db) fprintf(file, "use %s;\n", db); end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10); *end++=';'; @@ -507,7 +515,7 @@ int Intvar_log_event::write_data(IO_CACHE* file) return my_b_write(file, (byte*) buf, sizeof(buf)); } -void Intvar_log_event::print(FILE* file, bool short_form) +void Intvar_log_event::print(FILE* file, bool short_form, char* last_db) { char llbuff[22]; if(!short_form) @@ -625,7 +633,7 @@ void Load_log_event::copy_log_event(const char *buf, ulong data_len) } -void Load_log_event::print(FILE* file, bool short_form) +void Load_log_event::print(FILE* file, bool short_form, char* last_db) { if (!short_form) { @@ -634,7 +642,15 @@ void Load_log_event::print(FILE* file, bool short_form) thread_id, exec_time); } - if(db && db[0]) + bool same_db = 0; + + if(db && last_db) + { + if(!(same_db = !memcmp(last_db, db, db_len + 1))) + memcpy(last_db, db, db_len + 1); + } + + if(db && db[0] && !same_db) fprintf(file, "use %s;\n", db); fprintf(file, "LOAD DATA INFILE '%s' ", fname); @@ -678,7 +694,7 @@ void Load_log_event::print(FILE* file, bool short_form) } if((int)skip_lines > 0) - fprintf(file, " IGNORE %d LINES ", skip_lines); + fprintf(file, " IGNORE %ld LINES ", (long) skip_lines); if (num_fields) { diff --git a/sql/log_event.h b/sql/log_event.h index 0f4945bae3c..41f847e8d92 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -100,7 +100,7 @@ public: virtual ~Log_event() {} virtual int get_data_size() { return 0;} - virtual void print(FILE* file, bool short_form = 0) = 0; + virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0; void print_timestamp(FILE* file, time_t *ts = 0); void print_header(FILE* file); @@ -169,7 +169,7 @@ public: ; } - void print(FILE* file, bool short_form = 0); + void print(FILE* file, bool short_form = 0, char* last_db = 0); }; #define DUMPFILE_FLAG 0x1 @@ -312,7 +312,7 @@ public: ; } - void print(FILE* file, bool short_form = 0); + void print(FILE* file, bool short_form = 0, char* last_db = 0); }; extern char server_version[SERVER_VERSION_LENGTH]; @@ -350,7 +350,7 @@ public: // sizeof(binlog_version) + sizeof(server_version) sizeof(created) return 2 + sizeof(server_version) + 4; } - void print(FILE* file, bool short_form = 0); + void print(FILE* file, bool short_form = 0, char* last_db = 0); }; class Intvar_log_event: public Log_event @@ -369,7 +369,7 @@ public: int write_data(IO_CACHE* file); - void print(FILE* file, bool short_form = 0); + void print(FILE* file, bool short_form = 0, char* last_db = 0); }; class Stop_log_event: public Log_event @@ -388,7 +388,7 @@ public: } ~Stop_log_event() {} Log_event_type get_type_code() { return 
STOP_EVENT;} - void print(FILE* file, bool short_form = 0); + void print(FILE* file, bool short_form = 0, char* last_db = 0); }; class Rotate_log_event: public Log_event @@ -416,7 +416,7 @@ public: int get_data_size() { return ident_len;} int write_data(IO_CACHE* file); - void print(FILE* file, bool short_form = 0); + void print(FILE* file, bool short_form = 0, char* last_db = 0); }; #endif diff --git a/sql/mini_client.cc b/sql/mini_client.cc index fa1b9da38a8..38180c0c6c8 100644 --- a/sql/mini_client.cc +++ b/sql/mini_client.cc @@ -330,8 +330,14 @@ mc_net_safe_read(MYSQL *mysql) if(errno != EINTR) { mc_end_server(mysql); - net->last_errno=CR_SERVER_LOST; - strmov(net->last_error,ER(net->last_errno)); + if(net->last_errno != ER_NET_PACKET_TOO_LARGE) + { + net->last_errno=CR_SERVER_LOST; + strmov(net->last_error,ER(net->last_errno)); + } + else + strmov(net->last_error, "Packet too large - increase \ +max_allowed_packet on this server"); } return(packet_error); } diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 2c4ad21de95..3fdc50b1521 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -614,7 +614,8 @@ void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form, int use_record_cache, bool print_errors); void end_read_record(READ_RECORD *info); ha_rows filesort(TABLE **form,struct st_sort_field *sortorder, uint s_length, - SQL_SELECT *select, ha_rows special,ha_rows max_rows); + SQL_SELECT *select, ha_rows special,ha_rows max_rows, + ha_rows *examined_rows); void change_double_for_sort(double nr,byte *to); int get_quick_record(SQL_SELECT *select); int calc_weekday(long daynr,bool sunday_first_day_of_week); diff --git a/sql/mysqlbinlog.cc b/sql/mysqlbinlog.cc index 49daa04ffff..f0a9692cc2d 100644 --- a/sql/mysqlbinlog.cc +++ b/sql/mysqlbinlog.cc @@ -38,6 +38,7 @@ ulong mysqld_net_retry_count = 10L; ulong net_read_timeout= NET_READ_TIMEOUT; ulong net_write_timeout= NET_WRITE_TIMEOUT; uint test_flags = 0; +FILE *result_file; #ifndef DBUG_OFF static const char* default_dbug_option = "d:t:o,/tmp/mysqlbinlog.trace"; @@ -46,18 +47,19 @@ static const char* default_dbug_option = "d:t:o,/tmp/mysqlbinlog.trace"; static struct option long_options[] = { #ifndef DBUG_OFF - {"debug", optional_argument, 0, '#'}, + {"debug", optional_argument, 0, '#'}, #endif - {"help", no_argument, 0, '?'}, - {"host", required_argument, 0, 'h'}, - {"offset", required_argument, 0, 'o'}, - {"password", required_argument, 0, 'p'}, - {"port", required_argument, 0, 'P'}, - {"position", required_argument, 0, 'j'}, - {"short-form", no_argument, 0, 's'}, - {"table", required_argument, 0, 't'}, - {"user", required_argument, 0, 'u'}, - {"version", no_argument, 0, 'V'}, + {"help", no_argument, 0, '?'}, + {"host", required_argument, 0, 'h'}, + {"offset", required_argument, 0, 'o'}, + {"password", required_argument, 0, 'p'}, + {"port", required_argument, 0, 'P'}, + {"position", required_argument, 0, 'j'}, + {"result-file", required_argument, 0, 'r'}, + {"short-form", no_argument, 0, 's'}, + {"table", required_argument, 0, 't'}, + {"user", required_argument, 0, 'u'}, + {"version", no_argument, 0, 'V'}, }; void sql_print_error(const char *format,...); @@ -106,7 +108,7 @@ static void die(const char* fmt, ...) 
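The last_db argument threaded through the Log_event::print() methods above is filled in by mysqlbinlog further down (it keeps one char last_db[FN_REFLEN+1] buffer per dump), so that a "use db;" statement is emitted only when the database actually changes between consecutive events. A minimal standalone sketch of that convention follows; the helper name is chosen here for illustration and does not exist in the source:

#include <cstdio>
#include <cstring>

// last_db is a caller-owned buffer (mysqlbinlog passes char last_db[FN_REFLEN+1] = "").
// Each event updates it as it prints, so a run of events in the same database
// produces a single "use" statement in the output.
static void print_use_if_changed(FILE *out, const char *db, size_t db_len,
                                 char *last_db)
{
  bool same_db = false;
  if (db && last_db)
  {
    same_db = (memcmp(last_db, db, db_len + 1) == 0);  // compare including the NUL
    if (!same_db)
      memcpy(last_db, db, db_len + 1);
  }
  if (db && db[0] && !same_db)
    fprintf(out, "use %s;\n", db);
}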
static void print_version() { - printf("%s Ver 1.2 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE); + printf("%s Ver 1.3 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE); } @@ -133,6 +135,7 @@ the mysql command line client\n\n"); -P, --port=port Use port to connect to the remove server\n\ -u, --user=username Connect to the remove server as username\n\ -p, --password=password Password to connect to remote server\n\ +-r, --result-file=file Direct output to a given file\n\ -j, --position=N Start reading the binlog at position N\n\ -t, --table=name Get raw table dump using COM_TABLE_DUMB\n\ -V, --version Print version and exit.\n\ @@ -163,17 +166,18 @@ static void dump_remote_file(NET* net, const char* fname) die("Failed reading a packet during the dump of %s ", fname); if(!short_form) - (void)my_fwrite(stdout, (byte*) net->read_pos, packet_len, MYF(0)); + (void)my_fwrite(result_file, (byte*) net->read_pos, packet_len,MYF(0)); } - fflush(stdout); + fflush(result_file); } static int parse_args(int *argc, char*** argv) { int c, opt_index = 0; - while((c = getopt_long(*argc, *argv, "so:#::h:j:u:p:P:t:?V", long_options, + result_file = stdout; + while((c = getopt_long(*argc, *argv, "so:#::h:j:u:p:P:r:t:?V", long_options, &opt_index)) != EOF) { switch(c) @@ -210,6 +214,11 @@ static int parse_args(int *argc, char*** argv) pass = my_strdup(optarg, MYF(0)); break; + case 'r': + if (!(result_file = my_fopen(optarg, O_WRONLY | O_BINARY, MYF(MY_WME)))) + exit(1); + break; + case 'u': use_remote = 1; user = my_strdup(optarg, MYF(0)); @@ -276,20 +285,21 @@ static void dump_remote_table(NET* net, const char* db, const char* table) die("Error sending the table dump command"); for(;;) - { - uint packet_len = my_net_read(net); - if(packet_len == 0) break; // end of file - if(packet_len == packet_error) - die("Error reading packet in table dump"); - my_fwrite(stdout, (byte*)net->read_pos, packet_len, MYF(MY_WME)); - fflush(stdout); - } + { + uint packet_len = my_net_read(net); + if(packet_len == 0) break; // end of file + if(packet_len == packet_error) + die("Error reading packet in table dump"); + my_fwrite(result_file, (byte*)net->read_pos, packet_len, MYF(MY_WME)); + fflush(result_file); + } } static void dump_remote_log_entries(const char* logname) { char buf[128]; + char last_db[FN_REFLEN+1] = ""; uint len; NET* net = &mysql->net; if(!position) position = 4; // protect the innocent from spam @@ -323,7 +333,7 @@ Unfortunately, no sweepstakes today, adjusted position to 4\n"); len - 1); if(ev) { - ev->print(stdout, short_form); + ev->print(result_file, short_form, last_db); if(ev->get_type_code() == LOAD_EVENT) dump_remote_file(net, ((Load_log_event*)ev)->fname); delete ev; @@ -338,6 +348,7 @@ static void dump_local_log_entries(const char* logname) File fd = -1; IO_CACHE cache,*file= &cache; ulonglong rec_count = 0; + char last_db[FN_REFLEN+1] = ""; if (logname && logname[0] != '-') { @@ -349,7 +360,7 @@ static void dump_local_log_entries(const char* logname) } else { - if (init_io_cache(file, fileno(stdout), 0, READ_CACHE, (my_off_t) 0, + if (init_io_cache(file, fileno(result_file), 0, READ_CACHE, (my_off_t) 0, 0, MYF(MY_WME | MY_NABP | MY_DONT_CHECK_FILESIZE))) exit(1); if (position) @@ -395,9 +406,9 @@ Could not read entry at offset %s : Error in log format or read error", if (rec_count >= offset) { if (!short_form) - printf("# at %s\n",llstr(old_off,llbuff)); + fprintf(result_file, "# at %s\n",llstr(old_off,llbuff)); - ev->print(stdout, short_form); + ev->print(result_file, short_form, 
last_db); } rec_count++; delete ev; @@ -445,6 +456,8 @@ int main(int argc, char** argv) dump_log_entries(*(argv++)); } } + if (result_file != stdout) + my_fclose(result_file, MYF(0)); if (use_remote) mc_mysql_close(mysql); return 0; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index bbda3eba1bb..2ed1a30d8f6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -99,6 +99,11 @@ int deny_severity = LOG_WARNING; typedef fp_except fp_except_t; #endif +#ifdef _AIX41 +extern "C" int initgroups(const char *,int); +#endif + + /* We can't handle floating point expections with threads, so disable this on freebsd */ @@ -151,7 +156,7 @@ static pthread_cond_t COND_handler_count; static uint handler_count; #endif #ifdef __WIN__ -static bool opt_console=0; +static bool opt_console=0,start_mode=0; #endif #ifdef HAVE_BERKELEY_DB @@ -1114,9 +1119,12 @@ static void start_signal_handler(void) #ifdef HAVE_LINUXTHREADS static sig_handler write_core(int sig); -#if defined(__i386__) && !defined(HAVE_STACK_TRACE_ON_SEGV) -#define SIGRETURN_FRAME_COUNT 1 -#define PTR_SANE(p) ((char*)p >= heap_start && (char*)p <= heap_end) +#if defined (__i386__) || defined(__alpha__) +#define LINUX_STACK_TRACE +#endif + +#ifdef LINUX_STACK_TRACE +#define PTR_SANE(p) ((p) && (char*)(p) >= heap_start && (char*)(p) <= heap_end) extern char* __bss_start; static char* heap_start, *heap_end; @@ -1125,43 +1133,101 @@ inline __volatile__ void print_str(const char* name, const char* val, int max_len) { fprintf(stderr, "%s at %p ", name, val); - if (!PTR_SANE(val)) - { - fprintf(stderr, " is invalid pointer\n"); - return; - } + if(!PTR_SANE(val)) + { + fprintf(stderr, " is invalid pointer\n"); + return; + } fprintf(stderr, "= "); - for (; max_len && PTR_SANE(val) && *val; --max_len) + for(; max_len && PTR_SANE(val) && *val; --max_len) fputc(*val++, stderr); fputc('\n', stderr); } +#endif + + +#ifdef LINUX_STACK_TRACE +#define SIGRETURN_FRAME_COUNT 1 + +#ifdef __alpha__ +// The only way to backtrace without a symbol table on alpha +// to find stq fp,N(sp), and the first byte +// of the instruction opcode will give us the value of N. From this +// we can find where the old value of fp is stored + +#define MAX_INSTR_IN_FUNC 10000 + +inline uchar** find_prev_fp(uint32* pc, uchar** fp) +{ + int i; + for(i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc) + { + uchar* p = (uchar*)pc; + if(p[2] == 222 && p[3] == 35) + { + return (uchar**)((uchar*)fp - *(short int*)p); + } + } + return 0; +} + +inline uint32* find_prev_pc(uint32* pc, uchar** fp) +{ + int i; + for(i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc) + { + char* p = (char*)pc; + if(p[1] == 0 && p[2] == 94 && p[3] == -73) + { + uint32* prev_pc = (uint32*)*((fp+p[0]/sizeof(fp))); + return prev_pc; + } + } + return 0; +} + +#endif inline __volatile__ void trace_stack() { uchar **stack_bottom; - uchar** ebp; - LINT_INIT(ebp); + uchar** fp; + LINT_INIT(fp); LINT_INIT(stack_bottom); fprintf(stderr, "Attempting backtrace. You can use the following information to find out\n\ -where mysqld died. If you see no messages after this, something went\n\ +where mysqld died. If you see no messages after this, something went\n\ terribly wrong...\n"); THD* thd = current_thd; uint frame_count = 0; +#ifdef __i386__ __asm __volatile__ ("movl %%ebp,%0" - :"=r"(ebp) - :"r"(ebp)); - if (!ebp) + :"=r"(fp) + :"r"(fp)); + if (!fp) { fprintf(stderr, "frame pointer (ebp) is NULL, did you compile with\n\ -fomit-frame-pointer? 
Aborting backtrace!\n"); return; } +#endif +#ifdef __alpha__ + __asm __volatile__ ("mov $15,%0" + :"=r"(fp) + :"r"(fp)); + if (!fp) + { + fprintf(stderr, "frame pointer (fp) is NULL, did you compile with\n\ +-fomit-frame-pointer? Aborting backtrace!\n"); + return; + } +#endif + if (!thd) { - fprintf(stderr, "Cannot determine thread, ebp=%p, backtrace may not be correct.\n", ebp); + fprintf(stderr, "Cannot determine thread, fp=%p, backtrace may not be correct.\n", fp); /* Assume that the stack starts at the previous even 65K */ ulong tmp= min(0x10000,thread_stack); stack_bottom= (uchar**) (((ulong) &stack_bottom + tmp) & @@ -1169,32 +1235,77 @@ terribly wrong...\n"); } else stack_bottom = (uchar**) thd->thread_stack; - if (ebp > stack_bottom || ebp < stack_bottom - thread_stack) + if (fp > stack_bottom || fp < stack_bottom - thread_stack) { - fprintf(stderr, - "Bogus stack limit or frame pointer, aborting backtrace.\n"); + fprintf(stderr, "Bogus stack limit or frame pointer,\ + fp=%p, stack_bottom=%p, thread_stack=%ld, aborting backtrace.\n", + fp, stack_bottom, thread_stack); return; } fprintf(stderr, "Stack range sanity check OK, backtrace follows:\n"); +#ifdef __alpha__ + fprintf(stderr, "Warning: Alpha stacks are difficult -\ + will be taking some wild guesses, stack trace may be incorrect or \ + terminate abruptly\n"); + // On Alpha, we need to get pc + uint32* pc; + __asm __volatile__ ("bsr %0, do_next; do_next: " + :"=r"(pc) + :"r"(pc)); +#endif - while (ebp < stack_bottom) + while (fp < stack_bottom) { - uchar** new_ebp = (uchar**)*ebp; +#ifdef __i386__ + uchar** new_fp = (uchar**)*fp; fprintf(stderr, "%p\n", frame_count == SIGRETURN_FRAME_COUNT ? - *(ebp+17) : *(ebp+1)); - if (new_ebp <= ebp ) + *(fp+17) : *(fp+1)); +#endif +#ifdef __alpha__ + uchar** new_fp = find_prev_fp(pc, fp); + if(frame_count == SIGRETURN_FRAME_COUNT - 1) + { + new_fp += 90; + } + + if(fp && pc) + { + pc = find_prev_pc(pc, fp); + if(pc) + fprintf(stderr, "%p\n", pc); + else + { + fprintf(stderr, "Not smart enough to deal with the rest\ + of this stack\n"); + goto print_glob_vars; + } + } + else + { + fprintf(stderr, "Not smart enough to deal with the rest of \ + this stack\n"); + goto print_glob_vars; + } +#endif + if (new_fp <= fp ) { - fprintf(stderr, "\ -New value of ebp failed sanity check, terminating backtrace!\n"); - return; + fprintf(stderr, "New value of fp=%p failed sanity check,\ + terminating stack trace!\n", new_fp); + goto print_glob_vars; } - ebp = new_ebp; + fp = new_fp; ++frame_count; } - fprintf(stderr, "Stack trace successful, tryint to get some variables.\n\ + + fprintf(stderr, "Stack trace seems successful - bottom reached\n"); + + print_glob_vars: + fprintf(stderr, "Please read http://www.mysql.com/doc/U/s/Using_stack_trace.html and follow instructions on how to resolve the stack trace. 
Resolved\n\ +stack trace is much more helpful in diagnosing the problem, so please do \n\ +resolve it\n"); + fprintf(stderr, "Trying to get some variables.\n\ Some pointers may be invalid and cause the dump to abort...\n"); - heap_start = __bss_start; heap_end = (char*)sbrk(0); print_str("thd->query", thd->query, 1024); fprintf(stderr, "thd->thread_id = %ld\n", thd->thread_id); @@ -1205,7 +1316,11 @@ In some cases of really bad corruption, this value may be invalid\n", fprintf(stderr, "Please use the information above to create a repeatable\n\ test case for the crash, and send it to bugs@lists.mysql.com\n"); } -#endif /* HAVE_LINUXTHREADS */ +#endif +#endif + +#ifdef HAVE_LINUXTHREADS +#define UNSAFE_DEFAULT_LINUX_THREADS 200 #endif static sig_handler handle_segfault(int sig) @@ -1215,18 +1330,50 @@ static sig_handler handle_segfault(int sig) // so not having the mutex is not as bad as possibly using a buggy // mutex - so we keep things simple if (segfaulted) - return; + { + fprintf(stderr, "Fatal signal %d while backtracing\n", sig); + exit(1); + } + segfaulted = 1; fprintf(stderr,"\ mysqld got signal %d;\n\ -The manual section 'Debugging a MySQL server' tells you how to use a\n\ -stack trace and/or the core file to produce a readable backtrace that may\n\ -help in finding out why mysqld died.\n",sig); +This could be because you hit a bug. It is also possible that \n\ +this binary or one of the libraries it was linked agaist is \n\ +corrupt, improperly built, or misconfigured. This error can also be\n\ +caused by malfunctioning hardware.", sig); + fprintf(stderr, "We will try our best to scrape up some info\n\ +that will hopefully help diagnose the problem, but since we have already\n\ +crashed, something is definitely wrong and this may fail\n"); + fprintf(stderr, "key_buffer_size=%ld\n", keybuff_size); + fprintf(stderr, "record_buffer=%ld\n", my_default_record_cache_size); + fprintf(stderr, "sort_buffer=%ld\n", sortbuff_size); + fprintf(stderr, "max_used_connections=%ld\n", max_used_connections); + fprintf(stderr, "max_connections=%ld\n", max_connections); + fprintf(stderr, "threads_connected=%d\n", thread_count); + fprintf(stderr, "It is possible that mysqld could use up to \n\ +key_buffer_size + (record_buffer + sort_buffer)*max_connections = %ld K\n\ +bytes of memory\n", (keybuff_size + (my_default_record_cache_size + + sortbuff_size) * max_connections)/ 1024); + fprintf(stderr, "Hope that's ok, if not, decrease some variables in the\n\ +equation\n"); + #if defined(HAVE_LINUXTHREADS) -#if defined(__i386__) && !defined(HAVE_STACK_TRACE_ON_SEGV) - trace_stack(); - fflush(stderr); -#endif /* __i386__ && !HAVE_STACK_TRACE_ON_SEGV */ + + if(sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS) + { + fprintf(stderr, "You seem to be running 32-bit Linux and\n\ + have %d concurrent connections. If you have not\n\ +changed STACK_SIZE in LinuxThreads and build the binary yourself,\n\ +LinuxThreads is quite likely to steal a part of global heap for a \n\ +thread stack. 
Please read http://www.mysql.com/doc/L/i/Linux.html\n", + thread_count); + } +#ifdef LINUX_STACK_TRACE + if(!(test_flags & TEST_NO_STACKTRACE)) + trace_stack(); + fflush(stderr); +#endif /* LINUX_STACK_TRACE */ if (test_flags & TEST_CORE_ON_SIGNAL) write_core(sig); #endif /* HAVE_LINUXTHREADS */ @@ -1255,7 +1402,12 @@ static void init_signals(void) struct sigaction sa; sa.sa_flags = 0; sigemptyset(&sa.sa_mask); sigprocmask(SIG_SETMASK,&sa.sa_mask,NULL); - if (!(test_flags & TEST_NO_STACKTRACE)) + +#ifdef LINUX_STACK_TRACE + heap_start = (char*)&__bss_start; +#endif + + if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL)) { sa.sa_handler=handle_segfault; sigaction(SIGSEGV, &sa, NULL); @@ -1959,12 +2111,24 @@ The server will not act as a slave."); sql_print_error("After lock_thread_count"); #endif #else - // remove the event, because it will not be valid anymore - Service.SetShutdownEvent(0); - if(hEventShutdown) CloseHandle(hEventShutdown); - // if it was started as service on NT try to stop the service - if(Service.IsNT()) - Service.Stop(); + if (Service.IsNT()) + { + if(start_mode) + { + if (WaitForSingleObject(hEventShutdown,INFINITE)==WAIT_OBJECT_0) + Service.Stop(); + } + else + { + Service.SetShutdownEvent(0); + if(hEventShutdown) CloseHandle(hEventShutdown); + } + } + else + { + Service.SetShutdownEvent(0); + if(hEventShutdown) CloseHandle(hEventShutdown); + } #endif /* Wait until cleanup is done */ @@ -2017,6 +2181,7 @@ int main(int argc, char **argv) else if (argc == 1) // No arguments; start as a service { // init service + start_mode = 1; long tmp=Service.Init(MYSQL_SERVICENAME,mysql_service); return 0; } @@ -2051,7 +2216,7 @@ static int bootstrap(FILE *file) if (pthread_create(&thd->real_id,&connection_attrib,handle_bootstrap, (void*) thd)) { - sql_print_error("Warning: Can't create thread to handle bootstrap"); + sql_print_error("Warning: Can't create thread to handle bootstrap"); return -1; } /* Wait for thread to die */ @@ -2492,6 +2657,7 @@ enum options { OPT_TEMP_POOL, OPT_DO_PSTACK, OPT_TX_ISOLATION, OPT_GEMINI_FLUSH_LOG, OPT_GEMINI_RECOVER, OPT_GEMINI_UNBUFFERED_IO, OPT_SKIP_SAFEMALLOC, + OPT_SKIP_STACK_TRACE }; static struct option long_options[] = { @@ -2615,11 +2781,12 @@ static struct option long_options[] = { {"skip-locking", no_argument, 0, (int) OPT_SKIP_LOCK}, {"skip-host-cache", no_argument, 0, (int) OPT_SKIP_HOST_CACHE}, {"skip-name-resolve", no_argument, 0, (int) OPT_SKIP_RESOLVE}, + {"skip-networking", no_argument, 0, (int) OPT_SKIP_NETWORKING}, {"skip-new", no_argument, 0, (int) OPT_SKIP_NEW}, {"skip-safemalloc", no_argument, 0, (int) OPT_SKIP_SAFEMALLOC}, {"skip-show-database", no_argument, 0, (int) OPT_SKIP_SHOW_DB}, {"skip-slave-start", no_argument, 0, (int) OPT_SKIP_SLAVE_START}, - {"skip-networking", no_argument, 0, (int) OPT_SKIP_NETWORKING}, + {"skip-stack-trace", no_argument, 0, (int) OPT_SKIP_STACK_TRACE}, {"skip-thread-priority", no_argument, 0, (int) OPT_SKIP_PRIOR}, {"sql-bin-update-same", no_argument, 0, (int) OPT_SQL_BIN_UPDATE_SAME}, #include "sslopt-longopts.h" @@ -2754,7 +2921,7 @@ CHANGEABLE_VAR changeable_vars[] = { 16384, 1024, 1024*1024L, MALLOC_OVERHEAD, 1024 }, { "net_retry_count", (long*) &mysqld_net_retry_count, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1 }, - { "net_read_timeout", (long*) &net_read_timeout, + { "net_read_timeout", (long*) &net_read_timeout, NET_READ_TIMEOUT, 1, 65535, 0, 1 }, { "net_write_timeout", (long*) &net_write_timeout, NET_WRITE_TIMEOUT, 1, 65535, 0, 1 }, @@ -2764,7 +2931,7 @@ CHANGEABLE_VAR 
changeable_vars[] = { 0, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD, IO_SIZE }, { "record_buffer", (long*) &my_default_record_cache_size, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE }, - { "slow_launch_time", (long*) &slow_launch_time, + { "slow_launch_time", (long*) &slow_launch_time, 2L, 0L, ~0L, 0, 1 }, { "sort_buffer", (long*) &sortbuff_size, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD, 1 }, @@ -2842,7 +3009,7 @@ struct show_var_st init_vars[]= { {"join_buffer_size", (char*) &join_buff_size, SHOW_LONG}, {"key_buffer_size", (char*) &keybuff_size, SHOW_LONG}, {"language", language, SHOW_CHAR}, - {"large_files_support", (char*) &opt_large_files, SHOW_BOOL}, + {"large_files_support", (char*) &opt_large_files, SHOW_BOOL}, #ifdef HAVE_MLOCKALL {"locked_in_memory", (char*) &locked_in_memory, SHOW_BOOL}, #endif @@ -2974,7 +3141,7 @@ static void use_help(void) { print_version(); printf("Use '--help' or '--no-defaults --help' for a list of available options\n"); -} +} static void usage(void) { @@ -3054,15 +3221,16 @@ static void usage(void) Don't use concurrent insert with MyISAM\n\ --skip-delay-key-write\n\ Ignore the delay_key_write option for all tables\n\ + --skip-host-cache Don't cache host names\n\ --skip-locking Don't use system locking. To use isamchk one has\n\ to shut down the server.\n\ --skip-name-resolve Don't resolve hostnames.\n\ All hostnames are IP's or 'localhost'\n\ --skip-networking Don't allow connection with TCP/IP.\n\ - --skip-new Don't use new, possible wrong routines.\n\ - --skip-host-cache Don't cache host names\n"); + --skip-new Don't use new, possible wrong routines.\n"); /* We have to break the string here because of VC++ limits */ puts("\ + --skip-stack-trace Don't print a stack trace on failure\n\ --skip-show-database Don't allow 'SHOW DATABASE' commands\n\ --skip-thread-priority\n\ Don't give threads different priorities.\n\ @@ -3245,11 +3413,11 @@ static void get_options(int argc,char **argv) case 'P': mysql_port= (unsigned int) atoi(optarg); break; -#if !defined(DBUG_OFF) && defined(SAFEMALLOC) +#if !defined(DBUG_OFF) && defined(SAFEMALLOC) case OPT_SAFEMALLOC_MEM_LIMIT: safemalloc_mem_limit = atoi(optarg); break; -#endif +#endif case OPT_SOCKET: mysql_unix_port= optarg; break; @@ -3319,14 +3487,14 @@ static void get_options(int argc,char **argv) break; // needs to be handled (as no-op) in non-debugging mode for test suite case (int)OPT_DISCONNECT_SLAVE_EVENT_COUNT: -#ifndef DBUG_OFF +#ifndef DBUG_OFF disconnect_slave_event_count = atoi(optarg); -#endif +#endif break; case (int)OPT_ABORT_SLAVE_EVENT_COUNT: -#ifndef DBUG_OFF +#ifndef DBUG_OFF abort_slave_event_count = atoi(optarg); -#endif +#endif break; case (int) OPT_LOG_SLAVE_UPDATES: opt_log_slave_updates = 1; @@ -3508,6 +3676,9 @@ static void get_options(int argc,char **argv) case (int) OPT_WANT_CORE: test_flags |= TEST_CORE_ON_SIGNAL; break; + case (int) OPT_SKIP_STACK_TRACE: + test_flags|=TEST_NO_STACKTRACE; + break; case (int) OPT_BIND_ADDRESS: if (optarg && isdigit(optarg[0])) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 98be3639a06..b95b97d670f 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -697,7 +697,9 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables, ** and that all key blocks are half full (normally things are ** much better) */ - uint keys_per_block= head->file->block_size/2/head->key_info[param.real_keynr[idx]].key_length+1; + uint keys_per_block= head->file->block_size/2/ + 
(head->key_info[param.real_keynr[idx]].key_length+ + head->file->ref_length) + 1; found_read_time=((double) (found_records+keys_per_block-1)/ (double) keys_per_block); } diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt index 74139b30a85..ff29fffe958 100644 --- a/sql/share/english/errmsg.txt +++ b/sql/share/english/errmsg.txt @@ -90,7 +90,7 @@ "File '%-.80s' already exists", "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld", "Records: %ld Duplicates: %ld", -"Incorrect sub part key. The used key part isn't a string or the used length is longer than the key part", +"Incorrect sub part key. The used key part isn't a string, the used length is longer than the key part or the table handler doesn't support unique sub keys", "You can't delete all columns with ALTER TABLE. Use DROP TABLE instead", "Can't DROP '%-.64s'. Check that column/key exists", "Records: %ld Duplicates: %ld Warnings: %ld", diff --git a/sql/slave.cc b/sql/slave.cc index 0220f574112..6b9c376a625 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1191,7 +1191,7 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused))) pthread_cond_broadcast(&COND_slave_start); pthread_mutex_unlock(&LOCK_slave); - int error = 1; + // int error = 1; bool retried_once = 0; ulonglong last_failed_pos = 0; @@ -1293,9 +1293,19 @@ try again, log '%s' at postion %s", RPL_LOG_NAME, sql_print_error("Slave thread killed while reading event"); goto err; } - + + if (event_len == packet_error) { + if(mc_mysql_errno(mysql) == ER_NET_PACKET_TOO_LARGE) + { + sql_print_error("Log entry on master is longer than \ +max_allowed_packet on slave. Slave thread will be aborted. If the entry is \ +really supposed to be that long, restart the server with a higher value of \ +max_allowed_packet. The current value is %ld", max_allowed_packet); + goto err; + } + thd->proc_info = "Waiting to reconnect after a failed read"; if(mysql->net.vio) vio_close(mysql->net.vio); @@ -1369,7 +1379,7 @@ the slave thread with \"mysqladmin start-slave\". 
We stopped at log \ } } - error = 0; + // error = 0; err: // print the current replication position sql_print_error("Slave thread exiting, replication stopped in log '%s' at \ diff --git a/sql/sql_base.cc b/sql/sql_base.cc index a0bb003eeb4..b164242af44 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1405,6 +1405,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) &refresh)) && refresh) ; if (table) { + int error; table_list->table=table; table->grant= table_list->grant; if (thd->locked_tables) @@ -1416,7 +1417,12 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) my_printf_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, ER(ER_TABLE_NOT_LOCKED_FOR_WRITE), MYF(0),table_list->name); - DBUG_RETURN(0); + table=0; + } + else if ((error=table->file->start_stmt(thd))) + { + table->file->print_error(error,MYF(0)); + table=0; } thd->proc_info=0; DBUG_RETURN(table); @@ -1443,10 +1449,10 @@ int open_and_lock_tables(THD *thd,TABLE_LIST *tables) int lock_tables(THD *thd,TABLE_LIST *tables) { + TABLE_LIST *table; if (tables && !thd->locked_tables) { uint count=0; - TABLE_LIST *table; for (table = tables ; table ; table=table->next) count++; TABLE **start,**ptr; @@ -1457,6 +1463,18 @@ int lock_tables(THD *thd,TABLE_LIST *tables) if (!(thd->lock=mysql_lock_tables(thd,start,count))) return -1; /* purecov: inspected */ } + else + { + for (table = tables ; table ; table=table->next) + { + int error; + if ((error=table->table->file->start_stmt(thd))) + { + table->table->file->print_error(error,MYF(0)); + return -1; + } + } + } return 0; } @@ -1483,8 +1501,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, DBUG_RETURN(0); /* purecov: inspected */ if (openfrm(path, table_name, - (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | - HA_TRY_READ_ONLY), + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options, tmp_table)) @@ -1493,11 +1510,13 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, } tmp_table->file->extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL - tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked - tmp_table->tmp_table = 1; + tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked + tmp_table->tmp_table = (tmp_table->file->has_transactions() ? 
+ TRANSACTIONAL_TMP_TABLE : TMP_TABLE); tmp_table->table_cache_key=(char*) (tmp_table+1); - tmp_table->key_length= (uint) (strmov(strmov(tmp_table->table_cache_key,db) - +1, table_name) + tmp_table->key_length= (uint) (strmov((tmp_table->real_name= + strmov(tmp_table->table_cache_key,db) + +1), table_name) - tmp_table->table_cache_key)+1; int4store(tmp_table->table_cache_key + tmp_table->key_length, thd->slave_proxy_id); diff --git a/sql/sql_class.h b/sql/sql_class.h index fdfa6ec411b..ac6c50d0166 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -71,9 +71,12 @@ public: ~MYSQL_LOG(); pthread_mutex_t* get_log_lock() { return &LOCK_log; } void set_index_file_name(const char* index_file_name = 0); + void init(enum_log_type log_type_arg); void open(const char *log_name,enum_log_type log_type, const char *new_name=0); void new_file(void); + bool open_index(int options); + void close_index(); bool write(THD *thd, enum enum_server_command command,const char *format,...); bool write(THD *thd, const char *query, uint query_length, time_t query_start=0); @@ -265,7 +268,7 @@ public: #endif ulonglong next_insert_id,last_insert_id,current_insert_id, limit_found_rows; ha_rows select_limit,offset_limit,default_select_limit,cuted_fields, - max_join_size,sent_row_count; + max_join_size, sent_row_count, examined_row_count; table_map used_tables; ulong query_id,version, inactive_timeout,options,thread_id; ulong gemini_spin_retries; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index b5edf084b53..916af73acae 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1842,6 +1842,7 @@ mysql_execute_command(void) { thd->lock=thd->locked_tables; thd->locked_tables=0; // Will be automaticly closed + end_active_trans(thd); } if (thd->global_read_lock) { @@ -2293,7 +2294,7 @@ mysql_init_query(THD *thd) thd->lex.table_list.next= (byte**) &thd->lex.table_list.first; thd->fatal_error=0; // Safety thd->last_insert_id_used=thd->query_start_used=thd->insert_id_used=0; - thd->sent_row_count=0; + thd->sent_row_count=thd->examined_row_count=0; DBUG_VOID_RETURN; } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 05e64670df5..e5039d118be 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -132,7 +132,7 @@ static int send_file(THD *thd) } -static File open_log(IO_CACHE *log, const char *log_file_name, +File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg) { File file; @@ -294,7 +294,7 @@ void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags) goto err; } - if ((file=open_log(&log, log_file_name, &errmsg)) < 0) + if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0) goto err; if(pos < 4) @@ -483,7 +483,7 @@ sweepstakes if you report the bug"; // fake Rotate_log event just in case it did not make it to the log // otherwise the slave make get confused about the offset - if ((file=open_log(&log, log_file_name, &errmsg)) < 0 || + if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0 || fake_rotate_event(net, packet, log_file_name, &errmsg)) goto err; @@ -694,7 +694,9 @@ int change_master(THD* thd) glob_mi.pos = lex_mi->pos; if(lex_mi->host) - strmake(glob_mi.host, lex_mi->host, sizeof(glob_mi.host)); + { + strmake(glob_mi.host, lex_mi->host, sizeof(glob_mi.host)); + } if(lex_mi->user) strmake(glob_mi.user, lex_mi->user, sizeof(glob_mi.user)); if(lex_mi->password) diff --git a/sql/sql_repl.h b/sql/sql_repl.h index f8a67f51aa2..68f2b4ba6c4 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -9,6 +9,9 @@ extern uint32 server_id; extern bool server_id_supplied; extern 
I_List<i_string> binlog_do_db, binlog_ignore_db; +File open_binlog(IO_CACHE *log, const char *log_file_name, + const char **errmsg); + int start_slave(THD* thd = 0, bool net_report = 1); int stop_slave(THD* thd = 0, bool net_report = 1); int change_master(THD* thd); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2f417fc2618..6262219439d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -36,7 +36,8 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref", static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, DYNAMIC_ARRAY *keyuse,List<Item_func_match> &ftfuncs); -static bool update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, +static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse, + JOIN_TAB *join_tab, uint tables,COND *conds,table_map table_map, List<Item_func_match> &ftfuncs); static int sort_keyuse(KEYUSE *a,KEYUSE *b); @@ -106,12 +107,14 @@ static uint find_shortest_key(TABLE *table, key_map usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, ha_rows select_limit); static int create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit); -static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields); +static bool fix_having(JOIN *join, Item **having); +static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields, + Item *having); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, - ulong offset); + ulong offset,Item *having); static int remove_dup_with_hash_index(THD *thd, TABLE *table, uint field_count, Field **first_field, - ulong key_length); + ulong key_length,Item *having); static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count); static ulong used_blob_length(CACHE_FIELD **ptr); static bool store_record_in_cache(JOIN_CACHE *cache); @@ -210,7 +213,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, { if (item->with_sum_func) flag|=1; - else if (!item->const_item()) + else if (!(flag & 2) && !item->const_item()) flag|=2; } if (flag == 3) @@ -265,7 +268,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, join.join_tab=0; join.tmp_table_param.copy_field=0; join.sum_funcs=0; - join.send_records=join.found_records=0; + join.send_records=join.found_records=join.examined_rows=0; join.tmp_table_param.end_write_records= HA_POS_ERROR; join.first_record=join.sort_and_group=0; join.select_options=select_options; @@ -725,8 +728,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds, if (select_distinct && ! 
group) { thd->proc_info="Removing duplicates"; - if (remove_duplicates(&join,tmp_table,fields)) - goto err; /* purecov: inspected */ + if (having) + having->update_used_tables(); + if (remove_duplicates(&join,tmp_table,fields, having)) + goto err; /* purecov: inspected */ + having=0; select_distinct=0; } tmp_table->reginfo.lock_type=TL_UNLOCK; @@ -877,9 +883,9 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, table->reginfo.not_exists_optimize=0; bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->keys); all_table_map|= table->map; + s->join=join; if ((s->on_expr=tables->on_expr)) { - // table->maybe_null=table->outer_join=1; // Mark for send fields if (!table->file->records) { // Empty table s->key_dependent=s->dependent=0; @@ -950,7 +956,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, } if (conds || outer_join) - if (update_ref_and_keys(keyuse_array,stat,join->tables, + if (update_ref_and_keys(join->thd,keyuse_array,stat,join->tables, conds,~outer_join,ftfuncs)) DBUG_RETURN(1); @@ -1453,8 +1459,9 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) */ static bool -update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables, - COND *cond, table_map normal_tables,List<Item_func_match> &ftfuncs) +update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, + uint tables, COND *cond, table_map normal_tables, + List<Item_func_match> &ftfuncs) { uint and_level,i,found_eq_constant; @@ -1462,8 +1469,7 @@ update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables, KEY_FIELD *key_fields,*end; if (!(key_fields=(KEY_FIELD*) - my_malloc(sizeof(key_fields[0])* - (current_thd->cond_count+1)*2,MYF(0)))) + thd->alloc(sizeof(key_fields[0])*(thd->cond_count+1)*2))) return TRUE; /* purecov: inspected */ and_level=0; end=key_fields; if (cond) @@ -1477,14 +1483,10 @@ update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables, } } if (init_dynamic_array(keyuse,sizeof(KEYUSE),20,64)) - { - my_free((gptr) key_fields,MYF(0)); return TRUE; - } /* fill keyuse with found key parts */ for (KEY_FIELD *field=key_fields ; field != end ; field++) add_key_part(keyuse,field); - my_free((gptr) key_fields,MYF(0)); } if (ftfuncs.elements) @@ -1905,7 +1907,7 @@ cache_record_length(JOIN *join,uint idx) { uint length; JOIN_TAB **pos,**end; - THD *thd=current_thd; + THD *thd=join->thd; length=0; for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ; @@ -2087,7 +2089,7 @@ get_best_combination(JOIN *join) } else { - THD *thd=current_thd; + THD *thd=join->thd; for (i=0 ; i < keyparts ; keyuse++,i++) { while (keyuse->keypart != i || @@ -2233,6 +2235,7 @@ make_simple_join(JOIN *join,TABLE *tmp_table) join_tab->ref.key = -1; join_tab->not_used_in_distinct=0; join_tab->read_first_record= join_init_read_record; + join_tab->join=join; bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record)); tmp_table->status=0; tmp_table->null_row=0; @@ -2551,8 +2554,11 @@ join_free(JOIN *join) tab->table->key_read=0; tab->table->file->extra(HA_EXTRA_NO_KEYREAD); } - tab->table->file->index_end(); + /* Don't free index if we are using read_record */ + if (!tab->read_record.table) + tab->table->file->index_end(); } + end_read_record(&tab->read_record); } join->table=0; } @@ -3390,7 +3396,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE; table->blob_ptr_size=mi_portable_sizeof_char_ptr; table->map=1; - table->tmp_table=1; + table->tmp_table= TMP_TABLE; 
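At this point the internal temporary table built for the join is tagged with the explicit TMP_TABLE value instead of the old boolean flag. Together with the tmp_table checks in sql/lock.cc earlier in this change and the enum added to sql/table.h later on, the effect is that only plain internal temporary tables bypass the lock layer, while transactional temporary tables (those whose handler reports has_transactions()) are still locked. A reduced sketch of the distinction, with the table struct pared down to the single field involved; this is an illustration, not the server's actual TABLE definition:

// Mirrors the enum added to sql/table.h in this change.
enum tmp_table_type { NO_TMP_TABLE = 0, TMP_TABLE = 1, TRANSACTIONAL_TMP_TABLE = 2 };

struct MiniTable { tmp_table_type tmp_table; };   // illustrative stand-in for TABLE

// get_lock_data() now skips only plain temporary tables; transactional ones must
// still reach the handler so the statement is registered with the transaction.
static bool include_in_lock_set(const MiniTable &t)
{
  return t.tmp_table != TMP_TABLE;
}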
table->db_low_byte_first=1; // True for HEAP and MyISAM table->temp_pool_slot = temp_pool_slot; @@ -3931,8 +3937,8 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error, table->file=0; *table =new_table; table->file->change_table_ptr(table); - - thd->proc_info=save_proc_info; + thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ? + "Copying to tmp table on disk" : save_proc_info); DBUG_RETURN(0); err: @@ -4112,6 +4118,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) bool not_used_in_distinct=join_tab->not_used_in_distinct; ha_rows found_records=join->found_records; READ_RECORD *info= &join_tab->read_record; + join->examined_rows++; do { @@ -4446,7 +4453,8 @@ join_init_read_record(JOIN_TAB *tab) { if (tab->select && tab->select->quick) tab->select->quick->reset(); - init_read_record(&tab->read_record,current_thd, tab->table, tab->select,1,1); + init_read_record(&tab->read_record, tab->join->thd, tab->table, + tab->select,1,1); return (*tab->read_record.read_record)(&tab->read_record); } @@ -4499,6 +4507,7 @@ join_init_read_next_with_key(READ_RECORD *info) return 0; } + static int join_init_read_last_with_key(JOIN_TAB *tab) { @@ -4669,7 +4678,11 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), else { if (!join->first_record) + { + /* No matching rows for group function */ clear_tables(join); + copy_fields(&join->tmp_table_param); + } if (join->having && join->having->val_int() == 0) error= -1; // Didn't satisfy having else @@ -4914,7 +4927,11 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (idx < (int) join->send_group_parts) { if (!join->first_record) + { + /* No matching rows for group function */ clear_tables(join); + copy_fields(&join->tmp_table_param); + } copy_sum_funcs(join->sum_funcs); if (!join->having || join->having->val_int()) { @@ -5235,6 +5252,7 @@ create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit) { SORT_FIELD *sortorder; uint length; + ha_rows examined_rows; TABLE *table=tab->table; SQL_SELECT *select=tab->select; DBUG_ENTER("create_sort_index"); @@ -5273,12 +5291,13 @@ create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit) } } table->found_records=filesort(&table,sortorder,length, - select, 0L, select_limit); + select, 0L, select_limit, &examined_rows); delete select; // filesort did select tab->select=0; tab->select_cond=0; tab->type=JT_ALL; // Read with normal read_record tab->read_first_record= join_init_read_record; + tab->join->examined_rows+=examined_rows; if (table->key_read) // Restore if we used indexes { table->key_read=0; @@ -5290,6 +5309,38 @@ err: } +/* +** Add the HAVING criteria to table->select +*/ + +static bool fix_having(JOIN *join, Item **having) +{ + (*having)->update_used_tables(); // Some tables may have been const + JOIN_TAB *table=&join->join_tab[join->const_tables]; + table_map used_tables= join->const_table_map | table->table->map; + + Item* sort_table_cond=make_cond_for_table(*having,used_tables,used_tables); + if (sort_table_cond) + { + if (!table->select) + if (!(table->select=new SQL_SELECT)) + return 1; + if (!table->select->cond) + table->select->cond=sort_table_cond; + else // This should never happen + if (!(table->select->cond=new Item_cond_and(table->select->cond, + sort_table_cond))) + return 1; + table->select_cond=table->select->cond; + DBUG_EXECUTE("where",print_where(table->select_cond, + "select and having");); + *having=make_cond_for_table(*having,~ (table_map) 0,~used_tables); + 
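The fix_having() routine added here pushes the part of HAVING that can be evaluated against the sorted table into that table's SQL_SELECT and leaves the remainder in *having. Independently of that, the DISTINCT path in the mysql_select() hunk earlier in this change now hands any pending HAVING to remove_duplicates() and clears it afterwards. Condensed shape of that DISTINCT path, with error handling elided; this is not a drop-in excerpt:

/* DISTINCT without GROUP BY, when a HAVING clause is still pending */
if (select_distinct && !group)
{
  if (having)
    having->update_used_tables();          /* some tables may have become const */
  if (remove_duplicates(&join, tmp_table, fields, having))
    return error;                          /* the original code uses goto err */
  having= 0;                               /* fully consumed during duplicate removal */
  select_distinct= 0;
}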
DBUG_EXECUTE("where",print_where(*having,"having after make_cond");); + } + return 0; +} + + /***************************************************************************** ** Remove duplicates from tmp table ** This should be recoded to add a uniuqe index to the table and remove @@ -5330,7 +5381,7 @@ static void free_blobs(Field **ptr) static int -remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields) +remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having) { int error; ulong reclength,offset; @@ -5367,9 +5418,10 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields) sortbuff_size))) error=remove_dup_with_hash_index(join->thd, entry, field_count, first_field, - reclength); + reclength, having); else - error=remove_dup_with_compare(join->thd, entry, first_field, offset); + error=remove_dup_with_compare(join->thd, entry, first_field, offset, + having); free_blobs(first_field); DBUG_RETURN(error); @@ -5377,19 +5429,19 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields) static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, - ulong offset) + ulong offset, Item *having) { handler *file=table->file; - char *org_record,*new_record; + char *org_record,*new_record, *record; int error; ulong reclength=table->reclength-offset; DBUG_ENTER("remove_dup_with_compare"); - org_record=(char*) table->record[0]+offset; + org_record=(char*) (record=table->record[0])+offset; new_record=(char*) table->record[1]+offset; file->rnd_init(); - error=file->rnd_next(table->record[0]); + error=file->rnd_next(record); for (;;) { if (thd->killed) @@ -5406,6 +5458,12 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, break; goto err; } + if (having && !having->val_int()) + { + if ((error=file->delete_row(record))) + goto err; + continue; + } if (copy_blobs(first_field)) { my_error(ER_OUT_OF_SORTMEMORY,MYF(0)); @@ -5418,7 +5476,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, bool found=0; for (;;) { - if ((error=file->rnd_next(table->record[0]))) + if ((error=file->rnd_next(record))) { if (error == HA_ERR_RECORD_DELETED) continue; @@ -5428,19 +5486,19 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, } if (compare_record(table, first_field) == 0) { - if ((error=file->delete_row(table->record[0]))) + if ((error=file->delete_row(record))) goto err; } else if (!found) { found=1; - file->position(table->record[0]); // Remember position + file->position(record); // Remember position } } if (!found) break; // End of file /* Restart search on next row */ - error=file->restart_rnd_next(table->record[0],file->ref); + error=file->restart_rnd_next(record,file->ref); } file->extra(HA_EXTRA_NO_CACHE); @@ -5461,7 +5519,8 @@ err: static int remove_dup_with_hash_index(THD *thd, TABLE *table, uint field_count, Field **first_field, - ulong key_length) + ulong key_length, + Item *having) { byte *key_buffer, *key_pos, *record=table->record[0]; int error; @@ -5509,6 +5568,12 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, break; goto err; } + if (having && !having->val_int()) + { + if ((error=file->delete_row(record))) + goto err; + continue; + } /* copy fields to key buffer */ field_length=field_lengths; @@ -5524,7 +5589,8 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, if ((error=file->delete_row(record))) goto err; } - (void) hash_insert(&hash, key_pos-key_length); + else + (void) hash_insert(&hash, key_pos-key_length); 
key_pos+=extra_length; } my_free((char*) key_buffer,MYF(0)); diff --git a/sql/sql_select.h b/sql/sql_select.h index 9539bb14842..87157b1465f 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -102,6 +102,7 @@ typedef struct st_join_table { bool cached_eq_ref_table,eq_ref_table,not_used_in_distinct; TABLE_REF ref; JOIN_CACHE cache; + JOIN *join; } JOIN_TAB; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index faa899b719f..ac89b7a2782 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -786,7 +786,10 @@ store_create_info(THD *thd, TABLE *table, String *packet) List<Item> field_list; char tmp[MAX_FIELD_WIDTH]; String type(tmp, sizeof(tmp)); - packet->append("CREATE TABLE ", 13); + if (table->tmp_table) + packet->append("CREATE TEMPORARY TABLE ", 23); + else + packet->append("CREATE TABLE ", 13); append_identifier(thd,packet,table->real_name); packet->append(" (\n", 3); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 77aaf1edae4..ad39b91a5ca 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -479,12 +479,16 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, } } else if (column->length > length || - (f_is_packed(sql_field->pack_flag) && column->length != length)) + ((f_is_packed(sql_field->pack_flag) || + ((file->option_flag() & HA_NO_PREFIX_CHAR_KEYS) && + (key_info->flags & HA_NOSAME))) && + column->length != length)) { my_error(ER_WRONG_SUB_KEY,MYF(0)); DBUG_RETURN(-1); } - length=column->length; + if (!(file->option_flag() & HA_NO_PREFIX_CHAR_KEYS)) + length=column->length; } else if (length == 0) { @@ -1426,21 +1430,20 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info, create_list,key_list,1,1))) // no logging DBUG_RETURN(error); + + if (table->tmp_table) + new_table=open_table(thd,new_db,tmp_name,tmp_name,0); + else { - if (table->tmp_table) - new_table=open_table(thd,new_db,tmp_name,tmp_name,0); - else - { - char path[FN_REFLEN]; - (void) sprintf(path,"%s/%s/%s",mysql_data_home,new_db,tmp_name); - fn_format(path,path,"","",4); - new_table=open_temporary_table(thd, path, new_db, tmp_name,0); - } - if (!new_table) - { - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - goto err; - } + char path[FN_REFLEN]; + (void) sprintf(path,"%s/%s/%s",mysql_data_home,new_db,tmp_name); + fn_format(path,path,"","",4); + new_table=open_temporary_table(thd, path, new_db, tmp_name,0); + } + if (!new_table) + { + VOID(quick_rm_table(new_db_type,new_db,tmp_name)); + goto err; } save_time_stamp=new_table->time_stamp; @@ -1633,6 +1636,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, TABLE_LIST tables; List<Item> fields; List<Item> all_fields; + ha_rows examined_rows; DBUG_ENTER("copy_data_between_tables"); if (!(copy= new Copy_field[to->fields])) @@ -1668,7 +1672,8 @@ copy_data_between_tables(TABLE *from,TABLE *to, if (setup_order(thd, &tables, fields, all_fields, order) || !(sortorder=make_unireg_sortorder(order, &length)) || (from->found_records = filesort(&from, sortorder, length, - (SQL_SELECT *) 0, 0L, HA_POS_ERROR)) + (SQL_SELECT *) 0, 0L, HA_POS_ERROR, + &examined_rows)) == HA_POS_ERROR) goto err; }; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 357ba41046c..33c0a4d1758 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -72,339 +72,220 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize); %token END_OF_INPUT -%token ACTION +%token EQ +%token EQUAL_SYM +%token GE +%token GT_SYM +%token LE +%token LT +%token NE +%token IS +%token SHIFT_LEFT +%token SHIFT_RIGHT +%token SET_VAR + +%token AVG_SYM +%token COUNT_SYM 
+%token MAX_SYM +%token MIN_SYM +%token SUM_SYM +%token STD_SYM + %token ADD +%token ALTER %token AFTER_SYM -%token AGAINST +%token ANALYZE_SYM +%token BEGIN_SYM +%token CHANGE +%token COMMENT_SYM +%token COMMIT_SYM +%token CREATE +%token CROSS +%token DELETE_SYM +%token DROP +%token INSERT +%token FLUSH_SYM +%token SELECT_SYM +%token MASTER_SYM +%token REPAIR +%token RESET_SYM +%token PURGE +%token SLAVE +%token START_SYM +%token STOP_SYM +%token TRUNCATE_SYM +%token ROLLBACK_SYM +%token OPTIMIZE +%token SHOW +%token UPDATE_SYM +%token KILL_SYM +%token LOAD +%token LOCK_SYM +%token UNLOCK_SYM + +%token ACTION %token AGGREGATE_SYM %token ALL -%token ALTER -%token ANALYZE_SYM %token AND %token AS %token ASC -%token ATAN -%token AUTOCOMMIT %token AUTO_INC +%token AUTOCOMMIT %token AVG_ROW_LENGTH -%token AVG_SYM -%token BACKUP_SYM -%token BEGIN_SYM -%token BENCHMARK_SYM +%token BACKUP_SYM %token BERKELEY_DB_SYM -%token BETWEEN_SYM -%token BIGINT %token BINARY -%token BIT_AND -%token BIT_OR %token BIT_SYM -%token BLOB_SYM %token BOOL_SYM %token BOTH %token BY %token CASCADE -%token CASE_SYM -%token CHANGE -%token CHANGED -%token CHAR_SYM %token CHECKSUM_SYM %token CHECK_SYM -%token COALESCE -%token CLOSE_SYM +%token COMMITTED_SYM %token COLUMNS %token COLUMN_SYM -%token COMMENT_SYM -%token COMMITTED_SYM -%token COMMIT_SYM -%token COMPRESSED_SYM -%token CONCAT -%token CONCAT_WS +%token CONCURRENT %token CONSTRAINT -%token COUNT_SYM -%token CREATE -%token CROSS -%token CURDATE -%token CURTIME -%token DATABASE %token DATABASES %token DATA_SYM -%token DATETIME -%token DATE_ADD_INTERVAL -%token DATE_SUB_INTERVAL -%token DATE_SYM -%token DAY_HOUR_SYM -%token DAY_MINUTE_SYM -%token DAY_SECOND_SYM -%token DAY_SYM -%token DECIMAL_SYM -%token DECODE_SYM %token DEFAULT %token DELAYED_SYM %token DELAY_KEY_WRITE_SYM -%token DELETE_SYM %token DESC %token DESCRIBE %token DISTINCT -%token DOUBLE_SYM -%token DROP -%token DUMPFILE %token DYNAMIC_SYM -%token ELSE -%token ELT_FUNC %token ENCLOSED -%token ENCODE_SYM -%token ENCRYPT -%token END -%token ENUM -%token EQ -%token EQUAL_SYM %token ESCAPED %token ESCAPE_SYM %token EXISTS -%token EXPORT_SET %token EXTENDED_SYM -%token EXTRACT_SYM -%token FAST_SYM -%token FIELD_FUNC %token FILE_SYM %token FIRST_SYM %token FIXED_SYM %token FLOAT_NUM -%token FLOAT_SYM -%token FLUSH_SYM %token FOREIGN -%token FORMAT_SYM -%token FOR_SYM %token FROM -%token FROM_UNIXTIME %token FULL -%token FULLTEXT_SYM -%token FUNC_ARG0 -%token FUNC_ARG1 -%token FUNC_ARG2 -%token FUNC_ARG3 -%token GE +%token FULLTEXT_SYM +%token GEMINI_SYM %token GEMINI_SPIN_RETRIES -%token GEMINI_SYM -%token GLOBAL_SYM +%token GLOBAL_SYM %token GRANT %token GRANTS %token GREATEST_SYM %token GROUP -%token GROUP_UNIQUE_USERS -%token GT_SYM %token HAVING -%token HANDLER_SYM %token HEAP_SYM %token HEX_NUM %token HIGH_PRIORITY %token HOSTS_SYM -%token HOUR_MINUTE_SYM -%token HOUR_SECOND_SYM -%token HOUR_SYM %token IDENT -%token IDENTIFIED_SYM -%token IF %token IGNORE_SYM %token INDEX %token INFILE %token INNER_SYM %token INNOBASE_SYM -%token INSERT -%token INSERT_ID -%token INTERVAL_SYM %token INTO -%token INT_SYM %token IN_SYM -%token IS +%token ISOLATION %token ISAM_SYM -%token ISOLATION %token JOIN_SYM %token KEYS %token KEY_SYM -%token KILL_SYM -%token LAST_INSERT_ID -%token LAST_SYM -%token LE %token LEADING %token LEAST_SYM -%token LEFT -%token LEVEL_SYM +%token LEVEL_SYM %token LEX_HOSTNAME %token LIKE -%token LIMIT %token LINES -%token LOAD %token LOCAL_SYM -%token LOCATE -%token LOCK_SYM %token LOGS_SYM 
-%token LONGBLOB
-%token LONGTEXT
 %token LONG_NUM
 %token LONG_SYM
 %token LOW_PRIORITY
-%token LT
-%token MAKE_SET_SYM
-%token MASTER_CONNECT_RETRY_SYM
-%token MASTER_HOST_SYM
-%token MASTER_LOG_FILE_SYM
-%token MASTER_LOG_POS_SYM
-%token MASTER_PASSWORD_SYM
-%token MASTER_PORT_SYM
-%token MASTER_SYM
-%token MASTER_USER_SYM
+%token MASTER_HOST_SYM
+%token MASTER_USER_SYM
+%token MASTER_LOG_FILE_SYM
+%token MASTER_LOG_POS_SYM
+%token MASTER_PASSWORD_SYM
+%token MASTER_PORT_SYM
+%token MASTER_CONNECT_RETRY_SYM
 %token MATCH
 %token MAX_ROWS
-%token MAX_SYM
-%token MEDIUMBLOB
-%token MEDIUMINT
-%token MEDIUMTEXT
 %token MEDIUM_SYM
 %token MERGE_SYM
-%token MINUTE_SECOND_SYM
-%token MINUTE_SYM
 %token MIN_ROWS
-%token MIN_SYM
-%token MODE_SYM
-%token MODIFY_SYM
-%token MONTH_SYM
 %token MYISAM_SYM
 %token NATIONAL_SYM
 %token NATURAL
 %token NCHAR_SYM
-%token NE
-%token NEXT_SYM
 %token NOT
-%token NOW_SYM
 %token NO_SYM
 %token NULL_SYM
 %token NUM
-%token NUMERIC_SYM
 %token ON
 %token OPEN_SYM
-%token OPTIMIZE
 %token OPTION
 %token OPTIONALLY
 %token OR
-%token ORDER_SYM
 %token OR_OR_CONCAT
+%token ORDER_SYM
 %token OUTER
 %token OUTFILE
+%token DUMPFILE
 %token PACK_KEYS_SYM
 %token PARTIAL
-%token PASSWORD
-%token POSITION_SYM
-%token PRECISION
-%token PREV_SYM
 %token PRIMARY_SYM
 %token PRIVILEGES
-%token PROCEDURE
 %token PROCESS
 %token PROCESSLIST_SYM
-%token PURGE
-%token QUICK
 %token RAID_0_SYM
-%token RAID_CHUNKS
-%token RAID_CHUNKSIZE
 %token RAID_STRIPED_SYM
 %token RAID_TYPE
-%token RAND
+%token RAID_CHUNKS
+%token RAID_CHUNKSIZE
 %token READ_SYM
-%token REAL
 %token REAL_NUM
 %token REFERENCES
 %token REGEXP
 %token RELOAD
 %token RENAME
-%token REPAIR
 %token REPEATABLE_SYM
-%token REPLACE
-%token RESET_SYM
-%token RESTORE_SYM
+%token RESTORE_SYM
 %token RESTRICT
 %token REVOKE
-%token RIGHT
-%token ROLLBACK_SYM
-%token ROUND
 %token ROWS_SYM
 %token ROW_FORMAT_SYM
 %token ROW_SYM
-%token SECOND_SYM
-%token SELECT_SYM
+%token SET
 %token SERIALIZABLE_SYM
 %token SESSION_SYM
-%token SET
-%token SET_VAR
-%token SHARE_SYM
-%token SHIFT_LEFT
-%token SHIFT_RIGHT
-%token SHOW
 %token SHUTDOWN
-%token SLAVE
-%token SMALLINT
-%token SQL_AUTO_IS_NULL
-%token SQL_BIG_RESULT
-%token SQL_BIG_SELECTS
-%token SQL_BIG_TABLES
-%token SQL_BUFFER_RESULT
-%token SQL_LOG_BIN
-%token SQL_LOG_OFF
-%token SQL_LOG_UPDATE
-%token SQL_LOW_PRIORITY_UPDATES
-%token SQL_MAX_JOIN_SIZE
-%token SQL_QUOTE_SHOW_CREATE
-%token SQL_SAFE_UPDATES
-%token SQL_SELECT_LIMIT
-%token SQL_SMALL_RESULT
-%token SQL_WARNINGS
 %token STARTING
-%token START_SYM
 %token STATUS_SYM
-%token STD_SYM
-%token STOP_SYM
 %token STRAIGHT_JOIN
-%token STRING_SYM
-%token SUBSTRING
-%token SUBSTRING_INDEX
-%token SUM_SYM
 %token TABLES
 %token TABLE_SYM
 %token TEMPORARY
 %token TERMINATED
 %token TEXT_STRING
-%token TEXT_SYM
-%token THEN_SYM
-%token TIMESTAMP
-%token TIME_SYM
-%token TINYBLOB
-%token TINYINT
-%token TINYTEXT
 %token TO_SYM
 %token TRAILING
 %token TRANSACTION_SYM
-%token TRIM
-%token TRUNCATE_SYM
 %token TYPE_SYM
-%token UDA_CHAR_SUM
-%token UDA_FLOAT_SUM
-%token UDA_INT_SUM
-%token UDF_CHAR_FUNC
-%token UDF_FLOAT_FUNC
-%token UDF_INT_FUNC
+%token FUNC_ARG0
+%token FUNC_ARG1
+%token FUNC_ARG2
+%token FUNC_ARG3
 %token UDF_RETURNS_SYM
 %token UDF_SONAME_SYM
 %token UDF_SYM
-%token UNCOMMITTED_SYM
+%token UNCOMMITTED_SYM
 %token UNION_SYM
 %token UNIQUE_SYM
-%token UNIQUE_USERS
-%token UNIX_TIMESTAMP
-%token UNLOCK_SYM
-%token UNSIGNED
-%token UPDATE_SYM
 %token USAGE
-%token USER
 %token USE_SYM
 %token USING
 %token VALUES
@@ -418,6 +299,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
 %token BLOB_SYM
 %token CHAR_SYM
 %token CHANGED
+%token COALESCE
 %token DATETIME
 %token DATE_SYM
 %token DECIMAL_SYM
@@ -426,6 +308,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
 %token FAST_SYM
 %token FLOAT_SYM
 %token INT_SYM
+%token LIMIT
 %token LONGBLOB
 %token LONGTEXT
 %token MEDIUMBLOB
@@ -446,18 +329,85 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
 %token UNSIGNED
 %token VARBINARY
 %token VARCHAR
-%token VARIABLES
 %token VARYING
+%token ZEROFILL
+
+%token AGAINST
+%token ATAN
+%token BETWEEN_SYM
+%token BIT_AND
+%token BIT_OR
+%token CASE_SYM
+%token CONCAT
+%token CONCAT_WS
+%token CURDATE
+%token CURTIME
+%token DATABASE
+%token DATE_ADD_INTERVAL
+%token DATE_SUB_INTERVAL
+%token DAY_HOUR_SYM
+%token DAY_MINUTE_SYM
+%token DAY_SECOND_SYM
+%token DAY_SYM
+%token DECODE_SYM
+%token ELSE
+%token ELT_FUNC
+%token ENCODE_SYM
+%token ENCRYPT
+%token EXPORT_SET
+%token EXTRACT_SYM
+%token FIELD_FUNC
+%token FORMAT_SYM
+%token FOR_SYM
+%token FROM_UNIXTIME
+%token GROUP_UNIQUE_USERS
+%token HOUR_MINUTE_SYM
+%token HOUR_SECOND_SYM
+%token HOUR_SYM
+%token IDENTIFIED_SYM
+%token IF
+%token INSERT_ID
+%token INTERVAL_SYM
+%token LAST_INSERT_ID
+%token LEFT
+%token LOCATE
+%token MAKE_SET_SYM
+%token MINUTE_SECOND_SYM
+%token MINUTE_SYM
+%token MODE_SYM
+%token MODIFY_SYM
+%token MONTH_SYM
+%token NOW_SYM
+%token PASSWORD
+%token POSITION_SYM
+%token PROCEDURE
+%token RAND
+%token REPLACE
+%token RIGHT
+%token ROUND
+%token SECOND_SYM
+%token SHARE_SYM
+%token SUBSTRING
+%token SUBSTRING_INDEX
+%token TRIM
+%token UDA_CHAR_SUM
+%token UDA_FLOAT_SUM
+%token UDA_INT_SUM
+%token UDF_CHAR_FUNC
+%token UDF_FLOAT_FUNC
+%token UDF_INT_FUNC
+%token UNIQUE_USERS
+%token UNIX_TIMESTAMP
+%token USER
 %token WEEK_SYM
 %token WHEN_SYM
-%token WHERE
-%token WITH
-%token WORK_SYM
-%token WRITE_SYM
-%token YEARWEEK
+%token WORK_SYM
 %token YEAR_MONTH_SYM
 %token YEAR_SYM
-%token ZEROFILL
+%token YEARWEEK
+%token BENCHMARK_SYM
+%token END
+%token THEN_SYM
 
 %token SQL_BIG_TABLES
 %token SQL_BIG_SELECTS
@@ -472,7 +422,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
 %token SQL_BUFFER_RESULT
 %token SQL_WARNINGS
 %token SQL_AUTO_IS_NULL
-%token SQL_CALC_FOUND_ROWS
 %token SQL_SAFE_UPDATES
 %token SQL_QUOTE_SHOW_CREATE
 %token SQL_SLAVE_SKIP_COUNTER
@@ -2441,6 +2390,12 @@ opt_local:
 	/* empty */	{ $$=0;}
 	| LOCAL_SYM	{ $$=1;}
 
+load_data_lock:
+	/* empty */	{ Lex->lock_option= current_thd->update_lock_default; }
+	| CONCURRENT	{ Lex->lock_option= TL_WRITE_CONCURRENT_INSERT ; }
+	| LOW_PRIORITY	{ Lex->lock_option= TL_WRITE_LOW_PRIORITY; }
+
+
 opt_duplicate:
 	/* empty */	{ Lex->duplicates=DUP_ERROR; }
 	| REPLACE	{ Lex->duplicates=DUP_REPLACE; }
@@ -2599,6 +2554,7 @@ keyword:
 	| COMMIT_SYM		{}
 	| COMMITTED_SYM		{}
 	| COMPRESSED_SYM	{}
+	| CONCURRENT		{}
 	| DATA_SYM		{}
 	| DATETIME		{}
 	| DATE_SYM		{}
diff --git a/sql/table.h b/sql/table.h
index a0e037222dc..706c499d852 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -41,6 +41,8 @@ typedef struct st_grant_info
   uint want_privilege;
 } GRANT_INFO;
 
+enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2};
+
 /* Table cache entry struct */
 
 class Field_timestamp;
@@ -83,10 +85,11 @@ struct st_table {
   uint blob_ptr_size;			/* 4 or 8 */
   uint next_number_key_offset;
   int current_lock;			/* Type of lock on table */
+  enum tmp_table_type tmp_table;
   my_bool copy_blobs;			/* copy_blobs when storing */
   my_bool null_row;			/* All columns are null */
   my_bool maybe_null,outer_join;	/* Used with OUTER JOIN */
-  my_bool distinct,tmp_table,const_table;
+  my_bool distinct,const_table;
   my_bool key_read;
   my_bool crypted;
   my_bool db_low_byte_first;		/* Portable row format */
diff --git a/support-files/Makefile.am b/support-files/Makefile.am
index 164fd50a89c..00e8b13b12c 100644
--- a/support-files/Makefile.am
+++ b/support-files/Makefile.am
@@ -18,7 +18,6 @@
 ## Process this file with automake to create Makefile.in
 
 EXTRA_DIST =		mysql.spec.sh \
-			mysql-max.spec.sh \
 			my-small.cnf.sh \
 			my-medium.cnf.sh \
 			my-large.cnf.sh \
@@ -34,7 +33,6 @@ pkgdata_DATA = my-small.cnf \
 			my-huge.cnf \
 			mysql-log-rotate \
 			mysql-@VERSION@.spec \
-			mysql-max-@VERSION@.spec \
 			binary-configure
 
 pkgdata_SCRIPTS = mysql.server
@@ -44,7 +42,6 @@ CLEANFILES = my-small.cnf \
 		my-large.cnf \
 		my-huge.cnf \
 		mysql.spec \
-		mysql-max-@VERSION@.spec \
 		mysql-@VERSION@.spec \
 		mysql-log-rotate \
 		mysql.server \
@@ -55,10 +52,6 @@ mysql-@VERSION@.spec: mysql.spec
 	rm -f $@
 	cp mysql.spec $@
 
-mysql-max-@VERSION@.spec: mysql-max.spec
-	rm -f $@
-	cp mysql-max.spec $@
-
 SUFFIXES = .sh
 
 .sh: