summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorunknown <tim@threads.polyesthetic.msg>2001-05-14 14:23:45 -0400
committerunknown <tim@threads.polyesthetic.msg>2001-05-14 14:23:45 -0400
commitaa07f65caee02cddd38a9a250ac3024ea70dcb2b (patch)
tree20d648d337c79f661f35f953b2955e8d5a316b2c
parent1115a79126c7031c8ebedae237b02b56af95f1ed (diff)
parent258a55ff6884b85e06714090947bdb53160811b9 (diff)
downloadmariadb-git-aa07f65caee02cddd38a9a250ac3024ea70dcb2b.tar.gz
Merge work.mysql.com:/home/bk/mysql
into threads.polyesthetic.msg:/usr/local/src/my/3 BitKeeper/etc/logging_ok: auto-union sql/sql_select.cc: Auto merged
-rw-r--r--.bzrignore1
-rw-r--r--BUILD/compile-ia64-O012
-rw-r--r--BUILD/compile-ia64-O0-sgicc12
-rw-r--r--BUILD/compile-ia64-O212
-rw-r--r--BUILD/compile-ia64-O2-sgicc12
-rw-r--r--BUILD/compile-ia64-O68
-rw-r--r--BitKeeper/etc/logging_ok4
-rwxr-xr-xBuild-tools/Do-compile89
-rwxr-xr-xBuild-tools/Do-patch-file2
-rw-r--r--Docs/manual.texi970
-rw-r--r--acinclude.m48
-rw-r--r--bdb/dist/Makefile.in3
-rw-r--r--bdb/include/log.h1
-rw-r--r--bdb/log/log.c14
-rw-r--r--bdb/log/log_rec.c2
-rw-r--r--client/Makefile.am3
-rw-r--r--client/errmsg.c14
-rw-r--r--client/mysqladmin.c15
-rw-r--r--client/mysqlcheck.c685
-rw-r--r--client/mysqldump.c175
-rw-r--r--client/mysqltest.c3
-rw-r--r--configure.in15
-rw-r--r--extra/resolve_stack_dump.c2
-rw-r--r--include/global.h3
-rw-r--r--include/m_ctype.h1
-rw-r--r--include/myisam.h6
-rw-r--r--include/mysql.h8
-rw-r--r--innobase/buf/buf0buf.c56
-rw-r--r--innobase/buf/buf0flu.c6
-rw-r--r--innobase/buf/buf0rea.c6
-rw-r--r--innobase/configure.in43
-rw-r--r--innobase/include/buf0buf.h10
-rw-r--r--innobase/include/univ.i14
-rw-r--r--innobase/include/ut0dbg.h2
-rw-r--r--innobase/include/ut0mem.h2
-rw-r--r--innobase/include/ut0ut.h5
-rw-r--r--innobase/log/log0recv.c6
-rw-r--r--innobase/os/os0file.c111
-rw-r--r--innobase/os/os0sync.c1
-rw-r--r--innobase/pars/lexyy.c2
-rw-r--r--innobase/pars/pars0grm.c6
-rw-r--r--innobase/pars/pars0grm.y6
-rw-r--r--innobase/row/row0sel.c97
-rw-r--r--innobase/row/row0uins.c12
-rw-r--r--innobase/row/row0umod.c19
-rw-r--r--isam/_dbug.c2
-rw-r--r--libmysql/libmysql.c2
-rw-r--r--mysql-test/Makefile.am2
-rw-r--r--mysql-test/include/master-slave.inc2
-rw-r--r--mysql-test/install_test_db.sh4
-rw-r--r--mysql-test/mysql-test-run.sh10
-rw-r--r--mysql-test/r/backup.result2
-rw-r--r--mysql-test/r/group_by.result4
-rw-r--r--mysql-test/r/innodb.result9
-rw-r--r--mysql-test/r/rpl000014.result8
-rw-r--r--mysql-test/r/rpl000015.result6
-rw-r--r--mysql-test/r/rpl000016.result6
-rw-r--r--mysql-test/r/show_check.result13
-rw-r--r--mysql-test/r/shw000001.result5
-rw-r--r--mysql-test/t/group_by.test12
-rw-r--r--mysql-test/t/innodb.test20
-rw-r--r--mysql-test/t/rpl000014.test4
-rw-r--r--mysql-test/t/rpl000015.test3
-rw-r--r--mysql-test/t/rpl000016.test3
-rwxr-xr-xmysql-test/t/rpl000018-master.sh6
-rw-r--r--mysql-test/t/show_check.test17
-rw-r--r--mysql-test/t/shw000001.test8
-rw-r--r--mysys/mf_keycache.c14
-rw-r--r--mysys/mf_qsort.c229
-rw-r--r--mysys/my_error.c2
-rw-r--r--mysys/raid.cc12
-rw-r--r--scripts/make_binary_distribution.sh7
-rw-r--r--scripts/mysqlhotcopy.sh2
-rw-r--r--sql-bench/Results/ATIS-mysql-NT_4.018
-rw-r--r--sql-bench/Results/RUN-mysql-NT_4.0185
-rw-r--r--sql-bench/Results/alter-table-mysql-NT_4.014
-rw-r--r--sql-bench/Results/big-tables-mysql-NT_4.012
-rw-r--r--sql-bench/Results/connect-mysql-NT_4.020
-rw-r--r--sql-bench/Results/create-mysql-NT_4.014
-rw-r--r--sql-bench/Results/insert-mysql-NT_4.091
-rw-r--r--sql-bench/Results/select-mysql-NT_4.033
-rw-r--r--sql-bench/Results/wisconsin-mysql-NT_4.010
-rw-r--r--sql-bench/crash-me.sh2
-rw-r--r--sql/Makefile.am15
-rw-r--r--sql/filesort.cc10
-rw-r--r--sql/ha_berkeley.cc1
-rw-r--r--sql/ha_innobase.cc12
-rw-r--r--sql/ha_innobase.h4
-rw-r--r--sql/ha_myisam.cc16
-rw-r--r--sql/handler.h1
-rw-r--r--sql/item_strfunc.cc10
-rw-r--r--sql/lex.h1
-rw-r--r--sql/lock.cc4
-rw-r--r--sql/log.cc55
-rw-r--r--sql/log_event.cc34
-rw-r--r--sql/log_event.h14
-rw-r--r--sql/mini_client.cc10
-rw-r--r--sql/mysql_priv.h3
-rw-r--r--sql/mysqlbinlog.cc67
-rw-r--r--sql/mysqld.cc240
-rw-r--r--sql/opt_range.cc4
-rw-r--r--sql/share/english/errmsg.txt2
-rw-r--r--sql/slave.cc16
-rw-r--r--sql/sql_base.cc13
-rw-r--r--sql/sql_class.h5
-rw-r--r--sql/sql_parse.cc2
-rw-r--r--sql/sql_repl.cc10
-rw-r--r--sql/sql_repl.h3
-rw-r--r--sql/sql_select.cc163
-rw-r--r--sql/sql_select.h3
-rw-r--r--sql/sql_show.cc5
-rw-r--r--sql/sql_table.cc39
-rw-r--r--sql/sql_yacc.yy12
-rw-r--r--sql/stacktrace.c215
-rw-r--r--sql/stacktrace.h51
-rw-r--r--sql/table.h5
-rw-r--r--support-files/Makefile.am7
-rw-r--r--support-files/my-huge.cnf.sh2
-rw-r--r--support-files/my-large.cnf.sh2
-rw-r--r--support-files/my-medium.cnf.sh2
-rw-r--r--support-files/my-small.cnf.sh2
121 files changed, 3085 insertions, 1272 deletions
diff --git a/.bzrignore b/.bzrignore
index 5fa3658e1ff..502efd1f30a 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -247,3 +247,4 @@ bdb/dist/config.hin
innobase/ib_config.h
innobase/ib_config.h.in
mysql.proj
+client/mysqlcheck
diff --git a/BUILD/compile-ia64-O0 b/BUILD/compile-ia64-O0
deleted file mode 100644
index d07067289b4..00000000000
--- a/BUILD/compile-ia64-O0
+++ /dev/null
@@ -1,12 +0,0 @@
-make -k clean
-/bin/rm -f */.deps/*.P config.cache
-
-aclocal; autoheader; aclocal; automake; autoconf
-
-CFLAGS="-O0 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O0 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex
-make
-strip sql/mysqld
-
-
-
-
diff --git a/BUILD/compile-ia64-O0-sgicc b/BUILD/compile-ia64-O0-sgicc
deleted file mode 100644
index c5e14eab033..00000000000
--- a/BUILD/compile-ia64-O0-sgicc
+++ /dev/null
@@ -1,12 +0,0 @@
-make -k clean
-/bin/rm -f */.deps/*.P config.cache
-
-aclocal; autoheader; aclocal; automake; autoconf
-
-CC=sgicc CFLAGS="-O0" CXX=sgicc CXXFLAGS="-O0" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex
-make
-strip sql/mysqld
-
-
-
-
diff --git a/BUILD/compile-ia64-O2 b/BUILD/compile-ia64-O2
deleted file mode 100644
index 140d34b2466..00000000000
--- a/BUILD/compile-ia64-O2
+++ /dev/null
@@ -1,12 +0,0 @@
-make -k clean
-/bin/rm -f */.deps/*.P config.cache
-
-aclocal; autoheader; aclocal; automake; autoconf
-
-CFLAGS="-O2 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O2 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex
-make
-strip sql/mysqld
-
-
-
-
diff --git a/BUILD/compile-ia64-O2-sgicc b/BUILD/compile-ia64-O2-sgicc
deleted file mode 100644
index 64b2ff17beb..00000000000
--- a/BUILD/compile-ia64-O2-sgicc
+++ /dev/null
@@ -1,12 +0,0 @@
-make -k clean
-/bin/rm -f */.deps/*.P config.cache
-
-aclocal; autoheader; aclocal; automake; autoconf
-
-CC=sgicc CFLAGS="-O2" CXX=sgicc CXXFLAGS="-O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex
-make
-strip sql/mysqld
-
-
-
-
diff --git a/BUILD/compile-ia64-O6 b/BUILD/compile-ia64-O6
deleted file mode 100644
index 8792c0e1479..00000000000
--- a/BUILD/compile-ia64-O6
+++ /dev/null
@@ -1,8 +0,0 @@
-make -k clean
-/bin/rm -f */.deps/*.P config.cache
-
-aclocal; autoheader; aclocal; automake; autoconf
-
-CFLAGS="-O6 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O6 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex
-make
-strip sql/mysqld
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 106f63e161e..f9538335046 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -1,7 +1,11 @@
heikki@donna.mysql.fi
+jani@hynda.mysql.fi
jcole@abel.spaceapes.com
+jcole@main.burghcom.com
+jcole@tetra.spaceapes.com
monty@donna.mysql.fi
monty@work.mysql.com
+paul@central.snake.net
sasha@mysql.sashanet.com
serg@serg.mysql.com
tim@threads.polyesthetic.msg
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index 011ad9662de..3ef9ba614f9 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -3,22 +3,29 @@
use Getopt::Long;
$opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env="";
$opt_dbd_options=$opt_perl_options=$opt_suffix="";
-$opt_tmp="";
-$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=0;
-GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i") || usage();
+$opt_tmp=$version_suffix="";
+$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0;
+$opt_innodb=$opt_bdb=0;
+
+GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage();
usage() if ($opt_help || $opt_Information);
usage() if (!$opt_distribution);
+if ($opt_innodb || $opt_bdb)
+{
+ $version_suffix="-max";
+}
+
chomp($host=`hostname`);
$full_host_name=$host;
-print "$host: Compiling MySQL at $host$suffix, stage: $opt_stage\n" if ($opt_debug);
+print "$host: Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n" if ($opt_debug);
$connect_option= ($opt_tcpip ? "--host=$host" : "");
$host =~ /^([^.-]*)/;
$host=$1 . $opt_suffix;
$email="$opt_user\@mysql.com";
$pwd = `pwd`; chomp($pwd);
-$log="$pwd/Logs/$host.log";
+$log="$pwd/Logs/$host$version_suffix.log";
$opt_distribution =~ /(mysql-[^\/]*)\.tar/;
$ver=$1;
$gcc_version=which("gcc");
@@ -72,13 +79,16 @@ if ($opt_stage == 0)
log_system("$host/bin/mysqladmin --no-defaults -u root -P 9306 -h $host -s shutdown");
log_system("$host/bin/mysqladmin --no-defaults -u root -P 9307 -h $host -s shutdown");
}
- system("mkdir $host") if (! -d $host);
+ if (!$opt_use_old_distribution)
+ {
+ system("mkdir $host") if (! -d $host);
+ system("touch $host/mysql-fix-for-glob");
+ rm_all(<$host/mysql-*>);
+ system("mkdir $host/bin") if (! -d "$host/bin");
+ }
system("mkdir $bench_tmpdir") if (! -d $bench_tmpdir);
- system("touch $host/mysql-fix-for-glob");
- rm_all(<$host/mysql-*>);
rm_all("$host/test");
system("mkdir $host/test") if (! -d "$host/test");
- system("mkdir $host/bin") if (! -d "$host/bin");
system("mkdir Logs") if (! -d "Logs");
system("mv $log ${log}-old") if (-f $log);
unlink($log);
@@ -91,15 +101,16 @@ select STDOUT;
$|=1;
safe_cd("$host");
-if ($opt_stage == 0)
+if ($opt_stage == 0 && ! $opt_use_old_distribution)
{
safe_system("gunzip < $opt_distribution | $tar xf -");
}
safe_cd($ver);
if ($opt_stage <= 1)
-{
+{
$opt_config_options.=" --with-low-memory" if ($opt_with_low_memory);
unlink("config.cache");
+ log_system("$make clean") if ($opt_use_old_distribution);
if ($opt_static_server)
{
$opt_config_options.=" --with-mysqld-ldflags=-all-static";
@@ -109,7 +120,15 @@ if ($opt_stage <= 1)
$opt_config_options.=" --with-client-ldflags=-all-static";
}
$opt_config_options.= " --disable-shared"; # Default for binary versions
- check_system("$opt_config_env ./configure --prefix=/usr/local/mysql \"--with-comment=Official MySQL binary\" --with-extra-charsets=complex $opt_config_options","Thank you for choosing MySQL");
+ if ($opt_bdb)
+ {
+ $opt_config_options.= " --with-berkeley-db"
+ }
+ if ($opt_innodb)
+ {
+ $opt_config_options.= " --with-innodb"
+ }
+ check_system("$opt_config_env ./configure --prefix=/usr/local/mysql \"--with-comment=Official MySQL$version_suffix binary\" --with-extra-charsets=complex \"--with-server-suffix=$version_suffix\" $opt_config_options","Thank you for choosing MySQL");
if (-d "$pwd/$host/include-mysql")
{
safe_system("cp -r $pwd/$host/include-mysql/* $pwd/$host/$ver/include");
@@ -118,7 +137,7 @@ if ($opt_stage <= 1)
if ($opt_stage <= 2)
{
- unlink($opt_distribution) if (!$opt_no_delete);
+ unlink($opt_distribution) if (!$opt_no_delete && !$opt_use_old_distribution);
safe_system("$make");
}
@@ -140,19 +159,21 @@ if ($opt_stage <= 3)
# Unpack and test the binary distrubtion
#
+$tar_file=<$pwd/$host/*.tar.gz>;
+if (!defined($tar_file))
+{
+ $tar_file=<$pwd/$host/*.tgz>;
+}
if ($opt_stage <= 4 && !$opt_no_test)
{
rm_all(<$pwd/$host/test/*>);
safe_cd("$pwd/$host/test");
- $tar_file=<$pwd/$host/*.tar.gz>;
- if (!defined($tar_file))
- {
- $tar_file=<$pwd/$host/*.tgz>;
- }
safe_system("gunzip < $tar_file | $tar xf -");
}
-$test_dir=<$pwd/$host/test/$ver-*>;
+$tar_file =~ /(mysql-[^\/]*)\.tar/;
+$ver=$1;
+$test_dir="$pwd/$host/test/$ver";
if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
{
@@ -164,12 +185,22 @@ if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
if ($opt_stage <= 6 && !$opt_no_test)
{
+ my $extra;
safe_cd($test_dir);
log_system("./bin/mysqladmin --no-defaults -u root -S $mysql_unix_port -s shutdown") || info("There was no mysqld running\n");
sleep(2);
log_system("rm -f ./data/mysql/*");
check_system("scripts/mysql_install_db --no-defaults --skip-locking","https://order");
- safe_system("./bin/mysqld --no-defaults --basedir . --datadir ./data --skip-locking >> $log 2>&1 &");
+ $extra="";
+ if ($opt_bdb)
+ {
+ $extra.=" -O bdb_cache_size=16M";
+ }
+ if ($opt_innodb)
+ {
+ $extra.=" --innodb_data_file_path=ibdata1:100M";
+ }
+ safe_system("./bin/mysqld --no-defaults --basedir . --datadir ./data --skip-locking $extra >> $log 2>&1 &");
sleep(2);
}
@@ -219,6 +250,14 @@ if ($opt_stage <= 9 && !$opt_no_test)
log_system("rm -f output/*");
$tmp= $opt_fast_benchmark ? "--fast --user root --small-test" : "";
check_system("perl ./run-all-tests --log --die-on-errors $connect_option $tmp","RUN-mysql");
+ if ($opt_bdb)
+ {
+ check_system("perl ./run-all-tests --log --suffix=\"_bdb\" --die-on-errors $connect_option $tmp --create-option=\"type=bdb\"","RUN-mysql");
+ }
+ if ($opt_innodb)
+ {
+ check_system("perl ./run-all-tests --log --suffix=\"_innodb\" --die-on-errors $connect_option $tmp --create-option=\"type=innodb\"","RUN-mysql");
+ }
}
if ($opt_stage <= 10 && $opt_result)
@@ -240,16 +279,22 @@ exit 0;
sub usage
{
print <<EOF;
-$0 version 1.1
+$0 version 1.2
$0 takes the following options:
--help or --Information
Show this help
+--innodb
+Compile with support for Innodb tables
+
+--bdb
+Compile with support for Berkeley DB tables
+
--user 'user_name'
Mail 'user_name'\@analytikerna.se if something went wrong.
-If user is empty then no mail is sent.
+If user is empty then no mail is sent.
--distribution 'distribution_file'
Name of the MySQL distribution file.
diff --git a/Build-tools/Do-patch-file b/Build-tools/Do-patch-file
index 4a45855fbb7..68003962c56 100755
--- a/Build-tools/Do-patch-file
+++ b/Build-tools/Do-patch-file
@@ -40,5 +40,5 @@ chmod a+r,o-w $RESULT binary/*
mv $RESULT $PATCH_DIR
cp binary/mysqlcom-* binary/mysql*win* /net/web/home/production/data/nweb/customer/Downloads
rm binary/mysqlcom-*
-mv binary/*Max* $RESULT_DIR_MAX
+mv binary/*Max* mv binary/*-max* $RESULT_DIR_MAX
cp binary/* $RESULT_DIR
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 02b9c64dddb..5a37604a4eb 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -69,7 +69,7 @@ END-INFO-DIR-ENTRY
@sp 10
@center @titlefont{@strong{MySQL} Reference Manual}
@sp 10
-@center Copyright @copyright{} 1997-2001 TcX AB, Detron HB and MySQL Finland AB
+@center Copyright @copyright{} 1997-2001 MySQL AB
@c blank page after title page makes page 1 be a page front.
@c also makes the back of the title page blank.
@page
@@ -514,13 +514,13 @@ MyISAM Table Formats
BDB or Berkeley_DB Tables
-* BDB overview::
-* BDB install::
-* BDB start::
-* BDB characteristic::
-* BDB TODO::
-* BDB portability::
-* BDB errors::
+* BDB overview:: Overview of BDB Tables
+* BDB install:: Installing BDB
+* BDB start:: BDB startup options
+* BDB characteristic:: Some characteristic of @code{BDB} tables:
+* BDB TODO:: Some things we need to fix for BDB in the near future:
+* BDB portability:: Operating systems supported by @strong{BDB}
+* BDB errors:: Errors You May Get When Using BDB Tables
GEMINI Tables
@@ -533,18 +533,18 @@ InnoDB Tables
* InnoDB overview:: InnoDB tables overview
* InnoDB start:: InnoDB startup options
-* Creating an InnoDB database:: Creating an InnoDB database. Creating an InnoDB database
+* Creating an InnoDB database:: Creating an InnoDB database. Creating an InnoDB database. Creating an InnoDB database.
* Using InnoDB tables:: Creating InnoDB tables
* Adding and removing:: Adding and removing InnoDB data and log files
* Backing up:: Backing up and recovering an InnoDB database
* Moving:: Moving an InnoDB database to another machine
-* InnoDB transaction model:: InnoDB transaction model. InnoDB transaction model
+* InnoDB transaction model:: InnoDB transaction model.
* Implementation:: Implementation of multiversioning
* Table and index:: Table and index structures
* File space management:: File space management and disk i/o
* Error handling:: Error handling
* InnoDB restrictions:: Some restrictions on InnoDB tables
-* InnoDB contact information:: InnoDB contact information. InnoDB contact information
+* InnoDB contact information:: InnoDB contact information.
MySQL Tutorial
@@ -666,7 +666,7 @@ Speed of Queries that Access or Update Data
MySQL Utilites
* Programs:: What do the executables do?
-* mysqld-max::
+* mysqld-max:: mysqld-max, An extended mysqld server
* safe_mysqld:: safe_mysqld, the wrapper around mysqld
* mysqld_multi:: Program for managing multiple @strong{MySQL} servers
* mysql:: The command line tool
@@ -759,6 +759,7 @@ Problems and Common Errors
* No matching rows:: Solving problems with no matching rows
* ALTER TABLE problems:: Problems with @code{ALTER TABLE}.
* Change column order:: How to change the order of columns in a table
+* Temporary table problems::
Some Common Errors When Using MySQL
@@ -926,7 +927,7 @@ Changes in release 4.0.x (Development; Alpha)
Changes in release 3.23.x (Stable)
-* News-3.23.38::
+* News-3.23.38:: Changes in release 3.23.38
* News-3.23.37:: Changes in release 3.23.37
* News-3.23.36:: Changes in release 3.23.36
* News-3.23.35:: Changes in release 3.23.35
@@ -1087,6 +1088,7 @@ Debugging a MySQL server
* Compiling for debugging::
* Making trace files::
+* Using gdb on mysqld::
* Using stack trace::
* Using log files::
* Reproducable test case::
@@ -1590,9 +1592,9 @@ contributing to the development of @strong{MySQL}.
@item Pages @tab 800
@item Price @tab $49.99 US
@item Downloadable examples @tab
- @uref{http://www.mysql.com/Downloads/Contrib/Examples/samp_db.tar.gz, @code{samp_db.tar.gz}}
+ @uref{http://www.kitebird.com/mysql-book/, @code{samp_db} distribution}
@item Errata @tab
- @uref{http://www.mysql.com/documentation/pauls-mysql-book-errata.html, are available here}
+ @uref{http://www.kitebird.com/mysql-book/errata.html, are available here}
@end multitable
Foreword by Michael ``Monty'' Widenius, @strong{MySQL} Moderator.
@@ -2131,7 +2133,7 @@ because of bugs in @strong{MySQL}.
@cindex retrieving, data
@cindex data, ISAM table handler
-@item The MyISAM table handler --- Gamma
+@item The MyISAM table handler --- Stable
This is new in @strong{MySQL} Version 3.23. It's largely based on the ISAM
table code but has a lot of new and very useful features.
@@ -2222,7 +2224,7 @@ The Berkeley DB code is very stable, but we are still improving the interface
between @strong{MySQL} and BDB tables, so it will take some time before this
is as tested as the other table types.
-@item Innodb Tables -- Alpha
+@item InnoDB Tables -- Alpha
This is a very recent addition to @code{MySQL} and is not very tested yet.
@item Automatic recovery of MyISAM tables - Beta
@@ -2393,7 +2395,7 @@ Apart from the following links, you can find and download a lot of
Information about the German MySQL mailing list.
@item @uref{http://www2.rent-a-database.de/mysql/}
-@strong{MySQL} manual in German.
+@strong{MySQL} handbook in German.
@item @uref{http://www.bitmover.com:8888//home/bk/mysql}
Web access to the @strong{MySQL} BitKeeper repository.
@@ -2441,6 +2443,9 @@ New Client libraries for the Mac OS Classic (Macintosh).
@item @uref{http://www.lilback.com/macsql/}
Client libraries for Mac OS Classic (Macintosh).
+
+@item @uref{http://sixk.maniasys.com/index_en.html}
+MySQL for Amiga
@end table
@subheading Perl-related Links
@@ -2715,7 +2720,7 @@ Popular iODBC Driver Manager (libiodbc) now available as Open Source.
@item @uref{http://users.ids.net/~bjepson/freeODBC/}
The FreeODBC Pages.
-@item @uref{http:/http://genix.net/unixODBC/}
+@item @uref{http://genix.net/unixODBC/}
The unixODBC Project goals are to develop and promote unixODBC to be the
definitive standard for ODBC on the Linux platform. This is to include GUI
support for KDE.
@@ -2772,7 +2777,7 @@ environment.
@item @uref{http://www.wix.com/mysql-hosting/}
Registry of Web providers who support @strong{MySQL}.
-@item @uref{http://www.softagency.co.jp/mysql/index.en.phtml}
+@item @uref{http://www.softagency.co.jp/mysql/index.en.html}
Links about using @strong{MySQL} in Japan/Asia.
@item @uref{http://abattoir.cc.ndsu.nodak.edu/~nem/mysql/udf/}
@@ -2892,11 +2897,6 @@ same (or similar) query. Uses PHP and @strong{MySQL}.
@item @uref{http://www.stopbit.com/}
Stopbit - A technology news site using @strong{MySQL} and PHP.
-@c Added 990604
-@c EMAIL: ah@dybdahl.dk
-@item @uref{http://www.jokes2000.com/scripts/}
-Example scripts at Jokes2000.
-
@item @uref{http://www.linuxsupportline.com/~kalendar/}
KDE based calendar manager - The calendar manager has both single user
(file based) and multi-user (@strong{MySQL} database) support.
@@ -3101,8 +3101,23 @@ unsubscribe from the @code{myodbc} list, send a message to
@email{myodbc-subscribe@@lists.mysql.com} or
@email{myodbc-unsubscribe@@lists.mysql.com}.
-There is also a german mailing list. You can find information about this
-at: @uref{http://www.4t2.com/mysql}.
+The following table shows some @strong{MySQL} mailing in other languages than
+English. Note that these are not operated by @strong{MySQL AB}, so we can't
+guarantee the quality on these.
+
+@table @code
+@item @email{mysql-france-subscribe@@yahoogroups.com, A French mailing list}
+@item @email{list@@tinc.net, A Korean mailing list}
+Email @code{subscribe mysql your@@email.address} to this list.
+@item @email{mysql-de-request@@lists.4t2.com, A German mailing list}
+Email @code{subscribe mysql-de your@@email.address} to this list.
+You can find information about this mailing list at
+@uref{http://www.4t2.com/mysql}.
+@item @email{mysql-br-request@@listas.linkway.com.br, A Portugese mailing list}
+Email @code{subscribe mysql-br your@@email.address} to this list.
+@item @email{mysql-alta@@elistas.net, A Spanish mailing list}
+Email @code{subscribe mysql your@@email.address} to this list.
+@end table
@cindex net etiquette
@cindex mailing lists, archive location
@@ -5009,6 +5024,8 @@ sucessfully on the following operating system/thread package combinations:
@item
AIX 4.x with native threads. @xref{IBM-AIX}.
@item
+Amiga.
+@item
BSDI 2.x with the included MIT-pthreads package. @xref{BSDI}.
@item
BSDI 3.0, 3.1 and 4.x with native threads. @xref{BSDI}.
@@ -5918,12 +5935,15 @@ A reasonable @code{tar} to unpack the distribution. GNU @code{tar} is
known to work. Sun @code{tar} is known to have problems.
@item
-A working ANSI C++ compiler. @code{gcc} >= 2.8.1, @code{egcs} >=
-1.0.2, SGI C++, and SunPro C++ are some of the compilers that are known to
-work. @code{libg++} is not needed when using @code{gcc}. @code{gcc}
-2.7.x has a bug that makes it impossible to compile some perfectly legal
-C++ files, such as @file{sql/sql_base.cc}. If you only have @code{gcc} 2.7.x,
-you must upgrade your @code{gcc} to be able to compile @strong{MySQL}.
+A working ANSI C++ compiler. @code{gcc} >= 2.95.2, @code{egcs} >= 1.0.2
+or @code{egcs 2.91.66}, SGI C++, and SunPro C++ are some of the
+compilers that are known to work. @code{libg++} is not needed when
+using @code{gcc}. @code{gcc} 2.7.x has a bug that makes it impossible
+to compile some perfectly legal C++ files, such as
+@file{sql/sql_base.cc}. If you only have @code{gcc} 2.7.x, you must
+upgrade your @code{gcc} to be able to compile @strong{MySQL}. @code{gcc}
+2.8.1 is also known to have problems on some platforms so it should be
+avoided if there exists a new compiler for the platform..
@code{gcc} >= 2.95.2 is recommended when compiling @strong{MySQL}
Version 3.23.x.
@@ -5981,9 +6001,15 @@ shell> scripts/mysql_install_db
shell> chown -R root /usr/local/mysql
shell> chown -R mysql /usr/local/mysql/var
shell> chgrp -R mysql /usr/local/mysql
+shell> cp support-files/my-medium.cnf /etc/my.cnf
shell> /usr/local/mysql/bin/safe_mysqld --user=mysql &
@end example
+If you want have support for InnoDB tables, you should edit the
+@code{/etc/my.cnf} file and remove the @code{#} character before the
+parameters that starts with @code{innodb_...}. @xref{Option
+files}. @xref{InnoDB start}.
+
If you start from a source RPM, then do the following:
@example
@@ -7376,7 +7402,7 @@ This will avoid problems with the @code{libstdc++} library and with C++
exceptions.
If this doesn't help, you should compile a debug version and run
-it with a trace file or under @code{gdb}. @xref{Debugging server}.
+it with a trace file or under @code{gdb}. @xref{Using gdb on mysqld}.
@node SunOS, Linux, Solaris x86, Source install system issues
@subsection SunOS 4 Notes
@@ -7725,7 +7751,7 @@ For the source distribution of @code{glibc} 2.0.7, a patch that is easy to
apply and is tested with @strong{MySQL} may be found at:
@example
-@uref{http://www.mysql.com/Download/Linux/glibc-2.0.7-total-patch.tar.gz}
+@uref{http://www.mysql.com/Downloads/Linux/glibc-2.0.7-total-patch.tar.gz}
@end example
If you experience crashes like these when you build @strong{MySQL}, you can
@@ -8515,8 +8541,8 @@ We recommend the following @code{configure} line with @code{egcs} and
@code{gcc 2.95} on AIX:
@example
-CC="gcc -pipe -mcpu=power2 -Wa,-many" \
-CXX="gcc -pipe -mcpu=power2 -Wa,-many" \
+CC="gcc -pipe -mcpu=power -Wa,-many" \
+CXX="gcc -pipe -mcpu=power -Wa,-many" \
CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti" \
./configure --prefix=/usr/local/mysql --with-low-memory
@end example
@@ -8528,6 +8554,21 @@ available. We don't know if the @code{-fno-exceptions} is required with
option generates faster code, we recommend that you should always use this
option with @code{egcs / gcc}.
+If you get a problem with assembler code try changing the -mcpu=xxx to
+match your cpu. Typically power2, power, or powerpc may need to be used,
+alternatively you might need to use 604 or 604e. I'm not positive but I
+would think using "power" would likely be safe most of the time, even on
+a power2 machine.
+
+If you don't know what your cpu is then do a "uname -m", this will give
+you back a string that looks like "000514676700", with a format of
+xxyyyyyymmss where xx and ss are always 0's, yyyyyy is a unique system
+id and mm is the id of the CPU Planar. A chart of these values can be
+found at
+@uref{http://www.rs6000.ibm.com/doc_link/en_US/a_doc_lib/cmds/aixcmds5/uname.htm}.
+This will give you a machine type and a machine model you can use to
+determine what type of cpu you have.
+
If you have problems with signals (@strong{MySQL} dies unexpectedly
under high load) you may have found an OS bug with threads and
signals. In this case you can tell @strong{MySQL} not to use signals by
@@ -8548,6 +8589,29 @@ On some versions of AIX, linking with @code{libbind.a} makes
@code{getservbyname} core dump. This is an AIX bug and should be reported
to IBM.
+For AIX 4.2.1 and gcc you have to do the following changes.
+
+After configuring, edit @file{config.h} and @file{include/my_config.h}
+and change the line that says
+
+@example
+#define HAVE_SNPRINTF 1
+@end example
+
+to
+
+@example
+#undef HAVE_SNPRINTF
+@end example
+
+And finally, in @file{mysqld.cc} you need to add a prototype for initgoups.
+
+@example
+#ifdef _AIX41
+extern "C" int initgroups(const char *,int);
+#endif
+@end example
+
@node HP-UX 10.20, HP-UX 11.x, IBM-AIX, Source install system issues
@subsection HP-UX Version 10.20 Notes
@@ -8675,6 +8739,16 @@ The optimization flags used by @strong{MySQL} (-O3) are not recognized by HP's
compilers. I did not change the flags.
@end itemize
+If you get the following error from @code{configure}
+
+@example
+checking for cc option to accept ANSI C... no
+configure: error: MySQL requires a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.
+@end example
+
+Check that you don't have the path to the K&R compiler before the path
+to the HP-UX C and C++ compiler.
+
@node Mac OS X, BEOS, HP-UX 11.x, Source install system issues
@subsection Mac OS X Notes
@@ -8748,7 +8822,7 @@ This is also described in the @file{README} file that comes with the
@subsection Installing MySQL on Windows
If you don't have a copy of the @strong{MySQL} distribution, you should
-first download one from @uref{http://www.mysql.com/}.
+first download one from @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
If you plan to connect to @strong{MySQL} from some other program, you will
probably also need the @strong{MyODBC} driver. You can find this at the
@@ -8775,6 +8849,36 @@ specify all paths with @samp{/} instead of @samp{\}. If you use
@samp{\}, you need to specify this twice, as @samp{\} is the escape
character in @strong{MySQL}. @xref{Option files}.
+Starting from @strong{MySQL} 3.23.38 the windows distribution includes
+both the normal and the @strong{MySQL-Max} binaries. The main benefit
+of using the normal @code{mysqld.exe} binary is that it's a little
+faster and uses less resources.
+
+Here is a list of the different @strong{MySQL} servers you can use:
+
+@multitable @columnfractions .25 .75
+@item @code{mysqld} @tab
+Compiled with full debugging and automatic memory allocation checking,
+symbolic links, BDB and InnoDB tables.
+@item @code{mysqld-opt} @tab
+Optimized binary with no support for transactional tables.
+@item @code{mysqld-nt} @tab
+Optimized for a Pentium pro processor. Has support for
+named pipes. One can run this version on Win98, but in
+this case no named pipes are created and one must
+have TCP/IP installed.
+@item mysqld-max @tab
+Optimized binary with support for symbolic links, BDB and InnoDB tables.
+@item mysqld-max-nt @tab
+Like mysqld-max, but compiled with support for named pipes.
+@end multitable
+
+All of the above binaries are optimized for the Pentium pro processor but
+should work on any Intel processor >= i386.
+
+NOTE: If you want to use InnoDB tables, you need to specify some startup
+options in your my.ini file! @xref{InnoDB start}.
+
@node Win95 start, NT start, Windows installation, Windows
@subsection Starting MySQL on Windows 95 or Windows 98
@@ -8789,15 +8893,6 @@ Winsock 2! You can get the newest Winsock from
@uref{http://www.microsoft.com/}. Win98 has the new Winsock 2 library, so
the above doesn't apply for Win98.
-There are 2 different @strong{MySQL} servers you can use:
-
-@multitable @columnfractions .25 .75
-@item @code{mysqld} @tab Compiled with full debugging and automatic memory allocation checking
-@item @code{mysqld-opt} @tab Optimized for a Pentium processor.
-@end multitable
-
-Both of the above should work on any Intel processor >= i386.
-
To start the @code{mysqld} server, you should start an MS-DOS window and type:
@example
@@ -8821,11 +8916,10 @@ If @code{mysqld} doesn't start, please check whether or not the
try to start it with @code{mysqld --standalone}; In this case you may
get some useful information on the screen that may help solve this.
-The last option is to start @code{mysqld} with @code{--debug}. In this
-case @code{mysqld} will write a log file in @file{\mysqld.trace}
-that should contain the reason why @code{mysqld} doesn't start. If you
-make a bug report about this, please only send the lines to the mailing list
-where something seems to go wrong!
+The last option is to start @code{mysqld} with @code{--standalone
+--debug}. In this case @code{mysqld} will write a log file in
+@file{C:\mysqld.trace} that should contain the reason why @code{mysqld}
+doesn't start. @xref{Making trace files}.
@node NT start, Windows running, Win95 start, Windows
@subsection Starting MySQL on NT or Windows 2000
@@ -8844,10 +8938,15 @@ should install @strong{MySQL} as a service on NT/Win2000:
@example
C:\mysql\bin\mysqld-nt --install
+
+or
+
+C:\mysql\bin\mysqld-max-nt --install
@end example
-(You could use the @code{mysqld} or @code{mysqld-opt} servers on NT,
-but those cannot be started as a service or use named pipes.)
+(You can also use @code{mysqld} binaries that don't end with
+@code{-nt.exe} on NT, but those cannot be started as a service or use
+named pipes.)
You can start and stop the @strong{MySQL} service with:
@@ -8907,6 +9006,7 @@ C:\mysql\bin\mysqld --standalone --debug
@end example
The last version gives you a debug trace in @file{C:\mysqld.trace}.
+@xref{Making trace files}.
@node Windows running, Windows and SSH, NT start, Windows
@subsection Running MySQL on Windows
@@ -9795,6 +9895,10 @@ reload the grant tables.
@cindex server, starting problems
@cindex problems, starting the server
+If you are going to use tables that support transactions (BDB, InnoDB or
+Gemini), you should first create a my.cnf file and set startup options
+for the table types you plan to use. @xref{Table types}.
+
Generally, you start the @code{mysqld} server in one of three ways:
@itemize @bullet
@@ -9932,15 +10036,8 @@ this:
This problem occurs only on systems that don't have a working thread
library and for which @strong{MySQL} must be configured to use MIT-pthreads.
-On Windows, you can try to start @code{mysqld} as follows:
-
-@example
-C:\mysql\bin\mysqld --standalone --debug
-@end example
-
-This will not run in the background and it should also write a trace in
-@file{\mysqld.trace}, which may help you determine the source of your
-problems. @xref{Windows}.
+If you can't get mysqld to start you can try to make a trace file
+to find the problem. @xref{Making trace files}.
If you are using BDB (Berkeley DB) tables, you should familiarize
yourself with the different BDB specific startup options. @xref{BDB start}.
@@ -9948,7 +10045,7 @@ yourself with the different BDB specific startup options. @xref{BDB start}.
If you are using Gemini tables, refer to the Gemini-specific startup options.
@xref{GEMINI start}.
-If you are using Innodb tables, refer to the Innodb-specific startup
+If you are using InnoDB tables, refer to the InnoDB-specific startup
options. @xref{InnoDB start}.
@node Automatic start, Command-line options, Starting server, Post-installation
@@ -10057,6 +10154,10 @@ Chroot mysqld daemon during startup. Recommended security measure. It will
somewhat limit @code{LOAD DATA INFILE} and @code{SELECT ... INTO OUTFILE}
though.
+@item --core-file
+Write a core file if @code{mysqld} dies. For some systems you must also
+specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld}.
+
@item -h, --datadir=path
Path to the database root.
@@ -10069,7 +10170,7 @@ Set the default table type for tables. @xref{Table types}.
@item --debug[...]=
If @strong{MySQL} is configured with @code{--with-debug}, you can use this
option to get a trace file of what @code{mysqld} is doing.
-@xref{The DBUG package}.
+@xref{Making trace files}.
@item --delay-key-write-for-all-tables
Don't flush key buffers between writes for any @code{MyISAM} table.
@@ -10211,6 +10312,10 @@ gives everyone @emph{full access} to all databases! (You can tell a running
server to start using the grant tables again by executing @code{mysqladmin
flush-privileges} or @code{mysqladmin reload}.)
+@item --skip-host-cache
+Never use host name cache for faster name-ip resolution, but query DNS server
+on every connect instead. @xref{DNS}.
+
@item --skip-locking
Don't use system locking. To use @code{isamchk} or @code{myisamchk} you must
shut down the server. @xref{Stability}. Note that in @strong{MySQL} Version
@@ -10226,14 +10331,14 @@ Don't listen for TCP/IP connections at all. All interaction with
@code{mysqld} must be made via Unix sockets. This option is highly
recommended for systems where only local requests are allowed. @xref{DNS}.
-@item --skip-host-cache
-Never use host name cache for faster name-ip resolution, but query DNS server
-on every connect instead. @xref{DNS}.
-
@item --skip-new
Don't use new, possible wrong routines. Implies @code{--skip-delay-key-write}.
This will also set default table type to @code{ISAM}. @xref{ISAM}.
+@item --skip-stack-trace
+Don't write stack traces. This option is useful when you are running
+@code{mysqld} under a debugger. @xref{Debugging server}.
+
@item --skip-safemalloc
If @strong{MySQL} is configured with @code{--with-debug=full}, all programs
will check the memory for overruns for every memory allocation and memory
@@ -11934,10 +12039,11 @@ communication.
All other information is transferred as text that can be read by anyone
who is able to watch the connection. If you are concerned about this,
you can use the compressed protocol (in @strong{MySQL} Version 3.22 and above)
-to make things much harder. To make things even more secure you should
-use @code{ssh} (see @uref{http://www.cs.hut.fi/ssh}). With this, you
-can get an encrypted TCP/IP connection between a @strong{MySQL} server
-and a @strong{MySQL} client.
+to make things much harder. To make things even more secure you should use
+@code{ssh}. You can find an open source ssh client at
+@uref{http://www.openssh.org}, and a commercial ssh client at
+@uref{http://www.ssh.com}. With this, you can get an encrypted TCP/IP
+connection between a @strong{MySQL} server and a @strong{MySQL} client.
To make a @strong{MySQL} system secure, you should strongly consider the
following suggestions:
@@ -13734,7 +13840,7 @@ it down and restarting it.
If everything else fails, start the @code{mysqld} daemon with a debugging
option (for example, @code{--debug=d,general,query}). This will print host and
user information about attempted connections, as well as information about
-each command issued. @xref{Debugging server}.
+each command issued. @xref{Making trace files}.
@item
If you have any other problems with the @strong{MySQL} grant tables and
@@ -18176,6 +18282,11 @@ per-connection basis. It will not be changed by another client. It will not
even be changed if you update another @code{AUTO_INCREMENT} column with a
non-magic value (that is, a value that is not @code{NULL} and not @code{0}).
+If you insert many rows at the same time with an insert statement,
+@code{LAST_INSERT_ID()} returns the value for the first inserted row.
+The reason for this is that it makes it possible to easily reproduce
+the same @code{INSERT} statement against some other server.
+
@cindex sequence emulation
If @code{expr} is given as an argument to @code{LAST_INSERT_ID()} in an
@code{UPDATE} clause, then the value of the argument is returned as a
@@ -18679,9 +18790,10 @@ When you insert a value of @code{NULL} (recommended) or @code{0} into an
@xref{mysql_insert_id, , @code{mysql_insert_id()}}.
If you delete the row containing the maximum value for an
-@code{AUTO_INCREMENT} column, the value will be reused with an ISAM
-table but not with a @code{MyISAM} table. If you delete all rows in the
-table with @code{DELETE FROM table_name} (without a @code{WHERE}) in
+@code{AUTO_INCREMENT} column, the value will be reused with an
+@code{ISAM}, @code{BDB} or @code{INNODB} table but not with a
+@code{MyISAM} table. If you delete all rows in the table with
+@code{DELETE FROM table_name} (without a @code{WHERE}) in
@code{AUTOCOMMIT} mode, the sequence starts over for both table types.
@strong{NOTE:} There can be only one @code{AUTO_INCREMENT} column per
@@ -19691,7 +19803,7 @@ SELECT [STRAIGHT_JOIN] [SQL_SMALL_RESULT] [SQL_BIG_RESULT] [SQL_BUFFER_RESULT]
[ORDER BY @{unsigned_integer | col_name | formula@} [ASC | DESC] ,...]
[LIMIT [offset,] rows]
[PROCEDURE procedure_name]
- [FOR UPDATE | IN SHARE MODE]]
+ [FOR UPDATE | LOCK IN SHARE MODE]]
@end example
@c help end
@@ -20433,7 +20545,8 @@ like you could do this, but that was a bug that has been corrected.
@section @code{LOAD DATA INFILE} Syntax
@example
-LOAD DATA [LOW_PRIORITY] [LOCAL] INFILE 'file_name.txt' [REPLACE | IGNORE]
+LOAD DATA [LOW_PRIORITY | CONCURRENT] [LOCAL] INFILE 'file_name.txt'
+ [REPLACE | IGNORE]
INTO TABLE tbl_name
[FIELDS
[TERMINATED BY '\t']
@@ -20461,6 +20574,12 @@ If you specify the keyword @code{LOW_PRIORITY}, execution of the
@code{LOAD DATA} statement is delayed until no other clients are reading
from the table.
+If you specify the keyword @code{CONCURRENT} with a @code{MyISAM} table,
+then other threads can retrieve data from the table while @code{LOAD
+DATA} is executing. Using this option will of course affect the
+performance of @code{LOAD DATA} a bit even if no other thread is using
+the table at the same time.
+
Using @code{LOCAL} will be a bit slower than letting the server access the
files directly, because the contents of the file must travel from the client
host to the server host. On the other hand, you do not need the
@@ -21662,7 +21781,7 @@ if @code{--skip-bdb} is used.
@code{YES} if @code{mysqld} supports Gemini tables. @code{DISABLED}
if @code{--skip-gemini} is used.
@item @code{have_innodb}
-@code{YES} if @code{mysqld} supports Innodb tables. @code{DISABLED}
+@code{YES} if @code{mysqld} supports InnoDB tables. @code{DISABLED}
if @code{--skip-innodb} is used.
@item @code{have_raid}
@code{YES} if @code{mysqld} supports the @code{RAID} option.
@@ -22453,11 +22572,11 @@ non-transactional table will not change.
If you are using @code{BEGIN} or @code{SET AUTOCOMMIT=0}, you
should use the @strong{MySQL} binary log for backups instead of the
-old update log; The transaction is stored in the binary log
-in one chunk, during @code{COMMIT}, the to ensure and @code{ROLLBACK}:ed
-transactions are not stored. @xref{Binary log}.
+older update log. Transactions are stored in the binary log
+in one chunk, upon @code{COMMIT}, to ensure that transactions which are
+rolled back are not stored. @xref{Binary log}.
-The following commands automatically ends an transaction (as if you had done
+The following commands automatically end a transaction (as if you had done
a @code{COMMIT} before executing the command):
@multitable @columnfractions .33 .33 .33
@@ -23270,9 +23389,19 @@ type it should use for the table. @strong{MySQL} will always create a
@code{.frm} file to hold the table and column definitions. Depending on
the table type, the index and data will be stored in other files.
+Note that to use @code{InnoDB} tables you have to use at least
+the @code{innodb_data_file_path} startup option. @xref{InnoDB start}.
+
The default table type in @strong{MySQL} is @code{MyISAM}. If you are
trying to use a table type that is not compiled-in or activated,
-@strong{MySQL} will instead create a table of type @code{MyISAM}.
+@strong{MySQL} will instead create a table of type @code{MyISAM}. This
+is a very useful feature when you want to copy tables between different
+SQL servers that support different table types (like copying tables to
+a slave that is optimized for speed by not having transactional tables).
+This automatic table changing can however also be very confusing for new
+@strong{MySQL} users. We plan to fix this by introducing warnings in
+@strong{MySQL} 4.0 and giving a warning when a table type is automatically
+changed.
You can convert tables between different types with the @code{ALTER
TABLE} statement. @xref{ALTER TABLE, , @code{ALTER TABLE}}.
@@ -23372,7 +23501,7 @@ Internal handling of one @code{AUTO_INCREMENT} column. @code{MyISAM}
will automatically update this on @code{INSERT/UPDATE}. The
@code{AUTO_INCREMENT} value can be reset with @code{myisamchk}. This
will make @code{AUTO_INCREMENT} columns faster (at least 10 %) and old
-numbers will not be reused as with the old ISAM. Note that when an
+numbers will not be reused as with the old @code{ISAM}. Note that when an
@code{AUTO_INCREMENT} is defined on the end of a multi-part-key the old
behavior is still present.
@item
@@ -23741,7 +23870,7 @@ is not signaled to the other servers.
@section MERGE Tables
@code{MERGE} tables are new in @strong{MySQL} Version 3.23.25. The code
-is still in beta, but should stabilize soon!
+is still in gamma, but should be reasonably stable.
A @code{MERGE} table is a collection of identical @code{MyISAM} tables
that can be used as one. You can only @code{SELECT}, @code{DELETE}, and
@@ -23754,8 +23883,8 @@ will only clear the mapping for the table, not delete everything in the
mapped tables. (We plan to fix this in 4.0).
With identical tables we mean that all tables are created with identical
-column information. You can't put a MERGE over tables where the columns
-are packed differently or doesn't have exactly the same columns.
+column and key information. You can't put a MERGE over tables where the
+columns are packed differently or don't have exactly the same columns.
Some of the tables can however be compressed with @code{myisampack}.
@xref{myisampack}.
@@ -23790,8 +23919,10 @@ More efficient repairs. It's easier to repair the individual files that
are mapped to a @code{MERGE} file than trying to repair a real big file.
@item
Instant mapping of many files as one. A @code{MERGE} table uses the
-index of the individual tables. It doesn't need an index of its one.
-This makes @code{MERGE} table collections VERY fast to make or remap.
+index of the individual tables. It doesn't need to maintain an index of
+its own. This makes @code{MERGE} table collections VERY fast to make or
+remap. Note that you must specify the key definitions when you create
+a @code{MERGE} table!
@item
If you have a set of tables that you join to a big table on demand or
batch, you should instead create a @code{MERGE} table on them on demand.
@@ -24004,62 +24135,62 @@ SUM_OVER_ALL_KEYS(max_length_of_key + sizeof(char*) * 2)
@section BDB or Berkeley_DB Tables
@menu
-* BDB overview::
-* BDB install::
-* BDB start::
-* BDB characteristic::
-* BDB TODO::
-* BDB portability::
-* BDB errors::
+* BDB overview:: Overview of BDB Tables
+* BDB install:: Installing BDB
+* BDB start:: BDB startup options
+* BDB characteristic:: Some characteristic of @code{BDB} tables:
+* BDB TODO:: Some things we need to fix for BDB in the near future:
+* BDB portability:: Operating systems supported by @strong{BDB}
+* BDB errors:: Errors You May Get When Using BDB Tables
@end menu
@node BDB overview, BDB install, BDB, BDB
-@subsection Overview over BDB tables
+@subsection Overview of BDB Tables
-BDB tables are included in the @strong{MySQL} source distribution
-starting from 3.23.34 and will be activated in the @strong{MySQL}-max
+Support for BDB tables is included in the @strong{MySQL} source distribution
+starting from Version 3.23.34 and is activated in the @strong{MySQL}-Max
binary.
-Berkeley DB (@uref{http://www.sleepycat.com}) has provided
-@strong{MySQL} with a transaction-safe table handler. This will survive
-crashes and also provides @code{COMMIT} and @code{ROLLBACK} on
-transactions. The @strong{MySQL} source distribution comes with a BDB
-distribution that has a couple of small patches to make it work more
-smoothly with @strong{MySQL}. You can't use a not-patched @code{BDB}
-version with @strong{MySQL}.
+BerkeleyDB, available at @uref{http://www.sleepycat.com/} has provided
+@strong{MySQL} with a transactional table handler. By using BerkeleyDB
+tables, your tables may have a greater chance of surviving crashes, and also
+provides @code{COMMIT} and @code{ROLLBACK} on transactions. The
+@strong{MySQL} source distribution comes with a BDB distribution that has a
+couple of small patches to make it work more smoothly with @strong{MySQL}.
+You can't use a non-patched @code{BDB} version with @strong{MySQL}.
-We at MySQL AB are working in close cooperating with Sleepycat to
-keep the quality of the @strong{MySQL} - BDB interface high.
+We at @strong{MySQL AB} are working in close cooperation with Sleepycat to
+keep the quality of the @strong{MySQL}/BDB interface high.
When it comes to supporting BDB tables, we are committed to help our
users to locate the problem and help creating a reproducible test case
for any problems involving BDB tables. Any such test case will be
forwarded to Sleepycat who in turn will help us find and fix the
-problem. As this is a two stage operating, any problems with BDB tables
-may take a little longer for us to fix than for other table handlers,
-but as the Berkeley code itself has been used by many other applications
-than @strong{MySQL} we don't envision any big problems with this.
-@xref{Table handler support}.
+problem. As this is a two stage operation, any problems with BDB tables
+may take a little longer for us to fix than for other table handlers.
+However, as the BerkeleyDB code itself has been used by many other
+applications than @strong{MySQL}, we don't envision any big problems with
+this. @xref{Table handler support}.
@node BDB install, BDB start, BDB overview, BDB
@subsection Installing BDB
If you have downloaded a binary version of @strong{MySQL} that includes
-support for Berkeley DB, simply follow the instructions for
-installing a binary version of @strong{MySQL}. @xref{Installing binary}.
-@xref{mysqld-max}.
+support for BerkeleyDB, simply follow the instructions for installing a
+binary version of @strong{MySQL}.
+@xref{Installing binary}. @xref{mysqld-max}.
To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL}
-3.23.34 or newer and configure @code{MySQL} with the
-@code{--with-berkeley-db} option. @xref{Installing source}.
+Version 3.23.34 or newer and configure @code{MySQL} with the
+@code{--with-berkeley-db} option. @xref{Installing source}.
@example
cd /path/to/source/of/mysql-3.23.34
./configure --with-berkeley-db
@end example
-Please refer to the manual provided by @code{BDB} distribution for
-more/updated information.
+Please refer to the manual provided with the @code{BDB} distribution for
+more updated information.
Even though Berkeley DB is in itself very tested and reliable,
the @strong{MySQL} interface is still considered beta quality.
@@ -24373,18 +24504,18 @@ NuSphere is working on removing these limitations.
@menu
* InnoDB overview:: InnoDB tables overview
* InnoDB start:: InnoDB startup options
-* Creating an InnoDB database:: Creating an InnoDB database. Creating an InnoDB database
+* Creating an InnoDB database:: Creating an InnoDB database. Creating an InnoDB database. Creating an InnoDB database.
* Using InnoDB tables:: Creating InnoDB tables
* Adding and removing:: Adding and removing InnoDB data and log files
* Backing up:: Backing up and recovering an InnoDB database
* Moving:: Moving an InnoDB database to another machine
-* InnoDB transaction model:: InnoDB transaction model. InnoDB transaction model
+* InnoDB transaction model:: InnoDB transaction model.
* Implementation:: Implementation of multiversioning
* Table and index:: Table and index structures
* File space management:: File space management and disk i/o
* Error handling:: Error handling
* InnoDB restrictions:: Some restrictions on InnoDB tables
-* InnoDB contact information:: InnoDB contact information. InnoDB contact information
+* InnoDB contact information:: InnoDB contact information.
@end menu
@node InnoDB overview, InnoDB start, InnoDB, InnoDB
@@ -24394,55 +24525,60 @@ InnoDB tables are included in the @strong{MySQL} source distribution
starting from 3.23.34a and are activated in the @strong{MySQL -max}
binary.
-If you have downloaded a binary version of MySQL that includes
-support for InnoDB, simply follow the instructions for
-installing a binary version of MySQL.
-See section 4.6 'Installing a MySQL Binary Distribution'.
+If you have downloaded a binary version of @strong{MySQL} that includes
+support for InnoDB (mysqld-max), simply follow the instructions for
+installing a binary version of @strong{MySQL}. @xref{Installing binary}.
+@xref{mysqld-max}.
-To compile MySQL with InnoDB support, download MySQL-3.23.34a or newer
-and configure @code{MySQL} with the
-@code{--with-innobase} option. Starting from MySQL-3.23.37 the option
-is @code{--with-innodb}. See section
-4.7 'Installing a MySQL Source Distribution'.
+To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer
+and configure @code{MySQL} with the @code{--with-innodb} option.
+@xref{Installing source}.
@example
cd /path/to/source/of/mysql-3.23.37
./configure --with-innodb
@end example
-InnoDB provides MySQL with a transaction safe table handler with
+InnoDB provides @strong{MySQL} with a transaction safe table handler with
commit, rollback, and crash recovery capabilities. InnoDB does
locking on row level, and also provides an Oracle-style consistent
non-locking read in @code{SELECTS}, which increases transaction
concurrency. There is no need for lock escalation in InnoDB,
because row level locks in InnoDB fit in very small space.
-Technically, InnoDB is a database backend placed under MySQL. InnoDB
+InnoDB has been designed for maximum performance when processing
+large data volumes. Its CPU efficiency is probably not
+matched by any other disk-based relational database engine.
+
+Technically, InnoDB is a database backend placed under @strong{MySQL}. InnoDB
has its own buffer pool for caching data and indexes in main
memory. InnoDB stores its tables and indexes in a tablespace, which
may consist of several files. This is different from, for example,
@code{MyISAM} tables where each table is stored as a separate file.
InnoDB is distributed under the GNU GPL License Version 2 (of June 1991).
-In the source distribution of MySQL, InnoDB appears as a subdirectory.
+In the source distribution of @strong{MySQL}, InnoDB appears as a subdirectory.
@node InnoDB start, Creating an InnoDB database, InnoDB overview, InnoDB
@subsection InnoDB startup options
-Beginning from MySQL-3.23.37 the prefix of the options is changed
+Beginning from @strong{MySQL}-3.23.37 the prefix of the options is changed
from @code{innobase_...} to @code{innodb_...}.
-To use InnoDB tables you must specify configuration parameters
-in the MySQL configuration file in the @code{[mysqld]} section of
-the configuration file @file{my.cnf}.
-Suppose you have a Windows NT machine with 128 MB RAM and a
-single 10 GB hard disk.
-Below is an example of possible configuration parameters in @file{my.cnf} for
-InnoDB:
+To use InnoDB tables you @strong{MUST} specify configuration parameters
+in the @strong{MySQL} configuration file in the @code{[mysqld]} section of
+the configuration file @file{my.cnf}. @xref{Option files}.
+
+The only required parameter to use InnoDB is @code{innodb_data_file_path},
+but you should set others if you want to get better performance.
+
+Suppose you have a Windows NT machine with 128 MB RAM and a single 10 GB
+hard disk. Below is an example of possible configuration parameters in
+@file{my.cnf} for InnoDB:
@example
-innodb_data_home_dir = c:\ibdata
innodb_data_file_path = ibdata1:2000M;ibdata2:2000M
+innodb_data_home_dir = c:\ibdata
set-variable = innodb_mirrored_log_groups=1
innodb_log_group_home_dir = c:\iblogs
set-variable = innodb_log_files_in_group=3
@@ -24464,8 +24600,8 @@ Below is an example of possible configuration parameters in @file{my.cnf} for
InnoDB:
@example
-innodb_data_home_dir = /
innodb_data_file_path = ibdata/ibdata1:2000M;dr2/ibdata/ibdata2:2000M
+innodb_data_home_dir = /
set-variable = innodb_mirrored_log_groups=1
innodb_log_group_home_dir = /dr3
set-variable = innodb_log_files_in_group=3
@@ -24535,7 +24671,7 @@ log archiving. The value of this parameter should currently be set the
same as @code{innodb_log_group_home_dir}.
@item @code{innodb_log_archive} @tab
This value should currently be set to 0. As recovery from a backup is
-done by MySQL using its own log files, there is currently no need to
+done by @strong{MySQL} using its own log files, there is currently no need to
archive InnoDB log files.
@item @code{innodb_buffer_pool_size} @tab
The size of the memory buffer InnoDB uses to cache data and indexes of
@@ -24550,7 +24686,7 @@ and other internal data structures. A sensible value for this might be
2M, but the more tables you have in your application the more you will
need to allocate here. If InnoDB runs out of memory in this pool, it
will start to allocate memory from the operating system, and write
-warning messages to the MySQL error log.
+warning messages to the @strong{MySQL} error log.
@item @code{innodb_file_io_threads} @tab
Number of file i/o threads in InnoDB. Normally, this should be 4, but
on Windows NT disk i/o may benefit from a larger number.
@@ -24567,15 +24703,15 @@ resolve the situation.
@node Creating an InnoDB database, Using InnoDB tables, InnoDB start, InnoDB
@subsection Creating an InnoDB database
-Suppose you have installed MySQL and have edited @file{my.cnf} so that
+Suppose you have installed @strong{MySQL} and have edited @file{my.cnf} so that
it contains the necessary InnoDB configuration parameters.
-Before starting MySQL you should check that the directories you have
+Before starting @strong{MySQL} you should check that the directories you have
specified for InnoDB data files and log files exist and that you have
access rights to those directories. InnoDB
cannot create directories, only files. Check also you have enough disk space
for the data and log files.
-When you now start MySQL, InnoDB will start creating your data files
+When you now start @strong{MySQL}, InnoDB will start creating your data files
and log files. InnoDB will print something like the following:
@example
@@ -24600,9 +24736,9 @@ InnoDB: Started
mysqld: ready for connections
@end example
-A new InnoDB database has now been created. You can connect to the MySQL
-server with the usual MySQL client programs like @code{mysql}.
-When you shut down the MySQL server with @file{mysqladmin shutdown},
+A new InnoDB database has now been created. You can connect to the @strong{MySQL}
+server with the usual @strong{MySQL} client programs like @code{mysql}.
+When you shut down the @strong{MySQL} server with @file{mysqladmin shutdown},
InnoDB output will be like the following:
@example
@@ -24617,7 +24753,7 @@ will see the files created. The log directory will also contain
a small file named @file{ib_arch_log_0000000000}. That file
resulted from the database creation, after which InnoDB switched off
log archiving.
-When MySQL is again started, the output will be like the following:
+When @strong{MySQL} is again started, the output will be like the following:
@example
~/mysqlm/sql > mysqld
@@ -24627,17 +24763,17 @@ mysqld: ready for connections
@subsubsection If something goes wrong in database creation
-If something goes wrong in an InnoDB database creation, you should delete
-all files created by InnoDB. This means all data files, all log files,
-the small archived log file, and in the case you already did create
-some InnoDB tables, delete also the corresponding @file{.frm}
-files for these tables from the MySQL database directories. Then you can
-try the InnoDB database creation again.
+If something goes wrong in an InnoDB database creation, you should
+delete all files created by InnoDB. This means all data files, all log
+files, the small archived log file, and in the case you already did
+create some InnoDB tables, delete also the corresponding @file{.frm}
+files for these tables from the @strong{MySQL} database
+directories. Then you can try the InnoDB database creation again.
@node Using InnoDB tables, Adding and removing, Creating an InnoDB database, InnoDB
@subsection Creating InnoDB tables
-Suppose you have started the MySQL client with the command
+Suppose you have started the @strong{MySQL} client with the command
@code{mysql test}.
To create a table in the InnoDB format you must specify
@code{TYPE = InnoDB} in the table creation SQL command:
@@ -24648,15 +24784,15 @@ CREATE TABLE CUSTOMER (A INT, B CHAR (20), INDEX (A)) TYPE = InnoDB;
This SQL command will create a table and an index on column @code{A}
into the InnoDB tablespace consisting of the data files you specified
-in @file{my.cnf}. In addition MySQL will create a file
-@file{CUSTOMER.frm} to the MySQL database directory @file{test}.
+in @file{my.cnf}. In addition @strong{MySQL} will create a file
+@file{CUSTOMER.frm} to the @strong{MySQL} database directory @file{test}.
Internally, InnoDB will add to its own data dictionary an entry
for table @code{'test/CUSTOMER'}. Thus you can create a table
-of the same name @code{CUSTOMER} in another database of MySQL, and
+of the same name @code{CUSTOMER} in another database of @strong{MySQL}, and
the table names will not collide inside InnoDB.
You can query the amount of free space in the InnoDB tablespace
-by issuing the table status command of MySQL for any table you have
+by issuing the table status command of @strong{MySQL} for any table you have
created with @code{TYPE = InnoDB}. Then the amount of free
space in the tablespace appears in the table comment section in the
output of @code{SHOW}. An example:
@@ -24674,7 +24810,7 @@ You must drop the tables individually. Also take care not to delete or
add @file{.frm} files to your InnoDB database manually: use
@code{CREATE TABLE} and @code{DROP TABLE} commands.
InnoDB has its own internal data dictionary, and you will get problems
-if the MySQL @file{.frm} files are out of 'sync' with the InnoDB
+if the @strong{MySQL} @file{.frm} files are out of 'sync' with the InnoDB
internal data dictionary.
@node Adding and removing, Backing up, Using InnoDB tables, InnoDB
@@ -24682,8 +24818,8 @@ internal data dictionary.
You cannot increase the size of an InnoDB data file. To add more into
your tablespace you have to add a new data file. To do this you have to
-shut down your MySQL database, edit the @file{my.cnf} file, adding a
-new file to @code{innodb_data_file_path}, and then start MySQL
+shut down your @strong{MySQL} database, edit the @file{my.cnf} file, adding a
+new file to @code{innodb_data_file_path}, and then start @strong{MySQL}
again.
Currently you cannot remove a data file from InnoDB. To decrease the
@@ -24692,11 +24828,11 @@ all your tables, create a new database, and import your tables to the
new database.
If you want to change the number or the size of your InnoDB log files,
-you have to shut down MySQL and make sure that it shuts down without errors.
+you have to shut down @strong{MySQL} and make sure that it shuts down without errors.
Then copy the old log files into a safe place just in case something
went wrong in the shutdown and you will need them to recover the
database. Delete then the old log files from the log file directory,
-edit @file{my.cnf}, and start MySQL again. InnoDB will tell
+edit @file{my.cnf}, and start @strong{MySQL} again. InnoDB will tell
you at the startup that it is creating new log files.
@node Backing up, Moving, Adding and removing, InnoDB
@@ -24707,7 +24843,7 @@ To take a 'binary' backup of your database you have to do the following:
@itemize @bullet
@item
-Shut down your MySQL database and make sure it shuts down without errors.
+Shut down your @strong{MySQL} database and make sure it shuts down without errors.
@item
Copy all your data files into a safe place.
@item
@@ -24738,12 +24874,12 @@ dumps. Then you can take the binary backup, and you will then have
a consistent snapshot of your database in two formats.
To be able to recover your InnoDB database to the present from the
-binary backup described above, you have to run your MySQL database
-with the general logging and log archiving of MySQL switched on. Here
-by the general logging we mean the logging mechanism of the MySQL server
+binary backup described above, you have to run your @strong{MySQL} database
+with the general logging and log archiving of @strong{MySQL} switched on. Here
+by the general logging we mean the logging mechanism of the @strong{MySQL} server
which is independent of InnoDB logs.
-To recover from a crash of your MySQL server process, the only thing
+To recover from a crash of your @strong{MySQL} server process, the only thing
you have to do is to restart it. InnoDB will automatically check the
logs and perform a roll-forward of the database to the present.
InnoDB will automatically roll back uncommitted transactions which were
@@ -24778,7 +24914,7 @@ mysqld: ready for connections
If your database gets corrupted or your disk fails, you have
to do the recovery from a backup. In the case of corruption, you should
first find a backup which is not corrupted. From a backup do the recovery
-from the general log files of MySQL according to instructions in the
+from the general log files of @strong{MySQL} according to instructions in the
MySQL manual.
@subsubsection Checkpoints
@@ -24844,7 +24980,7 @@ to lock every row in the database, or any random subset of the rows,
without InnoDB running out of memory.
In InnoDB all user activity happens inside transactions. If the
-auto commit mode is used in MySQL, then each SQL statement
+auto commit mode is used in @strong{MySQL}, then each SQL statement
will form a single transaction. If the auto commit mode is
switched off, then we can think that a user always has a transaction
open. If he issues
@@ -24895,10 +25031,10 @@ happen that meanwhile some other user has deleted the parent row
from the table @code{PARENT}, and you are not aware of that.
The solution is to perform the @code{SELECT} in a locking
-mode, @code{IN SHARE MODE}.
+mode, @code{LOCK IN SHARE MODE}.
@example
-SELECT * FROM PARENT WHERE NAME = 'Jones' IN SHARE MODE;
+SELECT * FROM PARENT WHERE NAME = 'Jones' LOCK IN SHARE MODE;
@end example
Performing a read in share mode means that we read the latest
@@ -24997,7 +25133,7 @@ table.
@code{SELECT ... FROM ...} : this is a consistent read, reading a
snapshot of the database and setting no locks.
@item
-@code{SELECT ... FROM ... IN SHARE MODE} : sets shared next-key locks
+@code{SELECT ... FROM ... LOCK IN SHARE MODE} : sets shared next-key locks
on all index records the read encounters.
@item
@code{SELECT ... FROM ... FOR UPDATE} : sets exclusive next-key locks
@@ -25012,7 +25148,7 @@ on the duplicate index record.
@code{INSERT INTO T SELECT ... FROM S WHERE ...} sets an exclusive
(non-next-key) lock on each row inserted into @code{T}. Does
the search on @code{S} as a consistent read, but sets shared next-key
-locks on @code{S} if the MySQL logging is on. InnoDB has to set
+locks on @code{S} if the @strong{MySQL} logging is on. InnoDB has to set
locks in the latter case because in roll-forward recovery from a
backup every SQL statement has to be executed in exactly the same
way as it was done originally.
@@ -25032,10 +25168,10 @@ lock on every record the search encounters.
lock on every record the search encounters.
@item
@code{LOCK TABLES ... } : sets table locks. In the implementation
-the MySQL layer of code sets these locks. The automatic deadlock detection
+the @strong{MySQL} layer of code sets these locks. The automatic deadlock detection
of InnoDB cannot detect deadlocks where such table locks are involved:
see the next section below. See also section 13 'InnoDB restrictions'
-about the following: since MySQL does know about row level locks,
+about the following: since @strong{MySQL} does not know about row level locks,
it is possible that you
get a table lock on a table where another user currently has row level
locks. But that does not put transaction integrity into danger.
@@ -25046,7 +25182,7 @@ locks. But that does not put transaction integerity into danger.
InnoDB automatically detects a deadlock of transactions and rolls
back the transaction whose lock request was the last one to build
a deadlock, that is, a cycle in the waits-for graph of transactions.
-InnoDB cannot detect deadlocks where a lock set by a MySQL
+InnoDB cannot detect deadlocks where a lock set by a @strong{MySQL}
@code{LOCK TABLES} statement is involved, or if a lock set
in another table handler than InnoDB is involved. You have to resolve
these situations using @code{innodb_lock_wait_timeout} set in
@@ -25147,7 +25283,7 @@ of the index records.
If index records are inserted in a sequential (ascending or descending)
order, the resulting index pages will be about 15/16 full.
If records are inserted in a random order, then the pages will be
-1/2 - 15/16 full. If the fillfactor of an index page drops below 1/4,
+1/2 - 15/16 full. If the fillfactor of an index page drops below 1/2,
InnoDB will try to contract the index tree to free the page.
@subsubsection Insert buffering
@@ -25217,7 +25353,7 @@ Each secondary index record contains also all the fields defined
for the clustered index key.
@item
A record contains also a pointer to each field of the record.
-If the total length of the fields in a record is < 256 bytes, then
+If the total length of the fields in a record is < 128 bytes, then
the pointer is 1 byte, else 2 bytes.
@end itemize
@@ -25300,6 +25436,28 @@ but remember that deleted rows can be physically removed only in a
purge operation after they are no longer needed in transaction rollback or
consistent read.
+@subsubsection Defragmenting a table
+
+If there are random insertions or deletions
+in the indexes of a table, the indexes
+may become fragmented. By fragmentation we mean that the physical ordering
+of the index pages on the disk is not close to the alphabetical ordering
+of the records on the pages, or that there are many unused pages in the
+64-page blocks which were allocated to the index.
+
+It can speed up index scans if you
+periodically use @code{mysqldump} to dump the table to
+a text file, drop the table, and reload it from the dump.
+Another way to do the defragmenting is to @code{ALTER} the table type to
+@code{MyISAM} and back to @code{InnoDB} again.
+Note that a @code{MyISAM} table must fit in a single file
+on your operating system.
+
+If the insertions to an index are always ascending and
+records are deleted only from the end, then the file space management
+algorithm of InnoDB guarantees that fragmentation in the index will
+not occur.
+
@node Error handling, InnoDB restrictions, File space management, InnoDB
@subsection Error handling
@@ -25313,7 +25471,7 @@ The following list specifies the error handling of InnoDB.
@itemize @bullet
@item
If you run out of file space in the tablespace,
-you will get the MySQL @code{'Table is full'} error
+you will get the @strong{MySQL} @code{'Table is full'} error
and InnoDB rolls back the SQL statement.
@item
A transaction deadlock or a timeout in a lock wait will give
@@ -25328,7 +25486,7 @@ statement.
@item
A 'row too long' error rolls back the SQL statement.
@item
-Other errors are mostly detected by the MySQL layer of code, and
+Other errors are mostly detected by the @strong{MySQL} layer of code, and
they roll back the corresponding SQL statement.
@end itemize
@@ -25336,19 +25494,20 @@ they roll back the corresponding SQL statement.
@subsection Some restrictions on InnoDB tables
@itemize @bullet
-@item You cannot create an index on a prefix of a column:
+@item
+If you try to create a unique index on a prefix of a column you will get an
+error:
@example
-@code{CREATE TABLE T (A CHAR(20), B INT, INDEX T_IND (A(5))) TYPE = InnoDB;
-}
+CREATE TABLE T (A CHAR(20), B INT, UNIQUE (A(5))) TYPE = InnoDB;
@end example
-The above will not work. For a MyISAM table the above would create an index
-where only the first 5 characters from column @code{A} are stored.
+If you create a non unique index on a prefix of a column, InnoDB will
+create an index over the whole column.
@item
@code{INSERT DELAYED} is not supported for InnoDB tables.
@item
-The MySQL @code{LOCK TABLES} operation does not know of InnoDB
+The @strong{MySQL} @code{LOCK TABLES} operation does not know of InnoDB
row level locks set in already completed SQL statements: this means that
you can get a table lock on a table even if there still exist transactions
of other users which have row level locks on the same table. Thus
@@ -25365,7 +25524,7 @@ A table cannot contain more than 1000 columns.
@item
@code{DELETE FROM TABLE} does not regenerate the table but instead
deletes all rows, one by one, which is not that fast. In future versions
-of MySQL you can use @code{TRUNCATE} which is fast.
+of @strong{MySQL} you can use @code{TRUNCATE} which is fast.
@item
Before dropping a database with InnoDB tables one has to drop
the individual InnoDB tables first.
@@ -29303,6 +29462,9 @@ index operations (querying/dumping/statistics).
@itemize @bullet
@item Make all operations with @code{FULLTEXT} index @strong{faster}.
@item Support for braces @code{()} in boolean full-text search.
+@item Phrase search, proximity operators
+@item Boolean search can work without @code{FULLTEXT} index
+(yes, @strong{very} slow).
@item Support for "always-index words". They could be any strings
the user wants to treat as words, examples are "C++", "AS/400", "TCP/IP", etc.
@item Support for full-text search in @code{MERGE} tables.
@@ -31446,7 +31608,7 @@ We can find the result from crash-me on a lot of different databases at
@menu
* Programs:: What do the executables do?
-* mysqld-max::
+* mysqld-max:: mysqld-max, An extended mysqld server
* safe_mysqld:: safe_mysqld, the wrapper around mysqld
* mysqld_multi:: Program for managing multiple @strong{MySQL} servers
* mysql:: The command line tool
@@ -31597,12 +31759,20 @@ the following configure options:
@multitable @columnfractions .3 .7
@item @strong{Option} @tab @strong{Comment}
-@item --with-server-suffix=-Max @tab Add a suffix to the @code{mysqld} version string.
+@item --with-server-suffix=-max @tab Add a suffix to the @code{mysqld} version string.
@item --with-bdb @tab Support for Berkeley DB (BDB) tables
@item --with-innodb @tab Support for InnoDB tables.
@item CFLAGS=-DUSE_SYMDIR @tab Symbolic links support for Windows.
@end multitable
+You can find the @strong{MySQL}-max binaries at
+@uref{http://www.mysql.com/downloads/mysql-max-3.23.html}.
+
+The Windows @strong{MySQL} 3.23 binary distribution includes both the
+standard @code{mysqld.exe} binary and the @code{mysqld-max.exe} binary.
+@uref{http://www.mysql.com/downloads/mysql-3.23.html}.
+@xref{Windows installation}.
+
Note that as Berkeley DB and InnoDB are not available for all platforms,
some of the @code{Max} binaries may not have support for both of these.
You can check which table types are supported by doing the following
@@ -31631,6 +31801,13 @@ The meaning of the values are:
@item DISABLED @tab The xxxx option is disabled because one started @code{mysqld} with @code{--skip-xxxx} or because one didn't start @code{mysqld} with all needed options to enable the option. In this case the @code{hostname.err} file should contain a reason for why the option is disabled.
@end multitable
+@strong{NOTE}: To be able to create InnoDB tables you @strong{MUST} edit
+your startup options to include at least the @code{innodb_data_file_path}
+option. @xref{InnoDB start}.
+
+To get better performance for BDB tables, you should add some configuration
+options for these too. @xref{BDB start}.
+
@code{safe_mysqld} will automatically try to start any @code{mysqld} binary
with the @code{-max} prefix. This makes it very easy to test out
another @code{mysqld} binary in an existing installation. Just
@@ -31643,6 +31820,23 @@ feature. It just installs the @code{mysqld-max} executable and
@code{safe_mysqld} will automatically use this executable when
@code{safe_mysqld} is restarted.
+The following table shows which table types our standard @strong{MySQL-Max}
+binaries include:
+
+@multitable @columnfractions .4 .3 .3
+@item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB}
+@item AIX 4.3 @tab N @tab Y
+@item HPUX 11.0 @tab N @tab Y
+@item Linux-Alpha @tab N @tab Y
+@item Linux-Intel @tab Y @tab Y
+@item Linux-Ia64 @tab N @tab Y
+@item Solaris-intel @tab N @tab Y
+@item Solaris-sparc @tab Y @tab Y
+@item SCO OSR5 @tab Y @tab Y
+@item UnixWare @tab Y @tab Y
+@item Windows/NT @tab Y @tab Y
+@end multitable
+
@cindex tools, safe_mysqld
@cindex scripts
@cindex @code{safe_mysqld}
@@ -32168,9 +32362,9 @@ the @code{mysql} variables that affect your queries.
@cindex @code{safe-mode} command
A useful startup option for beginners (introduced in @strong{MySQL}
-Version 3.23.11) is @code{--safe-mode} (or @code{--i-am-a-dummy} for
+Version 3.23.11) is @code{--safe-updates} (or @code{--i-am-a-dummy} for
users that have at some time done a @code{DELETE FROM table_name} but
-forgot the @code{WHERE} clause. When using this option, @code{mysql}
+forgot the @code{WHERE} clause). When using this option, @code{mysql}
sends the following command to the @strong{MySQL} server when opening
the connection:
@@ -32495,6 +32689,10 @@ used.)
@item -q, --quick
Don't buffer query, dump directly to stdout. Uses @code{mysql_use_result()}
to do this.
+@item -r, --result-file=...
+Direct output to a given file. This option should be used in MSDOS,
+because it prevents new line '\n' from being converted to '\r\n' (carriage
+return + new line).
@item -S /path/to/socket, --socket=/path/to/socket
The socket file to use when connecting to @code{localhost} (which is the
default host).
@@ -34172,9 +34370,8 @@ Record file is crashed
@item
Got error ### from table handler
-To get more information about the error you can do @code{perror
-###}. Here is the most common errors that indicates a problem with the
-table:
+To get more information about the error you can run @code{perror ###}. Here
+are the most common errors that indicate a problem with the table:
@example
shell> perror 126 127 132 134 135 136 141 144 145
@@ -34192,22 +34389,13 @@ shell> perror 126 127 132 134 135 136 141 144 145
Note that error 135, no more room in record file, is not an error that
can be fixed by a simple repair. In this case you have to do:
-@itemize @bullet
-@item
-@code{CREATE TABLE ...} for the table with proper @code{MAX_ROWS} and
-@code{AVG_ROW_LENGTH} values. @xref{CREATE TABLE}.
-@item
-Copy the data over from the old table with @code{INSERT INTO new_table
-SELECT * from old_table}.
-@item
-Rename the old table to the new table:
-@code{RENAME old_table to tmp_table, new_table to old_table}
-@item
-Delete the old table: @code{DROP TABLE tmp_table}.
-@end itemize
+@example
+ALTER TABLE table MAX_ROWS=xxx AVG_ROW_LENGTH=yyy;
+@end example
+
@end itemize
-In these cases, you must repair your tables. @code{myisamchk}
+In the other cases, you must repair your tables. @code{myisamchk}
can usually detect and fix most things that go wrong.
The repair process involves up to four stages, described below. Before you
@@ -34217,12 +34405,12 @@ that @code{mysqld} runs as (and to you, because you need to access the files
you are checking). If it turns out you need to modify files, they must also
be writable by you.
-If you are using @strong{MySQL} Version 3.23.16 and above, you can (and should) use the
-@code{CHECK} and @code{REPAIR} commands to check and repair @code{MyISAM}
-tables. @xref{CHECK TABLE}. @xref{REPAIR TABLE}.
+If you are using @strong{MySQL} Version 3.23.16 and above, you can (and
+should) use the @code{CHECK} and @code{REPAIR} commands to check and repair
+@code{MyISAM} tables. @xref{CHECK TABLE}. @xref{REPAIR TABLE}.
The manual section about table maintenance includes the options to
-@code{isamchk}/@code{myisamchk}. @xref{Table maintenance}.
+@code{isamchk}/@code{myisamchk}. @xref{Table maintenance}.
The following section is for the cases where the above command fails or
if you want to use the extended features that @code{isamchk}/@code{myisamchk} provides.
@@ -34484,12 +34672,13 @@ functions. Consult this file to see how UDF calling conventions work.
For mysqld to be able to use UDF functions, you should configure MySQL
with @code{--with-mysqld-ldflags=-rdynamic} The reason is that to on
-many platforms you can load a dynamic library (with @code{dlopen()})
-from a static linked program, which you would get if you are using
-@code{--with-mysqld-ldflags=-all-static} If you want to use an UDF that
-needs to access symbols from mysqld (like the @code{methaphone} example
-in @file{sql/udf_example.cc} that uses @code{default_charset_info}), you must
-link the program with @code{-rdynamic}. (see @code{man dlopen}).
+many platforms (including Linux) you can load a dynamic library (with
+@code{dlopen()}) from a static linked program, which you would get if
+you are using @code{--with-mysqld-ldflags=-all-static} If you want to
+use an UDF that needs to access symbols from mysqld (like the
+@code{metaphone} example in @file{sql/udf_example.cc} that uses
+@code{default_charset_info}), you must link the program with
+@code{-rdynamic}. (see @code{man dlopen}).
For each function that you want to use in SQL statements, you should define
corresponding C (or C++) functions. In the discussion below, the name
@@ -34906,12 +35095,13 @@ one that has been loaded with @code{CREATE FUNCTION} and not removed with
@node Adding native function, , Adding UDF, Adding functions
@section Adding a New Native Function
-The procedure for adding a new native function is described below. Note that
-you cannot add native functions to a binary distribution because the procedure
-involves modifying @strong{MySQL} source code. You must compile
-@strong{MySQL} yourself from a source distribution. Also note that if you
-migrate to another version of @strong{MySQL} (for example, when a new version is
-released), you will need to repeat the procedure with the new version.
+The procedure for adding a new native function is described below. Note
+that you cannot add native functions to a binary distribution because
+the procedure involves modifying @strong{MySQL} source code. You must
+compile @strong{MySQL} yourself from a source distribution. Also note
+that if you migrate to another version of @strong{MySQL} (for example,
+when a new version is released), you will need to repeat the procedure
+with the new version.
To add a new native @strong{MySQL} function, follow these steps:
@@ -35582,6 +35772,8 @@ INSERT INTO foo (auto,text) VALUES(NULL,'text');
INSERT INTO foo2 (id,text) VALUES(LAST_INSERT_ID(),'text');
@end example
+@xref{Getting unique ID}.
+
For the benefit of some ODBC applications (at least Delphi and Access),
the following query can be used to find a newly inserted row:
@example
@@ -35742,6 +35934,7 @@ pre-allocated MYSQL struct.
* No matching rows:: Solving problems with no matching rows
* ALTER TABLE problems:: Problems with @code{ALTER TABLE}.
* Change column order:: How to change the order of columns in a table
+* Temporary table problems::
@end menu
This chapter lists some common problems and error messages that users have
@@ -35980,6 +36173,7 @@ Try the following:
@enumerate
@item
Start @code{mysqld} from @code{gdb} (or in another debugger).
+@xref{Using gdb on mysqld}.
@item
Run your test scripts.
@@ -36139,6 +36333,10 @@ server closes the connection after 8 hours if nothing has happened. You
can change the time limit by setting the @code{wait_timeout} variable when
you start mysqld.
+Another common reason to receive the @code{MySQL server has gone away} error
+is because you have issued a ``close'' on your @strong{MySQL} connection
+and then tried to run a query on the closed connection.
+
You can check that the @strong{MySQL} hasn't died by executing
@code{mysqladmin version} and examining the uptime.
@@ -36671,6 +36869,17 @@ thread that is waiting on the disk-full condition will allow the other
threads to continue.
@end itemize
+Exceptions to the above behaviour are when you use @code{REPAIR} or
+@code{OPTIMIZE} or when the indexes are created in a batch after a
+@code{LOAD DATA INFILE} or after an @code{ALTER TABLE} statement.
+
+All of the above commands may use big temporary files that, left to
+themselves, would cause big problems for the rest of the system. If
+@strong{MySQL} gets disk full while doing any of the above operations,
+it will remove the big temporary files and mark the table as crashed
+(except for @code{ALTER TABLE}, in which case the old table will be left
+unchanged).
+
@node Multiple sql commands, Temporary files, Full disk, Problems
@section How to Run SQL Commands from a Text File
@@ -37288,7 +37497,7 @@ simple rename should get your data back.
@cindex columns, changing
@cindex changing, column order
@cindex tables, changing column order
-@node Change column order, , ALTER TABLE problems, Problems
+@node Change column order, Temporary table problems, ALTER TABLE problems, Problems
@section How To Change the Order of Columns in a Table
The whole point of SQL is to abstract the application from the data
@@ -37327,6 +37536,32 @@ Drop or rename @code{old_table}.
@code{ALTER TABLE new_table RENAME old_table}.
@end enumerate
+@cindex temporary tables, problems
+@node Temporary table problems, , Change column order, Problems
+@section TEMPORARY TABLE problems
+
+The following are a list of the limitations with @code{TEMPORARY TABLES}.
+
+@itemize @bullet
+@item
+A temporary table can only be of type @code{HEAP}, @code{ISAM} or
+@code{MyISAM}.
+@item
+You can't use temporary tables more than once in the same query.
+For example, the following doesn't work.
+
+@example
+select * from temporary_table, temporary_table as t2;
+@end example
+
+We plan to fix the above in 4.0.
+@item
+You can't use @code{RENAME} on a @code{TEMPORARY} table.
+Note that @code{ALTER TABLE org_name RENAME new_name} works!
+
+We plan to fix the above in 4.0.
+@end itemize
+
@cindex problems, solving
@cindex solving, problems
@cindex databases, replicating
@@ -37762,6 +37997,20 @@ file name is given, it defaults to the name of the host machine followed
by @code{-bin}. If file name is given, but it doesn't contain a path, the
file is written in the data directory.
+You can use the following options to @code{mysqld} to affect what is logged
+to the binary log:
+
+@multitable @columnfractions .4 .6
+@item @code{binlog-do-db=database_name} @tab
+Tells the master it should log updates for the specified database, and
+exclude all others not explicitly mentioned.
+(Example: @code{binlog-do-db=some_database})
+
+@item @code{binlog-ignore-db=database_name} @tab
+Tells the master that updates to the given database should not be logged
+to the binary log (Example: @code{binlog-ignore-db=some_database})
+@end multitable
+
To the binary log filename @code{mysqld} will append an extension that is a
number that is incremented each time you execute @code{mysqladmin
refresh}, execute @code{mysqladmin flush-logs}, execute the @code{FLUSH LOGS}
@@ -38098,7 +38347,7 @@ greater detail in the next section.
@multitable @columnfractions .3 .7
@item @strong{mysql_affected_rows()} @tab
-Returns the number of rows affected by the last @code{UPDATE},
+Returns the number of rows changed/deleted/inserted by the last @code{UPDATE},
@code{DELETE}, or @code{INSERT} query.
@item @strong{mysql_close()} @tab
@@ -38286,7 +38535,7 @@ expects a counted string. If the string contains binary data (which may
include null bytes), you must use @code{mysql_real_query()}.
For each non-@code{SELECT} query (for example, @code{INSERT}, @code{UPDATE},
-@code{DELETE}), you can find out how many rows were affected (changed)
+@code{DELETE}), you can find out how many rows were changed (affected)
by calling @code{mysql_affected_rows()}.
For @code{SELECT} queries, you retrieve the selected rows as a result set.
@@ -38462,19 +38711,19 @@ A string representation of the error may be obtained by calling
@subsubheading Description
-Returns the number of rows affected (changed) by the last @code{UPDATE},
-@code{DELETE} or @code{INSERT} query. May be called immediately after
-@code{mysql_query()} for @code{UPDATE}, @code{DELETE}, or @code{INSERT}
-statements. For @code{SELECT} statements, @code{mysql_affected_rows()}
-works like @code{mysql_num_rows()}.
-
-@code{mysql_affected_rows()} is currently implemented as a macro.
+Returns the number of rows changed by the last @code{UPDATE}, deleted by
+the last @code{DELETE} or inserted by the last @code{INSERT}
+statement. May be called immediately after @code{mysql_query()} for
+@code{UPDATE}, @code{DELETE}, or @code{INSERT} statements. For
+@code{SELECT} statements, @code{mysql_affected_rows()} works like
+@code{mysql_num_rows()}.
@subsubheading Return Values
An integer greater than zero indicates the number of rows affected or
-retrieved. Zero indicates that no records matched the @code{WHERE} clause in
-the query or that no query has yet been executed. -1 indicates that the
+retrieved. Zero indicates that no records were updated for an
+@code{UPDATE} statement, no rows matched the @code{WHERE} clause in the
+query or that no query has yet been executed. -1 indicates that the
query returned an error or that, for a @code{SELECT} query,
@code{mysql_affected_rows()} was called prior to calling
@code{mysql_store_result()}.
@@ -38487,9 +38736,18 @@ None.
@example
mysql_query(&mysql,"UPDATE products SET cost=cost*1.25 WHERE group=10");
-printf("%d products updated",mysql_affected_rows(&mysql));
+printf("%ld products updated",(long) mysql_affected_rows(&mysql));
@end example
+If one specifies the flag @code{CLIENT_FOUND_ROWS} when connecting to
+@code{mysqld}, @code{mysql_affected_rows()} will return the number of
+rows matched by the @code{WHERE} statement for @code{UPDATE} statements.
+
+Note that when one uses a @code{REPLACE} command,
+@code{mysql_affected_rows()} will return 2 if the new row replaced an
+old row. This is because in this case one row was inserted and then the
+duplicate was deleted.
+
@findex @code{mysql_close()}
@node mysql_close, mysql_connect, mysql_affected_rows, C API functions
@subsubsection @code{mysql_close()}
@@ -40305,6 +40563,12 @@ You must call @code{mysql_store_result()} or @code{mysql_use_result()}
for every query that successfully retrieves data (@code{SELECT},
@code{SHOW}, @code{DESCRIBE}, @code{EXPLAIN}).
+You don't have to call @code{mysql_store_result()} or
+@code{mysql_use_result()} for other queries, but it will not do any
+harm or cause any notable performance loss if you call
+@code{mysql_store_result()} in all cases. You can detect if the query
+didn't have a result set by checking if @code{mysql_store_result()}
+returns 0 (more about this later on).
+
If you want to know if the query should return a result set or not, you can
use @code{mysql_field_count()} to check for this.
@xref{mysql_field_count, @code{mysql_field_count}}.
@@ -42056,7 +42320,6 @@ with the @code{--gdb} and @code{--debug} options to @code{mysql-test-run}.
If you have not compiled @strong{MySQL} for debugging you should probably
do that. Just specify the @code{--with-debug} options to @code{configure}!
@xref{Installing source}.
-
@end itemize
@page
@@ -42192,7 +42455,6 @@ more than one way to compute}
@item @uref{http://www.yaboo.dk/, Yaboo - Yet Another BOOkmarker}
-@item @uref{http://www.yahoosuck.com, Yahoosuck}
@item @uref{http://www.ozsearch.com.au, OzSearch Internet Guide}
@@ -42205,7 +42467,7 @@ more than one way to compute}
@itemize @bullet
-@item @uref{http:www.spylog.ru/, SpyLOG ; A very popular Web counter site}
+@item @uref{http://www.spylog.ru/, SpyLOG ; A very popular Web counter site}
@item @uref{http://www.tucows.com/, TuCows Network; Free Software archive}
@@ -42227,8 +42489,6 @@ more than one way to compute}
@item @uref{http://www.game-developer.com/,The Game Development Search Engine}
-@item @uref{http://www.i-run.com/html/cookbook.html,My-Recipe.com; Cookbook at i-run.com}
-
@item @uref{www.theinnkeeper.com, The Innkeeper Vacation Guides}
@item @uref{http://www.macgamedatabase.com/, The Mac Game Database uses PHP and MySQL}
@@ -42360,7 +42620,7 @@ the @strong{MySQL} database
@itemize @bullet
@c @item @uref{http://www.wh200th.com, White House 200th Anniversary site}
-@item @uref{http://war.jgaa.com:8080/support/index.php3, Jgaa's Internet - Official Support Site}
+@item @uref{http://support.jgaa.com/, Jgaa's Internet - Official Support Site}
@item @uref{http://io.incluso.com, Ionline - online publication:} @strong{MySQL},
PHP, Java, Web programming, DB development
@@ -42424,10 +42684,6 @@ Ecommerce site that is selling computers.
@appendixsec Programming
-@itemize @bullet
-@item @uref{http://www.perl.org/cpan-testers, The Perl CPAN Testers results page}
-@end itemize
-
@cindex web pages, miscellaneous
@appendixsec Uncategorized Pages
@@ -42847,6 +43103,15 @@ of several databases simultaneously. By Innovative-IT Development AB.
@item @uref{http://www.mysql.com/downloads/gui-clients.html, MySQLGUI}
The @strong{MySQL} GUI client homepage. By Sinisa at @strong{MySQL AB}.
+@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_navigator_0.9.0.tar.gz, MySQL navigator 0.9}
+MySQL Navigator is a MySQL database server GUI client program. The purpose
+of MySQL Navigator is to provide a useful client interface to MySQL
+database servers, whilst supporting multiple operating systems and
+languages. You can currently import/export database, enter queries, get
+result sets, edit scripts, run scripts, add, alter, and delete users,
+and retrieve client and server information. Uses QT 2.2. GPL
+@uref{http://sql.kldp.org/mysql, Home page for MySQL Navigator}.
+
@item @uref{http://www.mysql.com/Downloads/Win32/secman.zip, MySQL Security GUI}
A user and security management GUI for @strong{MySQL} on Windows.
By Martin Jeremic.
@@ -42897,6 +43162,8 @@ import-/export-files. (Freeware). By Ansgar Becker.
@item @uref{http://www.mysql.com/Downloads/Win32/W9xstop.zip,Utility from Artronic to stop MySQL on win9x}.
+@item @uref{http://bardo.hyperlink.cz/mysqlmon,a light weight GUI client for Windows}.
+
@item @uref{http://dbtools.vila.bol.com.br/, Dbtools}
A tool to manage @strong{MySQL} databases. Currently only for Windows.
Some features:
@@ -42925,8 +43192,8 @@ An open source client for exploring databases and executing SQL. Supports
A query tool for @strong{MySQL} and PostgreSQL.
@item @uref{http://dbman.linux.cz/,dbMan}
A query tool written in Perl. Uses DBI and Tk.
-@item @uref{http://www.mysql.com/Downloads/Win32/Msc201.EXE, Mascon 2.1.15}
-@item @uref{http://www.mysql.com/Downloads/Win32/FrMsc201.EXE, Free Mascon 2.1.14}
+@item @uref{http://www.mysql.com/Downloads/Win32/Msc201.EXE, Mascon 202}
+@item @uref{http://www.mysql.com/Downloads/Win32/FrMsc202.EXE, Free Mascon 202}
Mascon is a powerful Win32 GUI for the administering @strong{MySQL} server
databases. Mascon's features include visual table design, connections to
multiple servers, data and blob editing of tables, security setting, SQL
@@ -43015,6 +43282,10 @@ Apache module to include HTML from @strong{MySQL} queries into your pages,
and run update queries. Originally written to implement a simple fast
low-overhead banner-rotation system. By Sasha Pachev.
+@item @uref{http://htcheck.sourceforge.net, htCheck} - URL checker with
+MySQL backend. Spidered URLs can later be queried using SQL to retrieve
+various kinds of information, eg. broken links. Written by Gabriele Bartolini.
+
@item @uref{http://www.odbsoft.com/cook/sources.htm}
This package has various functions for generating html code from a SQL
table structure and for generating SQL statements (Select, Insert,
@@ -43135,7 +43406,7 @@ An authentication module for the Cyrus IMAP server. By Aaron Newsome.
@appendixsec Converters
@itemize @bullet
-item @uref{http://www.mysql.com/Downloads/Contrib/mssql2mysql.txt, mssql2mysql.txt}
+@item @uref{http://www.mysql.com/Downloads/Contrib/mssql2mysql.txt, mssql2mysql.txt}
Converter from MS-SQL to MySQL. By Michael Kofler.
@uref{http://www.kofler.cc/mysql/mssql2mysql.html, mssql2mysql home page}.
@@ -43187,18 +43458,23 @@ your actual @strong{MySQL} server either way. Free of charge. See
Created by Laurent Bossavit of NetDIVE.
@strong{NOTE:} Doesn't work with Access2!
-@item @uref{http://www.mysql.com/Downloads/Contrib/msql2mysqlWrapper-1.0.tgz, /msql2mysqlWrapper 1.0}
+@item @uref{http://www.mysql.com/Downloads/Contrib/mdb2sql.bas, mdb2sql.bas}
+Converter from Access97 to @strong{MySQL} by Moshe Gurvich.
+
+@item @uref{http://www.mysql.com/Downloads/Contrib/msql2mysqlWrapper-1.0.tgz, msql2mysqlWrapper 1.0}
A C wrapper from @code{mSQL} to @strong{MySQL}. By @email{alfred@@sb.net}
@item @uref{http://www.mysql.com/Downloads/Contrib/sqlconv.pl, sqlconv.pl}
-A simple script that can be used to copy fields from one @strong{MySQL} table to
-another in bulk. Basically, you can run @code{mysqldump} and pipe it to
+A simple script that can be used to copy fields from one @strong{MySQL} table
+to another in bulk. Basically, you can run @code{mysqldump} and pipe it to
the @code{sqlconv.pl} script. The script will parse through the
@code{mysqldump} output and will rearrange the fields so they can be
inserted into a new table. An example is when you want to create a new
table for a different site you are working on, but the table is just a
bit different (that is - fields in different order, etc.).
By Steve Shreeve.
+@item @uref{http://www.mysql.com/Downloads/Contrib/oracledump, oracledump}
+Perl program to convert Oracle databases to @strong{MySQL}. By Johan Andersson.
@end itemize
@appendixsec Using MySQL with Other Products
@@ -43840,9 +44116,9 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.
@itemize @bullet
@item
-Added @code{SQL_CALC_FOUND_ROWS} and @code{FOUND_ROWS()}. This make it
-possible to know how many rows a query would have returned if one hadn't
-used @code{LIMIT}.
+Added @code{SQL_CALC_FOUND_ROWS} and @code{FOUND_ROWS()}. This makes it
+possible to know how many rows a query would have returned
+without a @code{LIMIT} clause.
@item
Changed output format of @code{SHOW OPEN TABLES}.
@item
@@ -43851,6 +44127,9 @@ Allow @code{SELECT expression LIMIT ...}.
Added @code{IDENTITY} as a synonym for @code{AUTO_INCREMENT} (like Sybase).
@item
Added @code{ORDER BY} syntax to @code{UPDATE} and @code{DELETE}.
+@item
+Optimized queries of type:
+@code{SELECT DISTINCT * from table_name ORDER by key_part1 LIMIT #}
@end itemize
@node News-3.23.x, News-3.22.x, News-4.0.x, News
@@ -43866,7 +44145,7 @@ A new ISAM library which is tuned for SQL and supports large files.
@item @strong{BerkeleyDB} or @strong{BDB}
Uses the Berkeley DB library from Sleepycat Software to implement
transaction-safe tables.
-@item @strong{Innodb}
+@item @strong{InnoDB}
A transaction-safe table handler that supports row level locking, and many
Oracle-like features.
@c change "three" to "four" above when uncommenting this
@@ -43897,7 +44176,7 @@ users uses this code as the rest of the code and because of this we are
not yet 100% confident in this code.
@menu
-* News-3.23.38::
+* News-3.23.38:: Changes in release 3.23.38
* News-3.23.37:: Changes in release 3.23.37
* News-3.23.36:: Changes in release 3.23.36
* News-3.23.35:: Changes in release 3.23.35
@@ -43943,6 +44222,36 @@ not yet 100% confident in this code.
@appendixsubsec Changes in release 3.23.38
@itemize @bullet
@item
+Fixed a bug in @code{REPLACE()} when using the ujis character set.
+@item
+Applied Sleepycat BDB patches 3.2.9.1 and 3.2.9.2.
+@item
+Added option @code{--skip-stack-trace} to @code{mysqld}.
+@item
+@code{CREATE TEMPORARY} now works with @code{InnoDB} tables.
+@item
+@code{InnoDB} now promotes sub keys to whole keys.
+@item
+Added option @code{CONCURRENT} to @code{LOAD DATA}.
+@item
+Better error message when slave @code{max_allowed_packet} is too low to
+read a very long log event from the master.
+@item
+Fixed bug where too many rows were removed when using
+@code{SELECT DISTINCT ... HAVING}.
+@item
+@code{SHOW CREATE TABLE} now returns @code{TEMPORARY} for temporary tables.
+@item
+Added @code{Rows_examined} to slow query log.
+@item
+Fixed problems with functions returning an empty string when used
+together with group functions and a @code{WHERE} that didn't match any rows.
+@item
+New program @code{mysqlcheck}.
+@item
+Added database name to output for administrative commands like @code{CHECK},
+@code{REPAIR}, @code{OPTIMIZE}.
+@item
Lots of portability fixes for InnoDB.
@item
Changed optimizer so that queries like
@@ -43951,7 +44260,7 @@ will use index on @code{key_part1} instead of @code{filesort}.
@item
Fixed bug when doing
@code{LOCK TABLE to_table WRITE,...; INSERT INTO to_table... SELECT ...}
-when to_table was empty.
+when @code{to_table} was empty.
@item
Fixed bug with @code{LOCK TABLE} and BDB tables.
@end itemize
@@ -43977,35 +44286,35 @@ Fixed bug when using indexes on @code{CHAR(255) NULL} columns.
Slave thread will now be started even if @code{master-host} is not set, as
long as @code{server-id} is set and valid @code{master.info} is present
@item
-Partial updates ( terminated with kill) are now logged with a special error
+Partial updates (terminated with kill) are now logged with a special error
code to the binary log. Slave will refuse to execute them if the error code
indicates the update was terminated abnormally, and will have to be recovered
with @code{SET SQL_SLAVE_SKIP_COUNTER=1; SLAVE START} after a manual sanity
-check/correction of data integrity
+check/correction of data integrity.
@item
Fixed bug that erroneously logged a drop of internal temporary table
-on thread termination to the binary log - bug affected replication
+on thread termination to the binary log - bug affected replication.
@item
Fixed a bug in @code{REGEXP()} on 64-bit machines.
@item
@code{UPDATE} and @code{DELETE} with @code{WHERE unique_key_part IS NULL}
didn't update/delete all rows.
@item
-Disabled @code{INSERT DELAYED} for tables that supports transactions.
+Disabled @code{INSERT DELAYED} for tables that support transactions.
@item
Fixed bug when using date functions on @code{TEXT}/@code{BLOB} column
with wrong date format.
@item
-UDF's now also works on windows. (Patch by Ralph Mason)
+UDFs now also work on Windows. (Patch by Ralph Mason)
@item
Fixed bug in @code{ALTER TABLE} and @code{LOAD DATA INFILE} that disabled
-key-sorting. These command should now be faster in most cases.
+key-sorting. These commands should now be faster in most cases.
@item
Fixed performance bug where reopened tables (tables that had been
waiting for @code{FLUSH} or @code{REPAIR}) would not use indexes for the
next query.
@item
-Fixed problem with @code{ALTER TABLE} to Innobase tables on Freebsd.
+Fixed problem with @code{ALTER TABLE} to Innobase tables on FreeBSD.
@item
Added @code{mysqld} variables @code{myisam_max_sort_file_size} and
@code{myisam_max_extra_sort_file_size}.
@@ -44024,14 +44333,15 @@ Added @code{--skip-safemalloc} option to @code{mysqld}.
@appendixsubsec Changes in release 3.23.36
@itemize @bullet
@item
-Fixed a bug that allowed you to use database names with @code{.}. This
-fixes a serious security issue when @code{mysqld} is run as root.
+Fixed a bug that allowed you to use database names containing a @samp{.}
+character. This fixes a serious security issue when @code{mysqld} is run
+as root.
@item
Fixed bug when thread creation failed (could happen when doing a LOT
of connections in a short time).
@item
-Don't free the key cache on @code{FLUSH TABLES} as this will cause problems
-with temporary tables.
+Fixed some problems with @code{FLUSH TABLES} and @code{TEMPORARY} tables.
+(Problem with freeing the key cache and error @code{Can't reopen table...}).
@item
Fixed a problem in Innobase with other character sets than @code{latin1}
and another problem when using many columns.
@@ -48981,12 +49291,16 @@ mysql> UPDATE tbl_name SET KEY=KEY+1,KEY=KEY+1;
will update @code{KEY} with @code{2} instead of with @code{1}.
@item
You can't use temporary tables more than once in the same query.
+For example, the following doesn't work.
@example
select * from temporary_table, temporary_table as t2;
@end example
@item
+@code{RENAME} doesn't work with @code{TEMPORARY} tables.
+
+@item
The optimizer may handle @code{DISTINCT} differently if you are using
'hidden' columns in a join or not. In a join, hidden columns are
counted as part of the result (even if they are not shown) while in
@@ -49214,6 +49528,8 @@ Add @code{record_in_range()} method to @code{MERGE} tables to be
able to choose the right index when there is many to choose from. We should
also extend the info interface to get the key distribution for each index,
of @code{analyze} is run on all sub tables.
+@item
+@code{SET SQL_DEFAULT_TABLE_TYPE=[MyISAM | INNODB | BDB | GEMINI | HEAP]}.
@end itemize
@node TODO future, TODO sometime, TODO MySQL 4.0, TODO
@@ -49226,6 +49542,25 @@ Fail safe replication.
Subqueries.
@code{select id from t where grp in (select grp from g where u > 100)}
@item
+Derived tables.
+@example
+select a.col1, b.col2 from (select max(col1) as col1 from root_table ) a,
+other_table b where a.col1=b.col1
+@end example
+
+This could be done by automatically creating temporary tables for the
+derived tables for the duration of the query.
+@item
+Add @code{PREPARE} of statements and sending of parameters to @code{mysqld}.
+@item
+Extend the server/client protocol to support warnings.
+@item
+Add options to the server/client protocol to get progress notes
+for long running commands.
+@item
+Add database and real table name (in case of alias) to the MYSQL_FIELD
+structure.
+@item
Don't allow more than a defined number of threads to run MyISAM recover
at the same time.
@item
@@ -49239,8 +49574,6 @@ Multiple result sets.
Change the protocol to allow binary transfer of values. To do this
efficiently, we need to add an API to allow binding of variables.
@item
-Add @code{PREPARE} of statements and sending of parameters to @code{mysqld}.
-@item
Make it possible to specify @code{long_query_time} with a granularity
in microseconds.
@item
@@ -49675,6 +50008,7 @@ problems that may be unique to your environment.
@menu
* Compiling for debugging::
* Making trace files::
+* Using gdb on mysqld::
* Using stack trace::
* Using log files::
* Reproducable test case::
@@ -49689,7 +50023,7 @@ If you have some very specific problem, you can always try to debug
whether or not @strong{MySQL} was compiled with debugging by doing:
@code{mysqld --help}. If the @code{--debug} flag is listed with the
options then you have debugging enabled. @code{mysqladmin ver} also
-lists the @code{mysqld} version as @code{mysql ... -debug} in this case.
+lists the @code{mysqld} version as @code{mysql ... --debug} in this case.
If you are using gcc or egcs, the recommended configure line is:
@@ -49728,22 +50062,50 @@ send mail to @email{mysql@@lists.mysql.com} and ask for help. Please use the
@code{mysqlbug} script for all bug reports or questions regarding the
@strong{MySQL} version you are using!
-@node Making trace files, Using stack trace, Compiling for debugging, Debugging server
-@appendixsubsec Creating trace files and using gdb on mysqld
+In the windows @strong{MySQL} distribution @code{mysqld.exe} is by
+default compiled with support for trace files.
+
+@node Making trace files, Using gdb on mysqld, Compiling for debugging, Debugging server
+@appendixsubsec Creating trace files
+
+If the @code{mysqld} server doesn't start or if you can cause the
+@code{mysqld} server to crash quickly, you can try to create a trace
+file to find the problem.
-If you can cause the @code{mysqld} server to crash quickly, you can try to
-create a trace file of this:
+To do this you have to have a @code{mysqld} that is compiled for debugging.
+You can check this by executing @code{mysqld -V}. If the version number
+ends with @code{-debug}, it's compiled with support for trace files.
-Start the @code{mysqld} server with a trace log in @file{/tmp/mysqld.trace}.
-The log file will get very @emph{BIG}.
+Start the @code{mysqld} server with a trace log in @file{/tmp/mysqld.trace}
+(or @file{C:\mysqld.trace} on windows):
-@code{mysqld --debug --log}
+@code{mysqld --debug}
-or you can start it with
+On Windows you should also use the @code{--standalone} flag to not start
+@code{mysqld} as a service.
-@code{mysqld --debug=d,info,error,query,general,where:O,/tmp/mysql.trace}
+Note that the trace file will get very @emph{BIG}!
-which only prints information with the most interesting tags.
+If you want to have a smaller trace file, you can use something like:
+
+@code{mysqld --debug=d,info,error,query,general,where:O,/tmp/mysqld.trace}
+
+which only prints information with the most interesting tags in
+@file{/tmp/mysqld.trace}.
+
+If you make a bug report about this, please only send the lines from the
+trace file to the appropriate mailing list where something seems to go
+wrong! If you can't locate the wrong place, you can ftp the trace file,
+together with a full bug report, to
+@uref{ftp://support.mysql.com/pub/mysql/secret} so that a @strong{MySQL}
+developer can take a look at this.
+
+The trace file is made with the @strong{DBUG} package by Fred Fish.
+@xref{The DBUG package}.
+
+@cindex gdb, using
+@node Using gdb on mysqld, Using stack trace, Making trace files, Debugging server
+@appendixsubsec Debugging mysqld under gdb
On most system you can also start @code{mysqld} from @code{gdb} to get
more information if @code{mysqld} crashes.
@@ -49752,11 +50114,14 @@ With some older @code{gdb} versions on Linux you must use @code{run
--one-thread} if you want to be able to debug @code{mysqld} threads. In
this case you can only have one thread active at a time.
+When running @code{mysqld} under gdb, you should disable the stack trace
+with @code{--skip-stack-trace} to be able to catch segfaults within gdb.
+
It's very hard to debug @strong{MySQL} under @code{gdb} if you do a lot of
new connections the whole time as @code{gdb} doesn't free the memory for
old threads. You can avoid this problem by starting @code{mysqld} with
@code{-O thread_cache_size= 'max_connections +1'}. In most cases just
-using @code{-O thread_cache_size= 5'} will help a lot!
+using @code{-O thread_cache_size=5'} will help a lot!
If you want to get a core dump on Linux if @code{mysqld} dies with a
SIGSEGV signal, you can start mysqld with the @code{--core-file} option.
@@ -49765,8 +50130,7 @@ find out why @code{mysqld} died:
@example
shell> gdb mysqld core
-gdb> backtrace
-gdb> info local
+gdb> backtrace full
gdb> exit
@end example
@@ -49798,15 +50162,7 @@ Here is an example how to debug mysqld:
shell> gdb /usr/local/libexec/mysqld
gdb> run
...
-back # Do this when mysqld crashes
-info locals
-up
-info locals
-up
-...
-(until you get some information about local variables)
-
-quit
+backtrace full # Do this when mysqld crashes
@end example
Include the above output in a mail generated with @code{mysqlbug} and
@@ -49829,7 +50185,7 @@ debugging information by using the @code{trace} method or by
setting the @code{DBI_TRACE} environment variable.
@xref{Perl DBI Class, , Perl @code{DBI} Class}.
-@node Using stack trace, Using log files, Making trace files, Debugging server
+@node Using stack trace, Using log files, Using gdb on mysqld, Debugging server
@appendixsubsec Using a stack trace
On some operating systems, the error log will contain a stack trace if
@@ -50044,7 +50400,7 @@ A very common mistake is to use an old @file{mysql.h} file from an old
The @strong{MySQL} server and most @strong{MySQL} clients are compiled
with the DBUG package originally made by Fred Fish. When one has configured
@strong{MySQL} for debugging, this package makes it possible to get a trace
-file of what the program is debugging.
+file of what the program is debugging. @xref{Making trace files}.
One uses the debug package by invoking the program with the
@code{--debug="..."} or the @code{-#...} option.
diff --git a/acinclude.m4 b/acinclude.m4
index 34e5ce9e873..ab2ea5cddd1 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -199,7 +199,7 @@ fi
AC_DEFUN(MYSQL_PTHREAD_YIELD,
[AC_CACHE_CHECK([if pthread_yield takes zero arguments], ac_cv_pthread_yield_zero_arg,
-[AC_TRY_COMPILE([#define _GNU_SOURCE
+[AC_TRY_LINK([#define _GNU_SOURCE
#include <pthread.h>
#ifdef __cplusplus
extern "C"
@@ -214,7 +214,7 @@ then
fi
]
[AC_CACHE_CHECK([if pthread_yield takes 1 argument], ac_cv_pthread_yield_one_arg,
-[AC_TRY_COMPILE([#define _GNU_SOURCE
+[AC_TRY_LINK([#define _GNU_SOURCE
#include <pthread.h>
#ifdef __cplusplus
extern "C"
@@ -550,7 +550,8 @@ AC_DEFUN(MYSQL_STACK_DIRECTION,
AC_DEFUN(MYSQL_FUNC_ALLOCA,
[
-# Since we have heard that alloca fails on IRIX never define it on a SGI machine
+# Since we have heard that alloca fails on IRIX never define it on a
+# SGI machine
if test ! "$host_vendor" = "sgi"
then
AC_REQUIRE_CPP()dnl Set CPP; we run AC_EGREP_CPP conditionally.
@@ -930,6 +931,7 @@ dnl circular references.
../innobase/odbc/libodbc.a\
../innobase/srv/libsrv.a\
../innobase/que/libque.a\
+ ../innobase/srv/libsrv.a\
../innobase/dict/libdict.a\
../innobase/ibuf/libibuf.a\
../innobase/row/librow.a\
diff --git a/bdb/dist/Makefile.in b/bdb/dist/Makefile.in
index 73f82cd5648..4a47953e3e1 100644
--- a/bdb/dist/Makefile.in
+++ b/bdb/dist/Makefile.in
@@ -997,3 +997,6 @@ strerror@o@: $(srcdir)/clib/strerror.c
$(CC) $(CFLAGS) $?
vsnprintf@o@: $(srcdir)/clib/vsnprintf.c
$(CC) $(CFLAGS) $?
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/bdb/include/log.h b/bdb/include/log.h
index 1cac0492252..81ecb4174a6 100644
--- a/bdb/include/log.h
+++ b/bdb/include/log.h
@@ -198,6 +198,7 @@ struct __fname {
*/
typedef enum {
DB_LV_INCOMPLETE,
+ DB_LV_NONEXISTENT,
DB_LV_NORMAL,
DB_LV_OLD_READABLE,
DB_LV_OLD_UNREADABLE
diff --git a/bdb/log/log.c b/bdb/log/log.c
index 69af1624824..8ddb7bcaf7d 100644
--- a/bdb/log/log.c
+++ b/bdb/log/log.c
@@ -309,13 +309,13 @@ __log_find(dblp, find_first, valp, statusp)
int find_first, *valp;
logfile_validity *statusp;
{
- logfile_validity clv_status, status;
+ logfile_validity logval_status, status;
u_int32_t clv, logval;
int cnt, fcnt, ret;
const char *dir;
char **names, *p, *q, savech;
- clv_status = status = DB_LV_NORMAL;
+ logval_status = status = DB_LV_NONEXISTENT;
/* Return a value of 0 as the log file number on failure. */
*valp = 0;
@@ -385,10 +385,14 @@ __log_find(dblp, find_first, valp, statusp)
* as a valid log file.
*/
break;
+ case DB_LV_NONEXISTENT:
+ /* Should never happen. */
+ DB_ASSERT(0);
+ break;
case DB_LV_NORMAL:
case DB_LV_OLD_READABLE:
logval = clv;
- clv_status = status;
+ logval_status = status;
break;
case DB_LV_OLD_UNREADABLE:
/*
@@ -410,7 +414,7 @@ __log_find(dblp, find_first, valp, statusp)
*/
if (!find_first) {
logval = clv;
- clv_status = status;
+ logval_status = status;
}
break;
}
@@ -420,7 +424,7 @@ __log_find(dblp, find_first, valp, statusp)
err: __os_dirfree(names, fcnt);
__os_freestr(p);
- *statusp = clv_status;
+ *statusp = logval_status;
return (ret);
}
diff --git a/bdb/log/log_rec.c b/bdb/log/log_rec.c
index ad6d9f7ead2..493dd06d4c6 100644
--- a/bdb/log/log_rec.c
+++ b/bdb/log/log_rec.c
@@ -430,7 +430,7 @@ __log_add_logid(dbenv, logp, dbp, ndx)
TAILQ_INIT(&logp->dbentry[i].dblist);
else
TAILQ_REINSERT_HEAD(
- &logp->dbentry[i].dblist, dbp, links);
+ &logp->dbentry[i].dblist, dbtmp, links);
}
/* Initialize the new entries. */
diff --git a/client/Makefile.am b/client/Makefile.am
index 77f6cb72ff1..24221dcab74 100644
--- a/client/Makefile.am
+++ b/client/Makefile.am
@@ -21,13 +21,14 @@ INCLUDES = -I$(srcdir)/../include \
-I..
LIBS = @CLIENT_LIBS@
LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysql/libmysqlclient.la
-bin_PROGRAMS = mysql mysqladmin mysqlshow mysqldump mysqlimport mysqltest
+bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow mysqldump mysqlimport mysqltest
noinst_PROGRAMS = insert_test select_test thread_test
noinst_HEADERS = sql_string.h completion_hash.h my_readline.h
mysql_SOURCES = mysql.cc readline.cc sql_string.cc completion_hash.cc
mysql_LDADD = @readline_link@ @TERMCAP_LIB@ $(LDADD) $(CXXLDFLAGS)
mysql_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
mysqladmin_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
+mysqlcheck_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
mysqlshow_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
mysqldump_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
mysqlimport_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
diff --git a/client/errmsg.c b/client/errmsg.c
index a31386e70b4..711228e459d 100644
--- a/client/errmsg.c
+++ b/client/errmsg.c
@@ -36,14 +36,16 @@ const char *client_errors[]=
"MySQL client got out of memory",
"Wrong host info",
"Localhost via UNIX socket",
- "%s via TCP/IP",
+ "%-.64s via TCP/IP",
"Error in server handshake",
"Lost connection to MySQL server during query",
"Commands out of sync; You can't run this command now",
"Verbindung ueber Named Pipe; Host: %-.64s",
"Kann nicht auf Named Pipe warten. Host: %-.64s pipe: %-.32s (%lu)",
"Kann Named Pipe nicht oeffnen. Host: %-.64s pipe: %-.32s (%lu)",
- "Kann den Status der Named Pipe nicht setzen. Host: %-.64s pipe: %-.32s (%lu)"
+ "Kann den Status der Named Pipe nicht setzen. Host: %-.64s pipe: %-.32s (%lu)",
+ "Can't initialize character set %-.64s (path: %-.64s)",
+ "Got packet bigger than 'max_allowed_packet'"
};
#else /* ENGLISH */
@@ -60,19 +62,21 @@ const char *client_errors[]=
"MySQL client run out of memory",
"Wrong host info",
"Localhost via UNIX socket",
- "%s via TCP/IP",
+ "%-.64s via TCP/IP",
"Error in server handshake",
"Lost connection to MySQL server during query",
"Commands out of sync; You can't run this command now",
- "%s via named pipe",
+ "%-.64s via named pipe",
"Can't wait for named pipe to host: %-.64s pipe: %-.32s (%lu)",
"Can't open named pipe to host: %-.64s pipe: %-.32s (%lu)",
"Can't set state of named pipe to host: %-.64s pipe: %-.32s (%lu)",
+ "Can't initialize character set %-.64s (path: %-.64s)",
+ "Got packet bigger than 'max_allowed_packet'"
};
#endif
void init_client_errs(void)
{
- errmsg[CLIENT_ERRMAP] = &client_errors[0];
+ my_errmsg[CLIENT_ERRMAP] = &client_errors[0];
}
diff --git a/client/mysqladmin.c b/client/mysqladmin.c
index bda86c881e3..1e6bf3c5219 100644
--- a/client/mysqladmin.c
+++ b/client/mysqladmin.c
@@ -28,7 +28,7 @@
#include <my_pthread.h> /* because of signal() */
#endif
-#define ADMIN_VERSION "8.19"
+#define ADMIN_VERSION "8.20"
#define MAX_MYSQL_VAR 64
#define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */
#define MAX_TRUNC_LENGTH 3
@@ -417,19 +417,13 @@ static my_bool execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_DROP:
{
- char buff[FN_REFLEN+20];
if (argc < 2)
{
my_printf_error(0,"Too few arguments to drop",MYF(ME_BELL));
return 1;
}
- sprintf(buff,"drop database `%.*s`",FN_REFLEN,argv[1]);
- if (mysql_query(mysql,buff))
- {
- my_printf_error(0,"DROP DATABASE failed; error: '%-.200s'",
- MYF(ME_BELL), mysql_error(mysql));
+ if (drop_db(mysql,argv[1]))
return 1;
- }
argc--; argv++;
break;
}
@@ -867,7 +861,8 @@ static int drop_db(MYSQL *mysql, const char *db)
{
puts("Dropping the database is potentially a very bad thing to do.");
puts("Any data stored in the database will be destroyed.\n");
- printf("Do you really want to drop the '%s' database [y/N]\n",db);
+ printf("Do you really want to drop the '%s' database [y/N] ",db);
+ fflush(stdout);
VOID(fgets(buf,sizeof(buf)-1,stdin));
if ((*buf != 'y') && (*buf != 'Y'))
{
@@ -878,7 +873,7 @@ static int drop_db(MYSQL *mysql, const char *db)
sprintf(name_buff,"drop database %.*s",FN_REFLEN,db);
if (mysql_query(mysql,name_buff))
{
- my_printf_error(0,"drop of '%s' failed;\nerror: '%s'",MYF(ME_BELL),
+ my_printf_error(0,"DROP DATABASE %s failed;\nerror: '%s'",MYF(ME_BELL),
db,mysql_error(mysql));
return 1;
}
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
new file mode 100644
index 00000000000..3d4d4597ef5
--- /dev/null
+++ b/client/mysqlcheck.c
@@ -0,0 +1,685 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* By Jani Tolonen, 2001-04-20, MySQL Development Team */
+
+#define CHECK_VERSION "1.01"
+
+#include <global.h>
+#include <my_sys.h>
+#include <m_string.h>
+#include <m_ctype.h>
+
+#include "mysql.h"
+#include "mysql_version.h"
+#include "mysqld_error.h"
+#include <getopt.h>
+#include "sslopt-vars.h"
+
+#include <m_string.h>
+
+/* Exit codes */
+
+#define EX_USAGE 1
+#define EX_MYSQLERR 2
+
+static MYSQL mysql_connection, *sock = 0;
+static my_bool opt_alldbs = 0, opt_check_only_changed = 0, opt_extended = 0,
+ opt_compress = 0, opt_databases = 0, opt_fast = 0,
+ opt_medium_check = 0, opt_quick = 0, opt_all_in_1 = 0,
+ opt_silent = 0, opt_auto_repair = 0, ignore_errors = 0;
+static uint verbose = 0, opt_mysql_port=0;
+static my_string opt_mysql_unix_port = 0;
+static char *opt_password = 0, *current_user = 0, *default_charset = 0,
+ *current_host = 0;
+static int first_error = 0;
+DYNAMIC_ARRAY tables4repair;
+
+enum operations {DO_CHECK, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE};
+
+enum options {OPT_CHARSETS_DIR=256, OPT_COMPRESS, OPT_DEFAULT_CHARSET,
+ OPT_TABLES, OPT_AUTO_REPAIR};
+
+static struct option long_options[] =
+{
+ {"all-databases", no_argument, 0, 'A'},
+ {"all-in-1", no_argument, 0, '1'},
+ {"auto-repair", no_argument, 0, OPT_AUTO_REPAIR},
+ {"analyze", no_argument, 0, 'a'},
+ {"character-sets-dir", required_argument, 0, OPT_CHARSETS_DIR},
+ {"check", no_argument, 0, 'c'},
+ {"check-only-changed", no_argument, 0, 'C'},
+ {"compress", no_argument, 0, OPT_COMPRESS},
+ {"databases", no_argument, 0, 'B'},
+ {"debug", optional_argument, 0, '#'},
+ {"default-character-set", required_argument, 0, OPT_DEFAULT_CHARSET},
+ {"fast", no_argument, 0, 'F'},
+ {"force", no_argument, 0, 'f'},
+ {"extended", no_argument, 0, 'e'},
+ {"help", no_argument, 0, '?'},
+ {"host", required_argument, 0, 'h'},
+ {"medium-check", no_argument, 0, 'm'},
+ {"optimize", no_argument, 0, 'o'},
+ {"password", optional_argument, 0, 'p'},
+#ifdef __WIN__
+ {"pipe", no_argument, 0, 'W'},
+#endif
+ {"port", required_argument, 0, 'P'},
+ {"quick", no_argument, 0, 'q'},
+ {"repair", no_argument, 0, 'r'},
+ {"silent", no_argument, 0, 's'},
+ {"socket", required_argument, 0, 'S'},
+#include "sslopt-longopts.h"
+ {"tables", no_argument, 0, OPT_TABLES},
+#ifndef DONT_ALLOW_USER_CHANGE
+ {"user", required_argument, 0, 'u'},
+#endif
+ {"verbose", no_argument, 0, 'v'},
+ {"version", no_argument, 0, 'V'},
+ {0, 0, 0, 0}
+};
+
+static const char *load_default_groups[] = { "mysqlcheck", "client", 0 };
+
+
+static void print_version(void);
+static void usage(void);
+static int get_options(int *argc, char ***argv);
+static int process_all_databases();
+static int process_databases(char **db_names);
+static int process_selected_tables(char *db, char **table_names, int tables);
+static int process_all_tables_in_db(char *database);
+static int use_db(char *database);
+static int handle_request_for_tables(char *tables, uint length);
+static int dbConnect(char *host, char *user,char *passwd);
+static void dbDisconnect(char *host);
+static void DBerror(MYSQL *mysql, const char *when);
+static void safe_exit(int error);
+static void print_result();
+int what_to_do = 0;
+
+static void print_version(void)
+{
+ printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, CHECK_VERSION,
+ MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
+} /* print_version */
+
+
+static void usage(void)
+{
+ print_version();
+ puts("By Jani Tolonen, 2001-04-20, MySQL Development Team\n");
+ puts("This software comes with ABSOLUTELY NO WARRANTY. This is free");
+ puts("software and you are welcome to modify and redistribute it");
+ puts("under the GPL license.\n");
+ puts("This program can be used to CHECK (-c,-m,-C), REPAIR (-r), ANALYZE (-a)");
+ puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be");
+ puts("used same time. It works on MyISAM and in some cases on BDB tables.");
+ puts("Please consult the MySQL manual for latest information about the");
+ puts("above. The options -c,-r,-a and -o are exclusive to each other, which");
+ puts("means that the last option will be used, if several was specified.\n");
+ puts("The option -c will be used by default, if none was specified. You");
+ puts("can change the default behavior by making a symbolic link, or");
+ puts("copying this file somewhere with another name, the alternatives are:");
+ puts("mysqlrepair: The default option will be -r");
+ puts("mysqlanalyze: The default option will be -a");
+ puts("mysqloptimize: The default option will be -o\n");
+ printf("Usage: %s [OPTIONS] database [tables]\n", my_progname);
+ printf("OR %s [OPTIONS] --databases DB1 [DB2 DB3...]\n",
+ my_progname);
+ printf("OR %s [OPTIONS] --all-databases\n", my_progname);
+ printf("\
+ -A, --all-databases Check all the databases. This will be same as\n\
+ --databases with all databases selected\n\
+ -1, --all-in-1 Instead of making one query for each table, execute\n\
+ all queries in 1 query separately for each database.\n\
+ Table names will be in a comma separeted list.\n\
+ -a, --analyze Analyze given tables.\n\
+ --auto-repair If a checked table is corrupted, automatically fix\n\
+ it. Repairing will be done after all tables have\n\
+ been checked, if corrupted ones were found.\n\
+ -#, --debug=... Output debug log. Often this is 'd:t:o,filename'\n\
+ --character-sets-dir=...\n\
+ Directory where character sets are\n\
+ -c, --check Check table for errors\n\
+ -C, --check-only-changed\n\
+ Check only tables that have changed since last check\n\
+ or haven't been closed properly.\n\
+ --compress Use compression in server/client protocol.\n\
+ -?, --help Display this help message and exit.\n\
+ -B, --databases To check several databases. Note the difference in\n\
+ usage; In this case no tables are given. All name\n\
+ arguments are regarded as databasenames.\n\
+ --default-character-set=...\n\
+ Set the default character set\n\
+ -F, --fast Check only tables that hasn't been closed properly\n\
+ -f, --force Continue even if we get an sql-error.\n\
+ -e, --extended If you are using this option with CHECK TABLE,\n\
+ it will ensure that the table is 100 percent\n\
+ consistent, but will take a long time.\n\n");
+printf("\
+ If you are using this option with REPAIR TABLE,\n\
+ it will run an extended repair on the table, which\n\
+ may not only take a long time to execute, but\n\
+ may produce a lot of garbage rows also!\n\
+ -h, --host=... Connect to host.\n\
+ -m, --medium-check Faster than extended-check, but only finds 99.99 percent\n\
+ of all errors. Should be good enough for most cases.\n\
+ -o, --optimize Optimize table\n\
+ -p, --password[=...] Password to use when connecting to server.\n\
+ If password is not given it's solicited on the tty.\n");
+#ifdef __WIN__
+ puts("-W, --pipe Use named pipes to connect to server");
+#endif
+ printf("\
+ -P, --port=... Port number to use for connection.\n\
+ -q, --quick If you are using this option with CHECK TABLE, it\n\
+ prevents the check from scanning the rows to check\n\
+ for wrong links. This is the fastest check.\n\n\
+ If you are using this option with REPAIR TABLE, it\n\
+ will try to repair only the index tree. This is\n\
+ the fastest repair method for a table.\n\
+ -r, --repair Can fix almost anything except unique keys that aren't\n\
+ unique.\n\
+ -s, --silent Print only error messages.\n\
+ -S, --socket=... Socket file to use for connection.\n\
+ --tables Overrides option --databases (-B).\n");
+#include "sslopt-usage.h"
+#ifndef DONT_ALLOW_USER_CHANGE
+ printf("\
+ -u, --user=# User for login if not current user.\n");
+#endif
+ printf("\
+ -v, --verbose Print info about the various stages.\n\
+ -V, --version Output version information and exit.\n");
+ print_defaults("my", load_default_groups);
+} /* usage */
+
+
+static int get_options(int *argc, char ***argv)
+{
+ int c, option_index;
+ my_bool tty_password = 0;
+
+ if (*argc == 1)
+ {
+ usage();
+ exit(0);
+ }
+
+ load_defaults("my", load_default_groups, argc, argv);
+ while ((c = getopt_long(*argc, *argv, "#::p::h:u:P:S:BaAcCdeFfmqorsvVw:?I1",
+ long_options, &option_index)) != EOF)
+ {
+ switch(c) {
+ case 'a':
+ what_to_do = DO_ANALYZE;
+ break;
+ case '1':
+ opt_all_in_1 = 1;
+ break;
+ case 'A':
+ opt_alldbs = 1;
+ break;
+ case OPT_AUTO_REPAIR:
+ opt_auto_repair = 1;
+ break;
+ case OPT_DEFAULT_CHARSET:
+ default_charset = optarg;
+ break;
+ case OPT_CHARSETS_DIR:
+ charsets_dir = optarg;
+ break;
+ case 'c':
+ what_to_do = DO_CHECK;
+ break;
+ case 'C':
+ what_to_do = DO_CHECK;
+ opt_check_only_changed = 1;
+ break;
+ case 'e':
+ opt_extended = 1;
+ break;
+ case OPT_COMPRESS:
+ opt_compress = 1;
+ break;
+ case 'B':
+ opt_databases = 1;
+ break;
+ case 'F':
+ opt_fast = 1;
+ break;
+ case 'f':
+ ignore_errors = 1;
+ break;
+ case 'I': /* Fall through */
+ case '?':
+ usage();
+ exit(0);
+ case 'h':
+ my_free(current_host, MYF(MY_ALLOW_ZERO_PTR));
+ current_host = my_strdup(optarg, MYF(MY_WME));
+ break;
+ case 'm':
+ what_to_do = DO_CHECK;
+ opt_medium_check = 1;
+ break;
+ case 'o':
+ what_to_do = DO_OPTIMIZE;
+ break;
+#ifndef DONT_ALLOW_USER_CHANGE
+ case 'u':
+ current_user = optarg;
+ break;
+#endif
+ case 'p':
+ if (optarg)
+ {
+ char *start = optarg;
+ my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR));
+ opt_password = my_strdup(optarg, MYF(MY_FAE));
+ while (*optarg) *optarg++= 'x'; /* Destroy argument */
+ if (*start)
+ start[1] = 0; /* Cut length of argument */
+ }
+ else
+ tty_password = 1;
+ break;
+ case 'P':
+ opt_mysql_port = (unsigned int) atoi(optarg);
+ break;
+ case 'q':
+ opt_quick = 1;
+ break;
+ case 'r':
+ what_to_do = DO_REPAIR;
+ break;
+ case 'S':
+ opt_mysql_unix_port = optarg;
+ break;
+ case 's':
+ opt_silent = 1;
+ break;
+ case 'W':
+#ifdef __WIN__
+ opt_mysql_unix_port = MYSQL_NAMEDPIPE;
+#endif
+ break;
+ case '#':
+ DBUG_PUSH(optarg ? optarg : "d:t:o");
+ break;
+ case OPT_TABLES:
+ opt_databases = 0;
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'V': print_version(); exit(0);
+ default:
+ fprintf(stderr, "%s: Illegal option character '%c'\n", my_progname,
+ opterr);
+#include "sslopt-case.h"
+ }
+ }
+ if (!what_to_do)
+ {
+ int pnlen = strlen(my_progname);
+
+ if (pnlen < 6) // name too short
+ what_to_do = DO_CHECK;
+ else if (!strcmp("repair", my_progname + pnlen - 6))
+ what_to_do = DO_REPAIR;
+ else if (!strcmp("analyze", my_progname + pnlen - 7))
+ what_to_do = DO_ANALYZE;
+ else if (!strcmp("optimize", my_progname + pnlen - 8))
+ what_to_do = DO_OPTIMIZE;
+ else
+ what_to_do = DO_CHECK;
+ }
+ if (default_charset)
+ {
+ if (set_default_charset_by_name(default_charset, MYF(MY_WME)))
+ exit(1);
+ }
+ (*argc) -= optind;
+ (*argv) += optind;
+ if (*argc > 0 && opt_alldbs)
+ {
+ printf("You should give only options, no arguments at all, with option\n");
+ printf("--all-databases. Please see %s --help for more information.\n",
+ my_progname);
+ return 1;
+ }
+ if (*argc < 1 && !opt_alldbs)
+ {
+ printf("You forgot to give the arguments! Please see %s --help\n",
+ my_progname);
+ printf("for more information.\n");
+ return 1;
+ }
+ if (tty_password)
+ opt_password = get_tty_password(NullS);
+ return(0);
+} /* get_options */
+
+
+static int process_all_databases()
+{
+ MYSQL_ROW row;
+ MYSQL_RES *tableres;
+ int result = 0;
+
+ if (mysql_query(sock, "SHOW DATABASES") ||
+ !(tableres = mysql_store_result(sock)))
+ {
+ my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s",
+ MYF(0), mysql_error(sock));
+ return 1;
+ }
+ while ((row = mysql_fetch_row(tableres)))
+ {
+ if (process_all_tables_in_db(row[0]))
+ result = 1;
+ }
+ return result;
+}
+/* process_all_databases */
+
+
+static int process_databases(char **db_names)
+{
+ int result = 0;
+ for ( ; *db_names ; db_names++)
+ {
+ if (process_all_tables_in_db(*db_names))
+ result = 1;
+ }
+ return result;
+} /* process_databases */
+
+
+static int process_selected_tables(char *db, char **table_names, int tables)
+{
+ if (use_db(db))
+ return 1;
+ if (opt_all_in_1)
+ {
+ char *table_names_comma_sep, *end;
+ int i, tot_length = 0;
+
+ for (i = 0; i < tables; i++)
+ tot_length += strlen(*(table_names + i)) + 1;
+
+ if (!(table_names_comma_sep = (char *)
+ my_malloc((sizeof(char) * tot_length) + 1, MYF(MY_WME))))
+ return 1;
+
+ for (end = table_names_comma_sep + 1; tables > 0;
+ tables--, table_names++)
+ {
+ end = strmov(end, *table_names);
+ *end++= ',';
+ }
+ *--end = 0;
+ handle_request_for_tables(table_names_comma_sep + 1, tot_length - 1);
+ my_free(table_names_comma_sep, MYF(0));
+ }
+ else
+ for (; tables > 0; tables--, table_names++)
+ handle_request_for_tables(*table_names, strlen(*table_names));
+ return 0;
+} /* process_selected_tables */
+
+
+static int process_all_tables_in_db(char *database)
+{
+ MYSQL_RES *res;
+ MYSQL_ROW row;
+
+ LINT_INIT(res);
+ if (use_db(database))
+ return 1;
+ if (!(mysql_query(sock, "SHOW TABLES") ||
+ (res = mysql_store_result(sock))))
+ return 1;
+
+ if (opt_all_in_1)
+ {
+ char *tables, *end;
+ uint tot_length = 0;
+
+ while ((row = mysql_fetch_row(res)))
+ tot_length += strlen(row[0]) + 1;
+ mysql_data_seek(res, 0);
+
+ if (!(tables=(char *) my_malloc(sizeof(char)*tot_length+1, MYF(MY_WME))))
+ {
+ mysql_free_result(res);
+ return 1;
+ }
+ for (end = tables + 1; (row = mysql_fetch_row(res)) ;)
+ {
+ end = strmov(end, row[0]);
+ *end++= ',';
+ }
+ *--end = 0;
+ if (tot_length)
+ handle_request_for_tables(tables + 1, tot_length - 1);
+ my_free(tables, MYF(0));
+ }
+ else
+ {
+ while ((row = mysql_fetch_row(res)))
+ handle_request_for_tables(row[0], strlen(row[0]));
+ }
+ mysql_free_result(res);
+ return 0;
+} /* process_all_tables_in_db */
+
+
+static int use_db(char *database)
+{
+ if (mysql_select_db(sock, database))
+ {
+ DBerror(sock, "when selecting the database");
+ return 1;
+ }
+ return 0;
+} /* use_db */
+
+
+static int handle_request_for_tables(char *tables, uint length)
+{
+ char *query, *end, options[100];
+ const char *op = 0;
+
+ options[0] = 0;
+ switch (what_to_do) {
+ case DO_CHECK:
+ op = "CHECK";
+ end = options;
+ if (opt_quick) end = strmov(end, "QUICK");
+ if (opt_fast) end = strmov(end, "FAST");
+ if (opt_medium_check) end = strmov(end, "MEDIUM"); /* Default */
+ if (opt_extended) end = strmov(end, "EXTENDED");
+ if (opt_check_only_changed) end = strmov(end, "CHANGED");
+ break;
+ case DO_REPAIR:
+ op = "REPAIR";
+ end = options;
+ if (opt_quick) end = strmov(end, "QUICK");
+ if (opt_extended) end = strmov(end, "EXTENDED");
+ break;
+ case DO_ANALYZE:
+ op = "ANALYZE";
+ break;
+ case DO_OPTIMIZE:
+ op = "OPTIMIZE";
+ break;
+ }
+
+ if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME))))
+ return 1;
+ sprintf(query, "%s TABLE %s %s", op, options, tables);
+ if (mysql_query(sock, query))
+ {
+ sprintf(options, "when executing '%s TABLE'", op);
+ DBerror(sock, options);
+ return 1;
+ }
+ print_result();
+ my_free(query, MYF(0));
+ return 0;
+}
+
+
+static void print_result()
+{
+ MYSQL_RES *res;
+ MYSQL_ROW row;
+ char prev[NAME_LEN*2+2];
+ int i;
+
+ res = mysql_use_result(sock);
+ prev[0] = '\0';
+ for (i = 0; (row = mysql_fetch_row(res)); i++)
+ {
+ int changed = strcmp(prev, row[0]);
+ int status = !strcmp(row[2], "status");
+ if (opt_silent && status)
+ continue;
+ if (status && changed)
+ printf("%-50s %s", row[0], row[3]);
+ else if (!status && changed)
+ {
+ printf("%s\n%-9s: %s", row[0], row[2], row[3]);
+ if (what_to_do != DO_REPAIR && opt_auto_repair)
+ insert_dynamic(&tables4repair, row[0]);
+ }
+ else
+ printf("%-9s: %s", row[2], row[3]);
+ strmov(prev, row[0]);
+ putchar('\n');
+ }
+ mysql_free_result(res);
+}
+
+
+static int dbConnect(char *host, char *user, char *passwd)
+{
+ DBUG_ENTER("dbConnect");
+ if (verbose)
+ {
+ fprintf(stderr, "# Connecting to %s...\n", host ? host : "localhost");
+ }
+ mysql_init(&mysql_connection);
+ if (opt_compress)
+ mysql_options(&mysql_connection, MYSQL_OPT_COMPRESS, NullS);
+#ifdef HAVE_OPENSSL
+ if (opt_use_ssl)
+ mysql_ssl_set(&mysql_connection, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
+ opt_ssl_capath);
+#endif
+ if (!(sock = mysql_real_connect(&mysql_connection, host, user, passwd,
+ NULL, opt_mysql_port, opt_mysql_unix_port, 0)))
+ {
+ DBerror(&mysql_connection, "when trying to connect");
+ return 1;
+ }
+ return 0;
+} /* dbConnect */
+
+
+static void dbDisconnect(char *host)
+{
+ if (verbose)
+ fprintf(stderr, "# Disconnecting from %s...\n", host ? host : "localhost");
+ mysql_close(sock);
+} /* dbDisconnect */
+
+
+static void DBerror(MYSQL *mysql, const char *when)
+{
+ DBUG_ENTER("DBerror");
+ my_printf_error(0,"Got error: %d: %s %s", MYF(0),
+ mysql_errno(mysql), mysql_error(mysql), when);
+ safe_exit(EX_MYSQLERR);
+ DBUG_VOID_RETURN;
+} /* DBerror */
+
+
+static void safe_exit(int error)
+{
+ if (!first_error)
+ first_error= error;
+ if (ignore_errors)
+ return;
+ if (sock)
+ mysql_close(sock);
+ exit(error);
+}
+
+
+int main(int argc, char **argv)
+{
+ MY_INIT(argv[0]);
+ /*
+ ** Check out the args
+ */
+ if (get_options(&argc, &argv))
+ {
+ my_end(0);
+ exit(EX_USAGE);
+ }
+ if (dbConnect(current_host, current_user, opt_password))
+ exit(EX_MYSQLERR);
+
+ if (opt_auto_repair &&
+ init_dynamic_array(&tables4repair, sizeof(char)*(NAME_LEN*2+2),16,64))
+ {
+ first_error = 1;
+ goto end;
+ }
+
+ if (opt_alldbs)
+ process_all_databases();
+ /* Only one database and selected table(s) */
+ else if (argc > 1 && !opt_databases)
+ process_selected_tables(*argv, (argv + 1), (argc - 1));
+ /* One or more databases, all tables */
+ else
+ process_databases(argv);
+ if (opt_auto_repair)
+ {
+ uint i;
+
+ if (!opt_silent && tables4repair.elements)
+ puts("\nRepairing tables");
+ what_to_do = DO_REPAIR;
+ for (i = 0; i < tables4repair.elements ; i++)
+ {
+ char *name= (char*) dynamic_array_ptr(&tables4repair, i);
+ handle_request_for_tables(name, strlen(name));
+ }
+ }
+ end:
+ dbDisconnect(current_host);
+ if (opt_auto_repair)
+ delete_dynamic(&tables4repair);
+ my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR));
+ my_end(0);
+ return(first_error!=0);
+} /* main */
diff --git a/client/mysqldump.c b/client/mysqldump.c
index ce6c64aa00e..4893c13a0a0 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -37,7 +37,7 @@
** Tõnu Samuel <tonu@please.do.not.remove.this.spam.ee>
**/
-#define DUMP_VERSION "8.13"
+#define DUMP_VERSION "8.14"
#include <global.h>
#include <my_sys.h>
@@ -73,7 +73,7 @@ static my_bool verbose=0,tFlag=0,cFlag=0,dFlag=0,quick=0, extended_insert = 0,
lock_tables=0,ignore_errors=0,flush_logs=0,replace=0,
ignore=0,opt_drop=0,opt_keywords=0,opt_lock=0,opt_compress=0,
opt_delayed=0,create_options=0,opt_quoted=0,opt_databases=0,
- opt_alldbs=0,opt_create_db=0,opt_first_slave=0;
+ opt_alldbs=0,opt_create_db=0,opt_first_slave=0;
static MYSQL mysql_connection,*sock=0;
static char insert_pat[12 * 1024],*opt_password=0,*current_user=0,
*current_host=0,*path=0,*fields_terminated=0,
@@ -85,6 +85,7 @@ static int first_error=0;
extern ulong net_buffer_length;
static DYNAMIC_STRING extended_row;
#include "sslopt-vars.h"
+FILE *result_file;
enum options {OPT_FTB=256, OPT_LTB, OPT_ENC, OPT_O_ENC, OPT_ESC, OPT_KEYWORDS,
OPT_LOCKS, OPT_DROP, OPT_OPTIMIZE, OPT_DELAYED, OPT_TABLES,
@@ -127,6 +128,7 @@ static struct option long_options[] =
{"port", required_argument, 0, 'P'},
{"quick", no_argument, 0, 'q'},
{"quote-names", no_argument, 0, 'Q'},
+ {"result-file", required_argument, 0, 'r'},
{"set-variable", required_argument, 0, 'O'},
{"socket", required_argument, 0, 'S'},
#include "sslopt-longopts.h"
@@ -227,6 +229,10 @@ puts("\
-P, --port=... Port number to use for connection.\n\
-q, --quick Don't buffer query, dump directly to stdout.\n\
-Q, --quote-names Quote table and column names with `\n\
+ -r, --result-file=... Direct output to a given file. This option should be\n\
+ used in MSDOS, because it prevents new line '\\n'\n\
+ from being converted to '\\n\\r' (newline + carriage\n\
+ return).\n\
-S, --socket=... Socket file to use for connection.\n\
--tables Overrides option --databases (-B).\n");
#include "sslopt-usage.h"
@@ -284,9 +290,11 @@ static int get_options(int *argc,char ***argv)
int c,option_index;
my_bool tty_password=0;
+ result_file=stdout;
load_defaults("my",load_default_groups,argc,argv);
set_all_changeable_vars(changeable_vars);
- while ((c=getopt_long(*argc,*argv,"#::p::h:u:O:P:S:T:EBaAcCdefFlnqtvVw:?Ix",
+ while ((c=getopt_long(*argc,*argv,
+ "#::p::h:u:O:P:r:S:T:EBaAcCdefFlnqtvVw:?Ix",
long_options, &option_index)) != EOF)
{
switch(c) {
@@ -346,6 +354,11 @@ static int get_options(int *argc,char ***argv)
case 'P':
opt_mysql_port= (unsigned int) atoi(optarg);
break;
+ case 'r':
+ if (!(result_file = my_fopen(optarg, O_WRONLY | O_BINARY,
+ MYF(MY_WME))))
+ exit(1);
+ break;
case 'S':
opt_mysql_unix_port= optarg;
break;
@@ -589,7 +602,7 @@ static uint getTableStructure(char *table, char* db)
char *strpos, *table_name;
const char *delayed;
char name_buff[NAME_LEN+3],table_buff[NAME_LEN+3];
- FILE *sql_file = stdout;
+ FILE *sql_file = result_file;
DBUG_ENTER("getTableStructure");
delayed= opt_delayed ? " DELAYED " : "";
@@ -625,8 +638,8 @@ static uint getTableStructure(char *table, char* db)
O_WRONLY, MYF(MY_WME));
if (!sql_file) /* If file couldn't be opened */
{
- safe_exit(EX_MYSQLERR);
- DBUG_RETURN(0);
+ safe_exit(EX_MYSQLERR);
+ DBUG_RETURN(0);
}
write_heder(sql_file, db);
}
@@ -724,9 +737,9 @@ static uint getTableStructure(char *table, char* db)
if (init)
{
if (!tFlag)
- fputs(",\n",sql_file);
+ fputs(",\n",sql_file);
if (cFlag)
- strpos=strmov(strpos,", ");
+ strpos=strmov(strpos,", ");
}
init=1;
if (cFlag)
@@ -734,20 +747,20 @@ static uint getTableStructure(char *table, char* db)
if (!tFlag)
{
if (opt_keywords)
- fprintf(sql_file, " %s.%s %s", table_name,
- quote_name(row[SHOW_FIELDNAME],name_buff), row[SHOW_TYPE]);
+ fprintf(sql_file, " %s.%s %s", table_name,
+ quote_name(row[SHOW_FIELDNAME],name_buff), row[SHOW_TYPE]);
else
- fprintf(sql_file, " %s %s", quote_name(row[SHOW_FIELDNAME],name_buff),
- row[SHOW_TYPE]);
+ fprintf(sql_file, " %s %s", quote_name(row[SHOW_FIELDNAME],
+ name_buff), row[SHOW_TYPE]);
if (row[SHOW_DEFAULT])
{
- fputs(" DEFAULT ", sql_file);
- unescape(sql_file,row[SHOW_DEFAULT],lengths[SHOW_DEFAULT]);
+ fputs(" DEFAULT ", sql_file);
+ unescape(sql_file,row[SHOW_DEFAULT],lengths[SHOW_DEFAULT]);
}
if (!row[SHOW_NULL][0])
- fputs(" NOT NULL", sql_file);
+ fputs(" NOT NULL", sql_file);
if (row[SHOW_EXTRA][0])
- fprintf(sql_file, " %s",row[SHOW_EXTRA]);
+ fprintf(sql_file, " %s",row[SHOW_EXTRA]);
}
}
numFields = (uint) mysql_num_rows(tableRes);
@@ -761,9 +774,9 @@ static uint getTableStructure(char *table, char* db)
if (mysql_query(sock, buff))
{
fprintf(stderr, "%s: Can't get keys for table '%s' (%s)\n",
- my_progname, table, mysql_error(sock));
+ my_progname, table, mysql_error(sock));
if (sql_file != stdout)
- my_fclose(sql_file, MYF(MY_WME));
+ my_fclose(sql_file, MYF(MY_WME));
safe_exit(EX_MYSQLERR);
DBUG_RETURN(0);
}
@@ -776,16 +789,16 @@ static uint getTableStructure(char *table, char* db)
{
if (atoi(row[3]) == 1)
{
- keynr++;
- #ifdef FORCE_PRIMARY_KEY
- if (atoi(row[1]) == 0 && primary_key == INT_MAX)
- primary_key=keynr;
- #endif
- if (!strcmp(row[2],"PRIMARY"))
- {
- primary_key=keynr;
- break;
- }
+ keynr++;
+#ifdef FORCE_PRIMARY_KEY
+ if (atoi(row[1]) == 0 && primary_key == INT_MAX)
+ primary_key=keynr;
+#endif
+ if (!strcmp(row[2],"PRIMARY"))
+ {
+ primary_key=keynr;
+ break;
+ }
}
}
mysql_data_seek(tableRes,0);
@@ -794,21 +807,21 @@ static uint getTableStructure(char *table, char* db)
{
if (atoi(row[3]) == 1)
{
- if (keynr++)
- putc(')', sql_file);
- if (atoi(row[1])) /* Test if duplicate key */
- /* Duplicate allowed */
- fprintf(sql_file, ",\n KEY %s (",quote_name(row[2],name_buff));
- else if (keynr == primary_key)
- fputs(",\n PRIMARY KEY (",sql_file); /* First UNIQUE is primary */
- else
- fprintf(sql_file, ",\n UNIQUE %s (",quote_name(row[2],name_buff));
+ if (keynr++)
+ putc(')', sql_file);
+ if (atoi(row[1])) /* Test if duplicate key */
+ /* Duplicate allowed */
+ fprintf(sql_file, ",\n KEY %s (",quote_name(row[2],name_buff));
+ else if (keynr == primary_key)
+ fputs(",\n PRIMARY KEY (",sql_file); /* First UNIQUE is primary */
+ else
+ fprintf(sql_file, ",\n UNIQUE %s (",quote_name(row[2],name_buff));
}
else
- putc(',', sql_file);
+ putc(',', sql_file);
fputs(quote_name(row[4],name_buff), sql_file);
if (row[7])
- fprintf(sql_file, " (%s)",row[7]); /* Sub key */
+ fprintf(sql_file, " (%s)",row[7]); /* Sub key */
}
if (keynr)
putc(')', sql_file);
@@ -820,28 +833,28 @@ static uint getTableStructure(char *table, char* db)
sprintf(buff,"show table status like '%s'",table);
if (mysql_query(sock, buff))
{
- if (mysql_errno(sock) != ER_PARSE_ERROR)
- { /* If old MySQL version */
- if (verbose)
- fprintf(stderr,
- "# Warning: Couldn't get status information for table '%s' (%s)\n",
- table,mysql_error(sock));
- }
+ if (mysql_errno(sock) != ER_PARSE_ERROR)
+ { /* If old MySQL version */
+ if (verbose)
+ fprintf(stderr,
+ "# Warning: Couldn't get status information for table '%s' (%s)\n",
+ table,mysql_error(sock));
+ }
}
else if (!(tableRes=mysql_store_result(sock)) ||
- !(row=mysql_fetch_row(tableRes)))
+ !(row=mysql_fetch_row(tableRes)))
{
- fprintf(stderr,
- "Error: Couldn't read status information for table '%s' (%s)\n",
- table,mysql_error(sock));
+ fprintf(stderr,
+ "Error: Couldn't read status information for table '%s' (%s)\n",
+ table,mysql_error(sock));
}
else
{
- fputs("/*!",sql_file);
- print_value(sql_file,tableRes,row,"type=","Type",0);
- print_value(sql_file,tableRes,row,"","Create_options",0);
- print_value(sql_file,tableRes,row,"comment=","Comment",1);
- fputs(" */",sql_file);
+ fputs("/*!",sql_file);
+ print_value(sql_file,tableRes,row,"type=","Type",0);
+ print_value(sql_file,tableRes,row,"","Create_options",0);
+ print_value(sql_file,tableRes,row,"comment=","Comment",1);
+ fputs(" */",sql_file);
}
mysql_free_result(tableRes); /* Is always safe to free */
}
@@ -960,14 +973,14 @@ static void dumpTable(uint numFields, char *table)
}
else
{
- printf("\n#\n# Dumping data for table '%s'\n", table);
+ fprintf(result_file,"\n#\n# Dumping data for table '%s'\n", table);
sprintf(query, "SELECT * FROM %s", quote_name(table,table_buff));
if (where)
{
- printf("# WHERE: %s\n",where);
+ fprintf(result_file,"# WHERE: %s\n",where);
strxmov(strend(query), " WHERE ",where,NullS);
}
- puts("#\n");
+ fputs("#\n\n", result_file);
if (mysql_query(sock, query))
{
@@ -994,7 +1007,8 @@ static void dumpTable(uint numFields, char *table)
}
if (opt_lock)
- printf("LOCK TABLES %s WRITE;\n", quote_name(table,table_buff));
+ fprintf(result_file,"LOCK TABLES %s WRITE;\n",
+ quote_name(table,table_buff));
total_length=net_buffer_length; /* Force row break */
row_break=0;
@@ -1007,7 +1021,7 @@ static void dumpTable(uint numFields, char *table)
ulong *lengths=mysql_fetch_lengths(res);
rownr++;
if (!extended_insert)
- fputs(insert_pat,stdout);
+ fputs(insert_pat,result_file);
mysql_field_seek(res,0);
for (i = 0; i < mysql_num_fields(res); i++)
@@ -1061,17 +1075,17 @@ static void dumpTable(uint numFields, char *table)
else
{
if (i)
- putchar(',');
+ fputc(',',result_file);
if (row[i])
{
if (!IS_NUM_FIELD(field))
- unescape(stdout, row[i], lengths[i]);
+ unescape(result_file, row[i], lengths[i]);
else
- fputs(row[i],stdout);
+ fputs(row[i],result_file);
}
else
{
- fputs("NULL",stdout);
+ fputs("NULL",result_file);
}
}
}
@@ -1084,27 +1098,25 @@ static void dumpTable(uint numFields, char *table)
if (total_length + row_length < net_buffer_length)
{
total_length += row_length;
- putchar(','); /* Always row break */
- fputs(extended_row.str,stdout);
+ fputc(',',result_file); /* Always row break */
+ fputs(extended_row.str,result_file);
}
else
{
if (row_break)
- puts(";");
+ fputs(";\n", result_file);
row_break=1; /* This is first row */
- fputs(insert_pat,stdout);
- fputs(extended_row.str,stdout);
+ fputs(insert_pat,result_file);
+ fputs(extended_row.str,result_file);
total_length = row_length+init_length;
}
}
else
- {
- puts(");");
- }
+ fputs(");\n", result_file);
}
if (extended_insert && row_break)
- puts(";"); /* If not empty table */
- fflush(stdout);
+ fputs(";\n", result_file); /* If not empty table */
+ fflush(result_file);
if (mysql_errno(sock))
{
sprintf(query,"%s: Error %d: %s when dumping table '%s' at row: %ld\n",
@@ -1118,7 +1130,7 @@ static void dumpTable(uint numFields, char *table)
return;
}
if (opt_lock)
- puts("UNLOCK TABLES;");
+ fputs("UNLOCK TABLES;\n", result_file);
mysql_free_result(res);
}
} /* dumpTable */
@@ -1194,10 +1206,11 @@ static int init_dumping(char *database)
{
if (opt_databases || opt_alldbs)
{
- printf("\n#\n# Current Database: %s\n#\n", database);
+ fprintf(result_file,"\n#\n# Current Database: %s\n#\n", database);
if (!opt_create_db)
- printf("\nCREATE DATABASE /*!32312 IF NOT EXISTS*/ %s;\n", database);
- printf("\nUSE %s;\n", database);
+ fprintf(result_file,"\nCREATE DATABASE /*!32312 IF NOT EXISTS*/ %s;\n",
+ database);
+ fprintf(result_file,"\nUSE %s;\n", database);
}
}
if (extended_insert)
@@ -1329,7 +1342,7 @@ int main(int argc, char **argv)
if (dbConnect(current_host, current_user, opt_password))
exit(EX_MYSQLERR);
if (!path)
- write_heder(stdout, *argv);
+ write_heder(result_file, *argv);
if (opt_first_slave)
{
@@ -1365,7 +1378,9 @@ int main(int argc, char **argv)
}
}
dbDisconnect(current_host);
- puts("");
+ fputs("\n", result_file);
+ if (result_file != stdout)
+ my_fclose(result_file, MYF(0));
my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR));
if (extended_insert)
dynstr_free(&extended_row);
diff --git a/client/mysqltest.c b/client/mysqltest.c
index f03439680eb..5eaefe165d7 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -569,7 +569,7 @@ int eval_expr(VAR* v, const char* p, const char** p_end)
else
{
v->str_val = (char*)p;
- v->str_val_len = (p_end && *p_end) ? *p_end - p : strlen(p);
+ v->str_val_len = (p_end && *p_end) ? (int) (*p_end - p) : (int) strlen(p);
v->int_val=atoi(p);
v->int_dirty=0;
return 0;
@@ -1766,6 +1766,7 @@ static void init_var_hash()
die("Variable hash initialization failed");
var_from_env("MASTER_MYPORT", "9306");
var_from_env("SLAVE_MYPORT", "9307");
+ var_from_env("MYSQL_TEST_DIR", "");
}
int main(int argc, char** argv)
diff --git a/configure.in b/configure.in
index c22014eaa19..021f25308d8 100644
--- a/configure.in
+++ b/configure.in
@@ -119,6 +119,17 @@ AC_PROG_AWK
AC_PROG_CC
AC_PROG_CXX
AC_PROG_CPP
+
+# Fix for sgi gcc / sgiCC which tries to emulate gcc
+if test "$CC" = "sgicc"
+then
+ ac_cv_prog_gcc="no"
+fi
+if test "$CXX" = "sgi++"
+then
+ GXX="no"
+fi
+
if test "$ac_cv_prog_gcc" = "yes"
then
AS="$CC -c"
@@ -390,7 +401,7 @@ AM_PROG_CC_STDC
if test "$am_cv_prog_cc_stdc" = "no"
then
- AC_MSG_ERROR([MySQL requiers a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.])
+ AC_MSG_ERROR([MySQL requires an ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.])
fi
NOINST_LDFLAGS=
@@ -1215,7 +1226,7 @@ fi
AC_SUBST(COMPILATION_COMMENT)
AC_MSG_CHECKING("need of special linking flags")
-if test "$IS_LINUX" = "true" -a "$all_is_static" != "yes"
+if test "$IS_LINUX" = "true" -a "$ac_cv_prog_gcc" = "yes" -a "$all_is_static" != "yes"
then
LDFLAGS="$LDFLAGS -rdynamic"
AC_MSG_RESULT("-rdynamic")
diff --git a/extra/resolve_stack_dump.c b/extra/resolve_stack_dump.c
index 8976a7698b8..bda23a41efd 100644
--- a/extra/resolve_stack_dump.c
+++ b/extra/resolve_stack_dump.c
@@ -303,7 +303,7 @@ static void do_resolve()
uchar* addr = (uchar*)read_addr(&p);
if(resolve_addr(addr, &se))
fprintf(fp_out, "%p %s + %d\n", addr, se.symbol,
- addr - se.addr);
+ (int) (addr - se.addr));
else
fprintf(fp_out, "%p (?)\n", addr);
diff --git a/include/global.h b/include/global.h
index 2c9157630f4..a11600a96fc 100644
--- a/include/global.h
+++ b/include/global.h
@@ -332,7 +332,8 @@ typedef int File; /* File descriptor */
typedef int my_socket; /* File descriptor for sockets */
#define INVALID_SOCKET -1
#endif
-typedef RETSIGTYPE sig_handler; /* Function to handle signals */
+/* Type for functions that handle signals */
+#define sig_handler RETSIGTYPE
typedef void (*sig_return)();/* Returns type from signal */
#if defined(__GNUC__) && !defined(_lint)
typedef char pchar; /* Mixed prototypes can take char */
diff --git a/include/m_ctype.h b/include/m_ctype.h
index 438b7b34c9a..645c07b79ae 100644
--- a/include/m_ctype.h
+++ b/include/m_ctype.h
@@ -66,6 +66,7 @@ extern CHARSET_INFO compiled_charsets[];
#endif
/* Don't include std ctype.h when this is included */
#define _CTYPE_H
+#define _CTYPE_H_
#define _CTYPE_INCLUDED
#define __CTYPE_INCLUDED
#define _CTYPE_USING /* Don't put names in global namespace. */
diff --git a/include/myisam.h b/include/myisam.h
index c4e26c5fb22..9b006467ac8 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -333,9 +333,9 @@ typedef struct st_mi_check_param
ulonglong unique_count[MI_MAX_KEY_SEG+1];
ha_checksum key_crc[MI_MAX_POSSIBLE_KEY];
ulong rec_per_key_part[MI_MAX_KEY_SEG*MI_MAX_POSSIBLE_KEY];
- void* thd;
- char* table_name;
- char* op_name;
+ void *thd;
+ char *db_name,*table_name;
+ char *op_name;
} MI_CHECK;
diff --git a/include/mysql.h b/include/mysql.h
index 350ce860a2f..b5d918a98af 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -20,6 +20,14 @@
#ifndef _mysql_h
#define _mysql_h
+#ifdef __CYGWIN__ /* CYGWIN implements a UNIX API */
+#undef WIN
+#undef _WIN
+#undef _WIN32
+#undef _WIN64
+#undef __WIN__
+#endif
+
#ifndef MYSQL_SERVER
#ifdef __cplusplus
extern "C" {
diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c
index 34fbc5b9f98..0046a3761a6 100644
--- a/innobase/buf/buf0buf.c
+++ b/innobase/buf/buf0buf.c
@@ -204,7 +204,28 @@ ulint buf_dbg_counter = 0; /* This is used to insert validation
ibool buf_debug_prints = FALSE; /* If this is set TRUE,
the program prints info whenever
read-ahead or flush occurs */
-
+
+/************************************************************************
+Calculates a page checksum which is stored to the page when it is written
+to a file. Note that we must be careful to calculate the same value
+on 32-bit and 64-bit architectures. */
+
+ulint
+buf_calc_page_checksum(
+/*===================*/
+ /* out: checksum */
+ byte* page) /* in: buffer page */
+{
+ ulint checksum;
+
+ checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);
+ + ut_fold_binary(page + FIL_PAGE_DATA, UNIV_PAGE_SIZE - FIL_PAGE_DATA
+ - FIL_PAGE_END_LSN);
+ checksum = checksum & 0xFFFFFFFF;
+
+ return(checksum);
+}
+
/************************************************************************
Initializes a buffer control block when the buf_pool is created. */
static
@@ -1171,12 +1192,36 @@ buf_page_io_complete(
dulint id;
dict_index_t* index;
ulint io_type;
+ ulint checksum;
ut_ad(block);
io_type = block->io_fix;
if (io_type == BUF_IO_READ) {
+ checksum = buf_calc_page_checksum(block->frame);
+
+ /* From version 3.23.38 up we store the page checksum
+ to the 4 upper bytes of the page end lsn field */
+
+ if ((mach_read_from_4(block->frame + FIL_PAGE_LSN + 4)
+ != mach_read_from_4(block->frame + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN + 4))
+ || (checksum != mach_read_from_4(block->frame
+ + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN)
+ && mach_read_from_4(block->frame + FIL_PAGE_LSN)
+ != mach_read_from_4(block->frame
+ + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN))) {
+ fprintf(stderr,
+ "InnoDB: Database page corruption or a failed\n"
+ "InnoDB: file read of page %lu.\n", block->offset);
+ fprintf(stderr,
+ "InnoDB: You may have to recover from a backup.\n");
+ exit(1);
+ }
+
if (recv_recovery_is_on()) {
recv_recover_page(TRUE, block->frame, block->space,
block->offset);
@@ -1208,17 +1253,8 @@ buf_page_io_complete(
ut_ad(buf_pool->n_pend_reads > 0);
buf_pool->n_pend_reads--;
buf_pool->n_pages_read++;
-/*
- if (0 != ut_dulint_cmp(
- mach_read_from_8(block->frame + FIL_PAGE_LSN),
- mach_read_from_8(block->frame + UNIV_PAGE_SIZE
- - FIL_PAGE_END_LSN))) {
- printf("DB error: file page corrupted!\n");
- ut_error;
- }
-*/
rw_lock_x_unlock_gen(&(block->lock), BUF_IO_READ);
rw_lock_x_unlock_gen(&(block->read_lock), BUF_IO_READ);
diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c
index 443256cca34..90bdde1ebc6 100644
--- a/innobase/buf/buf0flu.c
+++ b/innobase/buf/buf0flu.c
@@ -222,6 +222,12 @@ buf_flush_write_block_low(
mach_write_to_8(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
block->newest_modification);
+ /* We overwrite the first 4 bytes of the end lsn field to store
+ a page checksum */
+
+ mach_write_to_4(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
+ buf_calc_page_checksum(block->frame));
+
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
(void*)block->frame, (void*)block);
diff --git a/innobase/buf/buf0rea.c b/innobase/buf/buf0rea.c
index 13e9ed0476b..644dd226a0e 100644
--- a/innobase/buf/buf0rea.c
+++ b/innobase/buf/buf0rea.c
@@ -73,11 +73,13 @@ buf_read_page_low(
sync = TRUE;
}
#endif
- if (trx_sys_hdr_page(space, offset)) {
+ if (ibuf_bitmap_page(offset) || trx_sys_hdr_page(space, offset)) {
/* Trx sys header is so low in the latching order that we play
safe and do not leave the i/o-completion to an asynchronous
- i/o-thread: */
+ i/o-thread. Ibuf bitmap pages must always be read with
+ synchronous i/o, to make sure they do not get involved in
+ thread deadlocks. */
sync = TRUE;
}
diff --git a/innobase/configure.in b/innobase/configure.in
index b2fa034479b..83d302c6dc4 100644
--- a/innobase/configure.in
+++ b/innobase/configure.in
@@ -4,13 +4,41 @@ AC_CANONICAL_SYSTEM
AM_MAINTAINER_MODE
AM_CONFIG_HEADER(ib_config.h)
AM_INIT_AUTOMAKE(ib, 0.90)
+
+# This is needed before AC_PROG_CC
+#
+
+if test "x${CFLAGS-}" = x ; then
+ cflags_is_set=no
+else
+ cflags_is_set=yes
+fi
+
+if test "x${CPPFLAGS-}" = x ; then
+ cppflags_is_set=no
+else
+ cppflags_is_set=yes
+fi
+
+if test "x${LDFLAGS-}" = x ; then
+ ldflags_is_set=no
+else
+ ldflags_is_set=yes
+fi
+
+# The following hack should ensure that configure doesn't add optimizing
+# or debugging flags to CFLAGS or CXXFLAGS
+CFLAGS="$CFLAGS "
+CXXFLAGS="$CXXFLAGS "
+
AC_PROG_CC
AC_PROG_RANLIB
AC_PROG_INSTALL
AC_CHECK_HEADERS(aio.h sched.h)
AC_CHECK_SIZEOF(int, 4)
AC_CHECK_FUNCS(sched_yield)
-AC_C_INLINE
+AC_CHECK_FUNCS(fdatasync)
+#AC_C_INLINE Already checked in MySQL
AC_C_BIGENDIAN
# Build optimized or debug version ?
@@ -56,10 +84,15 @@ else
fi
case "$target_os" in
- hp*) AC_DEFINE(UNIV_MUST_NOT_INLINE, 1,
- No inlining because gcc broken on HP-UX);;
- *sgi-irix*) AC_DEFINE(UNIV_MUST_NOT_INLINE, 1,
- No inlining because cc broken on irix);;
+ hp*)
+ CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
+ irix*)
+ CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
+ osf*)
+ CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
+ sysv5uw7*)
+ # Problem when linking on SCO
+ CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
esac
AC_OUTPUT(Makefile os/Makefile ut/Makefile btr/Makefile
diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h
index 09883fbb037..5e90f5952fc 100644
--- a/innobase/include/buf0buf.h
+++ b/innobase/include/buf0buf.h
@@ -342,6 +342,16 @@ buf_frame_get_modify_clock(
/*=======================*/
/* out: value */
buf_frame_t* frame); /* in: pointer to a frame */
+/************************************************************************
+Calculates a page checksum which is stored to the page when it is written
+to a file. Note that we must be careful to calculate the same value
+on 32-bit and 64-bit architectures. */
+
+ulint
+buf_calc_page_checksum(
+/*===================*/
+ /* out: checksum */
+ byte* page); /* in: buffer page */
/**************************************************************************
Gets the page number of a pointer pointing within a buffer frame containing
a file page. */
diff --git a/innobase/include/univ.i b/innobase/include/univ.i
index 5e74b7eb09b..fa5a8aef389 100644
--- a/innobase/include/univ.i
+++ b/innobase/include/univ.i
@@ -9,7 +9,7 @@ Created 1/20/1994 Heikki Tuuri
#ifndef univ_i
#define univ_i
-#if (defined(_WIN32) || defined(_WIN64))
+#if (defined(_WIN32) || defined(_WIN64)) && !defined(MYSQL_SERVER)
#define __WIN__
#include <windows.h>
@@ -20,18 +20,28 @@ be defined:
#define CRITICAL_SECTION ulint
*/
+#ifdef _NT_
+#define __NT__
+#endif
+
#else
/* The Unix version */
+/* Most C compilers other than gcc do not know 'extern inline' */
+#if !defined(__GNUC__) && !defined(__WIN__)
+#define UNIV_MUST_NOT_INLINE
+#endif
+
/* Include two header files from MySQL to make the Unix flavor used
in compiling more Posix-compatible. We assume that 'innobase' is a
subdirectory of 'mysql'. */
#include <global.h>
#include <my_pthread.h>
+#ifndef __WIN__
/* Include <sys/stat.h> to get S_I... macros defined for os0file.c */
#include <sys/stat.h>
-
+#endif
#undef PACKAGE
#undef VERSION
diff --git a/innobase/include/ut0dbg.h b/innobase/include/ut0dbg.h
index a36b022e036..751609b244e 100644
--- a/innobase/include/ut0dbg.h
+++ b/innobase/include/ut0dbg.h
@@ -9,9 +9,9 @@ Created 1/30/1994 Heikki Tuuri
#ifndef ut0dbg_h
#define ut0dbg_h
+#include "univ.i"
#include <assert.h>
#include <stdlib.h>
-#include "univ.i"
#include "os0thread.h"
extern ulint ut_dbg_zero; /* This is used to eliminate
diff --git a/innobase/include/ut0mem.h b/innobase/include/ut0mem.h
index 4d266f34c17..fa46514fe16 100644
--- a/innobase/include/ut0mem.h
+++ b/innobase/include/ut0mem.h
@@ -9,9 +9,9 @@ Created 5/30/1994 Heikki Tuuri
#ifndef ut0mem_h
#define ut0mem_h
+#include "univ.i"
#include <string.h>
#include <stdlib.h>
-#include "univ.i"
UNIV_INLINE
void*
diff --git a/innobase/include/ut0ut.h b/innobase/include/ut0ut.h
index 05d4f455c58..f2c4781c167 100644
--- a/innobase/include/ut0ut.h
+++ b/innobase/include/ut0ut.h
@@ -9,10 +9,9 @@ Created 1/20/1994 Heikki Tuuri
#ifndef ut0ut_h
#define ut0ut_h
-#include <time.h>
-#include <ctype.h>
-
#include "univ.i"
+#include <time.h>
+#include <m_ctype.h>
typedef time_t ib_time_t;
diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c
index 894ef9c3840..e93cd3f0364 100644
--- a/innobase/log/log0recv.c
+++ b/innobase/log/log0recv.c
@@ -882,12 +882,6 @@ recv_recover_page(
recv = UT_LIST_GET_NEXT(rec_list, recv);
}
- /* If the following assert fails, the file page is incompletely
- written, and a recovery from a backup is required */
-
- ut_a(0 == ut_dulint_cmp(mach_read_from_8(page + FIL_PAGE_LSN),
- mach_read_from_8(page + UNIV_PAGE_SIZE
- - FIL_PAGE_END_LSN)));
mutex_enter(&(recv_sys->mutex));
recv_addr->state = RECV_PROCESSED;
diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c
index 8e9b8482259..065d9b6f553 100644
--- a/innobase/os/os0file.c
+++ b/innobase/os/os0file.c
@@ -15,9 +15,6 @@ Created 10/21/1995 Heikki Tuuri
/* We assume in this case that the OS has standard Posix aio (at least SunOS
2.6, HP-UX 11i and AIX 4.3 have) */
-#undef __USE_FILE_OFFSET64
-
-#include <aio.h>
#endif
/* We use these mutexes to protect lseek + file i/o operation, if the
@@ -163,7 +160,6 @@ os_file_handle_error(
os_file_t file, /* in: file pointer */
char* name) /* in: name of a file or NULL */
{
- int input_char;
ulint err;
UT_NOT_USED(file);
@@ -171,33 +167,19 @@ os_file_handle_error(
err = os_file_get_last_error();
if (err == OS_FILE_DISK_FULL) {
-ask_again:
- printf("\n");
+ fprintf(stderr, "\n");
if (name) {
- printf(
- "Innobase encountered a problem with file %s.\n",
+ fprintf(stderr,
+ "InnoDB: Encountered a problem with file %s.\n",
name);
}
- printf("Disk is full. Try to clean the disk to free space\n");
- printf("before answering the following: How to continue?\n");
- printf("(Y == freed some space: try again)\n");
- printf("(N == crash the database: will restart it)?\n");
-ask_with_no_question:
- input_char = getchar();
-
- if (input_char == (int) 'N') {
- ut_error;
-
- return(FALSE);
- } else if (input_char == (int) 'Y') {
+ fprintf(stderr,
+ "InnoDB: Cannot continue operation.\n"
+ "InnoDB: Disk is full. Try to clean the disk to free space.\n"
+ "InnoDB: Delete possible created file and restart.\n");
- return(TRUE);
- } else if (input_char == (int) '\n') {
+ exit(1);
- goto ask_with_no_question;
- } else {
- goto ask_again;
- }
} else if (err == OS_FILE_AIO_RESOURCES_RESERVED) {
return(TRUE);
@@ -317,6 +299,13 @@ try_again:
UT_NOT_USED(purpose);
+ /* On Linux opening a file in the O_SYNC mode seems to be much
+ more efficient than calling an explicit fsync or fdatasync after
+ each write */
+
+#ifdef O_SYNC
+ create_flag = create_flag | O_SYNC;
+#endif
if (create_mode == OS_FILE_CREATE) {
file = open(name, create_flag, S_IRUSR | S_IWUSR | S_IRGRP
| S_IWGRP | S_IROTH | S_IWOTH);
@@ -510,8 +499,18 @@ os_file_flush(
#else
int ret;
- ret = fsync(file);
+#ifdef O_SYNC
+ /* We open all files with the O_SYNC option, which means there
+ should be no need for fsync or fdatasync. In practice such a need
+ may be because on a Linux Xeon computer "donna" the OS seemed to be
+ fooled to believe that 500 disk writes/second are possible. */
+ ret = 0;
+#elif defined(HAVE_FDATASYNC)
+ ret = fdatasync(file);
+#else
+ ret = fsync(file);
+#endif
if (ret == 0) {
return(TRUE);
}
@@ -534,8 +533,10 @@ os_file_pread(
ulint n, /* in: number of bytes to read */
ulint offset) /* in: offset from where to read */
{
+ off_t offs = (off_t)offset;
+
#ifdef HAVE_PREAD
- return(pread(file, buf, n, (off_t) offset));
+ return(pread(file, buf, n, offs));
#else
ssize_t ret;
ulint i;
@@ -545,7 +546,7 @@ os_file_pread(
os_mutex_enter(os_file_seek_mutexes[i]);
- ret = lseek(file, (off_t) offset, 0);
+ ret = lseek(file, offs, 0);
if (ret < 0) {
os_mutex_exit(os_file_seek_mutexes[i]);
@@ -573,10 +574,19 @@ os_file_pwrite(
ulint n, /* in: number of bytes to write */
ulint offset) /* in: offset where to write */
{
+ ssize_t ret;
+ off_t offs = (off_t)offset;
+
#ifdef HAVE_PWRITE
- return(pwrite(file, buf, n, (off_t) offset));
+ ret = pwrite(file, buf, n, offs);
+
+ /* Always do fsync to reduce the probability that when the OS crashes,
+ a database page is only partially physically written to disk. */
+
+ ut_a(TRUE == os_file_flush(file));
+
+ return(ret);
#else
- ssize_t ret;
ulint i;
/* Protect the seek / write operation with a mutex */
@@ -584,7 +594,7 @@ os_file_pwrite(
os_mutex_enter(os_file_seek_mutexes[i]);
- ret = lseek(file, (off_t) offset, 0);
+ ret = lseek(file, offs, 0);
if (ret < 0) {
os_mutex_exit(os_file_seek_mutexes[i]);
@@ -594,6 +604,11 @@ os_file_pwrite(
ret = write(file, buf, n);
+ /* Always do fsync to reduce the probability that when the OS crashes,
+ a database page is only partially physically written to disk. */
+
+ ut_a(TRUE == os_file_flush(file));
+
os_mutex_exit(os_file_seek_mutexes[i]);
return(ret);
@@ -662,7 +677,6 @@ try_again:
#else
ibool retry;
ssize_t ret;
- ulint i;
#if (UNIV_WORD_SIZE == 8)
offset = offset + (offset_high << 32);
@@ -670,15 +684,9 @@ try_again:
UT_NOT_USED(offset_high);
#endif
try_again:
- /* Protect the seek / read operation with a mutex */
- i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES;
-
- os_mutex_enter(os_file_seek_mutexes[i]);
-
- ret = os_file_pread(file, buf, n, (off_t) offset);
+ ret = os_file_pread(file, buf, n, offset);
if ((ulint)ret == n) {
- os_mutex_exit(os_file_seek_mutexes[i]);
return(TRUE);
}
@@ -747,9 +755,14 @@ try_again:
}
ret = WriteFile(file, buf, n, &len, NULL);
+
+ /* Always do fsync to reduce the probability that when the OS crashes,
+ a database page is only partially physically written to disk. */
+
+ ut_a(TRUE == os_file_flush(file));
os_mutex_exit(os_file_seek_mutexes[i]);
-
+
if (ret && len == n) {
return(TRUE);
}
@@ -763,7 +776,7 @@ try_again:
UT_NOT_USED(offset_high);
#endif
try_again:
- ret = os_file_pwrite(file, buf, n, (off_t) offset);
+ ret = os_file_pwrite(file, buf, n, offset);
if ((ulint)ret == n) {
return(TRUE);
@@ -1344,6 +1357,10 @@ try_again:
}
} else if (mode == OS_AIO_IBUF) {
ut_ad(type == OS_FILE_READ);
+ /* Reduce probability of deadlock bugs in connection with ibuf:
+ do not let the ibuf i/o handler sleep */
+
+ wake_later = FALSE;
array = os_aio_ibuf_array;
} else if (mode == OS_AIO_LOG) {
@@ -1413,7 +1430,7 @@ try_again:
return(TRUE);
}
- goto error_handling;
+ err = 1; /* Fall through the next if */
}
#endif
if (err == 0) {
@@ -1511,6 +1528,10 @@ os_aio_windows_handle(
if (ret && len == slot->len) {
ret_val = TRUE;
+
+ if (slot->type == OS_FILE_WRITE) {
+ ut_a(TRUE == os_file_flush(slot->file));
+ }
} else {
err = GetLastError();
ut_error;
@@ -1592,6 +1613,10 @@ os_aio_posix_handle(
*message1 = slot->message1;
*message2 = slot->message2;
+ if (slot->type == OS_FILE_WRITE) {
+ ut_a(TRUE == os_file_flush(slot->file));
+ }
+
os_mutex_exit(array->mutex);
os_aio_array_free_slot(array, slot);
diff --git a/innobase/os/os0sync.c b/innobase/os/os0sync.c
index 4c283431575..c5dd603100d 100644
--- a/innobase/os/os0sync.c
+++ b/innobase/os/os0sync.c
@@ -247,6 +247,7 @@ os_event_wait_time(
return(OS_SYNC_TIME_EXCEEDED);
} else {
ut_error;
+ return(1000000); /* dummy value to eliminate a compiler warning */
}
#else
UT_NOT_USED(time);
diff --git a/innobase/pars/lexyy.c b/innobase/pars/lexyy.c
index 6ba8ecfbcb1..64b8963028b 100644
--- a/innobase/pars/lexyy.c
+++ b/innobase/pars/lexyy.c
@@ -6,6 +6,7 @@
#define FLEX_SCANNER
+#include "univ.i"
#include <stdio.h>
@@ -5850,7 +5851,6 @@ Created 12/14/1997 Heikki Tuuri
*******************************************************/
#define YYSTYPE que_node_t*
-#include "univ.i"
#include "pars0pars.h"
#include "pars0grm.h"
#include "pars0sym.h"
diff --git a/innobase/pars/pars0grm.c b/innobase/pars/pars0grm.c
index e7317d1f030..e06cba4e69d 100644
--- a/innobase/pars/pars0grm.c
+++ b/innobase/pars/pars0grm.c
@@ -95,11 +95,11 @@
/* The value of the semantic attribute is a pointer to a query tree node
que_node_t */
#define YYSTYPE que_node_t*
-#define alloca mem_alloc
-
-#include <math.h>
#include "univ.i"
+#undef alloca
+#define alloca mem_alloc
+#include <math.h>
#include "pars0pars.h"
#include "mem0mem.h"
#include "que0types.h"
diff --git a/innobase/pars/pars0grm.y b/innobase/pars/pars0grm.y
index a13aeaac1e2..ae8c5ab91ec 100644
--- a/innobase/pars/pars0grm.y
+++ b/innobase/pars/pars0grm.y
@@ -10,11 +10,11 @@ Created 12/14/1997 Heikki Tuuri
/* The value of the semantic attribute is a pointer to a query tree node
que_node_t */
#define YYSTYPE que_node_t*
-#define alloca mem_alloc
-
-#include <math.h>
#include "univ.i"
+#undef alloca
+#define alloca mem_alloc
+#include <math.h>
#include "pars0pars.h"
#include "mem0mem.h"
#include "que0types.h"
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index e6182257581..58e0d053947 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -48,6 +48,52 @@ to que_run_threads: this is to allow canceling runaway queries */
#define SEL_EXHAUSTED 1
#define SEL_RETRY 2
+/************************************************************************
+Returns TRUE if the user-defined column values in a secondary index record
+are the same as the corresponding columns in the clustered index record. */
+static
+ibool
+row_sel_sec_rec_is_for_clust_rec(
+/*=============================*/
+ rec_t* sec_rec,
+ dict_index_t* sec_index,
+ rec_t* clust_rec,
+ dict_index_t* clust_index)
+{
+ dict_col_t* col;
+ byte* sec_field;
+ ulint sec_len;
+ byte* clust_field;
+ ulint clust_len;
+ ulint n;
+ ulint i;
+
+ n = dict_index_get_n_ordering_defined_by_user(sec_index);
+
+ for (i = 0; i < n; i++) {
+ col = dict_field_get_col(
+ dict_index_get_nth_field(sec_index, i));
+
+ clust_field = rec_get_nth_field(clust_rec,
+ dict_col_get_clust_pos(col),
+ &clust_len);
+ sec_field = rec_get_nth_field(sec_rec, i, &sec_len);
+
+ if (sec_len != clust_len) {
+
+ return(FALSE);
+ }
+
+ if (sec_len != UNIV_SQL_NULL
+ && ut_memcmp(sec_field, clust_field, sec_len) != 0) {
+
+ return(FALSE);
+ }
+ }
+
+ return(TRUE);
+}
+
/*************************************************************************
Creates a select node struct. */
@@ -561,6 +607,8 @@ row_sel_get_clust_rec(
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
+ old_vers = NULL;
+
if (!lock_clust_rec_cons_read_sees(clust_rec, index,
node->read_view)) {
@@ -579,6 +627,28 @@ row_sel_get_clust_rec(
return(DB_SUCCESS);
}
}
+
+ /* If we had to go to an earlier version of row or the
+ secondary index record is delete marked, then it may be that
+ the secondary index record corresponding to clust_rec
+ (or old_vers) is not rec; in that case we must ignore
+ such row because in our snapshot rec would not have existed.
+ Remember that from rec we cannot see directly which transaction
+ id corresponds to it: we have to go to the clustered index
+ record. A query where we want to fetch all rows where
+ the secondary index value is in some interval would return
+ a wrong result if we would not drop rows which we come to
+ visit through secondary index records that would not really
+ exist in our snapshot. */
+
+ if ((old_vers || rec_get_deleted_flag(rec))
+ && !row_sel_sec_rec_is_for_clust_rec(rec, plan->index,
+ clust_rec, index)) {
+ clust_rec = NULL;
+ *out_rec = clust_rec;
+
+ return(DB_SUCCESS);
+ }
}
/* Fetch the columns needed in test conditions */
@@ -2105,6 +2175,8 @@ row_sel_get_clust_rec_for_mysql(
a previous version of the record */
trx = thr_get_trx(thr);
+
+ old_vers = NULL;
if (!lock_clust_rec_cons_read_sees(clust_rec, clust_index,
trx->read_view)) {
@@ -2121,6 +2193,25 @@ row_sel_get_clust_rec_for_mysql(
clust_rec = old_vers;
}
+
+ /* If we had to go to an earlier version of row or the
+ secondary index record is delete marked, then it may be that
+ the secondary index record corresponding to clust_rec
+ (or old_vers) is not rec; in that case we must ignore
+ such row because in our snapshot rec would not have existed.
+ Remember that from rec we cannot see directly which transaction
+ id corresponds to it: we have to go to the clustered index
+ record. A query where we want to fetch all rows where
+ the secondary index value is in some interval would return
+ a wrong result if we would not drop rows which we come to
+ visit through secondary index records that would not really
+ exist in our snapshot. */
+
+ if ((old_vers || rec_get_deleted_flag(rec))
+ && !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
+ clust_rec, clust_index)) {
+ clust_rec = NULL;
+ }
}
*out_rec = clust_rec;
@@ -2609,8 +2700,10 @@ rec_loop:
goto next_rec;
}
-
- rec = clust_rec;
+
+ if (prebuilt->need_to_access_clustered) {
+ rec = clust_rec;
+ }
}
/* We found a qualifying row */
diff --git a/innobase/row/row0uins.c b/innobase/row/row0uins.c
index 68115895dbb..c9330318ac0 100644
--- a/innobase/row/row0uins.c
+++ b/innobase/row/row0uins.c
@@ -250,9 +250,12 @@ row_undo_ins_parse_undo_rec(
ut_ad(type == TRX_UNDO_INSERT_REC);
node->rec_type = type;
- /* NOTE that the table has to be explicitly released later */
node->table = dict_table_get_on_id(table_id, node->trx);
+ if (node->table == NULL) {
+ return;
+ }
+
clust_index = dict_table_get_first_index(node->table);
ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref),
@@ -280,9 +283,14 @@ row_undo_ins(
row_undo_ins_parse_undo_rec(node, thr);
- found = row_undo_search_clust_to_pcur(node, thr);
+ if (node->table == NULL) {
+ found = FALSE;
+ } else {
+ found = row_undo_search_clust_to_pcur(node, thr);
+ }
if (!found) {
+ trx_undo_rec_release(node->trx, node->undo_no);
return(DB_SUCCESS);
}
diff --git a/innobase/row/row0umod.c b/innobase/row/row0umod.c
index 2aa223a6186..70cf0fe5a32 100644
--- a/innobase/row/row0umod.c
+++ b/innobase/row/row0umod.c
@@ -534,9 +534,16 @@ row_undo_mod_parse_undo_rec(
&undo_no, &table_id);
node->rec_type = type;
- /* NOTE that the table has to be explicitly released later */
node->table = dict_table_get_on_id(table_id, thr_get_trx(thr));
+ /* TODO: other fixes associated with DROP TABLE + rollback in the
+ same table by another user */
+
+ if (node->table == NULL) {
+ /* Table was dropped */
+ return;
+ }
+
clust_index = dict_table_get_first_index(node->table);
ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
@@ -571,12 +578,18 @@ row_undo_mod(
row_undo_mod_parse_undo_rec(node, thr);
- found = row_undo_search_clust_to_pcur(node, thr);
+ if (node->table == NULL) {
+ found = FALSE;
+ } else {
+
+ found = row_undo_search_clust_to_pcur(node, thr);
+ }
if (!found) {
/* It is already undone, or will be undone by another query
- thread */
+ thread, or table was dropped */
+ trx_undo_rec_release(node->trx, node->undo_no);
node->state = UNDO_NODE_FETCH_NEXT;
return(DB_SUCCESS);
diff --git a/isam/_dbug.c b/isam/_dbug.c
index fd0a0b46562..d632d5931a5 100644
--- a/isam/_dbug.c
+++ b/isam/_dbug.c
@@ -85,7 +85,7 @@ void _nisam_print_key(FILE *stream, register N_KEYSEG *keyseg, const uchar *key)
key=end;
break;
case HA_KEYTYPE_INT24:
- VOID(fprintf(stream,"%ld",sint3korr(key)));
+ VOID(fprintf(stream,"%ld",(long) sint3korr(key)));
key=end;
break;
case HA_KEYTYPE_UINT24:
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 55234fcfe13..9221812ea65 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -16,11 +16,11 @@
MA 02111-1307, USA */
#define DONT_USE_RAID
+#include <global.h>
#if defined(__WIN__) || defined(_WIN32) || defined(_WIN64)
#include <winsock.h>
#include <odbcinst.h>
#endif
-#include <global.h>
#include <my_sys.h>
#include <mysys_err.h>
#include <m_string.h>
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index 1f116a6e313..d98c10a29a9 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -30,7 +30,7 @@ dist-hook:
$(INSTALL_DATA) $(srcdir)/t/*.test $(srcdir)/t/*.opt $(srcdir)/t/*.sh $(distdir)/t
$(INSTALL_DATA) $(srcdir)/include/*.inc $(distdir)/include
$(INSTALL_DATA) $(srcdir)/r/*.result $(srcdir)/r/*.require $(distdir)/r
- $(INSTALL_DATA) $(srcdir)/std_data/*.dat $(distdir)/std_data
+ $(INSTALL_DATA) $(srcdir)/std_data/*.dat $(srcdir)/std_data/*.001 $(distdir)/std_data
install-data-local:
$(mkinstalldirs) \
diff --git a/mysql-test/include/master-slave.inc b/mysql-test/include/master-slave.inc
index 69ab73db6b5..61077f898f6 100644
--- a/mysql-test/include/master-slave.inc
+++ b/mysql-test/include/master-slave.inc
@@ -9,5 +9,7 @@ connection master;
reset master;
connection slave;
reset slave;
+# Clean up old test tables
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
slave start;
@r/slave-running.result show status like 'Slave_running';
diff --git a/mysql-test/install_test_db.sh b/mysql-test/install_test_db.sh
index 6fd32d37cd0..049ac6b1cd7 100644
--- a/mysql-test/install_test_db.sh
+++ b/mysql-test/install_test_db.sh
@@ -25,8 +25,8 @@ then
data=var/slave-data
ldata=$fix_bin/var/slave-data
else
- data=var/lib
- ldata=$fix_bin/var/lib
+ data=var/master-data
+ ldata=$fix_bin/var/master-data
fi
mdata=$data/mysql
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index ccf1aa81270..ece2e42f40b 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -188,7 +188,7 @@ done
#--
MYRUN_DIR=$MYSQL_TEST_DIR/var/run
-MASTER_MYDDIR="$MYSQL_TEST_DIR/var/lib"
+MASTER_MYDDIR="$MYSQL_TEST_DIR/var/master-data"
MASTER_MYSOCK="$MYSQL_TMP_DIR/mysql-master.sock"
MASTER_MYPID="$MYRUN_DIR/mysqld.pid"
MASTER_MYLOG="$MYSQL_TEST_DIR/var/log/mysqld.log"
@@ -288,6 +288,8 @@ prompt_user ()
read unused
}
+# We can't use diff -u as this isn't portable
+
show_failed_diff ()
{
reject_file=r/$1.reject
@@ -296,7 +298,7 @@ show_failed_diff ()
then
echo "Below are the diffs between actual and expected results:"
echo "-------------------------------------------------------"
- $DIFF -u $result_file $reject_file
+ $DIFF -c $result_file $reject_file
echo "-------------------------------------------------------"
echo "Please e-mail the above, along with the output of mysqlbug"
echo "and any other relevant info to bugs@lists.mysql.com"
@@ -367,6 +369,8 @@ mysql_install_db () {
error "Could not install slave test DBs"
exit 1
fi
+ # Give mysqld some time to die.
+ sleep $SLEEP_TIME
return 0
}
@@ -488,7 +492,7 @@ start_slave()
--core \
--tmpdir=$MYSQL_TMP_DIR \
--language=english \
- --skip-innodb \
+ --skip-innodb --skip-slave-start \
$SMALL_SERVER \
$EXTRA_SLAVE_OPT $EXTRA_SLAVE_MYSQLD_OPT"
if [ x$DO_DDD = x1 ]
diff --git a/mysql-test/r/backup.result b/mysql-test/r/backup.result
index 5bfa1e9013e..2bbe15954dc 100644
--- a/mysql-test/r/backup.result
+++ b/mysql-test/r/backup.result
@@ -1,5 +1,5 @@
Table Op Msg_type Msg_text
-t1 backup error Failed copying .frm file: errno = X
+test.t1 backup error Failed copying .frm file: errno = X
test.t1 backup status Operation failed
Table Op Msg_type Msg_text
test.t1 backup status OK
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index ced1a3cd178..2c4a5cecbb1 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -30,3 +30,7 @@ Documentation 0
Host communication 0
kkkkkkkkkkk lllllllllll 3
Test Procedures 0
+1+1 a count(*)
+2 a 0
+1+1 a count(*)
+2 a 0
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index 319a9095cbc..f030b7fa763 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -466,3 +466,12 @@ id id3
1 1
2 2
100 2
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(20) default NULL,
+ KEY `a` (`a`)
+) TYPE=InnoDB
+a
+1
+2
+3
diff --git a/mysql-test/r/rpl000014.result b/mysql-test/r/rpl000014.result
index d2cb8ee5436..a47c3c91c1d 100644
--- a/mysql-test/r/rpl000014.result
+++ b/mysql-test/r/rpl000014.result
@@ -1,13 +1,13 @@
File Position Binlog_do_db Binlog_ignore_db
master-bin.001 73
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 1 master-bin.001 73 Yes 0 0
+127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 1 master-bin.001 73 No 0 0
+127.0.0.1 root 9999 1 master-bin.001 73 No 0 0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 1 master-bin.001 73 Yes 0 0
+127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 1 master-bin.001 173 Yes 0 0
+127.0.0.1 root 9999 1 master-bin.001 173 Yes 0 0
File Position Binlog_do_db Binlog_ignore_db
master-bin.001 73
n
diff --git a/mysql-test/r/rpl000015.result b/mysql-test/r/rpl000015.result
index 5899d76c82f..58487af27f8 100644
--- a/mysql-test/r/rpl000015.result
+++ b/mysql-test/r/rpl000015.result
@@ -3,11 +3,11 @@ master-bin.001 73
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
0 0 0 No 0 0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 test 3306 60 4 No 0 0
+127.0.0.1 test 9998 60 4 No 0 0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 60 4 No 0 0
+127.0.0.1 root 9999 60 4 No 0 0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 60 master-bin.001 73 Yes 0 0
+127.0.0.1 root 9999 60 master-bin.001 73 Yes 0 0
n
10
45
diff --git a/mysql-test/r/rpl000016.result b/mysql-test/r/rpl000016.result
index da9dccae9f4..abe4275a124 100644
--- a/mysql-test/r/rpl000016.result
+++ b/mysql-test/r/rpl000016.result
@@ -1,5 +1,5 @@
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 60 master-bin.001 216 Yes 0 0
+127.0.0.1 root 9999 60 master-bin.001 216 Yes 0 0
s
Could not break slave
Tried hard
@@ -10,7 +10,7 @@ master-bin.003
Log_name
master-bin.003
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 60 master-bin.003 184 Yes 0 0
+127.0.0.1 root 9999 60 master-bin.003 184 Yes 0 0
m
34
65
@@ -25,6 +25,6 @@ master-bin.006
File Position Binlog_do_db Binlog_ignore_db
master-bin.006 131
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9306 60 master-bin.006 131 Yes 0 0
+127.0.0.1 root 9999 60 master-bin.006 131 Yes 0 0
count(*)
100
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index 8c50570a31d..ce2e5d4f58d 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -67,3 +67,16 @@ t1 0 PRIMARY 1 f1 A 1 NULL NULL
t1 0 PRIMARY 2 f2 A 3 NULL NULL
t1 0 PRIMARY 3 f3 A 9 NULL NULL
t1 0 PRIMARY 4 f4 A 18 NULL NULL
+Table Create Table
+t1 CREATE TEMPORARY TABLE `t1` (
+ `a` int(11) NOT NULL default '0'
+) TYPE=MyISAM
+Table Create Table
+t2 CREATE TEMPORARY TABLE `t2` (
+ `a` int(11) NOT NULL default '0'
+) TYPE=MyISAM
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `test_set` set('val1','val2','val3') NOT NULL default '',
+ `name` char(20) default 'O''Brien'
+) TYPE=MyISAM COMMENT='it''s a table'
diff --git a/mysql-test/r/shw000001.result b/mysql-test/r/shw000001.result
deleted file mode 100644
index c8056c74f0b..00000000000
--- a/mysql-test/r/shw000001.result
+++ /dev/null
@@ -1,5 +0,0 @@
-Table Create Table
-t1 CREATE TABLE `t1` (
- `test_set` set('val1','val2','val3') NOT NULL default '',
- `name` char(20) default 'O''Brien'
-) TYPE=MyISAM COMMENT='it''s a table'
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index edd3c1fff7e..e75841dc6d0 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -2,6 +2,7 @@
# Test of group (Failed for Lars Hoss <lh@pbm.de>)
#
+drop table if exists t1,t2;
CREATE TABLE t1 (
spID int(10) unsigned,
userID int(10) unsigned,
@@ -208,3 +209,14 @@ select value,description,bug_id from t2 left join t1 on t2.program=t1.product an
select value,description,COUNT(bug_id) from t2 left join t1 on t2.program=t1.product and t2.value=t1.component where program="AAAAA" group by value;
drop table t1,t2;
+
+#
+# Problem with functions and group functions when no matching rows
+#
+
+create table t1 (foo int);
+insert into t1 values (1);
+select 1+1, "a",count(*) from t1 where foo in (2);
+insert into t1 values (1);
+select 1+1,"a",count(*) from t1 where foo in (2);
+drop table t1;
diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test
index 60e29ca33c4..cef53ce8165 100644
--- a/mysql-test/t/innodb.test
+++ b/mysql-test/t/innodb.test
@@ -456,3 +456,23 @@ commit;
select id,id3 from t1;
UNLOCK TABLES;
DROP TABLE t1;
+
+#
+# Test prefix key
+#
+--error 1089
+create table t1 (a char(20), unique (a(5))) type=innodb;
+create table t1 (a char(20), index (a(5))) type=innodb;
+show create table t1;
+drop table t1;
+
+#
+# Test using temporary table and auto_increment
+#
+
+create temporary table t1 (a int not null auto_increment, primary key(a)) type=innodb;
+insert into t1 values (NULL),(NULL),(NULL);
+delete from t1 where a=3;
+insert into t1 values (NULL);
+select * from t1;
+drop table t1;
diff --git a/mysql-test/t/rpl000014.test b/mysql-test/t/rpl000014.test
index ca83342c41d..b501d63b10e 100644
--- a/mysql-test/t/rpl000014.test
+++ b/mysql-test/t/rpl000014.test
@@ -4,14 +4,18 @@ show master status;
save_master_pos;
connection slave;
sync_with_master;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
change master to master_log_pos=73;
slave stop;
change master to master_log_pos=73;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
slave start;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
change master to master_log_pos=173;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
connection master;
show master status;
diff --git a/mysql-test/t/rpl000015.test b/mysql-test/t/rpl000015.test
index b6e3ceaf419..73a10bed7b3 100644
--- a/mysql-test/t/rpl000015.test
+++ b/mysql-test/t/rpl000015.test
@@ -8,12 +8,15 @@ connection slave;
reset slave;
show slave status;
change master to master_host='127.0.0.1';
+--replace_result 3306 9998 9306 9999 3334 9999 3335 9999
show slave status;
eval change master to master_host='127.0.0.1',master_user='root',
master_password='',master_port=$MASTER_MYPORT;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
slave start;
sync_with_master;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
connection master;
drop table if exists t1;
diff --git a/mysql-test/t/rpl000016.test b/mysql-test/t/rpl000016.test
index b322858f388..7b46bc75498 100644
--- a/mysql-test/t/rpl000016.test
+++ b/mysql-test/t/rpl000016.test
@@ -22,6 +22,7 @@ insert into t1 values('Could not break slave'),('Tried hard');
save_master_pos;
connection slave;
sync_with_master;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
select * from t1;
connection master;
@@ -67,6 +68,7 @@ insert into t2 values (65);
save_master_pos;
connection slave;
sync_with_master;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
select * from t2;
connection master;
@@ -86,6 +88,7 @@ connection slave;
slave stop;
slave start;
sync_with_master;
+--replace_result 9306 9999 3334 9999 3335 9999
show slave status;
# because of concurrent insert, the table may not be up to date
# if we do not lock
diff --git a/mysql-test/t/rpl000018-master.sh b/mysql-test/t/rpl000018-master.sh
index 71f0f12d0c5..e570f106ec6 100755
--- a/mysql-test/t/rpl000018-master.sh
+++ b/mysql-test/t/rpl000018-master.sh
@@ -1,3 +1,3 @@
-rm -f $MYSQL_TEST_DIR/var/lib/master-bin.*
-cp $MYSQL_TEST_DIR/std_data/master-bin.001 $MYSQL_TEST_DIR/var/lib/
-echo ./master-bin.001 > $MYSQL_TEST_DIR/var/lib/master-bin.index
+rm -f $MYSQL_TEST_DIR/var/master-data/master-bin.*
+cp $MYSQL_TEST_DIR/std_data/master-bin.001 $MYSQL_TEST_DIR/var/master-data/
+echo ./master-bin.001 > $MYSQL_TEST_DIR/var/master-data/master-bin.index
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index d4be1a6d25a..f4f58c8c885 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -48,3 +48,20 @@ show index from t1;
repair table t1;
show index from t1;
drop table t1;
+
+#
+# Test of SHOW CREATE
+#
+
+create temporary table t1 (a int not null);
+show create table t1;
+alter table t1 rename t2;
+show create table t2;
+drop table t2;
+
+create table t1 (
+ test_set set( 'val1', 'val2', 'val3' ) not null default '',
+ name char(20) default 'O''Brien'
+ ) comment = 'it\'s a table' ;
+show create table t1 ;
+drop table t1;
diff --git a/mysql-test/t/shw000001.test b/mysql-test/t/shw000001.test
deleted file mode 100644
index 6b24d8a44c7..00000000000
--- a/mysql-test/t/shw000001.test
+++ /dev/null
@@ -1,8 +0,0 @@
-use test;
-drop table if exists t1;
-create table t1 (
- test_set set( 'val1', 'val2', 'val3' ) not null default '',
- name char(20) default 'O''Brien'
- ) comment = 'it\'s a table' ;
-show create table t1 ;
-drop table t1;
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index da02ec3da18..d63ddbf3702 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -232,7 +232,7 @@ static inline void link_file_to_changed(SEC_LINK *next)
}
-#ifndef DBUG_OFF
+#if !defined(DBUG_OFF) && !defined(EXTRA_DEBUG)
#define DBUG_OFF /* This should work */
#endif
@@ -327,7 +327,7 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
}
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("exec",test_key_cache("start of key_cache_write",1););
+ DBUG_EXECUTE("check_keycache",test_key_cache("start of key_cache_write",1););
#endif
if (_my_disk_blocks > 0)
{ /* We have key_cacheing */
@@ -367,7 +367,7 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
}
end:
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("exec",test_key_cache("end of key_cache_write",1););
+ DBUG_EXECUTE("check_keycache",test_key_cache("end of key_cache_write",1););
#endif
return(error);
} /* key_cache_write */
@@ -381,7 +381,7 @@ static SEC_LINK *find_key_block(int file, my_off_t filepos, int *error)
reg1 SEC_LINK *next,**start;
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("exec",test_key_cache("start of find_key_block",0););
+ DBUG_EXECUTE("check_keycache",test_key_cache("start of find_key_block",0););
#endif
*error=0;
@@ -459,7 +459,7 @@ static SEC_LINK *find_key_block(int file, my_off_t filepos, int *error)
}
_my_used_last=next;
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("exec",test_key_cache("end of find_key_block",0););
+ DBUG_EXECUTE("check_keycache",test_key_cache("end of find_key_block",0););
#endif
return next;
} /* find_key_block */
@@ -529,7 +529,7 @@ int flush_key_blocks(File file, enum flush_type type)
pthread_mutex_lock(&THR_LOCK_keycache);
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("exec",test_key_cache("start of flush_key_blocks",0););
+ DBUG_EXECUTE("check_keycache",test_key_cache("start of flush_key_blocks",0););
#endif
cache=cache_buff; /* If no key cache */
if (_my_disk_blocks > 0 &&
@@ -607,7 +607,7 @@ int flush_key_blocks(File file, enum flush_type type)
}
}
#ifndef DBUG_OFF
- DBUG_EXECUTE("exec",test_key_cache("end of flush_key_blocks",0););
+ DBUG_EXECUTE("check_keycache",test_key_cache("end of flush_key_blocks",0););
#endif
pthread_mutex_unlock(&THR_LOCK_keycache);
if (cache != cache_buff)
diff --git a/mysys/mf_qsort.c b/mysys/mf_qsort.c
index e029e148ddb..7074abac96e 100644
--- a/mysys/mf_qsort.c
+++ b/mysys/mf_qsort.c
@@ -1,40 +1,28 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
+/* Copyright (C) 1991, 1992, 1996, 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Written by Douglas C. Schmidt (schmidt@ics.uci.edu).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA */
-
-/* Plug-compatible replacement for UNIX qsort.
- Copyright (C) 1989 Free Software Foundation, Inc.
- Written by Douglas C. Schmidt (schmidt@ics.uci.edu)
- Optimized and modyfied for mysys by monty.
-This file is part of GNU CC.
-
-GNU QSORT is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 1, or (at your option)
-any later version.
-
-GNU QSORT is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
-You should have received a copy of the GNU General Public License
-along with GNU QSORT; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+/*
+ Modifications by monty:
+ - Uses mysys include files
+ - Small fixes to make the it a bit faster
+ - Can be compiled with a cmp function that takes one extra argument.
+*/
#include "mysys_priv.h"
@@ -46,32 +34,36 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
#endif
/* Byte-wise swap two items of size SIZE. */
-#define SWAP(A,B,SIZE) do {int sz=(int)(SIZE); char *a = (A); char *b = (B); \
- do { char _temp = *a;*a++ = *b;*b++ = _temp;} while (--sz);} while (0)
+#define SWAP(a, b, size) \
+ do \
+ { \
+ register size_t __size = (size); \
+ register char *__a = (a), *__b = (b); \
+ do \
+ { \
+ char __tmp = *__a; \
+ *__a++ = *__b; \
+ *__b++ = __tmp; \
+ } while (--__size > 0); \
+ } while (0)
-/* Copy SIZE bytes from item B to item A. */
-#define COPY(A,B,SIZE) {int sz = (int) (SIZE); do { *(A)++ = *(B)++; } while (--sz); }
+/* Discontinue quicksort algorithm when partition gets below this size.
+ This particular magic number was chosen to work best on a Sun 4/260. */
+#define MAX_THRESH 8
-/* This should be replaced by a standard ANSI macro. */
-#define BYTES_PER_WORD 8
+/* Stack node declarations used to store unfulfilled partition obligations. */
+typedef struct
+ {
+ char *lo;
+ char *hi;
+ } stack_node;
/* The next 4 #defines implement a very fast in-line stack abstraction. */
-#define STACK_SIZE (BYTES_PER_WORD * sizeof (long))
+#define STACK_SIZE (8 * sizeof(unsigned long int))
#define PUSH(LOW,HIGH) do {top->lo = LOW;top++->hi = HIGH;} while (0)
#define POP(LOW,HIGH) do {LOW = (--top)->lo;HIGH = top->hi;} while (0)
#define STACK_NOT_EMPTY (stack < top)
-/* Discontinue quicksort algorithm when partition gets below this size.
- This particular magic number was chosen to work best on a Sparc SLC. */
-#define MAX_THRESH 12
-
-/* Stack node declarations used to store unfulfilled partition obligations. */
-typedef struct
-{
- char *lo;
- char *hi;
-} stack_node;
-
/* Order size using quicksort. This implementation incorporates
four optimizations discussed in Sedgewick:
@@ -88,7 +80,7 @@ typedef struct
3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
insertion sort to order the MAX_THRESH items within each partition.
This is a big win, since insertion sort is faster for small, mostly
- sorted array segements.
+ sorted array segments.
4. The larger of the two sub-partitions is always pushed onto the
stack first, with the algorithm then concentrating on the
@@ -111,100 +103,98 @@ qsort_t qsort(void *base_ptr, size_t total_elems, size_t size, qsort_cmp cmp)
/* Allocating SIZE bytes for a pivot buffer facilitates a better
algorithm below since we can do comparisons directly on the pivot.
*/
- int max_thresh = (int) (MAX_THRESH * size);
+ size_t max_thresh = (size_t) (MAX_THRESH * size);
if (total_elems <= 1)
SORT_RETURN; /* Crashes on MSDOS if continues */
if (total_elems > MAX_THRESH)
{
char *lo = base_ptr;
- char *hi = lo + size * (total_elems - 1);
+ char *hi = &lo[size * (total_elems - 1)];
stack_node stack[STACK_SIZE]; /* Largest size needed for 32-bit int!!! */
stack_node *top = stack + 1;
- char *pivot_buffer = (char *) my_alloca ((int) size);
+ char *pivot = (char *) my_alloca ((int) size);
#ifdef HAVE_purify
stack[0].lo=stack[0].hi=0;
#endif
- while (STACK_NOT_EMPTY)
+ do
{
- char *left_ptr;
- char *right_ptr;
+ char *left_ptr,*right_ptr;
+
+ /* Select median value from among LO, MID, and HI. Rearrange
+ LO and HI so the three values are sorted. This lowers the
+ probability of picking a pathological pivot value and
+ skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
+
+ char *mid = lo + size * (((ulong) (hi - lo) / (ulong) size) >> 1);
+
+ if (CMP(hi,lo) < 0)
+ SWAP (hi, lo, size);
+ if (CMP (mid, lo) < 0)
+ SWAP (mid, lo, size);
+ else if (CMP (hi, mid) < 0)
+ SWAP (mid, hi, size);
+ memcpy (pivot, mid, size);
+
+ left_ptr = lo + size;
+ right_ptr = hi - size;
+
+ /* Here's the famous ``collapse the walls'' section of quicksort.
+ Gotta like those tight inner loops! They are the main reason
+ that this algorithm runs much faster than others. */
+ do
{
- char *pivot = pivot_buffer;
+ while (CMP (left_ptr, pivot) < 0)
+ left_ptr += size;
+
+ while (CMP (pivot, right_ptr) < 0)
+ right_ptr -= size;
+
+ if (left_ptr < right_ptr)
{
- /* Select median value from among LO, MID, and HI. Rearrange
- LO and HI so the three values are sorted. This lowers the
- probability of picking a pathological pivot value and
- skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
-
- char *mid = lo + size * (((uint) (hi - lo) / (uint) size) >> 1);
-
- if (CMP(hi,lo) < 0)
- SWAP (hi, lo, size);
- if (CMP (mid, lo) < 0)
- SWAP (mid, lo, size);
- else if (CMP (hi, mid) < 0)
- SWAP (mid, hi, size);
- COPY (pivot, mid, size);
- pivot = pivot_buffer;
+ SWAP (left_ptr, right_ptr, size);
+ left_ptr += size;
+ right_ptr -= size;
}
- left_ptr = lo + size;
- right_ptr = hi - size;
-
- /* Here's the famous ``collapse the walls'' section of quicksort.
- Gotta like those tight inner loops! They are the main reason
- that this algorithm runs much faster than others. */
- do
+ else if (left_ptr == right_ptr)
{
- while (CMP (left_ptr, pivot) < 0)
- left_ptr += size;
-
- while (CMP (pivot, right_ptr) < 0)
- right_ptr -= size;
-
- if (left_ptr < right_ptr)
- {
- SWAP (left_ptr, right_ptr, size);
- left_ptr += size;
- right_ptr -= size;
- }
- else if (left_ptr == right_ptr)
- {
- left_ptr += size;
- right_ptr -= size;
- break;
- }
+ left_ptr += size;
+ right_ptr -= size;
+ break;
}
- while (left_ptr <= right_ptr);
+ else
+ break; /* left_ptr > right_ptr */
}
+ while (left_ptr <= right_ptr);
+
/* Set up pointers for next iteration. First determine whether
left and right partitions are below the threshold size. If so,
ignore one or both. Otherwise, push the larger partition's
bounds on the stack and continue sorting the smaller one. */
- if ((right_ptr - lo) <= max_thresh)
+ if ((size_t) (right_ptr - lo) <= max_thresh)
{
- if ((hi - left_ptr) <= max_thresh) /* Ignore both small parts. */
- POP (lo, hi);
- else /* Ignore small left part. */
- lo = left_ptr;
+ if ((size_t) (hi - left_ptr) <= max_thresh)
+ POP (lo, hi); /* Ignore both small partitions. */
+ else
+ lo = left_ptr; /* Ignore small left part. */
}
- else if ((hi - left_ptr) <= max_thresh) /* Ignore small right part. */
- hi = right_ptr;
- else if ((right_ptr - lo) > (hi - left_ptr)) /* Push larger left part */
+ else if ((size_t) (hi - left_ptr) <= max_thresh)
+ hi = right_ptr; /* Ignore small right partition. */
+ else if ((right_ptr - lo) > (hi - left_ptr))
{
- PUSH (lo, right_ptr);
+ PUSH (lo, right_ptr); /* Push larger left part */
lo = left_ptr;
}
- else /* Push larger right part */
+ else
{
- PUSH (left_ptr, hi);
+ PUSH (left_ptr, hi); /* Push larger right part */
hi = right_ptr;
}
- }
- my_afree(pivot_buffer);
+ } while (STACK_NOT_EMPTY);
+ my_afree(pivot);
}
/* Once the BASE_PTR array is partially sorted by quicksort the rest
@@ -215,9 +205,9 @@ qsort_t qsort(void *base_ptr, size_t total_elems, size_t size, qsort_cmp cmp)
{
char *end_ptr = (char*) base_ptr + size * (total_elems - 1);
- char *run_ptr;
char *tmp_ptr = (char*) base_ptr;
char *thresh = min (end_ptr, (char*) base_ptr + max_thresh);
+ register char *run_ptr;
/* Find smallest element in first threshold and place it at the
array's beginning. This is the smallest array element,
@@ -230,18 +220,18 @@ qsort_t qsort(void *base_ptr, size_t total_elems, size_t size, qsort_cmp cmp)
if (tmp_ptr != (char*) base_ptr)
SWAP (tmp_ptr, (char*) base_ptr, size);
- /* Insertion sort, running from left-hand-side up to `right-hand-side.'
- Pretty much straight out of the original GNU qsort routine. */
+ /* Insertion sort, running from left-hand-side up to right-hand-side. */
for (run_ptr = (char*) base_ptr + size;
- (tmp_ptr = run_ptr += size) <= end_ptr; )
+ (run_ptr += size) <= end_ptr; )
{
- while (CMP (run_ptr, tmp_ptr -= size) < 0) ;
-
- if ((tmp_ptr += size) != run_ptr)
+ if (CMP (run_ptr, (tmp_ptr = run_ptr-size)) < 0)
{
char *trav;
+ while (CMP (run_ptr, tmp_ptr -= size) < 0) ;
+ tmp_ptr += size;
+ /* Shift down all smaller elements, put found element in 'run_ptr' */
for (trav = run_ptr + size; --trav >= run_ptr;)
{
char c = *trav;
@@ -252,7 +242,6 @@ qsort_t qsort(void *base_ptr, size_t total_elems, size_t size, qsort_cmp cmp)
*hi = c;
}
}
-
}
}
SORT_RETURN;
diff --git a/mysys/my_error.c b/mysys/my_error.c
index 6887126e6ec..4aa946aa6c3 100644
--- a/mysys/my_error.c
+++ b/mysys/my_error.c
@@ -66,7 +66,7 @@ int my_error(int nr,myf MyFlags, ...)
while (isdigit(*tpos) || *tpos == '.' || *tpos == '-')
tpos++;
if (*tpos == 'l') /* Skipp 'l' argument */
- *tpos++;
+ tpos++;
if (*tpos == 's') /* String parameter */
{
par = va_arg(ap, char *);
diff --git a/mysys/raid.cc b/mysys/raid.cc
index a92647d1d95..48aa5cdb134 100644
--- a/mysys/raid.cc
+++ b/mysys/raid.cc
@@ -788,7 +788,7 @@ Fstat(int fd, MY_STAT *stat_area, myf MyFlags )
DBUG_PRINT("enter",("fd: %d MyFlags: %d",fd,MyFlags));
uint i;
int error=0;
- MY_STAT my_stat;
+ MY_STAT status;
stat_area->st_size=0;
stat_area->st_mtime=0;
stat_area->st_atime=0;
@@ -796,12 +796,12 @@ Fstat(int fd, MY_STAT *stat_area, myf MyFlags )
for(i=0 ; i < _raid_chunks ; i++)
{
- if (my_fstat(_fd_vector[i],&my_stat,MyFlags))
+ if (my_fstat(_fd_vector[i],&status,MyFlags))
error=1;
- stat_area->st_size+=my_stat.st_size;
- set_if_bigger(stat_area->st_mtime,my_stat.st_mtime);
- set_if_bigger(stat_area->st_atime,my_stat.st_atime);
- set_if_bigger(stat_area->st_ctime,my_stat.st_ctime);
+ stat_area->st_size+=status.st_size;
+ set_if_bigger(stat_area->st_mtime,status.st_mtime);
+ set_if_bigger(stat_area->st_atime,status.st_atime);
+ set_if_bigger(stat_area->st_ctime,status.st_ctime);
}
DBUG_RETURN(error);
}
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 7dc5c90ea25..84dac59018b 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -62,9 +62,10 @@ do
done
for i in extra/comp_err extra/replace extra/perror extra/resolveip \
- extra/my_print_defaults isam/isamchk isam/pack_isam myisam/myisamchk \
+ extra/my_print_defaults extra/resolve_stack_dump \
+ isam/isamchk isam/pack_isam myisam/myisamchk \
myisam/myisampack sql/mysqld sql/mysqlbinlog \
- client/mysql sql/mysqld client/mysqlshow \
+ client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \
client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \
client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \
client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest
@@ -104,7 +105,7 @@ rm -f $BASE/share/mysql/Makefile* $BASE/share/mysql/*/*.OLD
$CP mysql-test/mysql-test-run mysql-test/install_test_db $BASE/mysql-test/
$CP mysql-test/README $BASE/mysql-test/README
$CP mysql-test/include/*.inc $BASE/mysql-test/include
-$CP mysql-test/std_data/*.dat $BASE/mysql-test/std_data
+$CP mysql-test/std_data/*.dat mysql-test/std_data/*.001 $BASE/mysql-test/std_data
$CP mysql-test/t/*.test mysql-test/t/*.opt mysql-test/t/*.sh $BASE/mysql-test/t
$CP mysql-test/r/*.result mysql-test/r/*.require $BASE/mysql-test/r
diff --git a/scripts/mysqlhotcopy.sh b/scripts/mysqlhotcopy.sh
index da8c6fced53..1c26bf8e2d6 100644
--- a/scripts/mysqlhotcopy.sh
+++ b/scripts/mysqlhotcopy.sh
@@ -30,7 +30,7 @@ mysqlhotcopy - fast on-line hot-backup utility for local MySQL databases and tab
mysqlhotcopy --method='scp -Bq -i /usr/home/foo/.ssh/identity' --user=root --password=secretpassword \
db_1./^nice_table/ user@some.system.dom:~/path/to/new_directory
-WARNING: THIS IS VERY MUCH A FIRST-CUT ALPHA. Comments/patches welcome.
+WARNING: THIS PROGRAM IS STILL IN BETA. Comments/patches welcome.
=cut
diff --git a/sql-bench/Results/ATIS-mysql-NT_4.0 b/sql-bench/Results/ATIS-mysql-NT_4.0
index 0b6f896a13b..413a5e512bf 100644
--- a/sql-bench/Results/ATIS-mysql-NT_4.0
+++ b/sql-bench/Results/ATIS-mysql-NT_4.0
@@ -1,20 +1,20 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 0:38:04
+Testing server 'MySQL 3.23.37' at 2001-04-19 13:49:16
ATIS table test
Creating tables
-Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_table (28): 0 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
-Time to insert (9768): 5 wallclock secs ( 0.95 usr 1.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert (9768): 6 wallclock secs ( 0.81 usr 1.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
-Time for select_simple_join (500): 3 wallclock secs ( 1.59 usr 0.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_join (100): 4 wallclock secs ( 1.39 usr 0.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key_prefix_join (100): 19 wallclock secs (12.20 usr 4.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_distinct (800): 17 wallclock secs ( 4.72 usr 1.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (2800): 20 wallclock secs ( 3.14 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_simple_join (500): 3 wallclock secs ( 1.52 usr 0.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_join (100): 4 wallclock secs ( 1.41 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key_prefix_join (100): 18 wallclock secs (12.05 usr 4.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_distinct (800): 17 wallclock secs ( 4.72 usr 1.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_group (2800): 21 wallclock secs ( 3.06 usr 1.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 68 wallclock secs (24.02 usr 9.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 69 wallclock secs (23.58 usr 10.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/RUN-mysql-NT_4.0 b/sql-bench/Results/RUN-mysql-NT_4.0
index 43480bf0fe7..70d6fb2dc0f 100644
--- a/sql-bench/Results/RUN-mysql-NT_4.0
+++ b/sql-bench/Results/RUN-mysql-NT_4.0
@@ -1,96 +1,103 @@
-Benchmark DBD suite: 2.11a
-Date of test: 2001-01-17 23:59:27
-Running tests on: Windows NT Version 4.0
-Arguments:
+Benchmark DBD suite: 2.12
+Date of test: 2001-04-19 13:10:13
+Running tests on: Windows NT Version 4.0
+Arguments:
Comments: 2x Pentium III XEON 450MHZ, 512M
-Limits from:
-Server version: MySQL 3.23.31
+Limits from:
+Server version: MySQL 3.23.37
-alter-table: Total time: 2315 wallclock secs ( 0.78 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-ATIS: Total time: 68 wallclock secs (24.02 usr 9.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-big-tables: Total time: 80 wallclock secs (18.31 usr 19.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-connect: Total time: 183 wallclock secs (60.53 usr 49.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-create: Total time: 995 wallclock secs (13.64 usr 10.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-insert: Total time: 7164 wallclock secs (872.86 usr 441.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-select: Total time: 1297 wallclock secs (113.66 usr 43.80 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-wisconsin: Total time: 28 wallclock secs ( 7.94 usr 5.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+alter-table: Total time: 2342 wallclock secs ( 0.91 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+ATIS: Total time: 69 wallclock secs (23.58 usr 10.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+big-tables: Total time: 79 wallclock secs (17.44 usr 18.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+connect: Total time: 179 wallclock secs (58.89 usr 48.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+create: Total time: 897 wallclock secs (15.94 usr 10.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+insert: Total time: 6659 wallclock secs (1143.94 usr 544.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+select: Total time: 1556 wallclock secs (127.53 usr 47.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+wisconsin: Total time: 28 wallclock secs ( 7.95 usr 5.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
-alter_table_add 1225.00 0.47 0.25 0.00 992
-alter_table_drop 1039.00 0.19 0.11 0.00 496
-connect 33.00 11.47 9.97 0.00 10000
-connect+select_1_row 38.00 13.19 12.03 0.00 10000
-connect+select_simple 36.00 12.84 11.23 0.00 10000
-count 35.00 0.06 0.00 0.00 100
-count_distinct 80.00 0.94 0.31 0.00 2000
-count_distinct_big 214.00 23.03 7.88 0.00 120
-count_distinct_group 150.00 3.03 1.20 0.00 1000
-count_distinct_group_on_key 45.00 0.67 0.25 0.00 1000
-count_distinct_group_on_key_parts 150.00 3.31 0.97 0.00 1000
-count_group_on_key_parts 41.00 2.81 0.92 0.00 1000
-count_on_key 377.00 21.58 9.17 0.00 50100
-create+drop 142.00 3.30 2.89 0.00 10000
-create_MANY_tables 245.00 2.73 1.56 0.00 10000
-create_index 25.00 0.00 0.00 0.00 8
-create_key+drop 172.00 5.14 2.92 0.00 10000
-create_table 0.00 0.00 0.01 0.00 31
-delete_all 21.00 0.00 0.02 0.00 12
-delete_all_many_keys 1980.00 0.03 0.03 0.00 1
-delete_big 0.00 0.00 0.00 0.00 1
-delete_big_many_keys 1980.00 0.03 0.03 0.00 128
-delete_key 8.00 0.88 1.34 0.00 10000
-drop_index 25.00 0.00 0.00 0.00 8
-drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 187.00 1.08 1.38 0.00 10000
-insert 234.00 35.02 48.52 0.00 350768
-insert_duplicates 59.00 8.92 14.09 0.00 100000
-insert_key 1853.00 13.92 13.81 0.00 100000
-insert_many_fields 22.00 0.64 0.39 0.00 2000
-insert_select_1_key 8.00 0.00 0.00 0.00 1
-insert_select_2_keys 12.00 0.00 0.00 0.00 1
-min_max 18.00 0.05 0.02 0.00 60
-min_max_on_key 193.00 35.67 14.80 0.00 85000
-multiple_value_insert 12.00 2.50 0.19 0.00 100000
-order_by_big 104.00 64.25 25.28 0.00 10
-order_by_big_key 95.00 69.14 26.05 0.00 10
-order_by_big_key2 90.00 63.38 26.20 0.00 10
-order_by_big_key_desc 96.00 68.61 26.58 0.00 10
-order_by_big_key_diff 100.00 65.05 24.69 0.00 10
-order_by_big_key_prefix 89.00 63.73 25.53 0.00 10
-order_by_key2_diff 11.00 5.53 2.23 0.00 500
-order_by_key_prefix 6.00 3.11 1.09 0.00 500
-order_by_range 9.00 3.02 1.25 0.00 500
-outer_join 118.00 0.00 0.00 0.00 10
-outer_join_found 106.00 0.02 0.00 0.00 10
-outer_join_not_found 58.00 0.02 0.00 0.00 500
-outer_join_on_key 41.00 0.03 0.00 0.00 10
-select_1_row 5.00 1.14 1.80 0.00 10000
-select_2_rows 6.00 0.91 2.03 0.00 10000
-select_big 146.00 83.48 34.54 0.00 10080
-select_column+column 6.00 0.88 1.83 0.00 10000
-select_diff_key 122.00 0.45 0.08 0.00 500
-select_distinct 17.00 4.72 1.55 0.00 800
-select_group 56.00 3.17 1.01 0.00 2911
-select_group_when_MANY_tables 249.00 1.39 2.03 0.00 10000
-select_join 4.00 1.39 0.55 0.00 100
-select_key 194.00 90.98 38.86 0.00 200000
-select_key2 202.00 92.78 37.67 0.00 200000
-select_key_prefix 199.00 93.61 38.05 0.00 200000
-select_key_prefix_join 19.00 12.20 4.53 0.00 100
-select_many_fields 55.00 17.65 18.90 0.00 2000
-select_range 187.00 27.39 9.33 0.00 410
-select_range_key2 29.00 10.31 3.64 0.00 25010
-select_range_prefix 28.00 10.05 4.22 0.00 25010
-select_simple 4.00 0.88 1.61 0.00 10000
-select_simple_join 3.00 1.59 0.67 0.00 500
-update_big 60.00 0.00 0.00 0.00 10
-update_of_key 56.00 4.28 7.00 0.00 50000
-update_of_key_big 33.00 0.06 0.08 0.00 501
-update_of_primary_key_many_keys 580.00 0.03 0.05 0.00 256
-update_with_key 188.00 27.97 41.06 0.00 300000
-update_with_key_prefix 59.00 9.42 12.14 0.00 100000
-wisc_benchmark 9.00 5.44 1.66 0.00 114
-TOTALS 14098.00 1111.56 580.08 0.00 2046247
+alter_table_add 1246.00 0.52 0.22 0.00 992
+alter_table_drop 1043.00 0.27 0.06 0.00 496
+connect 33.00 12.13 9.89 0.00 10000
+connect+select_1_row 39.00 12.91 11.73 0.00 10000
+connect+select_simple 37.00 12.36 12.34 0.00 10000
+count 36.00 0.03 0.02 0.00 100
+count_distinct 48.00 0.56 0.20 0.00 1000
+count_distinct_2 52.00 0.45 0.27 0.00 1000
+count_distinct_big 205.00 22.49 8.11 0.00 120
+count_distinct_group 145.00 2.67 1.19 0.00 1000
+count_distinct_group_on_key 48.00 0.58 0.17 0.00 1000
+count_distinct_group_on_key_parts 145.00 3.02 0.94 0.00 1000
+count_distinct_key_prefix 39.00 0.52 0.17 0.00 1000
+count_group_on_key_parts 40.00 2.73 0.83 0.00 1000
+count_on_key 405.00 22.87 8.23 0.00 50100
+create+drop 134.00 3.78 2.89 0.00 10000
+create_MANY_tables 231.00 3.27 1.58 0.00 10000
+create_index 26.00 0.00 0.00 0.00 8
+create_key+drop 167.00 5.98 2.77 0.00 10000
+create_table 0.00 0.02 0.02 0.00 31
+delete_all 19.00 0.00 0.00 0.00 12
+delete_all_many_keys 1431.00 0.00 0.03 0.00 1
+delete_big 0.00 0.00 0.00 0.00 1
+delete_big_many_keys 1431.00 0.00 0.03 0.00 128
+delete_key 7.00 1.14 1.42 0.00 10000
+drop_index 27.00 0.00 0.00 0.00 8
+drop_table 0.00 0.00 0.00 0.00 28
+drop_table_when_MANY_tables 169.00 1.41 1.42 0.00 10000
+insert 235.00 33.44 47.68 0.00 350768
+insert_duplicates 59.00 9.02 12.91 0.00 100000
+insert_key 1440.00 13.86 11.92 0.00 100000
+insert_many_fields 22.00 0.64 0.45 0.00 2000
+insert_select_1_key 8.00 0.00 0.00 0.00 1
+insert_select_2_keys 12.00 0.00 0.00 0.00 1
+min_max 19.00 0.00 0.02 0.00 60
+min_max_on_key 196.00 37.78 15.60 0.00 85000
+multiple_value_insert 9.00 2.53 0.19 0.00 100000
+order_by_big 101.00 61.84 25.81 0.00 10
+order_by_big_key 93.00 66.86 26.16 0.00 10
+order_by_big_key2 88.00 62.99 24.97 0.00 10
+order_by_big_key_desc 94.00 67.34 25.92 0.00 10
+order_by_big_key_diff 98.00 62.45 25.16 0.00 10
+order_by_big_key_prefix 88.00 62.72 25.19 0.00 10
+order_by_key2_diff 11.00 5.53 2.19 0.00 500
+order_by_key_prefix 6.00 2.94 1.08 0.00 500
+order_by_range 9.00 2.92 1.23 0.00 500
+outer_join 120.00 0.00 0.00 0.00 10
+outer_join_found 106.00 0.00 0.00 0.00 10
+outer_join_not_found 56.00 0.03 0.00 0.00 500
+outer_join_on_key 41.00 0.00 0.00 0.00 10
+select_1_row 5.00 1.23 1.69 0.00 10000
+select_2_rows 6.00 1.00 2.20 0.00 10000
+select_big 139.00 80.53 32.12 0.00 10080
+select_column+column 5.00 1.08 1.75 0.00 10000
+select_diff_key 127.00 0.67 0.05 0.00 500
+select_distinct 17.00 4.72 1.78 0.00 800
+select_group 59.00 3.11 1.34 0.00 2911
+select_group_when_MANY_tables 196.00 1.48 1.77 0.00 10000
+select_join 4.00 1.41 0.53 0.00 100
+select_key 196.00 103.61 37.28 0.00 200000
+select_key2 205.00 93.56 39.66 0.00 200000
+select_key2_return_key 198.00 90.06 35.53 0.00 200000
+select_key2_return_prim 203.00 91.61 35.25 0.00 200000
+select_key_prefix 201.00 93.56 39.13 0.00 200000
+select_key_prefix_join 18.00 12.05 4.75 0.00 100
+select_key_return_key 195.00 89.05 37.13 0.00 200000
+select_many_fields 54.00 16.80 18.40 0.00 2000
+select_query_cache 90.00 5.81 1.91 0.00 10000
+select_query_cache2 91.00 5.55 1.86 0.00 10000
+select_range 186.00 27.06 9.17 0.00 410
+select_range_key2 30.00 10.39 3.47 0.00 25010
+select_range_prefix 28.00 10.19 4.06 0.00 25010
+select_simple 4.00 0.80 1.48 0.00 10000
+select_simple_join 3.00 1.52 0.66 0.00 500
+update_big 66.00 0.00 0.00 0.00 10
+update_of_key 56.00 4.66 6.17 0.00 50000
+update_of_key_big 32.00 0.05 0.11 0.00 501
+update_of_primary_key_many_keys 447.00 0.03 0.00 0.00 256
+update_with_key 190.00 27.05 40.97 0.00 300000
+update_with_key_prefix 58.00 9.02 13.19 0.00 100000
+wisc_benchmark 9.00 5.77 1.45 0.00 114
+TOTALS 13232.00 1396.03 685.87 0.00 2667247
diff --git a/sql-bench/Results/alter-table-mysql-NT_4.0 b/sql-bench/Results/alter-table-mysql-NT_4.0
index 3a75bf1a366..98863586928 100644
--- a/sql-bench/Results/alter-table-mysql-NT_4.0
+++ b/sql-bench/Results/alter-table-mysql-NT_4.0
@@ -1,16 +1,16 @@
-Testing server 'MySQL 3.23.31' at 2001-01-17 23:59:28
+Testing server 'MySQL 3.23.37' at 2001-04-19 13:10:14
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
-Time for insert (1000) 1 wallclock secs ( 0.13 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert (1000) 0 wallclock secs ( 0.13 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for alter_table_add (992): 1225 wallclock secs ( 0.47 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for alter_table_add (992): 1246 wallclock secs ( 0.52 usr 0.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for create_index (8): 25 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_index (8): 26 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for drop_index (8): 25 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for drop_index (8): 27 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for alter_table_drop (496): 1039 wallclock secs ( 0.19 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for alter_table_drop (496): 1043 wallclock secs ( 0.27 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 2315 wallclock secs ( 0.78 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 2342 wallclock secs ( 0.91 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/big-tables-mysql-NT_4.0 b/sql-bench/Results/big-tables-mysql-NT_4.0
index 8654e5711dd..7f3510f396c 100644
--- a/sql-bench/Results/big-tables-mysql-NT_4.0
+++ b/sql-bench/Results/big-tables-mysql-NT_4.0
@@ -1,19 +1,19 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 0:39:12
+Testing server 'MySQL 3.23.37' at 2001-04-19 13:50:25
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
-Time to select_many_fields(1000): 20 wallclock secs ( 8.56 usr 9.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_many_fields(1000): 19 wallclock secs ( 8.02 usr 9.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
-Time to select_many_fields(1000): 35 wallclock secs ( 9.09 usr 9.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_many_fields(1000): 35 wallclock secs ( 8.78 usr 8.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
-Time to insert_many_fields(1000): 4 wallclock secs ( 0.47 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert_many_fields(1000): 3 wallclock secs ( 0.48 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
-Time to insert_many_fields(1000): 18 wallclock secs ( 0.17 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert_many_fields(1000): 19 wallclock secs ( 0.16 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 80 wallclock secs (18.31 usr 19.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 79 wallclock secs (17.44 usr 18.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/connect-mysql-NT_4.0 b/sql-bench/Results/connect-mysql-NT_4.0
index cf366c0f547..3c3bfa1e112 100644
--- a/sql-bench/Results/connect-mysql-NT_4.0
+++ b/sql-bench/Results/connect-mysql-NT_4.0
@@ -1,30 +1,30 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 0:40:33
+Testing server 'MySQL 3.23.37' at 2001-04-19 13:51:45
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
-Time to connect (10000): 33 wallclock secs (11.47 usr 9.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to connect (10000): 33 wallclock secs (12.13 usr 9.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
-Time for connect+select_simple (10000): 36 wallclock secs (12.84 usr 11.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for connect+select_simple (10000): 37 wallclock secs (12.36 usr 12.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
-Time for select_simple (10000): 4 wallclock secs ( 0.88 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_simple (10000): 4 wallclock secs ( 0.80 usr 1.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
-Time to connect+select_1_row (10000): 38 wallclock secs (13.19 usr 12.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to connect+select_1_row (10000): 39 wallclock secs (12.91 usr 11.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
-Time to select_1_row (10000): 5 wallclock secs ( 1.14 usr 1.80 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_1_row (10000): 5 wallclock secs ( 1.23 usr 1.69 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
-Time to select_2_rows (10000): 6 wallclock secs ( 0.91 usr 2.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_2_rows (10000): 6 wallclock secs ( 1.00 usr 2.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test select with aritmetic (+)
-Time for select_column+column (10000): 6 wallclock secs ( 0.88 usr 1.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_column+column (10000): 5 wallclock secs ( 1.08 usr 1.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (65000 bytes)
-Time to select_big (10000): 55 wallclock secs (19.23 usr 8.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_big (10000): 50 wallclock secs (17.36 usr 6.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 183 wallclock secs (60.53 usr 49.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 179 wallclock secs (58.89 usr 48.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/create-mysql-NT_4.0 b/sql-bench/Results/create-mysql-NT_4.0
index 326148f9bbd..2191f28735a 100644
--- a/sql-bench/Results/create-mysql-NT_4.0
+++ b/sql-bench/Results/create-mysql-NT_4.0
@@ -1,18 +1,18 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 0:43:36
+Testing server 'MySQL 3.23.37' at 2001-04-19 13:54:45
Testing the speed of creating and droping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
-Time for create_MANY_tables (10000): 245 wallclock secs ( 2.73 usr 1.56 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_MANY_tables (10000): 231 wallclock secs ( 3.27 usr 1.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
-Time to select_group_when_MANY_tables (10000): 249 wallclock secs ( 1.39 usr 2.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to select_group_when_MANY_tables (10000): 196 wallclock secs ( 1.48 usr 1.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
-Time for drop_table_when_MANY_tables (10000): 187 wallclock secs ( 1.08 usr 1.38 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for drop_table_when_MANY_tables (10000): 169 wallclock secs ( 1.41 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
-Time for create+drop (10000): 142 wallclock secs ( 3.30 usr 2.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for create_key+drop (10000): 172 wallclock secs ( 5.14 usr 2.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 995 wallclock secs (13.64 usr 10.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create+drop (10000): 134 wallclock secs ( 3.78 usr 2.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_key+drop (10000): 167 wallclock secs ( 5.98 usr 2.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 897 wallclock secs (15.94 usr 10.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/insert-mysql-NT_4.0 b/sql-bench/Results/insert-mysql-NT_4.0
index 1e5e47b994e..dddbf6d4c19 100644
--- a/sql-bench/Results/insert-mysql-NT_4.0
+++ b/sql-bench/Results/insert-mysql-NT_4.0
@@ -1,4 +1,4 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 1:00:12
+Testing server 'MySQL 3.23.37' at 2001-04-19 14:09:43
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
@@ -8,55 +8,58 @@ Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
-Time for insert (300000): 203 wallclock secs (30.27 usr 42.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert (300000): 203 wallclock secs (29.19 usr 40.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert of duplicates
-Time for insert_duplicates (100000): 59 wallclock secs ( 8.92 usr 14.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert_duplicates (100000): 59 wallclock secs ( 9.02 usr 12.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
-Time for select_big (10:3000000): 90 wallclock secs (63.84 usr 25.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key (10:3000000): 95 wallclock secs (69.14 usr 26.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key_desc (10:3000000): 96 wallclock secs (68.61 usr 26.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key_prefix (10:3000000): 89 wallclock secs (63.73 usr 25.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key2 (10:3000000): 90 wallclock secs (63.38 usr 26.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key_diff (10:3000000): 100 wallclock secs (65.05 usr 24.69 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big (10:3000000): 104 wallclock secs (64.25 usr 25.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_range (500:125750): 9 wallclock secs ( 3.02 usr 1.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key_prefix (500:125750): 6 wallclock secs ( 3.11 usr 1.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key2_diff (500:250500): 11 wallclock secs ( 5.53 usr 2.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_diff_key (500:1000): 122 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range_prefix (5010:42084): 15 wallclock secs ( 4.52 usr 1.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range_key2 (5010:42084): 16 wallclock secs ( 4.67 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key_prefix (200000): 199 wallclock secs (93.61 usr 38.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key (200000): 194 wallclock secs (90.98 usr 38.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key2 (200000): 202 wallclock secs (92.78 usr 37.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_big (10:3000000): 88 wallclock secs (62.84 usr 25.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_big_key (10:3000000): 93 wallclock secs (66.86 usr 26.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_big_key_desc (10:3000000): 94 wallclock secs (67.34 usr 25.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_big_key_prefix (10:3000000): 88 wallclock secs (62.72 usr 25.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_big_key2 (10:3000000): 88 wallclock secs (62.99 usr 24.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_big_key_diff (10:3000000): 98 wallclock secs (62.45 usr 25.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_big (10:3000000): 101 wallclock secs (61.84 usr 25.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_range (500:125750): 9 wallclock secs ( 2.92 usr 1.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_key_prefix (500:125750): 6 wallclock secs ( 2.94 usr 1.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for order_by_key2_diff (500:250500): 11 wallclock secs ( 5.53 usr 2.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_diff_key (500:1000): 127 wallclock secs ( 0.67 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_range_prefix (5010:42084): 15 wallclock secs ( 4.69 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_range_key2 (5010:42084): 16 wallclock secs ( 4.70 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key_prefix (200000): 201 wallclock secs (93.56 usr 39.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key (200000): 196 wallclock secs (103.61 usr 37.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key_return_key (200000): 195 wallclock secs (89.05 usr 37.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key2 (200000): 205 wallclock secs (93.56 usr 39.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key2_return_key (200000): 198 wallclock secs (90.06 usr 35.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_key2_return_prim (200000): 203 wallclock secs (91.61 usr 35.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
-Time for select_range_prefix (20000:43500): 13 wallclock secs ( 5.53 usr 2.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range_key2 (20000:43500): 13 wallclock secs ( 5.64 usr 2.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (111): 36 wallclock secs ( 0.03 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max_on_key (15000): 14 wallclock secs ( 6.42 usr 2.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max (60): 18 wallclock secs ( 0.05 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (100): 31 wallclock secs ( 0.06 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count (100): 35 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (20): 103 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_range_prefix (20000:43500): 13 wallclock secs ( 5.50 usr 2.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_range_key2 (20000:43500): 14 wallclock secs ( 5.69 usr 2.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_group (111): 38 wallclock secs ( 0.05 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for min_max_on_key (15000): 14 wallclock secs ( 6.08 usr 2.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for min_max (60): 19 wallclock secs ( 0.00 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_on_key (100): 37 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count (100): 36 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_big (20): 91 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
-Time for update_of_key (50000): 56 wallclock secs ( 4.28 usr 7.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for update_of_key_big (501): 33 wallclock secs ( 0.06 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for update_of_key (50000): 56 wallclock secs ( 4.66 usr 6.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for update_of_key_big (501): 32 wallclock secs ( 0.05 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
-Time for update_with_key (300000): 188 wallclock secs (27.97 usr 41.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for update_with_key_prefix (100000): 59 wallclock secs ( 9.42 usr 12.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for update_with_key (300000): 190 wallclock secs (27.05 usr 40.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for update_with_key_prefix (100000): 58 wallclock secs ( 9.02 usr 13.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
-Time for update_big (10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for update_big (10): 66 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
-Time for outer_join_on_key (10:10): 41 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for outer_join (10:10): 118 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for outer_join_found (10:10): 106 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for outer_join_not_found (500:10): 58 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join_on_key (10:10): 41 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join (10:10): 120 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join_found (10:10): 106 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join_not_found (500:10): 56 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 8 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
@@ -64,24 +67,24 @@ Time for insert_select_2_keys (1): 12 wallclock secs ( 0.00 usr 0.00 sys + 0.
Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
-Time for delete_key (10000): 8 wallclock secs ( 0.88 usr 1.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for delete_all (12): 21 wallclock secs ( 0.00 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for delete_key (10000): 7 wallclock secs ( 1.14 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for delete_all (12): 19 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
-Time for insert_key (100000): 1853 wallclock secs (13.92 usr 13.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert_key (100000): 1440 wallclock secs (13.86 usr 11.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
-Time for update_of_primary_key_many_keys (256): 580 wallclock secs ( 0.03 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for update_of_primary_key_many_keys (256): 447 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting rows from the table
-Time for delete_big_many_keys (128): 1980 wallclock secs ( 0.03 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for delete_big_many_keys (128): 1431 wallclock secs ( 0.00 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
-Time for delete_all_many_keys (1): 1980 wallclock secs ( 0.03 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for delete_all_many_keys (1): 1431 wallclock secs ( 0.00 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting 100000 rows with multiple values
-Time for multiple_value_insert (100000): 12 wallclock secs ( 2.50 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for multiple_value_insert (100000): 9 wallclock secs ( 2.53 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 7164 wallclock secs (872.86 usr 441.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 6659 wallclock secs (1143.94 usr 544.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/select-mysql-NT_4.0 b/sql-bench/Results/select-mysql-NT_4.0
index a32f7063a25..d560ccddb99 100644
--- a/sql-bench/Results/select-mysql-NT_4.0
+++ b/sql-bench/Results/select-mysql-NT_4.0
@@ -1,23 +1,30 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 2:59:37
+Testing server 'MySQL 3.23.37' at 2001-04-19 16:00:44
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
-Time to insert (10000): 7 wallclock secs ( 1.30 usr 1.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert (10000): 7 wallclock secs ( 1.27 usr 1.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Test if the database has a query cache
+Time for select_query_cache (10000): 90 wallclock secs ( 5.81 usr 1.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time for select_query_cache2 (10000): 91 wallclock secs ( 5.55 usr 1.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
-Time for select_big (70:17207): 1 wallclock secs ( 0.41 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (410:1057904): 187 wallclock secs (27.39 usr 9.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max_on_key (70000): 179 wallclock secs (29.25 usr 12.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (50000): 346 wallclock secs (21.52 usr 9.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_big (70:17207): 1 wallclock secs ( 0.33 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for select_range (410:1057904): 186 wallclock secs (27.06 usr 9.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for min_max_on_key (70000): 182 wallclock secs (31.70 usr 12.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_on_key (50000): 368 wallclock secs (22.81 usr 8.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_group_on_key_parts (1000:100000): 41 wallclock secs ( 2.81 usr 0.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_group_on_key_parts (1000:100000): 40 wallclock secs ( 2.73 usr 0.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing count(distinct) on the table
-Time for count_distinct (2000:2000): 80 wallclock secs ( 0.94 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key (1000:6000): 45 wallclock secs ( 0.67 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key_parts (1000:100000): 150 wallclock secs ( 3.31 usr 0.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group (1000:100000): 150 wallclock secs ( 3.03 usr 1.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (100:1000000): 111 wallclock secs (23.03 usr 7.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 1297 wallclock secs (113.66 usr 43.80 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_key_prefix (1000:1000): 39 wallclock secs ( 0.52 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct (1000:1000): 48 wallclock secs ( 0.56 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_2 (1000:1000): 52 wallclock secs ( 0.45 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_group_on_key (1000:6000): 48 wallclock secs ( 0.58 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_group_on_key_parts (1000:100000): 145 wallclock secs ( 3.02 usr 0.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_group (1000:100000): 145 wallclock secs ( 2.67 usr 1.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for count_distinct_big (100:1000000): 114 wallclock secs (22.47 usr 8.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 1556 wallclock secs (127.53 usr 47.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/wisconsin-mysql-NT_4.0 b/sql-bench/Results/wisconsin-mysql-NT_4.0
index f4ed2847ff7..0ef69aa9272 100644
--- a/sql-bench/Results/wisconsin-mysql-NT_4.0
+++ b/sql-bench/Results/wisconsin-mysql-NT_4.0
@@ -1,14 +1,14 @@
-Testing server 'MySQL 3.23.31' at 2001-01-18 3:21:15
+Testing server 'MySQL 3.23.37' at 2001-04-19 16:26:40
Wisconsin benchmark test
-Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
-Time to insert (31000): 19 wallclock secs ( 2.50 usr 3.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time to insert (31000): 19 wallclock secs ( 2.17 usr 4.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
-Time for wisc_benchmark (114): 9 wallclock secs ( 5.44 usr 1.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for wisc_benchmark (114): 9 wallclock secs ( 5.77 usr 1.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 28 wallclock secs ( 7.94 usr 5.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Total time: 28 wallclock secs ( 7.95 usr 5.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/crash-me.sh b/sql-bench/crash-me.sh
index 014962b1c6d..badbcc85288 100644
--- a/sql-bench/crash-me.sh
+++ b/sql-bench/crash-me.sh
@@ -1,4 +1,4 @@
-#!@PERL@ -w
+#!@PERL@
# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
#
# This library is free software; you can redistribute it and/or
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 596a1d3acf7..774ef06f17c 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -54,7 +54,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
ha_gemini.h opt_range.h opt_ft.h \
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
lex.h lex_symbol.h sql_acl.h sql_crypt.h md5.h \
- log_event.h mini_client.h sql_repl.h slave.h
+ log_event.h mini_client.h sql_repl.h slave.h \
+ stacktrace.h
mysqld_SOURCES = sql_lex.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -67,17 +68,19 @@ mysqld_SOURCES = sql_lex.cc \
sql_base.cc table.cc sql_select.cc sql_insert.cc \
sql_update.cc sql_delete.cc \
procedure.cc item_uniq.cc sql_test.cc \
- log.cc init.cc derror.cc sql_acl.cc unireg.cc \
+ log.cc log_event.cc init.cc derror.cc sql_acl.cc \
+ unireg.cc \
time.cc opt_range.cc opt_sum.cc opt_ft.cc \
records.cc filesort.cc handler.cc \
- ha_isam.cc ha_isammrg.cc ha_heap.cc \
- ha_myisam.cc ha_myisammrg.cc ha_berkeley.cc \
- ha_innobase.cc ha_gemini.cc \
+ ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
+ ha_berkeley.cc ha_innobase.cc ha_gemini.cc \
+ ha_isam.cc ha_isammrg.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
slave.cc sql_repl.cc \
- md5.c log_event.cc mini_client.cc mini_client_errors.c
+ mini_client.cc mini_client_errors.c \
+ md5.c stacktrace.c
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
mysqlbinlog_SOURCES = mysqlbinlog.cc mini_client.cc net_serv.cc \
diff --git a/sql/filesort.cc b/sql/filesort.cc
index e116e2b68e6..610fe2e966f 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -49,7 +49,7 @@ typedef struct st_sort_param {
uint sort_length; /* Length of sortarg */
uint keys; /* Max antal nycklar / buffert */
uint ref_length; /* Length of record ref. */
- ha_rows max_rows;
+ ha_rows max_rows,examined_rows;
TABLE *sort_form; /* For quicker make_sortkey */
SORT_FIELD *local_sortorder;
SORT_FIELD *end;
@@ -91,7 +91,8 @@ static uint sortlength(SORT_FIELD *sortorder,uint length);
open a new file is opened */
ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
- SQL_SELECT *select, ha_rows special, ha_rows max_rows)
+ SQL_SELECT *select, ha_rows special, ha_rows max_rows,
+ ha_rows *examined_rows)
{
int error;
uint memavl,old_memavl,maxbuffer,skr;
@@ -113,6 +114,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
param.ref_length= table[0]->file->ref_length;
param.sort_length=sortlength(sortorder,s_length)+ param.ref_length;
param.max_rows= max_rows;
+ param.examined_rows=0;
if (select && select->quick)
{
@@ -259,7 +261,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
my_error(ER_FILSORT_ABORT,MYF(ME_ERROR+ME_WAITTANG));
else
statistic_add(filesort_rows, records, &LOCK_status);
-
+ *examined_rows= param.examined_rows;
#ifdef SKIPP_DBUG_IN_FILESORT
DBUG_POP(); /* Ok to DBUG */
#endif
@@ -367,6 +369,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
file->rnd_end();
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
}
+ if (error == 0)
+ param->examined_rows++;
if (error == 0 && (!select || select->skipp_record() == 0))
{
if (idx == param->keys)
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 4b4894ebcd5..32af39e4a0d 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -571,6 +571,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
transaction=0;
cursor=0;
key_read=0;
+ block_size=8192; // Berkeley DB block size
share->fixed_length_row=!(table->db_create_options & HA_OPTION_PACK_RECORD);
get_status();
diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc
index f263f693103..8af9de0eaba 100644
--- a/sql/ha_innobase.cc
+++ b/sql/ha_innobase.cc
@@ -449,7 +449,7 @@ innobase_init(void)
if (!innobase_data_file_path)
{
fprintf(stderr,
- "Can't initialize InnoDB as 'innobase_data_file_path' is not set\n");
+ "Can't initialize InnoDB as 'innodb_data_file_path' is not set\n");
innodb_skip=1;
DBUG_RETURN(FALSE); // Continue without innobase
}
@@ -1868,7 +1868,7 @@ corresponding row to buf. */
int
ha_innobase::index_first(
/*=====================*/
- /* out: 0, HA_ERR_KEY_NOT_FOUND,
+ /* out: 0, HA_ERR_END_OF_FILE,
or error code */
mysql_byte* buf) /* in/out: buffer for the row */
{
@@ -1879,6 +1879,12 @@ ha_innobase::index_first(
error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY);
+ /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */
+
+ if (error == HA_ERR_KEY_NOT_FOUND) {
+ error = HA_ERR_END_OF_FILE;
+ }
+
DBUG_RETURN(error);
}
@@ -1899,7 +1905,7 @@ ha_innobase::index_last(
error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY);
- /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */
+ /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */
if (error == HA_ERR_KEY_NOT_FOUND) {
error = HA_ERR_END_OF_FILE;
diff --git a/sql/ha_innobase.h b/sql/ha_innobase.h
index e85d73bdae5..429e47523dd 100644
--- a/sql/ha_innobase.h
+++ b/sql/ha_innobase.h
@@ -83,14 +83,14 @@ class ha_innobase: public handler
HA_NO_WRITE_DELAYED |
HA_PRIMARY_KEY_IN_READ_INDEX |
HA_DROP_BEFORE_CREATE |
- HA_NOT_READ_AFTER_KEY),
+ HA_NOT_READ_AFTER_KEY | HA_NO_PREFIX_CHAR_KEYS),
last_dup_key((uint) -1),
start_of_scan(0)
{
}
~ha_innobase() {}
- const char* table_type() const { return("Innobase");}
+ const char* table_type() const { return("InnoDB");}
const char** bas_ext() const;
ulong option_flag() const { return int_option_flag; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index b6cfc2c3612..6409ec5d019 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -50,10 +50,12 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
{
THD* thd = (THD*)param->thd;
String* packet = &thd->packet;
- packet->length(0);
+ uint length;
char msgbuf[MI_MAX_MSG_BUF];
- msgbuf[0] = 0;
+ char name[NAME_LEN*2+2];
+ packet->length(0);
+ msgbuf[0] = 0; // healthy paranoia ?
my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
@@ -70,9 +72,12 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME));
return;
}
- net_store_data(packet, param->table_name);
+ length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) -
+ name);
+ net_store_data(packet, name, length);
net_store_data(packet, param->op_name);
net_store_data(packet, msg_type);
+
net_store_data(packet, msgbuf);
if (my_net_write(&thd->net, (char*)thd->packet.ptr(), thd->packet.length()))
fprintf(stderr,
@@ -245,6 +250,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name = (char*)"check";
+ param.db_name = table->table_cache_key;
param.table_name = table->table_name;
param.testflag = check_opt->flags | T_CHECK | T_SILENT;
@@ -332,6 +338,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name = (char*) "analyze";
+ param.db_name = table->table_cache_key;
param.table_name = table->table_name;
param.testflag=(T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
T_DONT_CHECK_CHECKSUM);
@@ -384,6 +391,7 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name = (char*)"restore";
+ param.db_name = table->table_cache_key;
param.table_name = table->table_name;
param.testflag = 0;
mi_check_print_error(&param,errmsg, errno );
@@ -438,6 +446,7 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name = (char*)"backup";
+ param.db_name = table->table_cache_key;
param.table_name = table->table_name;
param.testflag = 0;
mi_check_print_error(&param,errmsg, errno );
@@ -524,6 +533,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
ha_rows rows= file->state->records;
DBUG_ENTER("ha_myisam::repair");
+ param.db_name = table->table_cache_key;
param.table_name = table->table_name;
param.tmpfile_createflag = O_RDWR | O_TRUNC;
param.using_global_keycache = 1;
diff --git a/sql/handler.h b/sql/handler.h
index c7df6e2a915..076bf783f80 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -73,6 +73,7 @@
#define HA_NOT_READ_AFTER_KEY (HA_DROP_BEFORE_CREATE*2)
#define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2)
#define HA_NO_TEMP_TABLES (HA_NOT_DELETE_WITH_CACHE*2)
+#define HA_NO_PREFIX_CHAR_KEYS (HA_NO_TEMP_TABLES*2)
/* Parameters for open() (in register form->filestat) */
/* HA_GET_INFO does a implicit HA_ABORT_IF_LOCKED */
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 82dcb0268b4..80f72c30e57 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -393,12 +393,14 @@ void Item_func_reverse::fix_length_and_dec()
String *Item_func_replace::val_str(String *str)
{
String *res,*res2,*res3;
- int offset=0;
+ int offset;
uint from_length,to_length;
bool alloced=0;
#ifdef USE_MB
const char *ptr,*end,*strend,*search,*search_end;
register uint32 l;
+ bool binary_str = (args[0]->binary || args[1]->binary ||
+ !use_mb(default_charset_info));
#endif
null_value=0;
@@ -415,7 +417,8 @@ String *Item_func_replace::val_str(String *str)
if ((offset=res->strstr(*res2)) < 0)
return res;
#else
- if (!use_mb(default_charset_info) && (offset=res->strstr(*res2)) < 0)
+ offset=0;
+ if (binary_str && (offset=res->strstr(*res2)) < 0)
return res;
#endif
if (!(res3=args[2]->val_str(&tmp_value2)))
@@ -424,7 +427,7 @@ String *Item_func_replace::val_str(String *str)
to_length= res3->length();
#ifdef USE_MB
- if (use_mb(default_charset_info))
+ if (!binary_str)
{
search=res2->ptr();
search_end=search+from_length;
@@ -449,6 +452,7 @@ redo:
res=copy_if_not_alloced(str,res,res->length()+to_length);
}
res->replace((uint) offset,from_length,*res3);
+ offset+=(int) to_length;
goto redo;
}
skipp:
diff --git a/sql/lex.h b/sql/lex.h
index 6c83cb34366..d5df5ed5511 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -88,6 +88,7 @@ static SYMBOL symbols[] = {
{ "COMMIT", SYM(COMMIT_SYM),0,0},
{ "COMMITTED", SYM(COMMITTED_SYM),0,0},
{ "COMPRESSED", SYM(COMPRESSED_SYM),0,0},
+ { "CONCURRENT", SYM(CONCURRENT),0,0},
{ "CONSTRAINT", SYM(CONSTRAINT),0,0},
{ "CREATE", SYM(CREATE),0,0},
{ "CROSS", SYM(CROSS),0,0},
diff --git a/sql/lock.cc b/sql/lock.cc
index 915f1831245..23f81c9c164 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -346,7 +346,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
*write_lock_used=0;
for (i=tables=lock_count=0 ; i < count ; i++)
{
- if (!table_ptr[i]->tmp_table)
+ if (table_ptr[i]->tmp_table != TMP_TABLE)
{
tables+=table_ptr[i]->file->lock_count();
lock_count++;
@@ -366,7 +366,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
for (i=0 ; i < count ; i++)
{
TABLE *table;
- if ((table=table_ptr[i])->tmp_table)
+ if ((table=table_ptr[i])->tmp_table == TMP_TABLE)
continue;
*to++=table;
enum thr_lock_type lock_type= table->reginfo.lock_type;
diff --git a/sql/log.cc b/sql/log.cc
index 9601d162d28..4cd93261973 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -103,7 +103,7 @@ MYSQL_LOG::~MYSQL_LOG()
void MYSQL_LOG::set_index_file_name(const char* index_file_name)
{
if (index_file_name)
- fn_format(this->index_file_name,index_file_name,mysql_data_home,"-index",
+ fn_format(this->index_file_name,index_file_name,mysql_data_home,".index",
4);
else
this->index_file_name[0] = 0;
@@ -129,6 +129,32 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name)
return 0;
}
+bool MYSQL_LOG::open_index( int options)
+{
+ return (index_file < 0 &&
+ (index_file = my_open(index_file_name, options | O_BINARY ,
+ MYF(MY_WME))) < 0);
+}
+
+void MYSQL_LOG::init(enum_log_type log_type_arg)
+{
+ log_type = log_type_arg;
+ if (!inited)
+ {
+ inited=1;
+ (void) pthread_mutex_init(&LOCK_log,MY_MUTEX_INIT_SLOW);
+ (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW);
+ }
+}
+
+void MYSQL_LOG::close_index()
+{
+ if(index_file >= 0)
+ {
+ my_close(index_file, MYF(0));
+ index_file = -1;
+ }
+}
void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
const char *new_name)
@@ -137,17 +163,11 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
char buff[512];
File file= -1;
bool do_magic;
-
- if (!inited)
- {
- inited=1;
- (void) pthread_mutex_init(&LOCK_log,MY_MUTEX_INIT_SLOW);
- (void) pthread_mutex_init(&LOCK_index, MY_MUTEX_INIT_SLOW);
- if (log_type_arg == LOG_BIN && *fn_ext(log_name))
+
+ if (!inited && log_type_arg == LOG_BIN && *fn_ext(log_name))
no_rotate = 1;
- }
+ init(log_type_arg);
- log_type=log_type_arg;
if (!(name=my_strdup(log_name,MYF(MY_WME))))
goto err;
if (new_name)
@@ -208,10 +228,7 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
clean up if failed
*/
if ((do_magic && my_b_write(&log_file, (byte*) BINLOG_MAGIC, 4)) ||
- (index_file < 0 &&
- (index_file = my_open(index_file_name,
- O_APPEND | O_BINARY | O_RDWR | O_CREAT,
- MYF(MY_WME))) < 0))
+ open_index(O_APPEND | O_RDWR | O_CREAT))
goto err;
Start_log_event s;
bool error;
@@ -224,8 +241,7 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
pthread_mutex_unlock(&LOCK_index);
if (error)
{
- my_close(index_file,MYF(0));
- index_file= -1;
+ close_index();
goto err;
}
}
@@ -825,11 +841,12 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
{
/* For slow query log */
if (my_b_printf(&log_file,
- "# Time: %lu Lock_time: %lu Rows_sent: %lu\n",
+ "# Time: %lu Lock_time: %lu Rows_sent: %lu Rows_examined: %lu\n",
(ulong) (current_time - query_start),
(ulong) (thd->time_after_lock - query_start),
- (ulong) thd->sent_row_count) == (uint) -1)
- tmp_errno=errno;
+ (ulong) thd->sent_row_count,
+ (ulong) thd->examined_row_count) == (uint) -1)
+ tmp_errno=errno;
}
if (thd->db && strcmp(thd->db,db))
{ // Database changed
diff --git a/sql/log_event.cc b/sql/log_event.cc
index d643952c5b0..ac985c266c8 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -280,7 +280,7 @@ void Log_event::print_timestamp(FILE* file, time_t* ts)
}
-void Start_log_event::print(FILE* file, bool short_form)
+void Start_log_event::print(FILE* file, bool short_form, char* last_db)
{
if (short_form)
return;
@@ -293,7 +293,7 @@ void Start_log_event::print(FILE* file, bool short_form)
fflush(file);
}
-void Stop_log_event::print(FILE* file, bool short_form)
+void Stop_log_event::print(FILE* file, bool short_form, char* last_db)
{
if (short_form)
return;
@@ -303,7 +303,7 @@ void Stop_log_event::print(FILE* file, bool short_form)
fflush(file);
}
-void Rotate_log_event::print(FILE* file, bool short_form)
+void Rotate_log_event::print(FILE* file, bool short_form, char* last_db)
{
if (short_form)
return;
@@ -441,7 +441,7 @@ Query_log_event::Query_log_event(const char* buf, int event_len):
*((char*)query+q_len) = 0;
}
-void Query_log_event::print(FILE* file, bool short_form)
+void Query_log_event::print(FILE* file, bool short_form, char* last_db)
{
char buff[40],*end; // Enough for SET TIMESTAMP
if (!short_form)
@@ -451,7 +451,15 @@ void Query_log_event::print(FILE* file, bool short_form)
(ulong) thread_id, (ulong) exec_time, error_code);
}
- if (db && db[0])
+ bool same_db = 0;
+
+ if(db && last_db)
+ {
+ if(!(same_db = !memcmp(last_db, db, db_len + 1)))
+ memcpy(last_db, db, db_len + 1);
+ }
+
+ if (db && db[0] && !same_db)
fprintf(file, "use %s;\n", db);
end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10);
*end++=';';
@@ -507,7 +515,7 @@ int Intvar_log_event::write_data(IO_CACHE* file)
return my_b_write(file, (byte*) buf, sizeof(buf));
}
-void Intvar_log_event::print(FILE* file, bool short_form)
+void Intvar_log_event::print(FILE* file, bool short_form, char* last_db)
{
char llbuff[22];
if(!short_form)
@@ -625,7 +633,7 @@ void Load_log_event::copy_log_event(const char *buf, ulong data_len)
}
-void Load_log_event::print(FILE* file, bool short_form)
+void Load_log_event::print(FILE* file, bool short_form, char* last_db)
{
if (!short_form)
{
@@ -634,7 +642,15 @@ void Load_log_event::print(FILE* file, bool short_form)
thread_id, exec_time);
}
- if(db && db[0])
+ bool same_db = 0;
+
+ if(db && last_db)
+ {
+ if(!(same_db = !memcmp(last_db, db, db_len + 1)))
+ memcpy(last_db, db, db_len + 1);
+ }
+
+ if(db && db[0] && !same_db)
fprintf(file, "use %s;\n", db);
fprintf(file, "LOAD DATA INFILE '%s' ", fname);
@@ -678,7 +694,7 @@ void Load_log_event::print(FILE* file, bool short_form)
}
if((int)skip_lines > 0)
- fprintf(file, " IGNORE %d LINES ", skip_lines);
+ fprintf(file, " IGNORE %ld LINES ", (long) skip_lines);
if (num_fields)
{
diff --git a/sql/log_event.h b/sql/log_event.h
index 0f4945bae3c..41f847e8d92 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -100,7 +100,7 @@ public:
virtual ~Log_event() {}
virtual int get_data_size() { return 0;}
- virtual void print(FILE* file, bool short_form = 0) = 0;
+ virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0;
void print_timestamp(FILE* file, time_t *ts = 0);
void print_header(FILE* file);
@@ -169,7 +169,7 @@ public:
;
}
- void print(FILE* file, bool short_form = 0);
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
#define DUMPFILE_FLAG 0x1
@@ -312,7 +312,7 @@ public:
;
}
- void print(FILE* file, bool short_form = 0);
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
extern char server_version[SERVER_VERSION_LENGTH];
@@ -350,7 +350,7 @@ public:
// sizeof(binlog_version) + sizeof(server_version) sizeof(created)
return 2 + sizeof(server_version) + 4;
}
- void print(FILE* file, bool short_form = 0);
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
class Intvar_log_event: public Log_event
@@ -369,7 +369,7 @@ public:
int write_data(IO_CACHE* file);
- void print(FILE* file, bool short_form = 0);
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
class Stop_log_event: public Log_event
@@ -388,7 +388,7 @@ public:
}
~Stop_log_event() {}
Log_event_type get_type_code() { return STOP_EVENT;}
- void print(FILE* file, bool short_form = 0);
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
class Rotate_log_event: public Log_event
@@ -416,7 +416,7 @@ public:
int get_data_size() { return ident_len;}
int write_data(IO_CACHE* file);
- void print(FILE* file, bool short_form = 0);
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
#endif
diff --git a/sql/mini_client.cc b/sql/mini_client.cc
index fa1b9da38a8..38180c0c6c8 100644
--- a/sql/mini_client.cc
+++ b/sql/mini_client.cc
@@ -330,8 +330,14 @@ mc_net_safe_read(MYSQL *mysql)
if(errno != EINTR)
{
mc_end_server(mysql);
- net->last_errno=CR_SERVER_LOST;
- strmov(net->last_error,ER(net->last_errno));
+ if(net->last_errno != ER_NET_PACKET_TOO_LARGE)
+ {
+ net->last_errno=CR_SERVER_LOST;
+ strmov(net->last_error,ER(net->last_errno));
+ }
+ else
+ strmov(net->last_error, "Packet too large - increase \
+max_allowed_packet on this server");
}
return(packet_error);
}
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 78e9db5652f..9c134183fdd 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -602,7 +602,8 @@ void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
int use_record_cache, bool print_errors);
void end_read_record(READ_RECORD *info);
ha_rows filesort(TABLE **form,struct st_sort_field *sortorder, uint s_length,
- SQL_SELECT *select, ha_rows special,ha_rows max_rows);
+ SQL_SELECT *select, ha_rows special,ha_rows max_rows,
+ ha_rows *examined_rows);
void change_double_for_sort(double nr,byte *to);
int get_quick_record(SQL_SELECT *select);
int calc_weekday(long daynr,bool sunday_first_day_of_week);
diff --git a/sql/mysqlbinlog.cc b/sql/mysqlbinlog.cc
index 49daa04ffff..f0a9692cc2d 100644
--- a/sql/mysqlbinlog.cc
+++ b/sql/mysqlbinlog.cc
@@ -38,6 +38,7 @@ ulong mysqld_net_retry_count = 10L;
ulong net_read_timeout= NET_READ_TIMEOUT;
ulong net_write_timeout= NET_WRITE_TIMEOUT;
uint test_flags = 0;
+FILE *result_file;
#ifndef DBUG_OFF
static const char* default_dbug_option = "d:t:o,/tmp/mysqlbinlog.trace";
@@ -46,18 +47,19 @@ static const char* default_dbug_option = "d:t:o,/tmp/mysqlbinlog.trace";
static struct option long_options[] =
{
#ifndef DBUG_OFF
- {"debug", optional_argument, 0, '#'},
+ {"debug", optional_argument, 0, '#'},
#endif
- {"help", no_argument, 0, '?'},
- {"host", required_argument, 0, 'h'},
- {"offset", required_argument, 0, 'o'},
- {"password", required_argument, 0, 'p'},
- {"port", required_argument, 0, 'P'},
- {"position", required_argument, 0, 'j'},
- {"short-form", no_argument, 0, 's'},
- {"table", required_argument, 0, 't'},
- {"user", required_argument, 0, 'u'},
- {"version", no_argument, 0, 'V'},
+ {"help", no_argument, 0, '?'},
+ {"host", required_argument, 0, 'h'},
+ {"offset", required_argument, 0, 'o'},
+ {"password", required_argument, 0, 'p'},
+ {"port", required_argument, 0, 'P'},
+ {"position", required_argument, 0, 'j'},
+ {"result-file", required_argument, 0, 'r'},
+ {"short-form", no_argument, 0, 's'},
+ {"table", required_argument, 0, 't'},
+ {"user", required_argument, 0, 'u'},
+ {"version", no_argument, 0, 'V'},
};
void sql_print_error(const char *format,...);
@@ -106,7 +108,7 @@ static void die(const char* fmt, ...)
static void print_version()
{
- printf("%s Ver 1.2 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
+ printf("%s Ver 1.3 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
}
@@ -133,6 +135,7 @@ the mysql command line client\n\n");
-P, --port=port Use port to connect to the remove server\n\
-u, --user=username Connect to the remove server as username\n\
-p, --password=password Password to connect to remote server\n\
+-r, --result-file=file Direct output to a given file\n\
-j, --position=N Start reading the binlog at position N\n\
-t, --table=name Get raw table dump using COM_TABLE_DUMB\n\
-V, --version Print version and exit.\n\
@@ -163,17 +166,18 @@ static void dump_remote_file(NET* net, const char* fname)
die("Failed reading a packet during the dump of %s ", fname);
if(!short_form)
- (void)my_fwrite(stdout, (byte*) net->read_pos, packet_len, MYF(0));
+ (void)my_fwrite(result_file, (byte*) net->read_pos, packet_len,MYF(0));
}
- fflush(stdout);
+ fflush(result_file);
}
static int parse_args(int *argc, char*** argv)
{
int c, opt_index = 0;
- while((c = getopt_long(*argc, *argv, "so:#::h:j:u:p:P:t:?V", long_options,
+ result_file = stdout;
+ while((c = getopt_long(*argc, *argv, "so:#::h:j:u:p:P:r:t:?V", long_options,
&opt_index)) != EOF)
{
switch(c)
@@ -210,6 +214,11 @@ static int parse_args(int *argc, char*** argv)
pass = my_strdup(optarg, MYF(0));
break;
+ case 'r':
+ if (!(result_file = my_fopen(optarg, O_WRONLY | O_BINARY, MYF(MY_WME))))
+ exit(1);
+ break;
+
case 'u':
use_remote = 1;
user = my_strdup(optarg, MYF(0));
@@ -276,20 +285,21 @@ static void dump_remote_table(NET* net, const char* db, const char* table)
die("Error sending the table dump command");
for(;;)
- {
- uint packet_len = my_net_read(net);
- if(packet_len == 0) break; // end of file
- if(packet_len == packet_error)
- die("Error reading packet in table dump");
- my_fwrite(stdout, (byte*)net->read_pos, packet_len, MYF(MY_WME));
- fflush(stdout);
- }
+ {
+ uint packet_len = my_net_read(net);
+ if(packet_len == 0) break; // end of file
+ if(packet_len == packet_error)
+ die("Error reading packet in table dump");
+ my_fwrite(result_file, (byte*)net->read_pos, packet_len, MYF(MY_WME));
+ fflush(result_file);
+ }
}
static void dump_remote_log_entries(const char* logname)
{
char buf[128];
+ char last_db[FN_REFLEN+1] = "";
uint len;
NET* net = &mysql->net;
if(!position) position = 4; // protect the innocent from spam
@@ -323,7 +333,7 @@ Unfortunately, no sweepstakes today, adjusted position to 4\n");
len - 1);
if(ev)
{
- ev->print(stdout, short_form);
+ ev->print(result_file, short_form, last_db);
if(ev->get_type_code() == LOAD_EVENT)
dump_remote_file(net, ((Load_log_event*)ev)->fname);
delete ev;
@@ -338,6 +348,7 @@ static void dump_local_log_entries(const char* logname)
File fd = -1;
IO_CACHE cache,*file= &cache;
ulonglong rec_count = 0;
+ char last_db[FN_REFLEN+1] = "";
if (logname && logname[0] != '-')
{
@@ -349,7 +360,7 @@ static void dump_local_log_entries(const char* logname)
}
else
{
- if (init_io_cache(file, fileno(stdout), 0, READ_CACHE, (my_off_t) 0,
+ if (init_io_cache(file, fileno(result_file), 0, READ_CACHE, (my_off_t) 0,
0, MYF(MY_WME | MY_NABP | MY_DONT_CHECK_FILESIZE)))
exit(1);
if (position)
@@ -395,9 +406,9 @@ Could not read entry at offset %s : Error in log format or read error",
if (rec_count >= offset)
{
if (!short_form)
- printf("# at %s\n",llstr(old_off,llbuff));
+ fprintf(result_file, "# at %s\n",llstr(old_off,llbuff));
- ev->print(stdout, short_form);
+ ev->print(result_file, short_form, last_db);
}
rec_count++;
delete ev;
@@ -445,6 +456,8 @@ int main(int argc, char** argv)
dump_log_entries(*(argv++));
}
}
+ if (result_file != stdout)
+ my_fclose(result_file, MYF(0));
if (use_remote)
mc_mysql_close(mysql);
return 0;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 48484aa168c..c9e19b65a6c 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -20,6 +20,7 @@
#include <my_dir.h>
#include "sql_acl.h"
#include "slave.h"
+#include "stacktrace.h"
#ifdef HAVE_BERKELEY_DB
#include "ha_berkeley.h"
#endif
@@ -92,6 +93,11 @@ int deny_severity = LOG_WARNING;
typedef fp_except fp_except_t;
#endif
+#ifdef _AIX41
+extern "C" int initgroups(const char *,int);
+#endif
+
+
/* We can't handle floating point expections with threads, so disable
this on freebsd
*/
@@ -144,7 +150,23 @@ static pthread_cond_t COND_handler_count;
static uint handler_count;
#endif
#ifdef __WIN__
-static bool opt_console=0;
+static bool opt_console=0,start_mode=0;
+#endif
+
+/* Set prefix for windows binary */
+#ifdef __WIN__
+#undef MYSQL_SERVER_SUFFIX
+#ifdef __NT__
+#if defined(HAVE_INNOBASE_DB) || defined(HAVE_BERKELEY_DB)
+#define MYSQL_SERVER_SUFFIX "-max-nt"
+#else
+#define MYSQL_SERVER_SUFFIX "-nt"
+#endif /* ...DB */
+#elif defined(HAVE_INNOBASE_DB) || defined(HAVE_BERKELEY_DB)
+#define MYSQL_SERVER_SUFFIX "-max"
+#else
+#define MYSQL_SERVER_SUFFIX ""
+#endif /* __NT__ */
#endif
#ifdef HAVE_BERKELEY_DB
@@ -1104,139 +1126,84 @@ static void start_signal_handler(void)
#else /* if ! __WIN__ && ! __EMX__ */
#ifdef HAVE_LINUXTHREADS
-static sig_handler write_core(int sig);
-
-#ifdef __i386__
-#define SIGRETURN_FRAME_COUNT 1
-#define PTR_SANE(p) ((char*)p >= heap_start && (char*)p <= heap_end)
-
-extern char* __bss_start;
-static char* heap_start, *heap_end;
-
-inline __volatile__ void print_str(const char* name,
- const char* val, int max_len)
-{
- fprintf(stderr, "%s at %p ", name, val);
- if(!PTR_SANE(val))
- {
- fprintf(stderr, " is invalid pointer\n");
- return;
- }
-
- fprintf(stderr, "= ");
- for(; max_len && PTR_SANE(val) && *val; --max_len)
- fputc(*val++, stderr);
- fputc('\n', stderr);
-}
-
-inline __volatile__ void trace_stack()
-{
- uchar **stack_bottom;
- uchar** ebp;
- LINT_INIT(ebp);
- LINT_INIT(stack_bottom);
-
- fprintf(stderr,
-"Attempting backtrace. You can use the following information to find out\n\
-where mysqld died. If you see no messages after this, something went\n\
-terribly wrong...\n");
- THD* thd = current_thd;
- uint frame_count = 0;
- __asm __volatile__ ("movl %%ebp,%0"
- :"=r"(ebp)
- :"r"(ebp));
- if (!ebp)
- {
- fprintf(stderr, "frame pointer (ebp) is NULL, did you compile with\n\
--fomit-frame-pointer? Aborting backtrace!\n");
- return;
- }
- if (!thd)
- {
- fprintf(stderr, "Cannot determine thread, ebp=%p, backtrace may not be correct.\n", ebp);
- /* Assume that the stack starts at the previous even 65K */
- ulong tmp= min(0x10000,thread_stack);
- stack_bottom= (uchar**) (((ulong) &stack_bottom + tmp) &
- ~(ulong) 0xFFFF);
- }
- else
- stack_bottom = (uchar**) thd->thread_stack;
- if (ebp > stack_bottom || ebp < stack_bottom - thread_stack)
- {
- fprintf(stderr,
- "Bogus stack limit or frame pointer, aborting backtrace.\n");
- return;
- }
-
- fprintf(stderr, "Stack range sanity check OK, backtrace follows:\n");
-
- while (ebp < stack_bottom)
- {
- uchar** new_ebp = (uchar**)*ebp;
- fprintf(stderr, "%p\n", frame_count == SIGRETURN_FRAME_COUNT ?
- *(ebp+17) : *(ebp+1));
- if (new_ebp <= ebp )
- {
- fprintf(stderr, "\
-New value of ebp failed sanity check, terminating backtrace!\n");
- return;
- }
- ebp = new_ebp;
- ++frame_count;
- }
-
- fprintf(stderr, "Stack trace successful, trying to get some variables.\n\
-Some pointers may be invalid and cause the dump to abort...\n");
- heap_start = __bss_start;
- heap_end = (char*)sbrk(0);
- print_str("thd->query", thd->query, 1024);
- fprintf(stderr, "thd->thread_id = %ld\n", thd->thread_id);
- fprintf(stderr, "Successfully dumped variables, if you ran with --log,\n\
-take a look at the details of what thread %ld did to cause the crash.\n\
-In some cases of really bad corruption, this value may be invalid\n",
- thd->thread_id);
- fprintf(stderr, "Please use the information above to create a repeatable\n\
-test case for the crash, and send it to bugs@lists.mysql.com\n");
-}
-#endif
+#define UNSAFE_DEFAULT_LINUX_THREADS 200
#endif
static sig_handler handle_segfault(int sig)
{
+ THD *thd=current_thd;
// strictly speaking, one needs a mutex here
// but since we have got SIGSEGV already, things are a mess
// so not having the mutex is not as bad as possibly using a buggy
// mutex - so we keep things simple
if (segfaulted)
- return;
+ {
+ fprintf(stderr, "Fatal signal %d while backtracing\n", sig);
+ exit(1);
+ }
+
segfaulted = 1;
fprintf(stderr,"\
mysqld got signal %d;\n\
-The manual section 'Debugging a MySQL server' tells you how to use a\n\
-stack trace and/or the core file to produce a readable backtrace that may\n\
-help in finding out why mysqld died.\n",sig);
+This could be because you hit a bug. It is also possible that this binary\n\
+or one of the libraries it was linked agaist is corrupt, improperly built,\n\
+or misconfigured. This error can also be caused by malfunctioning hardware.\n",
+ sig);
+ fprintf(stderr, "\
+We will try our best to scrape up some info that will hopefully help diagnose\n\
+the problem, but since we have already crashed, something is definitely wrong\n\
+and this may fail\n\n");
+ fprintf(stderr, "key_buffer_size=%ld\n", keybuff_size);
+ fprintf(stderr, "record_buffer=%ld\n", my_default_record_cache_size);
+ fprintf(stderr, "sort_buffer=%ld\n", sortbuff_size);
+ fprintf(stderr, "max_used_connections=%ld\n", max_used_connections);
+ fprintf(stderr, "max_connections=%ld\n", max_connections);
+ fprintf(stderr, "threads_connected=%d\n", thread_count);
+ fprintf(stderr, "It is possible that mysqld could use up to \n\
+key_buffer_size + (record_buffer + sort_buffer)*max_connections = %ld K\n\
+bytes of memory\n", (keybuff_size + (my_default_record_cache_size +
+ sortbuff_size) * max_connections)/ 1024);
+ fprintf(stderr, "Hope that's ok, if not, decrease some variables in the equation\n\n");
+
#if defined(HAVE_LINUXTHREADS)
-#ifdef __i386__
- trace_stack();
+ if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS)
+ {
+ fprintf(stderr, "\
+You seem to be running 32-bit Linux and have %d concurrent connections.\n\
+If you have not changed STACK_SIZE in LinuxThreads and build the binary \n\
+yourself, LinuxThreads is quite likely to steal a part of global heap for\n\
+the thread stack. Please read http://www.mysql.com/doc/L/i/Linux.html\n\n",
+ thread_count);
+ }
+#endif /* HAVE_LINUXTHREADS */
+
+#ifdef HAVE_STACKTRACE
+ if(!(test_flags & TEST_NO_STACKTRACE))
+ print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0,
+ thread_stack);
+ if (thd)
+ {
+ fprintf(stderr, "Trying to get some variables.\n\
+Some pointers may be invalid and cause the dump to abort...\n");
+ safe_print_str("thd->query", thd->query, 1024);
+ fprintf(stderr, "thd->thread_id=%ld\n", thd->thread_id);
+ fprintf(stderr, "\n
+Successfully dumped variables, if you ran with --log, take a look at the\n\
+details of what thread %ld did to cause the crash. In some cases of really\n\
+bad corruption, the above values may be invalid\n\n",
+ thd->thread_id);
+ }
+ fprintf(stderr, "\
+Please use the information above to create a repeatable test case for the\n\
+crash, and send it to bugs@lists.mysql.com\n");
fflush(stderr);
-#endif /* __i386__ */
+#endif /* HAVE_STACKTRACE */
+
if (test_flags & TEST_CORE_ON_SIGNAL)
write_core(sig);
-#endif /* HAVE_LINUXTHREADS */
exit(1);
}
-/* Produce a core for the thread */
-
-#ifdef HAVE_LINUXTHREADS
-static sig_handler write_core(int sig)
-{
- signal(sig, SIG_DFL);
- if (fork() != 0) exit(1); // Abort main program
- // Core will be written at exit
-}
-#endif
-
static void init_signals(void)
{
@@ -1248,8 +1215,10 @@ static void init_signals(void)
struct sigaction sa; sa.sa_flags = 0;
sigemptyset(&sa.sa_mask);
sigprocmask(SIG_SETMASK,&sa.sa_mask,NULL);
- if (!(test_flags & TEST_NO_STACKTRACE))
+
+ if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL))
{
+ init_stacktrace();
sa.sa_handler=handle_segfault;
sigaction(SIGSEGV, &sa, NULL);
#ifdef SIGBUS
@@ -1388,7 +1357,7 @@ static void *signal_hand(void *arg __attribute__((unused)))
if (!(opt_specialflag & SPECIAL_NO_PRIOR))
my_pthread_attr_setprio(&connection_attrib,INTERRUPT_PRIOR);
if (pthread_create(&tmp,&connection_attrib, kill_server_thread,
- (void*) sig))
+ (void*) 0))
sql_print_error("Error: Can't create thread to kill server");
#else
kill_server((void*) sig); // MIT THREAD has a alarm thread
@@ -1945,12 +1914,24 @@ The server will not act as a slave.");
sql_print_error("After lock_thread_count");
#endif
#else
- // remove the event, because it will not be valid anymore
- Service.SetShutdownEvent(0);
- if(hEventShutdown) CloseHandle(hEventShutdown);
- // if it was started as service on NT try to stop the service
- if(Service.IsNT())
- Service.Stop();
+ if (Service.IsNT())
+ {
+ if(start_mode)
+ {
+ if (WaitForSingleObject(hEventShutdown,INFINITE)==WAIT_OBJECT_0)
+ Service.Stop();
+ }
+ else
+ {
+ Service.SetShutdownEvent(0);
+ if(hEventShutdown) CloseHandle(hEventShutdown);
+ }
+ }
+ else
+ {
+ Service.SetShutdownEvent(0);
+ if(hEventShutdown) CloseHandle(hEventShutdown);
+ }
#endif
/* Wait until cleanup is done */
@@ -2003,6 +1984,7 @@ int main(int argc, char **argv)
else if (argc == 1) // No arguments; start as a service
{
// init service
+ start_mode = 1;
long tmp=Service.Init(MYSQL_SERVICENAME,mysql_service);
return 0;
}
@@ -2478,6 +2460,7 @@ enum options {
OPT_TEMP_POOL, OPT_TX_ISOLATION,
OPT_GEMINI_FLUSH_LOG, OPT_GEMINI_RECOVER,
OPT_GEMINI_UNBUFFERED_IO, OPT_SKIP_SAFEMALLOC,
+ OPT_SKIP_STACK_TRACE
};
static struct option long_options[] = {
@@ -2599,11 +2582,12 @@ static struct option long_options[] = {
{"skip-locking", no_argument, 0, (int) OPT_SKIP_LOCK},
{"skip-host-cache", no_argument, 0, (int) OPT_SKIP_HOST_CACHE},
{"skip-name-resolve", no_argument, 0, (int) OPT_SKIP_RESOLVE},
+ {"skip-networking", no_argument, 0, (int) OPT_SKIP_NETWORKING},
{"skip-new", no_argument, 0, (int) OPT_SKIP_NEW},
{"skip-safemalloc", no_argument, 0, (int) OPT_SKIP_SAFEMALLOC},
{"skip-show-database", no_argument, 0, (int) OPT_SKIP_SHOW_DB},
{"skip-slave-start", no_argument, 0, (int) OPT_SKIP_SLAVE_START},
- {"skip-networking", no_argument, 0, (int) OPT_SKIP_NETWORKING},
+ {"skip-stack-trace", no_argument, 0, (int) OPT_SKIP_STACK_TRACE},
{"skip-thread-priority", no_argument, 0, (int) OPT_SKIP_PRIOR},
{"sql-bin-update-same", no_argument, 0, (int) OPT_SQL_BIN_UPDATE_SAME},
#include "sslopt-longopts.h"
@@ -3029,15 +3013,16 @@ static void usage(void)
Don't use concurrent insert with MyISAM\n\
--skip-delay-key-write\n\
Ignore the delay_key_write option for all tables\n\
+ --skip-host-cache Don't cache host names\n\
--skip-locking Don't use system locking. To use isamchk one has\n\
to shut down the server.\n\
--skip-name-resolve Don't resolve hostnames.\n\
All hostnames are IP's or 'localhost'\n\
--skip-networking Don't allow connection with TCP/IP.\n\
- --skip-new Don't use new, possible wrong routines.\n\
- --skip-host-cache Don't cache host names\n");
+ --skip-new Don't use new, possible wrong routines.\n");
/* We have to break the string here because of VC++ limits */
puts("\
+ --skip-stack-trace Don't print a stack trace on failure\n\
--skip-show-database Don't allow 'SHOW DATABASE' commands\n\
--skip-thread-priority\n\
Don't give threads different priorities.\n\
@@ -3483,6 +3468,9 @@ static void get_options(int argc,char **argv)
case (int) OPT_WANT_CORE:
test_flags |= TEST_CORE_ON_SIGNAL;
break;
+ case (int) OPT_SKIP_STACK_TRACE:
+ test_flags|=TEST_NO_STACKTRACE;
+ break;
case (int) OPT_BIND_ADDRESS:
if (optarg && isdigit(optarg[0]))
{
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 98be3639a06..b95b97d670f 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -697,7 +697,9 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
** and that all key blocks are half full (normally things are
** much better)
*/
- uint keys_per_block= head->file->block_size/2/head->key_info[param.real_keynr[idx]].key_length+1;
+ uint keys_per_block= head->file->block_size/2/
+ (head->key_info[param.real_keynr[idx]].key_length+
+ head->file->ref_length) + 1;
found_read_time=((double) (found_records+keys_per_block-1)/
(double) keys_per_block);
}
diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt
index 74139b30a85..ff29fffe958 100644
--- a/sql/share/english/errmsg.txt
+++ b/sql/share/english/errmsg.txt
@@ -90,7 +90,7 @@
"File '%-.80s' already exists",
"Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld",
"Records: %ld Duplicates: %ld",
-"Incorrect sub part key. The used key part isn't a string or the used length is longer than the key part",
+"Incorrect sub part key. The used key part isn't a string, the used length is longer than the key part or the table handler doesn't support unique sub keys",
"You can't delete all columns with ALTER TABLE. Use DROP TABLE instead",
"Can't DROP '%-.64s'. Check that column/key exists",
"Records: %ld Duplicates: %ld Warnings: %ld",
diff --git a/sql/slave.cc b/sql/slave.cc
index 0220f574112..6b9c376a625 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1191,7 +1191,7 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
pthread_cond_broadcast(&COND_slave_start);
pthread_mutex_unlock(&LOCK_slave);
- int error = 1;
+ // int error = 1;
bool retried_once = 0;
ulonglong last_failed_pos = 0;
@@ -1293,9 +1293,19 @@ try again, log '%s' at postion %s", RPL_LOG_NAME,
sql_print_error("Slave thread killed while reading event");
goto err;
}
-
+
+
if (event_len == packet_error)
{
+ if(mc_mysql_errno(mysql) == ER_NET_PACKET_TOO_LARGE)
+ {
+ sql_print_error("Log entry on master is longer than \
+max_allowed_packet on slave. Slave thread will be aborted. If the entry is \
+really supposed to be that long, restart the server with a higher value of \
+max_allowed_packet. The current value is %ld", max_allowed_packet);
+ goto err;
+ }
+
thd->proc_info = "Waiting to reconnect after a failed read";
if(mysql->net.vio)
vio_close(mysql->net.vio);
@@ -1369,7 +1379,7 @@ the slave thread with \"mysqladmin start-slave\". We stopped at log \
}
}
- error = 0;
+ // error = 0;
err:
// print the current replication position
sql_print_error("Slave thread exiting, replication stopped in log '%s' at \
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 134449fd20a..3c81723b61f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1495,8 +1495,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
DBUG_RETURN(0); /* purecov: inspected */
if (openfrm(path, table_name,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX |
- HA_TRY_READ_ONLY),
+ (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX),
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
ha_open_options,
tmp_table))
@@ -1505,11 +1504,13 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
}
tmp_table->file->extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
- tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked
- tmp_table->tmp_table = 1;
+ tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked
+ tmp_table->tmp_table = (tmp_table->file->has_transactions() ?
+ TRANSACTIONAL_TMP_TABLE : TMP_TABLE);
tmp_table->table_cache_key=(char*) (tmp_table+1);
- tmp_table->key_length= (uint) (strmov(strmov(tmp_table->table_cache_key,db)
- +1, table_name)
+ tmp_table->key_length= (uint) (strmov((tmp_table->real_name=
+ strmov(tmp_table->table_cache_key,db)
+ +1), table_name)
- tmp_table->table_cache_key)+1;
int4store(tmp_table->table_cache_key + tmp_table->key_length,
thd->slave_proxy_id);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 297b6acbad5..438898ca294 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -71,9 +71,12 @@ public:
~MYSQL_LOG();
pthread_mutex_t* get_log_lock() { return &LOCK_log; }
void set_index_file_name(const char* index_file_name = 0);
+ void init(enum_log_type log_type_arg);
void open(const char *log_name,enum_log_type log_type,
const char *new_name=0);
void new_file(void);
+ bool open_index(int options);
+ void close_index();
bool write(THD *thd, enum enum_server_command command,const char *format,...);
bool write(THD *thd, const char *query, uint query_length,
time_t query_start=0);
@@ -265,7 +268,7 @@ public:
#endif
ulonglong next_insert_id,last_insert_id,current_insert_id;
ha_rows select_limit,offset_limit,default_select_limit,cuted_fields,
- max_join_size,sent_row_count;
+ max_join_size, sent_row_count, examined_row_count;
table_map used_tables;
ulong query_id,version, inactive_timeout,options,thread_id;
ulong gemini_spin_retries;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index f0cc3f9c42a..7a94dc32997 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2266,7 +2266,7 @@ mysql_init_query(THD *thd)
thd->lex.table_list.next= (byte**) &thd->lex.table_list.first;
thd->fatal_error=0; // Safety
thd->last_insert_id_used=thd->query_start_used=thd->insert_id_used=0;
- thd->sent_row_count=0;
+ thd->sent_row_count=thd->examined_row_count=0;
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 05e64670df5..e5039d118be 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -132,7 +132,7 @@ static int send_file(THD *thd)
}
-static File open_log(IO_CACHE *log, const char *log_file_name,
+File open_binlog(IO_CACHE *log, const char *log_file_name,
const char **errmsg)
{
File file;
@@ -294,7 +294,7 @@ void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags)
goto err;
}
- if ((file=open_log(&log, log_file_name, &errmsg)) < 0)
+ if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0)
goto err;
if(pos < 4)
@@ -483,7 +483,7 @@ sweepstakes if you report the bug";
// fake Rotate_log event just in case it did not make it to the log
// otherwise the slave make get confused about the offset
- if ((file=open_log(&log, log_file_name, &errmsg)) < 0 ||
+ if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0 ||
fake_rotate_event(net, packet, log_file_name, &errmsg))
goto err;
@@ -694,7 +694,9 @@ int change_master(THD* thd)
glob_mi.pos = lex_mi->pos;
if(lex_mi->host)
- strmake(glob_mi.host, lex_mi->host, sizeof(glob_mi.host));
+ {
+ strmake(glob_mi.host, lex_mi->host, sizeof(glob_mi.host));
+ }
if(lex_mi->user)
strmake(glob_mi.user, lex_mi->user, sizeof(glob_mi.user));
if(lex_mi->password)
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index f8a67f51aa2..68f2b4ba6c4 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -9,6 +9,9 @@ extern uint32 server_id;
extern bool server_id_supplied;
extern I_List<i_string> binlog_do_db, binlog_ignore_db;
+File open_binlog(IO_CACHE *log, const char *log_file_name,
+ const char **errmsg);
+
int start_slave(THD* thd = 0, bool net_report = 1);
int stop_slave(THD* thd = 0, bool net_report = 1);
int change_master(THD* thd);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 74a36ab6723..c2b5662d436 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -36,7 +36,8 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref",
static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
DYNAMIC_ARRAY *keyuse,List<Item_func_match> &ftfuncs);
-static bool update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
+static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,
+ JOIN_TAB *join_tab,
uint tables,COND *conds,table_map table_map,
List<Item_func_match> &ftfuncs);
static int sort_keyuse(KEYUSE *a,KEYUSE *b);
@@ -106,12 +107,14 @@ static uint find_shortest_key(TABLE *table, key_map usable_keys);
static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,
ha_rows select_limit);
static int create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit);
-static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields);
+static bool fix_having(JOIN *join, Item **having);
+static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
+ Item *having);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
- ulong offset);
+ ulong offset,Item *having);
static int remove_dup_with_hash_index(THD *thd, TABLE *table,
uint field_count, Field **first_field,
- ulong key_length);
+ ulong key_length,Item *having);
static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count);
static ulong used_blob_length(CACHE_FIELD **ptr);
static bool store_record_in_cache(JOIN_CACHE *cache);
@@ -210,7 +213,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
{
if (item->with_sum_func)
flag|=1;
- else if (!item->const_item())
+ else if (!(flag & 2) && !item->const_item())
flag|=2;
}
if (flag == 3)
@@ -265,7 +268,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
join.join_tab=0;
join.tmp_table_param.copy_field=0;
join.sum_funcs=0;
- join.send_records=join.found_records=0;
+ join.send_records=join.found_records=join.examined_rows=0;
join.tmp_table_param.end_write_records= HA_POS_ERROR;
join.first_record=join.sort_and_group=0;
join.select_options=select_options;
@@ -717,8 +720,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
if (select_distinct && ! group)
{
thd->proc_info="Removing duplicates";
- if (remove_duplicates(&join,tmp_table,fields))
- goto err; /* purecov: inspected */
+ if (having)
+ having->update_used_tables();
+ if (remove_duplicates(&join,tmp_table,fields, having))
+ goto err; /* purecov: inspected */
+ having=0;
select_distinct=0;
}
tmp_table->reginfo.lock_type=TL_UNLOCK;
@@ -749,28 +755,8 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
/* If we have already done the group, add HAVING to sorted table */
if (having && ! group && ! join.sort_and_group)
{
- having->update_used_tables(); // Some tables may have been const
- JOIN_TAB *table=&join.join_tab[join.const_tables];
- table_map used_tables= join.const_table_map | table->table->map;
-
- Item* sort_table_cond=make_cond_for_table(having,used_tables,used_tables);
- if (sort_table_cond)
- {
- if (!table->select)
- if (!(table->select=new SQL_SELECT))
- goto err;
- if (!table->select->cond)
- table->select->cond=sort_table_cond;
- else // This should never happen
- if (!(table->select->cond=new Item_cond_and(table->select->cond,
- sort_table_cond)))
- goto err;
- table->select_cond=table->select->cond;
- DBUG_EXECUTE("where",print_where(table->select->cond,
- "select and having"););
- having=make_cond_for_table(having,~ (table_map) 0,~used_tables);
- DBUG_EXECUTE("where",print_where(conds,"having after sort"););
- }
+ if (fix_having(&join,&having))
+ goto err;
}
if (create_sort_index(&join.join_tab[join.const_tables],
group ? group : order,
@@ -784,6 +770,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
error=do_select(&join,&fields,NULL,procedure);
err:
+ thd->examined_row_count=join.examined_rows;
thd->proc_info="end";
join.lock=0; // It's faster to unlock later
join_free(&join);
@@ -867,9 +854,9 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
table->reginfo.not_exists_optimize=0;
bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->keys);
all_table_map|= table->map;
+ s->join=join;
if ((s->on_expr=tables->on_expr))
{
- // table->maybe_null=table->outer_join=1; // Mark for send fields
if (!table->file->records)
{ // Empty table
s->key_dependent=s->dependent=0;
@@ -940,7 +927,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
}
if (conds || outer_join)
- if (update_ref_and_keys(keyuse_array,stat,join->tables,
+ if (update_ref_and_keys(join->thd,keyuse_array,stat,join->tables,
conds,~outer_join,ftfuncs))
DBUG_RETURN(1);
@@ -1441,8 +1428,9 @@ sort_keyuse(KEYUSE *a,KEYUSE *b)
*/
static bool
-update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables,
- COND *cond, table_map normal_tables,List<Item_func_match> &ftfuncs)
+update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
+ uint tables, COND *cond, table_map normal_tables,
+ List<Item_func_match> &ftfuncs)
{
uint and_level,i,found_eq_constant;
@@ -1450,8 +1438,7 @@ update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables,
KEY_FIELD *key_fields,*end;
if (!(key_fields=(KEY_FIELD*)
- my_malloc(sizeof(key_fields[0])*
- (current_thd->cond_count+1)*2,MYF(0))))
+ thd->alloc(sizeof(key_fields[0])*(thd->cond_count+1)*2)))
return TRUE; /* purecov: inspected */
and_level=0; end=key_fields;
if (cond)
@@ -1465,14 +1452,10 @@ update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables,
}
}
if (init_dynamic_array(keyuse,sizeof(KEYUSE),20,64))
- {
- my_free((gptr) key_fields,MYF(0));
return TRUE;
- }
/* fill keyuse with found key parts */
for (KEY_FIELD *field=key_fields ; field != end ; field++)
add_key_part(keyuse,field);
- my_free((gptr) key_fields,MYF(0));
}
if (ftfuncs.elements)
@@ -1893,7 +1876,7 @@ cache_record_length(JOIN *join,uint idx)
{
uint length;
JOIN_TAB **pos,**end;
- THD *thd=current_thd;
+ THD *thd=join->thd;
length=0;
for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ;
@@ -2075,7 +2058,7 @@ get_best_combination(JOIN *join)
}
else
{
- THD *thd=current_thd;
+ THD *thd=join->thd;
for (i=0 ; i < keyparts ; keyuse++,i++)
{
while (keyuse->keypart != i ||
@@ -2219,6 +2202,7 @@ make_simple_join(JOIN *join,TABLE *tmp_table)
join_tab->ref.key = -1;
join_tab->not_used_in_distinct=0;
join_tab->read_first_record= join_init_read_record;
+ join_tab->join=join;
bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record));
tmp_table->status=0;
tmp_table->null_row=0;
@@ -3374,7 +3358,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE;
table->blob_ptr_size=mi_portable_sizeof_char_ptr;
table->map=1;
- table->tmp_table=1;
+ table->tmp_table= TMP_TABLE;
table->db_low_byte_first=1; // True for HEAP and MyISAM
table->temp_pool_slot = temp_pool_slot;
@@ -3915,8 +3899,8 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
table->file=0;
*table =new_table;
table->file->change_table_ptr(table);
-
- thd->proc_info=save_proc_info;
+ thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ?
+ "Copying to tmp table on disk" : save_proc_info);
DBUG_RETURN(0);
err:
@@ -4096,6 +4080,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
bool not_used_in_distinct=join_tab->not_used_in_distinct;
ha_rows found_records=join->found_records;
READ_RECORD *info= &join_tab->read_record;
+ join->examined_rows++;
do
{
@@ -4430,7 +4415,8 @@ join_init_read_record(JOIN_TAB *tab)
{
if (tab->select && tab->select->quick)
tab->select->quick->reset();
- init_read_record(&tab->read_record,current_thd, tab->table, tab->select,1,1);
+ init_read_record(&tab->read_record, tab->join->thd, tab->table,
+ tab->select,1,1);
return (*tab->read_record.read_record)(&tab->read_record);
}
@@ -4483,6 +4469,7 @@ join_init_read_next_with_key(READ_RECORD *info)
return 0;
}
+
static int
join_init_read_last_with_key(JOIN_TAB *tab)
{
@@ -4643,7 +4630,11 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
else
{
if (!join->first_record)
+ {
+ /* No matching rows for group function */
clear_tables(join);
+ copy_fields(&join->tmp_table_param);
+ }
if (join->having && join->having->val_int() == 0)
error= -1; // Didn't satisfy having
else
@@ -4875,7 +4866,11 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (idx < (int) join->send_group_parts)
{
if (!join->first_record)
+ {
+ /* No matching rows for group function */
clear_tables(join);
+ copy_fields(&join->tmp_table_param);
+ }
copy_sum_funcs(join->sum_funcs);
if (!join->having || join->having->val_int())
{
@@ -5196,6 +5191,7 @@ create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit)
{
SORT_FIELD *sortorder;
uint length;
+ ha_rows examined_rows;
TABLE *table=tab->table;
SQL_SELECT *select=tab->select;
DBUG_ENTER("create_sort_index");
@@ -5234,12 +5230,13 @@ create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit)
}
}
table->found_records=filesort(&table,sortorder,length,
- select, 0L, select_limit);
+ select, 0L, select_limit, &examined_rows);
delete select; // filesort did select
tab->select=0;
tab->select_cond=0;
tab->type=JT_ALL; // Read with normal read_record
tab->read_first_record= join_init_read_record;
+ tab->join->examined_rows+=examined_rows;
if (table->key_read) // Restore if we used indexes
{
table->key_read=0;
@@ -5251,6 +5248,38 @@ err:
}
+/*
+** Add the HAVING criteria to table->select
+*/
+
+static bool fix_having(JOIN *join, Item **having)
+{
+ (*having)->update_used_tables(); // Some tables may have been const
+ JOIN_TAB *table=&join->join_tab[join->const_tables];
+ table_map used_tables= join->const_table_map | table->table->map;
+
+ Item* sort_table_cond=make_cond_for_table(*having,used_tables,used_tables);
+ if (sort_table_cond)
+ {
+ if (!table->select)
+ if (!(table->select=new SQL_SELECT))
+ return 1;
+ if (!table->select->cond)
+ table->select->cond=sort_table_cond;
+ else // This should never happen
+ if (!(table->select->cond=new Item_cond_and(table->select->cond,
+ sort_table_cond)))
+ return 1;
+ table->select_cond=table->select->cond;
+ DBUG_EXECUTE("where",print_where(table->select_cond,
+ "select and having"););
+ *having=make_cond_for_table(*having,~ (table_map) 0,~used_tables);
+ DBUG_EXECUTE("where",print_where(*having,"having after make_cond"););
+ }
+ return 0;
+}
+
+
/*****************************************************************************
** Remove duplicates from tmp table
** This should be recoded to add a uniuqe index to the table and remove
@@ -5291,7 +5320,7 @@ static void free_blobs(Field **ptr)
static int
-remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields)
+remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
{
int error;
ulong reclength,offset;
@@ -5328,9 +5357,10 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields)
sortbuff_size)))
error=remove_dup_with_hash_index(join->thd, entry,
field_count, first_field,
- reclength);
+ reclength, having);
else
- error=remove_dup_with_compare(join->thd, entry, first_field, offset);
+ error=remove_dup_with_compare(join->thd, entry, first_field, offset,
+ having);
free_blobs(first_field);
DBUG_RETURN(error);
@@ -5338,19 +5368,20 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields)
static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
- ulong offset)
+ ulong offset, Item *having)
{
handler *file=table->file;
char *org_record,*new_record;
+ byte *record;
int error;
ulong reclength=table->reclength-offset;
DBUG_ENTER("remove_dup_with_compare");
- org_record=(char*) table->record[0]+offset;
+ org_record=(char*) (record=table->record[0])+offset;
new_record=(char*) table->record[1]+offset;
file->rnd_init();
- error=file->rnd_next(table->record[0]);
+ error=file->rnd_next(record);
for (;;)
{
if (thd->killed)
@@ -5367,6 +5398,12 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
break;
goto err;
}
+ if (having && !having->val_int())
+ {
+ if ((error=file->delete_row(record)))
+ goto err;
+ continue;
+ }
if (copy_blobs(first_field))
{
my_error(ER_OUT_OF_SORTMEMORY,MYF(0));
@@ -5379,7 +5416,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
bool found=0;
for (;;)
{
- if ((error=file->rnd_next(table->record[0])))
+ if ((error=file->rnd_next(record)))
{
if (error == HA_ERR_RECORD_DELETED)
continue;
@@ -5389,19 +5426,19 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (compare_record(table, first_field) == 0)
{
- if ((error=file->delete_row(table->record[0])))
+ if ((error=file->delete_row(record)))
goto err;
}
else if (!found)
{
found=1;
- file->position(table->record[0]); // Remember position
+ file->position(record); // Remember position
}
}
if (!found)
break; // End of file
/* Restart search on next row */
- error=file->restart_rnd_next(table->record[0],file->ref);
+ error=file->restart_rnd_next(record,file->ref);
}
file->extra(HA_EXTRA_NO_CACHE);
@@ -5422,7 +5459,8 @@ err:
static int remove_dup_with_hash_index(THD *thd, TABLE *table,
uint field_count,
Field **first_field,
- ulong key_length)
+ ulong key_length,
+ Item *having)
{
byte *key_buffer, *key_pos, *record=table->record[0];
int error;
@@ -5470,6 +5508,12 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
break;
goto err;
}
+ if (having && !having->val_int())
+ {
+ if ((error=file->delete_row(record)))
+ goto err;
+ continue;
+ }
/* copy fields to key buffer */
field_length=field_lengths;
@@ -5485,7 +5529,8 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if ((error=file->delete_row(record)))
goto err;
}
- (void) hash_insert(&hash, key_pos-key_length);
+ else
+ (void) hash_insert(&hash, key_pos-key_length);
key_pos+=extra_length;
}
my_free((char*) key_buffer,MYF(0));
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 2f7454e4059..bb97a10128f 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -102,6 +102,7 @@ typedef struct st_join_table {
bool cached_eq_ref_table,eq_ref_table,not_used_in_distinct;
TABLE_REF ref;
JOIN_CACHE cache;
+ JOIN *join;
} JOIN_TAB;
@@ -151,7 +152,7 @@ class JOIN {
uint send_group_parts;
bool sort_and_group,first_record,full_join,group, no_field_update;
table_map const_table_map,outer_join;
- ha_rows send_records,found_records;
+ ha_rows send_records,found_records,examined_rows,row_limit;
POSITION positions[MAX_TABLES+1],best_positions[MAX_TABLES+1];
double best_read;
List<Item> *fields;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 38c068d35c3..39ddb79e9de 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -781,7 +781,10 @@ store_create_info(THD *thd, TABLE *table, String *packet)
List<Item> field_list;
char tmp[MAX_FIELD_WIDTH];
String type(tmp, sizeof(tmp));
- packet->append("CREATE TABLE ", 13);
+ if (table->tmp_table)
+ packet->append("CREATE TEMPORARY TABLE ", 23);
+ else
+ packet->append("CREATE TABLE ", 13);
append_identifier(thd,packet,table->real_name);
packet->append(" (\n", 3);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 77aaf1edae4..ad39b91a5ca 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -479,12 +479,16 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
}
}
else if (column->length > length ||
- (f_is_packed(sql_field->pack_flag) && column->length != length))
+ ((f_is_packed(sql_field->pack_flag) ||
+ ((file->option_flag() & HA_NO_PREFIX_CHAR_KEYS) &&
+ (key_info->flags & HA_NOSAME))) &&
+ column->length != length))
{
my_error(ER_WRONG_SUB_KEY,MYF(0));
DBUG_RETURN(-1);
}
- length=column->length;
+ if (!(file->option_flag() & HA_NO_PREFIX_CHAR_KEYS))
+ length=column->length;
}
else if (length == 0)
{
@@ -1426,21 +1430,20 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
create_info,
create_list,key_list,1,1))) // no logging
DBUG_RETURN(error);
+
+ if (table->tmp_table)
+ new_table=open_table(thd,new_db,tmp_name,tmp_name,0);
+ else
{
- if (table->tmp_table)
- new_table=open_table(thd,new_db,tmp_name,tmp_name,0);
- else
- {
- char path[FN_REFLEN];
- (void) sprintf(path,"%s/%s/%s",mysql_data_home,new_db,tmp_name);
- fn_format(path,path,"","",4);
- new_table=open_temporary_table(thd, path, new_db, tmp_name,0);
- }
- if (!new_table)
- {
- VOID(quick_rm_table(new_db_type,new_db,tmp_name));
- goto err;
- }
+ char path[FN_REFLEN];
+ (void) sprintf(path,"%s/%s/%s",mysql_data_home,new_db,tmp_name);
+ fn_format(path,path,"","",4);
+ new_table=open_temporary_table(thd, path, new_db, tmp_name,0);
+ }
+ if (!new_table)
+ {
+ VOID(quick_rm_table(new_db_type,new_db,tmp_name));
+ goto err;
}
save_time_stamp=new_table->time_stamp;
@@ -1633,6 +1636,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
TABLE_LIST tables;
List<Item> fields;
List<Item> all_fields;
+ ha_rows examined_rows;
DBUG_ENTER("copy_data_between_tables");
if (!(copy= new Copy_field[to->fields]))
@@ -1668,7 +1672,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
(from->found_records = filesort(&from, sortorder, length,
- (SQL_SELECT *) 0, 0L, HA_POS_ERROR))
+ (SQL_SELECT *) 0, 0L, HA_POS_ERROR,
+ &examined_rows))
== HA_POS_ERROR)
goto err;
};
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 1196d279e5c..c1069d91746 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -145,6 +145,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token COMMITTED_SYM
%token COLUMNS
%token COLUMN_SYM
+%token CONCURRENT
%token CONSTRAINT
%token DATABASES
%token DATA_SYM
@@ -1270,7 +1271,7 @@ select_lock_type:
/* empty */
| FOR_SYM UPDATE_SYM
{ Lex->lock_option= TL_WRITE; }
- | IN_SYM SHARE_SYM MODE_SYM
+ | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM
{ Lex->lock_option= TL_READ_WITH_SHARED_LOCKS; }
select_item_list:
@@ -2339,7 +2340,7 @@ use: USE_SYM ident
/* import, export of files */
-load: LOAD DATA_SYM opt_low_priority opt_local INFILE TEXT_STRING
+load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING
{
Lex->sql_command= SQLCOM_LOAD;
Lex->local_file= $4;
@@ -2366,6 +2367,12 @@ opt_local:
/* empty */ { $$=0;}
| LOCAL_SYM { $$=1;}
+load_data_lock:
+ /* empty */ { Lex->lock_option= current_thd->update_lock_default; }
+ | CONCURRENT { Lex->lock_option= TL_WRITE_CONCURRENT_INSERT ; }
+ | LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; }
+
+
opt_duplicate:
/* empty */ { Lex->duplicates=DUP_ERROR; }
| REPLACE { Lex->duplicates=DUP_REPLACE; }
@@ -2523,6 +2530,7 @@ keyword:
| COMMIT_SYM {}
| COMMITTED_SYM {}
| COMPRESSED_SYM {}
+ | CONCURRENT {}
| DATA_SYM {}
| DATETIME {}
| DATE_SYM {}
diff --git a/sql/stacktrace.c b/sql/stacktrace.c
new file mode 100644
index 00000000000..ba5bc9d2e85
--- /dev/null
+++ b/sql/stacktrace.c
@@ -0,0 +1,215 @@
+/* Copyright (C) 2000 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <global.h>
+#include "stacktrace.h"
+#include <signal.h>
+
+#ifdef HAVE_STACKTRACE
+#include <unistd.h>
+
+#define PTR_SANE(p) ((p) && (char*)(p) >= heap_start && (char*)(p) <= heap_end)
+
+char *heap_start;
+
+void safe_print_str(const char* name, const char* val, int max_len)
+{
+ char *heap_end= (char*) sbrk(0);
+ fprintf(stderr, "%s at %p ", name, val);
+
+ if (!PTR_SANE(val))
+ {
+ fprintf(stderr, " is invalid pointer\n");
+ return;
+ }
+
+ fprintf(stderr, "= ");
+ for(; max_len && PTR_SANE(val) && *val; --max_len)
+ fputc(*val++, stderr);
+ fputc('\n', stderr);
+}
+
+#ifdef HAVE_LINUXTHREADS
+#define SIGRETURN_FRAME_COUNT 1
+
+#if defined(__alpha__) && defined(__GNUC__)
+/*
+ The only way to backtrace without a symbol table on alpha
+ is to find stq fp,N(sp), and the first byte
+ of the instruction opcode will give us the value of N. From this
+ we can find where the old value of fp is stored
+*/
+
+#define MAX_INSTR_IN_FUNC 10000
+
+inline uchar** find_prev_fp(uint32* pc, uchar** fp)
+{
+ int i;
+ for(i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc)
+ {
+ uchar* p = (uchar*)pc;
+ if (p[2] == 222 && p[3] == 35)
+ {
+ return (uchar**)((uchar*)fp - *(short int*)p);
+ }
+ }
+ return 0;
+}
+
+inline uint32* find_prev_pc(uint32* pc, uchar** fp)
+{
+ int i;
+ for(i = 0; i < MAX_INSTR_IN_FUNC; ++i,--pc)
+ {
+ char* p = (char*)pc;
+ if (p[1] == 0 && p[2] == 94 && p[3] == -73)
+ {
+ uint32* prev_pc = (uint32*)*((fp+p[0]/sizeof(fp)));
+ return prev_pc;
+ }
+ }
+ return 0;
+}
+#endif /* defined(__alpha__) && defined(__GNUC__) */
+
+
+void print_stacktrace(gptr stack_bottom, ulong thread_stack)
+{
+ uchar** fp;
+ uint frame_count = 0;
+#if defined(__alpha__) && defined(__GNUC__)
+ uint32* pc;
+#endif
+ LINT_INIT(fp);
+
+ fprintf(stderr,"\
+Attempting backtrace. You can use the following information to find out\n\
+where mysqld died. If you see no messages after this, something went\n\
+terribly wrong...\n");
+#ifdef __i386__
+ __asm __volatile__ ("movl %%ebp,%0"
+ :"=r"(fp)
+ :"r"(fp));
+ if (!fp)
+ {
+ fprintf(stderr, "frame pointer (ebp) is NULL, did you compile with\n\
+-fomit-frame-pointer? Aborting backtrace!\n");
+ return;
+ }
+#endif
+#if defined(__alpha__) && defined(__GNUC__)
+ __asm __volatile__ ("mov $15,%0"
+ :"=r"(fp)
+ :"r"(fp));
+ if (!fp)
+ {
+ fprintf(stderr, "frame pointer (fp) is NULL, did you compile with\n\
+-fomit-frame-pointer? Aborting backtrace!\n");
+ return;
+ }
+#endif /* __alpha__ */
+
+ if (!stack_bottom)
+ {
+ ulong tmp= min(0x10000,thread_stack);
+ /* Assume that the stack starts at the previous even 65K */
+ stack_bottom= (gptr) (((ulong) &fp + tmp) &
+ ~(ulong) 0xFFFF);
+ fprintf(stderr, "Cannot determine thread, fp=%p, backtrace may not be correct.\n", fp);
+ }
+ if (fp > (uchar**) stack_bottom ||
+ fp < (uchar**) stack_bottom - thread_stack)
+ {
+ fprintf(stderr, "Bogus stack limit or frame pointer,\
+ fp=%p, stack_bottom=%p, thread_stack=%ld, aborting backtrace.\n",
+ fp, stack_bottom, thread_stack);
+ return;
+ }
+
+ fprintf(stderr, "Stack range sanity check OK, backtrace follows:\n");
+#if defined(__alpha__) && defined(__GNUC__)
+ fprintf(stderr, "Warning: Alpha stacks are difficult -\
+ will be taking some wild guesses, stack trace may be incorrect or \
+ terminate abruptly\n");
+ // On Alpha, we need to get pc
+ __asm __volatile__ ("bsr %0, do_next; do_next: "
+ :"=r"(pc)
+ :"r"(pc));
+#endif /* __alpha__ */
+
+ while (fp < (uchar**) stack_bottom)
+ {
+#ifdef __i386__
+ uchar** new_fp = (uchar**)*fp;
+ fprintf(stderr, "%p\n", frame_count == SIGRETURN_FRAME_COUNT ?
+ *(fp+17) : *(fp+1));
+#endif /* __386__ */
+
+#if defined(__alpha__) && defined(__GNUC__)
+ uchar** new_fp = find_prev_fp(pc, fp);
+ if (frame_count == SIGRETURN_FRAME_COUNT - 1)
+ {
+ new_fp += 90;
+ }
+
+ if (fp && pc)
+ {
+ pc = find_prev_pc(pc, fp);
+ if (pc)
+ fprintf(stderr, "%p\n", pc);
+ else
+ {
+ fprintf(stderr, "Not smart enough to deal with the rest\
+ of this stack\n");
+ goto end;
+ }
+ }
+ else
+ {
+ fprintf(stderr, "Not smart enough to deal with the rest of this stack\n");
+ goto end;
+ }
+#endif /* defined(__alpha__) && defined(__GNUC__) */
+ if (new_fp <= fp )
+ {
+ fprintf(stderr, "New value of fp=%p failed sanity check,\
+ terminating stack trace!\n", new_fp);
+ goto end;
+ }
+ fp = new_fp;
+ ++frame_count;
+ }
+
+ fprintf(stderr, "Stack trace seems successful - bottom reached\n");
+
+end:
+ fprintf(stderr, "Please read http://www.mysql.com/doc/U/s/Using_stack_trace.html and follow instructions on how to resolve the stack trace. Resolved\n\
+stack trace is much more helpful in diagnosing the problem, so please do \n\
+resolve it\n");
+}
+#endif /* HAVE_LINUXTHREADS */
+#endif /* HAVE_STACKTRACE */
+
+/* Produce a core for the thread */
+
+#ifdef HAVE_WRITE_CORE
+void write_core(int sig)
+{
+ signal(sig, SIG_DFL);
+ if (fork() != 0) exit(1); // Abort main program
+ // Core will be written at exit
+}
+#endif /* HAVE_WRITE_CORE */
diff --git a/sql/stacktrace.h b/sql/stacktrace.h
new file mode 100644
index 00000000000..b6c0ec43a0f
--- /dev/null
+++ b/sql/stacktrace.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2000 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef HAVE_LINUXTHREADS
+#if defined(HAVE_STACKTRACE) || (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__)))
+#undef HAVE_STACKTRACE
+#define HAVE_STACKTRACE
+
+extern char* __bss_start;
+extern char* heap_start;
+
+#define init_stacktrace() { heap_start = (char*) &__bss_start; }
+void print_stacktrace(gptr stack_bottom, ulong thread_stack);
+void safe_print_str(const char* name, const char* val, int max_len);
+#endif /* (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) */
+
+#define HAVE_WRITE_CORE
+void write_core(int sig);
+#endif /* HAVE_LINUXTHREADS */
+
+/* Define empty prototypes for functions that are not implemented */
+#ifndef HAVE_STACKTRACE
+#define init_stacktrace() {}
+#define print_stacktrace(A,B) {}
+#define safe_print_str(A,B,C) {}
+#endif /* HAVE_STACKTRACE */
+
+#ifndef HAVE_WRITE_CORE
+#define write_core(A) {}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/sql/table.h b/sql/table.h
index a3b361742c5..b627a158556 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -41,6 +41,8 @@ typedef struct st_grant_info
uint want_privilege;
} GRANT_INFO;
+enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2};
+
/* Table cache entry struct */
class Field_timestamp;
@@ -83,10 +85,11 @@ struct st_table {
uint blob_ptr_size; /* 4 or 8 */
uint next_number_key_offset;
int current_lock; /* Type of lock on table */
+ enum tmp_table_type tmp_table;
my_bool copy_blobs; /* copy_blobs when storing */
my_bool null_row; /* All columns are null */
my_bool maybe_null,outer_join; /* Used with OUTER JOIN */
- my_bool distinct,tmp_table,const_table;
+ my_bool distinct,const_table;
my_bool key_read;
my_bool crypted;
my_bool db_low_byte_first; /* Portable row format */
diff --git a/support-files/Makefile.am b/support-files/Makefile.am
index 164fd50a89c..00e8b13b12c 100644
--- a/support-files/Makefile.am
+++ b/support-files/Makefile.am
@@ -18,7 +18,6 @@
## Process this file with automake to create Makefile.in
EXTRA_DIST = mysql.spec.sh \
- mysql-max.spec.sh \
my-small.cnf.sh \
my-medium.cnf.sh \
my-large.cnf.sh \
@@ -34,7 +33,6 @@ pkgdata_DATA = my-small.cnf \
my-huge.cnf \
mysql-log-rotate \
mysql-@VERSION@.spec \
- mysql-max-@VERSION@.spec \
binary-configure
pkgdata_SCRIPTS = mysql.server
@@ -44,7 +42,6 @@ CLEANFILES = my-small.cnf \
my-large.cnf \
my-huge.cnf \
mysql.spec \
- mysql-max-@VERSION@.spec \
mysql-@VERSION@.spec \
mysql-log-rotate \
mysql.server \
@@ -55,10 +52,6 @@ mysql-@VERSION@.spec: mysql.spec
rm -f $@
cp mysql.spec $@
-mysql-max-@VERSION@.spec: mysql-max.spec
- rm -f $@
- cp mysql-max.spec $@
-
SUFFIXES = .sh
.sh:
diff --git a/support-files/my-huge.cnf.sh b/support-files/my-huge.cnf.sh
index 7917ab9d9f1..103a6c16cfd 100644
--- a/support-files/my-huge.cnf.sh
+++ b/support-files/my-huge.cnf.sh
@@ -47,10 +47,10 @@ server-id = 1
#set-variable = bdb_max_lock=100000
# Uncomment the following if you are using Innobase tables
+#innodb_data_file_path = ibdata1:2000M;ibdata2:2000M
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
-#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
diff --git a/support-files/my-large.cnf.sh b/support-files/my-large.cnf.sh
index d4e28c14019..f314566c5fb 100644
--- a/support-files/my-large.cnf.sh
+++ b/support-files/my-large.cnf.sh
@@ -43,10 +43,10 @@ server-id = 1
#set-variable = bdb_max_lock=100000
# Uncomment the following if you are using Innobase tables
+#innodb_data_file_path = ibdata1:1000M
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
-#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
diff --git a/support-files/my-medium.cnf.sh b/support-files/my-medium.cnf.sh
index d25345502e2..675241b25dd 100644
--- a/support-files/my-medium.cnf.sh
+++ b/support-files/my-medium.cnf.sh
@@ -45,10 +45,10 @@ server-id = 1
#set-variable = bdb_max_lock=10000
# Uncomment the following if you are using Innobase tables
+#innodb_data_file_path = ibdata1:400M
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
-#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
diff --git a/support-files/my-small.cnf.sh b/support-files/my-small.cnf.sh
index 6cafa8281fe..cad0e10e684 100644
--- a/support-files/my-small.cnf.sh
+++ b/support-files/my-small.cnf.sh
@@ -42,10 +42,10 @@ server-id = 1
#skip-bdb
# Uncomment the following if you are using Innobase tables
+#innodb_data_file_path = ibdata1:100M
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
-#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M