commit    d82a86e61b04c0d78a3216d97a346c24bd2dd218 (patch)
tree      c3801de3a55a3908dafdbc48f1bb034307fc85fc
parent    fe1842d9ee1a7e51f74ca192d3181968f16a0b29 (diff)
parent    011b141574d8b4c07b9f6b0f8c2f44b18d751646 (diff)
author    unknown <monty@hundin.mysql.fi>  2001-06-05 03:48:25 +0300
committer unknown <monty@hundin.mysql.fi>  2001-06-05 03:48:25 +0300
download  mariadb-git-d82a86e61b04c0d78a3216d97a346c24bd2dd218.tar.gz

merge

client/mysqltest.c: Auto merged
sql/mysqld.cc: Auto merged
-rw-r--r--  .bzrignore | 1
-rw-r--r--  BitKeeper/etc/logging_ok | 3
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/australia.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/australia.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/austria.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/austria.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/canada.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/canada.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/czech-republic.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/czech-republic.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/germany.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/germany.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/great-britain.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/great-britain.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/hungary.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/hungary.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/israel.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/israel.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/italy.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/italy.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/japan.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/japan.txt | 0
-rw-r--r--  Docs/Flags/latvia.eps | 99
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/russia.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/russia.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/south-korea.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/south-korea.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/sweden.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/sweden.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/taiwan.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/taiwan.txt | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/usa.eps | 0
-rw-r--r-- [-rwxr-xr-x]  Docs/Flags/usa.txt | 0
-rw-r--r--  Docs/manual.texi | 73
-rw-r--r--  client/mysqltest.c | 11
-rw-r--r--  include/m_string.h | 13
-rw-r--r--  include/violite.h | 3
-rw-r--r--  innobase/btr/btr0btr.c | 123
-rw-r--r--  innobase/btr/btr0cur.c | 9
-rw-r--r--  innobase/btr/btr0sea.c | 11
-rw-r--r--  innobase/include/btr0btr.h | 3
-rw-r--r--  innobase/include/btr0cur.h | 14
-rw-r--r--  innobase/include/btr0pcur.h | 6
-rw-r--r--  innobase/include/btr0pcur.ic | 6
-rw-r--r--  innobase/include/row0mysql.h | 14
-rw-r--r--  innobase/include/trx0trx.h | 14
-rw-r--r--  innobase/page/page0page.c | 142
-rw-r--r--  innobase/rem/rem0cmp.c | 4
-rw-r--r--  innobase/row/row0mysql.c | 143
-rw-r--r--  innobase/row/row0sel.c | 121
-rw-r--r--  innobase/trx/trx0trx.c | 22
-rw-r--r--  libmysqld/Makefile.am | 4
-rw-r--r--  libmysqld/lib_sql.cc | 4
-rw-r--r--  libmysqld/libmysqld.c | 29
-rw-r--r--  mysql-test/r/innodb.result | 2
-rw-r--r--  mysql-test/t/select.test | 2
-rw-r--r--  mysys/getvar.c | 2
-rw-r--r--  mysys/tree.c | 2
-rw-r--r--  sql-bench/Comments/postgres.benchmark | 77
-rw-r--r--  sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 20
-rw-r--r--  sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 102
-rw-r--r--  sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 14
-rw-r--r--  sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 19
-rw-r--r--  sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 30
-rw-r--r--  sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 18
-rw-r--r--  sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 103
-rw-r--r--  sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 36
-rw-r--r--  sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg | 14
-rw-r--r--  sql-bench/bench-init.pl.sh | 2
-rw-r--r--  sql-bench/limits/pg.cfg | 79
-rw-r--r--  sql-bench/server-cfg.sh | 110
-rw-r--r--  sql-bench/test-connect.sh | 2
-rw-r--r--  sql/ha_gemini.cc | 5
-rw-r--r--  sql/ha_innobase.cc | 56
-rw-r--r--  sql/ha_innobase.h | 2
-rw-r--r--  sql/mysqld.cc | 9
-rwxr-xr-x  tests/fork_big.pl | 30
-rw-r--r--  vio/vio.c | 10
78 files changed, 1373 insertions, 245 deletions
diff --git a/.bzrignore b/.bzrignore
index 716785b1665..01abc218748 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -256,6 +256,7 @@ libmysqld/sql_yacc.cc
libmysqld/table.cc
libmysqld/thr_malloc.cc
libmysqld/time.cc
+libmysqld/uniques.cc
libmysqld/unireg.cc
libtool
linked_client_sources
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 79984519528..c3d008f6c0c 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -4,5 +4,8 @@ monty@hundin.mysql.fi
monty@work.mysql.com
mwagner@evoq.mwagner.org
paul@central.snake.net
+paul@teton.kitebird.com
sasha@mysql.sashanet.com
+serg@serg.mysql.com
+tim@threads.polyesthetic.msg
tonu@hundin.mysql.fi
diff --git a/Docs/Flags/australia.eps b/Docs/Flags/australia.eps
index f98c03e2c83..f98c03e2c83 100755..100644
--- a/Docs/Flags/australia.eps
+++ b/Docs/Flags/australia.eps
diff --git a/Docs/Flags/australia.txt b/Docs/Flags/australia.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/australia.txt
+++ b/Docs/Flags/australia.txt
diff --git a/Docs/Flags/austria.eps b/Docs/Flags/austria.eps
index 7a0b56f3690..7a0b56f3690 100755..100644
--- a/Docs/Flags/austria.eps
+++ b/Docs/Flags/austria.eps
diff --git a/Docs/Flags/austria.txt b/Docs/Flags/austria.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/austria.txt
+++ b/Docs/Flags/austria.txt
diff --git a/Docs/Flags/canada.eps b/Docs/Flags/canada.eps
index b770266de60..b770266de60 100755..100644
--- a/Docs/Flags/canada.eps
+++ b/Docs/Flags/canada.eps
diff --git a/Docs/Flags/canada.txt b/Docs/Flags/canada.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/canada.txt
+++ b/Docs/Flags/canada.txt
diff --git a/Docs/Flags/czech-republic.eps b/Docs/Flags/czech-republic.eps
index afa50e9a82d..afa50e9a82d 100755..100644
--- a/Docs/Flags/czech-republic.eps
+++ b/Docs/Flags/czech-republic.eps
diff --git a/Docs/Flags/czech-republic.txt b/Docs/Flags/czech-republic.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/czech-republic.txt
+++ b/Docs/Flags/czech-republic.txt
diff --git a/Docs/Flags/germany.eps b/Docs/Flags/germany.eps
index 568543e3680..568543e3680 100755..100644
--- a/Docs/Flags/germany.eps
+++ b/Docs/Flags/germany.eps
diff --git a/Docs/Flags/germany.txt b/Docs/Flags/germany.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/germany.txt
+++ b/Docs/Flags/germany.txt
diff --git a/Docs/Flags/great-britain.eps b/Docs/Flags/great-britain.eps
index 97a7ffc9b57..97a7ffc9b57 100755..100644
--- a/Docs/Flags/great-britain.eps
+++ b/Docs/Flags/great-britain.eps
diff --git a/Docs/Flags/great-britain.txt b/Docs/Flags/great-britain.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/great-britain.txt
+++ b/Docs/Flags/great-britain.txt
diff --git a/Docs/Flags/hungary.eps b/Docs/Flags/hungary.eps
index e405fc3cffe..e405fc3cffe 100755..100644
--- a/Docs/Flags/hungary.eps
+++ b/Docs/Flags/hungary.eps
diff --git a/Docs/Flags/hungary.txt b/Docs/Flags/hungary.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/hungary.txt
+++ b/Docs/Flags/hungary.txt
diff --git a/Docs/Flags/israel.eps b/Docs/Flags/israel.eps
index 3d3059a907a..3d3059a907a 100755..100644
--- a/Docs/Flags/israel.eps
+++ b/Docs/Flags/israel.eps
diff --git a/Docs/Flags/israel.txt b/Docs/Flags/israel.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/israel.txt
+++ b/Docs/Flags/israel.txt
diff --git a/Docs/Flags/italy.eps b/Docs/Flags/italy.eps
index 20c7c7d5da3..20c7c7d5da3 100755..100644
--- a/Docs/Flags/italy.eps
+++ b/Docs/Flags/italy.eps
diff --git a/Docs/Flags/italy.txt b/Docs/Flags/italy.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/italy.txt
+++ b/Docs/Flags/italy.txt
diff --git a/Docs/Flags/japan.eps b/Docs/Flags/japan.eps
index 8dee6e497ba..8dee6e497ba 100755..100644
--- a/Docs/Flags/japan.eps
+++ b/Docs/Flags/japan.eps
diff --git a/Docs/Flags/japan.txt b/Docs/Flags/japan.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/japan.txt
+++ b/Docs/Flags/japan.txt
diff --git a/Docs/Flags/latvia.eps b/Docs/Flags/latvia.eps
new file mode 100644
index 00000000000..9c1f81f3ddc
--- /dev/null
+++ b/Docs/Flags/latvia.eps
@@ -0,0 +1,99 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Creator: pnmtops
+%%Title: latvia.ps
+%%Pages: 1
+%%BoundingBox: 295 365 317 396
+%%EndComments
+/readstring {
+ currentfile exch readhexstring pop
+} bind def
+/rpicstr 32 string def
+/gpicstr 32 string def
+/bpicstr 32 string def
+%%EndProlog
+%%Page: 1 1
+gsave
+295.44 365.64 translate
+21.12 30.72 scale
+0.5 0.5 translate 90 rotate -0.5 -0.5 translate
+32 22 8
+[ 32 0 0 -22 0 22 ]
+{ rpicstr readstring }
+{ gpicstr readstring }
+{ bpicstr readstring }
+true 3
+colorimage
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000009494949494949494949494949494949494949494949494
+949494949494940000101010101010101010101010101010101010101010
+101010101010101010000018181818181818181818181818181818181818
+181818181818181818181800009494949494949494949494949494949494
+949494949494949494949494940000101010101010101010101010101010
+101010101010101010101010101010000018181818181818181818181818
+181818181818181818181818181818181800009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+181818181818181818181818181818181818181818181800009494949494
+949494949494949494949494949494949494949494949494940000101010
+101010101010101010101010101010101010101010101010101010000018
+181818181818181818181818181818181818181818181818181818181800
+009494949494949494949494949494949494949494949494949494949494
+940000101010101010101010101010101010101010101010101010101010
+101010000018181818181818181818181818181818181818181818181818
+181818181800009494949494949494949494949494949494949494949494
+949494949494940000101010101010101010101010101010101010101010
+101010101010101010000018181818181818181818181818181818181818
+181818181818181818181800009494949494949494949494949494949494
+949494949494949494949494940000101010101010101010101010101010
+101010101010101010101010101010000018181818181818181818181818
+181818181818181818181818181818181800009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+18181818181818181818181818181818181818181818180000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff00009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+181818181818181818181818181818181818181818181800009494949494
+949494949494949494949494949494949494949494949494940000101010
+101010101010101010101010101010101010101010101010101010000018
+181818181818181818181818181818181818181818181818181818181800
+009494949494949494949494949494949494949494949494949494949494
+940000101010101010101010101010101010101010101010101010101010
+101010000018181818181818181818181818181818181818181818181818
+181818181800009494949494949494949494949494949494949494949494
+949494949494940000101010101010101010101010101010101010101010
+101010101010101010000018181818181818181818181818181818181818
+181818181818181818181800009494949494949494949494949494949494
+949494949494949494949494940000101010101010101010101010101010
+101010101010101010101010101010000018181818181818181818181818
+181818181818181818181818181818181800009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+181818181818181818181818181818181818181818181800009494949494
+949494949494949494949494949494949494949494949494940000101010
+101010101010101010101010101010101010101010101010101010000018
+181818181818181818181818181818181818181818181818181818181800
+009494949494949494949494949494949494949494949494949494949494
+940000101010101010101010101010101010101010101010101010101010
+101010000018181818181818181818181818181818181818181818181818
+181818181800000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000
+grestore
+showpage
+%%Trailer
diff --git a/Docs/Flags/russia.eps b/Docs/Flags/russia.eps
index 85c5899d891..85c5899d891 100755..100644
--- a/Docs/Flags/russia.eps
+++ b/Docs/Flags/russia.eps
diff --git a/Docs/Flags/russia.txt b/Docs/Flags/russia.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/russia.txt
+++ b/Docs/Flags/russia.txt
diff --git a/Docs/Flags/south-korea.eps b/Docs/Flags/south-korea.eps
index a363ab514c4..a363ab514c4 100755..100644
--- a/Docs/Flags/south-korea.eps
+++ b/Docs/Flags/south-korea.eps
diff --git a/Docs/Flags/south-korea.txt b/Docs/Flags/south-korea.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/south-korea.txt
+++ b/Docs/Flags/south-korea.txt
diff --git a/Docs/Flags/sweden.eps b/Docs/Flags/sweden.eps
index 47cd1fa3e9c..47cd1fa3e9c 100755..100644
--- a/Docs/Flags/sweden.eps
+++ b/Docs/Flags/sweden.eps
diff --git a/Docs/Flags/sweden.txt b/Docs/Flags/sweden.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/sweden.txt
+++ b/Docs/Flags/sweden.txt
diff --git a/Docs/Flags/taiwan.eps b/Docs/Flags/taiwan.eps
index a514bdf2af4..a514bdf2af4 100755..100644
--- a/Docs/Flags/taiwan.eps
+++ b/Docs/Flags/taiwan.eps
diff --git a/Docs/Flags/taiwan.txt b/Docs/Flags/taiwan.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/taiwan.txt
+++ b/Docs/Flags/taiwan.txt
diff --git a/Docs/Flags/usa.eps b/Docs/Flags/usa.eps
index 31bd9996d11..31bd9996d11 100755..100644
--- a/Docs/Flags/usa.eps
+++ b/Docs/Flags/usa.eps
diff --git a/Docs/Flags/usa.txt b/Docs/Flags/usa.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/usa.txt
+++ b/Docs/Flags/usa.txt
diff --git a/Docs/manual.texi b/Docs/manual.texi
index d17e51ba26b..ec92d892a7f 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -2137,7 +2137,7 @@ The server can provide error messages to clients in many languages.
@item
Clients may connect to the @strong{MySQL} server using TCP/IP Sockets,
-Unix Sockets (Unixes), or Named Pipes (NT).
+Unix Sockets (Unix), or Named Pipes (NT).
@item
The @strong{MySQL}-specific @code{SHOW} command can be used to retrieve
@@ -5249,7 +5249,7 @@ clients can connect to both @strong{MySQL} versions.
The extended @strong{MySQL} binary distribution is marked with the
@code{-max} suffix and is configured with the same options as
-@code{mysqld-max}. @xref{mysqld-max}.
+@code{mysqld-max}. @xref{mysqld-max, @code{mysqld-max}}.
If you want to use the @code{MySQL-Max} RPM, you must first
install the standard @code{MySQL} RPM.
@@ -5590,8 +5590,8 @@ indicates the type of operating system for which the distribution is intended
@item
If you see a binary distribution marked with the @code{-max} prefix, this
means that the binary has support for transaction-safe tables and other
-features. @xref{mysqld-max}. Note that all binaries are built from
-the same @strong{MySQL} source distribution.
+features. @xref{mysqld-max, @code{mysqld-max}}. Note that all binaries
+are built from the same @strong{MySQL} source distribution.
@item
Add a user and group for @code{mysqld} to run as:
@@ -5603,8 +5603,8 @@ shell> useradd -g mysql mysql
These commands add the @code{mysql} group and the @code{mysql} user. The
syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-Unixes. They may also be called @code{adduser} and @code{addgroup}. You may
-wish to call the user and group something else instead of @code{mysql}.
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
@item
Change into the intended installation directory:
@@ -5647,7 +5647,8 @@ programs properly. @xref{Environment variables}.
@item scripts
This directory contains the @code{mysql_install_db} script used to initialize
-the server access permissions.
+the @code{mysql} database containing the grant tables that store the server
+access permissions.
@end table
@item
@@ -5713,7 +5714,7 @@ You can start the @strong{MySQL} server with the following command:
shell> bin/safe_mysqld --user=mysql &
@end example
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
@xref{Post-installation}.
@@ -6117,8 +6118,8 @@ shell> useradd -g mysql mysql
These commands add the @code{mysql} group, and the @code{mysql} user. The
syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-Unixes. They may also be called @code{adduser} and @code{addgroup}. You may
-wish to call the user and group something else instead of @code{mysql}.
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
@item
Unpack the distribution into the current directory:
@@ -7672,13 +7673,13 @@ To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV
signal, you can start @code{mysqld} with the @code{--core-file} option. Note
that you also probably need to raise the @code{core file size} by adding
@code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld}
-with @code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
+with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}.
To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can
start @code{mysqld} with the @code{--core-file} option. Note that you also probably
need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to
@code{safe_mysqld} or starting @code{safe_mysqld} with
-@code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
+@code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}.
If you are linking your own @strong{MySQL} client and get the error:
@@ -8006,7 +8007,7 @@ shell> nohup mysqld [options] &
@code{nohup} causes the command following it to ignore any @code{SIGHUP}
signal sent from the terminal. Alternatively, start the server by running
@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
If you get a problem when compiling mysys/get_opt.c, just remove the
line #define _NO_PROTO from the start of that file!
@@ -8263,7 +8264,8 @@ FreeBSD is also known to have a very low default file handle limit.
safe_mysqld or raise the limits for the @code{mysqld} user in /etc/login.conf
(and rebuild it with cap_mkdb /etc/login.conf). Also be sure you set the
appropriate class for this user in the password file if you are not
-using the default (use: chpass mysqld-user-name). @xref{safe_mysqld}.
+using the default (use: chpass mysqld-user-name). @xref{safe_mysqld,
+@code{safe_mysqld}}.
If you get problems with the current date in @strong{MySQL}, setting the
@code{TZ} variable will probably help. @xref{Environment variables}.
@@ -9679,7 +9681,7 @@ mysqld: Can't find file: 'host.frm'
The above may also happen with a binary @strong{MySQL} distribution if you
don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}!
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
You might need to run @code{mysql_install_db} as @code{root}. However,
if you prefer, you can run the @strong{MySQL} server as an unprivileged
@@ -9980,7 +9982,8 @@ system startup and shutdown, and is described more fully in
@item
By invoking @code{safe_mysqld}, which tries to determine the proper options
-for @code{mysqld} and then runs it with those options. @xref{safe_mysqld}.
+for @code{mysqld} and then runs it with those options. @xref{safe_mysqld,
+@code{safe_mysqld}}.
@item
On NT you should install @code{mysqld} as a service as follows:
@@ -10229,7 +10232,8 @@ though.
@item --core-file
Write a core file if @code{mysqld} dies. For some systems you must also
-specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld}.
+specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld,
+@code{safe_mysqld}}.
@item -h, --datadir=path
Path to the database root.
@@ -24295,6 +24299,14 @@ tables are:
@item Tables are compressed with @code{pack_isam} rather than with @code{myisampack}.
@end itemize
+If you want to convert an @code{ISAM} table to a @code{MyISAM} table so
+that you can use utilities such as @code{mysqlcheck}, use an @code{ALTER
+TABLE} statement:
+
+@example
+mysql> ALTER TABLE tbl_name TYPE = MYISAM;
+@end example
+
@cindex tables, @code{HEAP}
@node HEAP, BDB, ISAM, Table types
@section HEAP Tables
@@ -24422,7 +24434,7 @@ this. @xref{Table handler support}.
If you have downloaded a binary version of @strong{MySQL} that includes
support for BerkeleyDB, simply follow the instructions for installing a
binary version of @strong{MySQL}.
-@xref{Installing binary}. @xref{mysqld-max}.
+@xref{Installing binary}. @xref{mysqld-max, @code{mysqld-max}}.
To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL}
Version 3.23.34 or newer and configure @code{MySQL} with the
@@ -25534,7 +25546,7 @@ binary.
If you have downloaded a binary version of @strong{MySQL} that includes
support for InnoDB (mysqld-max), simply follow the instructions for
installing a binary version of @strong{MySQL}. @xref{Installing binary}.
-@xref{mysqld-max}.
+@xref{mysqld-max, @code{mysqld-max}}.
To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer
and configure @code{MySQL} with the @code{--with-innodb} option.
@@ -26309,7 +26321,7 @@ time will be longer.
Also the log buffer should be quite big, say 8 MB.
@strong{6.} (Relevant from 3.23.39 up.)
-In some versions of Linux and other Unixes flushing files to disk with the Unix
+In some versions of Linux and Unix, flushing files to disk with the Unix
@code{fdatasync} and other similar methods is surprisingly slow.
The default method InnoDB uses is the @code{fdatasync} function.
If you are not satisfied with the database write performance, you may
@@ -26590,11 +26602,11 @@ integer that can be stored in the specified integer type.
In disk i/o InnoDB uses asynchronous i/o. On Windows NT
it uses the native asynchronous i/o provided by the operating system.
-On Unixes InnoDB uses simulated asynchronous i/o built
+On Unix, InnoDB uses simulated asynchronous i/o built
into InnoDB: InnoDB creates a number of i/o threads to take care
of i/o operations, such as read-ahead. In a future version we will
add support for simulated aio on Windows NT and native aio on those
-Unixes which have one.
+versions of Unix which have one.
On Windows NT InnoDB uses non-buffered i/o. That means that the disk
pages InnoDB reads or writes are not buffered in the operating system
@@ -26605,7 +26617,7 @@ just define the raw disk in place of a data file in @file{my.cnf}.
You must give the exact size in bytes of the raw disk in @file{my.cnf},
because at startup InnoDB checks that the size of the file
is the same as specified in the configuration file. Using a raw disk
-you can on some Unixes perform non-buffered i/o.
+you can on some versions of Unix perform non-buffered i/o.
There are two read-ahead heuristics in InnoDB: sequential read-ahead
and random read-ahead. In sequential read-ahead InnoDB notices that
@@ -26784,7 +26796,7 @@ the maximum size for a table. The minimum tablespace size is 10 MB.
Contact information of Innobase Oy, producer of the InnoDB engine:
@example
-Website: www.innobase.fi
+Website: www.innodb.com
Heikki.Tuuri@@innobase.inet.fi
phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
InnoDB Oy Inc.
@@ -33177,7 +33189,7 @@ with the @code{-max} prefix. This makes it very easy to test out a
another @code{mysqld} binary in an existing installation. Just
run @code{configure} with the options you want and then install the
new @code{mysqld} binary as @code{mysqld-max} in the same directory
-where your old @code{mysqld} binary is. @xref{safe_mysqld}.
+where your old @code{mysqld} binary is. @xref{safe_mysqld, @code{safe_mysqld}}.
The @code{mysqld-max} RPM uses the above mentioned @code{safe_mysqld}
feature. It just installs the @code{mysqld-max} executable and
@@ -33425,7 +33437,7 @@ MY_PWD=`pwd` Check if we are starting this relative (for the binary
release) if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys
-a -x ./bin/mysqld
--------------------------------------------------------------------------
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
@end example
The above test should be successful, or you may encounter problems.
@item
@@ -33953,7 +33965,7 @@ server). The dump will contain SQL statements to create the table
and/or populate the table.
If you are doing a backup on the server, you should consider using
-the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy}.
+the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, @code{mysqlhotcopy}}.
@example
shell> mysqldump [OPTIONS] database [tables]
@@ -39158,7 +39170,8 @@ If you want to make a SQL level backup of a table, you can use
TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}.
Another way to back up a database is to use the @code{mysqldump} program or
-the @code{mysqlhotcopy script}. @xref{mysqldump}. @xref{mysqlhotcopy}.
+the @code{mysqlhotcopy script}. @xref{mysqldump, @code{mysqldump}}.
+@xref{mysqlhotcopy, @code{mysqlhotcopy}}.
@enumerate
@item
@@ -46468,8 +46481,8 @@ read by @code{mysql_options()}.
Added new options @code{--pager[=...]}, @code{--no-pager},
@code{--tee=...} and @code{--no-tee} to the @code{mysql} client. The
new corresponding interactive commands are @code{pager}, @code{nopager},
-@code{tee} and @code{notee}. @xref{mysql}, @code{mysql --help} and the
-interactive help for more information.
+@code{tee} and @code{notee}. @xref{mysql, @code{mysql}}, @code{mysql --help}
+and the interactive help for more information.
@item
Fixed crash when automatic repair of @code{MyISAM} table failed.
@item
diff --git a/client/mysqltest.c b/client/mysqltest.c
index 12d8ed2ca18..160f5c9d092 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -1018,7 +1018,8 @@ int do_connect(struct st_query* q)
if (!mysql_init(&next_con->mysql))
die("Failed on mysql_init()");
- con_sock=fn_format(buff, con_sock, TMPDIR, "",0);
+ if (con_sock)
+ con_sock=fn_format(buff, con_sock, TMPDIR, "",0);
if (!con_db[0])
con_db=db;
con_error = 1;
@@ -1369,6 +1370,7 @@ struct option long_options[] =
{"silent", no_argument, 0, 'q'},
{"sleep", required_argument, 0, 'T'},
{"socket", required_argument, 0, 'S'},
+ {"test-file", required_argument, 0, 'x'},
{"tmpdir", required_argument, 0, 't'},
{"user", required_argument, 0, 'u'},
{"verbose", no_argument, 0, 'v'},
@@ -1408,6 +1410,7 @@ void usage()
-T, --sleep=# Sleep always this many seconds on sleep commands\n\
-r, --record Record output of test_file into result file.\n\
-R, --result-file=... Read/Store result from/in this file.\n\
+ -x, --test-file=... Read test from/in this file (default stdin).\n\
-v, --verbose Write more.\n\
-q, --quiet, --silent Suppress all normal output.\n\
-V, --version Output version information and exit.\n\
@@ -1422,7 +1425,7 @@ int parse_args(int argc, char **argv)
load_defaults("my",load_default_groups,&argc,&argv);
default_argv= argv;
- while((c = getopt_long(argc, argv, "h:p::u:P:D:S:R:t:T:#:?rvVq",
+ while((c = getopt_long(argc, argv, "h:p::u:P:D:S:R:x:t:T:#:?rvVq",
long_options, &option_index)) != EOF)
{
switch(c) {
@@ -1441,6 +1444,10 @@ int parse_args(int argc, char **argv)
case 'R':
result_file = optarg;
break;
+ case 'x':
+ if (!(*cur_file = my_fopen(optarg, O_RDONLY, MYF(MY_WME))))
+ die("Could not open %s: errno = %d", optarg, errno);
+ break;
case 'p':
if (optarg)
{
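
Note on the mysqltest.c hunks above: the new -x / --test-file option lets the test driver read its script from a named file instead of stdin, symmetrical with the existing -R / --result-file option. A typical invocation, with hypothetical file names, would look like:

    mysqltest --test-file=t/check.test --result-file=r/check.result --record
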
diff --git a/include/m_string.h b/include/m_string.h
index ce5197f17af..84c42e0c8b9 100644
--- a/include/m_string.h
+++ b/include/m_string.h
@@ -69,10 +69,6 @@
# define memmove(d, s, n) bmove((d), (s), (n)) /* our bmove */
#endif
-#if defined(HAVE_STPCPY) && !defined(HAVE_mit_thread)
-#define strmov(A,B) stpcpy((A),(B))
-#endif
-
/* Unixware 7 */
#if !defined(HAVE_BFILL)
# define bfill(A,B,C) memset((A),(C),(B))
@@ -90,6 +86,13 @@
extern "C" {
#endif
+#if defined(HAVE_STPCPY) && !defined(HAVE_mit_thread)
+#define strmov(A,B) stpcpy((A),(B))
+#ifndef stpcpy
+extern char *stpcpy(char *, const char *); /* For AIX with gcc 2.95.3 */
+#endif
+#endif
+
extern char NEAR _dig_vec[]; /* Declared in int2str() */
#ifdef BAD_STRING_COMPILER
@@ -148,7 +151,7 @@ extern void bchange(char *dst,uint old_len,const char *src,
uint new_len,uint tot_len);
extern void strappend(char *s,uint len,pchar fill);
extern char *strend(const char *s);
-extern char *strcend(const char *, pchar);
+extern char *strcend(const char *, pchar);
extern char *strfield(char *src,int fields,int chars,int blanks,
int tabch);
extern char *strfill(my_string s,uint len,pchar fill);
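
Context for the m_string.h change above: the strmov-to-stpcpy mapping is moved below the extern "C" opening and given an explicit prototype because gcc 2.95.3 on AIX does not declare stpcpy. The value of the mapping is that stpcpy(), unlike strcpy(), returns a pointer to the terminating NUL, so copies can be chained. A minimal standalone sketch of that behaviour (not MySQL code; assumes a POSIX stpcpy is available):

    #define _POSIX_C_SOURCE 200809L   /* ask glibc for the stpcpy() prototype */
    #include <stdio.h>
    #include <string.h>

    #define strmov(A,B) stpcpy((A),(B))    /* same mapping m_string.h uses */

    int main(void)
    {
      char buf[32];
      char *end = strmov(strmov(buf, "my"), "sql");   /* end -> buf's '\0' */
      printf("%s %d\n", buf, (int) (end - buf));      /* prints: mysql 5 */
      return 0;
    }
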
diff --git a/include/violite.h b/include/violite.h
index a88b5c0db51..ffef4d68c73 100644
--- a/include/violite.h
+++ b/include/violite.h
@@ -210,9 +210,6 @@ struct st_VioSSLConnectorFd *new_VioSSLConnectorFd(
struct st_VioSSLAcceptorFd *new_VioSSLAcceptorFd(
const char* key_file,const char* cert_file,const char* ca_file,const char* ca_path);
Vio* new_VioSSL(struct st_VioSSLAcceptorFd* fd, Vio* sd,int state);
-//static int
-//init_bio_(struct st_VioSSLAcceptorFd* fd, Vio* sd, int state, int bio_flags);
-//void report_errors();
#ifdef __cplusplus
}
diff --git a/innobase/btr/btr0btr.c b/innobase/btr/btr0btr.c
index 63e70eb1b83..2507f805cd6 100644
--- a/innobase/btr/btr0btr.c
+++ b/innobase/btr/btr0btr.c
@@ -2239,11 +2239,92 @@ btr_check_node_ptr(
}
/****************************************************************
+Checks the size and number of fields in a record based on the definition of
+the index. */
+static
+ibool
+btr_index_rec_validate(
+/*====================*/
+ /* out: TRUE if ok */
+ rec_t* rec, /* in: index record */
+ dict_index_t* index) /* in: index */
+{
+ dtype_t* type;
+ byte* data;
+ ulint len;
+ ulint n;
+ ulint i;
+
+ n = dict_index_get_n_fields(index);
+
+ if (rec_get_n_fields(rec) != n) {
+ fprintf(stderr, "Record has %lu fields, should have %lu\n",
+ rec_get_n_fields(rec), n);
+
+ return(FALSE);
+ }
+
+ for (i = 0; i < n; i++) {
+ data = rec_get_nth_field(rec, i, &len);
+
+ type = dict_index_get_nth_type(index, i);
+
+ if (len != UNIV_SQL_NULL && dtype_is_fixed_size(type)
+ && len != dtype_get_fixed_size(type)) {
+ fprintf(stderr,
+ "Record field %lu len is %lu, should be %lu\n",
+ i, len, dtype_get_fixed_size(type));
+
+ return(FALSE);
+ }
+ }
+
+ return(TRUE);
+}
+
+/****************************************************************
+Checks the size and number of fields in records based on the definition of
+the index. */
+static
+ibool
+btr_index_page_validate(
+/*====================*/
+ /* out: TRUE if ok */
+ page_t* page, /* in: index page */
+ dict_index_t* index) /* in: index */
+{
+ rec_t* rec;
+ page_cur_t cur;
+ ibool ret = TRUE;
+
+ page_cur_set_before_first(page, &cur);
+ page_cur_move_to_next(&cur);
+
+ for (;;) {
+ rec = (&cur)->rec;
+
+ if (page_cur_is_after_last(&cur)) {
+ break;
+ }
+
+ if (!btr_index_rec_validate(rec, index)) {
+
+ ret = FALSE;
+ }
+
+ page_cur_move_to_next(&cur);
+ }
+
+ return(ret);
+}
+
+/****************************************************************
Validates index tree level. */
static
-void
+ibool
btr_validate_level(
/*===============*/
+ /* out: TRUE if ok */
dict_tree_t* tree, /* in: index tree */
ulint level) /* in: level number */
{
@@ -2260,7 +2341,9 @@ btr_validate_level(
page_cur_t cursor;
mem_heap_t* heap;
dtuple_t* node_ptr_tuple;
-
+ ibool ret = TRUE;
+ dict_index_t* index;
+
mtr_start(&mtr);
page = btr_root_get(tree, &mtr);
@@ -2278,13 +2361,31 @@ btr_validate_level(
page = btr_node_ptr_get_child(node_ptr, &mtr);
}
+ index = UT_LIST_GET_FIRST(tree->tree_indexes);
+
/* Now we are on the desired level */
loop:
mtr_x_lock(dict_tree_get_lock(tree), &mtr);
- /* Check ordering of records */
- page_validate(page, UT_LIST_GET_FIRST(tree->tree_indexes));
+ /* Check ordering etc. of records */
+
+ if (!page_validate(page, index)) {
+ fprintf(stderr, "Error in page %lu in index %s\n",
+ buf_frame_get_page_no(page), index->name);
+ ret = FALSE;
+ }
+
+ if (level == 0) {
+ if (!btr_index_page_validate(page, index)) {
+ fprintf(stderr,
+ "Error in page %lu in index %s\n",
+ buf_frame_get_page_no(page), index->name);
+
+ ret = FALSE;
+ }
+ }
+
ut_a(btr_page_get_level(page, &mtr) == level);
right_page_no = btr_page_get_next(page, &mtr);
@@ -2374,14 +2475,17 @@ loop:
goto loop;
}
+
+ return(ret);
}
/******************************************************************
Checks the consistency of an index tree. */
-void
+ibool
btr_validate_tree(
/*==============*/
+ /* out: TRUE if ok */
dict_tree_t* tree) /* in: tree */
{
mtr_t mtr;
@@ -2397,8 +2501,15 @@ btr_validate_tree(
for (i = 0; i <= n; i++) {
- btr_validate_level(tree, n - i);
+ if (!btr_validate_level(tree, n - i)) {
+
+ mtr_commit(&mtr);
+
+ return(FALSE);
+ }
}
mtr_commit(&mtr);
+
+ return(TRUE);
}
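
The net effect of the btr0btr.c changes above is that index-tree validation reports corruption instead of asserting: btr_validate_level() and btr_validate_tree() now return ibool, and leaf-level records are additionally checked for field count and fixed-size field lengths against the data dictionary. That return value is what lets a caller turn corruption into an error code rather than a server crash; the pattern used by row_check_table_for_mysql() later in this patch is simply:

    if (!btr_validate_tree(index->tree)) {
            ret = DB_ERROR;     /* report corruption, keep the server running */
    }
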
diff --git a/innobase/btr/btr0cur.c b/innobase/btr/btr0cur.c
index e0e59152895..a8680c6b380 100644
--- a/innobase/btr/btr0cur.c
+++ b/innobase/btr/btr0cur.c
@@ -163,9 +163,14 @@ btr_cur_search_to_nth_level(
BTR_INSERT and BTR_ESTIMATE;
cursor->left_page is used to store a pointer
to the left neighbor page, in the cases
- BTR_SEARCH_PREV and BTR_MODIFY_PREV */
+ BTR_SEARCH_PREV and BTR_MODIFY_PREV;
+ NOTE that if has_search_latch
+ is != 0, we maybe do not have a latch set
+ on the cursor page, we assume
+ the caller uses his search latch
+ to protect the record! */
btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
- s- or x-latched */
+ s- or x-latched, but see also above! */
ulint has_search_latch,/* in: info on the latch mode the
caller currently has on btr_search_latch:
RW_S_LATCH, or 0 */
diff --git a/innobase/btr/btr0sea.c b/innobase/btr/btr0sea.c
index 318bf97e7d2..ac4e7c5ba3f 100644
--- a/innobase/btr/btr0sea.c
+++ b/innobase/btr/btr0sea.c
@@ -601,7 +601,12 @@ btr_search_guess_on_hash(
btr_search_t* info, /* in: index search info */
dtuple_t* tuple, /* in: logical record */
ulint mode, /* in: PAGE_CUR_L, ... */
- ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
+ ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
+ NOTE that only if has_search_latch
+ is 0, we will have a latch set on
+ the cursor page, otherwise we assume
+ the caller uses his search latch
+ to protect the record! */
btr_cur_t* cursor, /* out: tree cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
@@ -722,7 +727,9 @@ btr_search_guess_on_hash(
}
if (!success) {
- btr_leaf_page_release(page, latch_mode, mtr);
+ if (!has_search_latch) {
+ btr_leaf_page_release(page, latch_mode, mtr);
+ }
goto failure;
}
diff --git a/innobase/include/btr0btr.h b/innobase/include/btr0btr.h
index d2ac9952695..f8a3000ca8a 100644
--- a/innobase/include/btr0btr.h
+++ b/innobase/include/btr0btr.h
@@ -376,9 +376,10 @@ btr_print_tree(
/******************************************************************
Checks the consistency of an index tree. */
-void
+ibool
btr_validate_tree(
/*==============*/
+ /* out: TRUE if ok */
dict_tree_t* tree); /* in: tree */
#define BTR_N_LEAF_PAGES 1
diff --git a/innobase/include/btr0cur.h b/innobase/include/btr0cur.h
index 79ec56c8e50..4ce2177bfe8 100644
--- a/innobase/include/btr0cur.h
+++ b/innobase/include/btr0cur.h
@@ -98,12 +98,18 @@ btr_cur_search_to_nth_level(
the previous page of the record! Inserts
should always be made using PAGE_CUR_LE to
search the position! */
- ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
+ ulint latch_mode, /* in: BTR_SEARCH_LEAF, ..., ORed with
+ BTR_INSERT and BTR_ESTIMATE;
cursor->left_page is used to store a pointer
to the left neighbor page, in the cases
- BTR_SEARCH_PREV and BTR_MODIFY_PREV */
- btr_cur_t* cursor, /* out: tree cursor; the cursor page is s- or
- x-latched */
+ BTR_SEARCH_PREV and BTR_MODIFY_PREV;
+ NOTE that if has_search_latch
+ is != 0, we maybe do not have a latch set
+ on the cursor page, we assume
+ the caller uses his search latch
+ to protect the record! */
+ btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
+ s- or x-latched, but see also above! */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
RW_S_LATCH, or 0 */
diff --git a/innobase/include/btr0pcur.h b/innobase/include/btr0pcur.h
index c07d5199d8c..6465093e3c1 100644
--- a/innobase/include/btr0pcur.h
+++ b/innobase/include/btr0pcur.h
@@ -87,7 +87,11 @@ btr_pcur_open_with_no_init(
PAGE_CUR_LE, not PAGE_CUR_GE, as the latter
may end up on the previous page of the
record! */
- ulint latch_mode,/* in: BTR_SEARCH_LEAF, ... */
+ ulint latch_mode,/* in: BTR_SEARCH_LEAF, ...;
+ NOTE that if has_search_latch != 0 then
+ we maybe do not acquire a latch on the cursor
+ page, but assume that the caller uses his
+ btr search latch to protect the record! */
btr_pcur_t* cursor, /* in: memory buffer for persistent cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
diff --git a/innobase/include/btr0pcur.ic b/innobase/include/btr0pcur.ic
index 7f31f8fe502..8e927689208 100644
--- a/innobase/include/btr0pcur.ic
+++ b/innobase/include/btr0pcur.ic
@@ -492,7 +492,11 @@ btr_pcur_open_with_no_init(
PAGE_CUR_LE, not PAGE_CUR_GE, as the latter
may end up on the previous page of the
record! */
- ulint latch_mode,/* in: BTR_SEARCH_LEAF, ... */
+ ulint latch_mode,/* in: BTR_SEARCH_LEAF, ...;
+ NOTE that if has_search_latch != 0 then
+ we maybe do not acquire a latch on the cursor
+ page, but assume that the caller uses his
+ btr search latch to protect the record! */
btr_pcur_t* cursor, /* in: memory buffer for persistent cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
diff --git a/innobase/include/row0mysql.h b/innobase/include/row0mysql.h
index d47fa729dce..554da2c035c 100644
--- a/innobase/include/row0mysql.h
+++ b/innobase/include/row0mysql.h
@@ -229,6 +229,15 @@ row_rename_table_for_mysql(
char* old_name, /* in: old table name */
char* new_name, /* in: new table name */
trx_t* trx); /* in: transaction handle */
+/*************************************************************************
+Checks a table for corruption. */
+
+ulint
+row_check_table_for_mysql(
+/*======================*/
+ /* out: DB_ERROR or DB_SUCCESS */
+ row_prebuilt_t* prebuilt); /* in: prebuilt struct in MySQL
+ handle */
/* A struct describing a place for an individual column in the MySQL
row format which is presented to the table handler in ha_innobase.
@@ -281,7 +290,8 @@ struct row_prebuilt_struct {
is set to TRUE */
dict_index_t* index; /* current index for a search, if any */
ulint template_type; /* ROW_MYSQL_WHOLE_ROW,
- ROW_MYSQL_REC_FIELDS or
+ ROW_MYSQL_REC_FIELDS,
+ ROW_MYSQL_DUMMY_TEMPLATE, or
ROW_MYSQL_NO_TEMPLATE */
ulint n_template; /* number of elements in the
template */
@@ -359,6 +369,8 @@ struct row_prebuilt_struct {
#define ROW_MYSQL_WHOLE_ROW 0
#define ROW_MYSQL_REC_FIELDS 1
#define ROW_MYSQL_NO_TEMPLATE 2
+#define ROW_MYSQL_DUMMY_TEMPLATE 3 /* dummy template used in
+ row_scan_and_check_index */
#ifndef UNIV_NONINL
#include "row0mysql.ic"
diff --git a/innobase/include/trx0trx.h b/innobase/include/trx0trx.h
index 52be0b1d992..f67ba43162d 100644
--- a/innobase/include/trx0trx.h
+++ b/innobase/include/trx0trx.h
@@ -24,6 +24,13 @@ saving CPU time. The kernel mutex contention is increased, however. */
extern ulint trx_n_mysql_transactions;
+/************************************************************************
+Releases the search latch if trx has reserved it. */
+
+void
+trx_search_latch_release_if_reserved(
+/*=================================*/
+ trx_t* trx); /* in: transaction */
/********************************************************************
Retrieves the error_info field from a trx. */
@@ -282,6 +289,13 @@ struct trx_struct{
ulint n_mysql_tables_in_use; /* number of Innobase tables
used in the processing of the current
SQL statement in MySQL */
+ ulint mysql_n_tables_locked;
+ /* how many tables the current SQL
+ statement uses, except those
+ in consistent read */
+ ibool has_search_latch;
+ /* TRUE if this trx has latched the
+ search system latch in S-mode */
ibool ignore_duplicates_in_insert;
/* in an insert roll back only insert
of the latest row in case
diff --git a/innobase/page/page0page.c b/innobase/page/page0page.c
index 7986684fd07..511191ecd89 100644
--- a/innobase/page/page0page.c
+++ b/innobase/page/page0page.c
@@ -1199,8 +1199,16 @@ page_rec_validate(
n_owned = rec_get_n_owned(rec);
heap_no = rec_get_heap_no(rec);
- ut_a(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED);
- ut_a(heap_no < page_header_get_field(page, PAGE_N_HEAP));
+ if (!(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED)) {
+ fprintf(stderr, "Dir slot n owned too big %lu\n", n_owned);
+ return(FALSE);
+ }
+
+ if (!(heap_no < page_header_get_field(page, PAGE_N_HEAP))) {
+ fprintf(stderr, "Heap no too big %lu %lu\n", heap_no,
+ page_header_get_field(page, PAGE_N_HEAP));
+ return(FALSE);
+ }
return(TRUE);
}
@@ -1216,20 +1224,21 @@ page_validate(
dict_index_t* index) /* in: data dictionary index containing
the page record type definition */
{
+ page_dir_slot_t* slot;
mem_heap_t* heap;
+ page_cur_t cur;
byte* buf;
ulint i;
ulint count;
ulint own_count;
ulint slot_no;
ulint data_size;
- page_cur_t cur;
rec_t* rec;
rec_t* old_rec = NULL;
- page_dir_slot_t* slot;
ulint offs;
ulint n_slots;
-
+ ibool ret = FALSE;
+
heap = mem_heap_create(UNIV_PAGE_SIZE);
/* The following buffer is used to check that the
@@ -1244,8 +1253,16 @@ page_validate(
overlap. */
n_slots = page_dir_get_n_slots(page);
- ut_ad(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
- page_dir_get_nth_slot(page, n_slots - 1));
+
+ if (!(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
+ page_dir_get_nth_slot(page, n_slots - 1))) {
+ fprintf(stderr,
+ "Record heap and dir overlap on a page in index %s, %lu, %lu\n",
+ index->name, page_header_get_ptr(page, PAGE_HEAP_TOP),
+ page_dir_get_nth_slot(page, n_slots - 1));
+
+ goto func_exit;
+ }
/* Validate the record list in a loop checking also that
it is consistent with the directory. */
@@ -1259,11 +1276,20 @@ page_validate(
for (;;) {
rec = (&cur)->rec;
- page_rec_validate(rec);
+
+ if (!page_rec_validate(rec)) {
+ goto func_exit;
+ }
/* Check that the records are in the ascending order */
if ((count >= 2) && (!page_cur_is_after_last(&cur))) {
- ut_a(1 == cmp_rec_rec(rec, old_rec, index));
+ if (!(1 == cmp_rec_rec(rec, old_rec, index))) {
+ fprintf(stderr,
+ "Records in wrong order in index %s\n",
+ index->name);
+
+ goto func_exit;
+ }
}
if ((rec != page_get_supremum_rec(page))
@@ -1275,16 +1301,38 @@ page_validate(
offs = rec_get_start(rec) - page;
for (i = 0; i < rec_get_size(rec); i++) {
- ut_a(buf[offs + i] == 0); /* No other record may
- overlap this */
+ if (!buf[offs + i] == 0) {
+ /* No other record may overlap this */
+
+ fprintf(stderr,
+ "Record overlaps another in index %s \n",
+ index->name);
+
+ goto func_exit;
+ }
+
buf[offs + i] = 1;
}
if (rec_get_n_owned(rec) != 0) {
/* This is a record pointed to by a dir slot */
- ut_a(rec_get_n_owned(rec) == own_count);
+ if (rec_get_n_owned(rec) != own_count) {
+ fprintf(stderr,
+ "Wrong owned count %lu, %lu, in index %s\n",
+ rec_get_n_owned(rec), own_count,
+ index->name);
- ut_a(page_dir_slot_get_rec(slot) == rec);
+ goto func_exit;
+ }
+
+ if (page_dir_slot_get_rec(slot) != rec) {
+ fprintf(stderr,
+ "Dir slot does not point to right rec in %s\n",
+ index->name);
+
+ goto func_exit;
+ }
+
page_dir_slot_check(slot);
own_count = 0;
@@ -1297,45 +1345,89 @@ page_validate(
if (page_cur_is_after_last(&cur)) {
break;
}
-
- count++;
+
+ if (rec_get_next_offs(rec) < FIL_PAGE_DATA
+ || rec_get_next_offs(rec) >= UNIV_PAGE_SIZE) {
+ fprintf(stderr,
+ "Next record offset wrong %lu in index %s\n",
+ rec_get_next_offs(rec), index->name);
+
+ goto func_exit;
+ }
+
+ count++;
page_cur_move_to_next(&cur);
own_count++;
old_rec = rec;
}
- ut_a(rec_get_n_owned(rec) != 0);
- ut_a(slot_no == n_slots - 1);
- ut_a(page_header_get_field(page, PAGE_N_RECS) + 2 == count + 1);
+ if (rec_get_n_owned(rec) == 0) {
+ fprintf(stderr, "n owned is zero in index %s\n", index->name);
+
+ goto func_exit;
+ }
+
+ if (slot_no != n_slots - 1) {
+ fprintf(stderr, "n slots wrong %lu %lu in index %s\n",
+ slot_no, n_slots - 1, index->name);
+ goto func_exit;
+ }
+
+ if (page_header_get_field(page, PAGE_N_RECS) + 2 != count + 1) {
+ fprintf(stderr, "n recs wrong %lu %lu in index %s\n",
+ page_header_get_field(page, PAGE_N_RECS) + 2, count + 1,
+ index->name);
+
+ goto func_exit;
+ }
if (data_size != page_get_data_size(page)) {
- printf("Summed data size %lu, returned by func %lu\n",
+ fprintf(stderr, "Summed data size %lu, returned by func %lu\n",
data_size, page_get_data_size(page));
- ut_error;
+ goto func_exit;
}
/* Check then the free list */
rec = page_header_get_ptr(page, PAGE_FREE);
while (rec != NULL) {
- page_rec_validate(rec);
+ if (!page_rec_validate(rec)) {
+
+ goto func_exit;
+ }
count++;
offs = rec_get_start(rec) - page;
for (i = 0; i < rec_get_size(rec); i++) {
- ut_a(buf[offs + i] == 0);
+
+ if (buf[offs + i] != 0) {
+ fprintf(stderr,
+ "Record overlaps another in free list, index %s \n",
+ index->name);
+
+ goto func_exit;
+ }
+
buf[offs + i] = 1;
}
rec = page_rec_get_next(rec);
}
- ut_a(page_header_get_field(page, PAGE_N_HEAP) == count + 1);
-
+ if (page_header_get_field(page, PAGE_N_HEAP) != count + 1) {
+
+ fprintf(stderr, "N heap is wrong %lu %lu in index %s\n",
+ page_header_get_field(page, PAGE_N_HEAP), count + 1,
+ index->name);
+ }
+
+ ret = TRUE;
+
+func_exit:
mem_heap_free(heap);
- return(TRUE);
+ return(ret);
}
/*******************************************************************
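
One detail of page_validate() worth spelling out: overlapping records are detected with a scratch byte map the size of the page; every byte a record occupies is marked, and finding an already-marked byte means two records (or a record and the free list) claim the same space. A small self-contained sketch of the same technique, in plain C rather than the InnoDB API:

    #include <string.h>

    #define PAGE_BYTES 16384        /* UNIV_PAGE_SIZE in InnoDB of this era */

    /* Returns 1 if all [start[i], start[i] + len[i]) ranges are disjoint,
       0 as soon as one byte is claimed twice. */
    static int ranges_disjoint(const unsigned int* start, const unsigned int* len,
                               unsigned int n_recs)
    {
            unsigned char   map[PAGE_BYTES];
            unsigned int    i;
            unsigned int    j;

            memset(map, 0, sizeof(map));

            for (i = 0; i < n_recs; i++) {
                    for (j = 0; j < len[i]; j++) {
                            if (map[start[i] + j]) {
                                    return(0);   /* overlap found */
                            }
                            map[start[i] + j] = 1;
                    }
            }

            return(1);
    }
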
diff --git a/innobase/rem/rem0cmp.c b/innobase/rem/rem0cmp.c
index d5208f2d486..78f4e450269 100644
--- a/innobase/rem/rem0cmp.c
+++ b/innobase/rem/rem0cmp.c
@@ -177,7 +177,9 @@ cmp_whole_field(
(int)(type->prtype & ~DATA_NOT_NULL),
a, a_length, b, b_length));
default:
- assert(0);
+ fprintf(stderr,
+ "InnoDB: unknown type number %lu\n", data_type);
+ ut_a(0);
}
return(0);
diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c
index ec24b40f5c2..b40e026c675 100644
--- a/innobase/row/row0mysql.c
+++ b/innobase/row/row0mysql.c
@@ -1129,3 +1129,146 @@ funct_exit:
return((int) err);
}
+
+/*************************************************************************
+Checks that the index contains entries in an ascending order, unique
+constraint is not broken, and calculates the number of index entries
+in the read view of the current transaction. */
+static
+ibool
+row_scan_and_check_index(
+/*=====================*/
+ /* out: TRUE if ok */
+ row_prebuilt_t* prebuilt, /* in: prebuilt struct in MySQL */
+ dict_index_t* index, /* in: index */
+ ulint* n_rows) /* out: number of entries seen in the
+ current consistent read */
+{
+ mem_heap_t* heap;
+ dtuple_t* prev_entry = NULL;
+ ulint matched_fields;
+ ulint matched_bytes;
+ byte* buf;
+ ulint ret;
+ rec_t* rec;
+ ibool is_ok = TRUE;
+ int cmp;
+
+ *n_rows = 0;
+
+ buf = mem_alloc(UNIV_PAGE_SIZE);
+ heap = mem_heap_create(100);
+
+ /* Make a dummy template in prebuilt, which we will use
+ in scanning the index entries */
+
+ prebuilt->index = index;
+ prebuilt->sql_stat_start = TRUE;
+ prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE;
+ prebuilt->n_template = 0;
+ prebuilt->need_to_access_clustered = FALSE;
+
+ dtuple_set_n_fields(prebuilt->search_tuple, 0);
+
+ prebuilt->select_lock_type = LOCK_NONE;
+
+ ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, 0);
+loop:
+ if (ret != DB_SUCCESS) {
+
+ mem_free(buf);
+ mem_heap_free(heap);
+
+ return(is_ok);
+ }
+
+ *n_rows = *n_rows + 1;
+
+ /* row_search... returns the index record in buf, record origin offset
+ within buf stored in the first 4 bytes, because we have built a dummy
+ template */
+
+ rec = buf + mach_read_from_4(buf);
+
+ if (prev_entry != NULL) {
+ matched_fields = 0;
+ matched_bytes = 0;
+
+ cmp = cmp_dtuple_rec_with_match(prev_entry, rec,
+ &matched_fields,
+ &matched_bytes);
+ if (cmp > 0) {
+ fprintf(stderr,
+ "Error: index records in a wrong order in index %s\n",
+ index->name);
+
+ is_ok = FALSE;
+ } else if ((index->type & DICT_UNIQUE)
+ && matched_fields >=
+ dict_index_get_n_ordering_defined_by_user(index)) {
+ fprintf(stderr,
+ "Error: duplicate key in index %s\n",
+ index->name);
+
+ is_ok = FALSE;
+ }
+ }
+
+ mem_heap_empty(heap);
+
+ prev_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
+
+ ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, ROW_SEL_NEXT);
+
+ goto loop;
+}
+
+/*************************************************************************
+Checks a table for corruption. */
+
+ulint
+row_check_table_for_mysql(
+/*======================*/
+ /* out: DB_ERROR or DB_SUCCESS */
+ row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL
+ handle */
+{
+ dict_table_t* table = prebuilt->table;
+ dict_index_t* index;
+ ulint n_rows;
+ ulint n_rows_in_table;
+ ulint ret = DB_SUCCESS;
+
+ index = dict_table_get_first_index(table);
+
+ while (index != NULL) {
+ /* fprintf(stderr, "Validating index %s\n", index->name); */
+
+ if (!btr_validate_tree(index->tree)) {
+ ret = DB_ERROR;
+ } else {
+ if (!row_scan_and_check_index(prebuilt,
+ index, &n_rows)) {
+ ret = DB_ERROR;
+ }
+
+ /* fprintf(stderr, "%lu entries in index %s\n", n_rows,
+ index->name); */
+
+ if (index == dict_table_get_first_index(table)) {
+ n_rows_in_table = n_rows;
+ } else if (n_rows != n_rows_in_table) {
+
+ ret = DB_ERROR;
+
+ fprintf(stderr,
+ "Error: index %s contains %lu entries, should be %lu\n",
+ index->name, n_rows, n_rows_in_table);
+ }
+ }
+
+ index = dict_table_get_next_index(index);
+ }
+
+ return(ret);
+}
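
row_check_table_for_mysql() above is the storage-engine half of a CHECK TABLE for InnoDB: it validates each index tree with btr_validate_tree(), scans every index under a consistent read to verify ordering and unique constraints, and cross-checks that each secondary index contains the same number of entries as the clustered index. The diffstat also lists sql/ha_innobase.cc, whose hunks are not shown in this excerpt; a plausible bridge from the handler layer, where only row_check_table_for_mysql() is taken from this patch and the surrounding names are assumed, is:

    /* Hypothetical caller sketch: map the InnoDB verdict to a handler-level
       ok/corrupt result. */
    int innobase_check_table(row_prebuilt_t* prebuilt)
    {
            ulint   err = row_check_table_for_mysql(prebuilt);

            return((err == DB_SUCCESS) ? 0 : -1);   /* 0 = ok, -1 = corrupt */
    }
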
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index 58e0d053947..e3bab021669 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -2341,6 +2341,65 @@ row_sel_push_cache_row_for_mysql(
prebuilt->n_fetch_cached++;
}
+/*************************************************************************
+Tries to do a shortcut to fetch a clustered index record with a unique key,
+using the hash index if possible (not always). We assume that the search
+mode is PAGE_CUR_GE, it is a consistent read, trx has already a read view,
+btr search latch has been locked in S-mode. */
+static
+ulint
+row_sel_try_search_shortcut_for_mysql(
+/*==================================*/
+ /* out: SEL_FOUND, SEL_EXHAUSTED, SEL_RETRY */
+ rec_t** out_rec,/* out: record if found */
+ row_prebuilt_t* prebuilt,/* in: prebuilt struct */
+ mtr_t* mtr) /* in: started mtr */
+{
+ dict_index_t* index = prebuilt->index;
+ dtuple_t* search_tuple = prebuilt->search_tuple;
+ btr_pcur_t* pcur = prebuilt->pcur;
+ trx_t* trx = prebuilt->trx;
+ rec_t* rec;
+
+ ut_ad(index->type & DICT_CLUSTERED);
+
+ btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
+ BTR_SEARCH_LEAF, pcur,
+ RW_S_LATCH, mtr);
+ rec = btr_pcur_get_rec(pcur);
+
+ if (!page_rec_is_user_rec(rec)) {
+
+ return(SEL_RETRY);
+ }
+
+ /* As the cursor is now placed on a user record after a search with
+ the mode PAGE_CUR_GE, the up_match field in the cursor tells how many
+ fields in the user record matched to the search tuple */
+
+ if (btr_pcur_get_up_match(pcur) < dtuple_get_n_fields(search_tuple)) {
+
+ return(SEL_EXHAUSTED);
+ }
+
+ /* This is a non-locking consistent read: if necessary, fetch
+ a previous version of the record */
+
+ if (!lock_clust_rec_cons_read_sees(rec, index, trx->read_view)) {
+
+ return(SEL_RETRY);
+ }
+
+ if (rec_get_deleted_flag(rec)) {
+
+ return(SEL_EXHAUSTED);
+ }
+
+ *out_rec = rec;
+
+ return(SEL_FOUND);
+}
+
/************************************************************************
Searches for rows in the database. This is used in the interface to
MySQL. This function opens a cursor, and also implements fetch next
@@ -2387,6 +2446,7 @@ row_search_for_mysql(
ibool cons_read_requires_clust_rec;
ibool was_lock_wait;
ulint ret;
+ ulint shortcut;
ibool unique_search_from_clust_index = FALSE;
ibool mtr_has_extra_clust_latch = FALSE;
ibool moves_up = FALSE;
@@ -2452,6 +2512,8 @@ row_search_for_mysql(
mode = pcur->search_mode;
}
+ mtr_start(&mtr);
+
if (match_mode == ROW_SEL_EXACT && index->type & DICT_UNIQUE
&& index->type & DICT_CLUSTERED
&& dtuple_get_n_fields(search_tuple)
@@ -2464,6 +2526,8 @@ row_search_for_mysql(
restore cursor position, and must return
immediately */
+ mtr_commit(&mtr);
+
return(DB_RECORD_NOT_FOUND);
}
@@ -2472,8 +2536,51 @@ row_search_for_mysql(
mode = PAGE_CUR_GE;
unique_search_from_clust_index = TRUE;
+
+ if (trx->mysql_n_tables_locked == 0
+ && !prebuilt->sql_stat_start) {
+
+ /* This is a SELECT query done as a consistent read,
+ and the read view has already been allocated:
+ let us try a search shortcut through the hash
+ index */
+
+ if (!trx->has_search_latch) {
+ rw_lock_s_lock(&btr_search_latch);
+ trx->has_search_latch = TRUE;
+
+ } else if (btr_search_latch.writer_is_wait_ex) {
+ /* There is an x-latch request waiting:
+ release the s-latch for a moment to reduce
+ starvation */
+
+ rw_lock_s_unlock(&btr_search_latch);
+ rw_lock_s_lock(&btr_search_latch);
+ }
+
+ shortcut = row_sel_try_search_shortcut_for_mysql(&rec,
+ prebuilt, &mtr);
+ if (shortcut == SEL_FOUND) {
+ row_sel_store_mysql_rec(buf, prebuilt, rec);
+
+ mtr_commit(&mtr);
+
+ return(DB_SUCCESS);
+
+ } else if (shortcut == SEL_EXHAUSTED) {
+
+ mtr_commit(&mtr);
+
+ return(DB_RECORD_NOT_FOUND);
+ }
+ }
}
+ if (trx->has_search_latch) {
+ rw_lock_s_unlock(&btr_search_latch);
+ trx->has_search_latch = FALSE;
+ }
+
/* Note that if the search mode was GE or G, then the cursor
naturally moves upward (in fetch next) in alphabetical order,
otherwise downward */
@@ -2485,8 +2592,6 @@ row_search_for_mysql(
} else if (direction == ROW_SEL_NEXT) {
moves_up = TRUE;
}
-
- mtr_start(&mtr);
thr = que_fork_get_first_thr(prebuilt->sel_graph);
@@ -2711,7 +2816,9 @@ rec_loop:
if (prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD
&& !prebuilt->templ_contains_blob
&& prebuilt->select_lock_type == LOCK_NONE
- && !prebuilt->clust_index_was_generated) {
+ && !prebuilt->clust_index_was_generated
+ && prebuilt->template_type
+ != ROW_MYSQL_DUMMY_TEMPLATE) {
/* Inside an update, for example, we do not cache rows,
since we may use the cursor position to do the actual
@@ -2726,7 +2833,13 @@ rec_loop:
goto next_rec;
} else {
- row_sel_store_mysql_rec(buf, prebuilt, rec);
+ if (prebuilt->template_type == ROW_MYSQL_DUMMY_TEMPLATE) {
+ ut_memcpy(buf + 4, rec - rec_get_extra_size(rec),
+ rec_get_size(rec));
+ mach_write_to_4(buf, rec_get_extra_size(rec) + 4);
+ } else {
+ row_sel_store_mysql_rec(buf, prebuilt, rec);
+ }
if (prebuilt->clust_index_was_generated) {
row_sel_store_row_id_to_prebuilt(prebuilt, index_rec,
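
The ROW_MYSQL_DUMMY_TEMPLATE branch added to row_search_for_mysql() above is what row_scan_and_check_index() in row0mysql.c relies on: the raw index record, extra bytes included, is copied into buf starting at offset 4, and the first 4 bytes store the offset of the record origin within buf (4 + rec_get_extra_size(rec)), so the caller recovers it with rec = buf + mach_read_from_4(buf). A standalone illustration of that buffer layout, with mach_write_to_4/mach_read_from_4 mimicked by simple big-endian helpers:

    #include <stdio.h>
    #include <string.h>

    static void write4(unsigned char* b, unsigned long n)
    {
            b[0] = (unsigned char) (n >> 24); b[1] = (unsigned char) (n >> 16);
            b[2] = (unsigned char) (n >> 8);  b[3] = (unsigned char) n;
    }

    static unsigned long read4(const unsigned char* b)
    {
            return(((unsigned long) b[0] << 24) | ((unsigned long) b[1] << 16)
                 | ((unsigned long) b[2] << 8)  |  (unsigned long) b[3]);
    }

    int main(void)
    {
            /* pretend record: 3 "extra" bytes in front of the origin,
               then 5 data bytes starting at the record origin */
            unsigned char   rec_image[8] = {0xAA, 0xBB, 0xCC, 'h', 'e', 'l', 'l', 'o'};
            unsigned long   extra = 3;
            unsigned char   buf[64];

            /* writer side, as in row0sel.c */
            memcpy(buf + 4, rec_image, sizeof(rec_image));
            write4(buf, extra + 4);

            /* reader side, as in row0mysql.c */
            printf("%.5s\n", (const char*) (buf + read4(buf)));  /* prints: hello */

            return(0);
    }
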
diff --git a/innobase/trx/trx0trx.c b/innobase/trx/trx0trx.c
index 4841711551b..14108c677eb 100644
--- a/innobase/trx/trx0trx.c
+++ b/innobase/trx/trx0trx.c
@@ -22,6 +22,7 @@ Created 3/26/1996 Heikki Tuuri
#include "read0read.h"
#include "srv0srv.h"
#include "thr0loc.h"
+#include "btr0sea.h"
/* Dummy session used currently in MySQL interface */
sess_t* trx_dummy_sess = NULL;
@@ -63,6 +64,7 @@ trx_create(
trx->dict_operation = FALSE;
trx->n_mysql_tables_in_use = 0;
+ trx->mysql_n_tables_locked = 0;
trx->ignore_duplicates_in_insert = FALSE;
@@ -96,6 +98,8 @@ trx_create(
trx->lock_heap = mem_heap_create_in_buffer(256);
UT_LIST_INIT(trx->trx_locks);
+ trx->has_search_latch = FALSE;
+
trx->read_view_heap = mem_heap_create(256);
trx->read_view = NULL;
@@ -133,6 +137,21 @@ trx_allocate_for_mysql(void)
}
/************************************************************************
+Releases the search latch if trx has reserved it. */
+
+void
+trx_search_latch_release_if_reserved(
+/*=================================*/
+ trx_t* trx) /* in: transaction */
+{
+ if (trx->has_search_latch) {
+ rw_lock_s_unlock(&btr_search_latch);
+
+ trx->has_search_latch = FALSE;
+ }
+}
+
+/************************************************************************
Frees a transaction object. */
void
@@ -149,6 +168,7 @@ trx_free(
ut_a(trx->update_undo == NULL);
ut_a(trx->n_mysql_tables_in_use == 0);
+ ut_a(trx->mysql_n_tables_locked == 0);
if (trx->undo_no_arr) {
trx_undo_arr_free(trx->undo_no_arr);
@@ -160,6 +180,8 @@ trx_free(
ut_a(trx->wait_lock == NULL);
ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);
+ ut_a(!trx->has_search_latch);
+
if (trx->lock_heap) {
mem_heap_free(trx->lock_heap);
}
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index 8d8ac3c9a21..fe8bcb4c4e4 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -54,7 +54,7 @@ sqlsources = convert.cc derror.cc field.cc field_conv.cc filesort.cc \
sql_rename.cc sql_repl.cc sql_select.cc sql_show.cc \
sql_string.cc sql_table.cc sql_test.cc sql_udf.cc \
sql_update.cc sql_yacc.cc table.cc thr_malloc.cc time.cc \
- unireg.cc
+ unireg.cc uniques.cc
## XXX: we should not have to duplicate info from the sources list
sqlobjects = convert.lo derror.lo field.lo field_conv.lo filesort.lo \
@@ -72,7 +72,7 @@ sqlobjects = convert.lo derror.lo field.lo field_conv.lo filesort.lo \
sql_rename.lo sql_repl.lo sql_select.lo sql_show.lo \
sql_string.lo sql_table.lo sql_test.lo sql_udf.lo \
sql_update.lo sql_yacc.lo table.lo thr_malloc.lo time.lo \
- unireg.lo
+ unireg.lo uniques.lo
EXTRA_DIST = lib_vio.c
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index b2889079ac8..ed04d85ee6e 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -50,8 +50,8 @@ void free_defaults_internal(char ** argv){if (argv) free_defaults(argv);}
char mysql_data_home[FN_REFLEN];
char * get_mysql_data_home(){return mysql_data_home;};
#define mysql_data_home mysql_data_home_internal
-#include "../sql/mysqld.cc"
#include "lib_vio.c"
+#include "../sql/mysqld.cc"
#define SCRAMBLE_LENGTH 8
extern "C" {
@@ -600,7 +600,7 @@ void embedded_srv_init(void)
}
//printf(ER(ER_READY),my_progname,server_version,"");
- printf("%s initialized.\n", server_version);
+ //printf("%s initialized.\n", server_version);
fflush(stdout);
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index 732c102c640..fe429c4d54d 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -1323,22 +1323,26 @@ mysql_query(MYSQL *mysql, const char *query)
return mysql_real_query(mysql,query, (uint) strlen(query));
}
+int STDCALL
+mysql_send_query(MYSQL* mysql, const char* query, uint length)
+{
+ return simple_command(mysql, COM_QUERY, query, length, 1);
+}
+
int STDCALL
-mysql_real_query(MYSQL *mysql, const char *query, uint length)
+mysql_read_query_result(MYSQL *mysql)
{
uchar *pos;
ulong field_count;
MYSQL_DATA *fields;
- DBUG_ENTER("mysql_real_query");
- DBUG_PRINT("enter",("handle: %lx",mysql));
- DBUG_PRINT("query",("Query = \"%s\"",query));
+ uint length;
+ DBUG_ENTER("mysql_read_query_result");
- if (simple_command(mysql,COM_QUERY,query,length,1) ||
- (length=net_safe_read(mysql)) == packet_error)
+ if ((length=net_safe_read(mysql)) == packet_error)
DBUG_RETURN(-1);
free_old_query(mysql); /* Free old result */
- get_info:
+get_info:
pos=(uchar*) mysql->net.read_pos;
if ((field_count= net_field_length(&pos)) == 0)
{
@@ -1375,6 +1379,17 @@ mysql_real_query(MYSQL *mysql, const char *query, uint length)
DBUG_RETURN(0);
}
+int STDCALL
+mysql_real_query(MYSQL *mysql, const char *query, uint length)
+{
+ DBUG_ENTER("mysql_real_query");
+ DBUG_PRINT("enter",("handle: %lx",mysql));
+ DBUG_PRINT("query",("Query = \"%s\"",query));
+ if (mysql_send_query(mysql, query, length))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(mysql_read_query_result(mysql));
+}
+
static int
send_file_to_server(MYSQL *mysql, const char *filename)
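The libmysqld.c change above splits mysql_real_query() into mysql_send_query(), which only ships the statement to the server, and mysql_read_query_result(), which blocks for the reply; mysql_real_query() is now a thin wrapper over the two. A minimal usage sketch, assuming a client built against this library's headers; the connection parameters and the query are placeholders and error reporting is trimmed:

#include <stdio.h>
#include <string.h>
#include <mysql.h>

int main(void)
{
  MYSQL       mysql;
  MYSQL_RES  *res;
  const char *q = "SELECT 1";

  mysql_init(&mysql);
  if (!mysql_real_connect(&mysql, "localhost", "root", "", "test",
                          0, NULL, 0))
    return 1;

  /* What mysql_real_query() now does internally: first ship the
     statement to the server ... */
  if (mysql_send_query(&mysql, q, (unsigned int) strlen(q)))
    return 1;

  /* ... the client is free to do other work here ... */

  /* ... then block until the server's reply has been read. */
  if (mysql_read_query_result(&mysql))
    return 1;

  if ((res = mysql_store_result(&mysql)))
    mysql_free_result(res);
  mysql_close(&mysql);
  return 0;
}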
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index 3ad645a6511..3598b15eb0a 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -144,7 +144,7 @@ test.t1 optimize error The handler for the table doesn't support check/repair
a
2
Table Op Msg_type Msg_text
-test.t1 check error The handler for the table doesn't support check/repair
+test.t1 check status OK
a b
2 testing
Table Op Msg_type Msg_text
diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test
index 10079ba2549..cdb6ee57e0f 100644
--- a/mysql-test/t/select.test
+++ b/mysql-test/t/select.test
@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou
#
select sum(Period)/count(*) from t1;
-select companynr,count(price) as "count",sum(price) as "sum" ,sum(price)/count(price)-avg(price) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
+select companynr,count(price) as "count",sum(price) as "sum" ,abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
#
diff --git a/mysys/getvar.c b/mysys/getvar.c
index e0f60b207b7..90ab599244d 100644
--- a/mysys/getvar.c
+++ b/mysys/getvar.c
@@ -101,7 +101,7 @@ my_bool set_changeable_var(my_string str,CHANGEABLE_VAR *vars)
}
if (num < (longlong) found->min_value)
num=(longlong) found->min_value;
- else if (num > (longlong) (ulong) found->max_value)
+ else if (num > 0 && (ulonglong) num > (ulonglong) (ulong) found->max_value)
num=(longlong) (ulong) found->max_value;
num=((num- (longlong) found->sub_size) / (ulonglong) found->block_size);
(*found->varptr)= (long) (num*(ulonglong) found->block_size);
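The getvar.c change above adds a sign check before the unsigned comparison against max_value: without it, a negative num converted to an unsigned type compares as a huge value and gets clamped to the maximum. The snippet below is a generic demonstration of that pitfall, not the mysys code itself:

#include <stdio.h>

int main(void)
{
  long long          num = -1;     /* e.g. a value already below its minimum */
  unsigned long long max = 1024;   /* an unsigned upper bound */

  /* Unsigned comparison alone: num is converted first, so -1 looks huge. */
  if ((unsigned long long) num > max)
    printf("without the sign check, -1 compares greater than %llu\n", max);

  /* With the added guard, negative values are never clamped to the maximum. */
  if (num > 0 && (unsigned long long) num > max)
    printf("never printed\n");
  else
    printf("with the sign check, -1 is left alone\n");

  return 0;
}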
diff --git a/mysys/tree.c b/mysys/tree.c
index af64be55d2f..02e8fdb12bd 100644
--- a/mysys/tree.c
+++ b/mysys/tree.c
@@ -266,6 +266,8 @@ int tree_delete(TREE *tree, void *key)
}
if (remove_colour == BLACK)
rb_delete_fixup(tree,parent);
+ if (tree->free)
+ (*tree->free)(ELEMENT_KEY(tree,element));
my_free((gptr) element,MYF(0));
tree->elements_in_tree--;
return 0;
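The tree.c change above makes tree_delete() run the tree's key destructor (tree->free) before freeing the node, so keys that own heap memory are no longer leaked on delete. The sketch below models that ownership rule with invented names (node_t, kv_delete); it is not the mysys TREE API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*key_free_fn)(void *key);

typedef struct node {
  void *key;                       /* heap payload owned by the node */
} node_t;

/* Delete a node, running the key destructor first, just as the patched
   tree_delete() now calls tree->free before my_free(). */
static void kv_delete(node_t *node, key_free_fn free_key)
{
  if (free_key)
    free_key(node->key);           /* this call was missing: the old leak */
  free(node);
}

static void free_string_key(void *key) { free(key); }

int main(void)
{
  node_t *n = malloc(sizeof(node_t));
  if (!n)
    return 1;
  n->key = strdup("row buffer owned by the key");
  kv_delete(n, free_string_key);   /* key and node are both released */
  return 0;
}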
diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark
index a51752a5023..cce9a8f05fe 100644
--- a/sql-bench/Comments/postgres.benchmark
+++ b/sql-bench/Comments/postgres.benchmark
@@ -1,5 +1,14 @@
-# This file describes how to run MySQL benchmarks with Postgres
-#
+# This file describes how to run MySQL benchmarks with PostgreSQL
+#
+# WARNING:
+#
+# Don't run the --fast test on a PostgreSQL 7.1.1 database on
+# which you have any critical data; during one of our test runs
+# PostgreSQL got a corrupted database and all data was destroyed!
+# (When we tried to restart postmaster, it died with a
+# 'no such file or directory' error and never recovered from that!)
+#
+# WARNING
# The test was run on an Intel Xeon 2x 550 MHz machine with 1G memory,
# 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP
@@ -8,49 +17,52 @@
# on the same machine. No other cpu intensive process was used during
# the benchmark.
-#
-#
-# First, install postgresql-7.0.2.tar.gz
-#
+# First, install postgresql-7.1.1.tar.gz
-#
-# Start by adding the following lines to your ~/.bash_profile or
+# Add the following lines to your ~/.bash_profile or
# corresponding file. If you are using csh, use 'setenv'.
#
-export POSTGRES_INCLUDE=/usr/local/pgsql/include
-export POSTGRES_LIB=/usr/local/pgsql/lib
+export POSTGRES_INCLUDE=/usr/local/pg/include
+export POSTGRES_LIB=/usr/local/pg/lib
-PATH=$PATH:/usr/local/pgsql/bin
-MANPATH=$MANPATH:/usr/local/pgsql/man
+PATH=$PATH:/usr/local/pg/bin
+MANPATH=$MANPATH:/usr/local/pg/man
#
# Add the following line to /etc/ld.so.conf:
#
-/usr/local/pgsql/lib
-and run ldconfig.
+/usr/local/pg/lib
-#
-# untar the postgres source distribution and cd to src/
-# run the following commands:
-#
+# and run:
+
+ldconfig
-./configure
+# untar the postgres source distribution, cd to postgresql-*
+# and run the following commands:
+
+CFLAGS=-O3 ./configure
gmake
gmake install
-mkdir /usr/local/pgsql/data
-chown postgres /usr/local/pgsql/data
+mkdir /usr/local/pg/data
+chown postgres /usr/local/pg/data
su - postgres
-/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
-su postgres -c "/usr/local/pgsql/bin/postmaster -o -F -D /usr/local/pgsql/data" &
-su postgres -c "/usr/local/pgsql/bin/createdb test"
+/usr/local/pg/bin/initdb -D /usr/local/pg/data
+/usr/local/pg/bin/postmaster -o -F -D /usr/local/pg/data &
+/usr/local/pg/bin/createdb test
+exit
#
-# Second, install packages DBD-Pg-0.95.tar.gz and DBI-1.14.tar.gz,
+# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
# available from http://www.perl.com/CPAN/
-#
+
+export POSTGRES_LIB=/usr/local/pg/lib/
+export POSTGRES_INCLUDE=/usr/local/pg/include/postgresql
+perl Makefile.PL
+make
+make install
#
# Now we run the test that can be found in the sql-bench directory in the
@@ -59,17 +71,20 @@ su postgres -c "/usr/local/pgsql/bin/createdb test"
# We did run two tests:
# The standard test
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
# and a test where we do a vacuum() after each update.
# (The time for vacuum() is counted in the book-keeping() column)
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
# If you want to store the results in an output/RUN-xxx file, you should
# repeat the benchmark with the extra options --log --use-old-result
# This will create the RUN file based on the previous results
-#
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
+
+# Between running the different tests we dropped and recreated the PostgreSQL
+# database to ensure that PostgreSQL would get a clean start,
+# independent of the previous runs.
diff --git a/sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..9db5568dd99
--- /dev/null
+++ b/sql-bench/Results/ATIS-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,20 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 4:40:22
+
+ATIS table test
+
+Creating tables
+Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Inserting data
+Time to insert (9768): 8 wallclock secs ( 2.97 usr 0.28 sys + 0.00 cusr 0.00 csys = 3.25 CPU)
+
+Retrieving data
+Time for select_simple_join (500): 3 wallclock secs ( 0.74 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.77 CPU)
+Time for select_join (100): 4 wallclock secs ( 0.52 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.65 CPU)
+Time for select_key_prefix_join (100): 11 wallclock secs ( 4.30 usr 0.16 sys + 0.00 cusr 0.00 csys = 4.46 CPU)
+Time for select_distinct (800): 22 wallclock secs ( 1.95 usr 0.18 sys + 0.00 cusr 0.00 csys = 2.13 CPU)
+Time for select_group (2600): 52 wallclock secs ( 1.43 usr 0.19 sys + 0.00 cusr 0.00 csys = 1.62 CPU)
+
+Removing tables
+Time to drop_table (28): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+Total time: 101 wallclock secs (11.93 usr 0.98 sys + 0.00 cusr 0.00 csys = 12.91 CPU)
diff --git a/sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..1d07a79018e
--- /dev/null
+++ b/sql-bench/Results/RUN-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,102 @@
+Benchmark DBD suite: 2.13
+Date of test: 2001-06-03 19:30:53
+Running tests on: Linux 2.4.0-64GB-SMP i686
+Arguments:
+Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F
+Limits from: mysql,pg
+Server version: PostgreSQL version 7.1.1
+
+ATIS: Total time: 101 wallclock secs (11.93 usr 0.98 sys + 0.00 cusr 0.00 csys = 12.91 CPU)
+alter-table: Total time: 52 wallclock secs ( 0.49 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.63 CPU)
+big-tables: Total time: 1324 wallclock secs ( 8.10 usr 0.69 sys + 0.00 cusr 0.00 csys = 8.79 CPU)
+connect: Total time: 555 wallclock secs (51.66 usr 14.01 sys + 0.00 cusr 0.00 csys = 65.67 CPU)
+create: Total time: 10008 wallclock secs (27.97 usr 5.83 sys + 0.00 cusr 0.00 csys = 33.80 CPU)
+insert: Estimated total time: 199360 wallclock secs (879.85 usr 202.59 sys + 0.00 cusr 0.00 csys = 1082.45 CPU)
+select: Estimated total time: 13197 wallclock secs (68.30 usr 8.18 sys + 0.00 cusr 0.00 csys = 76.48 CPU)
+wisconsin: Total time: 52 wallclock secs (12.40 usr 2.23 sys + 0.00 cusr 0.00 csys = 14.63 CPU)
+
+All 8 test executed successfully
+Tests with estimated time have a + at end of line
+
+Totals per operation:
+Operation seconds usr sys cpu tests
+alter_table_add 49.00 0.26 0.06 0.32 992
+connect 143.00 8.01 1.89 9.90 10000
+connect+select_1_row 195.00 10.94 2.31 13.25 10000
+connect+select_simple 157.00 10.42 2.41 12.83 10000
+count 131.00 0.03 0.00 0.03 100
+count_distinct 132.00 0.31 0.06 0.37 1000
+count_distinct_2 213.00 0.37 0.03 0.40 1000
+count_distinct_big 266.00 7.91 0.25 8.16 120
+count_distinct_group 384.00 1.07 0.08 1.15 1000
+count_distinct_group_on_key 488.00 0.41 0.03 0.44 1000
+count_distinct_group_on_key_parts 383.00 1.10 0.07 1.17 1000
+count_distinct_key_prefix 179.00 0.28 0.07 0.35 1000
+count_group_on_key_parts 331.00 1.13 0.06 1.19 1000
+count_on_key 1850.00 15.78 1.99 17.77 50100 +
+create+drop 3280.00 10.74 1.89 12.63 10000
+create_MANY_tables 160.00 3.67 1.35 5.02 5000
+create_index 1.00 0.00 0.00 0.00 8
+create_key+drop 5781.00 10.70 1.53 12.23 10000
+create_table 1.00 0.01 0.00 0.01 31
+delete_all 2478.00 0.01 0.00 0.01 12
+delete_all_many_keys 94.00 0.05 0.00 0.05 1
+delete_big 0.00 0.01 0.00 0.01 1
+delete_big_many_keys 93.00 0.05 0.00 0.05 128
+delete_key 85.00 3.19 0.48 3.67 10000
+drop_index 0.00 0.01 0.00 0.01 8
+drop_table 1.00 0.01 0.00 0.01 28
+drop_table_when_MANY_tables 772.00 1.29 0.28 1.57 5000
+insert 353.00 104.09 24.32 128.41 350768
+insert_duplicates 120.00 30.53 10.61 41.14 100000
+insert_key 804.00 47.08 47.06 94.14 100000
+insert_many_fields 528.00 1.12 0.21 1.33 2000
+insert_select_1_key 86.00 0.00 0.00 0.00 1
+insert_select_2_keys 196.00 0.00 0.00 0.00 1
+min_max 60.00 0.02 0.00 0.02 60
+min_max_on_key 10543.00 25.38 4.37 29.75 85000 ++
+order_by_big 148.00 21.11 0.72 21.83 10
+order_by_big_key 145.00 24.01 1.27 25.28 10
+order_by_big_key2 132.00 21.28 0.64 21.92 10
+order_by_big_key_desc 145.00 23.93 1.27 25.20 10
+order_by_big_key_diff 138.00 21.30 0.56 21.86 10
+order_by_big_key_prefix 133.00 21.16 0.80 21.96 10
+order_by_key2_diff 7.00 1.94 0.03 1.97 500
+order_by_key_prefix 4.00 1.04 0.08 1.12 500
+order_by_range 4.00 1.13 0.06 1.19 500
+outer_join 2539.00 0.00 0.01 0.01 10
+outer_join_found 2515.00 0.00 0.00 0.00 10
+outer_join_not_found 124666.00 0.00 0.00 0.00 500 +
+outer_join_on_key 2307.00 0.00 0.00 0.00 10
+select_1_row 6.00 2.25 0.46 2.71 10000
+select_2_rows 7.00 2.77 0.38 3.15 10000
+select_big 93.00 33.23 9.79 43.02 10080
+select_column+column 8.00 2.78 0.41 3.19 10000
+select_diff_key 0.00 0.21 0.02 0.23 500
+select_distinct 22.00 1.95 0.18 2.13 800
+select_group 326.00 1.47 0.20 1.67 2711
+select_group_when_MANY_tables 15.00 1.57 0.78 2.35 5000
+select_join 4.00 0.52 0.13 0.65 100
+select_key 243.00 68.03 8.10 76.13 200000
+select_key2 208.00 66.48 8.68 75.16 200000
+select_key2_return_key 200.00 66.41 7.77 74.18 200000
+select_key2_return_prim 204.00 64.75 7.90 72.65 200000
+select_key_prefix 208.00 66.62 8.81 75.43 200000
+select_key_prefix_join 11.00 4.30 0.16 4.46 100
+select_key_return_key 239.00 66.86 8.37 75.23 200000
+select_many_fields 795.00 6.97 0.48 7.45 2000
+select_query_cache 2549.00 3.25 0.52 3.77 10000
+select_query_cache2 2547.00 3.04 0.53 3.57 10000
+select_range 465.00 10.41 0.63 11.04 410
+select_range_key2 20341.00 4.22 0.52 4.74 25010 ++
+select_range_prefix 20344.00 6.32 1.04 7.36 25010 ++
+select_simple 5.00 2.73 0.30 3.03 10000
+select_simple_join 3.00 0.74 0.03 0.77 500
+update_big 6046.00 0.01 0.00 0.01 10
+update_of_key 136.00 16.21 11.85 28.06 50000
+update_of_key_big 320.00 0.16 0.09 0.25 501
+update_of_primary_key_many_keys 5365.00 0.16 0.03 0.19 256
+update_with_key 518.00 89.50 33.03 122.53 300000
+update_with_key_prefix 186.00 30.32 15.83 46.15 100000
+wisc_benchmark 16.00 3.30 0.65 3.95 114
+TOTALS 224650.00 1060.42 234.52 1294.94 2551551 ++++++++
diff --git a/sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..62a8ccfdb01
--- /dev/null
+++ b/sql-bench/Results/alter-table-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,14 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 4:42:04
+
+Testing of ALTER TABLE
+Testing with 1000 columns and 1000 rows in 20 steps
+Insert data into the table
+Time for insert (1000) 1 wallclock secs ( 0.21 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.29 CPU)
+
+Time for alter_table_add (992): 49 wallclock secs ( 0.26 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.32 CPU)
+
+Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time for drop_index (8): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Total time: 52 wallclock secs ( 0.49 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.63 CPU)
diff --git a/sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..3ac4af4f5ea
--- /dev/null
+++ b/sql-bench/Results/big-tables-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,19 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 4:42:56
+
+Testing of some unusual tables
+All tests are done 1000 times with 1000 fields
+
+Testing table with 1000 fields
+Testing select * from table with 1 record
+Time to select_many_fields(1000): 338 wallclock secs ( 3.28 usr 0.22 sys + 0.00 cusr 0.00 csys = 3.50 CPU)
+
+Testing select all_fields from table with 1 record
+Time to select_many_fields(1000): 457 wallclock secs ( 3.69 usr 0.26 sys + 0.00 cusr 0.00 csys = 3.95 CPU)
+
+Testing insert VALUES()
+Time to insert_many_fields(1000): 229 wallclock secs ( 0.40 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.47 CPU)
+
+Testing insert (all_fields) VALUES()
+Time to insert_many_fields(1000): 299 wallclock secs ( 0.72 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.86 CPU)
+
+Total time: 1324 wallclock secs ( 8.10 usr 0.69 sys + 0.00 cusr 0.00 csys = 8.79 CPU)
diff --git a/sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..86af371cbb9
--- /dev/null
+++ b/sql-bench/Results/connect-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,30 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 5:05:01
+
+Testing the speed of connecting to the server and sending of data
+All tests are done 10000 times
+
+Testing connection/disconnect
+Time to connect (10000): 143 wallclock secs ( 8.01 usr 1.89 sys + 0.00 cusr 0.00 csys = 9.90 CPU)
+
+Test connect/simple select/disconnect
+Time for connect+select_simple (10000): 157 wallclock secs (10.42 usr 2.41 sys + 0.00 cusr 0.00 csys = 12.83 CPU)
+
+Test simple select
+Time for select_simple (10000): 5 wallclock secs ( 2.73 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.03 CPU)
+
+Testing connect/select 1 row from table/disconnect
+Time to connect+select_1_row (10000): 195 wallclock secs (10.94 usr 2.31 sys + 0.00 cusr 0.00 csys = 13.25 CPU)
+
+Testing select 1 row from table
+Time to select_1_row (10000): 6 wallclock secs ( 2.25 usr 0.46 sys + 0.00 cusr 0.00 csys = 2.71 CPU)
+
+Testing select 2 rows from table
+Time to select_2_rows (10000): 7 wallclock secs ( 2.77 usr 0.38 sys + 0.00 cusr 0.00 csys = 3.15 CPU)
+
+Test select with aritmetic (+)
+Time for select_column+column (10000): 8 wallclock secs ( 2.78 usr 0.41 sys + 0.00 cusr 0.00 csys = 3.19 CPU)
+
+Testing retrieval of big records (65000 bytes)
+Time to select_big (10000): 34 wallclock secs (11.75 usr 5.84 sys + 0.00 cusr 0.00 csys = 17.59 CPU)
+
+Total time: 555 wallclock secs (51.66 usr 14.01 sys + 0.00 cusr 0.00 csys = 65.67 CPU)
diff --git a/sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..7e33d155a20
--- /dev/null
+++ b/sql-bench/Results/create-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,18 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 5:14:17
+
+Testing the speed of creating and droping tables
+Testing with 5000 tables and 10000 loop count
+
+Testing create of tables
+Time for create_MANY_tables (5000): 160 wallclock secs ( 3.67 usr 1.35 sys + 0.00 cusr 0.00 csys = 5.02 CPU)
+
+Accessing tables
+Time to select_group_when_MANY_tables (5000): 15 wallclock secs ( 1.57 usr 0.78 sys + 0.00 cusr 0.00 csys = 2.35 CPU)
+
+Testing drop
+Time for drop_table_when_MANY_tables (5000): 772 wallclock secs ( 1.29 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.57 CPU)
+
+Testing create+drop
+Time for create+drop (10000): 3280 wallclock secs (10.74 usr 1.89 sys + 0.00 cusr 0.00 csys = 12.63 CPU)
+Time for create_key+drop (10000): 5781 wallclock secs (10.70 usr 1.53 sys + 0.00 cusr 0.00 csys = 12.23 CPU)
+Total time: 10008 wallclock secs (27.97 usr 5.83 sys + 0.00 cusr 0.00 csys = 33.80 CPU)
diff --git a/sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..68d7052ef6e
--- /dev/null
+++ b/sql-bench/Results/insert-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,103 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 8:01:05
+
+Testing the speed of inserting data into 1 table and do some selects on it.
+The tests are done with a table that has 100000 rows.
+
+Generating random keys
+Creating tables
+Inserting 100000 rows in order
+Inserting 100000 rows in reverse order
+Inserting 100000 rows in random order
+Time for insert (300000): 302 wallclock secs (89.07 usr 22.07 sys + 0.00 cusr 0.00 csys = 111.14 CPU)
+
+Testing insert of duplicates
+Time for insert_duplicates (100000): 120 wallclock secs (30.53 usr 10.61 sys + 0.00 cusr 0.00 csys = 41.14 CPU)
+
+Retrieving data from the table
+Time for select_big (10:3000000): 58 wallclock secs (21.31 usr 3.95 sys + 0.00 cusr 0.00 csys = 25.26 CPU)
+Time for order_by_big_key (10:3000000): 145 wallclock secs (24.01 usr 1.27 sys + 0.00 cusr 0.00 csys = 25.28 CPU)
+Time for order_by_big_key_desc (10:3000000): 145 wallclock secs (23.93 usr 1.27 sys + 0.00 cusr 0.00 csys = 25.20 CPU)
+Time for order_by_big_key_prefix (10:3000000): 133 wallclock secs (21.16 usr 0.80 sys + 0.00 cusr 0.00 csys = 21.96 CPU)
+Time for order_by_big_key2 (10:3000000): 132 wallclock secs (21.28 usr 0.64 sys + 0.00 cusr 0.00 csys = 21.92 CPU)
+Time for order_by_big_key_diff (10:3000000): 138 wallclock secs (21.30 usr 0.56 sys + 0.00 cusr 0.00 csys = 21.86 CPU)
+Time for order_by_big (10:3000000): 148 wallclock secs (21.11 usr 0.72 sys + 0.00 cusr 0.00 csys = 21.83 CPU)
+Time for order_by_range (500:125750): 4 wallclock secs ( 1.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.19 CPU)
+Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 1.04 usr 0.08 sys + 0.00 cusr 0.00 csys = 1.12 CPU)
+Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 1.94 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.97 CPU)
+Time for select_diff_key (500:1000): 0 wallclock secs ( 0.21 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.23 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+210 queries in 210 loops of 5010 loops took 616 seconds
+Estimated time for select_range_prefix (5010:1764): 14696 wallclock secs ( 2.62 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.10 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+210 queries in 210 loops of 5010 loops took 615 seconds
+Estimated time for select_range_key2 (5010:1764): 14672 wallclock secs ( 1.67 usr 0.24 sys + 0.00 cusr 0.00 csys = 1.91 CPU)
+Time for select_key_prefix (200000): 208 wallclock secs (66.62 usr 8.81 sys + 0.00 cusr 0.00 csys = 75.43 CPU)
+Time for select_key (200000): 243 wallclock secs (68.03 usr 8.10 sys + 0.00 cusr 0.00 csys = 76.13 CPU)
+Time for select_key_return_key (200000): 239 wallclock secs (66.86 usr 8.37 sys + 0.00 cusr 0.00 csys = 75.23 CPU)
+Time for select_key2 (200000): 208 wallclock secs (66.48 usr 8.68 sys + 0.00 cusr 0.00 csys = 75.16 CPU)
+Time for select_key2_return_key (200000): 200 wallclock secs (66.41 usr 7.77 sys + 0.00 cusr 0.00 csys = 74.18 CPU)
+Time for select_key2_return_prim (200000): 204 wallclock secs (64.75 usr 7.90 sys + 0.00 cusr 0.00 csys = 72.65 CPU)
+
+Test of compares with simple ranges
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+2160 queries in 54 loops of 500 loops took 610 seconds
+Estimated time for select_range_prefix (20000:4698): 5648 wallclock secs ( 3.70 usr 0.56 sys + 0.00 cusr 0.00 csys = 4.26 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+2120 queries in 53 loops of 500 loops took 601 seconds
+Estimated time for select_range_key2 (20000:4611): 5669 wallclock secs ( 2.55 usr 0.28 sys + 0.00 cusr 0.00 csys = 2.83 CPU)
+Time for select_group (111): 274 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+1320 queries in 220 loops of 2500 loops took 601 seconds
+Estimated time for min_max_on_key (15000): 6829 wallclock secs ( 5.23 usr 0.91 sys + 0.00 cusr 0.00 csys = 6.14 CPU)
+Time for min_max (60): 60 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU)
+Time for count_on_key (100): 116 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU)
+Time for count (100): 131 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
+Time for count_distinct_big (20): 201 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
+
+Testing update of keys with functions
+Time for update_of_key (50000): 136 wallclock secs (16.21 usr 11.85 sys + 0.00 cusr 0.00 csys = 28.06 CPU)
+Time for update_of_key_big (501): 320 wallclock secs ( 0.16 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.25 CPU)
+
+Testing update with key
+Time for update_with_key (300000): 518 wallclock secs (89.50 usr 33.03 sys + 0.00 cusr 0.00 csys = 122.53 CPU)
+Time for update_with_key_prefix (100000): 186 wallclock secs (30.32 usr 15.83 sys + 0.00 cusr 0.00 csys = 46.15 CPU)
+
+Testing update of all rows
+Time for update_big (10): 6046 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Testing left outer join
+Time for outer_join_on_key (10:10): 2307 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join (10:10): 2539 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+Time for outer_join_found (10:10): 2515 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+3 queries in 3 loops of 500 loops took 748 seconds
+Estimated time for outer_join_not_found (500:500): 124666 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing INSERT INTO ... SELECT
+Time for insert_select_1_key (1): 86 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert_select_2_keys (1): 196 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for drop table(2): 22 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing delete
+Time for delete_key (10000): 85 wallclock secs ( 3.19 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.67 CPU)
+Time for delete_all (12): 2478 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Insert into table with 16 keys and with a primary key with 16 parts
+Time for insert_key (100000): 804 wallclock secs (47.08 usr 47.06 sys + 0.00 cusr 0.00 csys = 94.14 CPU)
+
+Testing update of keys
+Time for update_of_primary_key_many_keys (256): 5365 wallclock secs ( 0.16 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.19 CPU)
+
+Deleting rows from the table
+Time for delete_big_many_keys (128): 93 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+
+Deleting everything from table
+Time for delete_all_many_keys (1): 94 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+
+Estimated total time: 199360 wallclock secs (879.85 usr 202.59 sys + 0.00 cusr 0.00 csys = 1082.45 CPU)
diff --git a/sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..ea359e81a2b
--- /dev/null
+++ b/sql-bench/Results/select-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,36 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 16:37:16
+
+Testing the speed of selecting on keys that consist of many parts
+The test-table has 10000 rows and the test is done with 500 ranges.
+
+Creating table
+Inserting 10000 rows
+Time to insert (10000): 10 wallclock secs ( 2.96 usr 0.39 sys + 0.00 cusr 0.00 csys = 3.35 CPU)
+
+Test if the database has a query cache
+Time for select_query_cache (10000): 2549 wallclock secs ( 3.25 usr 0.52 sys + 0.00 cusr 0.00 csys = 3.77 CPU)
+
+Time for select_query_cache2 (10000): 2547 wallclock secs ( 3.04 usr 0.53 sys + 0.00 cusr 0.00 csys = 3.57 CPU)
+
+Testing big selects on the table
+Time for select_big (70:17207): 1 wallclock secs ( 0.17 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.17 CPU)
+Time for select_range (410:1057904): 465 wallclock secs (10.41 usr 0.63 sys + 0.00 cusr 0.00 csys = 11.04 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+11326 queries in 1618 loops of 10000 loops took 601 seconds
+Estimated time for min_max_on_key (70000): 3714 wallclock secs (20.15 usr 3.46 sys + 0.00 cusr 0.00 csys = 23.61 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+17320 queries in 3464 loops of 10000 loops took 601 seconds
+Estimated time for count_on_key (50000): 1734 wallclock secs (15.76 usr 1.99 sys + 0.00 cusr 0.00 csys = 17.75 CPU)
+
+Time for count_group_on_key_parts (1000:100000): 331 wallclock secs ( 1.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.19 CPU)
+Testing count(distinct) on the table
+Time for count_distinct_key_prefix (1000:1000): 179 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.35 CPU)
+Time for count_distinct (1000:1000): 132 wallclock secs ( 0.31 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.37 CPU)
+Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.37 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.40 CPU)
+Time for count_distinct_group_on_key (1000:6000): 488 wallclock secs ( 0.41 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.44 CPU)
+Time for count_distinct_group_on_key_parts (1000:100000): 383 wallclock secs ( 1.10 usr 0.07 sys + 0.00 cusr 0.00 csys = 1.17 CPU)
+Time for count_distinct_group (1000:100000): 384 wallclock secs ( 1.07 usr 0.08 sys + 0.00 cusr 0.00 csys = 1.15 CPU)
+Time for count_distinct_big (100:1000000): 65 wallclock secs ( 7.88 usr 0.25 sys + 0.00 cusr 0.00 csys = 8.13 CPU)
+Estimated total time: 13197 wallclock secs (68.30 usr 8.18 sys + 0.00 cusr 0.00 csys = 76.48 CPU)
diff --git a/sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..079272b708e
--- /dev/null
+++ b/sql-bench/Results/wisconsin-pg-Linux_2.4.0_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,14 @@
+Testing server 'PostgreSQL version ???' at 2001-06-03 19:06:27
+
+Wisconsin benchmark test
+
+Time for create_table (3): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Inserting data
+Time to insert (31000): 33 wallclock secs ( 9.09 usr 1.58 sys + 0.00 cusr 0.00 csys = 10.67 CPU)
+Time to delete_big (1): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Running actual benchmark
+Time for wisc_benchmark (114): 16 wallclock secs ( 3.30 usr 0.65 sys + 0.00 cusr 0.00 csys = 3.95 CPU)
+
+Total time: 52 wallclock secs (12.40 usr 2.23 sys + 0.00 cusr 0.00 csys = 14.63 CPU)
diff --git a/sql-bench/bench-init.pl.sh b/sql-bench/bench-init.pl.sh
index a30e9b3d9c9..adfa114f569 100644
--- a/sql-bench/bench-init.pl.sh
+++ b/sql-bench/bench-init.pl.sh
@@ -31,7 +31,7 @@
# $server Object for current server
# $limits Hash reference to limits for benchmark
-$benchmark_version="2.12";
+$benchmark_version="2.13";
use Getopt::Long;
require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n";
diff --git a/sql-bench/limits/pg.cfg b/sql-bench/limits/pg.cfg
index 7e4d20b052a..ed1c2eaa63f 100644
--- a/sql-bench/limits/pg.cfg
+++ b/sql-bench/limits/pg.cfg
@@ -1,10 +1,10 @@
-#This file is automaticly generated by crash-me 1.54
+#This file is automaticly generated by crash-me 1.56
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
alter_add_col=yes # Alter table add column
-alter_add_constraint=no # Alter table add constraint
-alter_add_foreign_key=yes # Alter table add foreign key
+alter_add_constraint=yes # Alter table add constraint
+alter_add_foreign_key=no # Alter table add foreign key
alter_add_multi_col=no # Alter table add many columns
alter_add_primary_key=no # Alter table add primary key
alter_add_unique=no # Alter table add unique
@@ -29,21 +29,22 @@ columns_in_order_by=+64 # number of columns in order by
comment_#=no # # as comment
comment_--=yes # -- as comment (ANSI)
comment_/**/=yes # /* */ as comment
-comment_//=no # // as comment (ANSI)
+comment_//=no # // as comment
compute=no # Compute
connections=32 # Simultaneous connections (installation default)
constraint_check=yes # Column constraints
constraint_check_table=yes # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
-crash_me_version=1.54 # crash me version
+crash_me_version=1.56 # crash me version
create_default=yes # default value for column
-create_default_func=no # default value function for column
+create_default_func=yes # default value function for column
create_if_not_exists=no # create table if not exists
create_index=yes # create index
create_schema=no # Create SCHEMA
create_table_select=with AS # create table from select
cross_join=yes # cross join (same as from a,b)
+date_as_string=yes # String functions on date columns
date_infinity=no # Supports 'infinity dates
date_last=yes # Supports 9999-12-31 dates
date_one=yes # Supports 0001-01-01 dates
@@ -58,16 +59,16 @@ drop_requires_cascade=no # drop table require cascade/restrict
drop_restrict=no # drop table with cascade/restrict
end_colon=yes # allows end ';'
except=yes # except
-except_all=no # except all
+except_all=yes # except all
except_all_incompat=no # except all (incompatible lists)
except_incompat=no # except (incompatible lists)
float_int_expr=yes # mixing of integer and float in expression
foreign_key=yes # foreign keys
foreign_key_syntax=yes # foreign key syntax
-full_outer_join=no # full outer join
+full_outer_join=yes # full outer join
func_extra_!=no # Function NOT as '!' in SELECT
func_extra_%=yes # Function MOD as %
-func_extra_&=no # Function & (bitwise and)
+func_extra_&=yes # Function & (bitwise and)
func_extra_&&=no # Function AND as '&&'
func_extra_<>=yes # Function <> in SELECT
func_extra_==yes # Function =
@@ -79,12 +80,12 @@ func_extra_atn2=no # Function ATN2
func_extra_auto_num2string=no # Function automatic num->string convert
func_extra_auto_string2num=yes # Function automatic string->num convert
func_extra_between=yes # Function BETWEEN in SELECT
-func_extra_binary_shifts=no # Function << and >> (bitwise shifts)
+func_extra_binary_shifts=yes # Function << and >> (bitwise shifts)
func_extra_bit_count=no # Function BIT_COUNT
func_extra_ceil=yes # Function CEIL
func_extra_charindex=no # Function CHARINDEX
-func_extra_chr=no # Function CHR
-func_extra_concat_as_+=no # Function concatenation with +
+func_extra_chr=yes # Function CHR
+func_extra_concat_as_+=error # Function concatenation with +
func_extra_concat_list=no # Function CONCAT(list)
func_extra_convert=no # Function CONVERT
func_extra_cosh=no # Function COSH
@@ -103,7 +104,7 @@ func_extra_getdate=no # Function GETDATE
func_extra_greatest=no # Function GREATEST
func_extra_if=no # Function IF
func_extra_in_num=yes # Function IN on numbers in SELECT
-func_extra_in_str=no # Function IN on strings in SELECT
+func_extra_in_str=yes # Function IN on strings in SELECT
func_extra_initcap=yes # Function INITCAP
func_extra_instr=no # Function LOCATE as INSTR
func_extra_instr_oracle=no # Function INSTR (Oracle syntax)
@@ -114,7 +115,7 @@ func_extra_last_insert_id=no # Function LAST_INSERT_ID
func_extra_least=no # Function LEAST
func_extra_lengthb=no # Function LENGTHB
func_extra_like=yes # Function LIKE in SELECT
-func_extra_like_escape=no # Function LIKE ESCAPE in SELECT
+func_extra_like_escape=yes # Function LIKE ESCAPE in SELECT
func_extra_ln=no # Function LN
func_extra_log(m_n)=yes # Function LOG(m,n)
func_extra_logn=no # Function LOGN
@@ -160,7 +161,7 @@ func_extra_unix_timestamp=no # Function UNIX_TIMESTAMP
func_extra_userenv=no # Function USERENV
func_extra_version=yes # Function VERSION
func_extra_weekday=no # Function WEEKDAY
-func_extra_|=no # Function | (bitwise or)
+func_extra_|=yes # Function | (bitwise or)
func_extra_||=no # Function OR as '||'
func_extra_~*=yes # Function ~* (case insensitive compare)
func_odbc_abs=yes # Function ABS
@@ -192,7 +193,7 @@ func_odbc_ifnull=no # Function IFNULL
func_odbc_insert=no # Function INSERT
func_odbc_lcase=no # Function LCASE
func_odbc_left=no # Function LEFT
-func_odbc_length=no # Function REAL LENGTH
+func_odbc_length=yes # Function REAL LENGTH
func_odbc_length_without_space=no # Function ODBC LENGTH
func_odbc_locate_2=no # Function LOCATE(2 arg)
func_odbc_locate_3=no # Function LOCATE(3 arg)
@@ -220,7 +221,7 @@ func_odbc_sin=yes # Function SIN
func_odbc_soundex=no # Function SOUNDEX
func_odbc_space=no # Function SPACE
func_odbc_sqrt=no # Function SQRT
-func_odbc_substring=no # Function ODBC SUBSTRING
+func_odbc_substring=yes # Function ODBC SUBSTRING
func_odbc_tan=yes # Function TAN
func_odbc_timestampadd=no # Function TIMESTAMPADD
func_odbc_timestampdiff=no # Function TIMESTAMPDIFF
@@ -246,8 +247,8 @@ func_sql_localtime=no # Function LOCALTIME
func_sql_localtimestamp=no # Function LOCALTIMESTAMP
func_sql_lower=yes # Function LOWER
func_sql_nullif_num=yes # Function NULLIF with numbers
-func_sql_nullif_string=no # Function NULLIF with strings
-func_sql_octet_length=no # Function OCTET_LENGTH
+func_sql_nullif_string=yes # Function NULLIF with strings
+func_sql_octet_length=yes # Function OCTET_LENGTH
func_sql_position=yes # Function POSITION
func_sql_searched_case=yes # Function searched CASE
func_sql_session_user=yes # Function SESSION_USER
@@ -264,7 +265,7 @@ func_where_eq_some=yes # Function = SOME
func_where_exists=yes # Function EXISTS
func_where_in_num=yes # Function IN on numbers
func_where_like=yes # Function LIKE
-func_where_like_escape=no # Function LIKE ESCAPE
+func_where_like_escape=yes # Function LIKE ESCAPE
func_where_match=no # Function MATCH
func_where_match_unique=no # Function MATCH UNIQUE
func_where_matches=no # Function MATCHES
@@ -283,8 +284,8 @@ group_func_extra_bit_and=no # Group function BIT_AND
group_func_extra_bit_or=no # Group function BIT_OR
group_func_extra_count_distinct_list=no # Group function COUNT(DISTINCT expr,expr,...)
group_func_extra_std=no # Group function STD
-group_func_extra_stddev=no # Group function STDDEV
-group_func_extra_variance=no # Group function VARIANCE
+group_func_extra_stddev=yes # Group function STDDEV
+group_func_extra_variance=yes # Group function VARIANCE
group_func_sql_any=no # Group function ANY
group_func_sql_avg=yes # Group function AVG
group_func_sql_count_*=yes # Group function COUNT (*)
@@ -315,37 +316,37 @@ insert_multi_value=no # INSERT with Value lists
insert_select=yes # insert INTO ... SELECT ...
insert_with_set=no # INSERT with set syntax
intersect=yes # intersect
-intersect_all=no # intersect all
+intersect_all=yes # intersect all
intersect_all_incompat=no # intersect all (incompatible lists)
intersect_incompat=no # intersect (incompatible lists)
join_tables=+64 # tables in join
-left_outer_join=no # left outer join
-left_outer_join_using=no # left outer join using
+left_outer_join=yes # left outer join
+left_outer_join_using=yes # left outer join using
like_with_column=yes # column LIKE column
like_with_number=yes # LIKE on numbers
lock_tables=yes # lock table
logical_value=1 # Value of logical operation (1=1)
max_big_expressions=10 # big expressions
-max_char_size=8104 # max char() size
+max_char_size=+8000000 # max char() size
max_column_name=+512 # column name length
max_columns=1600 # Columns in table
max_conditions=19994 # OR and AND in WHERE
max_expressions=9999 # simple expressions
max_index=+64 # max index
-max_index_length=2704 # index length
+max_index_length=+8192 # index length
max_index_name=+512 # index name length
-max_index_part_length=2704 # max index part length
+max_index_part_length=235328 # max index part length
max_index_parts=16 # index parts
-max_index_varchar_part_length=2704 # index varchar part length
-max_row_length=7949 # max table row length (without blobs)
-max_row_length_with_null=7949 # table row length with nulls (without blobs)
+max_index_varchar_part_length=235328 # index varchar part length
+max_row_length=64519 # max table row length (without blobs)
+max_row_length_with_null=64519 # table row length with nulls (without blobs)
max_select_alias_name=+512 # select alias name length
max_stack_expression=+2000 # stacked expressions
max_table_alias_name=+512 # table alias name length
max_table_name=+512 # table name length
-max_text_size=8104 # max text or blob size
+max_text_size=+8000000 # max text or blob size
max_unique_index=+64 # unique indexes
-max_varchar_size=8104 # max varchar() size
+max_varchar_size=+8000000 # max varchar() size
minus=no # minus
minus_incompat=no # minus (incompatible lists)
minus_neg=no # Calculate 1--1
@@ -356,7 +357,7 @@ multi_table_delete=no # DELETE FROM table1,table2...
multi_table_update=no # Update with many tables
natural_join=yes # natural join
natural_join_incompat=yes # natural join (incompatible lists)
-natural_left_outer_join=no # natural left outer join
+natural_left_outer_join=yes # natural left outer join
no_primary_key=yes # Tables without primary key
null_concat_expr=yes # Is 'a' || NULL = NULL
null_in_index=yes # null in index
@@ -364,7 +365,7 @@ null_in_unique=yes # null in unique index
null_num_expr=yes # Is 1+NULL = NULL
nulls_in_unique=yes # null combination in unique index
odbc_left_outer_join=no # left outer join odbc style
-operating_system=Linux 2.2.14-5.0 i686 # crash-me tested on
+operating_system=Linux 2.4.0-64GB-SMP i686 # crash-me tested on
order_by=yes # Order by
order_by_alias=yes # Order by alias
order_by_function=yes # Order by function
@@ -386,7 +387,7 @@ remember_end_space=no # Remembers end space in char()
remember_end_space_varchar=yes # Remembers end space in varchar()
rename_table=no # rename table
repeat_string_size=+8000000 # return string size from function
-right_outer_join=no # right outer join
+right_outer_join=yes # right outer join
rowid=oid # Type for row id
select_constants=yes # Select constants
select_limit=with LIMIT # LIMIT number of rows
@@ -394,7 +395,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=16777207 # constant string size in SELECT
select_table_update=yes # Update with sub select
select_without_from=yes # SELECT without FROM
-server_version=PostgreSQL version 7.0.2 # server version
+server_version=PostgreSQL version 7.1.1 # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=yes # subqueries
@@ -466,7 +467,7 @@ type_extra_timespan=yes # Type timespan
type_extra_uint=no # Type uint
type_extra_varchar2(1_arg)=no # Type varchar2(1 arg)
type_extra_year=no # Type year
-type_odbc_bigint=no # Type bigint
+type_odbc_bigint=yes # Type bigint
type_odbc_binary(1_arg)=no # Type binary(1 arg)
type_odbc_datetime=yes # Type datetime
type_odbc_tinyint=no # Type tinyint
@@ -519,4 +520,4 @@ union_incompat=yes # union (incompatible lists)
unique_in_create=yes # unique in create table
unique_null_in_create=yes # unique null in create
views=yes # views
-where_string_size=16777182 # constant string size in where
+where_string_size=16777181 # constant string size in where
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index 0ed6926a297..e43548d924e 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -122,53 +122,49 @@ sub new
$self->{'vacuum'} = 1; # When using with --fast
$self->{'drop_attr'} = "";
- $limits{'max_conditions'} = 9999; # (Actually not a limit)
- $limits{'max_columns'} = 2000; # Max number of columns in table
- # Windows can't handle that many files in one directory
- $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
- $limits{'max_text_size'} = 65000; # Max size with default buffers.
- $limits{'query_size'} = 1000000; # Max size with default buffers.
- $limits{'max_index'} = 16; # Max number of keys
- $limits{'max_index_parts'} = 16; # Max segments/key
- $limits{'max_column_name'} = 64; # max table and column name
-
- $limits{'join_optimizer'} = 1; # Can optimize FROM tables
- $limits{'load_data_infile'} = 1; # Has load data infile
- $limits{'lock_tables'} = 1; # Has lock tables
- $limits{'functions'} = 1; # Has simple functions (+/-)
- $limits{'group_functions'} = 1; # Have group functions
- $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
- $limits{'group_distinct_functions'}= 1; # Have count(distinct)
- $limits{'select_without_from'}= 1; # Can do 'select 1';
- $limits{'multi_drop'} = 1; # Drop table can take many tables
- $limits{'subqueries'} = 0; # Doesn't support sub-queries.
- $limits{'left_outer_join'} = 1; # Supports left outer joins
- $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
- $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
- $limits{'having_with_group'} = 1; # Can use group functions in HAVING
- $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
- $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
- $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
- $limits{'alter_table'} = 1; # Have ALTER TABLE
+ $limits{'NEG'} = 1; # Supports -id
$limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
+ $limits{'alter_table'} = 1; # Have ALTER TABLE
$limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column
- $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
-
- $limits{'group_func_extra_std'} = 1; # Have group function std().
-
- $limits{'func_odbc_mod'} = 1; # Have function mod.
+ $limits{'column_alias'} = 1; # Alias for fields in select statement.
$limits{'func_extra_%'} = 1; # Has % as alias for mod()
- $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
$limits{'func_extra_if'} = 1; # Have function if.
- $limits{'column_alias'} = 1; # Alias for fields in select statement.
- $limits{'NEG'} = 1; # Supports -id
$limits{'func_extra_in_num'} = 1; # Has function in
- $limits{'limit'} = 1; # supports the limit attribute
- $limits{'unique_index'} = 1; # Unique index works or not
+ $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
+ $limits{'func_odbc_mod'} = 1; # Have function mod.
+ $limits{'functions'} = 1; # Has simple functions (+/-)
+ $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
+ $limits{'group_distinct_functions'}= 1; # Have count(distinct)
+ $limits{'group_func_extra_std'} = 1; # Have group function std().
+ $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
+ $limits{'group_functions'} = 1; # Have group functions
+ $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
+ $limits{'having_with_group'} = 1; # Can use group functions in HAVING
+ $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
$limits{'insert_select'} = 1;
- $limits{'working_blobs'} = 1; # If big varchar/blobs works
+ $limits{'join_optimizer'} = 1; # Can optimize FROM tables
+ $limits{'left_outer_join'} = 1; # Supports left outer joins
+ $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
+ $limits{'limit'} = 1; # supports the limit attribute
+ $limits{'load_data_infile'} = 1; # Has load data infile
+ $limits{'lock_tables'} = 1; # Has lock tables
+ $limits{'max_column_name'} = 64; # max table and column name
+ $limits{'max_columns'} = 2000; # Max number of columns in table
+ $limits{'max_conditions'} = 9999; # (Actually not a limit)
+ $limits{'max_index'} = 16; # Max number of keys
+ $limits{'max_index_parts'} = 16; # Max segments/key
+ $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
+ $limits{'max_text_size'} = 1000000; # Good enough for tests
+ $limits{'multi_drop'} = 1; # Drop table can take many tables
+ $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
$limits{'order_by_unused'} = 1;
+ $limits{'query_size'} = 1000000; # Max size with default buffers.
+ $limits{'select_without_from'}= 1; # Can do 'select 1';
+ $limits{'subqueries'} = 0; # Doesn't support sub-queries.
+ $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
+ $limits{'unique_index'} = 1; # Unique index works or not
$limits{'working_all_fields'} = 1;
+ $limits{'working_blobs'} = 1; # If big varchar/blobs works
$smds{'time'} = 1;
$smds{'q1'} = 'b'; # with time not supp by mysql ('')
@@ -569,12 +565,12 @@ sub new
$self->{'drop_attr'} = "";
$self->{"vacuum"} = 1;
$limits{'join_optimizer'} = 1; # Can optimize FROM tables
- $limits{'load_data_infile'} = 0; # Is this true ?
+ $limits{'load_data_infile'} = 0;
- $limits{'NEG'} = 1; # Can't handle -id
- $limits{'alter_table'} = 1; # alter ??
+ $limits{'NEG'} = 1;
$limits{'alter_add_multi_col'}= 0; # alter_add_multi_col ?
- $limits{'alter_table_dropcol'}= 0; # alter_drop_col ?
+ $limits{'alter_table'} = 1;
+ $limits{'alter_table_dropcol'}= 0;
$limits{'column_alias'} = 1;
$limits{'func_extra_%'} = 1;
$limits{'func_extra_if'} = 0;
@@ -583,33 +579,33 @@ sub new
$limits{'func_odbc_mod'} = 1; # Has %
$limits{'functions'} = 1;
$limits{'group_by_position'} = 1;
+ $limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'group_func_extra_std'} = 0;
$limits{'group_func_sql_min_str'}= 1; # Can execute MIN() and MAX() on strings
$limits{'group_functions'} = 1;
- $limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'having_with_alias'} = 0;
$limits{'having_with_group'} = 1;
- $limits{'left_outer_join'} = 0;
+ $limits{'insert_select'} = 1;
+ $limits{'left_outer_join'} = 1;
$limits{'like_with_column'} = 1;
$limits{'lock_tables'} = 0; # in ATIS gives this a problem
+ $limits{'max_column_name'} = 128;
+ $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
+ $limits{'max_conditions'} = 9999; # This makes Pg real slow
+ $limits{'max_index'} = 64; # Big enough
+ $limits{'max_index_parts'} = 16;
+ $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
+ $limits{'max_text_size'} = 65000; # Good enough for test
$limits{'multi_drop'} = 1;
$limits{'order_by_position'} = 1;
+ $limits{'order_by_unused'} = 1;
+ $limits{'query_size'} = 16777216;
$limits{'select_without_from'}= 1;
$limits{'subqueries'} = 1;
$limits{'table_wildcard'} = 1;
- $limits{'max_column_name'} = 32; # Is this true
- $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
- $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
- $limits{'max_conditions'} = 30; # This makes Pg real slow
- $limits{'max_index'} = 64; # Is this true ?
- $limits{'max_index_parts'} = 16; # Is this true ?
- $limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
- $limits{'query_size'} = 16777216;
$limits{'unique_index'} = 1; # Unique index works or not
- $limits{'insert_select'} = 1;
- $limits{'working_blobs'} = 1; # If big varchar/blobs works
- $limits{'order_by_unused'} = 1;
$limits{'working_all_fields'} = 1;
+ $limits{'working_blobs'} = 1; # If big varchar/blobs works
# the different cases per query ...
$smds{'q1'} = 'b'; # with time
@@ -640,7 +636,7 @@ sub new
sub version
{
my ($version,$dir);
- foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/my/local/pgsql/")
+ foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
{
if ($dir && -e "$dir/PG_VERSION")
{
diff --git a/sql-bench/test-connect.sh b/sql-bench/test-connect.sh
index cddb32e2775..862161e3a03 100644
--- a/sql-bench/test-connect.sh
+++ b/sql-bench/test-connect.sh
@@ -266,7 +266,7 @@ for ($i=0 ; $i < $opt_loop_count ; $i++)
}
$end_time=new Benchmark;
-print "Time to select_big ($opt_loop_count): " .
+print "Time to select_big_str ($opt_loop_count): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
$sth = $dbh->do("drop table bench1" . $server->{'drop_attr'})
diff --git a/sql/ha_gemini.cc b/sql/ha_gemini.cc
index 733f0aa3a7d..c95a348f238 100644
--- a/sql/ha_gemini.cc
+++ b/sql/ha_gemini.cc
@@ -19,11 +19,7 @@
#pragma implementation // gcc: Class implementation
#endif
-#include <string.h>
-
#include "mysql_priv.h"
-#include "my_pthread.h"
-
#ifdef HAVE_GEMINI_DB
#include "ha_gemini.h"
#include "dbconfig.h"
@@ -33,6 +29,7 @@
#include <m_ctype.h>
#include <myisampack.h>
+#include <m_string.h>
#include <assert.h>
#include <hash.h>
#include <stdarg.h>
diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc
index fa44cebe19d..4a69056a9e2 100644
--- a/sql/ha_innobase.cc
+++ b/sql/ha_innobase.cc
@@ -2142,6 +2142,7 @@ ha_innobase::external_lock(
prebuilt->in_update_remember_pos = TRUE;
if (lock_type == F_WRLCK) {
+
/* If this is a SELECT, then it is in UPDATE TABLE ...
or SELECT ... FOR UPDATE */
prebuilt->select_lock_type = LOCK_X;
@@ -2153,13 +2154,27 @@ ha_innobase::external_lock(
}
trx->n_mysql_tables_in_use++;
+
+ if (prebuilt->select_lock_type != LOCK_NONE) {
+
+ trx->mysql_n_tables_locked++;
+ }
} else {
trx->n_mysql_tables_in_use--;
- if (trx->n_mysql_tables_in_use == 0 &&
- !(thd->options
- & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
- innobase_commit(thd, trx);
+ if (trx->n_mysql_tables_in_use == 0) {
+
+ trx->mysql_n_tables_locked = 0;
+
+ if (trx->has_search_latch) {
+
+ trx_search_latch_release_if_reserved(trx);
+ }
+
+ if (!(thd->options
+ & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
+ innobase_commit(thd, trx);
+ }
}
}
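The external_lock() hunk above ties the new trx fields together: when the last table of a statement is released, mysql_n_tables_locked is cleared and the adaptive hash index s-latch is dropped via trx_search_latch_release_if_reserved() before any autocommit runs. A compact, self-contained model of that ordering, using stand-in types rather than the real handler code:

#include <stdio.h>
#include <stdbool.h>

typedef struct {
  int  n_mysql_tables_in_use;
  int  mysql_n_tables_locked;
  bool has_search_latch;
} trx_t;

static void trx_search_latch_release_if_reserved(trx_t *trx)
{
  if (trx->has_search_latch) {
    /* rw_lock_s_unlock(&btr_search_latch) in the real function */
    trx->has_search_latch = false;
  }
}

static void statement_end(trx_t *trx, bool autocommit)
{
  trx->n_mysql_tables_in_use--;
  if (trx->n_mysql_tables_in_use == 0) {
    trx->mysql_n_tables_locked = 0;            /* forget this statement's locks */
    trx_search_latch_release_if_reserved(trx); /* never hold the latch ...      */
    if (autocommit)                            /* ... across a commit           */
      printf("commit\n");
  }
}

int main(void)
{
  trx_t trx = {1, 1, true};

  statement_end(&trx, true);
  printf("locked=%d latch=%d\n", trx.mysql_n_tables_locked,
         (int) trx.has_search_latch);
  return 0;
}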
@@ -2690,6 +2705,39 @@ ha_innobase::info(
DBUG_VOID_RETURN;
}
+/***********************************************************************
+Tries to check that an InnoDB table is not corrupted. If corruption is
+noticed, prints to stderr information about it. In case of corruption
+may also assert a failure and crash the server. */
+
+int
+ha_innobase::check(
+/*===============*/
+ /* out: HA_ADMIN_CORRUPT or
+ HA_ADMIN_OK */
+ THD* thd, /* in: user thread handle */
+ HA_CHECK_OPT* check_opt) /* in: check options, currently
+ ignored */
+{
+ row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+ ulint ret;
+
+ if (prebuilt->mysql_template == NULL) {
+ /* Build the template; we will use a dummy template
+ in index scans done in checking */
+
+ build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
+ }
+
+ ret = row_check_table_for_mysql(prebuilt);
+
+ if (ret == DB_SUCCESS) {
+ return(HA_ADMIN_OK);
+ }
+
+ return(HA_ADMIN_CORRUPT);
+}
+
/*****************************************************************
Adds information about free space in the InnoDB tablespace to a
table comment which is printed out when a user calls SHOW TABLE STATUS. */
diff --git a/sql/ha_innobase.h b/sql/ha_innobase.h
index 258e34cbf86..d832ac93d0f 100644
--- a/sql/ha_innobase.h
+++ b/sql/ha_innobase.h
@@ -142,7 +142,7 @@ class ha_innobase: public handler
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
-
+ int check(THD* thd, HA_CHECK_OPT* check_opt);
char* update_table_comment(const char* comment);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index ccbf2694345..c8e3bb56449 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -95,17 +95,16 @@ int deny_severity = LOG_WARNING;
#include <sys/mman.h>
#endif
+#ifdef _AIX41
+int initgroups(const char *,unsigned int);
+#endif
+
#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
#include <ieeefp.h>
#ifdef HAVE_FP_EXCEPT // Fix type conflict
typedef fp_except fp_except_t;
#endif
-#ifdef _AIX41
-extern "C" int initgroups(const char *,int);
-#endif
-
-
/* We can't handle floating point expections with threads, so disable
this on freebsd
*/
diff --git a/tests/fork_big.pl b/tests/fork_big.pl
index 8f16db74793..4009a9da71b 100755
--- a/tests/fork_big.pl
+++ b/tests/fork_big.pl
@@ -88,6 +88,7 @@ for ($i=0 ; $i < $opt_threads ; $i ++)
{
test_select() if (($pid=fork()) == 0); $work{$pid}="select_key";
}
+test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
test_update() if (($pid=fork()) == 0); $work{$pid}="update";
test_flush() if (($pid=fork()) == 0); $work{$pid}= "flush";
@@ -214,6 +215,35 @@ sub test_select
}
#
+# Do big select count(distinct..) over the table
+#
+
+sub test_select_count
+{
+ my ($dbh, $i, $j, $count, $loop);
+
+ $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
+ $opt_user, $opt_password,
+ { PrintError => 0}) || die $DBI::errstr;
+
+ $count=0;
+ $i=0;
+ while (!test_if_abort($dbh))
+ {
+ for ($j=0 ; $j < $numtables ; $j++)
+ {
+ my ($table)= $testtables[$j]->[0];
+ simple_query($dbh, "select count(distinct marker),count(distinct id),count(distinct info) from $table");
+ $count++;
+ }
+ sleep(20); # This query is quite slow
+ }
+ $dbh->disconnect; $dbh=0;
+ print "Test_select: Executed $count select count(distinct) queries\n";
+ exit(0);
+}
+
+#
# Delete 1-5 rows from the first 2 tables.
# Test ends when the number of rows for table 3 didn't change during
# one loop
diff --git a/vio/vio.c b/vio/vio.c
index c47671d0e23..96cb0c31ef6 100644
--- a/vio/vio.c
+++ b/vio/vio.c
@@ -66,12 +66,16 @@ void vio_reset(Vio* vio, enum enum_vio_type type,
my_socket sd, HANDLE hPipe,
my_bool localhost)
{
+ DBUG_ENTER("vio_reset");
+ DBUG_PRINT("enter", ("type=%d sd=%d localhost=%d", type, sd, localhost));
+
bzero((char*) vio, sizeof(*vio));
vio->type = type;
vio->sd = sd;
vio->hPipe = hPipe;
vio->localhost= localhost;
#ifdef HAVE_VIO
+#ifdef HAVE_OPENSSL
if (type == VIO_TYPE_SSL)
{
vio->viodelete =vio_ssl_delete;
@@ -85,8 +89,11 @@ void vio_reset(Vio* vio, enum enum_vio_type type,
vio->peer_addr =vio_ssl_peer_addr;
vio->in_addr =vio_ssl_in_addr;
vio->poll_read =vio_ssl_poll_read;
+ vio->vioblocking =vio_blocking;
+ vio->is_blocking =vio_is_blocking;
}
else /* default is VIO_TYPE_TCPIP */
+#endif /* HAVE_OPENSSL */
{
vio->viodelete =vio_delete;
vio->vioerrno =vio_errno;
@@ -99,8 +106,11 @@ void vio_reset(Vio* vio, enum enum_vio_type type,
vio->peer_addr =vio_peer_addr;
vio->in_addr =vio_in_addr;
vio->poll_read =vio_poll_read;
+ vio->vioblocking =vio_blocking;
+ vio->is_blocking =vio_is_blocking;
}
#endif /* HAVE_VIO */
+ DBUG_VOID_RETURN;
}
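The vio.c hunk above compiles the SSL branch of vio_reset() only under HAVE_OPENSSL and installs vioblocking/is_blocking in both branches of the per-connection dispatch table. The following is a stripped-down model of that dispatch-table pattern with hypothetical names (vio_ops_t, ops_reset), not the real Vio structure:

#include <stdio.h>

typedef struct {
    int (*vioblocking)(void *vio, int set_blocking);
    int (*is_blocking)(void *vio);
} vio_ops_t;

static int plain_blocking(void *vio, int set) { (void) vio; (void) set; return 0; }
static int plain_is_blocking(void *vio)       { (void) vio; return 1; }

#ifdef HAVE_OPENSSL
static int ssl_blocking(void *vio, int set)   { (void) vio; (void) set; return 0; }
static int ssl_is_blocking(void *vio)         { (void) vio; return 1; }
#endif

/* Mirror of vio_reset(): pick the SSL implementations only when they are
   compiled in, otherwise always fall back to plain TCP/IP. */
static void ops_reset(vio_ops_t *ops, int want_ssl)
{
#ifdef HAVE_OPENSSL
    if (want_ssl) {
        ops->vioblocking = ssl_blocking;
        ops->is_blocking = ssl_is_blocking;
        return;
    }
#else
    (void) want_ssl;
#endif
    ops->vioblocking = plain_blocking;
    ops->is_blocking = plain_is_blocking;
}

int main(void)
{
    vio_ops_t ops;

    ops_reset(&ops, 0);
    printf("blocking=%d\n", ops.is_blocking(NULL));
    return 0;
}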
/* Open the socket or TCP/IP connection and read the fnctl() status */