summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorunknown <serg@serg.mysql.com>2001-07-02 21:52:22 +0200
committerunknown <serg@serg.mysql.com>2001-07-02 21:52:22 +0200
commita1826b55198ebd1ea5a3bafb88a52d1cf0c2f427 (patch)
treeb02cd20fcbc632447cd4b10782204969c9e6af34
parent2d28c646cbd53c1fcdf800dc408580aa5377f3b9 (diff)
parentcdfc04fb0819109ba3f7b78e87191b9b8311b9bf (diff)
downloadmariadb-git-a1826b55198ebd1ea5a3bafb88a52d1cf0c2f427.tar.gz
merged
include/my_base.h: Auto merged include/myisam.h: Auto merged myisam/mi_open.c: Auto merged myisam/myisamdef.h: Auto merged myisam/myisampack.c: Auto merged mysql-test/t/alter_table.test: Auto merged mysys/tree.c: Auto merged sql/ha_myisam.cc: Auto merged sql/item_sum.cc: Auto merged sql/mysqld.cc: Auto merged sql/sql_table.cc: Auto merged
-rw-r--r--.bzrignore18
-rw-r--r--BUILD/FINISH.sh4
-rw-r--r--BUILD/SETUP.sh8
-rwxr-xr-xBUILD/compile-pentium2
-rwxr-xr-xBUILD/compile-pentium-debug-max2
-rwxr-xr-xBUILD/compile-pentium-debug-openssl13
-rw-r--r--BitKeeper/etc/logging_ok6
-rwxr-xr-xBitKeeper/triggers/post-commit21
-rwxr-xr-xBitKeeper/triggers/post-incoming3
-rwxr-xr-xBitKeeper/triggers/post-outgoing3
-rwxr-xr-xBuild-tools/Do-compile21
-rw-r--r--[-rwxr-xr-x]Docs/Flags/australia.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/australia.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/austria.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/austria.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/canada.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/canada.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/czech-republic.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/czech-republic.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/germany.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/germany.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/great-britain.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/great-britain.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/hungary.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/hungary.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/israel.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/israel.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/italy.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/italy.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/japan.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/japan.txt0
-rw-r--r--Docs/Flags/latvia.eps99
-rw-r--r--Docs/Flags/latvia.gifbin0 -> 117 bytes
-rw-r--r--Docs/Flags/latvia.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/russia.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/russia.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/south-korea.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/south-korea.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/sweden.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/sweden.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/taiwan.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/taiwan.txt0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/usa.eps0
-rw-r--r--[-rwxr-xr-x]Docs/Flags/usa.txt0
-rw-r--r--Docs/internals.texi209
-rw-r--r--Docs/manual.texi3453
-rw-r--r--Makefile.am2
-rw-r--r--acinclude.m44
-rw-r--r--bdb/include/rpc_server_ext.h21
-rw-r--r--client/Makefile.am2
-rw-r--r--client/errmsg.c42
-rw-r--r--client/mysqladmin.c4
-rw-r--r--client/mysqlcheck.c52
-rw-r--r--client/mysqlshow.c171
-rw-r--r--client/mysqltest.c262
-rw-r--r--configure.in28
-rw-r--r--include/errmsg.h7
-rw-r--r--include/m_string.h13
-rw-r--r--include/my_base.h1
-rw-r--r--include/my_sys.h23
-rw-r--r--include/myisam.h2
-rw-r--r--include/mysql.h72
-rw-r--r--include/mysql_com.h17
-rw-r--r--include/mysqld_error.h10
-rw-r--r--include/mysys_err.h5
-rw-r--r--include/vio.h278
-rw-r--r--include/violite.h166
-rw-r--r--innobase/btr/btr0btr.c123
-rw-r--r--innobase/btr/btr0cur.c9
-rw-r--r--innobase/btr/btr0pcur.c6
-rw-r--r--innobase/btr/btr0sea.c11
-rw-r--r--innobase/buf/buf0buf.c73
-rw-r--r--innobase/buf/buf0flu.c6
-rw-r--r--innobase/buf/buf0lru.c6
-rw-r--r--innobase/buf/buf0rea.c26
-rw-r--r--innobase/configure.in6
-rw-r--r--innobase/dict/dict0boot.c9
-rw-r--r--innobase/fsp/fsp0fsp.c40
-rw-r--r--innobase/ibuf/ibuf0ibuf.c8
-rw-r--r--innobase/include/btr0btr.h3
-rw-r--r--innobase/include/btr0cur.h14
-rw-r--r--innobase/include/btr0pcur.h6
-rw-r--r--innobase/include/btr0pcur.ic6
-rw-r--r--innobase/include/buf0buf.h29
-rw-r--r--innobase/include/mem0pool.h3
-rw-r--r--innobase/include/page0cur.ic4
-rw-r--r--innobase/include/row0mysql.h14
-rw-r--r--innobase/include/srv0start.h3
-rw-r--r--innobase/include/sync0sync.ic31
-rw-r--r--innobase/include/trx0trx.h14
-rw-r--r--innobase/include/univ.i4
-rw-r--r--innobase/include/ut0ut.h7
-rw-r--r--innobase/log/log0log.c8
-rw-r--r--innobase/log/log0recv.c6
-rw-r--r--innobase/os/os0file.c6
-rw-r--r--innobase/page/page0page.c142
-rw-r--r--innobase/rem/rem0cmp.c4
-rw-r--r--innobase/row/row0mysql.c150
-rw-r--r--innobase/row/row0sel.c125
-rw-r--r--innobase/row/row0umod.c27
-rw-r--r--innobase/row/row0upd.c30
-rw-r--r--innobase/srv/srv0start.c27
-rw-r--r--innobase/sync/sync0sync.c40
-rw-r--r--innobase/trx/trx0roll.c10
-rw-r--r--innobase/trx/trx0sys.c2
-rw-r--r--innobase/trx/trx0trx.c22
-rw-r--r--innobase/trx/trx0undo.c10
-rw-r--r--innobase/ut/ut0ut.c45
-rw-r--r--isam/_dynrec.c2
-rw-r--r--libmysql/Makefile.am6
-rw-r--r--libmysql/Makefile.shared12
-rw-r--r--libmysql/errmsg.c43
-rw-r--r--libmysql/libmysql.c435
-rw-r--r--libmysql/net.c2
-rw-r--r--libmysqld/Makefile.am4
-rw-r--r--libmysqld/lib_sql.cc37
-rw-r--r--libmysqld/lib_vio.c7
-rw-r--r--libmysqld/libmysqld.c181
-rw-r--r--man/Makefile.am2
-rw-r--r--man/isamchk.12
-rw-r--r--man/isamlog.12
-rw-r--r--man/mysql.14
-rw-r--r--man/mysql_zap.12
-rw-r--r--man/mysqlaccess.12
-rw-r--r--man/mysqladmin.12
-rw-r--r--man/mysqld.14
-rw-r--r--man/mysqld_multi.14
-rw-r--r--man/mysqld_safe.1 (renamed from man/safe_mysqld.1)10
-rw-r--r--man/mysqldump.12
-rw-r--r--man/mysqlshow.12
-rw-r--r--man/perror.12
-rw-r--r--man/replace.12
-rw-r--r--man/which.22
-rw-r--r--myisam/ft_dump.c10
-rw-r--r--myisam/mi_check.c63
-rw-r--r--myisam/mi_create.c80
-rw-r--r--myisam/mi_dbug.c2
-rw-r--r--myisam/mi_delete_table.c4
-rw-r--r--myisam/mi_dynrec.c9
-rw-r--r--myisam/mi_info.c2
-rw-r--r--myisam/mi_open.c38
-rw-r--r--myisam/mi_packrec.c2
-rw-r--r--myisam/mi_rename.c4
-rw-r--r--myisam/mi_search.c14
-rw-r--r--myisam/mi_statrec.c48
-rwxr-xr-xmyisam/mi_test_all.sh7
-rw-r--r--myisam/myisamchk.c35
-rw-r--r--myisam/myisamdef.h4
-rw-r--r--myisam/myisamlog.c5
-rw-r--r--myisam/myisampack.c3
-rw-r--r--mysql-test/Makefile.am2
-rw-r--r--mysql-test/install_test_db.sh8
-rw-r--r--mysql-test/mysql-test-run.sh108
-rw-r--r--mysql-test/r/alter_table.result5
-rw-r--r--mysql-test/r/bdb.result2
-rw-r--r--mysql-test/r/big_test.require2
-rw-r--r--mysql-test/r/bigint.result6
-rw-r--r--mysql-test/r/check.result2
-rw-r--r--mysql-test/r/count_distinct.result2
-rw-r--r--mysql-test/r/create.result4
-rw-r--r--mysql-test/r/drop.result9
-rw-r--r--mysql-test/r/flush.result4
-rw-r--r--mysql-test/r/func_test.result2
-rw-r--r--mysql-test/r/have_symlink.require2
-rw-r--r--mysql-test/r/innodb.result2
-rw-r--r--mysql-test/r/multi_update.result22
-rw-r--r--mysql-test/r/order_by.result116
-rw-r--r--mysql-test/r/order_fill_sortbuf.result2
-rw-r--r--mysql-test/r/rpl000002.result2
-rw-r--r--mysql-test/r/rpl000009.result30
-rw-r--r--mysql-test/r/rpl000014.result20
-rw-r--r--mysql-test/r/rpl000015.result18
-rw-r--r--mysql-test/r/rpl000016.result14
-rw-r--r--mysql-test/r/rpl_log.result57
-rw-r--r--mysql-test/r/rpl_magic.result22
-rw-r--r--mysql-test/r/rpl_sporadic_master.result7
-rw-r--r--mysql-test/r/select_found.result2
-rw-r--r--mysql-test/r/show_check.result10
-rw-r--r--mysql-test/r/symlink.result23
-rw-r--r--mysql-test/r/type_float.result2
-rw-r--r--mysql-test/r/variables.result4
-rw-r--r--mysql-test/std_data/master-bin.001bin113 -> 98 bytes
-rw-r--r--mysql-test/t/alter_table.test11
-rw-r--r--mysql-test/t/bdb-crash.test1
-rw-r--r--mysql-test/t/bdb.test14
-rw-r--r--mysql-test/t/bigint.test8
-rw-r--r--mysql-test/t/check.test19
-rw-r--r--mysql-test/t/compare.test1
-rw-r--r--mysql-test/t/count_distinct.test10
-rw-r--r--mysql-test/t/count_distinct2.test2
-rw-r--r--mysql-test/t/create.test12
-rw-r--r--mysql-test/t/drop.test31
-rw-r--r--mysql-test/t/err000001.test3
-rw-r--r--mysql-test/t/flush.test25
-rw-r--r--mysql-test/t/func_test.test10
-rw-r--r--mysql-test/t/innodb.test4
-rw-r--r--mysql-test/t/insert.test12
-rw-r--r--mysql-test/t/multi_update.test51
-rw-r--r--mysql-test/t/order_by.test59
-rw-r--r--mysql-test/t/order_fill_sortbuf-master.opt1
-rw-r--r--mysql-test/t/order_fill_sortbuf.test20
-rw-r--r--mysql-test/t/overflow.test2
-rw-r--r--mysql-test/t/rpl000002.test1
-rw-r--r--mysql-test/t/rpl000009.test53
-rw-r--r--mysql-test/t/rpl000014.test2
-rwxr-xr-xmysql-test/t/rpl000017-slave.sh1
-rw-r--r--mysql-test/t/rpl_log.test38
-rw-r--r--mysql-test/t/rpl_magic.test30
-rw-r--r--mysql-test/t/rpl_sporadic_master-master.opt1
-rw-r--r--mysql-test/t/rpl_sporadic_master.test24
-rw-r--r--mysql-test/t/select.test2
-rw-r--r--mysql-test/t/select_found.test2
-rw-r--r--mysql-test/t/show_check.test7
-rw-r--r--mysql-test/t/status.test2
-rw-r--r--mysql-test/t/symlink.test82
-rw-r--r--mysql-test/t/type_float.test3
-rw-r--r--mysql-test/t/variables.test4
-rw-r--r--mysys/Makefile.am1
-rw-r--r--mysys/errors.c8
-rw-r--r--mysys/getvar.c2
-rw-r--r--mysys/mf_brkhant.c6
-rw-r--r--mysys/mf_cache.c3
-rw-r--r--mysys/mf_format.c30
-rw-r--r--mysys/mf_pack.c7
-rw-r--r--mysys/mf_same.c15
-rw-r--r--mysys/my_alloc.c69
-rw-r--r--mysys/my_compress.c2
-rw-r--r--mysys/my_copy.c4
-rw-r--r--mysys/my_delete.c1
-rw-r--r--mysys/my_pread.c6
-rw-r--r--mysys/my_static.c1
-rw-r--r--mysys/my_symlink.c138
-rw-r--r--mysys/my_symlink2.c155
-rw-r--r--mysys/tree.c2
-rw-r--r--pstack/bucomm.c2
-rw-r--r--scripts/Makefile.am2
-rw-r--r--scripts/make_binary_distribution.sh57
-rw-r--r--scripts/mysql_config.sh2
-rw-r--r--scripts/mysql_install_db.sh2
-rw-r--r--scripts/mysqld_multi.sh16
-rw-r--r--scripts/mysqld_safe-watch.sh (renamed from scripts/safe_mysqld-watch.sh)2
-rw-r--r--scripts/mysqld_safe.sh (renamed from scripts/safe_mysqld.sh)14
-rw-r--r--sql-bench/Comments/postgres.benchmark100
-rw-r--r--sql-bench/Makefile.am4
-rwxr-xr-xsql-bench/README2
-rw-r--r--sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg20
-rw-r--r--sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg28
-rw-r--r--sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg26
-rw-r--r--sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg75
-rw-r--r--sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg74
-rw-r--r--sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg88
-rw-r--r--sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg103
-rw-r--r--sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg77
-rw-r--r--sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg104
-rw-r--r--sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg19
-rw-r--r--sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg28
-rw-r--r--sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg25
-rw-r--r--sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg30
-rw-r--r--sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg30
-rw-r--r--sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg30
-rw-r--r--sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg30
-rw-r--r--sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg42
-rw-r--r--sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg38
-rw-r--r--sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg18
-rw-r--r--sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg18
-rw-r--r--sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg18
-rw-r--r--sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg18
-rw-r--r--sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg18
-rw-r--r--sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg18
-rw-r--r--sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg58
-rw-r--r--sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg58
-rw-r--r--sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg98
-rw-r--r--sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg103
-rw-r--r--sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg85
-rw-r--r--sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg104
-rw-r--r--sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg23
-rw-r--r--sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg23
-rw-r--r--sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg29
-rw-r--r--sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg36
-rw-r--r--sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg38
-rw-r--r--sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg42
-rw-r--r--sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg14
-rw-r--r--sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg26
-rw-r--r--sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg22
-rw-r--r--sql-bench/bench-init.pl.sh2
-rw-r--r--sql-bench/compare-results.sh6
-rw-r--r--sql-bench/crash-me.sh39
-rw-r--r--sql-bench/graph-compare-results.sh660
-rw-r--r--sql-bench/limits/mysql-3.23.cfg8
-rw-r--r--sql-bench/limits/mysql.cfg8
-rw-r--r--sql-bench/limits/pg.cfg79
-rw-r--r--sql-bench/server-cfg.sh133
-rw-r--r--sql-bench/test-connect.sh2
-rw-r--r--sql-bench/test-insert.sh8
-rw-r--r--sql/Makefile.am8
-rw-r--r--sql/field.cc55
-rw-r--r--sql/field.h7
-rw-r--r--sql/filesort.cc70
-rw-r--r--sql/ha_gemini.cc1365
-rw-r--r--sql/ha_gemini.h53
-rw-r--r--sql/ha_innobase.cc69
-rw-r--r--sql/ha_innobase.h2
-rw-r--r--sql/ha_myisam.cc26
-rw-r--r--sql/ha_myisam.h1
-rw-r--r--sql/ha_myisammrg.cc4
-rw-r--r--sql/handler.cc30
-rw-r--r--sql/handler.h3
-rw-r--r--sql/item.cc2
-rw-r--r--sql/item_cmpfunc.cc2
-rw-r--r--sql/item_func.cc2
-rw-r--r--sql/item_strfunc.cc9
-rw-r--r--sql/item_sum.cc64
-rw-r--r--sql/item_sum.h18
-rw-r--r--sql/lex.h4
-rw-r--r--sql/lock.cc26
-rw-r--r--sql/log.cc42
-rw-r--r--sql/log_event.cc628
-rw-r--r--sql/log_event.h192
-rw-r--r--sql/md5.c14
-rw-r--r--sql/md5.h12
-rw-r--r--sql/mini_client.cc523
-rw-r--r--sql/mini_client.h11
-rw-r--r--sql/mysql_priv.h25
-rw-r--r--sql/mysqlbinlog.cc18
-rw-r--r--sql/mysqld.cc121
-rw-r--r--sql/net_pkg.cc2
-rw-r--r--sql/net_serv.cc2
-rw-r--r--sql/opt_range.cc232
-rw-r--r--sql/opt_range.h18
-rw-r--r--sql/share/czech/errmsg.txt8
-rw-r--r--sql/share/danish/errmsg.txt8
-rw-r--r--sql/share/dutch/errmsg.txt8
-rw-r--r--sql/share/english/errmsg.txt12
-rw-r--r--sql/share/estonian/errmsg.txt8
-rw-r--r--sql/share/french/errmsg.txt8
-rw-r--r--sql/share/german/errmsg.txt36
-rw-r--r--sql/share/greek/errmsg.txt8
-rw-r--r--sql/share/hungarian/errmsg.txt8
-rw-r--r--sql/share/italian/errmsg.txt22
-rw-r--r--sql/share/japanese/errmsg.txt8
-rw-r--r--sql/share/korean/errmsg.txt8
-rw-r--r--sql/share/norwegian-ny/errmsg.txt8
-rw-r--r--sql/share/norwegian/errmsg.txt8
-rw-r--r--sql/share/polish/errmsg.txt8
-rw-r--r--sql/share/portuguese/errmsg.txt404
-rw-r--r--sql/share/romanian/errmsg.txt8
-rw-r--r--sql/share/russian/errmsg.txt8
-rw-r--r--sql/share/slovak/errmsg.txt8
-rw-r--r--sql/share/spanish/errmsg.txt8
-rw-r--r--sql/share/swedish/errmsg.OLD5
-rw-r--r--sql/share/swedish/errmsg.txt8
-rw-r--r--sql/slave.cc337
-rw-r--r--sql/slave.h19
-rw-r--r--sql/sql_base.cc83
-rw-r--r--sql/sql_class.cc4
-rw-r--r--sql/sql_class.h55
-rw-r--r--sql/sql_db.cc137
-rw-r--r--sql/sql_delete.cc672
-rw-r--r--sql/sql_insert.cc3
-rw-r--r--sql/sql_lex.cc19
-rw-r--r--sql/sql_lex.h68
-rw-r--r--sql/sql_parse.cc412
-rw-r--r--sql/sql_repl.cc497
-rw-r--r--sql/sql_repl.h23
-rw-r--r--sql/sql_select.cc87
-rw-r--r--sql/sql_select.h7
-rw-r--r--sql/sql_show.cc10
-rw-r--r--sql/sql_table.cc108
-rw-r--r--sql/sql_test.cc3
-rw-r--r--sql/sql_unions.cc34
-rw-r--r--sql/sql_update.cc4
-rw-r--r--sql/sql_yacc.yy809
-rw-r--r--sql/structs.h1
-rw-r--r--sql/time.cc4
-rw-r--r--sql/uniques.cc44
-rw-r--r--sql/unireg.h2
-rw-r--r--sql/violite.c430
-rw-r--r--support-files/binary-configure.sh2
-rw-r--r--support-files/mysql-max.spec.sh4
-rw-r--r--support-files/mysql-multi.server.sh10
-rw-r--r--support-files/mysql.server.sh8
-rw-r--r--support-files/mysql.spec.sh7
-rwxr-xr-xtests/fork_big.pl30
-rw-r--r--vio/Makefile.am16
-rw-r--r--vio/vio.c87
-rw-r--r--vio/viosocket.c (renamed from vio/viotcpip.c)45
-rw-r--r--vio/viossl.c79
-rw-r--r--vio/viosslfactories.c7
403 files changed, 15008 insertions, 5327 deletions
diff --git a/.bzrignore b/.bzrignore
index d15fdc76bdd..6065c54ef28 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -12,10 +12,12 @@
.*.swp
.deps
.gdb_history
+.gdbinit
.libs
.o
.out
.snprj/*
+.vimrc
BitKeeper/etc/config
BitKeeper/etc/csets
BitKeeper/etc/csets-in
@@ -119,6 +121,7 @@ bdb/include/gen_server_ext.h
bdb/include/hash_auto.h
bdb/include/log_auto.h
bdb/include/qam_auto.h
+bdb/include/rpc_server_ext.h
bdb/include/txn_auto.h
bdb/java/src/com/sleepycat/db/DbConstants.java
bdb/log/log_auto.c
@@ -183,6 +186,7 @@ libmysql_r/acconfig.h
libmysql_r/conf_to_src
libmysql_r/my_static.h
libmysql_r/mysys_priv.h
+libmysqld/backup_dir
libmysqld/convert.cc
libmysqld/derror.cc
libmysqld/errmsg.c
@@ -197,6 +201,7 @@ libmysqld/ha_isammrg.cc
libmysqld/ha_myisam.cc
libmysqld/ha_myisammrg.cc
libmysqld/handler.cc
+libmysqld/hash_filo.cc
libmysqld/hostname.cc
libmysqld/init.cc
libmysqld/item.cc
@@ -222,12 +227,14 @@ libmysqld/opt_sum.cc
libmysqld/password.c
libmysqld/procedure.cc
libmysqld/records.cc
+libmysqld/simple-test
libmysqld/slave.cc
libmysqld/sql_acl.cc
libmysqld/sql_analyse.cc
libmysqld/sql_base.cc
libmysqld/sql_cache.cc
libmysqld/sql_class.cc
+libmysqld/sql_command
libmysqld/sql_crypt.cc
libmysqld/sql_db.cc
libmysqld/sql_delete.cc
@@ -247,11 +254,14 @@ libmysqld/sql_string.cc
libmysqld/sql_table.cc
libmysqld/sql_test.cc
libmysqld/sql_udf.cc
+libmysqld/sql_unions.cc
libmysqld/sql_update.cc
libmysqld/sql_yacc.cc
+libmysqld/stacktrace.c
libmysqld/table.cc
libmysqld/thr_malloc.cc
libmysqld/time.cc
+libmysqld/uniques.cc
libmysqld/unireg.cc
libtool
linked_client_sources
@@ -260,6 +270,7 @@ linked_libmysql_r_sources
linked_libmysql_sources
linked_libmysqld_sources
linked_server_sources
+locked
myisam/ft_dump
myisam/ft_eval
myisam/ft_test1
@@ -274,6 +285,7 @@ mysql-test/gmon.out
mysql-test/install_test_db
mysql-test/mysql-test-run
mysql-test/r/*.reject
+mysql-test/r/rpl_log.eval
mysql-test/share/mysql
mysql-test/var/*
mysql.proj
@@ -302,6 +314,7 @@ scripts/mysql_zap
scripts/mysqlaccess
scripts/mysqlbug
scripts/mysqld_multi
+scripts/mysqld_safe
scripts/mysqldumpslow
scripts/mysqlhotcopy
scripts/safe_mysqld
@@ -309,11 +322,15 @@ sql-bench/Results-linux/ATIS-mysql_bdb-Linux_2.2.14_my_SMP_i686
sql-bench/bench-count-distinct
sql-bench/bench-init.pl
sql-bench/compare-results
+sql-bench/compare-results-all
sql-bench/copy-db
sql-bench/crash-me
+sql-bench/gif/*
+sql-bench/graph-compare-results
sql-bench/output/*
sql-bench/run-all-tests
sql-bench/server-cfg
+sql-bench/template.html
sql-bench/test-ATIS
sql-bench/test-alter-table
sql-bench/test-big-tables
@@ -332,6 +349,7 @@ sql/mysqld
sql/share/*.sys
sql/share/charsets/gmon.out
sql/share/gmon.out
+sql/share/mysql
sql/share/norwegian-ny/errmsg.sys
sql/share/norwegian/errmsg.sys
sql/sql_yacc.cc
diff --git a/BUILD/FINISH.sh b/BUILD/FINISH.sh
index 4f13f5f8e4d..368ab339c2b 100644
--- a/BUILD/FINISH.sh
+++ b/BUILD/FINISH.sh
@@ -15,6 +15,10 @@ $make -k clean || true
aclocal && autoheader && aclocal && automake && autoconf
(cd bdb/dist && sh s_all)
(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
+if [ -d gemini ]
+then
+ (cd gemini && aclocal && autoheader && aclocal && automake && autoconf)
+fi
CFLAGS=\"$cflags\" CXX=gcc CXXFLAGS=\"$cxxflags\" $configure"
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 1f45c5c18cb..cf5405565b8 100644
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -43,13 +43,17 @@ alpha_cflags="-mcpu=ev6 -Wa,-mev6" # Not used yet
pentium_cflags="-mpentiumpro"
sparc_cflags=""
+# be as fast as we can be without losing our ability to backtrace
fast_cflags="-O3 -fno-omit-frame-pointer"
-reckless_cflags="-O3 -fomit-frame-pointer -ffixed-ebp"
+# this is one is for someone who thinks 1% speedup is worth not being
+# able to backtrace
+reckless_cflags="-O3 -fomit-frame-pointer "
debug_cflags="-DEXTRA_DEBUG -DFORCE_INIT_OF_VARS -DSAFEMALLOC -DSAFE_MUTEX -O2"
base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti"
-base_configs="--prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-mysqld-ldflags=-all-static"
+base_configs="--prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-mysqld-ldflags=-all-static \
+ --with-client-ldflags=-all-static"
alpha_configs="" # Not used yet
pentium_configs=""
sparc_configs=""
diff --git a/BUILD/compile-pentium b/BUILD/compile-pentium
index 9607ca03e7e..11559be93de 100755
--- a/BUILD/compile-pentium
+++ b/BUILD/compile-pentium
@@ -3,7 +3,7 @@
path=`dirname $0`
. "$path/SETUP.sh"
-extra_flags="$pentium_cflags $reckless_cflags"
+extra_flags="$pentium_cflags $fast_cflags"
extra_configs="$pentium_configs"
strip=yes
diff --git a/BUILD/compile-pentium-debug-max b/BUILD/compile-pentium-debug-max
index dc88bab62d3..4149267811d 100755
--- a/BUILD/compile-pentium-debug-max
+++ b/BUILD/compile-pentium-debug-max
@@ -8,6 +8,6 @@ c_warnings="$c_warnings $debug_extra_warnings"
cxx_warnings="$cxx_warnings $debug_extra_warnings"
extra_configs="$pentium_configs $debug_configs"
-extra_configs="$extra_configs --with-berkeley-db --with-innodb"
+extra_configs="$extra_configs --with-berkeley-db --with-innodb --with-embedded-server"
. "$path/FINISH.sh"
diff --git a/BUILD/compile-pentium-debug-openssl b/BUILD/compile-pentium-debug-openssl
new file mode 100755
index 00000000000..aa120e3175a
--- /dev/null
+++ b/BUILD/compile-pentium-debug-openssl
@@ -0,0 +1,13 @@
+#! /bin/sh
+
+path=`dirname $0`
+. "$path/SETUP.sh"
+
+extra_flags="$pentium_cflags $debug_cflags"
+c_warnings="$c_warnings $debug_extra_warnings"
+cxx_warnings="$cxx_warnings $debug_extra_warnings"
+extra_configs="$pentium_configs $debug_configs"
+
+extra_configs="$extra_configs --with-debug=full --with-vio --with-openssl"
+
+. "$path/FINISH.sh"
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 11ad15ceb0b..9e28a3403be 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -1,3 +1,4 @@
+Sinisa@sinisa.nasamreza.org
heikki@donna.mysql.fi
jani@hynda.mysql.fi
jani@janikt.pp.saunalahti.fi
@@ -5,13 +6,18 @@ jcole@abel.spaceapes.com
jcole@main.burghcom.com
jcole@tetra.spaceapes.com
monty@donna.mysql.fi
+monty@hundin.mysql.fi
monty@tik.mysql.fi
monty@work.mysql.com
mwagner@evoq.mwagner.org
paul@central.snake.net
+paul@teton.kitebird.com
root@x3.internalnet
sasha@mysql.sashanet.com
serg@serg.mysql.com
+tim@hundin.mysql.fi
tim@threads.polyesthetic.msg
+tim@white.box
tim@work.mysql.com
+tonu@hundin.mysql.fi
tonu@x3.internalnet
diff --git a/BitKeeper/triggers/post-commit b/BitKeeper/triggers/post-commit
index bb1f235204a..895c813c9e6 100755
--- a/BitKeeper/triggers/post-commit
+++ b/BitKeeper/triggers/post-commit
@@ -1,8 +1,9 @@
#!/bin/sh
#shift
-TO=dev@mysql.com
+TO=dev-public@mysql.com
FROM=$USER@mysql.com
+INTERNALS=internals@lists.mysql.com
LIMIT=10000
if [ "$REAL_EMAIL" = "" ]
@@ -28,6 +29,24 @@ EOF
bk changes -v -r+
bk cset -r+ -d
) | head -n $LIMIT | /usr/sbin/sendmail -t
+ echo "Notifying internals list at $INTERNALS"
+ (
+ cat <<EOF
+List-ID: <bk.mysql>
+From: $FROM
+To: $INTERNALS
+Subject: bk commit into 4.0 tree
+
+Below is the list of changes that have just been committed into a
+4.0 repository of $USER. When $USER does a push, they will be propogated to
+the main repository and within 24 hours after the push to the public repository.
+For information on how to access the public repository
+see http://www.mysql.com/doc/I/n/Installing_source_tree.html
+
+EOF
+ bk changes -v -r+
+ bk cset -r+ -d
+ ) | head -n $LIMIT | /usr/sbin/sendmail -t
else
echo "commit failed because '$BK_STATUS', sorry life is hard..."
fi
diff --git a/BitKeeper/triggers/post-incoming b/BitKeeper/triggers/post-incoming
new file mode 100755
index 00000000000..f1ea2255de9
--- /dev/null
+++ b/BitKeeper/triggers/post-incoming
@@ -0,0 +1,3 @@
+#! /bin/sh
+
+echo "Test: post-incoming works"
diff --git a/BitKeeper/triggers/post-outgoing b/BitKeeper/triggers/post-outgoing
new file mode 100755
index 00000000000..3fc2cdbad67
--- /dev/null
+++ b/BitKeeper/triggers/post-outgoing
@@ -0,0 +1,3 @@
+#! /bin/sh
+
+echo "Test: post-outgoing works"
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index 3ef9ba614f9..78b06ed04f3 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -4,10 +4,10 @@ use Getopt::Long;
$opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env="";
$opt_dbd_options=$opt_perl_options=$opt_suffix="";
$opt_tmp=$version_suffix="";
-$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0;
+$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=0;
$opt_innodb=$opt_bdb=0;
-GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage();
+GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution","enable-shared","no-crash-me","no-strip") || usage();
usage() if ($opt_help || $opt_Information);
usage() if (!$opt_distribution);
@@ -19,7 +19,7 @@ if ($opt_innodb || $opt_bdb)
chomp($host=`hostname`);
$full_host_name=$host;
-print "$host: Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n" if ($opt_debug);
+info("Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n");
$connect_option= ($opt_tcpip ? "--host=$host" : "");
$host =~ /^([^.-]*)/;
$host=$1 . $opt_suffix;
@@ -119,7 +119,10 @@ if ($opt_stage <= 1)
{
$opt_config_options.=" --with-client-ldflags=-all-static";
}
- $opt_config_options.= " --disable-shared"; # Default for binary versions
+ if (!$opt_enable_shared)
+ {
+ $opt_config_options.= " --disable-shared"; # Default for binary versions
+ }
if ($opt_bdb)
{
$opt_config_options.= " --with-berkeley-db"
@@ -146,10 +149,13 @@ if ($opt_stage <= 2)
#
if ($opt_stage <= 3)
{
+ my ($flags);
log_system("rm -fr mysql-3* mysql-4* $pwd/$host/*.tar.gz");
log_system("nm -n sql/mysqld | gzip -9 -v 2>&1 > sql/mysqld.sym.gz | cat");
- log_system("strip sql/mysqld extra/comp_err client/mysql sql/mysqld client/mysqlshow extra/replace isam/isamchk client/mysqladmin client/mysqldump extra/perror");
- check_system("scripts/make_binary_distribution $opt_tmp $opt_suffix",".tar.gz created");
+
+ $flags="";
+ $flags.="--no-strip" if ($opt_no_strip);
+ check_system("scripts/make_binary_distribution --tmp=$opt_tmp --suffix=$opt_suffix $flags",".tar.gz created");
safe_system("mv mysql*.tar.gz $pwd/$host");
safe_system("cp client/mysqladmin $pwd/$host/bin");
safe_system("$make clean") if ($opt_with_small_disk);
@@ -174,6 +180,7 @@ if ($opt_stage <= 4 && !$opt_no_test)
$tar_file =~ /(mysql-[^\/]*)\.tar/;
$ver=$1;
$test_dir="$pwd/$host/test/$ver";
+$ENV{"LD_LIBRARY_PATH"}= "$testdir/lib:" . $ENV{"LD_LIBRARY_PATH"};
if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
{
@@ -237,7 +244,7 @@ if ($opt_stage <= 7 && $opt_perl_files && !$opt_no_perl && !$opt_no_test)
}
-if ($opt_stage <= 8 && !$opt_no_test)
+if ($opt_stage <= 8 && !$opt_no_test && !$opt_no_crash_me)
{
safe_cd("$test_dir/sql-bench");
log_system("rm -f limits/mysql.cfg");
diff --git a/Docs/Flags/australia.eps b/Docs/Flags/australia.eps
index f98c03e2c83..f98c03e2c83 100755..100644
--- a/Docs/Flags/australia.eps
+++ b/Docs/Flags/australia.eps
diff --git a/Docs/Flags/australia.txt b/Docs/Flags/australia.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/australia.txt
+++ b/Docs/Flags/australia.txt
diff --git a/Docs/Flags/austria.eps b/Docs/Flags/austria.eps
index 7a0b56f3690..7a0b56f3690 100755..100644
--- a/Docs/Flags/austria.eps
+++ b/Docs/Flags/austria.eps
diff --git a/Docs/Flags/austria.txt b/Docs/Flags/austria.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/austria.txt
+++ b/Docs/Flags/austria.txt
diff --git a/Docs/Flags/canada.eps b/Docs/Flags/canada.eps
index b770266de60..b770266de60 100755..100644
--- a/Docs/Flags/canada.eps
+++ b/Docs/Flags/canada.eps
diff --git a/Docs/Flags/canada.txt b/Docs/Flags/canada.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/canada.txt
+++ b/Docs/Flags/canada.txt
diff --git a/Docs/Flags/czech-republic.eps b/Docs/Flags/czech-republic.eps
index afa50e9a82d..afa50e9a82d 100755..100644
--- a/Docs/Flags/czech-republic.eps
+++ b/Docs/Flags/czech-republic.eps
diff --git a/Docs/Flags/czech-republic.txt b/Docs/Flags/czech-republic.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/czech-republic.txt
+++ b/Docs/Flags/czech-republic.txt
diff --git a/Docs/Flags/germany.eps b/Docs/Flags/germany.eps
index 568543e3680..568543e3680 100755..100644
--- a/Docs/Flags/germany.eps
+++ b/Docs/Flags/germany.eps
diff --git a/Docs/Flags/germany.txt b/Docs/Flags/germany.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/germany.txt
+++ b/Docs/Flags/germany.txt
diff --git a/Docs/Flags/great-britain.eps b/Docs/Flags/great-britain.eps
index 97a7ffc9b57..97a7ffc9b57 100755..100644
--- a/Docs/Flags/great-britain.eps
+++ b/Docs/Flags/great-britain.eps
diff --git a/Docs/Flags/great-britain.txt b/Docs/Flags/great-britain.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/great-britain.txt
+++ b/Docs/Flags/great-britain.txt
diff --git a/Docs/Flags/hungary.eps b/Docs/Flags/hungary.eps
index e405fc3cffe..e405fc3cffe 100755..100644
--- a/Docs/Flags/hungary.eps
+++ b/Docs/Flags/hungary.eps
diff --git a/Docs/Flags/hungary.txt b/Docs/Flags/hungary.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/hungary.txt
+++ b/Docs/Flags/hungary.txt
diff --git a/Docs/Flags/israel.eps b/Docs/Flags/israel.eps
index 3d3059a907a..3d3059a907a 100755..100644
--- a/Docs/Flags/israel.eps
+++ b/Docs/Flags/israel.eps
diff --git a/Docs/Flags/israel.txt b/Docs/Flags/israel.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/israel.txt
+++ b/Docs/Flags/israel.txt
diff --git a/Docs/Flags/italy.eps b/Docs/Flags/italy.eps
index 20c7c7d5da3..20c7c7d5da3 100755..100644
--- a/Docs/Flags/italy.eps
+++ b/Docs/Flags/italy.eps
diff --git a/Docs/Flags/italy.txt b/Docs/Flags/italy.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/italy.txt
+++ b/Docs/Flags/italy.txt
diff --git a/Docs/Flags/japan.eps b/Docs/Flags/japan.eps
index 8dee6e497ba..8dee6e497ba 100755..100644
--- a/Docs/Flags/japan.eps
+++ b/Docs/Flags/japan.eps
diff --git a/Docs/Flags/japan.txt b/Docs/Flags/japan.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/japan.txt
+++ b/Docs/Flags/japan.txt
diff --git a/Docs/Flags/latvia.eps b/Docs/Flags/latvia.eps
new file mode 100644
index 00000000000..9c1f81f3ddc
--- /dev/null
+++ b/Docs/Flags/latvia.eps
@@ -0,0 +1,99 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Creator: pnmtops
+%%Title: latvia.ps
+%%Pages: 1
+%%BoundingBox: 295 365 317 396
+%%EndComments
+/readstring {
+ currentfile exch readhexstring pop
+} bind def
+/rpicstr 32 string def
+/gpicstr 32 string def
+/bpicstr 32 string def
+%%EndProlog
+%%Page: 1 1
+gsave
+295.44 365.64 translate
+21.12 30.72 scale
+0.5 0.5 translate 90 rotate -0.5 -0.5 translate
+32 22 8
+[ 32 0 0 -22 0 22 ]
+{ rpicstr readstring }
+{ gpicstr readstring }
+{ bpicstr readstring }
+true 3
+colorimage
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000009494949494949494949494949494949494949494949494
+949494949494940000101010101010101010101010101010101010101010
+101010101010101010000018181818181818181818181818181818181818
+181818181818181818181800009494949494949494949494949494949494
+949494949494949494949494940000101010101010101010101010101010
+101010101010101010101010101010000018181818181818181818181818
+181818181818181818181818181818181800009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+181818181818181818181818181818181818181818181800009494949494
+949494949494949494949494949494949494949494949494940000101010
+101010101010101010101010101010101010101010101010101010000018
+181818181818181818181818181818181818181818181818181818181800
+009494949494949494949494949494949494949494949494949494949494
+940000101010101010101010101010101010101010101010101010101010
+101010000018181818181818181818181818181818181818181818181818
+181818181800009494949494949494949494949494949494949494949494
+949494949494940000101010101010101010101010101010101010101010
+101010101010101010000018181818181818181818181818181818181818
+181818181818181818181800009494949494949494949494949494949494
+949494949494949494949494940000101010101010101010101010101010
+101010101010101010101010101010000018181818181818181818181818
+181818181818181818181818181818181800009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+18181818181818181818181818181818181818181818180000ffffffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffff0000ffffff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ff
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00
+00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff0000ffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffff0000ffffffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffff0000ffffffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffff0000ffffffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffff0000ffffffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffff
+ffffffffffffffffffffffffffffffffff00009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+181818181818181818181818181818181818181818181800009494949494
+949494949494949494949494949494949494949494949494940000101010
+101010101010101010101010101010101010101010101010101010000018
+181818181818181818181818181818181818181818181818181818181800
+009494949494949494949494949494949494949494949494949494949494
+940000101010101010101010101010101010101010101010101010101010
+101010000018181818181818181818181818181818181818181818181818
+181818181800009494949494949494949494949494949494949494949494
+949494949494940000101010101010101010101010101010101010101010
+101010101010101010000018181818181818181818181818181818181818
+181818181818181818181800009494949494949494949494949494949494
+949494949494949494949494940000101010101010101010101010101010
+101010101010101010101010101010000018181818181818181818181818
+181818181818181818181818181818181800009494949494949494949494
+949494949494949494949494949494949494940000101010101010101010
+101010101010101010101010101010101010101010000018181818181818
+181818181818181818181818181818181818181818181800009494949494
+949494949494949494949494949494949494949494949494940000101010
+101010101010101010101010101010101010101010101010101010000018
+181818181818181818181818181818181818181818181818181818181800
+009494949494949494949494949494949494949494949494949494949494
+940000101010101010101010101010101010101010101010101010101010
+101010000018181818181818181818181818181818181818181818181818
+181818181800000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000
+grestore
+showpage
+%%Trailer
diff --git a/Docs/Flags/latvia.gif b/Docs/Flags/latvia.gif
new file mode 100644
index 00000000000..8a898328ebe
--- /dev/null
+++ b/Docs/Flags/latvia.gif
Binary files differ
diff --git a/Docs/Flags/latvia.txt b/Docs/Flags/latvia.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/Docs/Flags/latvia.txt
diff --git a/Docs/Flags/russia.eps b/Docs/Flags/russia.eps
index 85c5899d891..85c5899d891 100755..100644
--- a/Docs/Flags/russia.eps
+++ b/Docs/Flags/russia.eps
diff --git a/Docs/Flags/russia.txt b/Docs/Flags/russia.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/russia.txt
+++ b/Docs/Flags/russia.txt
diff --git a/Docs/Flags/south-korea.eps b/Docs/Flags/south-korea.eps
index a363ab514c4..a363ab514c4 100755..100644
--- a/Docs/Flags/south-korea.eps
+++ b/Docs/Flags/south-korea.eps
diff --git a/Docs/Flags/south-korea.txt b/Docs/Flags/south-korea.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/south-korea.txt
+++ b/Docs/Flags/south-korea.txt
diff --git a/Docs/Flags/sweden.eps b/Docs/Flags/sweden.eps
index 47cd1fa3e9c..47cd1fa3e9c 100755..100644
--- a/Docs/Flags/sweden.eps
+++ b/Docs/Flags/sweden.eps
diff --git a/Docs/Flags/sweden.txt b/Docs/Flags/sweden.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/sweden.txt
+++ b/Docs/Flags/sweden.txt
diff --git a/Docs/Flags/taiwan.eps b/Docs/Flags/taiwan.eps
index a514bdf2af4..a514bdf2af4 100755..100644
--- a/Docs/Flags/taiwan.eps
+++ b/Docs/Flags/taiwan.eps
diff --git a/Docs/Flags/taiwan.txt b/Docs/Flags/taiwan.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/taiwan.txt
+++ b/Docs/Flags/taiwan.txt
diff --git a/Docs/Flags/usa.eps b/Docs/Flags/usa.eps
index 31bd9996d11..31bd9996d11 100755..100644
--- a/Docs/Flags/usa.eps
+++ b/Docs/Flags/usa.eps
diff --git a/Docs/Flags/usa.txt b/Docs/Flags/usa.txt
index e69de29bb2d..e69de29bb2d 100755..100644
--- a/Docs/Flags/usa.txt
+++ b/Docs/Flags/usa.txt
diff --git a/Docs/internals.texi b/Docs/internals.texi
index 9bbd90a7a3a..e0574df550b 100644
--- a/Docs/internals.texi
+++ b/Docs/internals.texi
@@ -47,7 +47,7 @@ This is a manual about @strong{MySQL} internals.
@menu
@end menu
-@node caching
+@node caching,,,
@chapter How MySQL handles caching
@strong{MySQL} has the following caches:
@@ -89,7 +89,7 @@ found rows are cached in a join cache. One SELECT query can use many
join caches in the worst case.
@end itemize
-@node flush tables
+@node flush tables,,,
@chapter How MySQL handles flush tables
@itemize @bullet
@@ -134,7 +134,7 @@ After this it will give other threads a chance to open the same tables.
@end itemize
-@node Filesort
+@node Filesort,,,
@chapter How MySQL does sorting (filesort)
@itemize @bullet
@@ -174,7 +174,7 @@ and then we read the rows in the sorted order into a row buffer
@end itemize
-@node Coding guidelines
+@node Coding guidelines,,,
@chapter Coding guidelines
@itemize @bullet
@@ -289,7 +289,7 @@ Use pointers rather than array indexing when operating on strings.
@end itemize
-@node mysys functions
+@node mysys functions,,,
@chapter mysys functions
Functions i mysys: (For flags se my_sys.h)
@@ -433,6 +433,205 @@ Functions i mysys: (For flags se my_sys.h)
void end_key_cache _A((void));
- End key-cacheing.
+@node protocol,,,
+@chapter MySQL client/server protocol
+
+Raw packet without compression
+==============================
+-------------------------------------------------
+| Packet Length | Packet no | Data |
+| 3 Bytes | 1 Byte | n Bytes |
+-------------------------------------------------
+
+3 Byte packet length
+ The length is calculated with int3store
+ See include/global.h for details.
+ The max packetsize can be 16 MB.
+1 Byte packet no
+
+If no compression is used the first 4 bytes of each paket
+is the header of the paket.
+The packet number is incremented for each sent packet. The first
+packet starts with 0
+
+n Byte data
+
+The packet length can be recalculated with:
+length = byte1 + (256 * byte2) + (256 * 256 * byte3)
+
+Raw packet with compression
+===========================
+-----------------------------------------------------
+| Packet Length | Packet no | Uncomp. Packet Length |
+| 3 Bytes | 1 Byte | 3 Bytes |
+-----------------------------------------------------
+
+3 Byte packet length
+ The length is calculated with int3store
+ See include/global.h for details.
+ The max packetsize can be 16 MB.
+1 Byte packet no
+3 Byte uncompressed packet length
+
+If compression is used the first 7 bytes of each paket
+is the header of the paket.
+
+Basic packets
+==============
+OK-packet
+ For details see sql/net_pkg.cc
+ function send_ok
+ -------------------------------------------------
+ | Header | No of Rows | Affected Rows |
+ | | 1 Byte | 1-8 Byte |
+ -------------------------------------------------
+ | ID (last_insert_id) | Status | Length |
+ | 1-8 Byte | 2 Byte | 1-8 Byte |
+ -------------------------------------------------
+ | Messagetext |
+ | n Byte |
+ -------------------------------------------------
+
+ Header
+ 1 byte number of rows ? (always 0 ?)
+ 1-8 bytes affected rows
+ 1-8 byte id (last_insert_id)
+ 2 byte Status (usually 0)
+ If the OK-packege includes a message:
+ 1-8 bytes length of message
+ n bytes messagetext
+
+Error-packet
+ -------------------------------------------------
+ | Header | Statuscode | Error no |
+ | | 1 Byte | 2 Byte |
+ -------------------------------------------------
+ | Messagetext | 0x00 |
+ | n Byte | 1 Byte |
+ -------------------------------------------------
+
+ Header
+ 1 byte status code (0xFF = ERROR)
+ 2 byte error number (is only sent to new 3.23 clients.
+ n byte errortext
+ 1 byte 0x00
+
+
+
+The communication
+=================
+
+> Packet from server to client
+< Paket from client tor server
+
+ Login
+ ------
+ > 1. packet
+ Header
+ 1 byte protocolversion
+ n byte serverversion
+ 1 byte 0x00
+ 4 byte threadnumber
+ 8 byte crypt seed
+ 1 byte 0x00
+ 2 byte CLIENT_xxx options (see include/mysql_com.h
+ that is supported by the server
+ 1 byte number of current server charset
+ 2 byte server status variables (SERVER_STATUS_xxx flags)
+ 13 byte 0x00 (not used yet).
+
+ < 2. packet
+ Header
+ 2 byte CLIENT_xxx options
+ 3 byte max_allowed_packet for the client
+ n byte username
+ 1 byte 0x00
+ 8 byte crypted password
+ 1 byte 0x00
+ n byte databasename
+ 1 byte 0x00
+
+ > 3. packet
+ OK-packet
+
+
+ Command
+ --------
+ < 1. packet
+ Header
+ 1 byte command type (e.g.0x03 = query)
+ n byte query
+
+ Result set (after command)
+ --------------------------
+ > 2. packet
+ Header
+ 1-8 byte field_count (packed with net_store_length())
+
+ If field_count == 0 (command):
+ 1-8 byte affected rows
+ 1-8 byte insert id
+ 2 bytes server_status (SERVER_STATUS_xx)
+
+ If field_count == NULL_LENGTH (251)
+ LOAD DATA LOCAL INFILE
+
+ If field_count > 0 Result Set:
+
+ > n packets
+ Header Info
+ Column description: 5 data object /column
+ (See code in unpack_fields())
+
+ Columninfo for each column:
+ 1 data block table_name
+ 1 byte length of block
+ n byte data
+ 1 data block field_name
+ 1 byte length of block...
+ n byte data
+ 1 data block display length of field
+ 1 byte length of block
+ 3 bytes display length of filed
+ 1 data block type field of type (enum_field_types)
+ 1 byte length of block
+ 1 bytexs field of type
+ 1 data block flags
+ 1 byte length of block
+ 2 byte flags for the columns (NOT_NULL_FLAG, ZEROFILL_FLAG....)
+ 1 byte decimals
+
+ if table definition:
+ 1 data block default value
+
+ Actual result (one packet per row):
+ 4 byte header
+ 1-8 byte length of data
+ n data
+
+
+Fieldtype Codes:
+================
+
+ display_length |enum_field_type |flags
+ ----------------------------------------------------
+Blob 03 FF FF 00 |01 FC |03 90 00 00
+Mediumblob 03 FF FF FF |01 FC |03 90 00 00
+Tinyblob 03 FF 00 00 |01 FC |03 90 00 00
+Text 03 FF FF 00 |01 FC |03 10 00 00
+Mediumtext 03 FF FF FF |01 FC |03 10 00 00
+Tinytext 03 FF 00 00 |01 FC |03 10 00 00
+Integer 03 0B 00 00 |01 03 |03 03 42 00
+Mediumint 03 09 00 00 |01 09 |03 00 00 00
+Smallint 03 06 00 00 |01 02 |03 00 00 00
+Tinyint 03 04 00 00 |01 01 |03 00 00 00
+Varchar 03 XX 00 00 |01 FD |03 00 00 00
+Enum 03 05 00 00 |01 FE |03 00 01 00
+Datetime 03 13 00 00 |01 0C |03 00 00 00
+Timestamp 03 0E 00 00 |01 07 |03 61 04 00
+Time 03 08 00 00 |01 0B |03 00 00 00
+Date 03 0A 00 00 |01 0A |03 00 00 00
+
@c The Index was empty, and ugly, so I removed it. (jcole, Sep 7, 2000)
diff --git a/Docs/manual.texi b/Docs/manual.texi
index f63e2c17ff8..3312803f7b6 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -314,7 +314,6 @@ Windows Notes
* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
* Windows symbolic links:: Splitting data across different disks under Win32
* Windows compiling:: Compiling MySQL clients on Windows.
-* Windows and BDB tables.:: Windows and BDB Tables
* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
Post-installation Setup and Testing
@@ -370,7 +369,7 @@ The MySQL Access Privilege System
* Request access:: Access control, stage 2: Request verification
* Privilege changes:: When privilege changes take effect
* Default privileges:: Setting up the initial @strong{MySQL} privileges
-* Adding users:: Adding new user privileges to @strong{MySQL}
+* Adding users:: Adding new users to @strong{MySQL}
* Passwords:: How to set up passwords
* Access denied:: Causes of @code{Access denied} errors
@@ -497,9 +496,9 @@ MySQL Table Types
* MERGE:: MERGE tables
* ISAM:: ISAM tables
* HEAP:: HEAP tables
+* InnoDB:: InnoDB tables
* BDB:: BDB or Berkeley_db tables
* GEMINI:: GEMINI tables
-* InnoDB:: InnoDB tables
MyISAM Tables
@@ -518,23 +517,6 @@ MyISAM table problems.
* Corrupted MyISAM tables::
* MyISAM table close::
-BDB or Berkeley_DB Tables
-
-* BDB overview:: Overview of BDB Tables
-* BDB install:: Installing BDB
-* BDB start:: BDB startup options
-* BDB characteristic:: Some characteristic of @code{BDB} tables:
-* BDB TODO:: Some things we need to fix for BDB in the near future:
-* BDB portability:: Operating systems supported by @strong{BDB}
-* BDB errors:: Errors You May Get When Using BDB Tables
-
-GEMINI Tables
-
-* GEMINI overview::
-* GEMINI start::
-* GEMINI features::
-* GEMINI TODO::
-
InnoDB Tables
* InnoDB overview:: InnoDB tables overview
@@ -554,7 +536,7 @@ InnoDB Tables
Creating InnoDB table space
-* Error creating InnoDB::
+* Error creating InnoDB::
Backing up and recovering an InnoDB database
@@ -581,6 +563,38 @@ File space management and disk i/o
* InnoDB File space::
* InnoDB File Defragmenting::
+BDB or Berkeley_DB Tables
+
+* BDB overview:: Overview of BDB Tables
+* BDB install:: Installing BDB
+* BDB start:: BDB startup options
+* BDB characteristic:: Some characteristic of @code{BDB} tables:
+* BDB TODO:: Some things we need to fix for BDB in the near future:
+* BDB portability:: Operating systems supported by @strong{BDB}
+* BDB errors:: Errors You May Get When Using BDB Tables
+
+GEMINI Tables
+
+* GEMINI Overview::
+* Using GEMINI Tables::
+
+GEMINI Overview
+
+* GEMINI Features::
+* GEMINI Concepts::
+* GEMINI Limitations::
+
+Using GEMINI Tables
+
+* Startup Options::
+* Creating GEMINI Tables::
+* Backing Up GEMINI Tables::
+* Restoring GEMINI Tables::
+* Using Auto_Increment Columns With GEMINI Tables::
+* Performance Considerations::
+* Sample Configurations::
+* When To Use GEMINI Tables::
+
MySQL Tutorial
* Connecting-disconnecting:: Connecting to and disconnecting from the server
@@ -674,6 +688,7 @@ System/Compile Time and Startup Parameter Tuning
* Compile and link options:: How compiling and linking affects the speed of MySQL
* Disk issues:: Disk issues
+* Symbolic links:: Using Symbolic Links
* Server parameters:: Tuning server parameters
* Table cache:: How MySQL opens and closes tables
* Creating many tables:: Drawbacks of creating large numbers of tables in the same database
@@ -683,9 +698,10 @@ System/Compile Time and Startup Parameter Tuning
* Table locking:: Table locking issues
* DNS::
-Disk Issues
+Using Symbolic Links
-* Symbolic links:: Using symbolic links for databases and tables
+* Symbolic links to database::
+* Symbolic links to tables::
Speed of Queries that Access or Update Data
@@ -717,6 +733,7 @@ MySQL Utilites
Maintaining a MySQL Installation
* Table maintenance:: Table maintenance and crash recovery
+* Using mysqlcheck:: Using mysqlcheck for maintenance and recovery
* Maintenance regimen:: Setting up a table maintenance regimen
* Table-info:: Getting information about a table
* Crash recovery:: Using @code{myisamchk} for crash recovery
@@ -937,6 +954,12 @@ How MySQL Compares to @code{mSQL}
* Protocol differences:: How @code{mSQL} and @strong{MySQL} client/server communications protocols differ
* Syntax differences:: How @code{mSQL} 2.0 SQL syntax differs from @strong{MySQL}
+How MySQL Compares to PostgreSQL
+
+* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies
+* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL
+* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL
+
MySQL Internals
* MySQL threads:: MySQL threads
@@ -969,6 +992,7 @@ Changes in release 4.0.x (Development; Alpha)
Changes in release 3.23.x (Stable)
+* News-3.23.40:: Changes in release 3.23.40
* News-3.23.39:: Changes in release 3.23.39
* News-3.23.38:: Changes in release 3.23.38
* News-3.23.37:: Changes in release 3.23.37
@@ -2120,7 +2144,7 @@ The server can provide error messages to clients in many languages.
@item
Clients may connect to the @strong{MySQL} server using TCP/IP Sockets,
-Unix Sockets (Unixes), or Named Pipes (NT).
+Unix Sockets (Unix), or Named Pipes (NT).
@item
The @strong{MySQL}-specific @code{SHOW} command can be used to retrieve
@@ -2267,8 +2291,9 @@ The Berkeley DB code is very stable, but we are still improving the interface
between @strong{MySQL} and BDB tables, so it will take some time before this
is as tested as the other table types.
-@item InnoDB Tables -- Alpha
-This is a very recent addition to @code{MySQL} and is not very tested yet.
+@item InnoDB Tables -- Beta
+This is a recent addition to @code{MySQL}. They appear to work good and
+can be used after some initial testing.
@item Automatic recovery of MyISAM tables - Beta
This only affects the new code that checks if the table was closed properly
@@ -2586,6 +2611,10 @@ M2D, a @strong{MySQL} Administration client for Windows. M2D supports
administration of @strong{MySQL} databases, creation of new databases and
tables, editing, and more.
+@item @uref{http://dlabs.4t2.com}
+Dexter, a small server written in Perl which can be used as a proxy server for
+@strong{MySQL} or as a database extender.
+
@item @uref{http://www.scibit.com/Products/Software/Utils/Mascon.asp}
Mascon is a powerful Win32 GUI for administering MySQL databases.
@@ -2835,7 +2864,7 @@ PTS: Project Tracking System.
@item @uref{http://tomato.nvgc.vt.edu/~hroberts/mot}
Job and software tracking system.
-@item @uref{http://www.cynergi.net/non-secure/exportsql/}
+@item @uref{http://www.cynergi.net/exportsql/}
ExportSQL: A script to export data from Access95+.
@item @uref{http://SAL.KachinaTech.COM/H/1/MYSQL.html}
@@ -3060,21 +3089,21 @@ from the local @strong{MySQL} list.
The following @strong{MySQL} mailing lists exist:
@table @code
-@item @email{announce-subscribe@@lists.mysql.com, announce}
+@item @email{announce-subscribe@@lists.mysql.com} announce
This is for announcement of new versions of @strong{MySQL} and related
programs. This is a low volume list all @strong{MySQL} users should
subscribe to.
-@item @email{mysql-subscribe@@lists.mysql.com, mysql}
+@item @email{mysql-subscribe@@lists.mysql.com} mysql
The main list for general @strong{MySQL} discussion. Please note that some
topics are better discussed on the more-specialized lists. If you post to the
wrong list, you may not get an answer!
-@item @email{mysql-digest-subscribe@@lists.mysql.com, mysql-digest}
+@item @email{mysql-digest-subscribe@@lists.mysql.com} mysql-digest
The @code{mysql} list in digest form. That means you get all individual
messages, sent as one large mail message once a day.
-@item @email{bugs-subscribe@@lists.mysql.com, bugs}
+@item @email{bugs-subscribe@@lists.mysql.com} bugs
On this list you should only post a full, repeatable bug report using
the @code{mysqlbug} script (if you are running on Windows, you should
include a description of the operating system and the @strong{MySQL} version).
@@ -3085,55 +3114,45 @@ bugs posted on this list will be corrected or documented in the next
@strong{MySQL} release! If there are only small code changes involved, we
will also post a patch that fixes the problem.
-@item @email{bugs-digest-subscribe@@lists.mysql.com, bugs-digest}
+@item @email{bugs-digest-subscribe@@lists.mysql.com} bugs-digest
The @code{bugs} list in digest form.
-@item @email{developer-subscribe@@lists.mysql.com, developer}
-This list has been depreciated in favor of the
-@email{internals-subscribe@@lists.mysql.com, internals} list (below).
-
-@item @email{developer-digest-subscribe@@lists.mysql.com, developer-digest}
-This list has been deprecated in favor of the
-@email{internals-digest-subscribe@@lists.mysql.com, internals-digest}
-list (below).
-
-@item @email{internals-subscribe@@lists.mysql.com, internals}
+@item @email{internals-subscribe@@lists.mysql.com} internals
A list for people who work on the @strong{MySQL} code. On this list one
can also discuss @strong{MySQL} development and post patches.
-@item @email{internals-digest-subscribe@@lists.mysql.com, internals-digest}
-A digest version of the @email{internals-subscribe@@lists.mysql.com, internals}
-list.
+@item @email{internals-digest-subscribe@@lists.mysql.com} internals-digest
+A digest version of the @code{internals} list.
-@item @email{java-subscribe@@lists.mysql.com, java}
+@item @email{java-subscribe@@lists.mysql.com} java
Discussion about @strong{MySQL} and Java. Mostly about the JDBC drivers.
-@item @email{java-digest-subscribe@@lists.mysql.com, java-digest}
+@item @email{java-digest-subscribe@@lists.mysql.com} java-digest
A digest version of the @code{java} list.
-@item @email{win32-subscribe@@lists.mysql.com, win32}
+@item @email{win32-subscribe@@lists.mysql.com} win32
All things concerning @strong{MySQL} on Microsoft operating systems such as
Win95, Win98, NT, and Win2000.
-@item @email{win32-digest-subscribe@@lists.mysql.com, win32-digest}
+@item @email{win32-digest-subscribe@@lists.mysql.com} win32-digest
A digest version of the @code{win32} list.
-@item @email{myodbc-subscribe@@lists.mysql.com, myodbc}
+@item @email{myodbc-subscribe@@lists.mysql.com} myodbc
All things about connecting to @strong{MySQL} with ODBC.
-@item @email{myodbc-digest-subscribe@@lists.mysql.com, myodbc-digest}
+@item @email{myodbc-digest-subscribe@@lists.mysql.com} myodbc-digest
A digest version of the @code{myodbc} list.
-@item @email{plusplus-subscribe@@lists.mysql.com, plusplus}
+@item @email{plusplus-subscribe@@lists.mysql.com} plusplus
All things concerning programming with the C++ API to @strong{MySQL}.
-@item @email{plusplus-digest-subscribe@@lists.mysql.com, plusplus-digest}
+@item @email{plusplus-digest-subscribe@@lists.mysql.com} plusplus-digest
A digest version of the @code{plusplus} list.
-@item @email{msql-mysql-modules-subscribe@@lists.mysql.com, msql-mysql-modules}
-A list about the Perl support in @strong{MySQL}.
+@item @email{msql-mysql-modules-subscribe@@lists.mysql.com} msql-mysql-modules
+A list about the Perl support in @strong{MySQL}. msql-mysql-modules
-@item @email{msql-mysql-modules-digest-subscribe@@lists.mysql.com, msql-mysql-modules-digest}
+@item @email{msql-mysql-modules-digest-subscribe@@lists.mysql.com} msql-mysql-modules-digest
A digest version of the @code{msql-mysql-modules} list.
@end table
@@ -3149,16 +3168,16 @@ English. Note that these are not operated by @strong{MySQL AB}, so we can't
guarantee the quality on these.
@table @code
-@item @email{mysql-france-subscribe@@yahoogroups.com, A French mailing list}
-@item @email{list@@tinc.net, A Korean mailing list}
+@item @email{mysql-france-subscribe@@yahoogroups.com} A French mailing list
+@item @email{list@@tinc.net} A Korean mailing list
Email @code{subscribe mysql your@@email.address} to this list.
-@item @email{mysql-de-request@@lists.4t2.com, A German mailing list}
+@item @email{mysql-de-request@@lists.4t2.com} A German mailing list
Email @code{subscribe mysql-de your@@email.address} to this list.
You can find information about this mailing list at
@uref{http://www.4t2.com/mysql}.
-@item @email{mysql-br-request@@listas.linkway.com.br, A Portugese mailing list}
+@item @email{mysql-br-request@@listas.linkway.com.br} A Portugese mailing list
Email @code{subscribe mysql-br your@@email.address} to this list.
-@item @email{mysql-alta@@elistas.net, A Spanish mailing list}
+@item @email{mysql-alta@@elistas.net} A Spanish mailing list
Email @code{subscribe mysql your@@email.address} to this list.
@end table
@@ -3564,7 +3583,7 @@ Note that older versions of @strong{MySQL} are still using a more
@uref{http://www.mysql.com/support/arrangements/mypl.html, strict license}.
See the documentation for that version for more information. If you need a
commercial @strong{MySQL} license, because the GPL license doesn't suit your
-application, you can buy one at @uref{https://order.mysql.com/license.htmy}.
+application, you can buy one at @uref{https://order.mysql.com/}.
For normal internal use, @strong{MySQL} costs nothing. You do not have
to pay us if you do not want to.
@@ -3629,7 +3648,7 @@ contact us. @xref{Contact information}.
If you require a @strong{MySQL} license, the easiest way to pay for it
is to use the license form on @strong{MySQL}'s secure server at
-@uref{https://order.mysql.com/license.htmy}. Other forms of payment are
+@uref{https://order.mysql.com/}. Other forms of payment are
discussed in @ref{Payment information}.
@cindex copyrights
@@ -3906,7 +3925,7 @@ BOX 6434, Torsgatan 21
@end example
If you want to pay by credit card over the Internet, you can use
-@uref{https://order.mysql.com/license.htmy, MySQL AB's secure license form}.
+@uref{https://order.mysql.com/, MySQL AB's secure license form}.
You can also print a copy of the license form, fill it in, and send it by fax
to:
@@ -4517,6 +4536,12 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@uref{ftp://ftp.esat.net/mirrors/download.sourceforge.net/pub/mirrors/mysql/, FTP}
@item
+@c Added 20010524
+@c EMAIL: arvids@parks.lv (Arvids)
+@image{Flags/latvia} Latvia [linux.lv] @
+@uref{ftp://ftp.linux.lv/pub/software/mysql/, FTP}
+
+@item
@c Added 20001125
@c EMAIL: mleicher@silverpoint.nl (Marcel Leicher)
@image{Flags/netherlands} Netherlands [Silverpoint] @
@@ -4719,7 +4744,7 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c Added 20000925
@image{Flags/usa} USA [ValueClick, Los Angeles CA] @
@uref{http://mysql.valueclick.com/, WWW}
-@uref{ftp://mysql.valueclick.com/mysql/, FTP}
+@uref{ftp://mysql.valueclick.com/pub/mysql/Downloads/, FTP}
@c @item
@c Not ok 20000919; Non-existent (Matt)
@@ -4796,10 +4821,10 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@uref{http://mysql.linuxwired.net/, WWW}
@uref{ftp://ftp.linuxwired.net/pub/mirrors/mysql/, FTP}
-@item
+@c @item
@c EMAIL: dan@surfsouth.com (Dan Muntz)
-@image{Flags/usa} USA [Venoma.Org/Valdosta, GA] @
-@uref{http://mysql.venoma.org/, WWW}
+@c @image{Flags/usa} USA [Venoma.Org/Valdosta, GA] @
+@c @uref{http://mysql.venoma.org/, WWW}
@item
@c EMAIL: hkind@adgrafix.com (Hans Kind)
@@ -4999,8 +5024,8 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c Added 980610
@c EMAIL: jason@dstc.edu.au (Jason Andrade)
@image{Flags/australia} Australia [AARNet/Queensland] @
-@uref{http://mirror.aarnet.edu.au/mysql, WWW}
-@uref{ftp://mirror.aarnet.edu.au/pub/mysql, FTP}
+@uref{http://mysql.mirror.aarnet.edu.au/, WWW}
+@uref{ftp://mysql.mirror.aarnet.edu.au/, FTP}
@c @item
@c Added 980805. Removed 000102 'no such directory'
@@ -5236,7 +5261,7 @@ clients can connect to both @strong{MySQL} versions.
The extended @strong{MySQL} binary distribution is marked with the
@code{-max} suffix and is configured with the same options as
-@code{mysqld-max}. @xref{mysqld-max}.
+@code{mysqld-max}. @xref{mysqld-max, , @code{mysqld-max}}.
If you want to use the @code{MySQL-Max} RPM, you must first
install the standard @code{MySQL} RPM.
@@ -5577,8 +5602,8 @@ indicates the type of operating system for which the distribution is intended
@item
If you see a binary distribution marked with the @code{-max} prefix, this
means that the binary has support for transaction-safe tables and other
-features. @xref{mysqld-max}. Note that all binaries are built from
-the same @strong{MySQL} source distribution.
+features. @xref{mysqld-max, , @code{mysqld-max}}. Note that all binaries
+are built from the same @strong{MySQL} source distribution.
@item
Add a user and group for @code{mysqld} to run as:
@@ -5590,8 +5615,8 @@ shell> useradd -g mysql mysql
These commands add the @code{mysql} group and the @code{mysql} user. The
syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-Unixes. They may also be called @code{adduser} and @code{addgroup}. You may
-wish to call the user and group something else instead of @code{mysql}.
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
@item
Change into the intended installation directory:
@@ -5634,7 +5659,8 @@ programs properly. @xref{Environment variables}.
@item scripts
This directory contains the @code{mysql_install_db} script used to initialize
-the server access permissions.
+the @code{mysql} database containing the grant tables that store the server
+access permissions.
@end table
@item
@@ -5700,7 +5726,7 @@ You can start the @strong{MySQL} server with the following command:
shell> bin/safe_mysqld --user=mysql &
@end example
-@xref{safe_mysqld}.
+@xref{safe_mysqld, , @code{safe_mysqld}}.
@xref{Post-installation}.
@@ -5772,7 +5798,7 @@ installation, you may want to make a copy of your previously installed
@strong{MySQL} startup file if you made any changes to it, so you don't lose
your changes.)
-After installing the RPM file(s), the @file{mysqld} daemon should be running
+After installing the RPM file(s), the @code{mysqld} daemon should be running
and you should now be able to start using @strong{MySQL}.
@xref{Post-installation}.
@@ -5808,7 +5834,7 @@ files.
The following sections indicate some of the issues that have been observed
on particular systems when installing @strong{MySQL} from a binary
-distribution.
+distribution or from RPM files.
@cindex binary distributions, on Linux
@cindex Linux, binary distribution
@@ -6104,8 +6130,8 @@ shell> useradd -g mysql mysql
These commands add the @code{mysql} group, and the @code{mysql} user. The
syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-Unixes. They may also be called @code{adduser} and @code{addgroup}. You may
-wish to call the user and group something else instead of @code{mysql}.
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
@item
Unpack the distribution into the current directory:
@@ -6416,6 +6442,7 @@ shell> CXXFLAGS=-DDONT_USE_DEFAULT_FIELDS ./configure
@cindex character sets
@findex configure option, --with-charset
+@findex configure option, --with-extra-charset
@item
By default, @strong{MySQL} uses the ISO-8859-1 (Latin1) character set. To
change the default set, use the @code{--with-charset} option:
@@ -6441,6 +6468,13 @@ indexes may be sorted incorrectly otherwise. (This can happen if you
install @strong{MySQL}, create some tables, then reconfigure
@strong{MySQL} to use a different character set and reinstall it.)
+With the option @code{--with-extra-charset=LIST} you can define
+which additional character sets should be compiled into the server.
+
+Here @code{LIST} is either a list of character sets separated by spaces,
+@code{complex} to include all characters that can't be dynamically loaded,
+or @code{all} to include all character sets into the binaries.
+
@item
To configure @strong{MySQL} with debugging code, use the @code{--with-debug}
option:
@@ -6460,8 +6494,8 @@ applications. @xref{Thread-safe clients}.
@item
Options that pertain to particular systems can be found in the
-system-specific sections later in this chapter.
-@xref{Source install system issues}.
+system-specific sections later in this chapter. @xref{Source install
+system issues}.
@end itemize
@node Installing source tree, Compilation problems, Installing source, Installing
@@ -7304,6 +7338,10 @@ with @code{--static}. If you try to do so, you will get the error:
@example
ld: fatal: library -ldl: not found
+
+or
+
+undefined reference to `dlopen'
@end example
If too many processes try to connect very rapidly to @code{mysqld}, you will
@@ -7449,6 +7487,9 @@ Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option.
@node Solaris x86, SunOS, Solaris 2.7, Source install system issues
@subsection Solaris x86 Notes
+On Solaris 2.8 on x86, @strong{mysqld} will core dump if you run
+'strip' on it.
+
If you are using @code{gcc} or @code{egcs} on Solaris x86 and you
experience problems with core dumps under load, you should use the
following @code{configure} command:
@@ -7507,6 +7548,11 @@ Linux version that doesn't have @code{glibc2}, you must install
LinuxThreads before trying to compile @strong{MySQL}. You can get
LinuxThreads at @uref{http://www.mysql.com/Downloads/Linux}.
+@strong{NOTE:} We have seen some strange problems with Linux 2.2.14 and
+@strong{MySQL} on SMP systems. If you have an SMP system, we recommend
+you to upgrade to Linux 2.4 ASAP! Your system will be faster and more
+stable by doing this!
+
Note that @code{glibc} versions before and including Version 2.1.1 have
a fatal bug in @code{pthread_mutex_timedwait} handling, which is used
when you do @code{INSERT DELAYED}. We recommend you to not use
@@ -7521,6 +7567,40 @@ relative to the root of @code{glibc} Note that @strong{MySQL} will not be
stable with around 600-1000 connections if @code{STACK_SIZE} is the default
of 2 MB.
+If you have a problem where @strong{MySQL} can't open enough files
+or connections, it may be that you haven't configured Linux to handle
+enough files.
+
+In Linux 2.2 and forwards, you can check the number of allocated
+file handlers by doing:
+
+@example
+cat /proc/sys/fs/file-max
+cat /proc/sys/fs/dquot-max
+cat /proc/sys/fs/super-max
+@end example
+
+If you have more than 16M of memory, you should add something like the
+following in your boot script (@file{/etc/rc/boot.local} on SuSE):
+
+@example
+echo 65536 > /proc/sys/fs/file-max
+echo 8192 > /proc/sys/fs/dquot-max
+echo 1024 > /proc/sys/fs/super-max
+@end example
+
+You can also run the above from the command line as root, but in this case
+your old limits will be used next time your computer reboots.
+
+You should also add the following to @file{/etc/my.cnf}:
+
+@example
+[safe_mysqld]
+open_files_limit=8192
+@end example
+
+The above should allow @strong{MySQL} to create up to 8192 connections + files.
+
The @code{STACK_SIZE} constant in LinuxThreads controls the spacing of thread
stacks in the address space. It needs to be large enough so that there will
be plenty of room for the stack of each individual thread, but small enough
@@ -7659,13 +7739,13 @@ To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV
signal, you can start @code{mysqld} with the @code{--core-file} option. Note
that you also probably need to raise the @code{core file size} by adding
@code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld}
-with @code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
+with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, , @code{safe_mysqld}}.
To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can
start @code{mysqld} with the @code{--core-file} option. Note that you also probably
need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to
@code{safe_mysqld} or starting @code{safe_mysqld} with
-@code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
+@code{--core-file-sizes=1000000}. @xref{safe_mysqld, , @code{safe_mysqld}}.
If you are linking your own @strong{MySQL} client and get the error:
@@ -7993,7 +8073,7 @@ shell> nohup mysqld [options] &
@code{nohup} causes the command following it to ignore any @code{SIGHUP}
signal sent from the terminal. Alternatively, start the server by running
@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
-@xref{safe_mysqld}.
+@xref{safe_mysqld, , @code{safe_mysqld}}.
If you get a problem when compiling mysys/get_opt.c, just remove the
line #define _NO_PROTO from the start of that file!
@@ -8187,6 +8267,17 @@ CC=gcc CXX=gcc CXXFLAGS=-O3 \
./configure --prefix=/usr/local/mysql --with-thread-safe-client --with-named-thread-libs=-lpthread
@end example
+On Irix 6.5.11 with native Irix C and C++ compilers ver. 7.3.1.2, the
+following is reported to work:
+
+@example
+CC=cc CXX=CC CFLAGS='-O3 -n32 -TARG:platform=IP22 -I/usr/local/include \
+-L/usr/local/lib' CXXFLAGS='-O3 -n32 -TARG:platform=IP22 \
+-I/usr/local/include -L/usr/local/lib' ./configure --prefix=/usr/local/mysql \
+--with-berkeley-db --with-innodb \
+--with-libwrap=/usr/local --with-named-curses-libs=/usr/local/lib/libncurses.a
+@end example
+
@node FreeBSD, NetBSD, SGI-Irix, Source install system issues
@subsection FreeBSD Notes
@@ -8250,7 +8341,8 @@ FreeBSD is also known to have a very low default file handle limit.
safe_mysqld or raise the limits for the @code{mysqld} user in /etc/login.conf
(and rebuild it with cap_mkdb /etc/login.conf). Also be sure you set the
appropriate class for this user in the password file if you are not
-using the default (use: chpass mysqld-user-name). @xref{safe_mysqld}.
+using the default (use: chpass mysqld-user-name). @xref{safe_mysqld, ,
+@code{safe_mysqld}}.
If you get problems with the current date in @strong{MySQL}, setting the
@code{TZ} variable will probably help. @xref{Environment variables}.
@@ -8479,7 +8571,7 @@ The following @code{configure} command should work:
@example
shell> CFLAGS="-D_XOPEN_XPG4" CXX=gcc CXXFLAGS="-D_XOPEN_XPG4" \
./configure \
- --with-debug --prefix=/usr/local/mysql \
+ --prefix=/usr/local/mysql \
--with-named-thread-libs="-lgthreads -lsocket -lgen -lgthreads" \
--with-named-curses-libs="-lcurses"
@end example
@@ -8702,10 +8794,10 @@ the DCE libraries while you compile @code{gcc} 2.95!
@node HP-UX 11.x, Mac OS X, HP-UX 10.20, Source install system issues
@subsection HP-UX Version 11.x Notes
-For HPUX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later.
+For HP-UX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later.
-Because of some critical bugs in the standard HPUX libraries, one should
-install the following patches before trying to run @strong{MySQL} on HPUX 11.0:
+Because of some critical bugs in the standard HP-UX libraries, you should
+install the following patches before trying to run @strong{MySQL} on HP-UX 11.0:
@example
PHKL_22840 Streams cumulative
@@ -8715,7 +8807,7 @@ PHNE_22397 ARPA cumulative
This will solve a problem that one gets @code{EWOULDBLOCK} from @code{recv()}
and @code{EBADF} from @code{accept()} in threaded applications.
-If you are using @code{gcc} 2.95.1 on an unpatched HPUX 11.x system,
+If you are using @code{gcc} 2.95.1 on an unpatched HP-UX 11.x system,
you will get the error:
@example
@@ -8754,8 +8846,8 @@ After this, the following configure line should work:
CFLAGS="-fomit-frame-pointer -O3 -fpic" CXX=gcc CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -O3" ./configure --prefix=/usr/local/mysql --disable-shared
@end example
-Here is some information that a HPUX Version 11.x user sent us about compiling
-@strong{MySQL} with HPUX:x compiler:
+Here is some information that a HP-UX Version 11.x user sent us about compiling
+@strong{MySQL} with HP-UX:x compiler:
@example
Environment:
@@ -8865,8 +8957,8 @@ in a while.
@section Windows Notes
This section describes installation and use of @strong{MySQL} on Windows.
-This is also described in the @file{README} file that comes with the
-@strong{MySQL} Windows distribution.
+This information is also provided in the @file{README} file that comes
+with the @strong{MySQL} Windows distribution.
@menu
* Windows installation:: Installing @strong{MySQL} on Windows
@@ -8876,13 +8968,16 @@ This is also described in the @file{README} file that comes with the
* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
* Windows symbolic links:: Splitting data across different disks under Win32
* Windows compiling:: Compiling MySQL clients on Windows.
-* Windows and BDB tables.:: Windows and BDB Tables
* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
@end menu
@node Windows installation, Win95 start, Windows, Windows
@subsection Installing MySQL on Windows
+The following instructions apply to precompiled binary distributions.
+If you download a source distribution, you will have to compile and install
+it yourself.
+
If you don't have a copy of the @strong{MySQL} distribution, you should
first download one from @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
@@ -8895,23 +8990,30 @@ To install either distribution, unzip it in some empty directory and run the
@code{Setup.exe} program.
By default, @strong{MySQL}-Windows is configured to be installed in
-@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere, install it
-in @file{C:\mysql} first, then move the installation to where you want it. If
-you do move @strong{MySQL}, you must tell @code{mysqld} where everything is by
-supplying options to @code{mysqld}. Use @code{C:\mysql\bin\mysqld --help} to
-display all options! For example, if you have moved the @strong{MySQL}
-distribution to @file{D:\programs\mysql}, you must start @code{mysqld} with:
-@code{D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql}
+@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere,
+install it in @file{C:\mysql} first, then move the installation to
+where you want it. If you do move @strong{MySQL}, you must indicate
+where everything is located by supplying a @code{--basedir} option when
+you start the server. For example, if you have moved the @strong{MySQL}
+distribution to @file{D:\programs\mysql}, you must start @code{mysqld}
+like this:
+
+@example
+C:\> D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql
+@end example
+
+Use @code{mysqld --help} to display all the options that @code{mysqld}
+understands!
With all newer @strong{MySQL} versions, you can also create a
@file{C:\my.cnf} file that holds any default options for the
@strong{MySQL} server. Copy the file @file{\mysql\my-xxxxx.cnf} to
@file{C:\my.cnf} and edit it to suit your setup. Note that you should
specify all paths with @samp{/} instead of @samp{\}. If you use
-@samp{\}, you need to specify it twice, as @samp{\} is the escape
+@samp{\}, you need to specify it twice, because @samp{\} is the escape
character in @strong{MySQL}. @xref{Option files}.
-Starting from @strong{MySQL} 3.23.38, the Windows distribution includes
+Starting with @strong{MySQL} 3.23.38, the Windows distribution includes
both the normal and the @strong{MySQL-Max} binaries. The main benefit
of using the normal @code{mysqld.exe} binary is that it's a little
faster and uses less resources.
@@ -8925,9 +9027,8 @@ symbolic links, BDB and InnoDB tables.
@item @code{mysqld-opt} @tab
Optimized binary with no support for transactional tables.
@item @code{mysqld-nt} @tab
-Optimized for a Pentium Pro processor. Has support for
-named pipes. You can run this version on Win98, but in
-this case no named pipes are created and you must
+Optimized binary for NT with support for named pipes. You can run this
+version on Win98, but in this case no named pipes are created and you must
have TCP/IP installed.
@item @code{mysqld-max} @tab
Optimized binary with support for symbolic links, BDB and InnoDB tables.
@@ -8969,14 +9070,19 @@ You can kill the @strong{MySQL} server by executing:
C:\> C:\mysql\bin\mysqladmin -u root shutdown
@end example
-Note that Win95 and Win98 don't support creation of named pipes. On
-Win95 and Win98, you can only use named pipes to connect to a remote
-@strong{MySQL} running on an NT server.
+Note that Win95 and Win98 don't support creation of named pipes.
+On Win95 and Win98, you can only use named pipes to connect to a
+remote @strong{MySQL} server running on a Windows NT server host.
+(The @strong{MySQL} server must also support named pipes, of
+course. For example, using @code{mysqld-opt} under NT will not allow
+named pipe connections. You should use either @code{mysqld-nt} or
+@code{mysqld-max-nt}.)
-If @code{mysqld} doesn't start, please check whether or not the
-@file{\mysql\mysql.err} file contains any reason for this. You can also
-try to start the server with @code{mysqld --standalone}; In this case, you may
-get some useful information on the screen that may help solve the problem.
+If @code{mysqld} doesn't start, please check the
+@file{\mysql\data\mysql.err} file to see if the server wrote any message
+there to indicate the cause of the problem. You can also try to start
+the server with @code{mysqld --standalone}; in this case, you may get
+some useful information on the screen that may help solve the problem.
The last option is to start @code{mysqld} with @code{--standalone
--debug}. In this case @code{mysqld} will write a log file
@@ -9008,9 +9114,9 @@ or
C:\> C:\mysql\bin\mysqld-max-nt --install
@end example
-(You can also use @code{mysqld} binaries that don't end with
-@code{-nt.exe} on NT, but those cannot be started as a service or use
-named pipes.)
+(Under Windows NT, you can actually install any of the server binaries
+as a service, but only those having names that end with @code{-nt.exe}
+provide support for named pipes.)
You can start and stop the @strong{MySQL} service with these commands:
@@ -9028,9 +9134,9 @@ with the default service options. If you have stopped @code{mysqld-nt}, you
have to start it with @code{NET START mysql}.
The service is installed with the name @code{MySQL}. Once installed, it must
-be started using the Services Control Manager (SCM) Utility (found in Control
-Panel) or by using the @code{NET START MySQL} command. If any options are
-desired, they must be specified as ``Startup parameters'' in the SCM utility
+be started using the Services Control Manager (SCM) Utility found in the
+Control Panel, or by using the @code{NET START MySQL} command. If any options
+are desired, they must be specified as ``Startup parameters'' in the SCM utility
before you start the @strong{MySQL} service. Once running, @code{mysqld-nt}
can be stopped using @code{mysqladmin}, or from the SCM utility or by using
the command @code{NET STOP MySQL}. If you use SCM to stop @code{mysqld-nt},
@@ -9148,14 +9254,12 @@ server, you can do so using this command:
C:\> mysqladmin --user=root --password=your_password shutdown
@end example
-If you are using the old shareware version of @strong{MySQL} Version 3.21
-under Windows, the above command will fail with an error: @code{parse error
-near 'SET OPTION password'}. This is because the old shareware version,
-which is based on @strong{MySQL} Version 3.21, doesn't have the
-@code{SET PASSWORD} command. The fix is in this case to upgrade to
-the Version 3.22 shareware.
+If you are using the old shareware version of @strong{MySQL} Version
+3.21 under Windows, the above command will fail with an error:
+@code{parse error near 'SET OPTION password'}. The fix in this case is
+to upgrade to the current @strong{MySQL} version, which is freely available.
-With the newer @strong{MySQL} versions you can easily add new users
+With the current @strong{MySQL} versions you can easily add new users
and change privileges with @code{GRANT} and @code{REVOKE} commands.
@xref{GRANT}.
@@ -9170,7 +9274,7 @@ Here is a note about how to connect to get a secure connection to remote
@itemize @bullet
@item
-Install an SSH client on your Windows machine --- As a user, the best non-free
+Install an SSH client on your Windows machine. As a user, the best non-free
one I've found is from @code{SecureCRT} from @uref{http://www.vandyke.com/}.
Another option is @code{f-secure} from @uref{http://www.f-secure.com/}. You
can also find some free ones on @strong{Google} at
@@ -9224,12 +9328,26 @@ Note that the symbolic link will be used only if the directory
For example, if the @strong{MySQL} data directory is @file{C:\mysql\data}
and you want to have database @code{foo} located at @file{D:\data\foo}, you
should create the file @file{C:\mysql\data\foo.sym} that contains the
-text @code{D:\data\foo}. After that, all tables created in the database
+text @code{D:\data\foo\}. After that, all tables created in the database
@code{foo} will be created in @file{D:\data\foo}.
+Note that because of the speed penalty you get when opening every table,
+we have not enabled this by default even if you have compiled
+@strong{MySQL} with support for this. To enable symlinks you should put
+in your @code{my.cnf} or @code{my.ini} file the following entry:
+
+@example
+[mysqld]
+use-symbolic-links
+@end example
+
+In @strong{MySQL} 4.0 we will enable symlinks by default. Then you
+should instead use the @code{skip-symlink} option if you want to
+disable this.
+
@cindex compiling, on Windows
@cindex Windows, compiling on
-@node Windows compiling, Windows and BDB tables., Windows symbolic links, Windows
+@node Windows compiling, Windows vs Unix, Windows symbolic links, Windows
@subsection Compiling MySQL Clients on Windows
In your source files, you should include @file{windows.h} before you include
@@ -9249,19 +9367,9 @@ with the static @file{mysqlclient.lib} library.
Note that as the mysqlclient libraries are compiled as threaded libraries,
you should also compile your code to be multi-threaded!
-@cindex BDB tables
-@cindex tables, BDB
-@node Windows and BDB tables., Windows vs Unix, Windows compiling, Windows
-@subsection Windows and BDB Tables
-
-We will shortly do a full test on the new BDB interface on Windows.
-When this is done we will start to release binary distributions (for
-Windows and Unix) of @strong{MySQL} that will include support for BDB
-tables.
-
@cindex Windows, versus Unix
@cindex operating systems, Windows versus Unix
-@node Windows vs Unix, , Windows and BDB tables., Windows
+@node Windows vs Unix, , Windows compiling, Windows
@subsection MySQL-Windows Compared to Unix MySQL
@strong{MySQL}-Windows has by now proven itself to be very stable. This version
@@ -9419,11 +9527,6 @@ For the moment, the workaround is to list the parameters in the
@file{C:\my.cnf} file instead.
@item
-When you suspend a laptop running Win95, the @code{mysqld} daemon doesn't
-accept new connections when the laptop is resumed. We don't know if this
-is a problem with Win95, TCP/IP, or @strong{MySQL}.
-
-@item
It would be real nice to be able to kill @code{mysqld} from the task manager.
For the moment, you must use @code{mysqladmin shutdown}.
@@ -9527,20 +9630,17 @@ and are configured with the following compilers and options:
@item SunOS 4.1.4 2 sun4c with @code{gcc} 2.7.2.1
@code{CC=gcc CXX=gcc CXXFLAGS="-O3 -felide-constructors" ./configure --prefix=/usr/local/mysql --disable-shared --with-extra-charsets=complex --enable-assembler}
-@item SunOS 5.5.1 sun4u with @code{egcs} 1.0.3a
-@code{CC=gcc CFLAGS="-O3 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O3 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
-
-@item SunOS 5.6 sun4u with @code{egcs} 2.90.27
-@code{CC=gcc CFLAGS="-O3 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O3 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
+@item SunOS 5.5.1 (and above) sun4u with @code{egcs} 1.0.3a or 2.90.27 or gcc 2.95.2 and newer
+@code{CC=gcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex --enable-assembler}
@item SunOS 5.6 i86pc with @code{gcc} 2.8.1
@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
@item Linux 2.0.33 i386 with @code{pgcc} 2.90.29 (@code{egcs} 1.0.3a)
-@code{CFLAGS="-O3 -mpentium -mstack-align-double -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O3 -mpentium -mstack-align-double -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --with-extra-charsets=complex}
+@code{CFLAGS="-O3 -mpentium -mstack-align-double" CXX=gcc CXXFLAGS="-O3 -mpentium -mstack-align-double -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --with-extra-charsets=complex}
@item Linux 2.2.x with x686 with @code{gcc} 2.95.2
-@code{CFLAGS="-O3 -mpentiumpro -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O3 -mpentiumpro -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charset=complex}
+@code{CFLAGS="-O3 -mpentiumpro" CXX=gcc CXXFLAGS="-O3 -mpentiumpro -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charset=complex}
@item SCO 3.2v5.0.4 i386 with @code{gcc} 2.7-95q4
@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
@@ -9604,7 +9704,7 @@ shell> cd mysql_installation_directory
shell> ./bin/safe_mysqld --user=mysql &
@end example
-For a binary distribution, do this:
+For a binary distribution (not RPM or pkg packages), do this:
@example
shell> cd mysql_installation_directory
@@ -9678,7 +9778,7 @@ mysqld: Can't find file: 'host.frm'
The above may also happen with a binary @strong{MySQL} distribution if you
don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}!
-@xref{safe_mysqld}.
+@xref{safe_mysqld, , @code{safe_mysqld}}.
You might need to run @code{mysql_install_db} as @code{root}. However,
if you prefer, you can run the @strong{MySQL} server as an unprivileged
@@ -9979,7 +10079,8 @@ system startup and shutdown, and is described more fully in
@item
By invoking @code{safe_mysqld}, which tries to determine the proper options
-for @code{mysqld} and then runs it with those options. @xref{safe_mysqld}.
+for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, ,
+@code{safe_mysqld}}.
@item
On NT you should install @code{mysqld} as a service as follows:
@@ -10035,7 +10136,7 @@ correctly, check the log file to see if you can find out why. Log files
are located in the data directory (typically
@file{/usr/local/mysql/data} for a binary distribution,
@file{/usr/local/var} for a source distribution,
-@file{\mysql\mysql.err} on Windows.) Look in the data directory for
+@file{\mysql\data\mysql.err} on Windows.) Look in the data directory for
files with names of the form @file{host_name.err} and
@file{host_name.log} where @code{host_name} is the name of your server
host. Then check the last few lines of these files:
@@ -10111,14 +10212,14 @@ library and for which @strong{MySQL} must be configured to use MIT-pthreads.
If you can't get @code{mysqld} to start you can try to make a trace file
to find the problem. @xref{Making trace files}.
+If you are using InnoDB tables, refer to the InnoDB-specific startup
+options. @xref{InnoDB start}.
+
If you are using BDB (Berkeley DB) tables, you should familiarize
yourself with the different BDB specific startup options. @xref{BDB start}.
If you are using Gemini tables, refer to the Gemini-specific startup options.
-@xref{GEMINI start}.
-
-If you are using InnoDB tables, refer to the InnoDB-specific startup
-options. @xref{InnoDB start}.
+@xref{Using GEMINI Tables}.
@node Automatic start, Command-line options, Starting server, Post-installation
@subsection Starting and Stopping MySQL Automatically
@@ -10126,6 +10227,10 @@ options. @xref{InnoDB start}.
@cindex stopping, the server
@cindex server, starting and stopping
+The @code{mysql.server} and @code{safe_mysqld} scripts can be used to start
+the server automatically at system startup time. @code{mysql.server} can also
+be used to stop the server.
+
The @code{mysql.server} script can be used to start or stop the server
by invoking it with @code{start} or @code{stop} arguments:
@@ -10143,9 +10248,8 @@ the @strong{MySQL} installation directory, then invokes @code{safe_mysqld}.
You might need to edit @code{mysql.server} if you have a binary distribution
that you've installed in a non-standard location. Modify it to @code{cd}
into the proper directory before it runs @code{safe_mysqld}. If you want the
-server to run as some specific user, you can change the
-@code{mysql_daemon_user=root} line to use another user. You can also modify
-@code{mysql.server} to pass other options to @code{safe_mysqld}.
+server to run as some specific user, add an appropriate @code{user} line
+to the @file{/etc/my.cnf} file, as shown later in this section.
@code{mysql.server stop} brings down the server by sending a signal to it.
You can take down the server manually by executing @code{mysqladmin shutdown}.
@@ -10171,23 +10275,23 @@ this:
datadir=/usr/local/mysql/var
socket=/tmp/mysqld.sock
port=3306
+user=mysql
[mysql.server]
-user=mysql
basedir=/usr/local/mysql
@end example
-The @code{mysql.server} script uses the following variables:
-@code{user}, @code{datadir}, @code{basedir}, @code{bindir}, and
-@code{pid-file}.
+The @code{mysql.server} script understands the following options:
+@code{datadir}, @code{basedir}, and @code{pid-file}.
-The following table shows which option sections each of the startup script
-uses:
+The following table shows which option groups each of the startup scripts
+read from option files:
@multitable @columnfractions .20 .80
+@item @strong{Script} @tab @strong{Option groups}
@item @code{mysqld} @tab @code{mysqld} and @code{server}
-@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld} and @code{server}
-@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld} and @code{server}
+@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
+@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
@end multitable
@xref{Option files}.
@@ -10228,7 +10332,8 @@ though.
@item --core-file
Write a core file if @code{mysqld} dies. For some systems you must also
-specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld}.
+specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld, ,
+@code{safe_mysqld}}.
@item -h, --datadir=path
Path to the database root.
@@ -10408,9 +10513,9 @@ recommended for systems where only local requests are allowed. @xref{DNS}.
Don't use new, possible wrong routines. Implies @code{--skip-delay-key-write}.
This will also set default table type to @code{ISAM}. @xref{ISAM}.
-@item --skip-stack-trace
-Don't write stack traces. This option is useful when you are running
-@code{mysqld} under a debugger. @xref{Debugging server}.
+@item --skip-symlink
+Don't delete or rename files that a symlinked file in the data directory
+points to.
@item --skip-safemalloc
If @strong{MySQL} is configured with @code{--with-debug=full}, all programs
@@ -10422,6 +10527,10 @@ need memory checking, by using this option.
Don't allow 'SHOW DATABASE' commands, unless the user has @strong{process}
privilege.
+@item --skip-stack-trace
+Don't write stack traces. This option is useful when you are running
+@code{mysqld} under a debugger. @xref{Debugging server}.
+
@item --skip-thread-priority
Disable using thread priorities for faster response time.
@@ -10443,6 +10552,9 @@ Run @code{mysqld} daemon as user @code{user_name}. This option is
@item -V, --version
Output version information and exit.
+@item -W, --warnings
+Print out warnings like @code{Aborted connection...} to the @code{.err} file.
+@xref{Communication errors}.
@end table
@cindex default options
@@ -10564,7 +10676,7 @@ password=my_password
no-auto-rehash
set-variable = connect_timeout=2
-[mysql-hot-copy]
+[mysqlhotcopy]
interactive-timeout
@end example
@@ -10572,7 +10684,7 @@ interactive-timeout
@tindex .my.cnf file
If you have a source distribution, you will find sample configuration
files named @file{my-xxxx.cnf} in the @file{support-files} directory.
-If you have a binary distribution, look in the @file{DIR/share/mysql}
+If you have a binary distribution, look in the @file{DIR/support-files}
directory, where @code{DIR} is the pathname to the @strong{MySQL}
installation directory (typically @file{/usr/local/mysql}). Currently
there are sample configuration files for small, medium, large, and very
@@ -10649,7 +10761,7 @@ The above is the quick and dirty way that one commonly uses for testing.
The nice thing with this is that all connections you do in the above shell
will automatically be directed to the new running server!
-If you need to do this more permanently, you should create an own option
+If you need to do this more permanently, you should create an option
file for each server. @xref{Option files}. In your startup script that
is executed at boot time (mysql.server?) you should specify for both
servers:
@@ -10743,8 +10855,8 @@ the old @code{ISAM} type. You don't have to convert your old tables to
use these with Version 3.23. By default, all new tables will be created with
type @code{MyISAM} (unless you start @code{mysqld} with the
@code{--default-table-type=isam} option). You can change an @code{ISAM}
-table to a @code{MyISAM} table with @code{ALTER TABLE} or the Perl script
-@code{mysql_convert_table_format}.
+table to a @code{MyISAM} table with @code{ALTER TABLE table_name TYPE=MyISAM}
+or the Perl script @code{mysql_convert_table_format}.
Version 3.22 and 3.21 clients will work without any problems with a Version
3.23 server.
@@ -11918,7 +12030,7 @@ system. This section describes how it works.
* Request access:: Access control, stage 2: Request verification
* Privilege changes:: When privilege changes take effect
* Default privileges:: Setting up the initial @strong{MySQL} privileges
-* Adding users:: Adding new user privileges to @strong{MySQL}
+* Adding users:: Adding new users to @strong{MySQL}
* Passwords:: How to set up passwords
* Access denied:: Causes of @code{Access denied} errors
@end menu
@@ -11947,9 +12059,10 @@ When running @strong{MySQL}, follow these guidelines whenever possible:
@itemize @bullet
@item
DON'T EVER GIVE ANYONE (EXCEPT THE @strong{MySQL} ROOT USER) ACCESS TO THE
-mysql.user TABLE! The encrypted password is the real password in
-@strong{MySQL}. If you know this for one user, you can easily log in as
-him if you have access to his 'host'.
+@code{user} TABLE IN THE @code{mysql} DATABASE! The encrypted password
+is the real password in @strong{MySQL}. If you know the password listed in
+the @code{user} table for a given user, you can easily log in as that
+user if you have access to the host listed for that account.
@item
Learn the @strong{MySQL} access privilege system. The @code{GRANT} and
@@ -11978,15 +12091,15 @@ computer becomes compromised, the intruder can take the full list of
passwords and use them. Instead use @code{MD5()} or another one-way
hashing function.
@item
-Do not use passwords from dictionaries. There are special programs to
+Do not choose passwords from dictionaries. There are special programs to
break them. Even passwords like ``xfish98'' are very bad. Much better is
``duag98'' which contains the same word ``fish'' but typed one key to the
left on a standard QWERTY keyboard. Another method is to use ``Mhall'' which
is taken from the first characters of each word in the sentence ``Mary had
-a little lamb.'' This is easy to remember and type, but hard to guess for
-someone who does not know it.
+a little lamb.'' This is easy to remember and type, but difficult to guess
+for someone who does not know it.
@item
-Invest in a firewall. This protects from at least 50% of all types of
+Invest in a firewall. This protects you from at least 50% of all types of
exploits in any software. Put @strong{MySQL} behind the firewall or in
a demilitarized zone (DMZ).
@@ -11995,11 +12108,16 @@ Checklist:
@item
Try to scan your ports from the Internet using a tool such as
@code{nmap}. @strong{MySQL} uses port 3306 by default. This port should
-be inaccessible from untrusted hosts. Another simple way to check whether or
-not your @strong{MySQL} port is open is to type @code{telnet
-server_host 3306} from some remote machine, where
-@code{server_host} is the hostname of your @strong{MySQL}
-server. If you get a connection and some garbage characters, the port is
+be inaccessible from untrusted hosts. Another simple way to check whether
+or not your @strong{MySQL} port is open is to try the following command
+from some remote machine, where @code{server_host} is the hostname of
+your @strong{MySQL} server:
+
+@example
+shell> telnet server_host 3306
+@end example
+
+If you get a connection and some garbage characters, the port is
open, and should be closed on your firewall or router, unless you really
have a good reason to keep it open. If @code{telnet} just hangs or the
connection is refused, everything is OK; the port is blocked.
@@ -12055,11 +12173,13 @@ not give your applications any more access privileges than they need.
Users of PHP:
@itemize @bullet
@item Check out the @code{addslashes()} function.
+As of PHP 4.0.3, a @code{mysql_escape_string()} function is available
+that is based on the function of the same name in the @strong{MySQL} C API.
@end itemize
@item
Users of @strong{MySQL} C API:
@itemize @bullet
-@item Check out the @code{mysql_escape()} API call.
+@item Check out the @code{mysql_escape_string()} API call.
@end itemize
@item
Users of @strong{MySQL}++:
@@ -12071,6 +12191,11 @@ Users of Perl DBI:
@itemize @bullet
@item Check out the @code{quote()} method or use placeholders.
@end itemize
+@item
+Users of Java JDBC:
+@itemize @bullet
+@item Use a @code{PreparedStatement} object and placeholders.
+@end itemize
@end itemize
@item
@@ -12106,15 +12231,15 @@ connection, however the encryption algorithm is not very strong, and
with some effort a clever attacker can crack the password if he is able
to sniff the traffic between the client and the server. If the
connection between the client and the server goes through an untrusted
-network, you should use an @strong{SSH} tunnel to encrypt the
+network, you should use an SSH tunnel to encrypt the
communication.
All other information is transferred as text that can be read by anyone
who is able to watch the connection. If you are concerned about this,
you can use the compressed protocol (in @strong{MySQL} Version 3.22 and above)
to make things much harder. To make things even more secure you should use
-@code{ssh}. You can find an open source ssh client at
-@uref{http://www.openssh.org}, and a commercial ssh client at
+@code{ssh}. You can find an open source @code{ssh} client at
+@uref{http://www.openssh.org}, and a commercial @code{ssh} client at
@uref{http://www.ssh.com}. With this, you can get an encrypted TCP/IP
connection between a @strong{MySQL} server and a @strong{MySQL} client.
@@ -12139,35 +12264,44 @@ mysql> FLUSH PRIVILEGES;
@end example
@item
-Don't run the @strong{MySQL} daemon as the Unix @code{root} user.
-It is very dangerous as any user with @code{FILE} privileges will be able to
-create files
-as @code{root} (for example, @code{~root/.bashrc}). To prevent this
-@code{mysqld} will refuse to run as @code{root} unless it is specified
-directly via @code{--user=root} option.
+Don't run the @strong{MySQL} daemon as the Unix @code{root} user. This is
+very dangerous, because any user with @code{FILE} privileges will be able
+to create files as @code{root} (for example, @code{~root/.bashrc}). To
+prevent this, @code{mysqld} will refuse to run as @code{root} unless it
+is specified directly using a @code{--user=root} option.
+
+@code{mysqld} can be run as an ordinary unprivileged user instead.
+You can also create a new Unix user @code{mysql} to make everything
+even more secure. If you run @code{mysqld} as another Unix user,
+you don't need to change the @code{root} user name in the @code{user}
+table, because @strong{MySQL} user names have nothing to do with Unix
+user names. To start @code{mysqld} as another Unix user, add a @code{user}
+line that specifies the user name to the @code{[mysqld]} group of the
+@file{/etc/my.cnf} option file or the @file{my.cnf} option file in the
+server's data directory. For example:
+
+@example
+[mysqld]
+user=mysql
+@end example
-@code{mysqld} can be run as any user instead. You can also create a new
-Unix user @code{mysql} to make everything even more secure. If you run
-@code{mysqld} as another Unix user, you don't need to change the
-@code{root} user name in the @code{user} table, because @strong{MySQL}
-user names have nothing to do with Unix user names. You can edit the
-@code{mysql.server} script to start @code{mysqld} as another Unix user.
-Normally this is done with the @code{su} command. For more details, see
-@ref{Changing MySQL user, , Changing @strong{MySQL} user}.
+This will cause the server to start as the designated user whether you
+start it manually or by using @code{safe_mysqld} or @code{mysql.server}.
+For more details, see @ref{Changing MySQL user, , Changing @strong{MySQL}
+user}.
@item
-If you put a password for the Unix @code{root} user in the @code{mysql.server}
-script, make sure this script is readable only by @code{root}.
+Don't support symlinks to tables (this can be disabled with the
+@code{--skip-symlink} option). This is especially important if you run
+@code{mysqld} as root, as anyone who has write access to the mysqld data
+directories could then delete any file in the system!
+@xref{Symbolic links to tables}.
@item
Check that the Unix user that @code{mysqld} runs as is the only user with
read/write privileges in the database directories.
@item
-On Unix platforms, do not run @code{mysqld} as root unless you really
-need to. Consider creating a user named @code{mysql} for that purpose.
-
-@item
Don't give the @strong{process} privilege to all users. The output of
@code{mysqladmin processlist} shows the text of the currently executing
queries, so any user who is allowed to execute that command might be able to
@@ -12267,7 +12401,6 @@ DATA INFILE} and administrative operations.
@cindex user names, and passwords
@cindex passwords, for users
-
There are several distinctions between the way user names and passwords are
used by @strong{MySQL} and the way they are used by Unix or Windows:
@@ -12301,6 +12434,42 @@ knowing your 'scrambled' password is enough to be able to connect to
the @strong{MySQL} server!
@end itemize
+@strong{MySQL} users and their privileges are normally created with the
+@code{GRANT} command. @xref{GRANT}.
+
+When you log in to a @strong{MySQL} server with a command-line client,
+you should specify the password with @code{--password=your-password}.
+@xref{Connecting}.
+
+@example
+mysql --user=monty --password=guess database_name
+@end example
+
+If you want the client to prompt for a password, you should use
+@code{--password} without any argument
+
+@example
+mysql --user=monty --password database_name
+@end example
+
+or the short form:
+
+@example
+mysql -u monty -p database_name
+@end example
+
+Note that in the last example the password is @strong{NOT} 'database_name'.
+
+If you want to use the -p option to supply a password, you should do so like this:
+
+@example
+mysql -u monty -pguess database_name
+@end example
+
+On some systems, the library call that @strong{MySQL} uses to prompt for a
+password will automatically truncate the password to 8 characters. Internally
+@strong{MySQL} doesn't have any limit on the length of the password.
+
@node Connecting, Password security, User names, Privilege system
@section Connecting to the MySQL Server
@cindex connecting, to the server
@@ -13360,12 +13529,15 @@ running @code{mysql_install_db}.
@findex GRANT statement
@findex statements, GRANT
@node Adding users, Passwords, Default privileges, Privilege system
-@section Adding New User Privileges to MySQL
+@section Adding New Users to MySQL
You can add users two different ways: by using @code{GRANT} statements
or by manipulating the @strong{MySQL} grant tables directly. The
preferred method is to use @code{GRANT} statements, because they are
-more concise and less error-prone.
+more concise and less error-prone. @xref{GRANT}.
+
+There are also many contributed programs, such as @code{phpmyadmin}, that
+can be used to create and administer users. @xref{Contrib}.
The examples below show how to use the @code{mysql} client to set up new
users. These examples assume that privileges are set up according to the
@@ -13476,6 +13648,11 @@ mysql> GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP
IDENTIFIED BY 'stupid';
@end example
+The reason that we issue two grant statements for the user 'custom' is that
+we want to give the user access to @strong{MySQL} both from the local
+machine with Unix sockets and from the remote machine 'whitehouse.gov'
+over TCP/IP.
+
To set up the user's privileges by modifying the grant tables directly,
run these commands (note the @code{FLUSH PRIVILEGES} at the end):
@@ -16535,6 +16712,16 @@ mysql> select 'David_' LIKE 'David|_' ESCAPE '|';
-> 1
@end example
+The following two statements illustrate that string comparisons are
+case insensitive unless one of the operands is a binary string:
+
+@example
+mysql> select 'abc' LIKE 'ABC';
+ -> 1
+mysql> SELECT 'abc' LIKE BINARY 'ABC';
+ -> 0
+@end example
+
@code{LIKE} is allowed on numeric expressions! (This is a @strong{MySQL}
extension to the ANSI SQL @code{LIKE}.)
@@ -18811,6 +18998,8 @@ or DELAY_KEY_WRITE = @{0 | 1@}
or ROW_FORMAT= @{ default | dynamic | fixed | compressed @}
or RAID_TYPE= @{1 | STRIPED | RAID0 @} RAID_CHUNKS=# RAID_CHUNKSIZE=#
or UNION = (table_name,[table_name...])
+or DATA DIRECTORY="directory"
+or INDEX DIRECTORY="directory"
select_statement:
[IGNORE | REPLACE] SELECT ... (Some legal select statement)
@@ -18865,10 +19054,10 @@ When you insert a value of @code{NULL} (recommended) or @code{0} into an
If you delete the row containing the maximum value for an
@code{AUTO_INCREMENT} column, the value will be reused with an
-@code{ISAM}, @code{BDB} or @code{INNODB} table but not with a
-@code{MyISAM} table. If you delete all rows in the table with
-@code{DELETE FROM table_name} (without a @code{WHERE}) in
-@code{AUTOCOMMIT} mode, the sequence starts over for both table types.
+@code{ISAM}, @code{GEMINI} or @code{BDB} table but not with a
+@code{MyISAM} or @code{InnoDB} table. If you delete all rows in the table
+with @code{DELETE FROM table_name} (without a @code{WHERE}) in
+@code{AUTOCOMMIT} mode, the sequence starts over for all table types.
@strong{NOTE:} There can be only one @code{AUTO_INCREMENT} column per
table, and it must be indexed. @strong{MySQL} Version 3.23 will also only
@@ -19142,6 +19331,14 @@ In the created table the @code{PRIMARY} key will be placed first, followed
by all @code{UNIQUE} keys and then the normal keys. This helps the
@strong{MySQL} optimizer to prioritize which key to use and also more quickly
detect duplicated @code{UNIQUE} keys.
+
+@item
+By using @code{DATA DIRECTORY="directory"} or @code{INDEX
+DIRECTORY="directory"} you can specify where the table handler should
+put its table and index files. This only works for @code{MyISAM} tables
+in @code{MySQL} 4.0, when you are not using the @code{--skip-symlink}
+option. @xref{Symbolic links to tables}.
+
@end itemize
@cindex silent column changes
@@ -19278,9 +19475,6 @@ INDEX} are @strong{MySQL} extensions to ANSI SQL92.
@code{MODIFY} is an Oracle extension to @code{ALTER TABLE}.
@item
-@code{TRUNCATE} is an Oracle extension. @xref{TRUNCATE}.
-
-@item
The optional word @code{COLUMN} is a pure noise word and can be omitted.
@item
@@ -19341,8 +19535,7 @@ a table row. The default is to add the column last.
or removes the old default value.
If the old default is removed and the column can be @code{NULL}, the new
default is @code{NULL}. If the column cannot be @code{NULL}, @strong{MySQL}
-assigns a default value.
-Default value assignment is described in
+assigns a default value, as described in
@ref{CREATE TABLE, , @code{CREATE TABLE}}.
@findex DROP INDEX
@@ -19355,6 +19548,10 @@ If columns are dropped from a table, the columns are also removed from any
index of which they are a part. If all columns that make up an index are
dropped, the index is dropped as well.
+@item
+If a table contains only one column, the column cannot be dropped.
+If what you intend is to remove the table, use @code{DROP TABLE} instead.
+
@findex DROP PRIMARY KEY
@item
@code{DROP PRIMARY KEY} drops the primary index. If no such
@@ -19461,6 +19658,11 @@ sequence number by executing @code{SET INSERT_ID=#} before
@code{ALTER TABLE} or using the @code{AUTO_INCREMENT = #} table option.
@xref{SET OPTION}.
+With MyISAM tables, if you don't change the @code{AUTO_INCREMENT}
+column, the sequence number will not be affected. If you drop an
+@code{AUTO_INCREMENT} column and then add another @code{AUTO_INCREMENT}
+column, the numbers will start from 1 again.
+
@xref{ALTER TABLE problems}.
@findex RENAME TABLE
@@ -19667,6 +19869,8 @@ minimum needed to restore it. Currenlty only works for @code{MyISAM}
tables. For @code{MyISAM} table, copies @code{.frm} (definition) and
@code{.MYD} (data) files. The index file can be rebuilt from those two.
+Before using this command, please see @xref{Backup}.
+
During the backup, read lock will be held for each table, one at time,
as they are being backed up. If you want to backup several tables as
a snapshot, you must first issue @code{LOCK TABLES} obtaining a read
@@ -19875,6 +20079,8 @@ valid, the table can be re-created this way, even if the data or index
files have become corrupted.
@end itemize
+@code{TRUNCATE} is an Oracle SQL extension.
+
@findex SELECT
@node SELECT, JOIN, TRUNCATE, Reference
@section @code{SELECT} Syntax
@@ -20495,9 +20701,9 @@ Version 3.22.15. It is a @strong{MySQL} extension to ANSI SQL92.
@code{INSERT DELAYED} only works with @code{ISAM} and @code{MyISAM}
tables. Note that as @code{MyISAM} tables supports concurrent
-@code{SELECT} and @code{INSERT}, if there is no empty blocks in the data
-file, you very seldom need to use @code{INSERT DELAYED} with
-@code{MyISAM}.
+@code{SELECT} and @code{INSERT}, if there is no free blocks in the
+middle of the data file, you very seldom need to use @code{INSERT
+DELAYED} with @code{MyISAM}. @xref{MyISAM}.
When you use @code{INSERT DELAYED}, the client will get an OK at once
and the row will be inserted when the table is not in use by any other thread.
@@ -20630,6 +20836,13 @@ In other words, you can't access the values of the old row from a
@code{REPLACE} statement. In some old @strong{MySQL} version it looked
like you could do this, but that was a bug that has been corrected.
+When one uses a @code{REPLACE} command, @code{mysql_affected_rows()}
+will return 2 if the new row replaced an old row. This is because in
+this case one row was inserted and then the duplicate was deleted.
+
+The above makes it easy to check if @code{REPLACE} added or replaced a
+row.
+
@findex LOAD DATA INFILE
@node LOAD DATA, UPDATE, REPLACE, Reference
@section @code{LOAD DATA INFILE} Syntax
@@ -22646,8 +22859,6 @@ This statement is provided for Oracle compatibility.
The @code{SHOW} statement provides similar information.
@xref{SHOW, , @code{SHOW}}.
-
-
@findex BEGIN
@findex COMMIT
@findex ROLLBACK
@@ -23005,8 +23216,9 @@ REVOKE priv_type [(column_list)] [, priv_type [(column_list)] ...]
@code{GRANT} is implemented in @strong{MySQL} Version 3.22.11 or later. For
earlier @strong{MySQL} versions, the @code{GRANT} statement does nothing.
-The @code{GRANT} and @code{REVOKE} commands allow system administrators to
-grant and revoke rights to @strong{MySQL} users at four privilege levels:
+The @code{GRANT} and @code{REVOKE} commands allow system administrators
+to create users and grant and revoke rights to @strong{MySQL} users at
+four privilege levels:
@table @strong
@item Global level
@@ -23026,6 +23238,7 @@ Column privileges apply to single columns in a given table. These privileges are
stored in the @code{mysql.columns_priv} table.
@end table
+If you grant privileges for a user that doesn't exist, that user is created.
For examples of how @code{GRANT} works, see @ref{Adding users}.
For the @code{GRANT} and @code{REVOKE} statements, @code{priv_type} may be
@@ -23607,9 +23820,9 @@ of both worlds.
* MERGE:: MERGE tables
* ISAM:: ISAM tables
* HEAP:: HEAP tables
+* InnoDB:: InnoDB tables
* BDB:: BDB or Berkeley_db tables
* GEMINI:: GEMINI tables
-* InnoDB:: InnoDB tables
@end menu
@node MyISAM, MERGE, Table types, Table types
@@ -23633,8 +23846,12 @@ the table was closed correctly. If @code{mysqld} is started with
@code{--myisam-recover}, @code{MyISAM} tables will automatically be
checked and/or repaired on open if the table wasn't closed properly.
@item
-You can @code{INSERT} new rows in a table without deleted rows,
-while other threads are reading from the table.
+You can @code{INSERT} new rows in a table that doesn't have free blocks
+in the middle of the data file, at the same time other threads are
+reading from the table (concurrent insert). A free block can come from
+an update of a dynamic length row with much data to a row with less data
+or when deleting rows. When all free blocks are used up, all future
+inserts will be concurrent again.
@item
Support for big files (63-bit) on filesystems/operating systems that
support big files.
@@ -23692,6 +23909,10 @@ with updates and inserts. This is done by automatically combining adjacent
deleted blocks and by extending blocks if the next block is deleted.
@item
@code{myisampack} can pack @code{BLOB} and @code{VARCHAR} columns.
+@item
+You can put the data file and index file in different directories
+to get more speed (with the @code{DATA/INDEX DIRECTORY="path"} option to
+@code{CREATE TABLE}). @xref{CREATE TABLE}.
@end itemize
@code{MyISAM} also supports the following things, which @strong{MySQL}
@@ -24270,6 +24491,14 @@ tables are:
@item Tables are compressed with @code{pack_isam} rather than with @code{myisampack}.
@end itemize
+If you want to convert an @code{ISAM} table to a @code{MyISAM} table so
+that you can use utilities such as @code{mysqlcheck}, use an @code{ALTER
+TABLE} statement:
+
+@example
+mysql> ALTER TABLE tbl_name TYPE = MYISAM;
+@end example
+
@cindex tables, @code{HEAP}
@node HEAP, BDB, ISAM, Table types
@section HEAP Tables
@@ -24348,375 +24577,6 @@ SUM_OVER_ALL_KEYS(max_length_of_key + sizeof(char*) * 2)
@code{sizeof(char*)} is 4 on 32-bit machines and 8 on 64-bit machines.
-@cindex tables, @code{BDB}
-@cindex tables, @code{Berkeley DB}
-@node BDB, GEMINI, HEAP, Table types
-@section BDB or Berkeley_DB Tables
-
-@menu
-* BDB overview:: Overview of BDB Tables
-* BDB install:: Installing BDB
-* BDB start:: BDB startup options
-* BDB characteristic:: Some characteristic of @code{BDB} tables:
-* BDB TODO:: Some things we need to fix for BDB in the near future:
-* BDB portability:: Operating systems supported by @strong{BDB}
-* BDB errors:: Errors You May Get When Using BDB Tables
-@end menu
-
-@node BDB overview, BDB install, BDB, BDB
-@subsection Overview of BDB Tables
-
-Support for BDB tables is included in the @strong{MySQL} source distribution
-starting from Version 3.23.34 and is activated in the @strong{MySQL}-Max
-binary.
-
-BerkeleyDB, available at @uref{http://www.sleepycat.com/} has provided
-@strong{MySQL} with a transactional table handler. By using BerkeleyDB
-tables, your tables may have a greater chance of surviving crashes, and also
-provides @code{COMMIT} and @code{ROLLBACK} on transactions. The
-@strong{MySQL} source distribution comes with a BDB distribution that has a
-couple of small patches to make it work more smoothly with @strong{MySQL}.
-You can't use a non-patched @code{BDB} version with @strong{MySQL}.
-
-We at @strong{MySQL AB} are working in close cooperation with Sleepycat to
-keep the quality of the @strong{MySQL}/BDB interface high.
-
-When it comes to supporting BDB tables, we are committed to help our
-users to locate the problem and help creating a reproducable test case
-for any problems involving BDB tables. Any such test case will be
-forwarded to Sleepycat who in turn will help us find and fix the
-problem. As this is a two stage operation, any problems with BDB tables
-may take a little longer for us to fix than for other table handlers.
-However, as the BerkeleyDB code itself has been used by many other
-applications than @strong{MySQL}, we don't envision any big problems with
-this. @xref{Table handler support}.
-
-@node BDB install, BDB start, BDB overview, BDB
-@subsection Installing BDB
-
-If you have downloaded a binary version of @strong{MySQL} that includes
-support for BerkeleyDB, simply follow the instructions for installing a
-binary version of @strong{MySQL}.
-@xref{Installing binary}. @xref{mysqld-max}.
-
-To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL}
-Version 3.23.34 or newer and configure @code{MySQL} with the
-@code{--with-berkeley-db} option. @xref{Installing source}.
-
-@example
-cd /path/to/source/of/mysql-3.23.34
-./configure --with-berkeley-db
-@end example
-
-Please refer to the manual provided with the @code{BDB} distribution for
-more updated information.
-
-Even though Berkeley DB is in itself very tested and reliable,
-the @strong{MySQL} interface is still considered beta quality.
-We are actively improving and optimizing it to get it stable very
-soon.
-
-@node BDB start, BDB characteristic, BDB install, BDB
-@subsection BDB startup options
-
-If you are running with @code{AUTOCOMMIT=0} then your changes in @code{BDB}
-tables will not be updated until you execute @code{COMMIT}. Instead of commit
-you can execute @code{ROLLBACK} to forget your changes. @xref{COMMIT}.
-
-If you are running with @code{AUTOCOMMIT=1} (the default), your changes
-will be committed immediately. You can start an extended transaction with
-the @code{BEGIN WORK} SQL command, after which your changes will not be
-committed until you execute @code{COMMIT} (or decide to @code{ROLLBACK}
-the changes).
-
-The following options to @code{mysqld} can be used to change the behavior of
-BDB tables:
-
-@multitable @columnfractions .30 .70
-@item @strong{Option} @tab @strong{Meaning}
-@item @code{--bdb-home=directory} @tab Base directory for BDB tables. This should be the same directory you use for --datadir.
-@item @code{--bdb-lock-detect=#} @tab Berkeley lock detect. One of (DEFAULT, OLDEST, RANDOM, or YOUNGEST).
-@item @code{--bdb-logdir=directory} @tab Berkeley DB log file directory.
-@item @code{--bdb-no-sync} @tab Don't synchronously flush logs.
-@item @code{--bdb-no-recover} @tab Don't start Berkeley DB in recover mode.
-@item @code{--bdb-shared-data} @tab Start Berkeley DB in multi-process mode (Don't use @code{DB_PRIVATE} when initializing Berkeley DB)
-@item @code{--bdb-tmpdir=directory} @tab Berkeley DB tempfile name.
-@item @code{--skip-bdb} @tab Don't use berkeley db.
-@item @code{-O bdb_max_lock=1000} @tab Set the maximum number of locks possible. @xref{SHOW VARIABLES}.
-@end multitable
-
-If you use @code{--skip-bdb}, @strong{MySQL} will not initialize the
-Berkeley DB library and this will save a lot of memory. Of course,
-you cannot use @code{BDB} tables if you are using this option.
-
-Normally you should start @code{mysqld} without @code{--bdb-no-recover} if you
-intend to use BDB tables. This may, however, give you problems when you
-try to start @code{mysqld} if the BDB log files are corrupted. @xref{Starting
-server}.
-
-With @code{bdb_max_lock} you can specify the maximum number of locks
-(10000 by default) you can have active on a BDB table. You should
-increase this if you get errors of type @code{bdb: Lock table is out of
-available locks} or @code{Got error 12 from ...} when you have do long
-transactions or when @code{mysqld} has to examine a lot of rows to
-calculate the query.
-
-You may also want to change @code{binlog_cache_size} and
-@code{max_binlog_cache_size} if you are using big multi-line transactions.
-@xref{COMMIT}.
-
-@node BDB characteristic, BDB TODO, BDB start, BDB
-@subsection Some characteristic of @code{BDB} tables:
-
-@itemize @bullet
-@item
-To be able to rollback transactions BDB maintain log files. For maximum
-performance you should place these on another disk than your databases
-by using the @code{--bdb_log_dir} options.
-@item
-@strong{MySQL} performs a checkpoint each time a new BDB log
-file is started, and removes any log files that are not needed for
-current transactions. One can also run @code{FLUSH LOGS} at any time
-to checkpoint the Berkeley DB tables.
-
-For disaster recovery, one should use table backups plus
-@strong{MySQL}'s binary log. @xref{Backup}.
-
-@strong{Warning}: If you delete old log files that are in use, BDB will
-not be able to do recovery at all and you may loose data if something
-goes wrong.
-@item
-@strong{MySQL} requires a @code{PRIMARY KEY} in each BDB table to be
-able to refer to previously read rows. If you don't create one,
-@strong{MySQL} will create an maintain a hidden @code{PRIMARY KEY} for
-you. The hidden key has a length of 5 bytes and is incremented for each
-insert attempt.
-@item
-If all columns you access in a @code{BDB} table are part of the same index or
-part of the primary key, then @strong{MySQL} can execute the query
-without having to access the actual row. In a @code{MyISAM} table the
-above holds only if the columns are part of the same index.
-@item
-The @code{PRIMARY KEY} will be faster than any other key, as the
-@code{PRIMARY KEY} is stored together with the row data. As the other keys are
-stored as the key data + the @code{PRIMARY KEY}, it's important to keep the
-@code{PRIMARY KEY} as short as possible to save disk and get better speed.
-@item
-@code{LOCK TABLES} works on @code{BDB} tables as with other tables. If
-you don't use @code{LOCK TABLE}, @strong{MYSQL} will issue an internal
-multiple-write lock on the table to ensure that the table will be
-properly locked if another thread issues a table lock.
-@item
-Internal locking in @code{BDB} tables is done on page level.
-@item
-@code{SELECT COUNT(*) FROM table_name} is slow as @code{BDB} tables doesn't
-maintain a count of the number of rows in the table.
-@item
-Scanning is slower than with @code{MyISAM} tables as one has data in BDB
-tables stored in B-trees and not in a separate data file.
-@item
-The application must always be prepared to handle cases where
-any change of a @code{BDB} table may make an automatic rollback and any
-read may fail with a deadlock error.
-@item
-Keys are not compressed to previous keys as with ISAM or MyISAM
-tables. In other words, the key information will take a little more
-space in @code{BDB} tables compared to MyISAM tables which don't use
-@code{PACK_KEYS=0}.
-@item
-There is often holes in the BDB table to allow you to insert new rows in
-the middle of the key tree. This makes BDB tables somewhat larger than
-MyISAM tables.
-@item
-The optimizer needs to know an approximation of the number of rows in
-the table. @strong{MySQL} solves this by counting inserts and
-maintaining this in a separate segment in each BDB table. If you don't
-do a lot of @code{DELETE} or @code{ROLLBACK}:s this number should be
-accurate enough for the @strong{MySQL} optimizer, but as @strong{MySQL}
-only store the number on close, it may be wrong if @strong{MySQL} dies
-unexpectedly. It should not be fatal even if this number is not 100 %
-correct. One can update the number of rows by executing @code{ANALYZE
-TABLE} or @code{OPTIMIZE TABLE}. @xref{ANALYZE TABLE} . @xref{OPTIMIZE
-TABLE}.
-@item
-If you get full disk with a @code{BDB} table, you will get an error
-(probably error 28) and the transaction should roll back. This is in
-contrast with @code{MyISAM} and @code{ISAM} tables where @code{mysqld} will
-wait for enough free disk before continuing.
-@end itemize
-
-@node BDB TODO, BDB portability, BDB characteristic, BDB
-@subsection Some things we need to fix for BDB in the near future:
-
-@itemize @bullet
-@item
-It's very slow to open many BDB tables at the same time. If you are
-going to use BDB tables, you should not have a very big table cache (>
-256 ?) and you should use @code{--no-auto-rehash} with the @code{mysql}
-client. We plan to partly fix this in 4.0.
-@item
-@code{SHOW TABLE STATUS} doesn't yet provide that much information for BDB
-tables.
-@item
-Optimize performance.
-@item
-Change to not use page locks at all when we are scanning tables.
-@end itemize
-
-@node BDB portability, BDB errors, BDB TODO, BDB
-@subsection Operating systems supported by @strong{BDB}
-
-If you after having built @strong{MySQL} with support for BDB tables get
-the following error in the log file when you start @code{mysqld}:
-
-@example
-bdb: architecture lacks fast mutexes: applications cannot be threaded
-Can't init dtabases
-@end example
-
-This means that @code{BDB} tables are not supported for your architecture.
-In this case you have to rebuild @strong{MySQL} without BDB table support.
-
-NOTE: The following list is not complete; We will update this as we get
-more information about this.
-
-Currently we know that BDB tables works with the following operating
-system.
-
-@itemize @bullet
-@item
-Linux 2.x intel
-@item
-Solaris sparc
-@item
-SCO OpenServer
-@item
-SCO UnixWare 7.0.1
-@end itemize
-
-It doesn't work with the following operating systems:
-
-@itemize @bullet
-@item
-Linux 2.x Alpha
-@item
-Max OS X
-@end itemize
-
-@node BDB errors, , BDB portability, BDB
-@subsection Errors You May Get When Using BDB Tables
-
-@itemize @bullet
-@item
-If you get the following error in the @code{hostname.err log} when
-starting @code{mysqld}:
-
-@example
-bdb: Ignoring log file: .../log.XXXXXXXXXX: unsupported log version #
-@end example
-it means that the new @code{BDB} version doesn't support the old log
-file format. In this case you have to delete all @code{BDB} log BDB
-from your database directory (the files that has the format
-@code{log.XXXXXXXXXX} ) and restart @code{mysqld}. We would also
-recommend you to do a @code{mysqldump --opt} of your old @code{BDB}
-tables, delete the old table and restore the dump.
-@item
-If you are running in not @code{auto_commit} mode and delete a table you
-are using by another thread you may get the following error messages in
-the @strong{MySQL} error file:
-
-@example
-001119 23:43:56 bdb: Missing log fileid entry
-001119 23:43:56 bdb: txn_abort: Log undo failed for LSN: 1 3644744: Invalid
-@end example
-
-This is not fatal but we don't recommend that you delete tables if you are
-not in @code{auto_commit} mode, until this problem is fixed (the fix is
-not trivial).
-@end itemize
-
-@cindex tables, @code{GEMINI}
-@node GEMINI, InnoDB, BDB, Table types
-@section GEMINI Tables
-
-@menu
-* GEMINI overview::
-* GEMINI start::
-* GEMINI features::
-* GEMINI TODO::
-@end menu
-
-@node GEMINI overview, GEMINI start, GEMINI, GEMINI
-@subsection Overview of GEMINI tables
-
-The @code{GEMINI} table type is developed and supported by NuSphere Corporation
-(@uref{http://www.nusphere.com}). It features row-level locking, transaction
-support (@code{COMMIT} and @code{ROLLBACK}), and automatic crash recovery.
-
-@code{GEMINI} tables will be included in some future @strong{MySQL} 3.23.X
-source distribution.
-
-@node GEMINI start, GEMINI features, GEMINI overview, GEMINI
-@subsection GEMINI startup options
-
-If you are running with @code{AUTOCOMMIT=0} then your changes in @code{GEMINI}
-tables will not be updated until you execute @code{COMMIT}. Instead of commit
-you can execute @code{ROLLBACK} to forget your changes. @xref{COMMIT}.
-
-If you are running with @code{AUTOCOMMIT=1} (the default), your changes
-will be committed immediately. You can start an extended transaction with
-the @code{BEGIN WORK} SQL command, after which your changes will not be
-committed until you execute @code{COMMIT} (or decide to @code{ROLLBACK}
-the changes).
-
-The following options to @code{mysqld} can be used to change the behavior of
-GEMINI tables:
-
-@multitable @columnfractions .30 .70
-@item @strong{Option} @tab @strong{Meaning}
-@item @code{--gemini-full-recovery} @tab Default.
-@item @code{--gemini-no-recovery} @tab Turn off recovery logging. Not recommended.
-@item @code{--gemini-lazy-commit} @tab Relaxes the flush log at commit rule.
-@item @code{--gemini-unbuffered-io} @tab All database writes bypass OS cache.
-@item @code{--skip-gemini} @tab Don't use Gemini.
-@item @code{--O gemini_db_buffers=#} @tab Number of database buffers in database cache.
-@item @code{--O gemini_connection_limit=#} @tab Maximum number of connections to Gemini.
-@item @code{--O gemini_spin_retries=#} @tab Spin lock retries (optimization).
-@item @code{--O gemini_io_threads=#} @tab Number of background I/O threads.
-@item @code{--O gemini_lock_table_size=#} @tab Set the maximum number of locks. Default 4096.
-@end multitable
-
-If you use @code{--skip-gemini}, @strong{MySQL} will not initialize the
-Gemini table handler, saving memory; you cannot use Gemini tables if you
-use @code{--skip-gemini}.
-
-@node GEMINI features, GEMINI TODO, GEMINI start, GEMINI
-@subsection Features of @code{GEMINI} tables:
-
-@itemize @bullet
-@item
-If a query result can be resolved solely from the index key, Gemini will
-not read the actual row stored in the database.
-@item
-Locking on Gemini tables is done at row level.
-@item
-@code{SELECT COUNT(*) FROM table_name} is fast; Gemini maintains a count
-of the number of rows in the table.
-@end itemize
-
-@node GEMINI TODO, , GEMINI features, GEMINI
-@subsection Current limitations of @code{GEMINI} tables:
-
-@itemize @bullet
-@item
-BLOB columns are not supported in @code{GEMINI} tables.
-@item
-The maximum number of concurrent users accessing @code{GEMINI} tables is
-limited by @code{gemini_connection_limit}. The default is 100 users.
-@end itemize
-
-NuSphere is working on removing these limitations.
-
@node InnoDB, , GEMINI, Table types
@section InnoDB Tables
@@ -24747,7 +24607,7 @@ binary.
If you have downloaded a binary version of @strong{MySQL} that includes
support for InnoDB (mysqld-max), simply follow the instructions for
installing a binary version of @strong{MySQL}. @xref{Installing binary}.
-@xref{mysqld-max}.
+@xref{mysqld-max, , @code{mysqld-max}}.
To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer
and configure @code{MySQL} with the @code{--with-innodb} option.
@@ -24769,6 +24629,11 @@ InnoDB has been designed for maximum performance when processing
large data volumes. Its CPU efficiency is probably not
matched by any other disk-based relational database engine.
+You can find the latest information about InnoDB at
+@uref{http://www.innodb.com}. The most up-to-date version of the
+InnoDB manual is always placed there, and you can also order commercial
+support for InnoDB.
+
Technically, InnoDB is a database backend placed under @strong{MySQL}. InnoDB
has its own buffer pool for caching data and indexes in main
memory. InnoDB stores its tables and indexes in a tablespace, which
@@ -24918,6 +24783,17 @@ in its own lock table and rolls back the transaction. If you use
than InnoDB in the same transaction, then a deadlock may arise which
InnoDB cannot notice. In cases like this the timeout is useful to
resolve the situation.
+@item @code{innodb_flush_method} @tab
+(Available from 3.23.40 up.)
+The default value for this is @code{fdatasync}.
+Another option is @code{O_DSYNC}.
+Options @code{littlesync} and @code{nosync} have the
+risk that in an operating system crash or a power outage you may easily
+end up with a half-written database page, and you have to do a recovery
+from a backup. See the section "InnoDB performance tuning", item 6, below
+for tips on how to set this parameter. If you are happy with your database
+performance it is wisest not to specify this parameter at all, in which
+case it will get the default value.
@end multitable
@node InnoDB init, Using InnoDB tables, InnoDB start, InnoDB
@@ -24943,11 +24819,14 @@ InnoDB: Database physically writes the file full: wait...
InnoDB: Data file /home/heikki/data/ibdata2 did not exist: new to be created
InnoDB: Setting file /home/heikki/data/ibdata2 size to 262144000
InnoDB: Database physically writes the file full: wait...
-InnoDB: Log file /home/heikki/data/logs/ib_logfile0 did not exist: new to be created
+InnoDB: Log file /home/heikki/data/logs/ib_logfile0 did not exist: new to be c
+reated
InnoDB: Setting log file /home/heikki/data/logs/ib_logfile0 size to 5242880
-InnoDB: Log file /home/heikki/data/logs/ib_logfile1 did not exist: new to be created
+InnoDB: Log file /home/heikki/data/logs/ib_logfile1 did not exist: new to be c
+reated
InnoDB: Setting log file /home/heikki/data/logs/ib_logfile1 size to 5242880
-InnoDB: Log file /home/heikki/data/logs/ib_logfile2 did not exist: new to be created
+InnoDB: Log file /home/heikki/data/logs/ib_logfile2 did not exist: new to be c
+reated
InnoDB: Setting log file /home/heikki/data/logs/ib_logfile2 size to 5242880
InnoDB: Started
mysqld: ready for connections
@@ -24979,7 +24858,7 @@ mysqld: ready for connections
@end example
@menu
-* Error creating InnoDB::
+* Error creating InnoDB::
@end menu
@node Error creating InnoDB, , InnoDB init, InnoDB init
@@ -25035,6 +24914,46 @@ InnoDB has its own internal data dictionary, and you will get problems
if the @strong{MySQL} @file{.frm} files are out of 'sync' with the InnoDB
internal data dictionary.
+@subsubsection Converting MyISAM tables to InnoDB
+
+InnoDB does not have a special optimization for separate index creation.
+Therefore it does not pay to export and import the table and create indexes
+afterwards.
+The fastest way to alter a table to InnoDB is to do the inserts
+directly to an InnoDB table, that is, use @code{ALTER TABLE ... TYPE=INNODB},
+or create an empty InnoDB table with identical definitions and insert
+the rows with @code{INSERT INTO ... SELECT * FROM ...}.
+
+To get better control over the insertion process, it may be good to insert
+big tables in pieces:
+
+@example
+INSERT INTO newtable SELECT * FROM oldtable WHERE yourkey > something
+ AND yourkey <= somethingelse;
+@end example
+
+After all data has been inserted you can rename the tables.
+
+During the conversion of big tables you should set the InnoDB
+buffer pool size big
+to reduce disk i/o. Not bigger than 80 % of the physical memory, though.
+You should set InnoDB log files big, and also the log buffer large.
+
+Make sure you do not run out of tablespace: InnoDB tables take a lot
+more space than MyISAM tables. If an @code{ALTER TABLE} runs out
+of space, it will start a rollback, and that can take hours if it is
+disk-bound.
+In inserts InnoDB uses the insert buffer to merge secondary index records
+to indexes in batches. That saves a lot of disk i/o. In rollback no such
+mechanism is used, and the rollback can take 30 times longer than the
+insertion.
+
+In the case of a runaway rollback, if you do not have valuable data in your
+database,
+it is better that you kill the database process and delete all InnoDB data
+and log files and all InnoDB table @file{.frm} files, and start
+your job again, rather than wait for millions of disk i/os to complete.
+
@node Adding and removing, Backing up, Using InnoDB tables, InnoDB
@subsection Adding and removing InnoDB data and log files
@@ -25435,6 +25354,103 @@ set by the SQL statement may be preserved. This is because InnoDB
stores row locks in a format where it cannot afterwards know which was
set by which SQL statement.
+@subsection Performance tuning tips
+
+@strong{1.}
+If the Unix @file{top} or the Windows @file{Task Manager} shows that
+the CPU usage percentage with your workload is less than 70 %,
+your workload is probably
+disk-bound. Maybe you are making too many transaction commits, or the
+buffer pool is too small.
+Making the buffer pool bigger can help, but do not set
+it bigger than 80 % of physical memory.
+
+@strong{2.}
+Wrap several modifications into one transaction. InnoDB must
+flush the log to disk at each transaction commit, if that transaction
+made modifications to the database. Since the rotation speed of a disk
+is typically
+at most 167 revolutions/second, that constrains the number of commits
+to the same 167/second if the disk does not fool the operating system.
+
+@strong{3.}
+If you can afford the loss of some latest committed transactions, you can
+set the @file{my.cnf} parameter @code{innodb_flush_log_at_trx_commit}
+to zero. InnoDB tries to flush the log anyway once in a second,
+though the flush is not guaranteed.
+
+@strong{4.}
+Make your log files big, even as big as the buffer pool. When InnoDB
+has written the log files full, it has to write the modified contents
+of the buffer pool to disk in a checkpoint. Small log files will cause many
+unnecessary disk writes. The drawback in big log files is that recovery
+time will be longer.
+
+@strong{5.}
+Also the log buffer should be quite big, say 8 MB.
+
+@strong{6.} (Relevant from 3.23.39 up.)
+In some versions of Linux and Unix, flushing files to disk with the Unix
+@code{fdatasync} and other similar methods is surprisingly slow.
+The default method InnoDB uses is the @code{fdatasync} function.
+If you are not satisfied with the database write performance, you may
+try setting @code{innodb_flush_method} in @file{my.cnf}
+to @code{O_DSYNC}, though O_DSYNC seems to be slower on most systems.
+You can also try setting it to @code{littlesync}, which means that
+InnoDB does not call the file flush for every write it does to a
+file, but only
+in log flush at transaction commits and data file flush at a checkpoint.
+The drawback in @code{littlesync} is that if the operating system
+crashes, you can easily end up with a half-written database page,
+and you have to
+do a recovery from a backup. With @code{nosync} you have even less safety:
+InnoDB will only flush the database files to disk at database shutdown.
+
+@strong{7.} In importing data to InnoDB, make sure that MySQL does not have
+@code{autocommit=1} on. Then every insert requires a log flush to disk.
+Put before your plain SQL import file line
+
+@example
+set autocommit=0;
+@end example
+
+and after it
+
+@example
+commit;
+@end example
+
+If you use the @file{mysqldump} option @code{--opt}, you will get dump
+files which are fast to import also to an InnoDB table, even without wrapping
+them to the above @code{set autocommit=0; ... commit;} wrappers.
+
+@strong{8.}
+Beware of big rollbacks of mass inserts: InnoDB uses the insert buffer
+to save disk i/o in inserts, but in a corresponding rollback no such
+mechanism is used. A disk-bound rollback can take 30 times the time
+of the corresponding insert. Killing the database process will not
+help because the rollback will start again at the database startup. The
+only way to get rid of a runaway rollback is to increase the buffer pool
+so that the rollback becomes CPU-bound and runs fast, or delete the whole
+InnoDB database.
+
+@strong{9.}
+Beware also of other big disk-bound operations.
+Use @code{DROP TABLE}
+or @code{TRUNCATE} (from MySQL-4.0 up) to empty a table, not
+@code{DELETE FROM yourtable}.
+
+@strong{10.}
+Use the multi-line @code{INSERT} to reduce
+communication overhead between the client and the server if you need
+to insert many rows:
+
+@example
+INSERT INTO yourtable VALUES (1, 2), (5, 5);
+@end example
+
+This tip is of course valid for inserts into any table type, not just InnoDB.
+
@node Implementation, Table and index, InnoDB transaction model, InnoDB
@subsection Implementation of multiversioning
@@ -25655,11 +25671,11 @@ integer that can be stored in the specified integer type.
In disk i/o InnoDB uses asynchronous i/o. On Windows NT
it uses the native asynchronous i/o provided by the operating system.
-On Unixes InnoDB uses simulated asynchronous i/o built
+On Unix, InnoDB uses simulated asynchronous i/o built
into InnoDB: InnoDB creates a number of i/o threads to take care
of i/o operations, such as read-ahead. In a future version we will
add support for simulated aio on Windows NT and native aio on those
-Unixes which have one.
+versions of Unix which have one.
On Windows NT InnoDB uses non-buffered i/o. That means that the disk
pages InnoDB reads or writes are not buffered in the operating system
@@ -25670,7 +25686,7 @@ just define the raw disk in place of a data file in @file{my.cnf}.
You must give the exact size in bytes of the raw disk in @file{my.cnf},
because at startup InnoDB checks that the size of the file
is the same as specified in the configuration file. Using a raw disk
-you can on some Unixes perform non-buffered i/o.
+you can on some versions of Unix perform non-buffered i/o.
There are two read-ahead heuristics in InnoDB: sequential read-ahead
and random read-ahead. In sequential read-ahead InnoDB notices that
@@ -25787,6 +25803,11 @@ they roll back the corresponding SQL statement.
@subsection Some restrictions on InnoDB tables
@itemize @bullet
+
+@item @code{SHOW TABLE STATUS} does not give accurate statistics
+on InnoDB tables, except for the physical size reserved by the table.
+The row count is only a rough estimate used in SQL optimization.
+
@item
If you try to create an unique index on a prefix of a column you will get an
error:
@@ -25835,17 +25856,17 @@ files your operating system supports. Support for > 4 GB files will
be added to InnoDB in a future version.
@item
The maximum tablespace size is 4 billion database pages. This is also
-the maximum size for a table.
+the maximum size for a table. The minimum tablespace size is 10 MB.
@end itemize
@node InnoDB contact information, , InnoDB restrictions, InnoDB
@subsection InnoDB contact information
-Contact information of Innobase Oy, producer of the InnoDB engine:
+Contact information of Innobase Oy, producer of the InnoDB engine.
+Website: @uref{http://www.innodb.com}. Email:
+@email{Heikki.Tuuri@@innodb.com}
@example
-Website: www.innobase.fi
-Heikki.Tuuri@@innobase.inet.fi
phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
InnoDB Oy Inc.
World Trade Center Helsinki
@@ -25855,6 +25876,1142 @@ P.O.Box 800
Finland
@end example
+@cindex tables, @code{BDB}
+@cindex tables, @code{Berkeley DB}
+@node BDB, GEMINI, HEAP, Table types
+@section BDB or Berkeley_DB Tables
+
+@menu
+* BDB overview:: Overview of BDB Tables
+* BDB install:: Installing BDB
+* BDB start:: BDB startup options
+* BDB characteristic:: Some characteristic of @code{BDB} tables:
+* BDB TODO:: Some things we need to fix for BDB in the near future:
+* BDB portability:: Operating systems supported by @strong{BDB}
+* BDB errors:: Errors You May Get When Using BDB Tables
+@end menu
+
+@node BDB overview, BDB install, BDB, BDB
+@subsection Overview of BDB Tables
+
+Support for BDB tables is included in the @strong{MySQL} source distribution
+starting from Version 3.23.34 and is activated in the @strong{MySQL}-Max
+binary.
+
+BerkeleyDB, available at @uref{http://www.sleepycat.com/} has provided
+@strong{MySQL} with a transactional table handler. By using BerkeleyDB
+tables, your tables may have a greater chance of surviving crashes, and also
+provides @code{COMMIT} and @code{ROLLBACK} on transactions. The
+@strong{MySQL} source distribution comes with a BDB distribution that has a
+couple of small patches to make it work more smoothly with @strong{MySQL}.
+You can't use a non-patched @code{BDB} version with @strong{MySQL}.
+
+We at @strong{MySQL AB} are working in close cooperation with Sleepycat to
+keep the quality of the @strong{MySQL}/BDB interface high.
+
+When it comes to supporting BDB tables, we are committed to help our
+users to locate the problem and help creating a reproducible test case
+for any problems involving BDB tables. Any such test case will be
+forwarded to Sleepycat who in turn will help us find and fix the
+problem. As this is a two stage operation, any problems with BDB tables
+may take a little longer for us to fix than for other table handlers.
+However, as the BerkeleyDB code itself has been used by many other
+applications than @strong{MySQL}, we don't envision any big problems with
+this. @xref{Table handler support}.
+
+@node BDB install, BDB start, BDB overview, BDB
+@subsection Installing BDB
+
+If you have downloaded a binary version of @strong{MySQL} that includes
+support for BerkeleyDB, simply follow the instructions for installing a
+binary version of @strong{MySQL}.
+@xref{Installing binary}. @xref{mysqld-max, , @code{mysqld-max}}.
+
+To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL}
+Version 3.23.34 or newer and configure @code{MySQL} with the
+@code{--with-berkeley-db} option. @xref{Installing source}.
+
+@example
+cd /path/to/source/of/mysql-3.23.34
+./configure --with-berkeley-db
+@end example
+
+Please refer to the manual provided with the @code{BDB} distribution for
+more updated information.
+
+Even though Berkeley DB is in itself very tested and reliable,
+the @strong{MySQL} interface is still considered beta quality.
+We are actively improving and optimizing it to get it stable very
+soon.
+
+@node BDB start, BDB characteristic, BDB install, BDB
+@subsection BDB startup options
+
+If you are running with @code{AUTOCOMMIT=0} then your changes in @code{BDB}
+tables will not be updated until you execute @code{COMMIT}. Instead of commit
+you can execute @code{ROLLBACK} to forget your changes. @xref{COMMIT}.
+
+If you are running with @code{AUTOCOMMIT=1} (the default), your changes
+will be committed immediately. You can start an extended transaction with
+the @code{BEGIN WORK} SQL command, after which your changes will not be
+committed until you execute @code{COMMIT} (or decide to @code{ROLLBACK}
+the changes).
+
+The following options to @code{mysqld} can be used to change the behavior of
+BDB tables:
+
+@multitable @columnfractions .30 .70
+@item @strong{Option} @tab @strong{Meaning}
+@item @code{--bdb-home=directory} @tab Base directory for BDB tables. This should be the same directory you use for --datadir.
+@item @code{--bdb-lock-detect=#} @tab Berkeley lock detect. One of (DEFAULT, OLDEST, RANDOM, or YOUNGEST).
+@item @code{--bdb-logdir=directory} @tab Berkeley DB log file directory.
+@item @code{--bdb-no-sync} @tab Don't synchronously flush logs.
+@item @code{--bdb-no-recover} @tab Don't start Berkeley DB in recover mode.
+@item @code{--bdb-shared-data} @tab Start Berkeley DB in multi-process mode (Don't use @code{DB_PRIVATE} when initializing Berkeley DB)
+@item @code{--bdb-tmpdir=directory} @tab Berkeley DB tempfile name.
+@item @code{--skip-bdb} @tab Don't use berkeley db.
+@item @code{-O bdb_max_lock=1000} @tab Set the maximum number of locks possible. @xref{SHOW VARIABLES}.
+@end multitable
+
+If you use @code{--skip-bdb}, @strong{MySQL} will not initialize the
+Berkeley DB library and this will save a lot of memory. Of course,
+you cannot use @code{BDB} tables if you are using this option.
+
+Normally you should start @code{mysqld} without @code{--bdb-no-recover} if you
+intend to use BDB tables. This may, however, give you problems when you
+try to start @code{mysqld} if the BDB log files are corrupted. @xref{Starting
+server}.
+
+With @code{bdb_max_lock} you can specify the maximum number of locks
+(10000 by default) you can have active on a BDB table. You should
+increase this if you get errors of type @code{bdb: Lock table is out of
+available locks} or @code{Got error 12 from ...} when you do long
+transactions or when @code{mysqld} has to examine a lot of rows to
+calculate the query.
+
+You may also want to change @code{binlog_cache_size} and
+@code{max_binlog_cache_size} if you are using big multi-line transactions.
+@xref{COMMIT}.
+
+@node BDB characteristic, BDB TODO, BDB start, BDB
+@subsection Some characteristic of @code{BDB} tables:
+
+@itemize @bullet
+@item
+To be able to rollback transactions BDB maintain log files. For maximum
+performance you should place these on another disk than your databases
+by using the @code{--bdb_log_dir} options.
+@item
+@strong{MySQL} performs a checkpoint each time a new BDB log
+file is started, and removes any log files that are not needed for
+current transactions. One can also run @code{FLUSH LOGS} at any time
+to checkpoint the Berkeley DB tables.
+
+For disaster recovery, one should use table backups plus
+@strong{MySQL}'s binary log. @xref{Backup}.
+
+@strong{Warning}: If you delete old log files that are in use, BDB will
+not be able to do recovery at all and you may lose data if something
+goes wrong.
+@item
+@strong{MySQL} requires a @code{PRIMARY KEY} in each BDB table to be
+able to refer to previously read rows. If you don't create one,
+@strong{MySQL} will create and maintain a hidden @code{PRIMARY KEY} for
+you. The hidden key has a length of 5 bytes and is incremented for each
+insert attempt.
+@item
+If all columns you access in a @code{BDB} table are part of the same index or
+part of the primary key, then @strong{MySQL} can execute the query
+without having to access the actual row. In a @code{MyISAM} table the
+above holds only if the columns are part of the same index.
+@item
+The @code{PRIMARY KEY} will be faster than any other key, as the
+@code{PRIMARY KEY} is stored together with the row data. As the other keys are
+stored as the key data + the @code{PRIMARY KEY}, it's important to keep the
+@code{PRIMARY KEY} as short as possible to save disk and get better speed.
+@item
+@code{LOCK TABLES} works on @code{BDB} tables as with other tables. If
+you don't use @code{LOCK TABLE}, @strong{MYSQL} will issue an internal
+multiple-write lock on the table to ensure that the table will be
+properly locked if another thread issues a table lock.
+@item
+Internal locking in @code{BDB} tables is done on page level.
+@item
+@code{SELECT COUNT(*) FROM table_name} is slow as @code{BDB} tables don't
+maintain a count of the number of rows in the table.
+@item
+Scanning is slower than with @code{MyISAM} tables as one has data in BDB
+tables stored in B-trees and not in a separate data file.
+@item
+The application must always be prepared to handle cases where
+any change of a @code{BDB} table may make an automatic rollback and any
+read may fail with a deadlock error.
+@item
+Keys are not compressed to previous keys as with ISAM or MyISAM
+tables. In other words, the key information will take a little more
+space in @code{BDB} tables compared to MyISAM tables which don't use
+@code{PACK_KEYS=0}.
+@item
+There are often holes in the BDB table to allow you to insert new rows in
+the middle of the key tree. This makes BDB tables somewhat larger than
+MyISAM tables.
+@item
+The optimizer needs to know an approximation of the number of rows in
+the table. @strong{MySQL} solves this by counting inserts and
+maintaining this in a separate segment in each BDB table. If you don't
+do a lot of @code{DELETE} or @code{ROLLBACK}:s this number should be
+accurate enough for the @strong{MySQL} optimizer, but as @strong{MySQL}
+only store the number on close, it may be wrong if @strong{MySQL} dies
+unexpectedly. It should not be fatal even if this number is not 100 %
+correct. One can update the number of rows by executing @code{ANALYZE
+TABLE} or @code{OPTIMIZE TABLE}. @xref{ANALYZE TABLE} . @xref{OPTIMIZE
+TABLE}.
+@item
+If you get full disk with a @code{BDB} table, you will get an error
+(probably error 28) and the transaction should roll back. This is in
+contrast with @code{MyISAM} and @code{ISAM} tables where @code{mysqld} will
+wait for enough free disk before continuing.
+@end itemize
+
+@node BDB TODO, BDB portability, BDB characteristic, BDB
+@subsection Some things we need to fix for BDB in the near future:
+
+@itemize @bullet
+@item
+It's very slow to open many BDB tables at the same time. If you are
+going to use BDB tables, you should not have a very big table cache (>
+256 ?) and you should use @code{--no-auto-rehash} with the @code{mysql}
+client. We plan to partly fix this in 4.0.
+@item
+@code{SHOW TABLE STATUS} doesn't yet provide that much information for BDB
+tables.
+@item
+Optimize performance.
+@item
+Change to not use page locks at all when we are scanning tables.
+@end itemize
+
+@node BDB portability, BDB errors, BDB TODO, BDB
+@subsection Operating systems supported by @strong{BDB}
+
+If you after having built @strong{MySQL} with support for BDB tables get
+the following error in the log file when you start @code{mysqld}:
+
+@example
+bdb: architecture lacks fast mutexes: applications cannot be threaded
+Can't init dtabases
+@end example
+
+This means that @code{BDB} tables are not supported for your architecture.
+In this case you have to rebuild @strong{MySQL} without BDB table support.
+
+NOTE: The following list is not complete; We will update this as we get
+more information about this.
+
+Currently we know that BDB tables work with the following operating
+systems:
+
+@itemize @bullet
+@item
+Linux 2.x intel
+@item
+Solaris sparc
+@item
+SCO OpenServer
+@item
+SCO UnixWare 7.0.1
+@end itemize
+
+It doesn't work with the following operating systems:
+
+@itemize @bullet
+@item
+Linux 2.x Alpha
+@item
+Mac OS X
+@end itemize
+
+@node BDB errors, , BDB portability, BDB
+@subsection Errors You May Get When Using BDB Tables
+
+@itemize @bullet
+@item
+If you get the following error in the @code{hostname.err log} when
+starting @code{mysqld}:
+
+@example
+bdb: Ignoring log file: .../log.XXXXXXXXXX: unsupported log version #
+@end example
+it means that the new @code{BDB} version doesn't support the old log
+file format. In this case you have to delete all @code{BDB} log files
+from your database directory (the files that have the format
+@code{log.XXXXXXXXXX} ) and restart @code{mysqld}. We would also
+recommend you to do a @code{mysqldump --opt} of your old @code{BDB}
+tables, delete the old table and restore the dump.
+@item
+If you are not running in @code{auto_commit} mode and delete a table that
+is being used by another thread, you may get the following error messages in
+the @strong{MySQL} error file:
+
+@example
+001119 23:43:56 bdb: Missing log fileid entry
+001119 23:43:56 bdb: txn_abort: Log undo failed for LSN: 1 3644744: Invalid
+@end example
+
+This is not fatal but we don't recommend that you delete tables if you are
+not in @code{auto_commit} mode, until this problem is fixed (the fix is
+not trivial).
+@end itemize
+
+@cindex GEMINI tables
+@node GEMINI, InnoDB, BDB, Table types
+@section GEMINI Tables
+
+@cindex GEMINI tables, overview
+@menu
+* GEMINI Overview::
+* Using GEMINI Tables::
+@end menu
+
+@node GEMINI Overview, Using GEMINI Tables, GEMINI, GEMINI
+@subsection GEMINI Overview
+
+GEMINI is currently not included in the @strong{MySQL} 3.23 distribution
+because it's not to our knowledge an open source (GPL) product.
+
+@code{GEMINI} is a transaction-safe table handler for @strong{MySQL}. It
+provides row-level locking, robust transaction support and reliable
+crash recovery. It is targeted for databases that need to handle heavy
+multi-user updates typical of transaction processing applications while
+still providing excellent performance for read-intensive operations. The
+@code{GEMINI} table type is developed and supported by NuSphere
+Corporation (see @url{http://www.nusphere.com}).
+
+@code{GEMINI} provides full ACID transaction properties (Atomic,
+Consistent, Independent, and Durable) with a programming model that
+includes support for statement atomicity and all four standard isolation
+levels (Read Uncommitted, Read Committed, Repeatable Read, and
+Serializable) defined in the SQL standard.
+
+The @code{GEMINI} tables support row-level and table-level locking to
+increase concurrency in applications and allow reading of tables without
+locking for maximum concurrency in a heavy update environment. The
+transaction, locking, and recovery mechanisms are tightly integrated to
+eliminate unnecessary administration overhead.
+
+In general, if @code{GEMINI} tables are selected for an application, it
+is recommended that all tables updated in the application be
+@code{GEMINI} tables to provide well-defined system behavior. If
+non-@code{GEMINI} tables are mixed into the application then, ACID
+transaction properties cannot be maintained. While there are clearly
+cases where mixing table types is appropriate, it should always be done
+with careful consideration of the impact on transaction consistency and
+recoverability needs of the application and underlying database.
+
+The @code{GEMINI} table type is derived from a successful commercial
+database and uses the storage kernel technology tightly integrated with
+@strong{MySQL} server. The basic @code{GEMINI} technology is in use by
+millions of users worldwide in production environments today. This
+maturity allows @code{GEMINI} tables to provide a solution for those
+users who require transaction-based behavior as part of their
+applications.
+
+The @code{GEMINI} table handler supports a configurable data cache that
+allows a significant portion of any database to be maintained in memory
+while still allowing durable updates.
+
+@cindex GEMINI tables, features
+@menu
+* GEMINI Features::
+* GEMINI Concepts::
+* GEMINI Limitations::
+@end menu
+
+@node GEMINI Features, GEMINI Concepts, GEMINI Overview, GEMINI Overview
+@subsubsection GEMINI Features
+
+The following summarizes the major features provided by @code{GEMINI}
+tables.
+
+@itemize @bullet
+@item
+Supports all optimization statistics used by the @strong{MySQL} optimizer
+including table cardinality, index range estimates and multi-component
+selectivity to ensure optimal query performance.
+
+@item
+Maintains exact cardinality information for each table so @code{SELECT
+COUNT(*) FROM} table-name always returns an answer immediately.
+
+@item
+Supports index-only queries; when index data is sufficient to resolve a
+query no record data is read (for non character types).
+
+@item
+@code{GEMINI} uses block based I/O for better performance. There is no
+performance penalty for using @code{VARCHAR} fields. The maximum record size is
+currently 32K.
+
+@item
+The number of rows in a single @code{GEMINI} table can be 4 quintillion
+(full use of 64 bits).
+
+@item
+Individual tables can be as large as 16 petabytes.
+
+@item
+Locking is done at a record or row level rather than at table level
+unless table locks are explicitly requested. When a row is inserted into
+a table, other rows can be updated, inserted or deleted without waiting
+for the inserted row to be committed.
+
+@item
+Provides durable transactions backed by a crash recovery mechanism that
+returns the database to a known consistent state in the event of an
+unexpected failure.
+
+@item
+Support for all isolation levels and statement atomicity defined in the
+SQL standard.
+
+@item
+Reliable Master Replication; the master database can survive system
+failure and recover all committed transactions.
+@end itemize
+
+@cindex GEMINI tables, concepts
+@node GEMINI Concepts, GEMINI Limitations, GEMINI Features, GEMINI Overview
+@subsubsection GEMINI Concepts
+
+This section highlights some of the important concepts behind
+@code{GEMINI} and the @code{GEMINI} programming model, including:
+
+@itemize @bullet
+@item
+ACID Transactions
+@item
+Transaction COMMIT/ROLLBACK
+@item
+Statement Atomicity
+@item
+Recovery
+@item
+Isolation Levels
+@item
+Row-Level Locking
+@end itemize
+
+These features are described below.
+
+@cindex GEMINI tables, ACID transactions
+@noindent
+@strong{ACID Transactions}
+
+ACID in the context of transactions is an acronym which stands for
+@emph{Atomicity}, @emph{Consistency}, @emph{Isolation}, @emph{Durability}.
+
+@multitable @columnfractions .25 .75
+@item @sc{Attribute} @tab @sc{Description}
+@item
+@strong{Atomicity}
+@tab A transaction allows for the grouping of one or more changes to
+tables and rows in the database to form an atomic or indivisible
+operation. That is, either all of the changes occur or none of them
+do. If for any reason the transaction cannot be completed, everything
+this transaction changed can be restored to the state it was in prior to
+the start of the transaction via a rollback operation.
+
+@item
+@strong{Consistency}
+@tab
+Transactions always operate on a consistent view of the data and when
+they end always leave the data in a consistent state. Data may be said to
+be consistent as long as it conforms to a set of invariants, such as no
+two rows in the customer table have the same customer ID and all orders
+have an associated customer row. While a transaction executes, these
+invariants may be violated, but no other transaction will be allowed to
+see these inconsistencies, and all such inconsistencies will have been
+eliminated by the time the transaction ends.
+
+@item
+@strong{Isolation}
+@tab To a given transaction, it should appear as though it is running
+all by itself on the database. The effects of concurrently running
+transactions are invisible to this transaction, and the effects of this
+transaction are invisible to others until the transaction is committed.
+
+@item
+@strong{Durability}
+@tab Once a transaction is committed, its effects are guaranteed to
+persist even in the event of subsequent system failures. Until the
+transaction commits, not only are any changes made by that transaction
+not durable, but are guaranteed not to persist in the face of system
+failures, as crash recovery will roll back their effects.
+@end multitable
+
+@cindex GEMINI tables, COMMIT/ROLLBACK
+@noindent
+@strong{Transaction COMMIT/ROLLBACK}
+
+As stated above, a transaction is a group of work being done to
+data. Unless otherwise directed, @strong{MySQL} considers each statement
+a transaction in itself. Multiple updates can be accomplished by placing
+them in a single statement, however they are limited to a single table.
+
+Applications tend to require more robust use of transaction
+concepts. Take, for example, a system that processes an order: A row may
+be inserted in an order table, additional rows may be added to an
+order-line table, updates may be made to inventory tables, etc. It is
+important that if the order completes, all the changes are made to all
+the tables involved; likewise if the order fails, none of the changes to
+the tables must occur. To facilitate this requirement, @strong{MySQL}
+has syntax to start a transaction called @code{BEGIN WORK}. All
+statements that occur after the @code{BEGIN WORK} statement are grouped
+into a single transaction. The end of this transaction occurs when a
+@code{COMMIT} or @code{ROLLBACK} statement is encountered. After the
+@code{COMMIT} or @code{ROLLBACK} the system returns back to the behavior
+before the @code{BEGIN WORK} statement was encountered where every
+statement is a transaction.
+
+To permanently turn off the behavior where every statement is a
+transaction, @strong{MySQL} added a variable called
+@code{AUTOCOMMIT}. The @code{AUTOCOMMIT} variable can have two values,
+@code{1} and @code{0}. The mode where every statement is a transaction
+is when @code{AUTOCOMMIT} is set to @code{1} (@code{AUTOCOMMIT=1}). When
+@code{AUTOCOMMIT} is set to @code{0} (@code{AUTOCOMMIT=0}), then every
+statement is part of the same transaction until the transaction is ended
+by either @code{COMMIT} or @code{ROLLBACK}. Once a transaction completes, a
+new transaction is immediately started and the process repeats.
+
+Here is an example of the SQL statements that you may find in a typical
+order:
+
+@example
+BEGIN WORK;
+ INSERT INTO order VALUES ...;
+ INSERT INTO order-lines VALUES ...;
+ INSERT INTO order-lines VALUES ...;
+ INSERT INTO order-lines VALUES ...;
+ UPDATE inventory WHERE ...;
+COMMIT;
+@end example
+
+This example shows how to use the @code{BEGIN WORK} statement to start a
+transaction. If the variable @code{AUTOCOMMIT} is set to @code{0}, then
+a transaction would have been started already. In this case, the
+@code{BEGIN WORK} commits the current transaction and starts a new one.
+
+@cindex GEMINI tables, statement atomicity
+@noindent
+@strong{Statement Atomicity}
+
+As mentioned above, when running with @code{AUTOCOMMIT} set to @code{1},
+each statement executes as a single transaction. When a statement has an
+error, then all changes made by the statement must be
+undone. Transactions support this behavior. Non-transaction safe table
+handlers would have a partial statement update where some of the changes
+from the statement would be contained in the database and other changes
+from the statement would not. Work would need to be done to manually
+recover from the error.
+
+@cindex GEMINI tables, recovery
+@noindent
+@strong{Recovery}
+
+Transactions are the basis for database recovery. Recovery is what
+supports the Durability attribute of the ACID transaction.
+
+@code{GEMINI} uses a separate file called the Recovery Log located in
+the @code{$DATADIR} directory named @code{gemini.rl}. This file
+maintains the integrity of all the @code{GEMINI} tables. @code{GEMINI}
+can not recover any data from non-@code{GEMINI} tables. In addition, the
+@code{gemini.rl} file is used to rollback transactions in support of the
+@code{ROLLBACK} statement.
+
+In the event of a system failure, the next time the @strong{MySQL}
+server is started, @code{GEMINI} will automatically go through its
+crash recovery process. The result of crash recovery is that all the
+@code{GEMINI} tables will contain the latest changes made to them, and
+all transactions that were open at the time of the crash will have been
+rolled back.
+
+The @code{GEMINI} Recovery Log reuses space when it can. Space can be
+reused when information in the Recovery Log is no longer needed for
+crash recovery or rollback.
+
+@cindex GEMINI tables, isolation levels
+@noindent
+@strong{Isolation Levels}
+
+There are four isolation levels supported by @code{GEMINI}:
+
+@itemize @bullet
+@item
+READ UNCOMMITTED
+@item
+READ COMMITTED
+@item
+REPEATABLE READ
+@item
+SERIALIZABLE
+@end itemize
+
+These isolation levels apply only to shared locks obtained by select
+statements, excluding select for update. Statements that get exclusive
+locks always retain those locks until the transaction commits or rolls
+back.
+
+By default, @code{GEMINI} operates at the @code{READ COMMITTED}
+level. You can override the default using the following command:
+
+@example
+SET [GLOBAL | SESSION] TRANSACTION ISOLATION LEVEL [READ UNCOMMITTED |
+READ COMMITTED | REPEATABLE READ | SERIALIZABLE ]
+@end example
+
+If the @code{SESSION} qualifier is used, the specified isolation level
+persists for the entire session. If the @code{GLOBAL} qualifier is used,
+the specified isolation level is applied to all new connections from
+this point forward. Note that the specified isolation level will not
+change the behavior for existing connections including the connection
+that executes the @code{SET GLOBAL TRANSACTION ISOLATION LEVEL}
+statement.
+
+@multitable @columnfractions .30 .70
+@item @sc{Isolation Level} @tab @sc{Description}
+
+@item
+@strong{READ UNCOMMITTED}
+@tab Does not obtain any locks when reading rows. This means that if a
+row is locked by another process in a transaction that has a more strict
+isolation level, the @code{READ UNCOMMITTED} query will not wait until
+the locks are released before reading the row. You will get an error if
+you attempt any updates while running at this isolation level.
+
+@item
+@strong{READ COMMITTED}
+@tab Locks the requested rows long enough to copy the row from the
+database block to the client row buffer. If a @code{READ COMMITTED}
+query finds that a row is locked exclusively by another process, it will
+wait until either the row has been released, or the lock timeout value
+has expired.
+
+@item
+@strong{REPEATABLE READ}
+@tab Locks all the rows needed to satisfy the query. These locks are
+held until the transaction ends (commits or rolls back). If a
+@code{REPEATABLE READ} query finds that a row is locked exclusively by
+another process, it will wait until either the row has been released, or
+the lock timeout value has expired.
+
+@item
+@strong{SERIALIZABLE}
+@tab Locks the table that contains the rows needed to satisfy the
+query. This lock is held until the transaction ends (commits or rolls
+back). If a @code{SERIALIZABLE} query finds that a row is exclusively
+locked by another process, it will wait until either the row has been
+released, or the lock timeout value has expired.
+@end multitable
+
+The statements that get exclusive locks are @code{INSERT},
+@code{UPDATE}, @code{DELETE} and @code{SELECT ... FOR UPDATE}. Select
+statements without the @code{FOR UPDATE} qualifier get shared locks
+which allow other not ``for update'' select statements to read the same
+rows but block anyone trying to update the row from accessing it. Rows
+or tables with exclusive locks block all access to the row from other
+transactions until the transaction ends.
+
+In general terms, the higher the Isolation level the more likelihood of
+having concurrent locks and therefore lock conflicts. In such cases,
+adjust the @code{-O gemini_lock_table_size} accordingly.
+
+@cindex GEMINI tables, row-level locking
+@noindent
+@strong{Row-Level Locking}
+
+@code{GEMINI} uses row locks, which allows high concurrency for requests
+on the same table.
+
+In order to avoid lock table overflow, SQL statements that require
+applying locks to a large number of rows should either be run at the
+serializable isolation level or should be covered by a lock table
+statement.
+
+Memory must be pre-allocated for the lock table. The mysqld server
+startup option @code{-O gemini_lock_table_size} can be used to adjust
+the number of concurrent locks.
+
+@cindex GEMINI tables, limitations
+@node GEMINI Limitations, , GEMINI Concepts, GEMINI Overview
+@subsubsection GEMINI Limitations
+
+The following limitations are in effect for the current version of
+@code{GEMINI}:
+
+@itemize @bullet
+@item
+@code{DROP DATABASE} does not work with @code{GEMINI} tables; instead,
+drop all the tables in the database first, then drop the database.
+
+@item
+Maximum number of @code{GEMINI} tables is 1012.
+
+@item
+Maximum number of @code{GEMINI} files a server can manage is 1012. Each
+table consumes one file; an additional file is consumed if the table has
+any indexes defined on it.
+
+@item
+Maximum size of BLOBs is 16MB.
+
+@item
+@code{FULLTEXT} indexes are not supported with @code{GEMINI} tables.
+
+@item
+There is no support for multi-component @code{AUTO_INCREMENT} fields
+that provide alternating values at the component level. If you try to
+create such a field, @code{GEMINI} will refuse.
+
+@item
+@code{TEMPORARY TABLES} are not supported by @code{GEMINI}. The
+statement @code{CREATE TEMPORARY TABLE ... TYPE=GEMINI} will generate
+the response: @code{ERROR 1005: Can't create table '/tmp/#sqlxxxxx'
+(errno: 0)}.
+
+@item
+@code{FLUSH TABLES} has not been implemented with @code{GEMINI} tables.
+@end itemize
+
+@cindex GEMINI tables, using
+@node Using GEMINI Tables, , GEMINI Overview, GEMINI
+@subsection Using GEMINI Tables
+
+This section explains the various startup options you can use with
+@code{GEMINI} tables, how to backup @code{GEMINI} tables, some
+performance considerations and sample configurations, and a brief
+discussion of when to use @code{GEMINI} tables.
+
+Specifically, the topics covered in this section are:
+
+@itemize @bullet
+@item
+Startup Options
+@item
+Creating @code{GEMINI} Tables
+@item
+Backing Up @code{GEMINI} Tables
+@item
+Using Auto_Increment Columns With @code{GEMINI} Tables
+@item
+Performance Considerations
+@item
+Sample Configurations
+@item
+When To Use @code{GEMINI} Tables
+@end itemize
+
+@cindex GEMINI tables, startup options
+@menu
+* Startup Options::
+* Creating GEMINI Tables::
+* Backing Up GEMINI Tables::
+* Restoring GEMINI Tables::
+* Using Auto_Increment Columns With GEMINI Tables::
+* Performance Considerations::
+* Sample Configurations::
+* When To Use GEMINI Tables::
+@end menu
+
+@node Startup Options, Creating GEMINI Tables, Using GEMINI Tables, Using GEMINI Tables
+@subsubsection Startup Options
+
+The table below lists options to mysqld that can be used to change the
+behavior of @code{GEMINI} tables.
+
+@multitable @columnfractions .40 .60
+@item @sc{Option} @tab @sc{Description}
+
+@item
+@code{--default-table-type=gemini}
+@tab Sets the default table handler to be @code{GEMINI}. All create
+table statements will create @code{GEMINI} tables unless otherwise
+specified with @code{TYPE=@var{table-type}}. As noted above, there is
+currently a limitation with @code{TEMPORARY} tables using @code{GEMINI}.
+
+@item
+@code{--gemini-flush-log-at-commit}
+@tab Forces the recovery log buffers to be flushed after every
+commit. This can have a serious performance penalty, so use with
+caution.
+
+@item
+@code{--gemini-recovery=FULL | NONE | FORCE}
+@tab Sets the recovery mode. Default is @code{FULL}. @code{NONE} is
+useful for performing repeatable batch operations because the updates
+are not recorded in the recovery log. @code{FORCE} skips crash recovery
+upon startup; this corrupts the database, and should be used in
+emergencies only.
+
+@item
+@code{--gemini-unbuffered-io}
+@tab All database writes bypass the OS cache. This can provide a
+performance boost on heavily updated systems where most of the dataset
+being worked on is cached in memory with the @code{gemini_buffer_cache}
+parameter.
+
+@item
+@code{--O gemini_buffer_cache=size}
+@tab Amount of memory to allocate for database buffers, including Index
+and Record information. It is recommended that this number be 10% of the
+total size of all @code{GEMINI} tables. Do not exceed amount of memory
+on the system!
+
+@item
+@code{--O gemini_connection_limit=#}
+@tab Maximum number of connections to @code{GEMINI}; default is
+@code{100}. Each connection consumes about 1K of memory.
+
+@item
+@code{--O gemini_io_threads=#}
+@tab Number of background I/O threads; default is @code{2}. Increase the
+number when using @code{--gemini-unbuffered-io}
+
+@item
+@code{--O gemini_lock_table_size=#}
+@tab Sets the maximum number of concurrent locks; default is 4096. Using
+@code{SET [ GLOBAL | SESSION ] TRANSACTION ISOLATION = ...} will
+determine how long a program will hold row locks.
+
+@item
+@code{--O gemini_lock_wait_timeout=seconds}
+@tab Number of seconds to wait for record locks when performing queries;
+default is 10 seconds. Using @code{SET [ GLOBAL | SESSION ] TRANSACTION
+ISOLATION = ...} will determine how long a program will hold row locks.
+
+@item
+@code{--skip-gemini}
+@tab Do not use @code{GEMINI}. If you use @code{--skip-gemini}, @strong{MySQL}
+will not initialize the @code{GEMINI} table handler, saving memory; you
+cannot use @code{GEMINI} tables if you use @code{--skip-gemini}.
+
+@item
+@code{--transaction-isolation=READ-UNCOMMITTED | READ-COMMITTED | REPEATABLE-READ | SERIALIZABLE}
+@tab Sets the GLOBAL transaction isolation level for all users that
+connect to the server; can be overridden with the SET ISOLATION LEVEL
+statement.
+@end multitable
+
+@cindex GEMINI tables, creating
+@node Creating GEMINI Tables, Backing Up GEMINI Tables, Startup Options, Using GEMINI Tables
+@subsubsection Creating GEMINI Tables
+
+@code{GEMINI} tables can be created by either using the @code{CREATE
+TABLE} syntax or the @code{ALTER TABLE} syntax.
+
+@itemize @bullet
+@item
+The syntax for creating a @code{GEMINI} table is:
+
+@example
+CREATE TABLE @var{table-name} (....) TYPE=GEMINI;
+@end example
+
+@item
+The syntax to convert a table to @code{GEMINI} is:
+
+@example
+ALTER TABLE @var{table-name} TYPE=GEMINI;
+@end example
+@end itemize
+
+@xref{Tutorial}, for more information on how to create and use
+@code{MySQL} tables.
+
+@cindex GEMINI tables, backing up
+@node Backing Up GEMINI Tables, Restoring GEMINI Tables, Creating GEMINI Tables, Using GEMINI Tables
+@subsubsection Backing Up GEMINI Tables
+
+@code{GEMINI} supports both @code{BACKUP TABLE} and @code{RESTORE TABLE}
+syntax. To learn more about how to use @code{BACKUP} and @code{RESTORE},
+see @ref{BACKUP TABLE} and @ref{RESTORE TABLE}.
+
+To backup @code{GEMINI} tables outside of the @code{MySQL} environment,
+you must first shut down the @code{MySQL} server. Once the server is
+shut down, you can copy the files associated with @code{GEMINI} to a
+different location. The files that make up the @code{GEMINI} table
+handler are:
+
+@itemize @bullet
+@item
+All files associated with a table with a @code{.gmd} extension below the
+@code{$DATADIR} directory. Such files include @code{@var{table}.gmd},
+@code{@var{table}.gmi}, and @code{@var{table}.frm}
+@item
+@code{gemini.db} in the @code{$DATADIR} directory
+@item
+@code{gemini.rl} in the @code{$DATADIR} directory
+@item
+@code{gemini.lg} in the @code{$DATADIR} directory
+@end itemize
+
+All the @code{GEMINI} files must be copied together. You can not copy
+just the @code{.gmi} and @code{.gmd} files to a different
+@code{$DATADIR} and have them become part of a new database. You can
+copy an entire @code{$DATADIR} directory to another location and start a
+@strong{MySQL} server using the new @code{$DATADIR}.
+
+@cindex GEMINI tables, restoring
+@node Restoring GEMINI Tables, Using Auto_Increment Columns With GEMINI Tables, Backing Up GEMINI Tables, Using GEMINI Tables
+@subsubsection Restoring GEMINI Tables
+
+To restore @code{GEMINI} tables outside of the @code{MySQL} environment,
+you must first shut down the @code{MySQL} server. Once the server is
+shut down, you can remove all @code{GEMINI} files in the target
+@code{$DATADIR} and then copy the files previously backed up into the
+@code{$DATADIR} directory.
+
+As mentioned above, the files that make up the @code{GEMINI} table
+handler are:
+
+@itemize @bullet
+@item
+All files associated with a table with a @code{.gmd} extension below the
+@code{$DATADIR} directory. Such files include @code{@var{table}.gmd},
+@code{@var{table}.gmi}, and @code{@var{table}.frm}
+@item
+@code{gemini.db} in the @code{$DATADIR} directory
+@item
+@code{gemini.rl} in the @code{$DATADIR} directory
+@item
+@code{gemini.lg} in the @code{$DATADIR} directory
+@end itemize
+
+When restoring a table, all the @code{GEMINI} files must be copied
+together. You can not restore just the @code{.gmi} and @code{.gmd}
+files.
+
+@cindex GEMINI tables, auto_increment
+@node Using Auto_Increment Columns With GEMINI Tables, Performance Considerations, Restoring GEMINI Tables, Using GEMINI Tables
+@subsubsection Using Auto_Increment Columns With GEMINI Tables
+
+As mentioned previously, @code{GEMINI} tables support row-level and
+table-level locking to increase concurrency in applications and to allow
+reading of tables without locking for maximum concurrency in heavy
+update environments. This feature has several implications when working
+with @code{auto_increment} tables.
+
+In @code{MySQL}, when a column is defined as an @code{auto_increment}
+column, and a row is inserted into the table with a @code{NULL} for the
+column, the @code{auto_increment} column is updated to be 1 higher than
+the highest value in the column.
+
+With @code{MyISAM} tables, the @code{auto_increment} function is
+implemented by looking in the index and finding the highest value and
+adding 1 to it. This is possible because the entire @code{ISAM} table is
+locked during the update period and the increment value is therefore
+guaranteed to not be changing.
+
+With @code{GEMINI} tables, the @code{auto_increment} function is
+implemented by maintaining a counter in a separate location from the
+table data. Instead of looking at the highest value in the table index,
+@code{GEMINI} tables look at this separately maintained counter. This
+means that in a transactional model, unlike the bottleneck inherent in
+the @code{MyISAM} approach, @code{GEMINI} users do @b{not} have to wait
+until the transaction that added the last value either commits or
+rolls back before looking at the value.
+
+Two side-effects of the @code{GEMINI} implementation are:
+
+@itemize @bullet
+@item
+If an insert is done where the column with the @code{auto_increment} is
+specified, and this specified value is the highest value, @code{MyISAM}
+uses it as its @code{auto_increment} value, and every subsequent insert
+is based on this. By contrast, @code{GEMINI} does not use this value,
+but instead uses the value maintained in the separate @code{GEMINI}
+counter location.
+
+@item
+To set the counter to a specific value, you can use @code{SET
+insert_id=#} and insert a new row in the table. However, as a general
+rule, values should not be inserted into an @code{auto_increment}
+column; the database manager should be maintaining this field, not the
+application. @code{SET insert_id} is a recovery mechanism that should be
+used in case of error only.
+@end itemize
+
+Note that if you delete the row containing the maximum value for an
+@code{auto_increment} column, the value will be reused with a
+@code{GEMINI} table but not with a @code{MyISAM} table.
+
+See @ref{CREATE TABLE} for more information about creating
+@code{auto_increment} columns.
+
+@cindex GEMINI tables, performance considerations
+@node Performance Considerations, Sample Configurations, Using Auto_Increment Columns With GEMINI Tables, Using GEMINI Tables
+@subsubsection Performance Considerations
+
+In addition to designing the best possible application, configuration of
+the data and the server startup parameters need to be considered. How
+the hardware is being used can have a dramatic effect on how fast the
+system will respond to queries. Disk Drives and Memory must both be
+considered.
+
+@noindent
+@strong{Disk Drives}
+
+For best performance, you want to spread the data out over as many disks
+as possible. Using RAID 10 striping works very well. If there are a lot of
+updates then the recovery log (@code{gemini.rl}) should be on a
+relatively quiet disk drive.
+
+To spread the data out without using RAID 10, you can do the following:
+
+@itemize @bullet
+@item
+Group all the tables into three categories: Heavy Use, Moderate Use,
+Light Use.
+
+@item
+Take the number of disk drives available and use a round-robin approach
+to the three categories grouping the tables on a disk drive. The result
+will be an equal distribution of Heavy/Moderate/Light tables assigned to
+each disk drive.
+
+@item
+Once the tables have been converted to @code{GEMINI} by using the
+@code{ALTER TABLE <name> TYPE=GEMINI} statements, move (@code{mv}) the
+@code{.gmd} and @code{.gmi} files to a different disk drive and link
+(@code{ln -s}) them back to the original directory where the @code{.frm}
+file resides.
+
+@item
+Finally, move the @code{gemini.rl} file to its quiet disk location and link
+the file back to the @code{$DATADIR} directory.
+@end itemize
+
+@noindent
+@strong{Memory}
+
+The more data that can be placed in memory the faster the access to the
+data. Figure out how large the @code{GEMINI} data is by adding up the
+@code{.gmd} and @code{.gmi} file sizes. If you can, put at least 10% of
+the data into memory. You allocate memory for the rows and indexes by
+using the @code{gemini_buffer_cache} startup parameter. For example:
+
+@example
+mysqld -O gemini_buffer_cache=800M
+@end example
+
+@noindent
+would allocate 800 MB of memory for the @code{GEMINI} buffer cache.
+
+@cindex GEMINI tables, sample configurations
+@node Sample Configurations, When To Use GEMINI Tables, Performance Considerations, Using GEMINI Tables
+@subsubsection Sample Configurations
+
+Based on the performance considerations above, we can look at some
+examples for how to get the best performance out of the system when
+using @code{GEMINI} tables.
+
+@multitable @columnfractions .30 .70
+@item @sc{Hardware} @tab @sc{Configuration}
+@item
+One CPU, 128MB memory, one disk drive
+@tab Allocate 80MB of memory for reading and updating @code{GEMINI}
+tables by starting the mysqld server with the following option:
+
+@example
+-O gemini_buffer_cache=80M
+@end example
+
+@item
+Two CPUs, 512MB memory, four disk drives
+@tab Use RAID 10 to stripe the data across all available disks, or use
+the method described in the performance considerations section,
+above. Allocate 450MB of memory for reading/updating @code{GEMINI}
+tables:
+
+@example
+-O gemini_buffer_cache=450M
+@end example
+@end multitable
+
+@cindex GEMINI tables, when to use
+@node When To Use GEMINI Tables, , Sample Configurations, Using GEMINI Tables
+@subsubsection When To Use GEMINI Tables
+
+Because the @code{GEMINI} table handler provides crash recovery and
+transaction support, there is extra overhead that is not found in other
+non-transaction safe table handlers. Here are some general guidelines
+for when to employ @code{GEMINI} and when to use other non-transaction
+safe tables (@code{NTST}).
+
+Note that in the following table, you could instead of GEMINI use
+InnoDB or BDB tables.
+
+@multitable @columnfractions .30 .25 .45
+@item
+@sc{Access Trends} @tab @sc{Table Type} @tab @sc{Reason}
+@item
+Read-only
+@tab @code{NTST}
+@tab Less overhead and faster
+@item
+Critical data
+@tab @code{GEMINI}
+@tab Crash recovery protection
+@item
+High concurrency
+@tab @code{GEMINI}
+@tab Row-level locking
+@item
+Heavy update
+@tab @code{GEMINI}
+@tab Row-level locking
+@end multitable
+
+The table below shows how a typical application schema could be defined.
+
+@multitable @columnfractions .15 .30 .25 .30
+@item
+@sc{Table} @tab @sc{Contents} @tab @sc{Table Type} @tab @sc{Reason}
+@item
+account
+@tab Customer account data
+@tab @code{GEMINI}
+@tab Critical data, heavy update
+@item
+order
+@tab Orders for a customer
+@tab @code{GEMINI}
+@tab Critical data, heavy update
+@item
+orderline
+@tab Orderline detail for an order
+@tab @code{GEMINI}
+@tab Critical data, heavy update
+@item
+invdesc
+@tab Inventory description
+@tab @code{NTST}
+@tab Read-only, frequent access
+@item
+salesrep
+@tab Sales rep information
+@tab @code{NTST}
+@tab Infrequent update
+@item
+inventory
+@tab Inventory information
+@tab @code{GEMINI}
+@tab High concurrency, critical data
+@item
+config
+@tab System configuration
+@tab @code{NTST}
+@tab Read-only
+@end multitable
@cindex tutorial
@cindex terminal monitor, defined
@@ -28272,8 +29429,9 @@ your changes with the new @file{errmsg.txt} file.
@node Character sets, Adding character set, Languages, Languages
@subsection The Character Set Used for Data and Sorting
-By default, @strong{MySQL} uses the ISO-8859-1 (Latin1) character
-set. This is the character set used in the USA and western Europe.
+By default, @strong{MySQL} uses the ISO-8859-1 (Latin1) character set
+with sorting according to Swedish/Finnish. This is the character set suitable
+in the USA and western Europe.
All standard @strong{MySQL} binaries are compiled with
@code{--with-extra-charsets=complex}. This will add code to all
@@ -28285,12 +29443,12 @@ The character set determines what characters are allowed in names and how
things are sorted by the @code{ORDER BY} and @code{GROUP BY} clauses of
the @code{SELECT} statement.
-You can change the character set with the
-@code{--default-character-set} option when you start the server.
-The character sets available depend on the @code{--with-charset=charset}
-option to @code{configure}, and the character set configuration files
-listed in @file{SHAREDIR/charsets/Index}.
-@xref{Quick install}.
+You can change the character set with the @code{--default-character-set}
+option when you start the server. The character sets available depend
+on the @code{--with-charset=charset} and @code{--with-extra-charset=
+list-of-charset | complex | all} options to @code{configure}, and the
+character set configuration files listed in
+@file{SHAREDIR/charsets/Index}. @xref{configure options}.
If you change the character set when running @strong{MySQL} (which may
also change the sort order), you must run myisamchk -r -q on all
@@ -28823,6 +29981,11 @@ Replication will be done correctly with @code{AUTO_INCREMENT},
@code{RAND()}. You can, for example, use @code{UNIX_TIMESTAMP()} for the
argument to @code{RAND()}.
@item
+You have to use the same character set (@code{--default-character-set})
+on the master and the slave. If not, you may get duplicate key errors on
+the slave, because a key that is regarded as unique on the master may
+not be that in the other character set.
+@item
@code{LOAD DATA INFILE} will be handled properly as long as the file
still resides on the master server at the time of update
propagation. @code{LOAD LOCAL DATA INFILE} will be skipped.
@@ -29925,6 +31088,7 @@ are using @code{--skip-locking}
@menu
* Compile and link options:: How compiling and linking affects the speed of MySQL
* Disk issues:: Disk issues
+* Symbolic links:: Using Symbolic Links
* Server parameters:: Tuning server parameters
* Table cache:: How MySQL opens and closes tables
* Creating many tables:: Drawbacks of creating large numbers of tables in the same database
@@ -30037,7 +31201,7 @@ Linux binary is linked statically to get it faster and more portable.
@cindex disk issues
@cindex performance, disk issues
-@node Disk issues, Server parameters, Compile and link options, System
+@node Disk issues, Symbolic links, Compile and link options, System
@subsection Disk Issues
@itemize @bullet
@@ -30115,31 +31279,40 @@ really useful on a database server), you can mount your file systems
with the noatime flag.
@end itemize
-@menu
-* Symbolic links:: Using symbolic links for databases and tables
-@end menu
-
@cindex symbolic links
@cindex links, symbolic
-@cindex databases, symbolic links
-@cindex tables, symbolic links
-@node Symbolic links, , Disk issues, Disk issues
-@subsubsection Using Symbolic Links for Databases and Tables
+@node Symbolic links, Server parameters, Disk issues, System
+@subsection Using Symbolic Links
You can move tables and databases from the database directory to other
locations and replace them with symbolic links to the new locations.
You might want to do this, for example, to move a database to a file
-system with more free space.
+system with more free space or increase the speed of your system by
+spreading your tables to different disks.
+
+The recommended way to do this is to just symlink databases to a
+different disk and only symlink tables as a last resort.
+
+@cindex databases, symbolic links
+@menu
+* Symbolic links to database::
+* Symbolic links to tables::
+@end menu
+
+@node Symbolic links to database, Symbolic links to tables, Symbolic links, Symbolic links
+@subsubsection Using Symbolic Links for Databases
-If @strong{MySQL} notices that a table is symbolically linked, it will
-resolve the symlink and use the table it points to instead. This works
-on all systems that support the @code{realpath()} call (at least Linux
-and Solaris support @code{realpath()})! On systems that don't support
-@code{realpath()}, you should not access the table through the real path
-and through the symlink at the same time! If you do, the table will be
-inconsistent after any update.
+The way to symlink a database is to first create a directory on some
+disk where you have free space and then create a symlink to it from
+the @strong{MySQL} database directory.
+
+@example
+shell> mkdir /dr1/databases/test
+shell> ln -s /dr1/databases/test mysqld-datadir
+@end example
-@strong{MySQL} doesn't that you link one directory to multiple
+@strong{MySQL} doesn't support that you link one directory to multiple
databases. Replacing a database directory with a symbolic link will
work fine as long as you don't make a symbolic link between databases.
Suppose you have a database @code{db1} under the @strong{MySQL} data
@@ -30171,11 +31344,82 @@ On Windows you can use internal symbolic links to directories by compiling
@strong{MySQL} with @code{-DUSE_SYMDIR}. This allows you to put different
databases on different disks. @xref{Windows symbolic links}.
+@cindex databases, symbolic links
+@node Symbolic links to tables, , Symbolic links to database, Symbolic links
+@subsubsection Using Symbolic Links for Tables
+
+Before @strong{MySQL} 4.0 you should not symlink tables unless you are
+very careful with them. The problem is that if you run @code{ALTER
+TABLE}, @code{REPAIR TABLE} or @code{OPTIMIZE TABLE} on a symlinked
+table, the symlinks will be removed and replaced by the original
+files. This happens because the above command works by creating a
+temporary file in the database directory and when the command is
+complete, replacing the original file with the temporary file.
+
+You should not symlink tables on systems that don't have a fully
+working @code{realpath()} call. (At least Linux and Solaris support
+@code{realpath()})
+
+In @strong{MySQL} 4.0 symlinks are only fully supported for @code{MyISAM}
+tables. For other table types you will probably get strange problems
+when doing any of the above mentioned commands.
+
+The handling of symbolic links in @strong{MySQL} 4.0 works the following
+way (this is mostly relevant only for @code{MyISAM} tables).
+
+@itemize @bullet
+@item
+In the data directory you will always have the table definition file
+and the data/index files.
+@item
+You can symlink the index file and the data file to different directories
+independent of the other.
+@item
+The symlinking can be done from the operating system (if @code{mysqld} is
+not running) or with the @code{INDEX/DATA DIRECTORY="path-to-dir"} command
+in @code{CREATE TABLE}. @xref{CREATE TABLE}.
+@item
+@code{myisamchk} will not replace a symlink with the index/file but
+work directly on the files the symlinks points to. Any temporary files
+will be created in the same directory where the data/index file is.
+@item
+When you drop a table that is using symlinks, both the symlink and the
+file the symlink points to is dropped. This is a good reason why you
+should NOT run @code{mysqld} as root or allow persons to have write
+access to the @strong{MySQL} database directories.
+@item
+If you rename a table with @code{ALTER TABLE RENAME} and you don't change
+database, the symlink in the database directory will be renamed to the new
+name and the data/index file will be renamed accordingly.
+@item
+If you use @code{ALTER TABLE RENAME} to move a table to another database,
+then the table will be moved to the other database directory and the old
+symlinks and the files they pointed to will be deleted.
+@item
+If you are not using symlinks you should use the @code{--skip-symlink}
+option to @code{mysqld} to ensure that no one can drop or rename a file
+outside of the @code{mysqld} data directory.
+@end itemize
+
+Things that are not yet supported:
+
+@cindex TODO, symlinks
+@itemize @bullet
+@item
+@code{ALTER TABLE} ignores all @code{INDEX/DATA DIRECTORY="path"} options.
+@item
+@code{CREATE TABLE} doesn't report if the table has symbolic links.
+@item
+@code{mysqldump} doesn't include the symbolic links information in the output.
+@item
+@code{BACKUP TABLE} and @code{RESTORE TABLE} doesn't use symbolic links.
+@end itemize
+
@cindex parameters, server
@cindex @code{mysqld} server, buffer sizes
@cindex buffer sizes, @code{mysqld} server
@cindex startup parameters
-@node Server parameters, Table cache, Disk issues, System
+@node Server parameters, Table cache, Symbolic links, System
@subsection Tuning Server Parameters
You can get the default buffer sizes used by the @code{mysqld} server
@@ -31382,7 +32626,6 @@ Since @strong{MySQL 4.0} you can also use
@code{ALTER TABLE tbl_name ENABLE KEYS} instead of
@code{myisamchk -r -q /path/to/db/tbl_name}. This way you can also skip
@code{FLUSH TABLES} steps.
-
@item
You can speed up insertions by locking your tables:
@@ -32151,7 +33394,7 @@ with the @code{-max} prefix. This makes it very easy to test out a
another @code{mysqld} binary in an existing installation. Just
run @code{configure} with the options you want and then install the
new @code{mysqld} binary as @code{mysqld-max} in the same directory
-where your old @code{mysqld} binary is. @xref{safe_mysqld}.
+where your old @code{mysqld} binary is. @xref{safe_mysqld, , @code{safe_mysqld}}.
The @code{mysqld-max} RPM uses the above mentioned @code{safe_mysqld}
feature. It just installs the @code{mysqld-max} executable and
@@ -32164,7 +33407,7 @@ binaries includes:
@multitable @columnfractions .4 .3 .3
@item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB}
@item AIX 4.3 @tab N @tab Y
-@item HPUX 11.0 @tab N @tab Y
+@item HP-UX 11.0 @tab N @tab Y
@item Linux-Alpha @tab N @tab Y
@item Linux-Intel @tab Y @tab Y
@item Linux-Ia64 @tab N @tab Y
@@ -32399,7 +33642,7 @@ MY_PWD=`pwd` Check if we are starting this relative (for the binary
release) if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys
-a -x ./bin/mysqld
--------------------------------------------------------------------------
-@xref{safe_mysqld}.
+@xref{safe_mysqld, , @code{safe_mysqld}}.
@end example
The above test should be successful, or you may encounter problems.
@item
@@ -32927,7 +34170,7 @@ server). The dump will contain SQL statements to create the table
and/or populate the table.
If you are doing a backup on the server, you should consider using
-the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy}.
+the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
@example
shell> mysqldump [OPTIONS] database [tables]
@@ -33760,11 +35003,16 @@ After you have installed the packed table into the @strong{MySQL} database
directory you should do @code{mysqladmin flush-tables} to force @code{mysqld}
to start using the new table.
+If you want to unpack a packed table, you can do this with the
+@code{--unpack} option to @code{isamchk} or @code{myisamchk}.
+
+
@cindex installation maintenance
@cindex maintaining, tables
@cindex tables, maintaining
@cindex databases, maintaining
-@cindex @code{mysiamchk}
+@cindex @code{myisamchk}
+@cindex @code{mysqlcheck}
@cindex crash, recovery
@cindex recovery, from crash
@node Maintenance, Adding functions, Tools, Top
@@ -33772,6 +35020,7 @@ to start using the new table.
@menu
* Table maintenance:: Table maintenance and crash recovery
+* Using mysqlcheck:: Using mysqlcheck for maintenance and recovery
* Maintenance regimen:: Setting up a table maintenance regimen
* Table-info:: Getting information about a table
* Crash recovery:: Using @code{myisamchk} for crash recovery
@@ -33782,7 +35031,7 @@ This chapter covers what you should know about maintaining a @strong{MySQL}
distribution. You will learn how to care for your tables on a regular
basis, and what to do when disaster strikes.
-@node Table maintenance, Maintenance regimen, Maintenance, Maintenance
+@node Table maintenance, Using mysqlcheck, Maintenance, Maintenance
@section Using @code{myisamchk} for Table Maintenance and Crash Recovery
Starting with @strong{MySQL} Version 3.23.13, you can check MyISAM
@@ -34044,7 +35293,8 @@ This can be used to get faster inserts! Deactivated indexes can be
reactivated by using @code{myisamchk -r}. keys.
@item -l or --no-symlinks
Do not follow symbolic links. Normally @code{myisamchk} repairs the
-table a symlink points at.
+table a symlink points at. This option doesn't exist in MySQL 4.0,
+as MySQL 4.0 will not remove symlinks during repair.
@item -r or --recover
Can fix almost anything except unique keys that aren't unique
(which is an extremely unlikely error with ISAM/MyISAM tables).
@@ -34164,9 +35414,132 @@ This space is allocated on the temporary disk (specified by @code{TMPDIR} or
If you have a problem with disk space during repair, you can try to use
@code{--safe-recover} instead of @code{--recover}.
+@node Using mysqlcheck, Maintenance regimen, Table maintenance, Maintenance
+@section Using @code{mysqlcheck} for Table Maintenance and Crash Recovery
+
+Since @strong{MySQL} version 3.23.38 you will be able to use a new
+checking and repairing tool for @code{MyISAM} tables. The difference to
+@code{myisamchk} is that @code{mysqlcheck} should be used when the
+@code{mysqld} server is running, where as @code{myisamchk} should be used
+when it is not. The benefit is that you no longer have to take the
+server down for checking or repairing your tables.
+
+@code{mysqlcheck} uses @strong{MySQL} server commands @code{CHECK},
+@code{REPAIR}, @code{ANALYZE} and @code{OPTIMIZE} in a convenient way
+for the user.
+
+There are three alternative ways to invoke @code{mysqlcheck}:
+
+@example
+shell> mysqlcheck [OPTIONS] database [tables]
+shell> mysqlcheck [OPTIONS] --databases DB1 [DB2 DB3...]
+shell> mysqlcheck [OPTIONS] --all-databases
+@end example
+
+So it can be used in a similar way as @code{mysqldump} when it
+comes to what databases and tables you want to choose.
+
+@code{mysqlcheck} does have a special feature compared to the other
+clients; the default behavior, checking tables (-c), can be changed by
+renaming the binary. So if you want to have a tool that repairs tables
+by default, you should just copy @code{mysqlcheck} to your harddrive
+with a new name, @code{mysqlrepair}, or alternatively make a symbolic
+link to @code{mysqlcheck} and name the symbolic link
+@code{mysqlrepair}. If you invoke @code{mysqlrepair} now, it will repair
+tables by default.
+
+The names that you can use to change @code{mysqlcheck} default behavior
+are here:
+
+@example
+mysqlrepair: The default option will be -r
+mysqlanalyze: The default option will be -a
+mysqloptimize: The default option will be -o
+@end example
+
+The options available for @code{mysqlcheck} are listed here, please
+check what your version supports with @code{mysqlcheck --help}.
+
+@table @code
+@item -A, --all-databases
+Check all the databases. This will be the same as --databases with all
+databases selected.
+@item -1, --all-in-1
+Instead of making one query for each table, execute all queries in 1
+query separately for each database. Table names will be in a comma
+separated list.
+@item -a, --analyze
+Analyze given tables.
+@item --auto-repair
+If a checked table is corrupted, automatically fix it. Repairing will be
+done after all tables have been checked, if corrupted ones were found.
+@item -#, --debug=...
+Output debug log. Often this is 'd:t:o,filename'
+@item --character-sets-dir=...
+Directory where character sets are
+@item -c, --check
+Check table for errors
+@item -C, --check-only-changed
+Check only tables that have changed since last check or haven't been
+closed properly.
+@item --compress
+Use compression in server/client protocol.
+@item -?, --help
+Display this help message and exit.
+@item -B, --databases
+To check several databases. Note the difference in usage; In this case
+no tables are given. All name arguments are regarded as database names.
+@item --default-character-set=...
+Set the default character set
+@item -F, --fast
+Check only tables that haven't been closed properly
+@item -f, --force
+Continue even if we get an sql-error.
+@item -e, --extended
+If you are using this option with CHECK TABLE, it will ensure that the
+table is 100 percent consistent, but will take a long time.
+
+If you are using this option with REPAIR TABLE, it will run an extended
+repair on the table, which may not only take a long time to execute, but
+may produce a lot of garbage rows also!
+@item -h, --host=...
+Connect to host.
+@item -m, --medium-check
+Faster than extended-check, but only finds 99.99 percent of all
+errors. Should be good enough for most cases.
+@item -o, --optimize
+Optimize table
+@item -p, --password[=...]
+Password to use when connecting to server. If password is not given
+it's solicited on the tty.
+@item -P, --port=...
+Port number to use for connection.
+@item -q, --quick
+If you are using this option with CHECK TABLE, it prevents the check
+from scanning the rows to check for wrong links. This is the fastest
+check.
+
+If you are using this option with REPAIR TABLE, it will try to repair
+only the index tree. This is the fastest repair method for a table.
+@item -r, --repair
+Can fix almost anything except unique keys that aren't unique.
+@item -s, --silent
+Print only error messages.
+@item -S, --socket=...
+Socket file to use for connection.
+@item --tables
+Overrides option --databases (-B).
+@item -u, --user=#
+User for login if not current user.
+@item -v, --verbose
+Print info about the various stages.
+@item -V, --version
+Output version information and exit.
+@end table
+
@cindex maintaining, tables
@cindex tables, maintenance regimen
-@node Maintenance regimen, Table-info, Table maintenance, Maintenance
+@node Maintenance regimen, Table-info, Using mysqlcheck, Maintenance
@section Setting Up a Table Maintenance Regimen
Starting with @strong{MySQL} Version 3.23.13, you can check MyISAM
@@ -36966,6 +38339,9 @@ option.
@node Communication errors, Full table, Packet too large, Common errors
@subsection Communication Errors / Aborted Connection
+Starting with @code{MySQL 3.23.40} you only get the @code{Aborted
+connection} error if you start @code{mysqld} with @code{--warnings}.
+
If you find errors like the following in your error log.
@example
@@ -37355,11 +38731,15 @@ user and use the @code{--user=user_name} option. @code{mysqld} will switch
to run as the Unix user @code{user_name} before accepting any connections.
@item
-If you are using the @code{mysql.server} script to start @code{mysqld} when
-the system is rebooted, you should edit @code{mysql.server} to use @code{su}
-to run @code{mysqld} as user @code{user_name}, or to invoke @code{mysqld}
-with the @code{--user} option. (No changes to @code{safe_mysqld} are
-necessary.)
+To start the server as the given user name automatically at system
+startup time, add a @code{user} line that specifies the user name to
+the @code{[mysqld]} group of the @file{/etc/my.cnf} option file or the
+@file{my.cnf} option file in the server's data directory. For example:
+
+@example
+[mysqld]
+user=user_name
+@end example
@end enumerate
At this point, your @code{mysqld} process should be running fine and dandy as
@@ -37486,6 +38866,8 @@ shell> perror 23
File table overflow
shell> perror 24
Too many open files
+shell> perror 11
+Resource temporarily unavailable
@end example
The problem here is that @code{mysqld} is trying to keep open too many
@@ -37823,6 +39205,12 @@ Post the test file using @code{mysqlbug} to @email{mysql@@lists.mysql.com}.
@node ALTER TABLE problems, Change column order, No matching rows, Problems
@section Problems with @code{ALTER TABLE}.
+@code{ALTER TABLE} changes a table to the current character set.
+If you get a duplicate key error during @code{ALTER TABLE}, then the cause
+is either that the new character set maps two keys to the same value
+or that the table is corrupted, in which case you should run
+@code{REPAIR TABLE} on the table.
+
If @code{ALTER TABLE} dies with an error like this:
@example
@@ -37997,11 +39385,12 @@ database directory. The @code{FLUSH TABLE} is needed to ensure that
the all active index pages is written to disk before you start the backup.
If you want to make a SQL level backup of a table, you can use
-@code{SELECT INTO OUTFILE} or @code{BACKUP
-TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}.
+@code{SELECT INTO OUTFILE} or @code{BACKUP TABLE}. @xref{SELECT}.
+@xref{BACKUP TABLE}.
Another way to back up a database is to use the @code{mysqldump} program or
-the @code{mysqlhotcopy script}. @xref{mysqldump}. @xref{mysqlhotcopy}.
+the @code{mysqlhotcopy script}. @xref{mysqldump, , @code{mysqldump}}.
+@xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
@enumerate
@item
@@ -38093,7 +39482,8 @@ be an Internet service provider that wants to provide independent
If you want to run multiple servers, the easiest way is to compile the servers
with different TCP/IP ports and socket files so they are not
-both listening to the same TCP/IP port or socket file. @xref{mysqld_multi}.
+both listening to the same TCP/IP port or socket file. @xref{mysqld_multi, ,
+@code{mysqld_multi}}.
Assume an existing server is configured for the default port number and
socket file. Then configure the new server with a @code{configure} command
@@ -38240,7 +39630,7 @@ switch to a new log) by executing @code{FLUSH LOGS}. @xref{FLUSH}.
@code{mysqld} writes all errors to the stderr, which the
@code{safe_mysqld} script redirects to a file called
@code{'hostname'.err}. (On Windows, @code{mysqld} writes this directly
-to @file{mysql.err}).
+to @file{\mysql\data\mysql.err}).
This contains information indicating when @code{mysqld} was started and
stopped and also any critical errors found when running. If @code{mysqld}
@@ -40463,7 +41853,7 @@ query string.)
If you want to know if the query should return a result set or not, you can
use @code{mysql_field_count()} to check for this.
-@xref{mysql_field_count, @code{mysql_field_count}}.
+@xref{mysql_field_count, , @code{mysql_field_count}}.
@subsubheading Return Values
@@ -40525,7 +41915,7 @@ specified explicitly.
@item
The @code{passwd} parameter contains the password for @code{user}. If
@code{passwd} is @code{NULL}, only entries in the @code{user} table for the
-user that have a blank password field will be checked for a match. This
+user that have a blank (empty) password field will be checked for a match. This
allows the database administrator to set up the @strong{MySQL} privilege
system in such a way that users get different privileges depending on whether
or not they have specified a password.
@@ -40558,7 +41948,7 @@ of the following flags in very special circumstances:
@code{mysqld} to be more ODBC-friendly.
@item @code{CLIENT_COMPRESS} @tab Use compression protocol.
@item @code{CLIENT_FOUND_ROWS} @tab Return the number of found (matched) rows, not the number of affected rows.
-@item @code{CLIENT_IGNORE_SPACE} $tab Allow spaces after function names. Makes all functions names reserved words.
+@item @code{CLIENT_IGNORE_SPACE} @tab Allow spaces after function names. Makes all functions names reserved words.
@item @code{CLIENT_INTERACTIVE} @tab Allow @code{interactive_timeout} seconds (instead of @code{wait_timeout} seconds) of inactivity before closing the connection.
@item @code{CLIENT_NO_SCHEMA} @tab Don't allow the @code{db_name.tbl_name.col_name} syntax. This is for ODBC. It causes the parser to generate an error if you use that syntax, which is useful for trapping bugs in some ODBC programs.
@item @code{CLIENT_ODBC} @tab The client is an ODBC client. This changes
@@ -41330,6 +42720,9 @@ For more information on Object Oriented Programming
@uref{http://language.perl.com/info/documentation.html}
@end example
+Note that if you want to use transactions with Perl, you need to have
+@code{Msql-Mysql-modules} version 1.2216 or newer.
+
Installation instructions for @strong{MySQL} Perl support are given in
@ref{Perl support}.
@@ -42395,53 +43788,518 @@ users.
@item
@end table
-@cindex PostgreSQL, comparison
+@cindex PostgreSQL/MySQL, overview
@node Compare PostgreSQL, , Compare mSQL, Comparisons
@section How MySQL Compares to PostgreSQL
+When reading the following, please note that both products are
+continually evolving. We at @strong{MySQL AB} and the PostgreSQL
+developers are both working on making our respective database as good as
+possible, so we are both a serious choice to any commercial database.
+
+The following comparison is made by us at MySQL AB. We have tried to be
+as accurate and fair as possible, but because we don't have a full
+knowledge of all PostgreSQL features while we know MySQL thoroughly, we
+may have got some things wrong. We will however correct these when they
+come to our attention.
+
We would first like to note that @code{PostgreSQL} and @strong{MySQL}
-are both widely used products, but their design goals are completely
-different. This means that for some applications @strong{MySQL} is more
-suitable and for others @code{PostgreSQL} is more suitable. When
-choosing which database to use, you should first check if the database's
-feature set is good enough to satisfy your application. If you need
-speed, @strong{MySQL} is probably your best choice. If you need some
-of the extra features that @code{PostgreSQL} can offer, you should use
+are both widely used products, but with different design goals, even if
+we are both striving to be ANSI SQL compatible. This means that for
+some applications @strong{MySQL} is more suitable and for others
+@code{PostgreSQL} is more suitable. When choosing which database to
+use, you should first check if the database's feature set satisfies your
+application. If you need speed, @strong{MySQL} is probably your best
+choice. If you need some of the extra features that only @code{PostgreSQL}
+can offer, you should use @code{PostgreSQL}.
+
+@cindex PostgreSQL/MySQL, strategies
+@menu
+* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies
+* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL
+* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL
+@end menu
+
+@node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL
+@subsection MySQL and PostgreSQL development strategies
+
+When adding things to MySQL we take pride to do an optimal, definite
+solution. The code should be so good that we shouldn't have any need to
+change it in the foreseeable future. We also do not like to sacrifice
+speed for features but instead will do our utmost to find a solution
+that will give maximal throughput. This means that development will take
+a little longer, but the end result will be well worth this. This kind
+of development is only possible because all server code are checked by
+one of a few (currently two) persons before it's included in the
+@strong{MySQL} server.
+
+We at MySQL AB believe in frequent releases to be able to push out new
+features quickly to our users. Because of this we do a new small release
+about every 3 weeks, with a major branch every year. All releases are
+thoroughly tested with our testing tools on a lot of different platforms.
+
+PostgreSQL is based on a kernel with lots of contributors. In this setup
+it makes sense to prioritize adding a lot of new features, instead of
+implementing them optimally, because one can always optimize things
+later if there arises a need for this.
+
+Another big difference between @strong{MySQL} and PostgreSQL is that
+nearly all of the code in the MySQL server is coded by developers who
+are employed by MySQL AB and are still working on the server code. The
+exceptions are the transaction engines and the regexp library.
+
+This is in sharp contrast to the PostgreSQL code where the majority of
+the code is coded by a big group of people with different backgrounds.
+It was only recently that the PostgreSQL developers announced that the
+current developer group had finally had time to take a look at all
+the code in the current PostgreSQL release.
+
+Both of the above development methods have their own merits and drawbacks.
+We here at @strong{MySQL AB} think of course that our model is better
+because our model gives better code consistence, more optimal and
+reusable code and, in our opinion, fewer bugs. Because we are the
+authors of the @strong{MySQL} server code we are better able to
+coordinate new features and releases.
+
+@cindex PostgreSQL/MySQL, features
+@node MySQL-PostgreSQL features, MySQL-PostgreSQL benchmarks, MySQL-PostgreSQL goals, Compare PostgreSQL
+@subsection Featurevise Comparison of MySQL and PostgreSQL
+
+On the @uref{http://www.mysql.com/information/crash-me.php, crash-me}
+page you can find a list of those database constructs and limits that
+one can detect automatically with a program. Note however that a lot of
+the numerical limits may be changed with startup options for respective
+database. The above web page is however extremely useful when you want to
+ensure that your applications works with many different databases or
+when you want to convert your application from one database to another.
+
+@strong{MySQL} offers the following advantages over PostgreSQL:
+
+@itemize @bullet
+@item
+@code{MySQL} is generally much faster than PostgreSQL.
+@xref{MySQL-PostgreSQL benchmarks}.
+@item
+Because @strong{MySQL} has a much larger user base than PostgreSQL the
+code is more tested and has historically been more stable than
+PostgreSQL. @strong{MySQL} is much more used in production
+environments than PostgreSQL, mostly thanks to the fact that @strong{MySQL AB},
+former TCX DataKonsult AB, has provided top quality commercial support
+for @strong{MySQL} from the day it was released, whereas until recently
+PostgreSQL was unsupported.
+@item
+@strong{MySQL} works on more platforms than PostgreSQL. @xref{Which OS}.
+@item
+@strong{MySQL} works better on Windows; @strong{MySQL} is running as a
+native windows application (a service on NT/Win2000/WinXP), while
+PostgreSQL is run under the cygwin emulation. We have heard that
+PostgreSQL is not yet that stable on windows but we haven't been able to
+verify this ourselves.
+@item
+@strong{MySQL} has more API to other languages and is supported by more
+programs than PostgreSQL. @xref{Contrib}.
+@item
+@strong{MySQL} works on 24/7 heavy duty systems. In most circumstances
+you never have to run any cleanups on @code{MySQL}. PostgreSQL doesn't
+yet support 24/7 systems because you have to run @code{vacuum()}
+once in a while to reclaim space from @code{UPDATE} and @code{DELETE}
+commands and to perform statistics analyzes that are critical to get
+good performance with PostgreSQL. Vacuum is also needed after adding
+a lot of new rows to a table. On a busy system with lots of changes
+vacuum must be run very frequently, in the worst cases even many times a
+day. During the @code{vacuum()} run, which may take hours if the
+database is big, the database is from a production standpoint
+practically dead. The PostgreSQL team has put fixing this on their TODO,
+but we assume that this is not an easy thing to fix permanently.
+@item
+A working, tested replication feature used by sites like
+@uref{http://finance.yahoo.com, Yahoo finance},
+@uref{http://www.mobile.de/,mobile.de} and
+@uref{http://www.slashdot.org,Slashdot}.
+@item
+Included in the @strong{MySQL} distribution are two different
+test suites (@file{mysql-test-run} and
+@uref{http://www.mysql.com/information/crash-me.php,crash-me}) and a
+benchmark suite. The test system is actively updated with code to test
+each new feature and almost all repeatable bugs that comes to our
+attention. We test @strong{MySQL} with these on a lot of platforms
+before every release. These tests are more sophisticated than anything we
+have seen from PostgreSQL and ensure that the @strong{MySQL} code keeps
+at a high standard.
+@item
+There are far more books in print on @strong{MySQL} than on PostgreSQL.
+O'Reilly, Sams, Que, and New Riders are all major publishers with books
+about MySQL. All @strong{MySQL} features are also documented in the
+@strong{MySQL} on-line manual because when a feature is implemented, the
+@strong{MySQL} developers are required to document it before it's
+included in the source.
+@item
+@strong{MySQL} supports more of the standard ODBC functions than
@code{PostgreSQL}.
+@item
+@strong{MySQL} has a much more sophisticated @code{ALTER TABLE}.
+@item
+@strong{MySQL} has support for tables without transactions for
+applications that need all speed they can get. The tables may be memory
+based,@code{HEAP} tables or disk based @code{MyISAM}. @xref{Table types}.
+@item
+@strong{MySQL} has support for 3 different table handlers that support
+transactions (@code{BDB}, @code{InnoDB} and @code{Gemini}). Because
+every transaction engine performs differently under different
+conditions, this gives the application writer more options to find an
+optimal solution for his/her setup. @xref{Table types}.
+@item
+@code{MERGE} tables gives you a unique way to instantly make a view over
+a set of identical tables and use these as one. This is perfect for
+systems where you have log files that you order for example by month.
+@xref{MERGE}.
+@item
+The option to compress read-only tables, but still have direct access to
+the rows in the table, gives you better performance by minimizing disk
+reads. This is very useful when you are archiving
+things. @xref{myisampack}.
+@item
+@strong{MySQL} has internal support for text search. @xref{Fulltext Search}.
+@item
+You can access many databases from the same connection (depending of course
+on your privileges).
+@item
+@strong{MySQL} is coded from the start with multi-threading while
+PostgreSQL uses processes. Because context switching and access to
+common storage areas is much faster between threads, than are separate
+processes, this gives @strong{MySQL} a big speed advantage in multi-user
+applications and also makes it easier for @strong{MySQL} to take full
+advantage of symmetric multiprocessor systems (SMP).
+@item
+@strong{MySQL} has a much more sophisticated privilege system than
+PostgreSQL. While PostgreSQL only supports @code{INSERT},
+@code{SELECT}, @code{update/delete} grants per user on a database or a
+table @strong{MySQL} allows you to define a full set of different
+privileges on database, table and columns level. @strong{MySQL} also allows
+you to specify the privilege on host+user combinations. @xref{GRANT}.
+@item
+@strong{MySQL} supports a compressed server/client protocol which
+improves performance over slow links.
+@item
+@strong{MySQL} employs the table handler concept and is the only
+relational database we know of built around this concept. This allows
+different low level table types to be swapped into the SQL engine, each
+table type optimized for different performance characteristics.
+@item
+All @code{MySQL} table types (except @strong{InnoDB}) are implemented as
+files (ie: one table per file), which makes it really easy to backup,
+move, delete and even symlink databases and tables when the server is
+down.
+@item
+Tools to repair and optimize @strong{MyISAM} tables (the most common
+@strong{MySQL} table type). A repair tool is only needed when a
+physical corruption of a data file happens, usually from a hardware
+failure. It allows a majority of the data to be recovered.
+@item
+Upgrading @strong{MySQL} is painless. When you are upgrading @strong{MySQL},
+you don't need to dump/restore your data, as you have to do with most
+PostgreSQL upgrades.
+@end itemize
+
+Drawbacks with @strong{MySQL} compared to PostgreSQL:
+
+@itemize @bullet
+@item
+The transaction support in @strong{MySQL} is not yet as well tested as
+PostgreSQL's system.
+@item
+Because @strong{MySQL} uses threads, which are still a moving target on
+many OS, one must either use binaries from
+@uref{http://www.mysql.com/downloads} or carefully follow our
+instructions on
+@uref{http://www.mysql.com/doc/I/n/Installing_source.html} to get an
+optimal binary that works in all cases.
+@item
+Table locking, as used by the non-transactional @code{MyISAM} tables, is
+in many cases faster than page locks, row locks or versioning. The
+drawback however is that if one doesn't take into account how table
+locks works, a single long-running query can block a table for updates
+for a long time. This can usually be avoided when designing the
+application. If not, one can always switch the troublesome table to use one
+of the transactional table types. @xref{Table locking}.
+@item
+With UDF (user defined functions) one can extend @strong{MySQL} with
+both normal SQL functions and aggregates, but this is not as easy or as
+flexible as in PostgreSQL. @xref{Adding functions}.
+@item
+Updates and deletes that go over multiple tables are harder to do in
+@strong{MySQL}. (Will be fixed in @strong{MySQL} 4.0 with multi-table
+@code{DELETE} and multi-table @code{UPDATE} and in @strong{MySQL} 4.1
+with @code{SUB-SELECT})
+@end itemize
+
+PostgreSQL offers currently the following advantages over @strong{MySQL}:
+
+Note that because we know the @strong{MySQL} road map, we have included
+in the following table the version when @strong{MySQL} should support
+this feature. Unfortunately we couldn't do this for previous comparison,
+because we don't know the PostgreSQL roadmap.
+
+@multitable @columnfractions .70 .30
+@item @strong{Feature} @tab @strong{MySQL version}
+@item Subselects @tab 4.1
+@item Foreign keys @tab 4.0 and 4.1
+@item Views. @tab 4.2
+@item Stored procedures in multiple languages @tab 4.1
+@item Extensible type system. @tab Not planned
+@item Unions @tab 4.0.
+@item Full join. @tab 4.0 or 4.1.
+@item Triggers. @tab 4.1
+@item Constraints @tab 4.1
+@item Cursors @tab 4.1 or 4.2
+@item Extensible index types like R-trees @tab R-trees are planned for 4.2
+@item Inherited tables @tab Not planned
+@end multitable
-@code{PostgreSQL} has some more advanced features like user-defined
-types, triggers, rules, and some transaction support (currently it
-has about the same semantics as @strong{MySQL}'s transactions in that the
-transaction is not 100% atomic). However, PostgreSQL lacks many of the
-standard types and functions from ANSI SQL and ODBC. See the @code{crash-me}
-Web page (@uref{http://www.mysql.com/information/crash-me.php}) for a complete
-list of limits and which types and functions are supported or unsupported.
+Other reasons to use PostgreSQL:
-Normally, @code{PostgreSQL} is a magnitude slower than @strong{MySQL}.
-@xref{Benchmarks}. This is due largely to the fact that they have only
-transaction-safe tables and that their transactions system is not as
-sophisticated as Berkeley DB's. In @strong{MySQL} you can decide per
-table if you want the table to be fast or take the speed penalty of
-making it transaction-safe.
+@itemize @bullet
+@item
+Standard usage is in PostgreSQL closer to ANSI SQL in some cases.
+@item
+One can speed up PostgreSQL by coding things as stored procedures.
+@item
+Bigger team of developers that contributes to the server.
+@end itemize
-The most important things that @code{PostgreSQL} supports that @strong{MySQL}
-doesn't yet support:
+Drawbacks with PostgreSQL compared to @strong{MySQL}:
-@table @code
-@item Sub select
-@item Foreign keys
-@item Stored procedures
-@item An extendable type system.
-@item A way to extend the SQL to handle new key types (like R-trees)
-@end table
+@itemize @bullet
+@item
+@code{Vacuum()} makes PostgreSQL hard to use in a 24/7 environment.
+@item
+Only transactional tables.
+@item
+Much slower insert/delete/update.
+@end itemize
+
+For a complete list of drawbacks, you should also examine the first table
+in this section.
-@strong{MySQL}, on the other hand, supports many ANSI SQL constructs
-that @code{PostgreSQL} doesn't support. Most of these can be found at the
-@uref{http://www.mysql.com/information/crash-me.php, @code{crash-me} Web page}.
+@cindex PostgreSQL/MySQL, benchmarks
+@node MySQL-PostgreSQL benchmarks, , MySQL-PostgreSQL features, Compare PostgreSQL
+@subsection Benchmarking MySQL and PostgreSQL
-If you really need the rich type system @code{PostgreSQL} offers and you
-can afford the speed penalty of having to do everything transaction
-safe, you should take a look at @code{PostgreSQL}.
+The only open source benchmark, that we know of, that can be used to
+benchmark @strong{MySQL} and PostgreSQL (and other databases) is our
+own. It can be found at:
+@uref{http://www.mysql.com/information/benchmarks.html}.
+
+We have many times asked the PostgreSQL developers and some PostgreSQL
+users to help us extend this benchmark to make the definitive benchmark
+for databases, but unfortunately we haven't got any feedback for this.
+
+We, the @strong{MySQL} developers, have because of this spent a lot of
+hours to get maximum performance from PostgreSQL for the benchmarks, but
+because we don't know PostgreSQL intimately we are sure that there are
+things that we have missed. We have on the benchmark page documented
+exactly how we did run the benchmark so that it should be easy for
+anyone to repeat and verify our results.
+
+The benchmarks are usually run with and without the @code{--fast}
+option. When run with @code{--fast} we are trying to use every trick
+the server can do to get the code to execute as fast as possible.
+The idea is that the normal run should show how the server would work in
+a default setup and the @code{--fast} run shows how the server would do
+if the application developer would use extensions in the server to make
+his application run faster.
+
+When running with PostgreSQL and @code{--fast} we do a @code{vacuum()}
+between after every major table update and drop table to make the database
+in perfect shape for the following selects. The time for vacuum() is
+measured separately.
+
+When running with PostgreSQL 7.1.1 we could however not run with
+@code{--fast} because during the insert test, the postmaster (the
+PostgreSQL daemon) died and the database was so corrupted that it was
+impossible to restart postmaster. (The details about the machine we run
+the benchmark can be found on the benchmark page). After this happened
+twice, we decided to postpone the @code{--fast} test until next
+PostgreSQL release.
+
+Before going to the other benchmarks we know of, we would like to give
+some background to benchmarks:
+
+It's very easy to write a test that shows ANY database to be best
+database in the world, by just restricting the test to something the
+database is very good at and not test anything that the database is not
+good at. If one then publishes the result as a single figure,
+things are even easier.
+
+This would be like we would measure the speed of @strong{MySQL} compared
+to PostgreSQL by looking at the summary time of the MySQL benchmarks on
+our web page. Based on this @strong{MySQL} would be more than 40 times
+faster than PostgreSQL, something that is of course not true. We could
+make things even worse by just taking the test where PostgreSQL performs
+worst and claim that @strong{MySQL} is more than 2000 times faster than
+PostgreSQL.
+
+The case is that @strong{MySQL} does a lot of optimizations that
+PostgreSQL doesn't do and the other way around. An SQL optimizer is a
+very complex thing and a company could spend years on just making the
+optimizer faster and faster.
+
+When looking at the benchmark results you should look for things that
+you do in your application and just use these results to decide which
+database would be best suited for your application. The benchmark
+results also shows things a particular database is not good at and should
+give you a notion about things to avoid and what you may have to do in
+other ways.
+
+We know of two benchmark tests that claim that PostgreSQL performs
+better than @strong{MySQL}. These both were multi-user tests, a test
+that we here at @strong{MySQL AB} haven't had time to write and include in
+the benchmark suite, mainly because it's a big task to do this in a
+manner that is fair against all databases.
+
+One is the benchmark paid for by
+@uref{http://www.greatbridge.com/about/press.php?content_id=4,Great
+Bridge}.
+
+This is the worst benchmark we have ever seen anyone ever conduct. This
+was not only tuned to only test what PostgreSQL is absolutely best at,
+it was also totally unfair against every other database involved in the
+test.
+
+@strong{NOTE}: We know that not even some of the main PostgreSQL
+developers did like the way Great Bridge conducted the benchmark, so we
+don't blame them for the way the benchmark was made.
+
+This benchmark has been condemned in a lot of postings and newsgroups so
+we will here just briefly repeat some things that were wrong with it.
+
+@itemize @bullet
+@item
+The tests were run with an expensive commercial tool, which makes it
+impossible for an open source company like us to verify the benchmarks,
+or even check how the benchmark was really done. The tool is not even
+a true benchmark tool, but an application/setup testing tool. To refer
+to this as a STANDARD benchmark tool is to stretch the truth a long way.
+@item
+Great Bridge admitted that they had optimized the PostgreSQL database
+(with vacuum() before the test) and tuned the startup for the tests,
+something they hadn't done for any of the other databases involved. To
+say "This process optimizes indexes and frees up disk space a bit. The
+optimized indexes boost performance by some margin". Our benchmarks
+clearly indicates that the difference in running a lot of selects on a
+database with and without vacuum() can easily differ by a factor of 10.
+@item
+The test results were also strange; the AS3AP test documentation
+mentions that the test does:
+
+"selections, simple joins, projections, aggregates, one-tuple updates,
+and bulk updates"
+
+PostgreSQL is good at doing selects and joins (especially after a
+vacuum()), but doesn't perform as well on inserts/updates; The
+benchmarks seem to indicate that only SELECTs were done (or very few
+updates). This could easily explain the good results for PostgreSQL in
+this test. The bad results for MySQL will be obvious a bit down in this
+document.
+@item
+They did run the so called benchmark from a Windows machine against a
+Linux machine over ODBC, a setup that no normal database user would ever
+do when running a heavy multi-user application. This tested more the
+ODBC driver and the Windows protocol used between the clients than the
+database itself.
+@item
+When running the database against Oracle and MS-SQL (Great Bridge has
+indirectly indicated that the databases they used in the test),
+they didn't use the native protocol but instead ODBC. Anyone that has
+ever used Oracle, knows that all real application uses the native
+interface instead of ODBC. Doing a test through ODBC and claiming that
+the results had anything to do with using the database for real can't
+be regarded as fair play. They should have done two tests with and
+without ODBC to provide the right facts (after having got experts to tune
+all involved databases of course).
+@item
+They refer to the TPC-C tests, but don't anywhere mention that the
+tests they did were not a true TPC-C test and they were not even
+allowed to call it a TPC-C test. A TPC-C test can only be conducted by
+the rules approved by the @uref{http://www.tpc.org,TPC-council}. Great
+Bridge didn't do that. By doing this they have both violated the TPC
+trademark and miscredited their own benchmarks. The rules set by the
+TPC-council are very strict to ensure that no one can produce false
+results or make unprovable statements. Apparently Great Bridge wasn't
+interested in doing this.
+@item
+After the first test, we contacted Great Bridge and mentioned to them
+some of the obvious mistakes they had done with @strong{MySQL}; Running
+with a debug version of our ODBC driver, running on a Linux system that
+wasn't optimized for threads, using an old MySQL version when there was
+a recommended newer one available, not starting @strong{MySQL} with the
+right options for heavy multi-user use (the default installation of
+MySQL is tuned for minimal resource use). Great Bridge did run a new
+test, with our optimized ODBC driver and with better startup options for
+MySQL, but refused to either use our updated glibc library or our
+standard binary (used by 80% of our users), which was statically linked
+with a fixed glibc library.
+
+According to what we know, Great Bridge did nothing to ensure that the
+other databases were set up correctly to run well in their test
+environment. We are sure however that they didn't contact Oracle or
+Microsoft to ask for their advice in this matter ;)
+@item
+The benchmark was paid for by Great Bridge, and they decided to publish
+only partial chosen results (instead of publishing it all).
+@end itemize
+
+Tim Perdue, a long time PostgreSQL fan and a reluctant MySQL user
+published a comparison on
+@uref{http://www.phpbuilder.com/columns/tim20001112.php3,phpbuider}.
+
+When we became aware of the comparison, we phoned Tim Perdue about this
+because there was a lot of strange things in his results. For example,
+he claimed that MySQL had a problem with five users in his tests, when we
+know that there are users with similar machines as his that are using
+MySQL with 2000 simultaneous connections doing 400 queries per second (In
+this case the limit was the web bandwidth, not the database).
+
+It sounded like he was using a Linux kernel that either had some
+problems with many threads (Linux kernels before 2.4 had a problem with
+this but we have documented how to fix this and Tim should be aware of
+this problem). The other possible problem could have been an old glibc
+library and that Tim didn't use a MySQL binary from our site, which is
+linked with a corrected glibc library, but had compiled a version of his
+own with. In any of the above cases, the symptom would have been exactly
+what Tim had measured.
+
+We asked Tim if we could get access to his data so that we could repeat
+the benchmark and if he could check the MySQL version on the machine to
+find out what was wrong and he promised to come back to us about this.
+He has not done that yet.
+
+Because of this we can't put any trust in this benchmark either :(
+
+Conclusion:
+
+The only benchmarks that exist today that anyone can download and run
+against @strong{MySQL} and PostgreSQL is the MySQL benchmarks. We here
+at @strong{MySQL} believe that open source databases should be tested
+with open source tools! This is the only way to ensure that no one
+does tests that nobody can reproduce and use this to claim that a
+database is better than another. Without knowing all the facts it's
+impossible to answer the claims of the tester.
+
+The thing we find strange is that every test we have seen about
+PostgreSQL, that is impossible to reproduce, claims that PostgreSQL is
+better in most cases while our tests, which anyone can reproduce,
+clearly shows otherwise. With this we don't want to say that PostgreSQL
+isn't good at many things (It is!) We would just like to see a fair test
+where they are very good so that we could get some friendly competition
+going!
+
+For more information about our benchmarks suite see @xref{MySQL
+Benchmarks}.
+
+We are working on an even better benchmark suite, including much better
+documentation of what the individual tests really do and how to add more
+tests to the suite.
@cindex internals
@cindex threads
@@ -42648,7 +44506,7 @@ attachments, you should ftp all the relevant files to:
@end itemize
@node Reporting mysqltest bugs, , extending mysqltest, MySQL test suite
-@subsection Extending the MySQL Test Suite
+@subsection Reporting bugs in the MySQL Test Suite
If your @strong{MySQL} version doesn't pass the test suite you should
do the following:
@@ -42680,6 +44538,10 @@ description of your system, the version of the mysqld binary and how you
compiled it.
@item
+Try also to run @code{mysql-test-run} with the @code{--force} option to
+see if there is any other test that fails.
+
+@item
If you have compiled @strong{MySQL} yourself, check our manual for how
to compile @strong{MySQL} on your platform or, preferable, use one of
the binaries we have compiled for you at
@@ -42919,6 +44781,8 @@ An online magazine featuring music, literature, arts, and design content.
@item @uref{http://kids.msfc.nasa.gov, NASA KIDS}
@item @uref{http://science.nasa.gov, Sience@@NASA}
+@item @uref{http://www.handy.de/, handy.de}
+
@item @uref{http://lindev.jmc.tju.edu/qwor, Qt Widget and Object Repository}
@item @uref{http://www.samba-choro.com.br, Brazilian samba site (in Portuguese)}
@@ -42933,6 +44797,9 @@ tickets for this event is implemented using @strong{MySQL} and tcl/tk. More than
service with millions of users.}
@item @uref{http://f1.tauzero.se, Forza Motorsport}
+
+@item @uref{http://www.dreamhost.com/, DreamHost Web Hosting}
+
@end itemize
@cindex services
@@ -43270,16 +45137,16 @@ interface, you should fetch the @code{Data-Dumper}, @code{DBI}, and
Perl @code{Data-Dumper} module. Useful with @code{DBI}/@code{DBD} support for
older Perl installations.
-@item @uref{http://www.mysql.com/Downloads/Contrib/DBI-1.14.tar.gz, DBI-1.14.tar.gz}
+@item @uref{http://www.mysql.com/Downloads/Contrib/DBI-1.15.tar.gz, DBI-1.15.tar.gz}
Perl @code{DBI} module.
-@item @uref{http://www.mysql.com/Downloads/Contrib/KAMXbase1.0.tar.gz,KAMXbase1.0.tar.gz}
+@item @uref{http://www.mysql.com/Downloads/Contrib/KAMXbase1.2.tar.gz,KAMXbase1.2.tar.gz}
Convert between @file{.dbf} files and @strong{MySQL} tables. Perl
module written by Pratap Pereira @email{pereira@@ee.eng.ohio-state.edu},
extended by Kevin A. McGrail @email{kmcgrail@@digital1.peregrinehw.com}.
This converter can handle MEMO fields.
-@item @uref{http://www.mysql.com/Downloads/Contrib/Msql-Mysql-modules-1.2215.tar.gz, Msql-Mysql-modules-1.2215.tar.gz}
+@item @uref{http://www.mysql.com/Downloads/Contrib/Msql-Mysql-modules-1.2216.tar.gz, Msql-Mysql-modules-1.2216.tar.gz}
Perl @code{DBD} module to access mSQL and @strong{MySQL} databases.
@item @uref{http://www.mysql.com/Downloads/Contrib/Data-ShowTable-3.3.tar.gz, Data-ShowTable-3.3.tar.gz}
@@ -43498,8 +45365,8 @@ of several databases simultaneously. By Innovative-IT Development AB.
The @strong{MySQL} GUI client homepage. By Sinisa at @strong{MySQL AB}.
@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_navigator_0.9.0.tar.gz, MySQL navigator 0.9}
-MySQL Navigator is MySQL database server GUI client program. The purpose
-of MySQL Navigator is to provide a useful client interface to MySQL
+MySQL Navigator is a @strong{MySQL} database server GUI client program. The purpose
+of MySQL Navigator is to provide a useful client interface to @strong{MySQL}
database servers, whilst supporting multiple operating systems and
languages. You can currently import/export database, enter queries, get
result sets, edit scripts, run scripts, add, alter, and delete users,
@@ -43522,7 +45389,7 @@ You can always find the latest version
@uref{http://www.trash.net/~ffischer/admin/index.html, here}.
@item @uref{http://www.mysql.com/Downloads/Win32/MySQL-Maker-1.0.zip,MySQL-Maker 1.0}.
-Shareware @strong{MySQL} client for Windows. It's WYSIWYG tool which allows
+Shareware @strong{MySQL} client for Windows. It's a WYSIWYG tool which allows
you to create, change and delete databases and tables.
You can change field - structure and add, change and delete data in
these tables directly without ODBC-driver.
@@ -43532,9 +45399,14 @@ these tables directly without ODBC-driver.
Windows GUI (binary only) to administrate a database, by David B. Mansel,
@email{david@@zhadum.org}.
+@item @uref{http://home.online.no/~runeberg/myqa, MyQA}
+is a Linux-based query client for the @strong{MySQL} database server. MyQA
+lets you enter SQL queries, execute them, and view the results, all in a
+graphical user interface. The GUI is roughly similar to that of the
+'Query Analyzer' client that comes with MS SQL Server.
@item @uref{http://members.xoom.com/_opex_/mysqlmanager/index.html, MySQL Manager}
-a graphical MySQL server manager for MySQL server written in Java, for Windows
+a graphical @strong{MySQL} server manager for @strong{MySQL} server written in Java, for Windows
@item @uref{http://www.mysql.com/Downloads/Win32/netadmin.zip, netadmin.zip}
@@ -43569,6 +45441,11 @@ Some features:
@itemize @bullet
@item Manage servers, databases, tables, columns, indexes, and users
@item Import wizard to import structure and data from MS Access, MS Excel, Dbase, FoxPro, Paradox, and ODBC Databases.
+
+@item @uref{http://www.mysql.com/Downloads/Contrib/KMYENG113.zip,KMYENG113.zip}
+An administrator GUI for @strong{MySQL}. Works only on windows, no source.
+Available in English and Japanese. By Mitunobu Kaneko.
+Home page: @uref{http://sql.jnts.ne.jp/}
@end itemize
@item @uref{http://www.mysql.com/Downloads/Contrib/xmysqladmin-1.0.tar.gz, xmysqladmin-1.0.tar.gz}
@@ -43610,6 +45487,24 @@ data either by clicking on the table folder or by composing their own SQL
statements with our built-in SQL editor. The tool has been tested with
Oracle 8 and @strong{MySQL} as the back-end databases. It requires JDK 1.3 from
JavaSoft.
+@item @uref{http://www.jetools.com/products/databrowser/, DataBrowser}
+The DataBrowser is a cross-database, cross-platform data access tool. It is more
+user friendly than tools like SQL Plus, psql (command line based tools). It is more
+flexible than TOAD, ISQL, PGAccess which are GUI's that are limitied to a single
+platform or database.
+@item @uref{http://www.intrex.net/amit/software/, SQLC}
+The SQL Console is a standalone java application that allows you to connect to a
+ SQL database system and issue SQL queries and updates. It has an easy-to use
+graphical user interface. The SQL Console uses JDBC to connect to the database
+systems and, therefore, with proper JDBC drivers, you can use this utility to
+connect to some of the most popular database systems.
+@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_mmc.zip, MySQL MMC}
+MySQL MMC is a GUI Management Tool developed using kdevelop
+with a very good interface completely like Microsoft
+Enterprise Tool (for SQL Server) or Sybase Central. We
+can use it to manage server, database, table, index,
+users and to edit table data in grid or execute Sql
+by Query Analysis.
@end itemize
@cindex Web clients
@@ -43659,7 +45554,7 @@ html templates. By Alex Krohn.
This cgi scripts in Perl enables you to edit content of Mysql
database. By Tomas Zeman.
@item
-@uref{http://futurerealm.com/opensource/futuresql.htm, FutureSQL Web Database Administration Tool}.
+@uref{http://worldcommunity.com/opensource/futuresql, FutureSQL Web Database Administration Tool}.
FutureSQL by Peter F. Brown, is a free, open source rapid application
development Web database administration tool, written in Perl,
using @strong{MySQL}. It uses @code{DBI:DBD} and @code{CGI.pm}.
@@ -43682,7 +45577,7 @@ and run update queries. Originally written to implement a simple fast
low-overhead banner-rotation system. By Sasha Pachev.
@item @uref{http://htcheck.sourceforge.net, htCheck} - URL checker with
-MySQL backend. Spidered URLs can later be queried using SQL to retrieve
+@strong{MySQL} backend. Spidered URLs can later be queried using SQL to retrieve
various kinds of information, eg. broken links. Written by Gabriele Bartolini.
@item @uref{http://www.odbsoft.com/cook/sources.htm}
@@ -43839,7 +45734,7 @@ detection of @code{TIMESTAMP} fields), provides warnings and suggestions
while converting, quotes @strong{all} special characters in text and
binary data, and so on. It will also convert to @code{mSQL} v1 and v2,
and is free of charge for anyone. See
-@uref{http://www.cynergi.net/prod/exportsql/} for the latest version. By
+@uref{http://www.cynergi.net/exportsql/} for the latest version. By
Pedro Freire, @email{support@@cynergi.net}. NOTE: Doesn't work with
Access2!
@item @uref{http://www.mysql.com/Downloads/Contrib/access_to_mysql.txt, access_to_mysql.txt}
@@ -43873,9 +45768,16 @@ table for a different site you are working on, but the table is just a
bit different (that is - fields in different order, etc.).
By Steve Shreeve.
@item @uref{http://www.mysql.com/Downloads/Contrib/oracledump, oracledump}
-Perl program to convert Oracle databases to @strong{MySQL}. By Johan Andersson.
+Perl program to convert Oracle databases to @strong{MySQL}. Has same
+output format as mysqldump. By Johan Andersson.
+
@item @uref{http://www.mysql.com/Downloads/Contrib/excel2mysql, excel2mysql}
Perl program to import Excel spreadsheets into a @strong{MySQL} database. By Stephen Hurd @email{shurd@@sk.sympatico.ca}
+
+@item @uref{http://www.mysql.com/Downloads/Contrib/T2S_100.ZIP, T2S_100.ZIP}.
+Windows program to convert text files to @strong{MySQL} databases. By
+Asaf Azulay.
+
@end itemize
@appendixsec Using MySQL with Other Products
@@ -43911,6 +45813,10 @@ Patches for @code{radiusd} to make it support @strong{MySQL}. By Wim Bonis,
@appendixsec Useful Tools
@itemize @bullet
+@item @uref{http://worldcommunity.com/opensource/utilities/mysql_backup.html, MySQL Backup}.
+
+A backup script for MySQL. By Peter F. Brown.
+
@item @uref{http://www.mysql.com/Downloads/Contrib/mytop, mytop}
@item @uref{http://public.yahoo.com/~jzawodn/mytop/, mytop home page}
mytop is a Perl program that allows you to monitor @strong{MySQL} servers by
@@ -44362,6 +46268,8 @@ Slovak error messages.
Romanian error messages.
@item Peter Feher
Hungarian error messages.
+@item Roberto M. Serqueira
+Portuguese error messages.
@item David Sacerdote @email{davids@@secnet.com}
Ideas for secure checking of DNS hostnames.
@item Wei-Jou Chen @email{jou@@nematic.ieo.nctu.edu.tw}
@@ -44376,7 +46284,7 @@ Active mailing list member.
Ported (and extended) the benchmark suite to @code{DBI}/@code{DBD}. Have
been of great help with @code{crash-me} and running benchmarks. Some new
date functions. The mysql_setpermissions script.
-@item Jay Flaherty @email{fty@@utk.edu}
+@item Jay Flaherty @email{fty@@mediapulse.com}
Big parts of the Perl @code{DBI}/@code{DBD} section in the manual.
@item Paul Southworth @email{pauls@@etext.org}, Ray Loyzaga @email{yar@@cs.su.oz.au}
Proof-reading of the Reference Manual.
@@ -44522,11 +46430,30 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.
Added @code{ALTER TABLE table_name DISABLE KEYS} and
@code{ALTER TABLE table_name ENABLE KEYS} commands.
@item
-Added @code{HANDLER} command.
+@code{LOAD DATA FROM MASTER} "auto-magically" sets up a slave.
+@item
+Renamed @code{safe_mysqld} to @code{mysqld_safe}.
+@item
+Allow one to use @code{IN} instead of @code{FROM} in @code{SHOW} commands.
+@item
+@code{SHOW INDEXES} is now a synonym for @code{SHOW INDEX}.
@item
-Added @code{SQL_CALC_FOUND_ROWS} and @code{FOUND_ROWS()}. This make it
-possible to know how many rows a query would have returned if one hadn't
-used @code{LIMIT}.
+Added support for symbolic links to @code{MyISAM} tables. Symlink handling is
+now enabled by default for Windows.
+@item
+@code{LOAD DATA FROM MASTER} "auto-magically" sets up a slave.
+@item
+A new @code{HANDLER} interface to @code{MyISAM} tables.
+@item
+@code{COUNT(DISTINCT)} is about 30% faster.
+@item
+Creating full text indexes are now much faster.
+@item
+Searching on packed (@code{CHAR}/@code{VARCHAR}) keys are now much faster.
+@item
+Added @code{SQL_CALC_FOUND_ROWS} and @code{FOUND_ROWS()}. This makes it
+possible to know how many rows a query would have returned
+without a @code{LIMIT} clause.
@item
Changed output format of @code{SHOW OPEN TABLES}.
@item
@@ -44584,6 +46511,7 @@ users use this code as the rest of the code and because of this we are
not yet 100% confident in this code.
@menu
+* News-3.23.40:: Changes in release 3.23.40
* News-3.23.39:: Changes in release 3.23.39
* News-3.23.38:: Changes in release 3.23.38
* News-3.23.37:: Changes in release 3.23.37
@@ -44627,11 +46555,64 @@ not yet 100% confident in this code.
* News-3.23.0:: Changes in release 3.23.0
@end menu
-@node News-3.23.39, News-3.23.38, News-3.23.x, News-3.23.x
+@node News-3.23.40, News-3.23.39, News-3.23.x, News-3.23.x
+@appendixsubsec Changes in release 3.23.40
+@itemize @bullet
+@item
+Added option @code{--warnings} to @code{mysqld}. Now @code{mysqld}
+only prints the error @code{Aborted connection} if this option is used.
+@item
+Fixed parser to allow floats of type @code{1.0e1} (no sign after @code{e}).
+@item
+Option @code{--force} to @code{myisamchk} now also updates states.
+@item
+Added option @code{--warnings} to @code{mysqld}. Now @code{mysqld}
+only prints the error @code{Aborted connection} if this option is used.
+@item
+Fixed problem with @code{SHOW CREATE TABLE} when you didn't have a
+@code{PRIMARY KEY}.
+@item
+Fixed properly the rename of @code{innodb_unix_file_flush_method} to
+@code{innodb_flush_method}.
+@item
+Fixed bug when converting @code{UNSIGNED BIGINT} to @code{DOUBLE}. This caused
+a problem when doing comparisons with @code{BIGINT}'s outside of the
+signed range.
+@item
+Fixed bug in @code{BDB} tables when querying empty tables.
+@item
+Fixed a bug when using @code{COUNT(DISTINCT)} with @code{LEFT JOIN} and
+there weren't any matching rows.
+@end itemize
+
+@node News-3.23.39, News-3.23.38, News-3.23.40, News-3.23.x
@appendixsubsec Changes in release 3.23.39
@itemize @bullet
@item
-Fixed that date-part extract functions works with dates where day
+The @code{AUTO_INCREMENT} sequence wasn't reset when dropping
+and adding an @code{AUTO_INCREMENT} column.
+@item
+@code{CREATE ... SELECT} now creates non-unique indexes delayed.
+@item
+Fixed problem where @code{LOCK TABLES table_name READ} followed by
+@code{FLUSH TABLES} put an exclusive lock on the table.
+@item
+@code{REAL} @@variables were represented with only 2 digits when
+converted to strings.
+@item
+Fixed problem that client 'hung' when @code{LOAD TABLE FROM MASTER} failed.
+@item
+Running @code{myisamchk --fast --force} will no longer repair tables
+that only had the open count wrong.
+@item
+Added functions to handle symbolic links to make life easier in 4.0.
+@item
+We are now using the @code{-lcma} thread library on HP-UX 10.20 so
+that @strong{MySQL} will be more stable on HP-UX.
+@item
+Fixed problem with @code{IF()} and number of decimals in the result.
+@item
+Fixed date-part extraction functions to work with dates where day
and/or month is 0.
@item
Extended argument length in option files from 256 to 512 chars.
@@ -44639,7 +46620,7 @@ Extended argument length in option files from 256 to 512 chars.
Fixed problem with shutdown when @code{INSERT DELAYED} was waiting for
a @code{LOCK TABLE}.
@item
-Fixed coredump bug buged in InnoDB when tablespace was full.
+Fixed coredump bug in InnoDB when tablespace was full.
@item
Fixed problem with @code{MERGE} tables and big tables (> 4G) when using
@code{ORDER BY}.
@@ -45052,7 +47033,7 @@ Fixed problem when using @code{DECIMAL()} keys on negative numbers.
always returned @code{NULL}.
@item
Fixed security bug in something (please upgrade if you are using a earlier
-MySQL 3.23 version).
+@strong{MySQL} 3.23 version).
@item
Fixed buffer overflow bug when writing a certain error message.
@item
@@ -45221,7 +47202,7 @@ slave server restart.
@item
@code{SHOW KEYS} now shows whether or not key is @code{FULLTEXT}.
@item
-New script @file{mysqld_multi}. @xref{mysqld_multi}.
+New script @file{mysqld_multi}. @xref{mysqld_multi, , @code{mysqld_multi}}.
@item
Added new script, @file{mysql-multi.server.sh}. Thanks to
Tim Bunce @email{Tim.Bunce@@ig.co.uk} for modifying @file{mysql.server} to
@@ -45274,8 +47255,8 @@ read by @code{mysql_options()}.
Added new options @code{--pager[=...]}, @code{--no-pager},
@code{--tee=...} and @code{--no-tee} to the @code{mysql} client. The
new corresponding interactive commands are @code{pager}, @code{nopager},
-@code{tee} and @code{notee}. @xref{mysql}, @code{mysql --help} and the
-interactive help for more information.
+@code{tee} and @code{notee}. @xref{mysql, , @code{mysql}}, @code{mysql --help}
+and the interactive help for more information.
@item
Fixed crash when automatic repair of @code{MyISAM} table failed.
@item
@@ -45861,7 +47842,7 @@ Added table locks to Berkeley DB.
Fixed a bug with @code{LEFT JOIN} and @code{ORDER BY} where the first
table had only one matching row.
@item
-Added 4 sample @code{my.cfg} example files in the @file{support-files}
+Added 4 sample @code{my.cnf} example files in the @file{support-files}
directory.
@item
Fixed @code{duplicated key} problem when doing big @code{GROUP BY}'s.
@@ -49875,6 +51856,10 @@ Everything in this list is approximately in the order it will be done. If you
want to affect the priority order, please register a license or support us and
tell us what you want to have done more quickly. @xref{Licensing and Support}.
+The plan is that we in the future will support the full ANSI SQL99
+standard, but with a lot of useful extensions. The challenge is to do
+this without sacrificing speed or compromising the code.
+
@node TODO MySQL 4.0, TODO future, TODO, TODO
@appendixsec Things that should be in 4.0
@@ -49964,7 +51949,7 @@ of @code{analyze} is run on all sub tables.
@end itemize
@node TODO future, TODO sometime, TODO MySQL 4.0, TODO
-@appendixsec Things that must done in the real near future
+@appendixsec Things that must be done in the real near future
@itemize @bullet
@item
diff --git a/Makefile.am b/Makefile.am
index 7343f617449..d6bfb156d29 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -22,7 +22,7 @@ TAR = gtar
EXTRA_DIST = INSTALL-SOURCE README \
COPYING COPYING.LIB MIRRORS
SUBDIRS = include @docs_dirs@ @readline_dir@ \
- @thread_dirs@ @pstack_dirs@ @sql_client_dirs@ \
+ @thread_dirs@ @pstack_dirs@ vio @sql_client_dirs@ \
@sql_server_dirs@ @libmysqld_dirs@ scripts tests man \
@bench_dirs@ support-files @fs_dirs@
diff --git a/acinclude.m4 b/acinclude.m4
index 81fedcc5827..d2c74d1c0a4 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -1097,10 +1097,10 @@ dnl echo "DBG_GEM1: gemini='$gemini'"
gemini_includes=
gemini_libs=
case "$gemini" in
- no | default | *)
+ no)
AC_MSG_RESULT([Not using Gemini DB])
;;
- yes )
+ yes | default | *)
have_gemini_db="yes"
gemini_includes="-I../gemini/incl -I../gemini"
gemini_libs="\
diff --git a/bdb/include/rpc_server_ext.h b/bdb/include/rpc_server_ext.h
deleted file mode 100644
index 4abb0768134..00000000000
--- a/bdb/include/rpc_server_ext.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* DO NOT EDIT: automatically built by dist/s_include. */
-#ifndef _rpc_server_ext_h_
-#define _rpc_server_ext_h_
-#if defined(__cplusplus)
-extern "C" {
-#endif
-void __db_stats_freelist __P((__db_stat_statsreplist **));
-void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
-void __dbsrv_timeout __P((int));
-void __dbclear_ctp __P((ct_entry *));
-void __dbdel_ctp __P((ct_entry *));
-ct_entry *new_ct_ent __P((u_int32_t *));
-ct_entry *get_tableent __P((long));
-void __dbsrv_active __P((ct_entry *));
-int __dbc_close_int __P((ct_entry *));
-int __dbenv_close_int __P((long, int));
-char *get_home __P((char *));
-#if defined(__cplusplus)
-}
-#endif
-#endif /* _rpc_server_ext_h_ */
diff --git a/client/Makefile.am b/client/Makefile.am
index 24221dcab74..6766b389704 100644
--- a/client/Makefile.am
+++ b/client/Makefile.am
@@ -20,7 +20,7 @@ INCLUDES = -I$(srcdir)/../include \
-I../include -I$(srcdir)/.. -I$(top_srcdir) \
-I..
LIBS = @CLIENT_LIBS@
-LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysql/libmysqlclient.la
+LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysql/libmysqlclient.la
bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow mysqldump mysqlimport mysqltest
noinst_PROGRAMS = insert_test select_test thread_test
noinst_HEADERS = sql_string.h completion_hash.h my_readline.h
diff --git a/client/errmsg.c b/client/errmsg.c
index 1d95e5ac84f..9f67a15dcb2 100644
--- a/client/errmsg.c
+++ b/client/errmsg.c
@@ -47,6 +47,44 @@ const char *client_errors[]=
"Can't initialize character set %-.64s (path: %-.64s)",
"Got packet bigger than 'max_allowed_packet'",
"Embedded server",
+ "Error on SHOW SLAVE STATUS:",
+ "Error on SHOW SLAVE HOSTS:",
+ "Error connecting to slave:",
+ "Error connecting to master:"
+
+};
+
+/* Start of code added by Roberto M. Serqueira - martinsc@uol.com.br - 05.24.2001 */
+
+#elif defined PORTUGUESE
+const char *client_errors[]=
+{
+ "Erro desconhecido do MySQL",
+ "Não pode criar 'UNIX socket' (%d)",
+ "Não pode se conectar ao servidor MySQL local através do 'socket' '%-.64s' (%d)",
+ "Não pode se conectar ao servidor MySQL em '%-.64s' (%d)",
+ "Não pode criar 'socket TCP/IP' (%d)",
+ "'Host' servidor MySQL '%-.64s' (%d) desconhecido",
+ "Servidor MySQL desapareceu",
+ "Incompatibilidade de protocolos. Versão do Servidor: %d - Versão do Cliente: %d",
+ "Cliente do MySQL com falta de memória",
+ "Informação inválida de 'host'",
+ "Localhost via 'UNIX socket'",
+ "%-.64s via 'TCP/IP'",
+ "Erro na negociação de acesso ao servidor",
+ "Conexão perdida com servidor MySQL durante 'query'",
+ "Comandos fora de sincronismo. Você não pode executar este comando agora",
+ "%-.64s via 'named pipe'",
+ "Não pode esperar pelo 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
+ "Não pode abrir 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
+ "Não pode estabelecer o estado do 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
+ "Não pode inicializar conjunto de caracteres %-.64s (caminho %-.64s)",
+ "Obteve pacote maior do que 'max_allowed_packet'",
+ "Embedded server",
+ "Error on SHOW SLAVE STATUS:",
+ "Error on SHOW SLAVE HOSTS:",
+ "Error connecting to slave:",
+ "Error connecting to master:"
};
#else /* ENGLISH */
@@ -74,6 +112,10 @@ const char *client_errors[]=
"Can't initialize character set %-.64s (path: %-.64s)",
"Got packet bigger than 'max_allowed_packet'",
"Embedded server",
+ "Error on SHOW SLAVE STATUS:",
+ "Error on SHOW SLAVE HOSTS:",
+ "Error connecting to slave:",
+ "Error connecting to master:"
};
#endif
diff --git a/client/mysqladmin.c b/client/mysqladmin.c
index 1e6bf3c5219..3570cefc4ae 100644
--- a/client/mysqladmin.c
+++ b/client/mysqladmin.c
@@ -28,7 +28,7 @@
#include <my_pthread.h> /* because of signal() */
#endif
-#define ADMIN_VERSION "8.20"
+#define ADMIN_VERSION "8.21"
#define MAX_MYSQL_VAR 64
#define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */
#define MAX_TRUNC_LENGTH 3
@@ -870,7 +870,7 @@ static int drop_db(MYSQL *mysql, const char *db)
return -1;
}
}
- sprintf(name_buff,"drop database %.*s",FN_REFLEN,db);
+ sprintf(name_buff,"drop database `%.*s`",FN_REFLEN,db);
if (mysql_query(mysql,name_buff))
{
my_printf_error(0,"DROP DATABASE %s failed;\nerror: '%s'",MYF(ME_BELL),
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index 3d4d4597ef5..ebaa8366c72 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -16,7 +16,7 @@
/* By Jani Tolonen, 2001-04-20, MySQL Development Team */
-#define CHECK_VERSION "1.01"
+#define CHECK_VERSION "1.02"
#include <global.h>
#include <my_sys.h>
@@ -338,7 +338,7 @@ static int get_options(int *argc, char ***argv)
{
int pnlen = strlen(my_progname);
- if (pnlen < 6) // name too short
+ if (pnlen < 6) /* name too short */
what_to_do = DO_CHECK;
else if (!strcmp("repair", my_progname + pnlen - 6))
what_to_do = DO_REPAIR;
@@ -503,25 +503,24 @@ static int use_db(char *database)
static int handle_request_for_tables(char *tables, uint length)
{
- char *query, *end, options[100];
+ char *query, *end, options[100], message[100];
const char *op = 0;
options[0] = 0;
+ end = options;
switch (what_to_do) {
case DO_CHECK:
op = "CHECK";
- end = options;
- if (opt_quick) end = strmov(end, "QUICK");
- if (opt_fast) end = strmov(end, "FAST");
- if (opt_medium_check) end = strmov(end, "MEDIUM"); /* Default */
- if (opt_extended) end = strmov(end, "EXTENDED");
- if (opt_check_only_changed) end = strmov(end, "CHANGED");
+ if (opt_quick) end = strmov(end, " QUICK");
+ if (opt_fast) end = strmov(end, " FAST");
+ if (opt_medium_check) end = strmov(end, " MEDIUM"); /* Default */
+ if (opt_extended) end = strmov(end, " EXTENDED");
+ if (opt_check_only_changed) end = strmov(end, " CHANGED");
break;
case DO_REPAIR:
op = "REPAIR";
- end = options;
- if (opt_quick) end = strmov(end, "QUICK");
- if (opt_extended) end = strmov(end, "EXTENDED");
+ if (opt_quick) end = strmov(end, " QUICK");
+ if (opt_extended) end = strmov(end, " EXTENDED");
break;
case DO_ANALYZE:
op = "ANALYZE";
@@ -533,11 +532,11 @@ static int handle_request_for_tables(char *tables, uint length)
if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME))))
return 1;
- sprintf(query, "%s TABLE %s %s", op, options, tables);
+ sprintf(query, "%s TABLE %s %s", op, tables, options);
if (mysql_query(sock, query))
{
- sprintf(options, "when executing '%s TABLE'", op);
- DBerror(sock, options);
+ sprintf(message, "when executing '%s TABLE ... %s", op, options);
+ DBerror(sock, message);
return 1;
}
print_result();
@@ -551,23 +550,34 @@ static void print_result()
MYSQL_RES *res;
MYSQL_ROW row;
char prev[NAME_LEN*2+2];
- int i;
+ uint i;
+ my_bool found_error=0;
res = mysql_use_result(sock);
prev[0] = '\0';
for (i = 0; (row = mysql_fetch_row(res)); i++)
{
int changed = strcmp(prev, row[0]);
- int status = !strcmp(row[2], "status");
- if (opt_silent && status)
- continue;
+ my_bool status = !strcmp(row[2], "status");
+
+ if (status)
+ {
+ if (found_error)
+ {
+ if (what_to_do != DO_REPAIR && opt_auto_repair &&
+ (!opt_fast || strcmp(row[3],"OK")))
+ insert_dynamic(&tables4repair, row[0]);
+ }
+ found_error=0;
+ if (opt_silent)
+ continue;
+ }
if (status && changed)
printf("%-50s %s", row[0], row[3]);
else if (!status && changed)
{
printf("%s\n%-9s: %s", row[0], row[2], row[3]);
- if (what_to_do != DO_REPAIR && opt_auto_repair)
- insert_dynamic(&tables4repair, row[0]);
+ found_error=1;
}
else
printf("%-9s: %s", row[2], row[3]);
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index 8fffe02a52f..199318abc2f 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -16,7 +16,7 @@
/* Show databases, tables or columns */
-#define SHOW_VERSION "8.2"
+#define SHOW_VERSION "8.3"
#include <global.h>
#include <my_sys.h>
@@ -30,6 +30,7 @@
static my_string host=0,opt_password=0,user=0;
static my_bool opt_show_keys=0,opt_compress=0,opt_status=0;
+static uint opt_verbose=0;
static void get_options(int *argc,char ***argv);
static uint opt_mysql_port=0;
@@ -140,6 +141,7 @@ static struct option long_options[] =
#ifndef DONT_ALLOW_USER_CHANGE
{"user", required_argument, 0, 'u'},
#endif
+ {"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
{0, 0, 0, 0}
};
@@ -181,6 +183,8 @@ static void usage(void)
-u, --user=# user for login if not current user\n");
#endif
printf("\
+ -v, --verbose more verbose output; You can use this multiple times\n\
+ to get even more verbose output.\n\
-V, --version output version information and exit\n");
puts("\n\
@@ -200,7 +204,7 @@ get_options(int *argc,char ***argv)
int c,option_index;
my_bool tty_password=0;
- while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?VWi",long_options,
+ while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?vVWi",long_options,
&option_index)) != EOF)
{
switch(c) {
@@ -210,6 +214,9 @@ get_options(int *argc,char ***argv)
case 'c':
charsets_dir= optarg;
break;
+ case 'v':
+ opt_verbose++;
+ break;
case 'h':
host = optarg;
break;
@@ -277,10 +284,13 @@ static int
list_dbs(MYSQL *mysql,const char *wild)
{
const char *header;
- uint length;
+ uint length, counter = 0;
+ ulong rowcount = 0L;
+ char tables[NAME_LEN+1], rows[NAME_LEN+1];
+ char query[255];
MYSQL_FIELD *field;
MYSQL_RES *result;
- MYSQL_ROW row;
+ MYSQL_ROW row, trow, rrow;
if (!(result=mysql_list_dbs(mysql,wild)))
{
@@ -297,10 +307,79 @@ list_dbs(MYSQL *mysql,const char *wild)
if (length < field->max_length)
length=field->max_length;
- print_header(header,length,NullS);
+ if (!opt_verbose)
+ print_header(header,length,NullS);
+ else if (opt_verbose == 1)
+ print_header(header,length,"Tables",6,NullS);
+ else
+ print_header(header,length,"Tables",6,"Total Rows",12,NullS);
+
while ((row = mysql_fetch_row(result)))
- print_row(row[0],length,0);
- print_trailer(length,0);
+ {
+ counter++;
+
+ if (opt_verbose)
+ {
+ /*
+ * Original code by MG16373; Slightly modified by Monty.
+ * Print now the count of tables and rows for each database.
+ */
+
+ if (!(mysql_select_db(mysql,row[0])))
+ {
+ MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL);
+ if (mysql_affected_rows(mysql) > 0)
+ {
+ sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql));
+ rowcount = 0;
+ if (opt_verbose > 1)
+ {
+ while ((trow = mysql_fetch_row(tresult)))
+ {
+ sprintf(query,"SELECT COUNT(*) FROM `%s`",trow[0]);
+ if (!(mysql_query(mysql,query)))
+ {
+ MYSQL_RES *rresult;
+ if ((rresult = mysql_store_result(mysql)))
+ {
+ rrow = mysql_fetch_row(rresult);
+ rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10);
+ mysql_free_result(rresult);
+ }
+ }
+ }
+ sprintf(rows,"%12lu",rowcount);
+ }
+ }
+ else
+ {
+ sprintf(tables,"%6d",0);
+ sprintf(rows,"%12d",0);
+ }
+ mysql_free_result(tresult);
+ }
+ else
+ {
+ strmov(tables,"N/A");
+ strmov(rows,"N/A");
+ }
+ }
+
+ if (!opt_verbose)
+ print_row(row[0],length,0);
+ else if (opt_verbose == 1)
+ print_row(row[0],length,tables,6,NullS);
+ else
+ print_row(row[0],length,tables,6,rows,12,NullS);
+ }
+
+ print_trailer(length,
+ (opt_verbose > 0 ? 6 : 0),
+ (opt_verbose > 1 ? 12 :0),
+ 0);
+
+ if (counter && opt_verbose)
+ printf("%u row%s in set.\n",counter,(counter > 1) ? "s" : "");
mysql_free_result(result);
return 0;
}
@@ -310,10 +389,11 @@ static int
list_tables(MYSQL *mysql,const char *db,const char *table)
{
const char *header;
- uint head_length;
+ uint head_length, counter = 0;
+ char query[255], rows[64], fields[16];
MYSQL_FIELD *field;
MYSQL_RES *result;
- MYSQL_ROW row;
+ MYSQL_ROW row, rrow;
if (mysql_select_db(mysql,db))
{
@@ -338,14 +418,81 @@ list_tables(MYSQL *mysql,const char *db,const char *table)
if (head_length < field->max_length)
head_length=field->max_length;
- print_header(header,head_length,NullS);
+ if (!opt_verbose)
+ print_header(header,head_length,NullS);
+ else if (opt_verbose == 1)
+ print_header(header,head_length,"Columns",8,NullS);
+ else
+ print_header(header,head_length,"Columns",8, "Total Rows",10,NullS);
+
while ((row = mysql_fetch_row(result)))
- print_row(row[0],head_length,0);
- print_trailer(head_length,0);
+ {
+ /*
+ * Modified by MG16373
+ * Print now the count of rows for each table.
+ */
+ counter++;
+ if (opt_verbose > 0)
+ {
+ if (!(mysql_select_db(mysql,db)))
+ {
+ MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL);
+ ulong rowcount=0L;
+ if (!rresult)
+ {
+ strmov(fields,"N/A");
+ strmov(rows,"N/A");
+ }
+ else
+ {
+ sprintf(fields,"%8u",(uint) mysql_num_fields(rresult));
+ mysql_free_result(rresult);
+
+ if (opt_verbose > 1)
+ {
+ sprintf(query,"SELECT COUNT(*) FROM `%s`",row[0]);
+ if (!(mysql_query(mysql,query)))
+ {
+ if ((rresult = mysql_store_result(mysql)))
+ {
+ rrow = mysql_fetch_row(rresult);
+ rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10);
+ mysql_free_result(rresult);
+ }
+ sprintf(rows,"%10lu",rowcount);
+ }
+ else
+ sprintf(rows,"%10d",0);
+ }
+ }
+ }
+ else
+ {
+ strmov(fields,"N/A");
+ strmov(rows,"N/A");
+ }
+ }
+ if (!opt_verbose)
+ print_row(row[0],head_length,NullS);
+ else if (opt_verbose == 1)
+ print_row(row[0],head_length, fields,8, NullS);
+ else
+ print_row(row[0],head_length, fields,8, rows,10, NullS);
+ }
+
+ print_trailer(head_length,
+ (opt_verbose > 0 ? 8 : 0),
+ (opt_verbose > 1 ? 10 :0),
+ 0);
+
+ if (counter && opt_verbose)
+ printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : "");
+
mysql_free_result(result);
return 0;
}
+
static int
list_table_status(MYSQL *mysql,const char *db,const char *wild)
{
diff --git a/client/mysqltest.c b/client/mysqltest.c
index e1ca5638340..ed74d6d3416 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -43,7 +43,7 @@
**********************************************************************/
-#define MTEST_VERSION "1.8"
+#define MTEST_VERSION "1.10"
#include <global.h>
#include <my_sys.h>
@@ -84,7 +84,7 @@
static int record = 0, verbose = 0, silent = 0, opt_sleep=0;
static char *db = 0, *pass=0;
const char* user = 0, *host = 0, *unix_sock = 0;
-static int port = 0;
+static int port = 0, opt_big_test=0;
static uint start_lineno, *lineno;
static char **default_argv;
@@ -95,9 +95,13 @@ static FILE** cur_file;
static FILE** file_stack_end;
static uint lineno_stack[MAX_INCLUDE_DEPTH];
static char TMPDIR[FN_REFLEN];
+static int *block_ok_stack_end;
-static int block_stack[BLOCK_STACK_DEPTH];
static int *cur_block, *block_stack_end;
+static int block_stack[BLOCK_STACK_DEPTH];
+
+
+static int block_ok_stack[BLOCK_STACK_DEPTH];
static uint global_expected_errno[MAX_EXPECTED_ERRORS];
DYNAMIC_ARRAY q_lines;
@@ -121,7 +125,7 @@ typedef struct
PARSER parser;
MASTER_POS master_pos;
-int block_ok = 1; /* set to 0 if the current block should not be executed */
+int* block_ok; /* set to 0 if the current block should not be executed */
int false_block_depth = 0;
const char* result_file = 0; /* if set, all results are concated and
compared against this file*/
@@ -159,6 +163,8 @@ Q_SYNC_WITH_MASTER, Q_ERROR,
Q_SEND, Q_REAP,
Q_DIRTY_CLOSE, Q_REPLACE,
Q_PING, Q_EVAL,
+Q_RPL_PROBE, Q_ENABLE_RPL_PARSE,
+Q_DISABLE_RPL_PARSE, Q_EVAL_RESULT,
Q_UNKNOWN, /* Unknown command. */
Q_COMMENT, /* Comments, ignored. */
Q_COMMENT_WITH_COMMAND
@@ -167,7 +173,7 @@ Q_COMMENT_WITH_COMMAND
/* this should really be called command */
struct st_query
{
- char *query, *query_buf,*first_argument;
+ char *query, *query_buf,*first_argument,*end;
int first_word_len;
my_bool abort_on_error, require_file;
uint expected_errno[MAX_EXPECTED_ERRORS];
@@ -188,6 +194,8 @@ const char *command_names[] = {
"send", "reap",
"dirty_close", "replace_result",
"ping", "eval",
+ "rpl_probe", "enable_rpl_parse",
+ "disable_rpl_parse", "eval_result",
0
};
@@ -230,10 +238,12 @@ void free_pointer_array(POINTER_ARRAY *pa);
static int initialize_replace_buffer(void);
static void free_replace_buffer(void);
static void do_eval(DYNAMIC_STRING* query_eval, const char* query);
+void str_to_file(const char* fname, char* str, int size);
struct st_replace *glob_replace;
static char *out_buff;
static uint out_length;
+static int eval_result = 0;
static void do_eval(DYNAMIC_STRING* query_eval, const char* query)
{
@@ -290,7 +300,7 @@ static void close_files()
{
do
{
- if (*cur_file != stdin)
+ if (*cur_file != stdin && *cur_file)
my_fclose(*cur_file,MYF(0));
} while (cur_file-- != file_stack);
}
@@ -352,7 +362,9 @@ static void abort_not_supported_test()
static void verbose_msg(const char* fmt, ...)
{
va_list args;
- if (!verbose) return;
+ DBUG_ENTER("verbose_msg");
+ if (!verbose)
+ DBUG_VOID_RETURN;
va_start(args, fmt);
@@ -360,6 +372,7 @@ static void verbose_msg(const char* fmt, ...)
vfprintf(stderr, fmt, args);
fprintf(stderr, "\n");
va_end(args);
+ DBUG_VOID_RETURN;
}
@@ -382,25 +395,53 @@ int hex_val(int c)
int dyn_string_cmp(DYNAMIC_STRING* ds, const char* fname)
{
MY_STAT stat_info;
- char *tmp;
+ char *tmp, *res_ptr;
+ char eval_file[FN_REFLEN];
int res;
+ uint res_len;
int fd;
+ DYNAMIC_STRING res_ds;
DBUG_ENTER("dyn_string_cmp");
if (!my_stat(fname, &stat_info, MYF(MY_WME)))
die(NullS);
- if (stat_info.st_size != ds->length)
+ if (!eval_result && stat_info.st_size != ds->length)
DBUG_RETURN(2);
- if (!(tmp = (char*) my_malloc(ds->length, MYF(MY_WME))))
+ if (!(tmp = (char*) my_malloc(stat_info.st_size + 1, MYF(MY_WME))))
die(NullS);
if ((fd = my_open(fname, O_RDONLY, MYF(MY_WME))) < 0)
die(NullS);
if (my_read(fd, (byte*)tmp, stat_info.st_size, MYF(MY_WME|MY_NABP)))
die(NullS);
- res = (memcmp(tmp, ds->str, stat_info.st_size)) ? 1 : 0;
+ tmp[stat_info.st_size] = 0;
+ init_dynamic_string(&res_ds, "", 0, 65536);
+ if (eval_result)
+ {
+ do_eval(&res_ds, tmp);
+ res_ptr = res_ds.str;
+ if((res_len = res_ds.length) != ds->length)
+ {
+ res = 2;
+ goto err;
+ }
+ }
+ else
+ {
+ res_ptr = tmp;
+ res_len = stat_info.st_size;
+ }
+
+ res = (memcmp(res_ptr, ds->str, res_len)) ? 1 : 0;
+
+err:
+ if(res && eval_result)
+ str_to_file(fn_format(eval_file, fname, "", ".eval",2), res_ptr,
+ res_len);
+
my_free((gptr) tmp, MYF(0));
my_close(fd, MYF(MY_WME));
-
+ dynstr_free(&res_ds);
+
DBUG_RETURN(res);
}
@@ -446,12 +487,12 @@ VAR* var_get(const char* var_name, const char** var_name_end, int raw)
{
const char* save_var_name = var_name, *end;
end = (var_name_end) ? *var_name_end : 0;
- while(isalnum(*var_name) || *var_name == '_')
- {
- if(end && var_name == end)
- break;
- ++var_name;
- }
+ while (isvar(*var_name))
+ {
+ if(end && var_name == end)
+ break;
+ ++var_name;
+ }
if(var_name == save_var_name)
die("Empty variable");
@@ -497,7 +538,6 @@ int var_set(char* var_name, char* var_name_end, char* var_val,
char* var_val_end)
{
int digit;
- int val_len;
VAR* v;
if (*var_name++ != '$')
{
@@ -512,21 +552,8 @@ int var_set(char* var_name, char* var_name_end, char* var_val,
}
else
v = var_reg + digit;
- if (v->alloced_len < (val_len = (int)(var_val_end - var_val)+1))
- {
- v->alloced_len = (val_len < MIN_VAR_ALLOC) ? MIN_VAR_ALLOC : val_len;
- if (!(v->str_val =
- v->str_val ? my_realloc(v->str_val, v->alloced_len, MYF(MY_WME)) :
- my_malloc(v->alloced_len, MYF(MY_WME))))
- die("Out of memory");
- }
- val_len--;
- memcpy(v->str_val, var_val, val_len);
- v->str_val_len = val_len;
- v->str_val[val_len] = 0;
- v->int_val = atoi(v->str_val);
- v->int_dirty=0;
- return 0;
+
+ return eval_expr(v, var_val, (const char**)&var_val_end);
}
int open_file(const char* name)
@@ -554,6 +581,35 @@ int do_source(struct st_query* q)
return open_file(name);
}
+int var_query_set(VAR* v, const char* p, const char** p_end)
+{
+ char* end = (char*)((p_end && *p_end) ? *p_end : p + strlen(p));
+ MYSQL_RES *res;
+ MYSQL_ROW row;
+ MYSQL* mysql = &cur_con->mysql;
+ LINT_INIT(res);
+
+ while (end > p && *end != '`')
+ --end;
+ if (p == end)
+ die("Syntax error in query, missing '`'");
+ ++p;
+
+ if (mysql_real_query(mysql, p, (int)(end - p)) ||
+ !(res = mysql_store_result(mysql)))
+ {
+ *end = 0;
+ die("Error running query '%s': %s", p, mysql_error(mysql));
+ }
+
+ if ((row = mysql_fetch_row(res)) && row[0])
+ eval_expr(v, row[0], 0);
+ else
+ eval_expr(v, "", 0);
+
+ mysql_free_result(res);
+ return 0;
+}
int eval_expr(VAR* v, const char* p, const char** p_end)
{
@@ -566,10 +622,27 @@ int eval_expr(VAR* v, const char* p, const char** p_end)
return 0;
}
}
+ else if(*p == '`')
+ {
+ return var_query_set(v, p, p_end);
+ }
else
{
- v->str_val = (char*)p;
- v->str_val_len = (p_end && *p_end) ? (int) (*p_end - p) : (int) strlen(p);
+ int new_val_len = (p_end && *p_end) ?
+ (int) (*p_end - p) : (int) strlen(p);
+ if (new_val_len + 1 >= v->alloced_len)
+ {
+ v->alloced_len = (new_val_len < MIN_VAR_ALLOC - 1) ?
+ MIN_VAR_ALLOC : new_val_len + 1;
+ if (!(v->str_val =
+ v->str_val ? my_realloc(v->str_val, v->alloced_len,
+ MYF(MY_WME)) :
+ my_malloc(v->alloced_len, MYF(MY_WME))))
+ die("Out of memory");
+ }
+ v->str_val_len = new_val_len;
+ memcpy(v->str_val, p, new_val_len);
+ v->str_val[new_val_len] = 0;
v->int_val=atoi(p);
v->int_dirty=0;
return 0;
@@ -642,6 +715,11 @@ int do_sync_with_master(struct st_query* q)
char query_buf[FN_REFLEN+128];
int offset = 0;
char* p = q->first_argument;
+ int rpl_parse;
+
+ rpl_parse = mysql_rpl_parse_enabled(mysql);
+ mysql_disable_rpl_parse(mysql);
+
if(*p)
offset = atoi(p);
@@ -658,7 +736,10 @@ int do_sync_with_master(struct st_query* q)
if(!row[0])
die("Error on slave while syncing with master");
mysql_free_result(res);
-
+
+ if(rpl_parse)
+ mysql_enable_rpl_parse(mysql);
+
return 0;
}
@@ -667,6 +748,11 @@ int do_save_master_pos()
MYSQL_RES* res;
MYSQL_ROW row;
MYSQL* mysql = &cur_con->mysql;
+ int rpl_parse;
+
+ rpl_parse = mysql_rpl_parse_enabled(mysql);
+ mysql_disable_rpl_parse(mysql);
+
if(mysql_query(mysql, "show master status"))
die("At line %u: failed in show master status: %d: %s", start_lineno,
mysql_errno(mysql), mysql_error(mysql));
@@ -678,6 +764,9 @@ int do_save_master_pos()
strncpy(master_pos.file, row[0], sizeof(master_pos.file));
master_pos.pos = strtoul(row[1], (char**) 0, 10);
mysql_free_result(res);
+
+ if(rpl_parse)
+ mysql_enable_rpl_parse(mysql);
return 0;
}
@@ -697,11 +786,29 @@ int do_let(struct st_query* q)
while(*p && isspace(*p))
p++;
var_val_start = p;
- while(*p && !isspace(*p))
- p++;
- return var_set(var_name, var_name_end, var_val_start, p);
+ return var_set(var_name, var_name_end, var_val_start, q->end);
+}
+
+int do_rpl_probe(struct st_query* __attribute__((unused)) q)
+{
+ if(mysql_rpl_probe(&cur_con->mysql))
+ die("Failed in mysql_rpl_probe(): %s", mysql_error(&cur_con->mysql));
+ return 0;
+}
+
+int do_enable_rpl_parse(struct st_query* __attribute__((unused)) q)
+{
+ mysql_enable_rpl_parse(&cur_con->mysql);
+ return 0;
+}
+
+int do_disable_rpl_parse(struct st_query* __attribute__((unused)) q)
+{
+ mysql_disable_rpl_parse(&cur_con->mysql);
+ return 0;
}
+
int do_sleep(struct st_query* q)
{
char* p=q->first_argument;
@@ -1015,7 +1122,8 @@ int do_connect(struct st_query* q)
if (!mysql_init(&next_con->mysql))
die("Failed on mysql_init()");
- con_sock=fn_format(buff, con_sock, TMPDIR, "",0);
+ if (con_sock)
+ con_sock=fn_format(buff, con_sock, TMPDIR, "",0);
if (!con_db[0])
con_db=db;
con_error = 1;
@@ -1047,13 +1155,14 @@ int do_done(struct st_query* q)
q->type = Q_END_BLOCK;
if (cur_block == block_stack)
die("Stray '}' - end of block before beginning");
- if (block_ok)
+ if (*block_ok--)
+ {
parser.current_line = *--cur_block;
+ }
else
{
- if (!--false_block_depth)
- block_ok = 1;
++parser.current_line;
+ --cur_block;
}
return 0;
}
@@ -1065,11 +1174,14 @@ int do_while(struct st_query* q)
VAR v;
if (cur_block == block_stack_end)
die("Nesting too deeply");
- if (!block_ok)
+ if (!*block_ok)
{
++false_block_depth;
+ *++block_ok = 0;
+ *cur_block++ = parser.current_line++;
return 0;
}
+
expr_start = strchr(p, '(');
if (!expr_start)
die("missing '(' in while");
@@ -1080,9 +1192,11 @@ int do_while(struct st_query* q)
*cur_block++ = parser.current_line++;
if (!v.int_val)
{
- block_ok = 0;
- false_block_depth = 1;
+ *++block_ok = 0;
+ false_block_depth++;
}
+ else
+ *++block_ok = 1;
return 0;
}
@@ -1346,7 +1460,7 @@ int read_query(struct st_query** q_ptr)
q->first_word_len = (uint) (p - q->query);
while (*p && isspace(*p)) p++;
q->first_argument=p;
-
+ q->end = strend(q->query);
parser.read_lines++;
return 0;
}
@@ -1356,6 +1470,7 @@ struct option long_options[] =
{
{"debug", optional_argument, 0, '#'},
{"database", required_argument, 0, 'D'},
+ {"big-test", no_argument, 0, 'B'},
{"help", no_argument, 0, '?'},
{"host", required_argument, 0, 'h'},
{"password", optional_argument, 0, 'p'},
@@ -1366,6 +1481,7 @@ struct option long_options[] =
{"silent", no_argument, 0, 'q'},
{"sleep", required_argument, 0, 'T'},
{"socket", required_argument, 0, 'S'},
+ {"test-file", required_argument, 0, 'x'},
{"tmpdir", required_argument, 0, 't'},
{"user", required_argument, 0, 'u'},
{"verbose", no_argument, 0, 'v'},
@@ -1398,6 +1514,7 @@ void usage()
-u, --user=... User for login.\n\
-p[password], --password[=...]\n\
Password to use when connecting to server.\n\
+ -B, --big-test Define BIG_TEST to 1\n\
-D, --database=... Database to use.\n\
-P, --port=... Port number to use for connection.\n\
-S, --socket=... Socket file to use for connection.\n\
@@ -1405,6 +1522,7 @@ void usage()
-T, --sleep=# Sleep always this many seconds on sleep commands\n\
-r, --record Record output of test_file into result file.\n\
-R, --result-file=... Read/Store result from/in this file.\n\
+ -x, --test-file=... Read test from/in this file (default stdin).\n\
-v, --verbose Write more.\n\
-q, --quiet, --silent Suppress all normal output.\n\
-V, --version Output version information and exit.\n\
@@ -1419,7 +1537,7 @@ int parse_args(int argc, char **argv)
load_defaults("my",load_default_groups,&argc,&argv);
default_argv= argv;
- while((c = getopt_long(argc, argv, "h:p::u:P:D:S:R:t:T:#:?rvVq",
+ while((c = getopt_long(argc, argv, "h:p::u:BP:D:S:R:x:t:T:#:?rvVq",
long_options, &option_index)) != EOF)
{
switch(c) {
@@ -1438,6 +1556,10 @@ int parse_args(int argc, char **argv)
case 'R':
result_file = optarg;
break;
+ case 'x':
+ if (!(*cur_file = my_fopen(optarg, O_RDONLY, MYF(MY_WME))))
+ die("Could not open %s: errno = %d", optarg, errno);
+ break;
case 'p':
if (optarg)
{
@@ -1448,6 +1570,9 @@ int parse_args(int argc, char **argv)
else
tty_password=1;
break;
+ case 'B':
+ opt_big_test=1;
+ break;
case 'P':
port = atoi(optarg);
break;
@@ -1522,10 +1647,12 @@ void reject_dump(const char* record_file, char* buf, int size)
str_to_file(fn_format(reject_file, record_file,"",".reject",2), buf, size);
}
-/* flags control the phased/stages of query execution to be performed
+/*
+* flags control the phased/stages of query execution to be performed
* if QUERY_SEND bit is on, the query will be sent. If QUERY_REAP is on
* the result will be read - for regular query, both bits must be on
*/
+
int run_query(MYSQL* mysql, struct st_query* q, int flags)
{
MYSQL_RES* res = 0;
@@ -1576,8 +1703,6 @@ int run_query(MYSQL* mysql, struct st_query* q, int flags)
if (q->abort_on_error)
die("At line %u: query '%s' failed: %d: %s", start_lineno, query,
mysql_errno(mysql), mysql_error(mysql));
- /*die("At line %u: Failed in mysql_store_result for query '%s' (%d)",
- start_lineno, query, mysql_errno(mysql));*/
else
{
for (i=0 ; q->expected_errno[i] ; i++)
@@ -1587,13 +1712,15 @@ int run_query(MYSQL* mysql, struct st_query* q, int flags)
}
if (i)
{
- verbose_msg("query '%s' failed with wrong errno\
- %d instead of %d...", q->query, mysql_errno(mysql), q->expected_errno[0]);
+ verbose_msg("query '%s' failed with wrong errno %d instead of %d...",
+ q->query, mysql_errno(mysql), q->expected_errno[0]);
+ error=1;
goto end;
}
verbose_msg("query '%s' failed: %d: %s", q->query, mysql_errno(mysql),
mysql_error(mysql));
- /* if we do not abort on error, failure to run the query does
+ /*
+ if we do not abort on error, failure to run the query does
not fail the whole test case
*/
goto end;
@@ -1720,11 +1847,13 @@ static VAR* var_init(const char* name, int name_len, const char* val,
if(!val_len)
val_len = strlen(val) ;
val_alloc_len = val_len + 16; /* room to grow */
- if(!(tmp_var = (VAR*)my_malloc(sizeof(*tmp_var) + val_alloc_len
+ if(!(tmp_var = (VAR*)my_malloc(sizeof(*tmp_var)
+ name_len, MYF(MY_WME))))
die("Out of memory");
tmp_var->name = (char*)tmp_var + sizeof(*tmp_var);
- tmp_var->str_val = tmp_var->name + name_len;
+ if(!(tmp_var->str_val = my_malloc(val_alloc_len, MYF(MY_WME))))
+ die("Out of memory");
+
memcpy(tmp_var->name, name, name_len);
memcpy(tmp_var->str_val, val, val_len + 1);
tmp_var->name_len = name_len;
@@ -1737,6 +1866,7 @@ static VAR* var_init(const char* name, int name_len, const char* val,
static void var_free(void* v)
{
+ my_free(((VAR*)v)->str_val, MYF(MY_WME));
my_free(v, MYF(MY_WME));
}
@@ -1752,13 +1882,15 @@ static void var_from_env(const char* name, const char* def_val)
hash_insert(&var_hash, (byte*)v);
}
+
static void init_var_hash()
{
- if(hash_init(&var_hash, 1024, 0, 0, get_var_key, var_free, MYF(0)))
+ if (hash_init(&var_hash, 1024, 0, 0, get_var_key, var_free, MYF(0)))
die("Variable hash initialization failed");
var_from_env("MASTER_MYPORT", "9306");
var_from_env("SLAVE_MYPORT", "9307");
- var_from_env("MYSQL_TEST_DIR", "");
+ var_from_env("MYSQL_TEST_DIR", "/tmp");
+ var_from_env("BIG_TEST", opt_big_test ? "1" : "0");
}
int main(int argc, char** argv)
@@ -1785,7 +1917,11 @@ int main(int argc, char** argv)
INIT_Q_LINES);
memset(block_stack, 0, sizeof(block_stack));
block_stack_end = block_stack + BLOCK_STACK_DEPTH;
+ memset(block_ok_stack, 0, sizeof(block_stack));
+ block_ok_stack_end = block_ok_stack + BLOCK_STACK_DEPTH;
cur_block = block_stack;
+ block_ok = block_ok_stack;
+ *block_ok = 1;
init_dynamic_string(&ds_res, "", 0, 65536);
parse_args(argc, argv);
init_var_hash();
@@ -1809,7 +1945,7 @@ int main(int argc, char** argv)
int current_line_inc = 1, processed = 0;
if (q->type == Q_UNKNOWN || q->type == Q_COMMENT_WITH_COMMAND)
get_query_type(q);
- if (block_ok)
+ if (*block_ok)
{
processed = 1;
switch (q->type) {
@@ -1818,6 +1954,9 @@ int main(int argc, char** argv)
case Q_DISCONNECT:
case Q_DIRTY_CLOSE:
close_connection(q); break;
+ case Q_RPL_PROBE: do_rpl_probe(q); break;
+ case Q_ENABLE_RPL_PARSE: do_enable_rpl_parse(q); break;
+ case Q_DISABLE_RPL_PARSE: do_disable_rpl_parse(q); break;
case Q_SOURCE: do_source(q); break;
case Q_SLEEP: do_sleep(q); break;
case Q_INC: do_inc(q); break;
@@ -1825,6 +1964,7 @@ int main(int argc, char** argv)
case Q_ECHO: do_echo(q); break;
case Q_SYSTEM: do_system(q); break;
case Q_LET: do_let(q); break;
+ case Q_EVAL_RESULT: eval_result = 1; break;
case Q_EVAL:
if (q->query == q->query_buf)
q->query += q->first_word_len;
@@ -1885,7 +2025,7 @@ int main(int argc, char** argv)
case Q_SAVE_MASTER_POS: do_save_master_pos(); break;
case Q_SYNC_WITH_MASTER: do_sync_with_master(q); break;
case Q_COMMENT: /* Ignore row */
- case Q_COMMENT_WITH_COMMAND:
+ case Q_COMMENT_WITH_COMMAND:
case Q_PING:
(void) mysql_ping(&cur_con->mysql);
break;
diff --git a/configure.in b/configure.in
index cbc6fe85619..d9c443c0dc8 100644
--- a/configure.in
+++ b/configure.in
@@ -285,8 +285,10 @@ export CC CFLAGS LD LDFLAGS
if test "$GXX" = "yes"
then
- # mysqld requires this when compiled with gcc
- CXXFLAGS="$CXXFLAGS -fno-implicit-templates"
+ # mysqld requires -fno-implicit-templates.
+ # Disable exceptions as they seams to create problems with gcc and threads.
+ # mysqld doesn't use run-time-type-checking, so we disable it.
+ CXXFLAGS="$CXXFLAGS -fno-implicit-templates -fno-exceptions -fno-rtti"
fi
# Avoid bug in fcntl on some versions of linux
@@ -401,7 +403,7 @@ AM_PROG_CC_STDC
if test "$am_cv_prog_cc_stdc" = "no"
then
- AC_MSG_ERROR([MySQL requiers a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.])
+ AC_MSG_ERROR([MySQL requires a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.])
fi
NOINST_LDFLAGS=
@@ -786,6 +788,11 @@ case $SYSTEM_TYPE in
echo "Enabling snprintf workaround for hpux 10.20"
CFLAGS="$CFLAGS -DHAVE_BROKEN_SNPRINTF -DSIGNALS_DONT_BREAK_READ"
CXXFLAGS="$CXXFLAGS -DHAVE_BROKEN_SNPRINTF -D_INCLUDE_LONGLONG -DSIGNALS_DONT_BREAK_READ"
+ if test "$with_named_thread" = "no"
+ then
+ echo "Using --with-named-thread=-lpthread"
+ with_named_thread="-lcma"
+ fi
;;
*hpux11.*)
echo "Enabling pread/pwrite workaround for hpux 11"
@@ -1082,7 +1089,7 @@ fi
AC_MSG_CHECKING("named thread libs:")
if test "$with_named_thread" != "no"
then
- LIBS="$LIBS $with_named_thread"
+ LIBS="$with_named_thread $LIBS $with_named_thread"
with_posix_threads="yes"
with_mit_threads="no"
AC_MSG_RESULT("$with_named_thread")
@@ -1995,7 +2002,7 @@ then
THREAD_LOBJECTS="thr_alarm.o thr_lock.o thr_mutex.o thr_rwlock.o my_pthread.o my_thr_init.o"
AC_SUBST(THREAD_LOBJECTS)
sql_server_dirs="strings dbug mysys extra regex isam merge myisam myisammrg heap sql"
- server_scripts="safe_mysqld mysql_install_db"
+ server_scripts="mysqld_safe mysql_install_db"
if test X"$have_berkeley_db" != Xno; then
if test X"$have_berkeley_db" != Xyes; then
# we must build berkeley db from source
@@ -2074,6 +2081,17 @@ EOF
echo "END OF INNODB CONFIGURATION"
fi
+ if test "X$have_gemini_db" = "Xyes"; then
+ sql_server_dirs="gemini $sql_server_dirs"
+ echo "CONFIGURING FOR GEMINI DB"
+ (cd gemini && sh ./configure) \
+ || AC_MSG_ERROR([could not configure Gemini DB])
+
+ echo "END OF GEMINI DB CONFIGURATION"
+
+ AC_DEFINE(HAVE_GEMINI_DB)
+ fi
+
if test "$with_posix_threads" = "no" -o "$with_mit_threads" = "yes"
then
# MIT user level threads
diff --git a/include/errmsg.h b/include/errmsg.h
index 12a3ee5557a..427174ffa53 100644
--- a/include/errmsg.h
+++ b/include/errmsg.h
@@ -54,3 +54,10 @@ extern const char *client_errors[]; /* Error messages */
#define CR_CANT_READ_CHARSET 2019
#define CR_NET_PACKET_TOO_LARGE 2020
#define CR_EMBEDDED_CONNECTION 2021
+#define CR_PROBE_SLAVE_STATUS 2022
+#define CR_PROBE_SLAVE_HOSTS 2023
+#define CR_PROBE_SLAVE_CONNECT 2024
+#define CR_PROBE_MASTER_CONNECT 2025
+
+
+
diff --git a/include/m_string.h b/include/m_string.h
index ce5197f17af..84c42e0c8b9 100644
--- a/include/m_string.h
+++ b/include/m_string.h
@@ -69,10 +69,6 @@
# define memmove(d, s, n) bmove((d), (s), (n)) /* our bmove */
#endif
-#if defined(HAVE_STPCPY) && !defined(HAVE_mit_thread)
-#define strmov(A,B) stpcpy((A),(B))
-#endif
-
/* Unixware 7 */
#if !defined(HAVE_BFILL)
# define bfill(A,B,C) memset((A),(C),(B))
@@ -90,6 +86,13 @@
extern "C" {
#endif
+#if defined(HAVE_STPCPY) && !defined(HAVE_mit_thread)
+#define strmov(A,B) stpcpy((A),(B))
+#ifndef stpcpy
+extern char *stpcpy(char *, const char *); /* For AIX with gcc 2.95.3 */
+#endif
+#endif
+
extern char NEAR _dig_vec[]; /* Declared in int2str() */
#ifdef BAD_STRING_COMPILER
@@ -148,7 +151,7 @@ extern void bchange(char *dst,uint old_len,const char *src,
uint new_len,uint tot_len);
extern void strappend(char *s,uint len,pchar fill);
extern char *strend(const char *s);
-extern char *strcend(const char *, pchar);
+extern char *strcend(const char *, pchar);
extern char *strfield(char *src,int fields,int chars,int blanks,
int tabch);
extern char *strfill(my_string s,uint len,pchar fill);
diff --git a/include/my_base.h b/include/my_base.h
index 7e63b72c934..f1d6c005262 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -215,6 +215,7 @@ enum ha_base_keytype {
#define HA_ERR_CRASHED_ON_USAGE 145 /* Table must be repaired */
#define HA_ERR_LOCK_WAIT_TIMEOUT 146
#define HA_ERR_LOCK_TABLE_FULL 147
+#define HA_ERR_READ_ONLY_TRANSACTION 148 /* Updates not allowed */
/* Other constants */
diff --git a/include/my_sys.h b/include/my_sys.h
index b5d59d2e801..c492262d925 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -62,6 +62,8 @@ extern int NEAR my_errno; /* Last error in mysys */
#define MY_DONT_CHECK_FILESIZE 128 /* Option to init_io_cache() */
#define MY_LINK_WARNING 32 /* my_redel() gives warning if links */
#define MY_COPYTIME 64 /* my_redel() copys time */
+#define MY_DELETE_OLD 256 /* my_create_with_symlink() */
+#define MY_RESOLVE_LINK 128 /* my_realpath(); Only resolve links */
#define MY_HOLD_ORIGINAL_MODES 128 /* my_copy() holds to file modes */
#define MY_REDEL_MAKE_BACKUP 256
#define MY_SEEK_NOT_DONE 32 /* my_lock may have to do a seek */
@@ -108,10 +110,7 @@ extern int NEAR my_errno; /* Last error in mysys */
/* root_alloc flags */
#define MY_KEEP_PREALLOC 1
-#define MY_MARK_BLOCKS_FREE 2 /* do not my_free() blocks,
- just move used into free list
- and mark all blocks as fully free
- */
+#define MY_MARK_BLOCKS_FREE 2 /* move used to free list and reuse them */
/* defines when allocating data */
@@ -210,7 +209,7 @@ extern long lCurMemory,lMaxMemory; /* from safemalloc */
extern ulong my_default_record_cache_size;
extern my_bool NEAR my_disable_locking,NEAR my_disable_async_io,
- NEAR my_disable_flush_key_blocks;
+ NEAR my_disable_flush_key_blocks, NEAR my_disable_symlinks;
extern char wild_many,wild_one,wild_prefix;
extern const char *charsets_dir;
extern char *defaults_extra_file;
@@ -382,6 +381,14 @@ extern File my_create(const char *FileName,int CreateFlags,
int AccsesFlags, myf MyFlags);
extern int my_close(File Filedes,myf MyFlags);
extern int my_mkdir(const char *dir, int Flags, myf MyFlags);
+extern int my_readlink(char *to, const char *filename, myf MyFlags);
+extern int my_realpath(char *to, const char *filename, myf MyFlags);
+extern File my_create_with_symlink(const char *linkname, const char *filename,
+ int createflags, int access_flags,
+ myf MyFlags);
+extern int my_delete_with_symlink(const char *name, myf MyFlags);
+extern int my_rename_with_symlink(const char *from,const char *to,myf MyFlags);
+extern int my_symlink(const char *content, const char *linkname, myf MyFlags);
extern uint my_read(File Filedes,byte *Buffer,uint Count,myf MyFlags);
extern uint my_pread(File Filedes,byte *Buffer,uint Count,my_off_t offset,
myf MyFlags);
@@ -432,8 +439,14 @@ extern int my_redel(const char *from, const char *to, int MyFlags);
extern int my_copystat(const char *from, const char *to, int MyFlags);
extern my_string my_filename(File fd);
+#ifndef THREAD
extern void dont_break(void);
extern void allow_break(void);
+#else
+#define dont_break()
+#define allow_break()
+#endif
+
extern void my_remember_signal(int signal_number,sig_handler (*func)(int));
extern void caseup(my_string str,uint length);
extern void casedn(my_string str,uint length);
diff --git a/include/myisam.h b/include/myisam.h
index ec141bb5912..d2cf3822575 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -68,6 +68,7 @@ typedef struct st_mi_isaminfo /* Struct from h_info */
ulong mean_reclength; /* Mean recordlength (if packed) */
ulonglong auto_increment;
ulonglong key_map; /* Which keys are used */
+ char *data_file_name, *index_file_name;
uint keys; /* Number of keys in use */
uint options; /* HA_OPTIONS_... used */
int errkey, /* With key was dupplicated on err */
@@ -86,6 +87,7 @@ typedef struct st_mi_isaminfo /* Struct from h_info */
typedef struct st_mi_create_info
{
+ char *index_file_name, *data_file_name; /* If using symlinks */
ha_rows max_rows;
ha_rows reloc_rows;
ulonglong auto_increment;
diff --git a/include/mysql.h b/include/mysql.h
index b5d918a98af..4ce7e80bcb9 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -127,6 +127,15 @@ typedef struct st_mysql_data {
struct st_mysql_options {
unsigned int connect_timeout,client_flag;
my_bool compress,named_pipe;
+ my_bool rpl_probe; /* on connect, find out the replication
+ role of the server, and establish connections
+ to all the peers */
+ my_bool rpl_parse; /* each call to mysql_real_query() will parse
+ it to tell if it is a read or a write, and
+ direct it to the slave or the master */
+ my_bool no_master_reads; /* if set, never read from
+ a master,only from slave, when doing
+ a read that is replication-aware */
unsigned int port;
char *host,*init_command,*user,*password,*unix_socket,*db;
char *my_cnf_file,*my_cnf_group, *charset_dir, *charset_name;
@@ -145,6 +154,14 @@ enum mysql_option { MYSQL_OPT_CONNECT_TIMEOUT, MYSQL_OPT_COMPRESS,
enum mysql_status { MYSQL_STATUS_READY,MYSQL_STATUS_GET_RESULT,
MYSQL_STATUS_USE_RESULT};
+/* there are three types of queries - the ones that have to go to
+ the master, the ones that go to a slave, and the adminstrative
+ type which must happen on the pivot connectioin
+*/
+enum mysql_rpl_type { MYSQL_RPL_MASTER, MYSQL_RPL_SLAVE,
+ MYSQL_RPL_ADMIN };
+
+
typedef struct st_mysql {
NET net; /* Communication parameters */
gptr connector_fd; /* ConnectorFd for SSL */
@@ -168,6 +185,21 @@ typedef struct st_mysql {
char scramble_buff[9];
struct charset_info_st *charset;
unsigned int server_language;
+
+ /* pointers to the master, and the next slave
+ connections, points to itself if lone connection */
+ struct st_mysql* master, *next_slave;
+
+ struct st_mysql* last_used_slave; /* needed for round-robin slave pick */
+ struct st_mysql* last_used_con; /* needed for send/read/store/use
+ result to work
+ correctly with replication
+ */
+ my_bool rpl_pivot; /* set if this is the original connection,
+ not a master or a slave we have added though
+ mysql_rpl_probe() or mysql_set_master()/
+ mysql_add_slave()
+ */
} MYSQL;
@@ -242,6 +274,46 @@ int STDCALL mysql_send_query(MYSQL *mysql, const char *q,
int STDCALL mysql_read_query_result(MYSQL *mysql);
int STDCALL mysql_real_query(MYSQL *mysql, const char *q,
unsigned int length);
+/* perform query on master */
+int STDCALL mysql_master_query(MYSQL *mysql, const char *q,
+ unsigned int length);
+int STDCALL mysql_master_send_query(MYSQL *mysql, const char *q,
+ unsigned int length);
+/* perform query on slave */
+int STDCALL mysql_slave_query(MYSQL *mysql, const char *q,
+ unsigned int length);
+int STDCALL mysql_slave_send_query(MYSQL *mysql, const char *q,
+ unsigned int length);
+
+/* enable/disable parsing of all queries to decide
+ if they go on master or slave */
+void STDCALL mysql_enable_rpl_parse(MYSQL* mysql);
+void STDCALL mysql_disable_rpl_parse(MYSQL* mysql);
+/* get the value of the parse flag */
+int STDCALL mysql_rpl_parse_enabled(MYSQL* mysql);
+
+/* enable/disable reads from master */
+void STDCALL mysql_enable_reads_from_master(MYSQL* mysql);
+void STDCALL mysql_disable_reads_from_master(MYSQL* mysql);
+/* get the value of the master read flag */
+int STDCALL mysql_reads_from_master_enabled(MYSQL* mysql);
+
+enum mysql_rpl_type STDCALL mysql_rpl_query_type(const char* q, int len);
+
+/* discover the master and its slaves */
+int STDCALL mysql_rpl_probe(MYSQL* mysql);
+
+/* set the master, close/free the old one, if it is not a pivot */
+int STDCALL mysql_set_master(MYSQL* mysql, const char* host,
+ unsigned int port,
+ const char* user,
+ const char* passwd);
+int STDCALL mysql_add_slave(MYSQL* mysql, const char* host,
+ unsigned int port,
+ const char* user,
+ const char* passwd);
+
+
int STDCALL mysql_create_db(MYSQL *mysql, const char *DB);
int STDCALL mysql_drop_db(MYSQL *mysql, const char *DB);
int STDCALL mysql_shutdown(MYSQL *mysql);
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 874430910ef..67a483705ee 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -42,7 +42,8 @@ enum enum_server_command {COM_SLEEP,COM_QUIT,COM_INIT_DB,COM_QUERY,
COM_PROCESS_INFO,COM_CONNECT,COM_PROCESS_KILL,
COM_DEBUG,COM_PING,COM_TIME,COM_DELAYED_INSERT,
COM_CHANGE_USER, COM_BINLOG_DUMP,
- COM_TABLE_DUMP, COM_CONNECT_OUT};
+ COM_TABLE_DUMP, COM_CONNECT_OUT,
+ COM_REGISTER_SLAVE};
#define NOT_NULL_FLAG 1 /* Field can't be NULL */
#define PRI_KEY_FLAG 2 /* Field is part of a primary key */
@@ -100,15 +101,8 @@ enum enum_server_command {COM_SLEEP,COM_QUIT,COM_INIT_DB,COM_QUERY,
#define NET_WRITE_TIMEOUT 60 /* Timeout on write */
#define NET_WAIT_TIMEOUT 8*60*60 /* Wait for new query */
-#ifndef Vio_defined
-#define Vio_defined
-#ifdef HAVE_VIO
-class Vio; /* Fill Vio class in C++ */
-#else
struct st_vio; /* Only C */
typedef struct st_vio Vio;
-#endif
-#endif
typedef struct st_net {
Vio* vio;
@@ -226,9 +220,6 @@ my_bool check_scramble(const char *, const char *message,
unsigned long *salt,my_bool old_ver);
char *get_tty_password(char *opt_message);
void hash_password(unsigned long *result, const char *password);
-#ifdef __cplusplus
-}
-#endif
/* Some other useful functions */
@@ -236,6 +227,10 @@ void my_init(void);
void load_defaults(const char *conf_file, const char **groups,
int *argc, char ***argv);
+#ifdef __cplusplus
+}
+#endif
+
#define NULL_LENGTH ((unsigned long) ~0) /* For net_store_length */
#ifdef __WIN__
diff --git a/include/mysqld_error.h b/include/mysqld_error.h
index 4f46c40ff49..106c49dc110 100644
--- a/include/mysqld_error.h
+++ b/include/mysqld_error.h
@@ -205,4 +205,12 @@
#define ER_SLAVE_THREAD 1202
#define ER_TOO_MANY_USER_CONNECTIONS 1203
#define ER_SET_CONSTANTS_ONLY 1204
-#define ER_ERROR_MESSAGES 205
+#define ER_LOCK_WAIT_TIMEOUT 1205
+#define ER_LOCK_TABLE_FULL 1206
+#define ER_READ_ONLY_TRANSACTION 1207
+#define ER_DROP_DB_WITH_READ_LOCK 1208
+#define ER_CREATE_DB_WITH_READ_LOCK 1209
+#define ER_CONNECT_TO_MASTER 1210
+#define ER_QUERY_ON_MASTER 1211
+#define ER_SHOW_BINLOG_EVENTS 1212
+#define ER_ERROR_MESSAGES 213
diff --git a/include/mysys_err.h b/include/mysys_err.h
index b379f5bcbc9..2d23ead36b6 100644
--- a/include/mysys_err.h
+++ b/include/mysys_err.h
@@ -22,7 +22,7 @@ extern "C" {
#endif
#define GLOB 0 /* Error maps */
-#define GLOBERRS 24 /* Max number of error messages in map's */
+#define GLOBERRS 27 /* Max number of error messages in map's */
#define EE(X) globerrs[ X ] /* Defines to add error to right map */
extern const char * NEAR globerrs[]; /* my_error_messages is here */
@@ -51,6 +51,9 @@ extern const char * NEAR globerrs[]; /* my_error_messages is here */
#define EE_CANT_MKDIR 21
#define EE_UNKNOWN_CHARSET 22
#define EE_OUT_OF_FILERESOURCES 23
+#define EE_CANT_READLINK 24
+#define EE_CANT_SYMLINK 25
+#define EE_REALPATH 26
#ifdef __cplusplus
}
diff --git a/include/vio.h b/include/vio.h
deleted file mode 100644
index acea6280f45..00000000000
--- a/include/vio.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/* We implement virtual IO by trapping needed vio_* calls and mapping
- * them to different function pointers by type
- */
-
-
-
-
-/*
- * Purpose: include file for st_vio that will work with C and C++
- */
-
-#ifndef vio_violite_h_
-#define vio_violite_h_
-
-#include "my_net.h" /* needed because of struct in_addr */
-
-
-/* Simple vio interface in C; The functions are implemented in violite.c */
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-enum enum_vio_type { VIO_CLOSED, VIO_TYPE_TCPIP, VIO_TYPE_SOCKET,
- VIO_TYPE_NAMEDPIPE, VIO_TYPE_SSL};
-#ifndef st_vio_defined
-#define st_vio_defined
-struct st_vio; /* Only C */
-typedef struct st_vio st_vio;
-#endif
-
-st_vio* vio_new(my_socket sd,
- enum enum_vio_type type,
- my_bool localhost);
-#ifdef __WIN__
-st_vio* vio_new_win32pipe(HANDLE hPipe);
-#endif
-void vio_delete(st_vio* vio);
-
-#ifdef EMBEDDED_LIBRARY
-void vio_reset(st_vio *vio);
-#endif
-
-/*
- * vio_read and vio_write should have the same semantics
- * as read(2) and write(2).
- */
-int vio_read( st_vio* vio,
- gptr buf, int size);
-int vio_write( st_vio* vio,
- const gptr buf,
- int size);
-/*
- * Whenever the socket is set to blocking mode or not.
- */
-int vio_blocking( st_vio* vio,
- my_bool onoff);
-my_bool vio_is_blocking( st_vio* vio);
-/*
- * setsockopt TCP_NODELAY at IPPROTO_TCP level, when possible.
- */
- int vio_fastsend( st_vio* vio);
-/*
- * setsockopt SO_KEEPALIVE at SOL_SOCKET level, when possible.
- */
-int vio_keepalive( st_vio* vio,
- my_bool onoff);
-/*
- * Whenever we should retry the last read/write operation.
- */
-my_bool vio_should_retry( st_vio* vio);
-/*
- * When the workday is over...
- */
-int vio_close(st_vio* vio);
-/*
- * Short text description of the socket for those, who are curious..
- */
-const char* vio_description( st_vio* vio);
-
-/* Return the type of the connection */
- enum enum_vio_type vio_type(st_vio* vio);
-
-/* Return last error number */
-int vio_errno(st_vio *vio);
-
-/* Get socket number */
-my_socket vio_fd(st_vio *vio);
-
-/*
- * Remote peer's address and name in text form.
- */
-my_bool vio_peer_addr(st_vio * vio, char *buf);
-
-/* Remotes in_addr */
-
-void vio_in_addr(st_vio *vio, struct in_addr *in);
-
- /* Return 1 if there is data to be read */
-my_bool vio_poll_read(st_vio *vio,uint timeout);
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* vio_violite_h_ */
-#ifdef HAVE_VIO
-#ifndef DONT_MAP_VIO
-#define vio_delete(vio) (vio)->viodelete(vio)
-#define vio_errno(vio) (vio)->vioerrno(vio)
-#define vio_read(vio, buf, size) (vio)->read(vio,buf,size)
-#define vio_write(vio, buf, size) (vio)->write(vio, buf, size)
-#define vio_blocking(vio, set_blocking_mode) (vio)->vioblocking(vio, set_blocking_mode)
-#define vio_is_blocking(vio) (vio)->is_blocking(vio)
-#define vio_fastsend(vio) (vio)->fastsend(vio)
-#define vio_keepalive(vio, set_keep_alive) (vio)->viokeepalive(vio, set_keep_alive)
-#define vio_should_retry(vio) (vio)->should_retry(vio)
-#define vio_close(vio) ((vio)->vioclose)(vio)
-#define vio_peer_addr(vio, buf) (vio)->peer_addr(vio, buf)
-#define vio_in_addr(vio, in) (vio)->in_addr(vio, in)
-#define vio_poll_read(vio,timeout) (vio)->poll_read(vio,timeout)
-#endif /* !DONT_MAP_VIO */
-#endif /* HAVE_VIO */
-
-
-#ifdef HAVE_OPENSSL
-#include <openssl/x509.h>
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#include <openssl/pem.h>
-#include <openssl/asn1.h>
-#include "my_net.h" /* needed because of struct in_addr */
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#ifndef st_vio_defined
-#define st_vio_defined
-struct st_vio; /* Only C */
-typedef struct st_vio st_vio;
-#endif
-
-void vio_ssl_delete(st_vio* vio);
-
-#ifdef EMBEDDED_LIBRARY
-void vio_reset(st_vio *vio);
-#endif
-
-int vio_ssl_read(st_vio* vio,gptr buf, int size);
-int vio_ssl_write(st_vio* vio,const gptr buf,int size);
-int vio_ssl_blocking(st_vio* vio,my_bool onoff);
-my_bool vio_ssl_is_blocking(st_vio* vio);
-
-/* setsockopt TCP_NODELAY at IPPROTO_TCP level, when possible. */
- int vio_ssl_fastsend(st_vio* vio);
-/* setsockopt SO_KEEPALIVE at SOL_SOCKET level, when possible. */
-int vio_ssl_keepalive(st_vio* vio, my_bool onoff);
-/* Whenever we should retry the last read/write operation. */
-my_bool vio_ssl_should_retry(st_vio* vio);
-/* When the workday is over... */
-int vio_ssl_close(st_vio* vio);
-/* Return last error number */
-int vio_ssl_errno(st_vio *vio);
-my_bool vio_ssl_peer_addr(st_vio * vio, char *buf);
-void vio_ssl_in_addr(st_vio *vio, struct in_addr *in);
-
-/* Return 1 if there is data to be read */
-my_bool vio_ssl_poll_read(st_vio *vio,uint timeout);
-
-#ifdef HAVE_OPENSSL
-
-/* Single copy for server */
-struct st_VioSSLAcceptorFd
-{
- SSL_CTX* ssl_context_;
- SSL_METHOD* ssl_method_;
- struct st_VioSSLAcceptorFd* session_id_context_;
- enum {
- state_connect = 1,
- state_accept = 2
- };
- BIO* bio_;
- char *ssl_cip_;
- char desc_[100];
- st_vio* sd_;
-
- /* function pointers which are only once for SSL server */
-// struct st_vio *(*sslaccept)(struct st_VioSSLAcceptorFd*,st_vio*);
-};
-
-/* One copy for client */
-struct st_VioSSLConnectorFd
-{
- BIO* bio_;
- gptr ssl_;
- SSL_CTX* ssl_context_;
- SSL_METHOD* ssl_method_;
- /* function pointers which are only once for SSL client */
-};
-struct st_vio *sslaccept(struct st_VioSSLAcceptorFd*, struct st_vio*);
-struct st_vio *sslconnect(struct st_VioSSLConnectorFd*, struct st_vio*);
-
-#else /* HAVE_OPENSSL */
-/* This dummy is required to maintain proper size of st_mysql in mysql.h */
-struct st_VioSSLConnectorFd {};
-#endif /* HAVE_OPENSSL */
-struct st_VioSSLConnectorFd *new_VioSSLConnectorFd(
- const char* key_file,const char* cert_file,const char* ca_file,const char* ca_path);
-struct st_VioSSLAcceptorFd *new_VioSSLAcceptorFd(
- const char* key_file,const char* cert_file,const char* ca_file,const char* ca_path);
-struct st_vio* new_VioSSL(struct st_VioSSLAcceptorFd* fd, struct st_vio * sd,int state);
-static int
-init_bio_(struct st_VioSSLAcceptorFd* fd, struct st_vio* sd, int state, int bio_flags);
-static void
-report_errors();
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* HAVE_OPENSSL */
-
-#ifndef __WIN__
-#define HANDLE void *
-#endif
-
-/* This structure is for every connection on both sides */
-struct st_vio
-{
- my_socket sd; /* my_socket - real or imaginary */
- HANDLE hPipe;
- my_bool localhost; /* Are we from localhost? */
- int fcntl_mode; /* Buffered fcntl(sd,F_GETFL) */
- struct sockaddr_in local; /* Local internet address */
- struct sockaddr_in remote; /* Remote internet address */
- enum enum_vio_type type; /* Type of connection */
- char desc[30]; /* String description */
-#ifdef HAVE_VIO
- /* function pointers. They are similar for socket/SSL/whatever */
- void (*viodelete)(st_vio *);
- int(*vioerrno)(st_vio*);
- int(*read)(st_vio*, gptr, int);
- int(*write)(st_vio*, gptr, int);
- int(*vioblocking)(st_vio*, my_bool);
- my_bool(*is_blocking)(st_vio*);
- int(*viokeepalive)(st_vio *, my_bool);
- int(*fastsend)(st_vio *);
- my_bool(*peer_addr)(st_vio*, gptr);
- void(*in_addr)(st_vio*, struct in_addr*);
- my_bool(*should_retry)(st_vio *);
- int(*vioclose)(st_vio *);
- my_bool(*poll_read)(st_vio *,uint);
-
-#ifdef HAVE_OPENSSL
- BIO* bio_;
- SSL* ssl_;
- my_bool open_;
-#endif /* HAVE_OPENSSL */
-#endif /* HAVE_VIO */
-};
-
-
diff --git a/include/violite.h b/include/violite.h
index 7b14dae3610..bc10a8f527c 100644
--- a/include/violite.h
+++ b/include/violite.h
@@ -25,9 +25,6 @@
#include "my_net.h" /* needed because of struct in_addr */
-#ifdef HAVE_VIO
-#include <Vio.h> /* Full VIO interface */
-#else
/* Simple vio interface in C; The functions are implemented in violite.c */
@@ -35,14 +32,12 @@
extern "C" {
#endif /* __cplusplus */
-#ifndef Vio_defined
-#define Vio_defined
-struct st_vio; /* Only C */
-typedef struct st_vio Vio;
-#endif
-
enum enum_vio_type { VIO_CLOSED, VIO_TYPE_TCPIP, VIO_TYPE_SOCKET,
- VIO_TYPE_NAMEDPIPE, VIO_TYPE_SSL};
+ VIO_TYPE_NAMEDPIPE, VIO_TYPE_SSL};
+
+#ifndef __WIN__
+#define HANDLE void *
+#endif
Vio* vio_new(my_socket sd,
enum enum_vio_type type,
@@ -54,6 +49,10 @@ void vio_delete(Vio* vio);
#ifdef EMBEDDED_LIBRARY
void vio_reset(Vio *vio);
+#else
+void vio_reset(Vio* vio, enum enum_vio_type type,
+ my_socket sd, HANDLE hPipe,
+ my_bool localhost);
#endif
/*
@@ -87,7 +86,7 @@ my_bool vio_should_retry( Vio* vio);
/*
* When the workday is over...
*/
-int vio_close( Vio* vio);
+int vio_close(Vio* vio);
/*
* Short text description of the socket for those, who are curious..
*/
@@ -97,15 +96,15 @@ const char* vio_description( Vio* vio);
enum enum_vio_type vio_type(Vio* vio);
/* Return last error number */
-int vio_errno(Vio *vio);
+int vio_errno(Vio*vio);
/* Get socket number */
-my_socket vio_fd(Vio *vio);
+my_socket vio_fd(Vio*vio);
/*
* Remote peer's address and name in text form.
*/
-my_bool vio_peer_addr(Vio * vio, char *buf);
+my_bool vio_peer_addr(Vio* vio, char *buf);
/* Remotes in_addr */
@@ -117,5 +116,142 @@ my_bool vio_poll_read(Vio *vio,uint timeout);
#ifdef __cplusplus
}
#endif
-#endif /* HAVE_VIO */
#endif /* vio_violite_h_ */
+#ifdef HAVE_VIO
+#ifndef DONT_MAP_VIO
+#define vio_delete(vio) (vio)->viodelete(vio)
+#define vio_errno(vio) (vio)->vioerrno(vio)
+#define vio_read(vio, buf, size) (vio)->read(vio,buf,size)
+#define vio_write(vio, buf, size) (vio)->write(vio, buf, size)
+#define vio_blocking(vio, set_blocking_mode) (vio)->vioblocking(vio, set_blocking_mode)
+#define vio_is_blocking(vio) (vio)->is_blocking(vio)
+#define vio_fastsend(vio) (vio)->fastsend(vio)
+#define vio_keepalive(vio, set_keep_alive) (vio)->viokeepalive(vio, set_keep_alive)
+#define vio_should_retry(vio) (vio)->should_retry(vio)
+#define vio_close(vio) ((vio)->vioclose)(vio)
+#define vio_peer_addr(vio, buf) (vio)->peer_addr(vio, buf)
+#define vio_in_addr(vio, in) (vio)->in_addr(vio, in)
+#define vio_poll_read(vio,timeout) (vio)->poll_read(vio,timeout)
+#endif /* !DONT_MAP_VIO */
+#endif /* HAVE_VIO */
+
+
+#ifdef HAVE_OPENSSL
+#include <openssl/x509.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/pem.h>
+#include <openssl/asn1.h>
+#include "my_net.h" /* needed because of struct in_addr */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+void vio_ssl_delete(Vio* vio);
+
+int vio_ssl_read(Vio* vio,gptr buf, int size);
+int vio_ssl_write(Vio* vio,const gptr buf,int size);
+int vio_ssl_blocking(Vio* vio,my_bool onoff);
+my_bool vio_ssl_is_blocking(Vio* vio);
+
+/* setsockopt TCP_NODELAY at IPPROTO_TCP level, when possible. */
+ int vio_ssl_fastsend(Vio* vio);
+/* setsockopt SO_KEEPALIVE at SOL_SOCKET level, when possible. */
+int vio_ssl_keepalive(Vio* vio, my_bool onoff);
+/* Whenever we should retry the last read/write operation. */
+my_bool vio_ssl_should_retry(Vio* vio);
+/* When the workday is over... */
+int vio_ssl_close(Vio* vio);
+/* Return last error number */
+int vio_ssl_errno(Vio *vio);
+my_bool vio_ssl_peer_addr(Vio* vio, char *buf);
+void vio_ssl_in_addr(Vio *vio, struct in_addr *in);
+
+/* Return 1 if there is data to be read */
+my_bool vio_ssl_poll_read(Vio *vio,uint timeout);
+
+#ifdef HAVE_OPENSSL
+
+/* Single copy for server */
+struct st_VioSSLAcceptorFd
+{
+ SSL_CTX* ssl_context_;
+ SSL_METHOD* ssl_method_;
+ struct st_VioSSLAcceptorFd* session_id_context_;
+ enum {
+ state_connect = 1,
+ state_accept = 2
+ };
+ BIO* bio_;
+ char *ssl_cip_;
+ char desc_[100];
+ Vio* sd_;
+
+ /* function pointers which are only once for SSL server
+ Vio*(*sslaccept)(struct st_VioSSLAcceptorFd*,Vio*); */
+};
+
+/* One copy for client */
+struct st_VioSSLConnectorFd
+{
+ SSL_CTX* ssl_context_;
+ SSL_METHOD* ssl_method_;
+ /* function pointers which are only once for SSL client */
+};
+Vio *sslaccept(struct st_VioSSLAcceptorFd*, Vio*);
+Vio *sslconnect(struct st_VioSSLConnectorFd*, Vio*);
+
+#else /* HAVE_OPENSSL */
+/* This dummy is required to maintain proper size of st_mysql in mysql.h */
+struct st_VioSSLConnectorFd {};
+#endif /* HAVE_OPENSSL */
+struct st_VioSSLConnectorFd *new_VioSSLConnectorFd(
+ const char* key_file,const char* cert_file,const char* ca_file,const char* ca_path);
+struct st_VioSSLAcceptorFd *new_VioSSLAcceptorFd(
+ const char* key_file,const char* cert_file,const char* ca_file,const char* ca_path);
+Vio* new_VioSSL(struct st_VioSSLAcceptorFd* fd, Vio* sd,int state);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* HAVE_OPENSSL */
+
+#ifndef EMBEDDED_LIBRARY
+/* This structure is for every connection on both sides */
+struct st_vio
+{
+ my_socket sd; /* my_socket - real or imaginary */
+ HANDLE hPipe;
+ my_bool localhost; /* Are we from localhost? */
+ int fcntl_mode; /* Buffered fcntl(sd,F_GETFL) */
+ struct sockaddr_in local; /* Local internet address */
+ struct sockaddr_in remote; /* Remote internet address */
+ enum enum_vio_type type; /* Type of connection */
+ char desc[30]; /* String description */
+#ifdef HAVE_VIO
+ /* function pointers. They are similar for socket/SSL/whatever */
+ void (*viodelete)(Vio*);
+ int(*vioerrno)(Vio*);
+ int(*read)(Vio*, gptr, int);
+ int(*write)(Vio*, gptr, int);
+ int(*vioblocking)(Vio*, my_bool);
+ my_bool(*is_blocking)(Vio*);
+ int(*viokeepalive)(Vio*, my_bool);
+ int(*fastsend)(Vio*);
+ my_bool(*peer_addr)(Vio*, gptr);
+ void(*in_addr)(Vio*, struct in_addr*);
+ my_bool(*should_retry)(Vio*);
+ int(*vioclose)(Vio*);
+ my_bool(*poll_read)(Vio*,uint);
+
+#ifdef HAVE_OPENSSL
+ BIO* bio_;
+ SSL* ssl_;
+ my_bool open_;
+#endif /* HAVE_OPENSSL */
+#endif /* HAVE_VIO */
+};
+#endif /* EMBEDDED_LIBRARY */
+
diff --git a/innobase/btr/btr0btr.c b/innobase/btr/btr0btr.c
index 63e70eb1b83..2507f805cd6 100644
--- a/innobase/btr/btr0btr.c
+++ b/innobase/btr/btr0btr.c
@@ -2239,11 +2239,92 @@ btr_check_node_ptr(
}
/****************************************************************
+Checks the size and number of fields in a record based on the definition of
+the index. */
+static
+ibool
+btr_index_rec_validate(
+/*====================*/
+ /* out: TRUE if ok */
+ rec_t* rec, /* in: index record */
+ dict_index_t* index) /* in: index */
+{
+ dtype_t* type;
+ byte* data;
+ ulint len;
+ ulint n;
+ ulint i;
+
+ n = dict_index_get_n_fields(index);
+
+ if (rec_get_n_fields(rec) != n) {
+ fprintf(stderr, "Record has %lu fields, should have %lu\n",
+ rec_get_n_fields(rec), n);
+
+ return(FALSE);
+ }
+
+ for (i = 0; i < n; i++) {
+ data = rec_get_nth_field(rec, i, &len);
+
+ type = dict_index_get_nth_type(index, i);
+
+ if (len != UNIV_SQL_NULL && dtype_is_fixed_size(type)
+ && len != dtype_get_fixed_size(type)) {
+ fprintf(stderr,
+ "Record field %lu len is %lu, should be %lu\n",
+ i, len, dtype_get_fixed_size(type));
+
+ return(FALSE);
+ }
+ }
+
+ return(TRUE);
+}
+
+/****************************************************************
+Checks the size and number of fields in records based on the definition of
+the index. */
+static
+ibool
+btr_index_page_validate(
+/*====================*/
+ /* out: TRUE if ok */
+ page_t* page, /* in: index page */
+ dict_index_t* index) /* in: index */
+{
+ rec_t* rec;
+ page_cur_t cur;
+ ibool ret = TRUE;
+
+ page_cur_set_before_first(page, &cur);
+ page_cur_move_to_next(&cur);
+
+ for (;;) {
+ rec = (&cur)->rec;
+
+ if (page_cur_is_after_last(&cur)) {
+ break;
+ }
+
+ if (!btr_index_rec_validate(rec, index)) {
+
+ ret = FALSE;
+ }
+
+ page_cur_move_to_next(&cur);
+ }
+
+ return(ret);
+}
+
+/****************************************************************
Validates index tree level. */
static
-void
+ibool
btr_validate_level(
/*===============*/
+ /* out: TRUE if ok */
dict_tree_t* tree, /* in: index tree */
ulint level) /* in: level number */
{
@@ -2260,7 +2341,9 @@ btr_validate_level(
page_cur_t cursor;
mem_heap_t* heap;
dtuple_t* node_ptr_tuple;
-
+ ibool ret = TRUE;
+ dict_index_t* index;
+
mtr_start(&mtr);
page = btr_root_get(tree, &mtr);
@@ -2278,13 +2361,31 @@ btr_validate_level(
page = btr_node_ptr_get_child(node_ptr, &mtr);
}
+ index = UT_LIST_GET_FIRST(tree->tree_indexes);
+
/* Now we are on the desired level */
loop:
mtr_x_lock(dict_tree_get_lock(tree), &mtr);
- /* Check ordering of records */
- page_validate(page, UT_LIST_GET_FIRST(tree->tree_indexes));
+ /* Check ordering etc. of records */
+
+ if (!page_validate(page, index)) {
+ fprintf(stderr, "Error in page %lu in index %s\n",
+ buf_frame_get_page_no(page), index->name);
+ ret = FALSE;
+ }
+
+ if (level == 0) {
+ if (!btr_index_page_validate(page, index)) {
+ fprintf(stderr,
+ "Error in page %lu in index %s\n",
+ buf_frame_get_page_no(page), index->name);
+
+ ret = FALSE;
+ }
+ }
+
ut_a(btr_page_get_level(page, &mtr) == level);
right_page_no = btr_page_get_next(page, &mtr);
@@ -2374,14 +2475,17 @@ loop:
goto loop;
}
+
+ return(ret);
}
/******************************************************************
Checks the consistency of an index tree. */
-void
+ibool
btr_validate_tree(
/*==============*/
+ /* out: TRUE if ok */
dict_tree_t* tree) /* in: tree */
{
mtr_t mtr;
@@ -2397,8 +2501,15 @@ btr_validate_tree(
for (i = 0; i <= n; i++) {
- btr_validate_level(tree, n - i);
+ if (!btr_validate_level(tree, n - i)) {
+
+ mtr_commit(&mtr);
+
+ return(FALSE);
+ }
}
mtr_commit(&mtr);
+
+ return(TRUE);
}
diff --git a/innobase/btr/btr0cur.c b/innobase/btr/btr0cur.c
index e0e59152895..a8680c6b380 100644
--- a/innobase/btr/btr0cur.c
+++ b/innobase/btr/btr0cur.c
@@ -163,9 +163,14 @@ btr_cur_search_to_nth_level(
BTR_INSERT and BTR_ESTIMATE;
cursor->left_page is used to store a pointer
to the left neighbor page, in the cases
- BTR_SEARCH_PREV and BTR_MODIFY_PREV */
+ BTR_SEARCH_PREV and BTR_MODIFY_PREV;
+ NOTE that if has_search_latch
+ is != 0, we maybe do not have a latch set
+ on the cursor page, we assume
+ the caller uses his search latch
+ to protect the record! */
btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
- s- or x-latched */
+ s- or x-latched, but see also above! */
ulint has_search_latch,/* in: info on the latch mode the
caller currently has on btr_search_latch:
RW_S_LATCH, or 0 */
diff --git a/innobase/btr/btr0pcur.c b/innobase/btr/btr0pcur.c
index 0388785b3fe..5e625553929 100644
--- a/innobase/btr/btr0pcur.c
+++ b/innobase/btr/btr0pcur.c
@@ -246,6 +246,12 @@ btr_pcur_restore_position(
&& btr_pcur_is_on_user_rec(cursor, mtr)
&& (0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor)))) {
+ /* We have to store the NEW value for the modify clock, since
+ the cursor can now be on a different page! */
+
+ cursor->modify_clock = buf_frame_get_modify_clock(
+ buf_frame_align(
+ btr_pcur_get_rec(cursor)));
mem_heap_free(heap);
return(TRUE);
diff --git a/innobase/btr/btr0sea.c b/innobase/btr/btr0sea.c
index 318bf97e7d2..ac4e7c5ba3f 100644
--- a/innobase/btr/btr0sea.c
+++ b/innobase/btr/btr0sea.c
@@ -601,7 +601,12 @@ btr_search_guess_on_hash(
btr_search_t* info, /* in: index search info */
dtuple_t* tuple, /* in: logical record */
ulint mode, /* in: PAGE_CUR_L, ... */
- ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
+ ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
+ NOTE that only if has_search_latch
+ is 0, we will have a latch set on
+ the cursor page, otherwise we assume
+ the caller uses his search latch
+ to protect the record! */
btr_cur_t* cursor, /* out: tree cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
@@ -722,7 +727,9 @@ btr_search_guess_on_hash(
}
if (!success) {
- btr_leaf_page_release(page, latch_mode, mtr);
+ if (!has_search_latch) {
+ btr_leaf_page_release(page, latch_mode, mtr);
+ }
goto failure;
}
diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c
index 0046a3761a6..ede9e621462 100644
--- a/innobase/buf/buf0buf.c
+++ b/innobase/buf/buf0buf.c
@@ -241,6 +241,8 @@ buf_block_init(
block->modify_clock = ut_dulint_zero;
+ block->file_page_was_freed = FALSE;
+
rw_lock_create(&(block->lock));
ut_ad(rw_lock_validate(&(block->lock)));
@@ -543,6 +545,64 @@ buf_page_peek(
}
/************************************************************************
+Sets file_page_was_freed TRUE if the page is found in the buffer pool.
+This function should be called when we free a file page and want the
+debug version to check that it is not accessed any more unless
+reallocated. */
+
+buf_block_t*
+buf_page_set_file_page_was_freed(
+/*=============================*/
+ /* out: control block if found from page hash table,
+ otherwise NULL */
+ ulint space, /* in: space id */
+ ulint offset) /* in: page number */
+{
+ buf_block_t* block;
+
+ mutex_enter_fast(&(buf_pool->mutex));
+
+ block = buf_page_hash_get(space, offset);
+
+ if (block) {
+ block->file_page_was_freed = TRUE;
+ }
+
+ mutex_exit(&(buf_pool->mutex));
+
+ return(block);
+}
+
+/************************************************************************
+Sets file_page_was_freed FALSE if the page is found in the buffer pool.
+This function should be called when we free a file page and want the
+debug version to check that it is not accessed any more unless
+reallocated. */
+
+buf_block_t*
+buf_page_reset_file_page_was_freed(
+/*===============================*/
+ /* out: control block if found from page hash table,
+ otherwise NULL */
+ ulint space, /* in: space id */
+ ulint offset) /* in: page number */
+{
+ buf_block_t* block;
+
+ mutex_enter_fast(&(buf_pool->mutex));
+
+ block = buf_page_hash_get(space, offset);
+
+ if (block) {
+ block->file_page_was_freed = FALSE;
+ }
+
+ mutex_exit(&(buf_pool->mutex));
+
+ return(block);
+}
+
+/************************************************************************
This is the general function used to get access to a database page. */
buf_frame_t*
@@ -646,6 +706,9 @@ loop:
block->accessed = TRUE;
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ ut_a(block->file_page_was_freed == FALSE);
+#endif
mutex_exit(&(buf_pool->mutex));
#ifdef UNIV_DEBUG
@@ -842,6 +905,9 @@ buf_page_optimistic_get_func(
ut_ad(block->buf_fix_count > 0);
ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ ut_a(block->file_page_was_freed == FALSE);
+#endif
if (!accessed) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -949,6 +1015,9 @@ buf_page_get_known_nowait(
#endif
ut_ad(block->buf_fix_count > 0);
ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ ut_a(block->file_page_was_freed == FALSE);
+#endif
#ifdef UNIV_IBUF_DEBUG
ut_a((mode == BUF_KEEP_OLD)
@@ -996,6 +1065,8 @@ buf_page_init(
block->n_hash_helps = 0;
block->is_hashed = FALSE;
+
+ block->file_page_was_freed = FALSE;
}
/************************************************************************
@@ -1126,6 +1197,8 @@ buf_page_create(
#ifdef UNIV_IBUF_DEBUG
ut_a(ibuf_count_get(block->space, block->offset) == 0);
#endif
+ block->file_page_was_freed = FALSE;
+
/* Page can be found in buf_pool */
mutex_exit(&(buf_pool->mutex));
diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c
index 90bdde1ebc6..7129b8d20a9 100644
--- a/innobase/buf/buf0flu.c
+++ b/innobase/buf/buf0flu.c
@@ -182,8 +182,8 @@ buf_flush_write_complete(
buf_pool->LRU_flush_ended++;
}
-/* printf("n pending flush %lu\n",
- buf_pool->n_flush[block->flush_type]); */
+ /* printf("n pending flush %lu\n",
+ buf_pool->n_flush[block->flush_type]); */
if ((buf_pool->n_flush[block->flush_type] == 0)
&& (buf_pool->init_flush[block->flush_type] == FALSE)) {
@@ -421,6 +421,8 @@ buf_flush_try_neighbors(
/* In simulated aio we wake up the i/o-handler threads now that
we have posted a batch of writes: */
+ /* printf("Flush count %lu ; Waking i/o handlers\n", count); */
+
os_aio_simulated_wake_handler_threads();
return(count);
diff --git a/innobase/buf/buf0lru.c b/innobase/buf/buf0lru.c
index 4626dc2757b..142beaaaa15 100644
--- a/innobase/buf/buf0lru.c
+++ b/innobase/buf/buf0lru.c
@@ -260,9 +260,9 @@ loop:
*/
if (n_iterations > 30) {
fprintf(stderr,
- "Innobase: Warning: difficult to find free blocks from\n"
- "Innobase: the buffer pool! Consider increasing the\n"
- "Innobase: buffer pool size.\n");
+ "InnoDB: Warning: difficult to find free blocks from\n"
+ "InnoDB: the buffer pool (%lu search iterations)! Consider\n"
+ "InnoDB: increasing the buffer pool size.\n", n_iterations);
}
}
diff --git a/innobase/buf/buf0rea.c b/innobase/buf/buf0rea.c
index 644dd226a0e..728bf4404b8 100644
--- a/innobase/buf/buf0rea.c
+++ b/innobase/buf/buf0rea.c
@@ -18,6 +18,7 @@ Created 11/5/1995 Heikki Tuuri
#include "log0recv.h"
#include "trx0sys.h"
#include "os0file.h"
+#include "srv0start.h"
/* The size in blocks of the area where the random read-ahead algorithm counts
the accessed pages when deciding whether to read-ahead */
@@ -132,10 +133,16 @@ buf_read_ahead_random(
ulint low, high;
ulint i;
- if (ibuf_bitmap_page(offset)) {
+ if (srv_startup_is_before_trx_rollback_phase) {
+ /* No read-ahead to avoid thread deadlocks */
+ return(0);
+ }
+
+ if (ibuf_bitmap_page(offset) || trx_sys_hdr_page(space, offset)) {
- /* If it is an ibuf bitmap page, we do no read-ahead, as
- that could break the ibuf page access order */
+ /* If it is an ibuf bitmap page or trx sys hdr, we do
+ no read-ahead, as that could break the ibuf page access
+ order */
return(0);
}
@@ -301,9 +308,16 @@ buf_read_ahead_linear(
ulint low, high;
ulint i;
- if (ibuf_bitmap_page(offset)) {
- /* If it is an ibuf bitmap page, we do no read-ahead, as
- that could break the ibuf page access order */
+ if (srv_startup_is_before_trx_rollback_phase) {
+ /* No read-ahead to avoid thread deadlocks */
+ return(0);
+ }
+
+ if (ibuf_bitmap_page(offset) || trx_sys_hdr_page(space, offset)) {
+
+ /* If it is an ibuf bitmap page or trx sys hdr, we do
+ no read-ahead, as that could break the ibuf page access
+ order */
return(0);
}
diff --git a/innobase/configure.in b/innobase/configure.in
index 83d302c6dc4..48bb9504219 100644
--- a/innobase/configure.in
+++ b/innobase/configure.in
@@ -38,6 +38,7 @@ AC_CHECK_HEADERS(aio.h sched.h)
AC_CHECK_SIZEOF(int, 4)
AC_CHECK_FUNCS(sched_yield)
AC_CHECK_FUNCS(fdatasync)
+AC_CHECK_FUNCS(localtime_r)
#AC_C_INLINE Already checked in MySQL
AC_C_BIGENDIAN
@@ -95,6 +96,11 @@ case "$target_os" in
CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
esac
+case "$target" in
+ i[[4567]]86-*-*)
+ CFLAGS="$CFLAGS -DUNIV_INTEL_X86";;
+esac
+
AC_OUTPUT(Makefile os/Makefile ut/Makefile btr/Makefile
buf/Makefile com/Makefile data/Makefile
dict/Makefile dyn/Makefile
diff --git a/innobase/dict/dict0boot.c b/innobase/dict/dict0boot.c
index 260e8d4c276..35fdfce16a6 100644
--- a/innobase/dict/dict0boot.c
+++ b/innobase/dict/dict0boot.c
@@ -313,6 +313,11 @@ dict_boot(void)
mtr_commit(&mtr);
/*-------------------------*/
+
+ /* Initialize the insert buffer table and index for each tablespace */
+
+ ibuf_init_at_db_start();
+
/* Load definitions of other indexes on system tables */
dict_load_sys_table(dict_sys->sys_tables);
@@ -320,10 +325,6 @@ dict_boot(void)
dict_load_sys_table(dict_sys->sys_indexes);
dict_load_sys_table(dict_sys->sys_fields);
- /* Initialize the insert buffer table and index for each tablespace */
-
- ibuf_init_at_db_start();
-
mutex_exit(&(dict_sys->mutex));
}
diff --git a/innobase/fsp/fsp0fsp.c b/innobase/fsp/fsp0fsp.c
index 095ca497ee2..101fb5f3ba0 100644
--- a/innobase/fsp/fsp0fsp.c
+++ b/innobase/fsp/fsp0fsp.c
@@ -127,11 +127,9 @@ typedef byte fseg_inode_t;
page number within space, FIL_NULL
means that the slot is not in use */
/*-------------------------------------*/
-#define FSEG_INODE_SIZE (16 + 3 * FLST_BASE_NODE_SIZE +\
- FSEG_FRAG_ARR_N_SLOTS * FSEG_FRAG_SLOT_SIZE)
+#define FSEG_INODE_SIZE (16 + 3 * FLST_BASE_NODE_SIZE + FSEG_FRAG_ARR_N_SLOTS * FSEG_FRAG_SLOT_SIZE)
-#define FSP_SEG_INODES_PER_PAGE ((UNIV_PAGE_SIZE - FSEG_ARR_OFFSET - 10)\
- / FSEG_INODE_SIZE)
+#define FSP_SEG_INODES_PER_PAGE ((UNIV_PAGE_SIZE - FSEG_ARR_OFFSET - 10) / FSEG_INODE_SIZE)
/* Number of segment inodes which fit on a
single page */
@@ -198,8 +196,7 @@ the extent are free and which contain old tuple version to clean. */
/* File extent data structure size in bytes. The "+ 7 ) / 8" part in the
definition rounds the number of bytes upward. */
-#define XDES_SIZE (XDES_BITMAP +\
- (FSP_EXTENT_SIZE * XDES_BITS_PER_PAGE + 7) / 8)
+#define XDES_SIZE (XDES_BITMAP + (FSP_EXTENT_SIZE * XDES_BITS_PER_PAGE + 7) / 8)
/* Offset of the descriptor array on a descriptor page */
#define XDES_ARR_OFFSET (FSP_HEADER_OFFSET + FSP_HEADER_SIZE)
@@ -2539,6 +2536,10 @@ fseg_free_page(
seg_inode = fseg_inode_get(seg_header, mtr);
fseg_free_page_low(seg_inode, space, page, mtr);
+
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ buf_page_set_file_page_was_freed(space, page);
+#endif
}
/**************************************************************************
@@ -2602,6 +2603,14 @@ fseg_free_extent(
}
fsp_free_extent(space, page, mtr);
+
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ for (i = 0; i < FSP_EXTENT_SIZE; i++) {
+
+ buf_page_set_file_page_was_freed(space,
+ first_page_in_extent + i);
+ }
+#endif
}
/**************************************************************************
@@ -2633,6 +2642,14 @@ fseg_free_step(
MTR_MEMO_X_LOCK));
mtr_x_lock(fil_space_get_latch(space), mtr);
+ descr = xdes_get_descriptor(space, buf_frame_get_page_no(header), mtr);
+
+ /* Check that the header resides on a page which has not been
+ freed yet */
+
+ ut_a(descr);
+ ut_a(xdes_get_bit(descr, XDES_FREE_BIT, buf_frame_get_page_no(header)
+ % FSP_EXTENT_SIZE, mtr) == FALSE);
inode = fseg_inode_get(header, mtr);
descr = fseg_get_first_extent(inode, mtr);
@@ -2647,7 +2664,6 @@ fseg_free_step(
}
/* Free a frag page */
-
n = fseg_find_last_used_frag_page_slot(inode, mtr);
if (n == ULINT_UNDEFINED) {
@@ -2659,6 +2675,16 @@ fseg_free_step(
fseg_free_page_low(inode, space,
fseg_get_nth_frag_page_no(inode, n, mtr), mtr);
+
+ n = fseg_find_last_used_frag_page_slot(inode, mtr);
+
+ if (n == ULINT_UNDEFINED) {
+ /* Freeing completed: free the segment inode */
+ fsp_free_seg_inode(space, inode, mtr);
+
+ return(TRUE);
+ }
+
return(FALSE);
}
diff --git a/innobase/ibuf/ibuf0ibuf.c b/innobase/ibuf/ibuf0ibuf.c
index 7227b54e71e..171c6169927 100644
--- a/innobase/ibuf/ibuf0ibuf.c
+++ b/innobase/ibuf/ibuf0ibuf.c
@@ -1382,6 +1382,9 @@ ibuf_remove_free_page(
fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
space, page_no, &mtr);
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ buf_page_reset_file_page_was_freed(space, page_no);
+#endif
ibuf_enter();
mutex_enter(&ibuf_mutex);
@@ -1413,6 +1416,9 @@ ibuf_remove_free_page(
ibuf_bitmap_page_set_bits(bitmap_page, page_no, IBUF_BITMAP_IBUF,
FALSE, &mtr);
+#ifdef UNIV_DEBUG_FILE_ACCESSES
+ buf_page_set_file_page_was_freed(space, page_no);
+#endif
mtr_commit(&mtr);
mutex_exit(&ibuf_mutex);
@@ -2431,6 +2437,8 @@ ibuf_merge_or_delete_for_page(
block = buf_block_align(page);
rw_lock_x_lock_move_ownership(&(block->lock));
+
+ ut_a(fil_page_get_type(page) == FIL_PAGE_INDEX);
}
n_inserts = 0;
diff --git a/innobase/include/btr0btr.h b/innobase/include/btr0btr.h
index d2ac9952695..f8a3000ca8a 100644
--- a/innobase/include/btr0btr.h
+++ b/innobase/include/btr0btr.h
@@ -376,9 +376,10 @@ btr_print_tree(
/******************************************************************
Checks the consistency of an index tree. */
-void
+ibool
btr_validate_tree(
/*==============*/
+ /* out: TRUE if ok */
dict_tree_t* tree); /* in: tree */
#define BTR_N_LEAF_PAGES 1
diff --git a/innobase/include/btr0cur.h b/innobase/include/btr0cur.h
index 79ec56c8e50..4ce2177bfe8 100644
--- a/innobase/include/btr0cur.h
+++ b/innobase/include/btr0cur.h
@@ -98,12 +98,18 @@ btr_cur_search_to_nth_level(
the previous page of the record! Inserts
should always be made using PAGE_CUR_LE to
search the position! */
- ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
+ ulint latch_mode, /* in: BTR_SEARCH_LEAF, ..., ORed with
+ BTR_INSERT and BTR_ESTIMATE;
cursor->left_page is used to store a pointer
to the left neighbor page, in the cases
- BTR_SEARCH_PREV and BTR_MODIFY_PREV */
- btr_cur_t* cursor, /* out: tree cursor; the cursor page is s- or
- x-latched */
+ BTR_SEARCH_PREV and BTR_MODIFY_PREV;
+ NOTE that if has_search_latch
+ is != 0, we maybe do not have a latch set
+ on the cursor page, we assume
+ the caller uses his search latch
+ to protect the record! */
+ btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
+ s- or x-latched, but see also above! */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
RW_S_LATCH, or 0 */
diff --git a/innobase/include/btr0pcur.h b/innobase/include/btr0pcur.h
index c07d5199d8c..6465093e3c1 100644
--- a/innobase/include/btr0pcur.h
+++ b/innobase/include/btr0pcur.h
@@ -87,7 +87,11 @@ btr_pcur_open_with_no_init(
PAGE_CUR_LE, not PAGE_CUR_GE, as the latter
may end up on the previous page of the
record! */
- ulint latch_mode,/* in: BTR_SEARCH_LEAF, ... */
+ ulint latch_mode,/* in: BTR_SEARCH_LEAF, ...;
+ NOTE that if has_search_latch != 0 then
+ we maybe do not acquire a latch on the cursor
+ page, but assume that the caller uses his
+ btr search latch to protect the record! */
btr_pcur_t* cursor, /* in: memory buffer for persistent cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
diff --git a/innobase/include/btr0pcur.ic b/innobase/include/btr0pcur.ic
index 7f31f8fe502..8e927689208 100644
--- a/innobase/include/btr0pcur.ic
+++ b/innobase/include/btr0pcur.ic
@@ -492,7 +492,11 @@ btr_pcur_open_with_no_init(
PAGE_CUR_LE, not PAGE_CUR_GE, as the latter
may end up on the previous page of the
record! */
- ulint latch_mode,/* in: BTR_SEARCH_LEAF, ... */
+ ulint latch_mode,/* in: BTR_SEARCH_LEAF, ...;
+ NOTE that if has_search_latch != 0 then
+ we maybe do not acquire a latch on the cursor
+ page, but assume that the caller uses his
+ btr search latch to protect the record! */
btr_pcur_t* cursor, /* in: memory buffer for persistent cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h
index 5e90f5952fc..7f3e20a4505 100644
--- a/innobase/include/buf0buf.h
+++ b/innobase/include/buf0buf.h
@@ -293,6 +293,32 @@ buf_page_peek_block(
ulint space, /* in: space id */
ulint offset);/* in: page number */
/************************************************************************
+Sets file_page_was_freed TRUE if the page is found in the buffer pool.
+This function should be called when we free a file page and want the
+debug version to check that it is not accessed any more unless
+reallocated. */
+
+buf_block_t*
+buf_page_set_file_page_was_freed(
+/*=============================*/
+ /* out: control block if found from page hash table,
+ otherwise NULL */
+ ulint space, /* in: space id */
+ ulint offset); /* in: page number */
+/************************************************************************
+Sets file_page_was_freed FALSE if the page is found in the buffer pool.
+This function should be called when we free a file page and want the
+debug version to check that it is not accessed any more unless
+reallocated. */
+
+buf_block_t*
+buf_page_reset_file_page_was_freed(
+/*===============================*/
+ /* out: control block if found from page hash table,
+ otherwise NULL */
+ ulint space, /* in: space id */
+ ulint offset); /* in: page number */
+/************************************************************************
Recommends a move of a block to the start of the LRU list if there is danger
of dropping from the buffer pool. NOTE: does not reserve the buffer pool
mutex. */
@@ -706,6 +732,9 @@ struct buf_block_struct{
which bufferfixes the block acquires
an s-latch here; so we can use the
debug utilities in sync0rw */
+ ibool file_page_was_freed;
+ /* this is set to TRUE when fsp
+ frees a page in buffer pool */
};
/* The buffer pool structure. NOTE! The definition appears here only for
diff --git a/innobase/include/mem0pool.h b/innobase/include/mem0pool.h
index 14198920bca..eb675b4a7f9 100644
--- a/innobase/include/mem0pool.h
+++ b/innobase/include/mem0pool.h
@@ -31,7 +31,8 @@ struct mem_area_struct{
};
/* Each memory area takes this many extra bytes for control information */
-#define MEM_AREA_EXTRA_SIZE (sizeof(struct mem_area_struct))
+#define MEM_AREA_EXTRA_SIZE (ut_calc_align(sizeof(struct mem_area_struct),\
+ UNIV_MEM_ALIGNMENT))
/************************************************************************
Creates a memory pool. */
diff --git a/innobase/include/page0cur.ic b/innobase/include/page0cur.ic
index 4313036adaf..39f8ab11513 100644
--- a/innobase/include/page0cur.ic
+++ b/innobase/include/page0cur.ic
@@ -171,10 +171,10 @@ page_cur_search(
ut_ad(dtuple_check_typed(tuple));
page_cur_search_with_match(page, tuple, mode,
- &low_matched_fields,
- &low_matched_bytes,
&up_matched_fields,
&up_matched_bytes,
+ &low_matched_fields,
+ &low_matched_bytes,
cursor);
return(low_matched_fields);
}
diff --git a/innobase/include/row0mysql.h b/innobase/include/row0mysql.h
index d47fa729dce..554da2c035c 100644
--- a/innobase/include/row0mysql.h
+++ b/innobase/include/row0mysql.h
@@ -229,6 +229,15 @@ row_rename_table_for_mysql(
char* old_name, /* in: old table name */
char* new_name, /* in: new table name */
trx_t* trx); /* in: transaction handle */
+/*************************************************************************
+Checks a table for corruption. */
+
+ulint
+row_check_table_for_mysql(
+/*======================*/
+ /* out: DB_ERROR or DB_SUCCESS */
+ row_prebuilt_t* prebuilt); /* in: prebuilt struct in MySQL
+ handle */
/* A struct describing a place for an individual column in the MySQL
row format which is presented to the table handler in ha_innobase.
@@ -281,7 +290,8 @@ struct row_prebuilt_struct {
is set to TRUE */
dict_index_t* index; /* current index for a search, if any */
ulint template_type; /* ROW_MYSQL_WHOLE_ROW,
- ROW_MYSQL_REC_FIELDS or
+ ROW_MYSQL_REC_FIELDS,
+ ROW_MYSQL_DUMMY_TEMPLATE, or
ROW_MYSQL_NO_TEMPLATE */
ulint n_template; /* number of elements in the
template */
@@ -359,6 +369,8 @@ struct row_prebuilt_struct {
#define ROW_MYSQL_WHOLE_ROW 0
#define ROW_MYSQL_REC_FIELDS 1
#define ROW_MYSQL_NO_TEMPLATE 2
+#define ROW_MYSQL_DUMMY_TEMPLATE 3 /* dummy template used in
+ row_scan_and_check_index */
#ifndef UNIV_NONINL
#include "row0mysql.ic"
diff --git a/innobase/include/srv0start.h b/innobase/include/srv0start.h
index 66eeb4f2e3c..e2b20f3b5fc 100644
--- a/innobase/include/srv0start.h
+++ b/innobase/include/srv0start.h
@@ -28,4 +28,7 @@ int
innobase_shutdown_for_mysql(void);
/*=============================*/
/* out: DB_SUCCESS or error code */
+
+extern ibool srv_startup_is_before_trx_rollback_phase;
+
#endif
diff --git a/innobase/include/sync0sync.ic b/innobase/include/sync0sync.ic
index a937ac5d579..f7b341cb386 100644
--- a/innobase/include/sync0sync.ic
+++ b/innobase/include/sync0sync.ic
@@ -86,6 +86,22 @@ mutex_test_and_set(
/* mutex_fence(); */
return(res);
+#elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
+ ulint* lw;
+ ulint res;
+
+ lw = &(mutex->lock_word);
+
+ /* In assembly we use the so-called AT & T syntax where
+ the order of operands is inverted compared to the ordinary Intel
+ syntax. The 'l' after the mnemonics denotes a 32-bit operation.
+ The line after the code tells which values come out of the asm
+ code, and the second line tells the input to the asm code. */
+
+ asm volatile("movl $1, %%eax; xchgl (%%ecx), %%eax" :
+ "=eax" (res), "=m" (*lw) :
+ "ecx" (lw));
+ return(res);
#else
ibool ret;
@@ -118,6 +134,21 @@ mutex_reset_lock_word(
__asm MOV EDX, 0
__asm MOV ECX, lw
__asm XCHG EDX, DWORD PTR [ECX]
+#elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
+ ulint* lw;
+
+ lw = &(mutex->lock_word);
+
+ /* In assembly we use the so-called AT & T syntax where
+ the order of operands is inverted compared to the ordinary Intel
+ syntax. The 'l' after the mnemonics denotes a 32-bit operation. */
+
+ asm volatile("movl $0, %%eax; xchgl (%%ecx), %%eax" :
+ "=m" (*lw) :
+ "ecx" (lw) :
+ "eax"); /* gcc does not seem to understand
+ that our asm code resets eax: tell it
+ explicitly that after the third ':' */
#else
mutex->lock_word = 0;
diff --git a/innobase/include/trx0trx.h b/innobase/include/trx0trx.h
index 52be0b1d992..f67ba43162d 100644
--- a/innobase/include/trx0trx.h
+++ b/innobase/include/trx0trx.h
@@ -24,6 +24,13 @@ saving CPU time. The kernel mutex contention is increased, however. */
extern ulint trx_n_mysql_transactions;
+/************************************************************************
+Releases the search latch if trx has reserved it. */
+
+void
+trx_search_latch_release_if_reserved(
+/*=================================*/
+ trx_t* trx); /* in: transaction */
/********************************************************************
Retrieves the error_info field from a trx. */
@@ -282,6 +289,13 @@ struct trx_struct{
ulint n_mysql_tables_in_use; /* number of Innobase tables
used in the processing of the current
SQL statement in MySQL */
+ ulint mysql_n_tables_locked;
+ /* how many tables the current SQL
+ statement uses, except those
+ in consistent read */
+ ibool has_search_latch;
+ /* TRUE if this trx has latched the
+ search system latch in S-mode */
ibool ignore_duplicates_in_insert;
/* in an insert roll back only insert
of the latest row in case
diff --git a/innobase/include/univ.i b/innobase/include/univ.i
index d29ca83b0fc..73bf48b1bc0 100644
--- a/innobase/include/univ.i
+++ b/innobase/include/univ.i
@@ -9,6 +9,8 @@ Created 1/20/1994 Heikki Tuuri
#ifndef univ_i
#define univ_i
+#undef UNIV_INTEL_X86
+
#if (defined(_WIN32) || defined(_WIN64)) && !defined(MYSQL_SERVER)
#define __WIN__
#include <windows.h>
@@ -72,6 +74,8 @@ subdirectory of 'mysql'. */
#define UNIV_SYNC_PERF_STAT
#define UNIV_SEARCH_PERF_STAT
+
+#define UNIV_DEBUG_FILE_ACCESSES
*/
#define UNIV_LIGHT_MEM_DEBUG
diff --git a/innobase/include/ut0ut.h b/innobase/include/ut0ut.h
index f2c4781c167..1e93a2b8a36 100644
--- a/innobase/include/ut0ut.h
+++ b/innobase/include/ut0ut.h
@@ -136,6 +136,13 @@ ut_difftime(
/* out: time2 - time1 expressed in seconds */
ib_time_t time2, /* in: time */
ib_time_t time1); /* in: time */
+/**************************************************************
+Prints a timestamp to a file. */
+
+void
+ut_print_timestamp(
+/*===============*/
+ FILE* file); /* in: file where to print */
/*****************************************************************
Runs an idle loop on CPU. The argument gives the desired delay
in microseconds on 100 MHz Pentium + Visual C++. */
diff --git a/innobase/log/log0log.c b/innobase/log/log0log.c
index 46fcf400d34..31cf595e59e 100644
--- a/innobase/log/log0log.c
+++ b/innobase/log/log0log.c
@@ -2634,8 +2634,9 @@ logs_empty_and_mark_files_at_shutdown(void)
{
dulint lsn;
ulint arch_log_no;
-
- fprintf(stderr, "InnoDB: Starting shutdown...\n");
+
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Starting shutdown...\n");
/* Wait until the master thread and all other operations are idle: our
algorithm only works if the server is idle at shutdown */
@@ -2725,7 +2726,8 @@ loop:
fil_flush_file_spaces(FIL_TABLESPACE);
- fprintf(stderr, "InnoDB: Shutdown completed\n");
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Shutdown completed\n");
}
/**********************************************************
diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c
index e93cd3f0364..d16085a2d6f 100644
--- a/innobase/log/log0recv.c
+++ b/innobase/log/log0recv.c
@@ -944,9 +944,9 @@ recv_read_in_area(
}
buf_read_recv_pages(FALSE, space, page_nos, n);
-
- /* printf("Recv pages at %lu n %lu\n", page_nos[0], n); */
-
+ /*
+ printf("Recv pages at %lu n %lu\n", page_nos[0], n);
+ */
return(n);
}
diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c
index fa0c266a82a..668d74d75b5 100644
--- a/innobase/os/os0file.c
+++ b/innobase/os/os0file.c
@@ -11,6 +11,7 @@ Created 10/21/1995 Heikki Tuuri
#include "ut0mem.h"
#include "srv0srv.h"
+#undef HAVE_FDATASYNC
#ifdef POSIX_ASYNC_IO
/* We assume in this case that the OS has standard Posix aio (at least SunOS
@@ -562,6 +563,11 @@ os_file_flush(
return(TRUE);
}
+ fprintf(stderr,
+ "InnoDB: Error: the OS said file flush did not succeed\n");
+
+ os_file_handle_error(file, NULL);
+
return(FALSE);
#endif
}
diff --git a/innobase/page/page0page.c b/innobase/page/page0page.c
index 7986684fd07..511191ecd89 100644
--- a/innobase/page/page0page.c
+++ b/innobase/page/page0page.c
@@ -1199,8 +1199,16 @@ page_rec_validate(
n_owned = rec_get_n_owned(rec);
heap_no = rec_get_heap_no(rec);
- ut_a(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED);
- ut_a(heap_no < page_header_get_field(page, PAGE_N_HEAP));
+ if (!(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED)) {
+ fprintf(stderr, "Dir slot n owned too big %lu\n", n_owned);
+ return(FALSE);
+ }
+
+ if (!(heap_no < page_header_get_field(page, PAGE_N_HEAP))) {
+ fprintf(stderr, "Heap no too big %lu %lu\n", heap_no,
+ page_header_get_field(page, PAGE_N_HEAP));
+ return(FALSE);
+ }
return(TRUE);
}
@@ -1216,20 +1224,21 @@ page_validate(
dict_index_t* index) /* in: data dictionary index containing
the page record type definition */
{
+ page_dir_slot_t* slot;
mem_heap_t* heap;
+ page_cur_t cur;
byte* buf;
ulint i;
ulint count;
ulint own_count;
ulint slot_no;
ulint data_size;
- page_cur_t cur;
rec_t* rec;
rec_t* old_rec = NULL;
- page_dir_slot_t* slot;
ulint offs;
ulint n_slots;
-
+ ibool ret = FALSE;
+
heap = mem_heap_create(UNIV_PAGE_SIZE);
/* The following buffer is used to check that the
@@ -1244,8 +1253,16 @@ page_validate(
overlap. */
n_slots = page_dir_get_n_slots(page);
- ut_ad(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
- page_dir_get_nth_slot(page, n_slots - 1));
+
+ if (!(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
+ page_dir_get_nth_slot(page, n_slots - 1))) {
+ fprintf(stderr,
+ "Record heap and dir overlap on a page in index %s, %lu, %lu\n",
+ index->name, page_header_get_ptr(page, PAGE_HEAP_TOP),
+ page_dir_get_nth_slot(page, n_slots - 1));
+
+ goto func_exit;
+ }
/* Validate the record list in a loop checking also that
it is consistent with the directory. */
@@ -1259,11 +1276,20 @@ page_validate(
for (;;) {
rec = (&cur)->rec;
- page_rec_validate(rec);
+
+ if (!page_rec_validate(rec)) {
+ goto func_exit;
+ }
/* Check that the records are in the ascending order */
if ((count >= 2) && (!page_cur_is_after_last(&cur))) {
- ut_a(1 == cmp_rec_rec(rec, old_rec, index));
+ if (!(1 == cmp_rec_rec(rec, old_rec, index))) {
+ fprintf(stderr,
+ "Records in wrong order in index %s\n",
+ index->name);
+
+ goto func_exit;
+ }
}
if ((rec != page_get_supremum_rec(page))
@@ -1275,16 +1301,38 @@ page_validate(
offs = rec_get_start(rec) - page;
for (i = 0; i < rec_get_size(rec); i++) {
- ut_a(buf[offs + i] == 0); /* No other record may
- overlap this */
+ if (!buf[offs + i] == 0) {
+ /* No other record may overlap this */
+
+ fprintf(stderr,
+ "Record overlaps another in index %s \n",
+ index->name);
+
+ goto func_exit;
+ }
+
buf[offs + i] = 1;
}
if (rec_get_n_owned(rec) != 0) {
/* This is a record pointed to by a dir slot */
- ut_a(rec_get_n_owned(rec) == own_count);
+ if (rec_get_n_owned(rec) != own_count) {
+ fprintf(stderr,
+ "Wrong owned count %lu, %lu, in index %s\n",
+ rec_get_n_owned(rec), own_count,
+ index->name);
- ut_a(page_dir_slot_get_rec(slot) == rec);
+ goto func_exit;
+ }
+
+ if (page_dir_slot_get_rec(slot) != rec) {
+ fprintf(stderr,
+ "Dir slot does not point to right rec in %s\n",
+ index->name);
+
+ goto func_exit;
+ }
+
page_dir_slot_check(slot);
own_count = 0;
@@ -1297,45 +1345,89 @@ page_validate(
if (page_cur_is_after_last(&cur)) {
break;
}
-
- count++;
+
+ if (rec_get_next_offs(rec) < FIL_PAGE_DATA
+ || rec_get_next_offs(rec) >= UNIV_PAGE_SIZE) {
+ fprintf(stderr,
+ "Next record offset wrong %lu in index %s\n",
+ rec_get_next_offs(rec), index->name);
+
+ goto func_exit;
+ }
+
+ count++;
page_cur_move_to_next(&cur);
own_count++;
old_rec = rec;
}
- ut_a(rec_get_n_owned(rec) != 0);
- ut_a(slot_no == n_slots - 1);
- ut_a(page_header_get_field(page, PAGE_N_RECS) + 2 == count + 1);
+ if (rec_get_n_owned(rec) == 0) {
+ fprintf(stderr, "n owned is zero in index %s\n", index->name);
+
+ goto func_exit;
+ }
+
+ if (slot_no != n_slots - 1) {
+ fprintf(stderr, "n slots wrong %lu %lu in index %s\n",
+ slot_no, n_slots - 1, index->name);
+ goto func_exit;
+ }
+
+ if (page_header_get_field(page, PAGE_N_RECS) + 2 != count + 1) {
+ fprintf(stderr, "n recs wrong %lu %lu in index %s\n",
+ page_header_get_field(page, PAGE_N_RECS) + 2, count + 1,
+ index->name);
+
+ goto func_exit;
+ }
if (data_size != page_get_data_size(page)) {
- printf("Summed data size %lu, returned by func %lu\n",
+ fprintf(stderr, "Summed data size %lu, returned by func %lu\n",
data_size, page_get_data_size(page));
- ut_error;
+ goto func_exit;
}
/* Check then the free list */
rec = page_header_get_ptr(page, PAGE_FREE);
while (rec != NULL) {
- page_rec_validate(rec);
+ if (!page_rec_validate(rec)) {
+
+ goto func_exit;
+ }
count++;
offs = rec_get_start(rec) - page;
for (i = 0; i < rec_get_size(rec); i++) {
- ut_a(buf[offs + i] == 0);
+
+ if (buf[offs + i] != 0) {
+ fprintf(stderr,
+ "Record overlaps another in free list, index %s \n",
+ index->name);
+
+ goto func_exit;
+ }
+
buf[offs + i] = 1;
}
rec = page_rec_get_next(rec);
}
- ut_a(page_header_get_field(page, PAGE_N_HEAP) == count + 1);
-
+ if (page_header_get_field(page, PAGE_N_HEAP) != count + 1) {
+
+ fprintf(stderr, "N heap is wrong %lu %lu in index %s\n",
+ page_header_get_field(page, PAGE_N_HEAP), count + 1,
+ index->name);
+ }
+
+ ret = TRUE;
+
+func_exit:
mem_heap_free(heap);
- return(TRUE);
+ return(ret);
}
/*******************************************************************
diff --git a/innobase/rem/rem0cmp.c b/innobase/rem/rem0cmp.c
index d5208f2d486..78f4e450269 100644
--- a/innobase/rem/rem0cmp.c
+++ b/innobase/rem/rem0cmp.c
@@ -177,7 +177,9 @@ cmp_whole_field(
(int)(type->prtype & ~DATA_NOT_NULL),
a, a_length, b, b_length));
default:
- assert(0);
+ fprintf(stderr,
+ "InnoDB: unknown type number %lu\n", data_type);
+ ut_a(0);
}
return(0);
diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c
index ec24b40f5c2..8e1a584f667 100644
--- a/innobase/row/row0mysql.c
+++ b/innobase/row/row0mysql.c
@@ -24,6 +24,7 @@ Created 9/17/2000 Heikki Tuuri
#include "trx0roll.h"
#include "trx0purge.h"
#include "lock0lock.h"
+#include "rem0cmp.h"
/***********************************************************************
Reads a MySQL format variable-length field (like VARCHAR) length and
@@ -823,7 +824,11 @@ row_create_table_for_mysql(
} else {
assert(err == DB_DUPLICATE_KEY);
fprintf(stderr,
- "Innobase: error: table %s already exists in Innobase data dictionary\n",
+ "InnoDB: Error: table %s already exists in InnoDB internal\n"
+ "InnoDB: data dictionary. Have you deleted the .frm file\n"
+ "InnoDB: and not used DROP TABLE? Have you used DROP DATABASE\n"
+ "InnoDB: for InnoDB tables in MySQL version <= 3.23.39?\n"
+ "InnoDB: See the Restrictions section of the InnoDB manual.\n",
table->name);
}
@@ -1129,3 +1134,146 @@ funct_exit:
return((int) err);
}
+
+/*************************************************************************
+Checks that the index contains entries in an ascending order, unique
+constraint is not broken, and calculates the number of index entries
+in the read view of the current transaction. */
+static
+ibool
+row_scan_and_check_index(
+/*=====================*/
+ /* out: TRUE if ok */
+ row_prebuilt_t* prebuilt, /* in: prebuilt struct in MySQL */
+ dict_index_t* index, /* in: index */
+ ulint* n_rows) /* out: number of entries seen in the
+ current consistent read */
+{
+ mem_heap_t* heap;
+ dtuple_t* prev_entry = NULL;
+ ulint matched_fields;
+ ulint matched_bytes;
+ byte* buf;
+ ulint ret;
+ rec_t* rec;
+ ibool is_ok = TRUE;
+ int cmp;
+
+ *n_rows = 0;
+
+ buf = mem_alloc(UNIV_PAGE_SIZE);
+ heap = mem_heap_create(100);
+
+ /* Make a dummy template in prebuilt, which we will use
+ in scanning the index entries */
+
+ prebuilt->index = index;
+ prebuilt->sql_stat_start = TRUE;
+ prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE;
+ prebuilt->n_template = 0;
+ prebuilt->need_to_access_clustered = FALSE;
+
+ dtuple_set_n_fields(prebuilt->search_tuple, 0);
+
+ prebuilt->select_lock_type = LOCK_NONE;
+
+ ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, 0);
+loop:
+ if (ret != DB_SUCCESS) {
+
+ mem_free(buf);
+ mem_heap_free(heap);
+
+ return(is_ok);
+ }
+
+ *n_rows = *n_rows + 1;
+
+ /* row_search... returns the index record in buf, record origin offset
+ within buf stored in the first 4 bytes, because we have built a dummy
+ template */
+
+ rec = buf + mach_read_from_4(buf);
+
+ if (prev_entry != NULL) {
+ matched_fields = 0;
+ matched_bytes = 0;
+
+ cmp = cmp_dtuple_rec_with_match(prev_entry, rec,
+ &matched_fields,
+ &matched_bytes);
+ if (cmp > 0) {
+ fprintf(stderr,
+ "Error: index records in a wrong order in index %s\n",
+ index->name);
+
+ is_ok = FALSE;
+ } else if ((index->type & DICT_UNIQUE)
+ && matched_fields >=
+ dict_index_get_n_ordering_defined_by_user(index)) {
+ fprintf(stderr,
+ "Error: duplicate key in index %s\n",
+ index->name);
+
+ is_ok = FALSE;
+ }
+ }
+
+ mem_heap_empty(heap);
+
+ prev_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
+
+ ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, ROW_SEL_NEXT);
+
+ goto loop;
+}
+
+/*************************************************************************
+Checks a table for corruption. */
+
+ulint
+row_check_table_for_mysql(
+/*======================*/
+ /* out: DB_ERROR or DB_SUCCESS */
+ row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL
+ handle */
+{
+ dict_table_t* table = prebuilt->table;
+ dict_index_t* index;
+ ulint n_rows;
+ ulint n_rows_in_table;
+ ulint ret = DB_SUCCESS;
+
+ index = dict_table_get_first_index(table);
+
+ while (index != NULL) {
+ /* fprintf(stderr, "Validating index %s\n", index->name); */
+
+ if (!btr_validate_tree(index->tree)) {
+ ret = DB_ERROR;
+ } else {
+ if (!row_scan_and_check_index(prebuilt,
+ index, &n_rows)) {
+ ret = DB_ERROR;
+ }
+
+ /* fprintf(stderr, "%lu entries in index %s\n", n_rows,
+ index->name); */
+
+ if (index == dict_table_get_first_index(table)) {
+ n_rows_in_table = n_rows;
+ } else if (n_rows != n_rows_in_table) {
+
+ ret = DB_ERROR;
+
+ fprintf(stderr,
+ "Error: index %s contains %lu entries, should be %lu\n",
+ index->name, n_rows, n_rows_in_table);
+ }
+ }
+
+ index = dict_table_get_next_index(index);
+ }
+
+ return(ret);
+}
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index 58e0d053947..5599bb42a25 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -2207,11 +2207,11 @@ row_sel_get_clust_rec_for_mysql(
visit through secondary index records that would not really
exist in our snapshot. */
- if ((old_vers || rec_get_deleted_flag(rec))
+ if (clust_rec && (old_vers || rec_get_deleted_flag(rec))
&& !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
clust_rec, clust_index)) {
clust_rec = NULL;
- }
+ }
}
*out_rec = clust_rec;
@@ -2341,6 +2341,65 @@ row_sel_push_cache_row_for_mysql(
prebuilt->n_fetch_cached++;
}
+/*************************************************************************
+Tries to do a shortcut to fetch a clustered index record with a unique key,
+using the hash index if possible (not always). We assume that the search
+mode is PAGE_CUR_GE, it is a consistent read, trx has already a read view,
+btr search latch has been locked in S-mode. */
+static
+ulint
+row_sel_try_search_shortcut_for_mysql(
+/*==================================*/
+ /* out: SEL_FOUND, SEL_EXHAUSTED, SEL_RETRY */
+ rec_t** out_rec,/* out: record if found */
+ row_prebuilt_t* prebuilt,/* in: prebuilt struct */
+ mtr_t* mtr) /* in: started mtr */
+{
+ dict_index_t* index = prebuilt->index;
+ dtuple_t* search_tuple = prebuilt->search_tuple;
+ btr_pcur_t* pcur = prebuilt->pcur;
+ trx_t* trx = prebuilt->trx;
+ rec_t* rec;
+
+ ut_ad(index->type & DICT_CLUSTERED);
+
+ btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
+ BTR_SEARCH_LEAF, pcur,
+ RW_S_LATCH, mtr);
+ rec = btr_pcur_get_rec(pcur);
+
+ if (!page_rec_is_user_rec(rec)) {
+
+ return(SEL_RETRY);
+ }
+
+ /* As the cursor is now placed on a user record after a search with
+ the mode PAGE_CUR_GE, the up_match field in the cursor tells how many
+ fields in the user record matched to the search tuple */
+
+ if (btr_pcur_get_up_match(pcur) < dtuple_get_n_fields(search_tuple)) {
+
+ return(SEL_EXHAUSTED);
+ }
+
+ /* This is a non-locking consistent read: if necessary, fetch
+ a previous version of the record */
+
+ if (!lock_clust_rec_cons_read_sees(rec, index, trx->read_view)) {
+
+ return(SEL_RETRY);
+ }
+
+ if (rec_get_deleted_flag(rec)) {
+
+ return(SEL_EXHAUSTED);
+ }
+
+ *out_rec = rec;
+
+ return(SEL_FOUND);
+}
+
/************************************************************************
Searches for rows in the database. This is used in the interface to
MySQL. This function opens a cursor, and also implements fetch next
@@ -2387,6 +2446,7 @@ row_search_for_mysql(
ibool cons_read_requires_clust_rec;
ibool was_lock_wait;
ulint ret;
+ ulint shortcut;
ibool unique_search_from_clust_index = FALSE;
ibool mtr_has_extra_clust_latch = FALSE;
ibool moves_up = FALSE;
@@ -2452,6 +2512,8 @@ row_search_for_mysql(
mode = pcur->search_mode;
}
+ mtr_start(&mtr);
+
if (match_mode == ROW_SEL_EXACT && index->type & DICT_UNIQUE
&& index->type & DICT_CLUSTERED
&& dtuple_get_n_fields(search_tuple)
@@ -2464,6 +2526,8 @@ row_search_for_mysql(
restore cursor position, and must return
immediately */
+ mtr_commit(&mtr);
+
return(DB_RECORD_NOT_FOUND);
}
@@ -2472,8 +2536,51 @@ row_search_for_mysql(
mode = PAGE_CUR_GE;
unique_search_from_clust_index = TRUE;
+
+ if (trx->mysql_n_tables_locked == 0
+ && !prebuilt->sql_stat_start) {
+
+ /* This is a SELECT query done as a consistent read,
+ and the read view has already been allocated:
+ let us try a search shortcut through the hash
+ index */
+
+ if (!trx->has_search_latch) {
+ rw_lock_s_lock(&btr_search_latch);
+ trx->has_search_latch = TRUE;
+
+ } else if (btr_search_latch.writer_is_wait_ex) {
+ /* There is an x-latch request waiting:
+ release the s-latch for a moment to reduce
+ starvation */
+
+ rw_lock_s_unlock(&btr_search_latch);
+ rw_lock_s_lock(&btr_search_latch);
+ }
+
+ shortcut = row_sel_try_search_shortcut_for_mysql(&rec,
+ prebuilt, &mtr);
+ if (shortcut == SEL_FOUND) {
+ row_sel_store_mysql_rec(buf, prebuilt, rec);
+
+ mtr_commit(&mtr);
+
+ return(DB_SUCCESS);
+
+ } else if (shortcut == SEL_EXHAUSTED) {
+
+ mtr_commit(&mtr);
+
+ return(DB_RECORD_NOT_FOUND);
+ }
+ }
}
+ if (trx->has_search_latch) {
+ rw_lock_s_unlock(&btr_search_latch);
+ trx->has_search_latch = FALSE;
+ }
+
/* Note that if the search mode was GE or G, then the cursor
naturally moves upward (in fetch next) in alphabetical order,
otherwise downward */
@@ -2485,8 +2592,6 @@ row_search_for_mysql(
} else if (direction == ROW_SEL_NEXT) {
moves_up = TRUE;
}
-
- mtr_start(&mtr);
thr = que_fork_get_first_thr(prebuilt->sel_graph);
@@ -2711,7 +2816,9 @@ rec_loop:
if (prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD
&& !prebuilt->templ_contains_blob
&& prebuilt->select_lock_type == LOCK_NONE
- && !prebuilt->clust_index_was_generated) {
+ && !prebuilt->clust_index_was_generated
+ && prebuilt->template_type
+ != ROW_MYSQL_DUMMY_TEMPLATE) {
/* Inside an update, for example, we do not cache rows,
since we may use the cursor position to do the actual
@@ -2726,7 +2833,13 @@ rec_loop:
goto next_rec;
} else {
- row_sel_store_mysql_rec(buf, prebuilt, rec);
+ if (prebuilt->template_type == ROW_MYSQL_DUMMY_TEMPLATE) {
+ ut_memcpy(buf + 4, rec - rec_get_extra_size(rec),
+ rec_get_size(rec));
+ mach_write_to_4(buf, rec_get_extra_size(rec) + 4);
+ } else {
+ row_sel_store_mysql_rec(buf, prebuilt, rec);
+ }
if (prebuilt->clust_index_was_generated) {
row_sel_store_row_id_to_prebuilt(prebuilt, index_rec,
diff --git a/innobase/row/row0umod.c b/innobase/row/row0umod.c
index 70cf0fe5a32..c8db428bade 100644
--- a/innobase/row/row0umod.c
+++ b/innobase/row/row0umod.c
@@ -361,6 +361,7 @@ row_undo_mod_del_unmark_sec(
btr_cur_t* btr_cur;
ulint err;
ibool found;
+ char* err_buf;
UT_NOT_USED(node);
@@ -369,13 +370,31 @@ row_undo_mod_del_unmark_sec(
found = row_search_index_entry(index, entry, BTR_MODIFY_LEAF, &pcur,
&mtr);
- ut_a(found);
+ if (!found) {
+ err_buf = mem_alloc(1000);
+ dtuple_sprintf(err_buf, 900, entry);
- btr_cur = btr_pcur_get_btr_cur(&pcur);
+ fprintf(stderr, "InnoDB: error in sec index entry del undo in\n"
+ "InnoDB: index %s table %s\n", index->name,
+ index->table->name);
+ fprintf(stderr, "InnoDB: tuple %s\n", err_buf);
+
+ rec_sprintf(err_buf, 900, btr_pcur_get_rec(&pcur));
+ fprintf(stderr, "InnoDB: record %s\n", err_buf);
+
+ fprintf(stderr, "InnoDB: Make a detailed bug report and send it\n");
+ fprintf(stderr, "InnoDB: to mysql@lists.mysql.com\n");
+
+ mem_free(err_buf);
- err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
+ } else {
+
+ btr_cur = btr_pcur_get_btr_cur(&pcur);
+
+ err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
btr_cur, FALSE, thr, &mtr);
- ut_ad(err == DB_SUCCESS);
+ ut_ad(err == DB_SUCCESS);
+ }
btr_pcur_close(&pcur);
mtr_commit(&mtr);
diff --git a/innobase/row/row0upd.c b/innobase/row/row0upd.c
index 10dd64b8b1a..5bca2a24c01 100644
--- a/innobase/row/row0upd.c
+++ b/innobase/row/row0upd.c
@@ -750,6 +750,7 @@ row_upd_sec_index_entry(
btr_cur_t* btr_cur;
mem_heap_t* heap;
rec_t* rec;
+ char* err_buf;
ulint err = DB_SUCCESS;
index = node->index;
@@ -764,18 +765,37 @@ row_upd_sec_index_entry(
found = row_search_index_entry(index, entry, BTR_MODIFY_LEAF, &pcur,
&mtr);
- ut_ad(found);
-
btr_cur = btr_pcur_get_btr_cur(&pcur);
rec = btr_cur_get_rec(btr_cur);
- /* Delete mark the old index record; it can already be delete marked if
- we return after a lock wait in row_ins_index_entry below */
+ if (!found) {
+
+ err_buf = mem_alloc(1000);
+ dtuple_sprintf(err_buf, 900, entry);
+
+ fprintf(stderr, "InnoDB: error in sec index entry update in\n"
+ "InnoDB: index %s table %s\n", index->name,
+ index->table->name);
+ fprintf(stderr, "InnoDB: tuple %s\n", err_buf);
+
+ rec_sprintf(err_buf, 900, rec);
+ fprintf(stderr, "InnoDB: record %s\n", err_buf);
+
+ fprintf(stderr, "InnoDB: Make a detailed bug report and send it\n");
+ fprintf(stderr, "InnoDB: to mysql@lists.mysql.com\n");
+
+ mem_free(err_buf);
+ } else {
+
+ /* Delete mark the old index record; it can already be
+ delete marked if we return after a lock wait in
+ row_ins_index_entry below */
- if (!rec_get_deleted_flag(rec)) {
+ if (!rec_get_deleted_flag(rec)) {
err = btr_cur_del_mark_set_sec_rec(0, btr_cur, TRUE, thr,
&mtr);
+ }
}
btr_pcur_close(&pcur);
diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c
index 29ddf2a21c8..80fafa37adf 100644
--- a/innobase/srv/srv0start.c
+++ b/innobase/srv/srv0start.c
@@ -56,6 +56,7 @@ Created 2/16/1996 Heikki Tuuri
#include "srv0start.h"
#include "que0que.h"
+ibool srv_startup_is_before_trx_rollback_phase = FALSE;
ibool srv_is_being_started = FALSE;
ibool srv_was_started = FALSE;
@@ -531,6 +532,7 @@ innobase_start_or_create_for_mysql(void)
/* yydebug = TRUE; */
srv_is_being_started = TRUE;
+ srv_startup_is_before_trx_rollback_phase = TRUE;
if (0 == ut_strcmp(srv_unix_file_flush_method_str, "fdatasync")) {
srv_unix_file_flush_method = SRV_UNIX_FDATASYNC;
@@ -548,6 +550,9 @@ innobase_start_or_create_for_mysql(void)
return(DB_ERROR);
}
+ /*
+ printf("srv_unix set to %lu\n", srv_unix_file_flush_method);
+ */
os_aio_use_native_aio = srv_use_native_aio;
err = srv_boot();
@@ -728,6 +733,7 @@ innobase_start_or_create_for_mysql(void)
trx_sys_create();
dict_create();
+ srv_startup_is_before_trx_rollback_phase = FALSE;
} else if (srv_archive_recovery) {
fprintf(stderr,
@@ -742,9 +748,15 @@ innobase_start_or_create_for_mysql(void)
return(DB_ERROR);
}
- trx_sys_init_at_db_start();
+ /* Since ibuf init is in dict_boot, and ibuf is needed
+ in any disk i/o, first call dict_boot */
+
dict_boot();
+
+ trx_sys_init_at_db_start();
+ srv_startup_is_before_trx_rollback_phase = FALSE;
+
recv_recovery_from_archive_finish();
} else {
/* We always try to do a recovery, even if the database had
@@ -759,12 +771,15 @@ innobase_start_or_create_for_mysql(void)
return(DB_ERROR);
}
- trx_sys_init_at_db_start();
+ /* Since ibuf init is in dict_boot, and ibuf is needed
+ in any disk i/o, first call dict_boot */
dict_boot();
+ trx_sys_init_at_db_start();
/* The following needs trx lists which are initialized in
trx_sys_init_at_db_start */
-
+
+ srv_startup_is_before_trx_rollback_phase = FALSE;
recv_recovery_from_checkpoint_finish();
}
@@ -813,7 +828,8 @@ innobase_start_or_create_for_mysql(void)
/* Create the thread which watches the timeouts for lock waits */
os_thread_create(&srv_lock_timeout_monitor_thread, NULL,
thread_ids + 2 + SRV_MAX_N_IO_THREADS);
- fprintf(stderr, "InnoDB: Started\n");
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Started\n");
srv_was_started = TRUE;
srv_is_being_started = FALSE;
@@ -835,8 +851,9 @@ innobase_shutdown_for_mysql(void)
{
if (!srv_was_started) {
if (srv_is_being_started) {
+ ut_print_timestamp(stderr);
fprintf(stderr,
- "InnoDB: Warning: shutting down not properly started database\n");
+ " InnoDB: Warning: shutting down a not properly started database\n");
}
return(DB_SUCCESS);
}
diff --git a/innobase/sync/sync0sync.c b/innobase/sync/sync0sync.c
index 7153355d2a9..c3a1ac3b47f 100644
--- a/innobase/sync/sync0sync.c
+++ b/innobase/sync/sync0sync.c
@@ -166,6 +166,46 @@ struct sync_level_struct{
ulint level; /* level of the latch in the latching order */
};
+
+#if defined(__GNUC__) && defined(UNIV_INTEL_X86)
+
+ulint
+sync_gnuc_intelx86_test_and_set(
+ /* out: old value of the lock word */
+ ulint* lw) /* in: pointer to the lock word */
+{
+ ulint res;
+
+ /* In assembly we use the so-called AT & T syntax where
+ the order of operands is inverted compared to the ordinary Intel
+ syntax. The 'l' after the mnemonics denotes a 32-bit operation.
+ The line after the code tells which values come out of the asm
+ code, and the second line tells the input to the asm code. */
+
+ asm volatile("movl $1, %%eax; xchgl (%%ecx), %%eax" :
+ "=eax" (res), "=m" (*lw) :
+ "ecx" (lw));
+ return(res);
+}
+
+void
+sync_gnuc_intelx86_reset(
+ ulint* lw) /* in: pointer to the lock word */
+{
+ /* In assembly we use the so-called AT & T syntax where
+ the order of operands is inverted compared to the ordinary Intel
+ syntax. The 'l' after the mnemonics denotes a 32-bit operation. */
+
+ asm volatile("movl $0, %%eax; xchgl (%%ecx), %%eax" :
+ "=m" (*lw) :
+ "ecx" (lw) :
+ "eax"); /* gcc does not seem to understand
+ that our asm code resets eax: tell it
+ explicitly that after the third ':' */
+}
+
+#endif
+
/**********************************************************************
Creates, or rather, initializes a mutex object in a specified memory
location (which must be appropriately aligned). The mutex is initialized
diff --git a/innobase/trx/trx0roll.c b/innobase/trx/trx0roll.c
index 13e2d1869ab..6b74c0d0d51 100644
--- a/innobase/trx/trx0roll.c
+++ b/innobase/trx/trx0roll.c
@@ -176,7 +176,7 @@ trx_rollback_all_without_sess(void)
if (UT_LIST_GET_FIRST(trx_sys->trx_list)) {
fprintf(stderr,
- "Innobase: Starting rollback of uncommitted transactions\n");
+ "InnoDB: Starting rollback of uncommitted transactions\n");
} else {
return;
}
@@ -196,7 +196,7 @@ loop:
if (trx == NULL) {
fprintf(stderr,
- "Innobase: Rollback of uncommitted transactions completed\n");
+ "InnoDB: Rollback of uncommitted transactions completed\n");
mem_heap_free(heap);
@@ -221,7 +221,7 @@ loop:
ut_a(thr == que_fork_start_command(fork, SESS_COMM_EXECUTE, 0));
- fprintf(stderr, "Innobase: Rolling back trx no %lu\n",
+ fprintf(stderr, "InnoDB: Rolling back trx no %lu\n",
ut_dulint_get_low(trx->id));
mutex_exit(&kernel_mutex);
@@ -238,7 +238,7 @@ loop:
mutex_exit(&kernel_mutex);
fprintf(stderr,
- "Innobase: Waiting rollback of trx no %lu to end\n",
+ "InnoDB: Waiting rollback of trx no %lu to end\n",
ut_dulint_get_low(trx->id));
os_thread_sleep(100000);
@@ -264,7 +264,7 @@ loop:
mutex_exit(&(dict_sys->mutex));
}
- fprintf(stderr, "Innobase: Rolling back of trx no %lu completed\n",
+ fprintf(stderr, "InnoDB: Rolling back of trx no %lu completed\n",
ut_dulint_get_low(trx->id));
mem_heap_free(heap);
diff --git a/innobase/trx/trx0sys.c b/innobase/trx/trx0sys.c
index ef5eb5d9443..99ec5b50237 100644
--- a/innobase/trx/trx0sys.c
+++ b/innobase/trx/trx0sys.c
@@ -198,7 +198,7 @@ trx_sys_init_at_db_start(void)
if (UT_LIST_GET_LEN(trx_sys->trx_list) > 0) {
fprintf(stderr,
- "Innobase: %lu uncommitted transaction(s) which must be rolled back\n",
+ "InnoDB: %lu uncommitted transaction(s) which must be rolled back\n",
UT_LIST_GET_LEN(trx_sys->trx_list));
}
diff --git a/innobase/trx/trx0trx.c b/innobase/trx/trx0trx.c
index 4841711551b..14108c677eb 100644
--- a/innobase/trx/trx0trx.c
+++ b/innobase/trx/trx0trx.c
@@ -22,6 +22,7 @@ Created 3/26/1996 Heikki Tuuri
#include "read0read.h"
#include "srv0srv.h"
#include "thr0loc.h"
+#include "btr0sea.h"
/* Dummy session used currently in MySQL interface */
sess_t* trx_dummy_sess = NULL;
@@ -63,6 +64,7 @@ trx_create(
trx->dict_operation = FALSE;
trx->n_mysql_tables_in_use = 0;
+ trx->mysql_n_tables_locked = 0;
trx->ignore_duplicates_in_insert = FALSE;
@@ -96,6 +98,8 @@ trx_create(
trx->lock_heap = mem_heap_create_in_buffer(256);
UT_LIST_INIT(trx->trx_locks);
+ trx->has_search_latch = FALSE;
+
trx->read_view_heap = mem_heap_create(256);
trx->read_view = NULL;
@@ -133,6 +137,21 @@ trx_allocate_for_mysql(void)
}
/************************************************************************
+Releases the search latch if trx has reserved it. */
+
+void
+trx_search_latch_release_if_reserved(
+/*=================================*/
+ trx_t* trx) /* in: transaction */
+{
+ if (trx->has_search_latch) {
+ rw_lock_s_unlock(&btr_search_latch);
+
+ trx->has_search_latch = FALSE;
+ }
+}
+
+/************************************************************************
Frees a transaction object. */
void
@@ -149,6 +168,7 @@ trx_free(
ut_a(trx->update_undo == NULL);
ut_a(trx->n_mysql_tables_in_use == 0);
+ ut_a(trx->mysql_n_tables_locked == 0);
if (trx->undo_no_arr) {
trx_undo_arr_free(trx->undo_no_arr);
@@ -160,6 +180,8 @@ trx_free(
ut_a(trx->wait_lock == NULL);
ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);
+ ut_a(!trx->has_search_latch);
+
if (trx->lock_heap) {
mem_heap_free(trx->lock_heap);
}
diff --git a/innobase/trx/trx0undo.c b/innobase/trx/trx0undo.c
index efee02c4cad..1f408428582 100644
--- a/innobase/trx/trx0undo.c
+++ b/innobase/trx/trx0undo.c
@@ -613,6 +613,10 @@ trx_undo_insert_header_reuse(
/* Insert undo data is not needed after commit: we may free all
the space on the page */
+ ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
+ + TRX_UNDO_PAGE_TYPE)
+ == TRX_UNDO_INSERT);
+
mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);
mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);
@@ -800,7 +804,7 @@ trx_undo_free_page(
ulint hist_size;
UT_NOT_USED(hdr_offset);
- ut_ad(hdr_page_no != page_no);
+ ut_a(hdr_page_no != page_no);
ut_ad(!mutex_own(&kernel_mutex));
ut_ad(mutex_own(&(rseg->mutex)));
@@ -1411,6 +1415,10 @@ trx_undo_reuse_cached(
if (type == TRX_UNDO_INSERT) {
offset = trx_undo_insert_header_reuse(undo_page, trx_id, mtr);
} else {
+ ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
+ + TRX_UNDO_PAGE_TYPE)
+ == TRX_UNDO_UPDATE);
+
offset = trx_undo_header_create(undo_page, trx_id, mtr);
}
diff --git a/innobase/ut/ut0ut.c b/innobase/ut/ut0ut.c
index cfd714fc275..1436f6a10a3 100644
--- a/innobase/ut/ut0ut.c
+++ b/innobase/ut/ut0ut.c
@@ -49,6 +49,51 @@ ut_difftime(
return(difftime(time2, time1));
}
+/**************************************************************
+Prints a timestamp to a file. */
+
+void
+ut_print_timestamp(
+/*===============*/
+ FILE* file) /* in: file where to print */
+{
+#ifdef __WIN__
+ SYSTEMTIME cal_tm;
+
+ GetLocalTime(&cal_tm);
+
+ fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
+ (int)cal_tm.wYear % 100,
+ (int)cal_tm.wMonth,
+ (int)cal_tm.wDay,
+ (int)cal_tm.wHour,
+ (int)cal_tm.wMinute,
+ (int)cal_tm.wSecond);
+#else
+
+ struct tm cal_tm;
+ struct tm* cal_tm_ptr;
+ time_t tm;
+
+ time(&tm);
+
+#ifdef HAVE_LOCALTIME_R
+ localtime_r(&tm, &cal_tm);
+ cal_tm_ptr = &cal_tm;
+#else
+ cal_tm_ptr = localtime(&tm);
+#endif
+
+ fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
+ cal_tm_ptr->tm_year % 100,
+ cal_tm_ptr->tm_mon+1,
+ cal_tm_ptr->tm_mday,
+ cal_tm_ptr->tm_hour,
+ cal_tm_ptr->tm_min,
+ cal_tm_ptr->tm_sec);
+#endif
+}
+
/*****************************************************************
Runs an idle loop on CPU. The argument gives the desired delay
in microseconds on 100 MHz Pentium + Visual C++. */
diff --git a/isam/_dynrec.c b/isam/_dynrec.c
index 42a596fa623..2a908f5b42c 100644
--- a/isam/_dynrec.c
+++ b/isam/_dynrec.c
@@ -954,7 +954,7 @@ static int _nisam_cmp_buffer(File file, const byte *buff, ulong filepos, uint le
{
if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
goto err;
- if (memcmp((byte*) buff,temp_buff,IO_SIZE))
+ if (memcmp((byte*) buff,temp_buff,next_length))
DBUG_RETURN(1);
buff+=next_length;
length-= next_length;
diff --git a/libmysql/Makefile.am b/libmysql/Makefile.am
index d26212fa7c8..67b78f14ba2 100644
--- a/libmysql/Makefile.am
+++ b/libmysql/Makefile.am
@@ -35,12 +35,16 @@ link_sources:
set -x; \
ss=`echo $(mystringsobjects) | sed "s;\.lo;.c;g"`; \
ds=`echo $(dbugobjects) | sed "s;\.lo;.c;g"`; \
- qs=`echo $(sqlobjects) | sed "s;\.lo;.c;g"`; \
ms=`echo $(mysysobjects) | sed "s;\.lo;.c;g"`; \
+ vs=`echo $(vio_objects) | sed "s;\.lo;.c;g"`; \
for f in $$ss; do \
rm -f $(srcdir)/$$f; \
@LN_CP_F@ $(srcdir)/../strings/$$f $(srcdir)/$$f; \
done; \
+ for f in $$vs; do \
+ rm -f $(srcdir)/$$f; \
+ @LN_CP_F@ $(srcdir)/../vio/$$f $(srcdir)/$$f; \
+ done; \
for f in $(mystringsextra); do \
rm -f $(srcdir)/$$f; \
@LN_CP_F@ $(srcdir)/../strings/$$f $(srcdir)/$$f; \
diff --git a/libmysql/Makefile.shared b/libmysql/Makefile.shared
index 7b220e12346..4d3928c5bc5 100644
--- a/libmysql/Makefile.shared
+++ b/libmysql/Makefile.shared
@@ -34,9 +34,6 @@ LTCHARSET_OBJS= ${CHARSET_OBJS:.o=.lo}
target_sources = libmysql.c net.c password.c \
get_password.c errmsg.c
-#quick easy dirty hack to make it work after Tonu's changes
-#In my opinion, violite.c really belongs into mysys - Sasha
-sqlobjects = violite.lo
mystringsobjects = strmov.lo strxmov.lo strnmov.lo strmake.lo strend.lo \
strnlen.lo strfill.lo is_prefix.lo \
int2str.lo str2int.lo strinstr.lo strcont.lo \
@@ -54,7 +51,7 @@ mysysobjects1 = my_init.lo my_static.lo my_malloc.lo my_realloc.lo \
mf_pack.lo my_messnc.lo mf_dirname.lo mf_fn_ext.lo\
mf_wcomp.lo typelib.lo safemalloc.lo my_alloc.lo \
mf_format.lo mf_path.lo mf_unixpath.lo my_fopen.lo \
- my_fstream.lo \
+ my_symlink.lo my_fstream.lo \
mf_loadpath.lo my_pthread.lo my_thr_init.lo \
thr_mutex.lo mulalloc.lo string.lo default.lo \
my_compress.lo array.lo my_once.lo list.lo my_net.lo \
@@ -62,9 +59,10 @@ mysysobjects1 = my_init.lo my_static.lo my_malloc.lo my_realloc.lo \
# Not needed in the minimum library
mysysobjects2 = getopt.lo getopt1.lo getvar.lo my_lib.lo
mysysobjects = $(mysysobjects1) $(mysysobjects2)
-target_libadd = $(mysysobjects) $(mystringsobjects) $(dbugobjects)\
- $(sqlobjects)
+target_libadd = $(mysysobjects) $(mystringsobjects) $(dbugobjects) \
+ $(vio_objects)
target_ldflags = -version-info @SHARED_LIB_VERSION@
+vio_objects= vio.lo viosocket.lo
CLEANFILES = $(target_libadd) $(SHLIBOBJS) \
$(target)
DEFS = -DDEFAULT_CHARSET_HOME="\"$(MYSQLBASEdir)\"" \
@@ -79,7 +77,7 @@ clean-local:
rm -f `echo $(mystringsobjects) | sed "s;\.lo;.c;g"` \
`echo $(dbugobjects) | sed "s;\.lo;.c;g"` \
`echo $(mysysobjects) | sed "s;\.lo;.c;g"` \
- `echo $(sqlobjects) | sed "s;\.lo;.c;g"` \
+ `echo $(vio_objects) | sed "s;\.lo;.c;g"` \
$(mystringsextra) $(mysysheaders) ctype_extra_sources.c \
../linked_client_sources
diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c
index 1d95e5ac84f..55e2f8164ed 100644
--- a/libmysql/errmsg.c
+++ b/libmysql/errmsg.c
@@ -16,7 +16,7 @@
MA 02111-1307, USA */
/* Error messages for MySQL clients */
-/* error messages for the demon is in share/language/errmsg.sys */
+/* error messages for the daemon is in share/language/errmsg.sys */
#include <global.h>
#include <my_sys.h>
@@ -47,6 +47,43 @@ const char *client_errors[]=
"Can't initialize character set %-.64s (path: %-.64s)",
"Got packet bigger than 'max_allowed_packet'",
"Embedded server",
+ "Error on SHOW SLAVE STATUS:",
+ "Error on SHOW SLAVE HOSTS:",
+ "Error connecting to slave:",
+ "Error connecting to master:"
+};
+
+/* Start of code added by Roberto M. Serqueira - martinsc@uol.com.br - 05.24.2001 */
+
+#elif defined PORTUGUESE
+const char *client_errors[]=
+{
+ "Erro desconhecido do MySQL",
+ "Não pode criar 'UNIX socket' (%d)",
+ "Não pode se conectar ao servidor MySQL local através do 'socket' '%-.64s' (%d)",
+ "Não pode se conectar ao servidor MySQL em '%-.64s' (%d)",
+ "Não pode criar 'socket TCP/IP' (%d)",
+ "'Host' servidor MySQL '%-.64s' (%d) desconhecido",
+ "Servidor MySQL desapareceu",
+ "Incompatibilidade de protocolos. Versão do Servidor: %d - Versão do Cliente: %d",
+ "Cliente do MySQL com falta de memória",
+ "Informação inválida de 'host'",
+ "Localhost via 'UNIX socket'",
+ "%-.64s via 'TCP/IP'",
+ "Erro na negociação de acesso ao servidor",
+ "Conexão perdida com servidor MySQL durante 'query'",
+ "Comandos fora de sincronismo. Você não pode executar este comando agora",
+ "%-.64s via 'named pipe'",
+ "Não pode esperar pelo 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
+ "Não pode abrir 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
+ "Não pode estabelecer o estado do 'named pipe' para o 'host' %-.64s - 'pipe' %-.32s (%lu)",
+ "Não pode inicializar conjunto de caracteres %-.64s (caminho %-.64s)",
+ "Obteve pacote maior do que 'max_allowed_packet'",
+ "Embedded server",
+ "Error on SHOW SLAVE STATUS:",
+ "Error on SHOW SLAVE HOSTS:",
+ "Error connecting to slave:",
+ "Error connecting to master:"
};
#else /* ENGLISH */
@@ -74,6 +111,10 @@ const char *client_errors[]=
"Can't initialize character set %-.64s (path: %-.64s)",
"Got packet bigger than 'max_allowed_packet'",
"Embedded server",
+ "Error on SHOW SLAVE STATUS:",
+ "Error on SHOW SLAVE HOSTS:",
+ "Error connecting to slave:",
+ "Error connecting to master:"
};
#endif
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 9221812ea65..d6f5b7c523f 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -109,6 +109,12 @@ static ulong mysql_sub_escape_string(CHARSET_INFO *charset_info, char *to,
#define reset_sigpipe(mysql)
#endif
+static MYSQL* spawn_init(MYSQL* parent, const char* host,
+ unsigned int port,
+ const char* user,
+ const char* passwd);
+
+
/****************************************************************************
* A modified version of connect(). connect2() allows you to specify
* a timeout value, in seconds, that we should wait until we
@@ -678,7 +684,8 @@ static const char *default_options[]=
"init-command", "host", "database", "debug", "return-found-rows",
"ssl-key" ,"ssl-cert" ,"ssl-ca" ,"ssl-capath",
"character-set-dir", "default-character-set", "interactive-timeout",
- "connect_timeout",
+ "connect_timeout", "replication-probe", "enable-reads-from-master",
+ "repl-parse-query",
NullS
};
@@ -812,6 +819,15 @@ static void mysql_read_default_options(struct st_mysql_options *options,
case 19: /* Interactive-timeout */
options->client_flag|=CLIENT_INTERACTIVE;
break;
+ case 21: /* replication probe */
+ options->rpl_probe = 1;
+ break;
+ case 22: /* enable-reads-from-master */
+ options->rpl_parse = 1;
+ break;
+ case 23: /* repl-parse-query */
+ options->no_master_reads = 0;
+ break;
default:
DBUG_PRINT("warning",("unknown option: %s",option[0]));
}
@@ -987,6 +1003,273 @@ read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row, ulong *lengths)
return 0;
}
+/* perform query on master */
+int STDCALL mysql_master_query(MYSQL *mysql, const char *q,
+ unsigned int length)
+{
+ if(mysql_master_send_query(mysql, q, length))
+ return 1;
+ return mysql_read_query_result(mysql);
+}
+
+int STDCALL mysql_master_send_query(MYSQL *mysql, const char *q,
+ unsigned int length)
+{
+ MYSQL*master = mysql->master;
+ if (!length)
+ length = strlen(q);
+ if (!master->net.vio && !mysql_real_connect(master,0,0,0,0,0,0,0))
+ return 1;
+ mysql->last_used_con = master;
+ return simple_command(master, COM_QUERY, q, length, 1);
+}
+
+
+/* perform query on slave */
+int STDCALL mysql_slave_query(MYSQL *mysql, const char *q,
+ unsigned int length)
+{
+ if(mysql_slave_send_query(mysql, q, length))
+ return 1;
+ return mysql_read_query_result(mysql);
+}
+
+int STDCALL mysql_slave_send_query(MYSQL *mysql, const char *q,
+ unsigned int length)
+{
+ MYSQL* last_used_slave, *slave_to_use = 0;
+
+ if((last_used_slave = mysql->last_used_slave))
+ slave_to_use = last_used_slave->next_slave;
+ else
+ slave_to_use = mysql->next_slave;
+ /* next_slave is always safe to use - we have a circular list of slaves
+ if there are no slaves, mysql->next_slave == mysql
+ */
+ mysql->last_used_con = mysql->last_used_slave = slave_to_use;
+ if(!length)
+ length = strlen(q);
+ if(!slave_to_use->net.vio && !mysql_real_connect(slave_to_use, 0,0,0,
+ 0,0,0,0))
+ return 1;
+ return simple_command(slave_to_use, COM_QUERY, q, length, 1);
+}
+
+
+/* enable/disable parsing of all queries to decide
+ if they go on master or slave */
+void STDCALL mysql_enable_rpl_parse(MYSQL* mysql)
+{
+ mysql->options.rpl_parse = 1;
+}
+
+void STDCALL mysql_disable_rpl_parse(MYSQL* mysql)
+{
+ mysql->options.rpl_parse = 0;
+}
+
+/* get the value of the parse flag */
+int STDCALL mysql_rpl_parse_enabled(MYSQL* mysql)
+{
+ return mysql->options.rpl_parse;
+}
+
+/* enable/disable reads from master */
+void STDCALL mysql_enable_reads_from_master(MYSQL* mysql)
+{
+ mysql->options.no_master_reads = 0;
+}
+
+void STDCALL mysql_disable_reads_from_master(MYSQL* mysql)
+{
+ mysql->options.no_master_reads = 1;
+}
+
+/* get the value of the master read flag */
+int STDCALL mysql_reads_from_master_enabled(MYSQL* mysql)
+{
+ return !(mysql->options.no_master_reads);
+}
+
+/* We may get an error while doing replication internals.
+ In this case, we add a special explanation to the original
+ error
+*/
+static inline void expand_error(MYSQL* mysql, int error)
+{
+ char tmp[MYSQL_ERRMSG_SIZE];
+ char* p, *tmp_end;
+ tmp_end = strnmov(tmp, mysql->net.last_error, MYSQL_ERRMSG_SIZE-1);
+ p = strnmov(mysql->net.last_error, ER(error), MYSQL_ERRMSG_SIZE-1);
+ memcpy(p, tmp, min((uint) (tmp_end - tmp), (uint) (MYSQL_ERRMSG_SIZE - 1 - (p - mysql->net.last_error))));
+ mysql->net.last_errno = error;
+}
+
+/* This function assumes we have just called SHOW SLAVE STATUS and have
+ read the given result and row
+*/
+static inline int get_master(MYSQL* mysql, MYSQL_RES* res, MYSQL_ROW row)
+{
+ MYSQL* master;
+ if(mysql_num_fields(res) < 3)
+ return 1; /* safety */
+
+ /* use the same username and password as the original connection */
+ if(!(master = spawn_init(mysql, row[0], atoi(row[2]), 0, 0)))
+ return 1;
+ mysql->master = master;
+ return 0;
+}
+
+/* assuming we already know that mysql points to a master connection,
+ retrieve all the slaves
+*/
+static inline int get_slaves_from_master(MYSQL* mysql)
+{
+ MYSQL_RES* res = 0;
+ MYSQL_ROW row;
+ int error = 1;
+ int has_auth_info;
+ if (!mysql->net.vio && !mysql_real_connect(mysql,0,0,0,0,0,0,0))
+ {
+ expand_error(mysql, CR_PROBE_MASTER_CONNECT);
+ return 1;
+ }
+
+ if (mysql_query(mysql, "SHOW SLAVE HOSTS") ||
+ !(res = mysql_store_result(mysql)))
+ {
+ expand_error(mysql, CR_PROBE_SLAVE_HOSTS);
+ return 1;
+ }
+
+ switch (mysql_num_fields(res))
+ {
+ case 3: has_auth_info = 0; break;
+ case 5: has_auth_info = 1; break;
+ default:
+ goto err;
+ }
+
+ while ((row = mysql_fetch_row(res)))
+ {
+ MYSQL* slave;
+ const char* tmp_user, *tmp_pass;
+
+ if (has_auth_info)
+ {
+ tmp_user = row[3];
+ tmp_pass = row[4];
+ }
+ else
+ {
+ tmp_user = mysql->user;
+ tmp_pass = mysql->passwd;
+ }
+
+ if(!(slave = spawn_init(mysql, row[1], atoi(row[2]),
+ tmp_user, tmp_pass)))
+ goto err;
+
+ /* Now add slave into the circular linked list */
+ slave->next_slave = mysql->next_slave;
+ mysql->next_slave = slave;
+ }
+ error = 0;
+err:
+ if(res)
+ mysql_free_result(res);
+ return error;
+}
+
+int STDCALL mysql_rpl_probe(MYSQL* mysql)
+{
+ MYSQL_RES* res = 0;
+ MYSQL_ROW row;
+ int error = 1;
+ /* first determine the replication role of the server we connected to
+ the most reliable way to do this is to run SHOW SLAVE STATUS and see
+ if we have a non-empty master host. This is still not fool-proof -
+ it is not a sin to have a master that has a dormant slave thread with
+ a non-empty master host. However, it is more reliable to check
+ for empty master than whether the slave thread is actually running
+ */
+ if (mysql_query(mysql, "SHOW SLAVE STATUS") ||
+ !(res = mysql_store_result(mysql)))
+ {
+ expand_error(mysql, CR_PROBE_SLAVE_STATUS);
+ return 1;
+ }
+
+ if (!(row = mysql_fetch_row(res)))
+ goto err;
+
+ /* check master host for emptiness/NULL */
+ if (row[0] && *(row[0]))
+ {
+ /* this is a slave, ask it for the master */
+ if (get_master(mysql, res, row) || get_slaves_from_master(mysql))
+ goto err;
+ }
+ else
+ {
+ mysql->master = mysql;
+ if (get_slaves_from_master(mysql))
+ goto err;
+ }
+
+ error = 0;
+err:
+ if(res)
+ mysql_free_result(res);
+ return error;
+}
+
+
+/* make a not so fool-proof decision on where the query should go, to
+ the master or the slave. Ideally the user should always make this
+ decision himself with mysql_master_query() or mysql_slave_query().
+ However, to be able to more easily port the old code, we support the
+ option of an educated guess - this should work for most applications,
+ however, it may make the wrong decision in some particular cases. If
+ that happens, the user would have to change the code to call
+ mysql_master_query() or mysql_slave_query() explicitly in the place
+ where we have made the wrong decision
+*/
+enum mysql_rpl_type
+STDCALL mysql_rpl_query_type(const char* q, int len)
+{
+ const char* q_end;
+ q_end = (len) ? q + len : strend(q);
+ for(; q < q_end; ++q)
+ {
+ char c;
+ if(isalpha(c=*q))
+ switch(tolower(c))
+ {
+ case 'i': /* insert */
+ case 'u': /* update or unlock tables */
+ case 'l': /* lock tables or load data infile */
+ case 'd': /* drop or delete */
+ case 'a': /* alter */
+ return MYSQL_RPL_MASTER;
+ case 'c': /* create or check */
+ return tolower(q[1]) == 'h' ? MYSQL_RPL_ADMIN : MYSQL_RPL_MASTER ;
+ case 's': /* select or show */
+ return tolower(q[1]) == 'h' ? MYSQL_RPL_ADMIN : MYSQL_RPL_SLAVE;
+ case 'f': /* flush */
+ case 'r': /* repair */
+ case 'g': /* grant */
+ return MYSQL_RPL_ADMIN;
+ default:
+ return MYSQL_RPL_SLAVE;
+ }
+ }
+
+ return 0;
+}
+
+
/****************************************************************************
** Init MySQL structure or allocate one
****************************************************************************/
@@ -1005,6 +1288,12 @@ mysql_init(MYSQL *mysql)
else
bzero((char*) (mysql),sizeof(*(mysql)));
mysql->options.connect_timeout=CONNECT_TIMEOUT;
+ mysql->last_used_con = mysql->next_slave = mysql->master = mysql;
+ mysql->last_used_slave = 0;
+ /* By default, we are a replication pivot. The caller must reset it
+ after we return if this is not the case.
+ */
+ mysql->rpl_pivot = 1;
#if defined(SIGPIPE) && defined(THREAD)
if (!((mysql)->client_flag & CLIENT_IGNORE_SIGPIPE))
(void) signal(SIGPIPE,pipe_sig_handler);
@@ -1070,13 +1359,15 @@ mysql_ssl_set(MYSQL *mysql, const char *key, const char *cert,
mysql->options.ssl_cert = cert==0 ? 0 : my_strdup(cert,MYF(0));
mysql->options.ssl_ca = ca==0 ? 0 : my_strdup(ca,MYF(0));
mysql->options.ssl_capath = capath==0 ? 0 : my_strdup(capath,MYF(0));
- mysql->options.use_ssl = true;
- mysql->connector_fd = new_VioSSLConnectorFd(key, cert, ca, capath);
+ mysql->options.use_ssl = TRUE;
+ mysql->connector_fd = (gptr)new_VioSSLConnectorFd(key, cert, ca, capath);
+ DBUG_PRINT("info",("mysql_ssl_set, context: %p",((struct st_VioSSLConnectorFd *)(mysql->connector_fd))->ssl_context_));
+
return 0;
}
/**************************************************************************
-**************************************************************************/
+**************************************************************************
char * STDCALL
mysql_ssl_cipher(MYSQL *mysql)
@@ -1085,10 +1376,10 @@ mysql_ssl_cipher(MYSQL *mysql)
}
-/**************************************************************************
+**************************************************************************
** Free strings in the SSL structure and clear 'use_ssl' flag.
** NB! Errors are not reported until you do mysql_real_connect.
-**************************************************************************/
+**************************************************************************
int STDCALL
mysql_ssl_clear(MYSQL *mysql)
@@ -1105,7 +1396,7 @@ mysql_ssl_clear(MYSQL *mysql)
mysql->connector_fd->delete();
mysql->connector_fd = 0;
return 0;
-}
+}*/
#endif /* HAVE_OPENSSL */
/**************************************************************************
@@ -1496,11 +1787,9 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
goto error;
/* Do the SSL layering. */
DBUG_PRINT("info", ("IO layer change in progress..."));
- VioSSLConnectorFd* connector_fd = (VioSSLConnectorFd*)
- (mysql->connector_fd);
- VioSocket* vio_socket = (VioSocket*)(mysql->net.vio);
- VioSSL* vio_ssl = connector_fd->connect(vio_socket);
- mysql->net.vio = (NetVio*)(vio_ssl);
+ DBUG_PRINT("info", ("IO context %p",((struct st_VioSSLConnectorFd*)mysql->connector_fd)->ssl_context_));
+ mysql->net.vio = sslconnect((struct st_VioSSLConnectorFd*)(mysql->connector_fd),mysql->net.vio);
+ DBUG_PRINT("info", ("IO layer change done!"));
}
#endif /* HAVE_OPENSSL */
@@ -1542,6 +1831,9 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
mysql->reconnect=reconnect;
}
+ if (mysql->options.rpl_probe && mysql_rpl_probe(mysql))
+ goto error;
+
DBUG_PRINT("exit",("Mysql handler: %lx",mysql));
reset_sigpipe(mysql);
DBUG_RETURN(mysql);
@@ -1680,9 +1972,24 @@ mysql_close(MYSQL *mysql)
bzero((char*) &mysql->options,sizeof(mysql->options));
mysql->net.vio = 0;
#ifdef HAVE_OPENSSL
- ((VioConnectorFd*)(mysql->connector_fd))->delete();
- mysql->connector_fd = 0;
+/* ((VioConnectorFd*)(mysql->connector_fd))->delete();
+ mysql->connector_fd = 0;*/
#endif /* HAVE_OPENSSL */
+
+ /* free/close slave list */
+ if (mysql->rpl_pivot)
+ {
+ MYSQL* tmp;
+ for (tmp = mysql->next_slave; tmp != mysql; )
+ {
+ /* trick to avoid following freed pointer */
+ MYSQL* tmp1 = tmp->next_slave;
+ mysql_close(tmp);
+ tmp = tmp1;
+ }
+ }
+ if(mysql != mysql->master)
+ mysql_close(mysql->master);
if (mysql->free_me)
my_free((gptr) mysql,MYF(0));
}
@@ -1701,6 +2008,67 @@ mysql_query(MYSQL *mysql, const char *query)
return mysql_real_query(mysql,query, (uint) strlen(query));
}
+static MYSQL* spawn_init(MYSQL* parent, const char* host,
+ unsigned int port,
+ const char* user,
+ const char* passwd)
+{
+ MYSQL* child;
+ if (!(child = mysql_init(0)))
+ return 0;
+
+ child->options.user = my_strdup((user) ? user :
+ (parent->user ? parent->user :
+ parent->options.user), MYF(0));
+ child->options.password = my_strdup((passwd) ? passwd : (parent->passwd ?
+ parent->passwd :
+ parent->options.password), MYF(0));
+ child->options.port = port;
+ child->options.host = my_strdup((host) ? host : (parent->host ?
+ parent->host :
+ parent->options.host), MYF(0));
+ if(parent->db)
+ child->options.db = my_strdup(parent->db, MYF(0));
+ else if(parent->options.db)
+ child->options.db = my_strdup(parent->options.db, MYF(0));
+
+ child->options.rpl_parse = child->options.rpl_probe = child->rpl_pivot = 0;
+
+ return child;
+}
+
+
+int
+STDCALL mysql_set_master(MYSQL* mysql, const char* host,
+ unsigned int port,
+ const char* user,
+ const char* passwd)
+{
+ if (mysql->master != mysql && !mysql->master->rpl_pivot)
+ mysql_close(mysql->master);
+ if(!(mysql->master = spawn_init(mysql, host, port, user, passwd)))
+ return 1;
+ mysql->master->rpl_pivot = 0;
+ mysql->master->options.rpl_parse = 0;
+ mysql->master->options.rpl_probe = 0;
+ return 0;
+}
+
+int
+STDCALL mysql_add_slave(MYSQL* mysql, const char* host,
+ unsigned int port,
+ const char* user,
+ const char* passwd)
+{
+ MYSQL* slave;
+ if(!(slave = spawn_init(mysql, host, port, user, passwd)))
+ return 1;
+ slave->next_slave = mysql->next_slave;
+ mysql->next_slave = slave;
+ return 0;
+}
+
+
/*
Send the query and return so we can do something else.
Needs to be followed by mysql_read_query_result() when we want to
@@ -1710,6 +2078,20 @@ mysql_query(MYSQL *mysql, const char *query)
int STDCALL
mysql_send_query(MYSQL* mysql, const char* query, uint length)
{
+ if (mysql->options.rpl_parse && mysql->rpl_pivot)
+ {
+ switch (mysql_rpl_query_type(query, length))
+ {
+ case MYSQL_RPL_MASTER:
+ return mysql_master_send_query(mysql, query, length);
+ case MYSQL_RPL_SLAVE:
+ return mysql_slave_send_query(mysql, query, length);
+ case MYSQL_RPL_ADMIN: break; /* fall out of switch: use default connection */
+ }
+ }
+
+ mysql->last_used_con = mysql;
+
return simple_command(mysql, COM_QUERY, query, length, 1);
}
@@ -1721,6 +2103,11 @@ int STDCALL mysql_read_query_result(MYSQL *mysql)
uint length;
DBUG_ENTER("mysql_read_query_result");
+ /* read from the connection which we actually used, which
+ could differ from the original connection if we have slaves
+ */
+ mysql = mysql->last_used_con;
+
if ((length = net_safe_read(mysql)) == packet_error)
DBUG_RETURN(-1);
free_old_query(mysql); /* Free old result */
@@ -1767,7 +2154,8 @@ mysql_real_query(MYSQL *mysql, const char *query, uint length)
DBUG_ENTER("mysql_real_query");
DBUG_PRINT("enter",("handle: %lx",mysql));
DBUG_PRINT("query",("Query = \"%s\"",query));
- if (simple_command(mysql,COM_QUERY,query,length,1))
+
+ if (mysql_send_query(mysql,query,length))
DBUG_RETURN(-1);
DBUG_RETURN(mysql_read_query_result(mysql));
}
@@ -1839,6 +2227,9 @@ mysql_store_result(MYSQL *mysql)
MYSQL_RES *result;
DBUG_ENTER("mysql_store_result");
+ /* read from the actually used connection */
+ mysql = mysql->last_used_con;
+
if (!mysql->fields)
DBUG_RETURN(0);
if (mysql->status != MYSQL_STATUS_GET_RESULT)
@@ -1891,6 +2282,8 @@ mysql_use_result(MYSQL *mysql)
MYSQL_RES *result;
DBUG_ENTER("mysql_use_result");
+ mysql = mysql->last_used_con;
+
if (!mysql->fields)
DBUG_RETURN(0);
if (mysql->status != MYSQL_STATUS_GET_RESULT)
@@ -2344,32 +2737,32 @@ uint STDCALL mysql_field_tell(MYSQL_RES *res)
unsigned int STDCALL mysql_field_count(MYSQL *mysql)
{
- return mysql->field_count;
+ return mysql->last_used_con->field_count;
}
my_ulonglong STDCALL mysql_affected_rows(MYSQL *mysql)
{
- return (mysql)->affected_rows;
+ return mysql->last_used_con->affected_rows;
}
my_ulonglong STDCALL mysql_insert_id(MYSQL *mysql)
{
- return (mysql)->insert_id;
+ return mysql->last_used_con->insert_id;
}
uint STDCALL mysql_errno(MYSQL *mysql)
{
- return (mysql)->net.last_errno;
+ return mysql->net.last_errno;
}
char * STDCALL mysql_error(MYSQL *mysql)
{
- return (mysql)->net.last_error;
+ return mysql->net.last_error;
}
char *STDCALL mysql_info(MYSQL *mysql)
{
- return (mysql)->info;
+ return mysql->info;
}
ulong STDCALL mysql_thread_id(MYSQL *mysql)
diff --git a/libmysql/net.c b/libmysql/net.c
index 44c082c1693..0fc5cf8b50c 100644
--- a/libmysql/net.c
+++ b/libmysql/net.c
@@ -31,6 +31,7 @@
#include <winsock.h>
#endif
#include <global.h>
+#include <mysql_com.h>
#include <violite.h>
#include <my_sys.h>
#include <m_string.h>
@@ -39,7 +40,6 @@
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
-#include <violite.h>
#include <assert.h>
#ifdef MYSQL_SERVER
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index 8d8ac3c9a21..aa358958a1b 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -54,7 +54,7 @@ sqlsources = convert.cc derror.cc field.cc field_conv.cc filesort.cc \
sql_rename.cc sql_repl.cc sql_select.cc sql_show.cc \
sql_string.cc sql_table.cc sql_test.cc sql_udf.cc \
sql_update.cc sql_yacc.cc table.cc thr_malloc.cc time.cc \
- unireg.cc
+ unireg.cc uniques.cc stacktrace.c sql_unions.cc hash_filo.cc
## XXX: we should not have to duplicate info from the sources list
sqlobjects = convert.lo derror.lo field.lo field_conv.lo filesort.lo \
@@ -72,7 +72,7 @@ sqlobjects = convert.lo derror.lo field.lo field_conv.lo filesort.lo \
sql_rename.lo sql_repl.lo sql_select.lo sql_show.lo \
sql_string.lo sql_table.lo sql_test.lo sql_udf.lo \
sql_update.lo sql_yacc.lo table.lo thr_malloc.lo time.lo \
- unireg.lo
+ unireg.lo uniques.lo stacktrace.lo sql_unions.lo hash_filo.lo
EXTRA_DIST = lib_vio.c
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index b2889079ac8..a633d6f583b 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -50,8 +50,8 @@ void free_defaults_internal(char ** argv){if (argv) free_defaults(argv);}
char mysql_data_home[FN_REFLEN];
char * get_mysql_data_home(){return mysql_data_home;};
#define mysql_data_home mysql_data_home_internal
-#include "../sql/mysqld.cc"
#include "lib_vio.c"
+#include "../sql/mysqld.cc"
#define SCRAMBLE_LENGTH 8
extern "C" {
@@ -72,8 +72,13 @@ get_mysql_real_data_home(){ return mysql_real_data_home;};
bool lib_dispatch_command(enum enum_server_command command, NET *net,
const char *arg, ulong length)
{
- net_new_transaction(&((THD *)net->vio->dest_thd)->net);
- return dispatch_command(command, (THD *)net->vio->dest_thd, (char *)arg, length + 1);
+ THD *thd=(THD *) net->vio->dest_thd;
+ thd->store_globals(); // Fix if more than one connect
+ thd->net.last_error[0]=0; // Clear error message
+ thd->net.last_errno=0;
+
+ net_new_transaction(&thd->net);
+ return dispatch_command(command, thd, (char *) arg, length + 1);
}
@@ -81,17 +86,17 @@ bool lib_dispatch_command(enum enum_server_command command, NET *net,
void
lib_connection_phase(NET * net, int phase)
{
- THD * thd;
- thd = (THD *)(net->vio->dest_thd);
- if (thd)
- {
- switch (phase)
- {
- case 2:
- check_connections2(thd);
- break;
- }
- }
+ THD * thd;
+ thd = (THD *)(net->vio->dest_thd);
+ if (thd)
+ {
+ switch (phase)
+ {
+ case 2:
+ check_connections2(thd);
+ break;
+ }
+ }
}
}
void start_embedded_conn1(NET * net)
@@ -116,7 +121,7 @@ void start_embedded_conn1(NET * net)
thd->net.vio = v;
if (thd->store_globals())
{
- printf("store_globals failed.\n");
+ fprintf(stderr,"store_globals failed.\n");
return;
}
@@ -600,7 +605,7 @@ void embedded_srv_init(void)
}
//printf(ER(ER_READY),my_progname,server_version,"");
- printf("%s initialized.\n", server_version);
+ //printf("%s initialized.\n", server_version);
fflush(stdout);
diff --git a/libmysqld/lib_vio.c b/libmysqld/lib_vio.c
index 3238fd59699..fd4eb4ac29f 100644
--- a/libmysqld/lib_vio.c
+++ b/libmysqld/lib_vio.c
@@ -139,6 +139,8 @@ int vio_read(Vio * vio, gptr buf, int size)
uint4korr(vio->packets + sizeof(char *));
vio->packets = *(char **)vio->packets;
}
+ if (vio->where_in_packet + size > vio->end_of_packet)
+ size = vio->end_of_packet - vio->where_in_packet;
memcpy(buf, vio->where_in_packet, size);
vio->where_in_packet += size;
return (size);
@@ -233,4 +235,9 @@ void vio_in_addr(Vio *vio, struct in_addr *in)
{
}
+my_bool vio_poll_read(Vio *vio,uint timeout)
+{
+ return 0;
+}
+
#endif /* HAVE_VIO */
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index 732c102c640..216261e220d 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -100,109 +100,6 @@ static ulong mysql_sub_escape_string(CHARSET_INFO *charset_info, char *to,
#define set_sigpipe(mysql)
#define reset_sigpipe(mysql)
-#if 0
-/****************************************************************************
-* A modified version of connect(). connect2() allows you to specify
-* a timeout value, in seconds, that we should wait until we
-* derermine we can't connect to a particular host. If timeout is 0,
-* connect2() will behave exactly like connect().
-*
-* Base version coded by Steve Bernacki, Jr. <steve@navinet.net>
-*****************************************************************************/
-
-static int connect2(my_socket s, const struct sockaddr *name, uint namelen,
- uint to)
-{
-#if defined(__WIN__)
- return connect(s, (struct sockaddr*) name, namelen);
-#else
- int flags, res, s_err;
- size_socket s_err_size = sizeof(uint);
- fd_set sfds;
- struct timeval tv;
- time_t start_time, now_time;
-
- /* If they passed us a timeout of zero, we should behave
- * exactly like the normal connect() call does.
- */
-
- if (to == 0)
- return connect(s, (struct sockaddr*) name, namelen);
-
- flags = fcntl(s, F_GETFL, 0); /* Set socket to not block */
-#ifdef O_NONBLOCK
- fcntl(s, F_SETFL, flags | O_NONBLOCK); /* and save the flags.. */
-#endif
-
- res = connect(s, (struct sockaddr*) name, namelen);
- s_err = errno; /* Save the error... */
- fcntl(s, F_SETFL, flags);
- if ((res != 0) && (s_err != EINPROGRESS))
- {
- errno = s_err; /* Restore it */
- return(-1);
- }
- if (res == 0) /* Connected quickly! */
- return(0);
-
- /* Otherwise, our connection is "in progress." We can use
- * the select() call to wait up to a specified period of time
- * for the connection to suceed. If select() returns 0
- * (after waiting howevermany seconds), our socket never became
- * writable (host is probably unreachable.) Otherwise, if
- * select() returns 1, then one of two conditions exist:
- *
- * 1. An error occured. We use getsockopt() to check for this.
- * 2. The connection was set up sucessfully: getsockopt() will
- * return 0 as an error.
- *
- * Thanks goes to Andrew Gierth <andrew@erlenstar.demon.co.uk>
- * who posted this method of timing out a connect() in
- * comp.unix.programmer on August 15th, 1997.
- */
-
- FD_ZERO(&sfds);
- FD_SET(s, &sfds);
- /*
- * select could be interrupted by a signal, and if it is,
- * the timeout should be adjusted and the select restarted
- * to work around OSes that don't restart select and
- * implementations of select that don't adjust tv upon
- * failure to reflect the time remaining
- */
- start_time = time(NULL);
- for (;;)
- {
- tv.tv_sec = (long) to;
- tv.tv_usec = 0;
- if ((res = select(s+1, NULL, &sfds, NULL, &tv)) >= 0)
- break;
- now_time=time(NULL);
- to-= (uint) (now_time - start_time);
- if (errno != EINTR || (int) to <= 0)
- return -1;
- }
-
- /* select() returned something more interesting than zero, let's
- * see if we have any errors. If the next two statements pass,
- * we've got an open socket!
- */
-
- s_err=0;
- if (getsockopt(s, SOL_SOCKET, SO_ERROR, (char*) &s_err, &s_err_size) != 0)
- return(-1);
-
- if (s_err)
- { /* getsockopt() could suceed */
- errno = s_err;
- return(-1); /* but return an error... */
- }
- return(0); /* It's all good! */
-#endif
-}
-#endif /* 0 */
-
-
/*****************************************************************************
** read a packet from server. Give error message if socket was down
** or packet is an error message
@@ -343,25 +240,15 @@ simple_command(MYSQL *mysql,enum enum_server_command command, const char *arg,
{
NET *net= &mysql->net;
int result= -1;
- init_sigpipe_variables
- /* Don't give sigpipe errors if the client doesn't want them */
- set_sigpipe(mysql);
- if (mysql->net.vio == 0)
- { /* Do reconnect if possible */
- if (mysql_reconnect(mysql))
- {
- net->last_errno=CR_SERVER_GONE_ERROR;
- strmov(net->last_error,ER(net->last_errno));
- goto end;
- }
- }
+ /* Check that we are calling the client functions in right order */
if (mysql->status != MYSQL_STATUS_READY)
{
strmov(net->last_error,ER(mysql->net.last_errno=CR_COMMANDS_OUT_OF_SYNC));
goto end;
}
+ /* Clear result variables */
mysql->net.last_error[0]=0;
mysql->net.last_errno=0;
mysql->info=0;
@@ -371,32 +258,11 @@ simple_command(MYSQL *mysql,enum enum_server_command command, const char *arg,
net_clear(net);
vio_reset(net->vio);
- if (!arg)
- arg="";
-
- result = lib_dispatch_command(command, net, arg,
- length ? length : (ulong) strlen(arg));
-#if 0
- if (net_write_command(net,(uchar) command,arg,
- length ? length : (ulong) strlen(arg)))
- {
- DBUG_PRINT("error",("Can't send command to server. Error: %d",errno));
- end_server(mysql);
- if (mysql_reconnect(mysql) ||
- net_write_command(net,(uchar) command,arg,
- length ? length : (ulong) strlen(arg)))
- {
- net->last_errno=CR_SERVER_GONE_ERROR;
- strmov(net->last_error,ER(net->last_errno));
- goto end;
- }
- }
-#endif
+ result = lib_dispatch_command(command, net, arg,length);
if (!skipp_check)
result= ((mysql->packet_length=net_safe_read(mysql)) == packet_error ?
-1 : 0);
end:
- reset_sigpipe(mysql);
return result;
}
@@ -1280,7 +1146,7 @@ mysql_close(MYSQL *mysql)
{
free_old_query(mysql);
mysql->status=MYSQL_STATUS_READY; /* Force command */
- simple_command(mysql,COM_QUIT,NullS,0,1);
+ simple_command(mysql,COM_QUIT,"",0,1);
end_server(mysql);
}
my_free((gptr) mysql->host_info,MYF(MY_ALLOW_ZERO_PTR));
@@ -1323,22 +1189,26 @@ mysql_query(MYSQL *mysql, const char *query)
return mysql_real_query(mysql,query, (uint) strlen(query));
}
+int STDCALL
+mysql_send_query(MYSQL* mysql, const char* query, uint length)
+{
+ return simple_command(mysql, COM_QUERY, query, length, 1);
+}
+
int STDCALL
-mysql_real_query(MYSQL *mysql, const char *query, uint length)
+mysql_read_query_result(MYSQL *mysql)
{
uchar *pos;
ulong field_count;
MYSQL_DATA *fields;
- DBUG_ENTER("mysql_real_query");
- DBUG_PRINT("enter",("handle: %lx",mysql));
- DBUG_PRINT("query",("Query = \"%s\"",query));
+ uint length;
+ DBUG_ENTER("mysql_read_query_result");
- if (simple_command(mysql,COM_QUERY,query,length,1) ||
- (length=net_safe_read(mysql)) == packet_error)
+ if ((length=net_safe_read(mysql)) == packet_error)
DBUG_RETURN(-1);
free_old_query(mysql); /* Free old result */
- get_info:
+get_info:
pos=(uchar*) mysql->net.read_pos;
if ((field_count= net_field_length(&pos)) == 0)
{
@@ -1375,6 +1245,17 @@ mysql_real_query(MYSQL *mysql, const char *query, uint length)
DBUG_RETURN(0);
}
+int STDCALL
+mysql_real_query(MYSQL *mysql, const char *query, uint length)
+{
+ DBUG_ENTER("mysql_real_query");
+ DBUG_PRINT("enter",("handle: %lx",mysql));
+ DBUG_PRINT("query",("Query = \"%s\"",query));
+ if (mysql_send_query(mysql, query, length))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(mysql_read_query_result(mysql));
+}
+
static int
send_file_to_server(MYSQL *mysql, const char *filename)
@@ -1741,7 +1622,7 @@ mysql_list_processes(MYSQL *mysql)
DBUG_ENTER("mysql_list_processes");
LINT_INIT(fields);
- if (simple_command(mysql,COM_PROCESS_INFO,0,0,0))
+ if (simple_command(mysql,COM_PROCESS_INFO,"",0,0))
DBUG_RETURN(0);
free_old_query(mysql);
pos=(uchar*) mysql->net.read_pos;
@@ -1780,7 +1661,7 @@ int STDCALL
mysql_shutdown(MYSQL *mysql)
{
DBUG_ENTER("mysql_shutdown");
- DBUG_RETURN(simple_command(mysql,COM_SHUTDOWN,0,0,0));
+ DBUG_RETURN(simple_command(mysql,COM_SHUTDOWN,"",0,0));
}
@@ -1807,14 +1688,14 @@ int STDCALL
mysql_dump_debug_info(MYSQL *mysql)
{
DBUG_ENTER("mysql_dump_debug_info");
- DBUG_RETURN(simple_command(mysql,COM_DEBUG,0,0,0));
+ DBUG_RETURN(simple_command(mysql,COM_DEBUG,"",0,0));
}
char * STDCALL
mysql_stat(MYSQL *mysql)
{
DBUG_ENTER("mysql_stat");
- if (simple_command(mysql,COM_STATISTICS,0,0,0))
+ if (simple_command(mysql,COM_STATISTICS,"",0,0))
return mysql->net.last_error;
mysql->net.read_pos[mysql->packet_length]=0; /* End of stat string */
if (!mysql->net.read_pos[0])
@@ -1831,7 +1712,7 @@ int STDCALL
mysql_ping(MYSQL *mysql)
{
DBUG_ENTER("mysql_ping");
- DBUG_RETURN(simple_command(mysql,COM_PING,0,0,0));
+ DBUG_RETURN(simple_command(mysql,COM_PING,"",0,0));
}
diff --git a/man/Makefile.am b/man/Makefile.am
index 186fc01685e..7019d2aa865 100644
--- a/man/Makefile.am
+++ b/man/Makefile.am
@@ -19,7 +19,7 @@
man_MANS = mysql.1 isamchk.1 isamlog.1 mysql_zap.1 mysqlaccess.1 \
mysqladmin.1 mysqld.1 mysqld_multi.1 mysqldump.1 mysqlshow.1 \
- perror.1 replace.1 safe_mysqld.1
+ perror.1 replace.1 mysqld_safe.1
EXTRA_DIST = $(man_MANS)
diff --git a/man/isamchk.1 b/man/isamchk.1
index f225dc35d18..2552d9f80cd 100644
--- a/man/isamchk.1
+++ b/man/isamchk.1
@@ -125,7 +125,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/isamlog.1 b/man/isamlog.1
index efc042ccd7c..ef6ceaff8da 100644
--- a/man/isamlog.1
+++ b/man/isamlog.1
@@ -83,7 +83,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
diff --git a/man/mysql.1 b/man/mysql.1
index 96ccca8f50e..e10fd589092 100644
--- a/man/mysql.1
+++ b/man/mysql.1
@@ -111,7 +111,7 @@ executable
.I /depot/bin/mysqld
executable
.TP
-.I /depot/bin/safe_mysqld
+.I /depot/bin/mysqld_safe
executable shell script for starting mysqld safely
.TP
.I /site/var/mysql/data
@@ -135,7 +135,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/mysql_zap.1 b/man/mysql_zap.1
index 98da195894e..e57eb7a4d07 100644
--- a/man/mysql_zap.1
+++ b/man/mysql_zap.1
@@ -28,7 +28,7 @@ isn't given, ask user for confirmation for each process to kill. If signal isn't
.BR -t
is given the processes is only shown on stdout.
.SH "SEE ALSO"
-isamchk (1), isamlog (1), mysqlaccess (1), mysqladmin (1), mysqlbug (1), mysqld (1), mysqldump (1), mysqlshow (1), msql2mysql (1), perror (1), replace (1), safe_mysqld (1), which1 (1), zap (1),
+isamchk (1), isamlog (1), mysqlaccess (1), mysqladmin (1), mysqlbug (1), mysqld (1), mysqldump (1), mysqlshow (1), msql2mysql (1), perror (1), replace (1), mysqld_safe (1), which1 (1), zap (1),
.SH AUTHOR
Ver 1.0, distribution 3.23.29a Michael (Monty) Widenius (monty@tcx.se), TCX Datakonsult AB (http://www.tcx.se). This software comes with no warranty. Manual page by L. (Kill-9) Pedersen (kill-9@kill-9.dk), Mercurmedia Data Model Architect / system developer (http://www.mercurmedia.com)
.\" end of man page \ No newline at end of file
diff --git a/man/mysqlaccess.1 b/man/mysqlaccess.1
index 888cfe8f646..0ae06dca137 100644
--- a/man/mysqlaccess.1
+++ b/man/mysqlaccess.1
@@ -106,7 +106,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/mysqladmin.1 b/man/mysqladmin.1
index 58bd2070de6..1e435006bb2 100644
--- a/man/mysqladmin.1
+++ b/man/mysqladmin.1
@@ -189,7 +189,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/mysqld.1 b/man/mysqld.1
index d3f22c0be1b..1f87eb9cf32 100644
--- a/man/mysqld.1
+++ b/man/mysqld.1
@@ -137,7 +137,7 @@ Don't check the rows in the table if there isn't any delete blocks.
Before a table is automaticly repaired, mysqld will add a note about this in the error log. If you want to be able to recover from most things without user intervention, you should use the options BACKUP,FORCE. This will force a repair of a table even if some rows would be deleted, but it will keep the old data file as a backup so that you can later examine what happened.
.TP
.BR \-\-pid\-file=\fP\fIpath \fP
-Path to pid file used by safe_mysqld.
+Path to pid file used by mysqld_safe.
.TP
.BR \-P | \-\-port=...
Port number to listen for TCP/IP connections.
@@ -215,7 +215,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/mysqld_multi.1 b/man/mysqld_multi.1
index b68050e92ef..b7aa77f656d 100644
--- a/man/mysqld_multi.1
+++ b/man/mysqld_multi.1
@@ -36,7 +36,7 @@ Log file. Full path to and the name for the log file. NOTE: If the file exists,
mysqladmin binary to be used for a server shutdown.
.TP
.BR --mysqld=...
-mysqld binary to be used. Note that you can give safe_mysqld to this option also. The options are passed to mysqld. Just make sure you have mysqld in your environment variable PATH or fix safe_mysqld.
+mysqld binary to be used. Note that you can give mysqld_safe to this option also. The options are passed to mysqld. Just make sure you have mysqld in your environment variable PATH or fix mysqld_safe.
.TP
.BR --no-log
Print to stdout instead of the log file. By default the log file is turned on.
@@ -70,7 +70,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
diff --git a/man/safe_mysqld.1 b/man/mysqld_safe.1
index 30abf04ae6b..bffbecd6478 100644
--- a/man/safe_mysqld.1
+++ b/man/mysqld_safe.1
@@ -1,9 +1,9 @@
.TH SAFE_MYSQLD 1 "19 December 2000"
.SH NAME
-.BR safe_mysqld
-is the recommended way to start a mysqld daemon on Unix. safe_mysqld adds some safety features such as restarting the server when an error occurs and logging run-time information to a log file.
+.BR mysqld_safe
+is the recommended way to start a mysqld daemon on Unix. mysqld_safe adds some safety features such as restarting the server when an error occurs and logging run-time information to a log file.
.SH SYNOPSIS
-.B safe_mysqld
+.B mysqld_safe
.RB [ \-\-basedir=\fP\fIpath\fP ]
.RB [ \-\-core\-file\-size=# ]
.RB [ \-\-defaults\-extra\-file=\fP\fIpath\fP ]
@@ -58,7 +58,7 @@ Set the timezone (the TZ) variable to the value of this parameter.
.TP
.BR \-\-user=#
.SH NOTE
-Note that all options on the command line to safe_mysqld are passed to mysqld. If you wants to use any options in safe_mysqld that mysqld doesn't support, you must specify these in the option file.
+Note that all options on the command line to mysqld_safe are passed to mysqld. If you wants to use any options in mysqld_safe that mysqld doesn't support, you must specify these in the option file.
.SH "SEE ALSO"
isamchk (1),
isamlog (1),
@@ -71,7 +71,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/mysqldump.1 b/man/mysqldump.1
index f108da17bf9..b9e5aa33791 100644
--- a/man/mysqldump.1
+++ b/man/mysqldump.1
@@ -258,7 +258,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/mysqlshow.1 b/man/mysqlshow.1
index 55a87c1df78..661b2cd02c8 100644
--- a/man/mysqlshow.1
+++ b/man/mysqlshow.1
@@ -78,7 +78,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/perror.1 b/man/perror.1
index 2853f2cb1ba..7adf99ea772 100644
--- a/man/perror.1
+++ b/man/perror.1
@@ -43,7 +43,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/replace.1 b/man/replace.1
index 10bcf64fc88..38ffe998027 100644
--- a/man/replace.1
+++ b/man/replace.1
@@ -52,7 +52,7 @@ mysqlshow (1),
msql2mysql (1),
perror (1),
replace (1),
-safe_mysqld (1),
+mysqld_safe (1),
which1 (1),
zap (1),
.SH AUTHOR
diff --git a/man/which.2 b/man/which.2
index 599b68080a2..30d5557ed01 100644
--- a/man/which.2
+++ b/man/which.2
@@ -48,7 +48,7 @@ Ignore option
.BR --read-alias;
don\'t read stdin.
.SH "SEE ALSO"
-isamchk (1), isamlog (1), mysqlaccess (1), mysqladmin (1), mysqlbug (1), mysqld (1), mysqldump (1), mysqlshow (1), msql2mysql (1), perror (1), replace (1), safe_mysqld (1), which1 (1), zap (1),
+isamchk (1), isamlog (1), mysqlaccess (1), mysqladmin (1), mysqlbug (1), mysqld (1), mysqldump (1), mysqlshow (1), msql2mysql (1), perror (1), replace (1), mysqld_safe (1), which1 (1), zap (1),
.SH AUTHOR
Ver 1.0, distribution 3.23.29a Michael (Monty) Widenius (monty@tcx.se), TCX Datakonsult AB (http://www.tcx.se). This software comes with no warranty. Manual page by L. (Kill-9) Pedersen (kill-9@kill-9.dk), Mercurmedia Data Model Architect / system developer (http://www.mercurmedia.com)
.\" end of man page \ No newline at end of file
diff --git a/myisam/ft_dump.c b/myisam/ft_dump.c
index 62e2a67dfc2..bb308188969 100644
--- a/myisam/ft_dump.c
+++ b/myisam/ft_dump.c
@@ -31,11 +31,11 @@ static char *query=NULL;
int main(int argc,char *argv[])
{
int error=0;
- uint keylen, inx, doc_cnt;
+ uint keylen, inx, doc_cnt=0;
float weight;
- double gws, min_gws, avg_gws=0;
+ double gws, min_gws=0, avg_gws=0;
MI_INFO *info;
- char buf[MAX], buf2[MAX], buf_maxlen[MAX], buf_min_gws[MAX], *s;
+ char buf[MAX], buf2[MAX], buf_maxlen[MAX], buf_min_gws[MAX];
ulong total=0, maxlen=0, uniq=0, max_doc_cnt=0;
#ifdef EVAL_RUN
uint cnt;
@@ -111,8 +111,8 @@ int main(int argc,char *argv[])
cnt=*(byte *)(info->lastkey+keylen);
#endif /* EVAL_RUN */
- snprintf(buf,MAX,"%.*s",keylen,info->lastkey+1);
- for (s=buf;*s;s++) *s=tolower(*s);
+ snprintf(buf,MAX,"%.*s",(int) keylen,info->lastkey+1);
+ casedn_str(buf);
total++;
if (count || stats)
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index b19a3ffdfbc..deb3f2071b3 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -103,6 +103,7 @@ void myisamchk_init(MI_CHECK *param)
int chk_status(MI_CHECK *param, register MI_INFO *info)
{
MYISAM_SHARE *share=info->s;
+
if (mi_is_crashed_on_repair(info))
mi_check_print_warning(param,
"Table is marked as crashed and last repair failed");
@@ -111,9 +112,14 @@ int chk_status(MI_CHECK *param, register MI_INFO *info)
"Table is marked as crashed");
if (share->state.open_count != (uint) (info->s->global_changed ? 1 : 0))
{
+ /* Don't count this as a real warning, as check can correct this ! */
+ uint save=param->warning_printed;
mi_check_print_warning(param,
"%d clients is using or hasn't closed the table properly",
share->state.open_count);
+ /* If this will be fixed by the check, forget the warning */
+ if (param->testflag & T_UPDATE_STATE)
+ param->warning_printed=save;
}
return 0;
}
@@ -1096,6 +1102,10 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
share->pack.header_length;
got_error=1;
new_file= -1;
+ sort_info->buff=0;
+ sort_info->buff_length=0;
+ sort_info->record=0;
+
if (!(param->testflag & T_SILENT))
{
printf("- recovering (with keycache) MyISAM-table '%s'\n",name);
@@ -1108,7 +1118,10 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
if (init_io_cache(&param->read_cache,info->dfile,
(uint) param->read_buffer_length,
READ_CACHE,share->pack.header_length,1,MYF(MY_WME)))
+ {
+ bzero(&info->rec_cache,sizeof(info->rec_cache));
goto err;
+ }
if (!rep_quick)
if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length,
WRITE_CACHE, new_header_length, 1,
@@ -1116,7 +1129,6 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
goto err;
info->opt_flag|=WRITE_CACHE_USED;
sort_info->start_recpos=0;
- sort_info->buff=0; sort_info->buff_length=0;
if (!(sort_info->record=(byte*) my_malloc((uint) share->base.pack_reclength,
MYF(0))))
{
@@ -1126,7 +1138,10 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
if (!rep_quick)
{
- if ((new_file=my_raid_create(fn_format(param->temp_filename,name,"",
+ /* Get real path for data file */
+ fn_format(param->temp_filename,name,"", MI_NAME_DEXT,2+4+32);
+ if ((new_file=my_raid_create(fn_format(param->temp_filename,
+ param->temp_filename,"",
DATA_TMP_EXT,
2+4),
0,param->tmpfile_createflag,
@@ -1288,7 +1303,7 @@ err:
{
my_close(new_file,MYF(0));
info->dfile=new_file= -1;
- if (change_to_newfile(share->filename,MI_NAME_DEXT,
+ if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
DATA_TMP_EXT, share->base.raid_chunks,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
@@ -1472,8 +1487,10 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name)
if (!(param->testflag & T_SILENT))
printf("- Sorting index for MyISAM-table '%s'\n",name);
- if ((new_file=my_create(fn_format(param->temp_filename,name,"",
- INDEX_TMP_EXT,2+4),
+ /* Get real path for index file */
+ fn_format(param->temp_filename,name,"", MI_NAME_IEXT,2+4+32);
+ if ((new_file=my_create(fn_format(param->temp_filename,param->temp_filename,
+ "", INDEX_TMP_EXT,2+4),
0,param->tmpfile_createflag,MYF(0))) <= 0)
{
mi_check_print_error(param,"Can't create new tempfile: '%s'",
@@ -1493,7 +1510,7 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name)
if (share->state.key_root[key] != HA_OFFSET_ERROR)
{
- index_pos[key]=param->new_file_pos; /* Write first block here */
+ index_pos[key]=param->new_file_pos; /* Write first block here */
if (sort_one_index(param,info,keyinfo,share->state.key_root[key],
new_file))
goto err;
@@ -1514,7 +1531,7 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name)
VOID(my_close(share->kfile,MYF(MY_WME)));
share->kfile = -1;
VOID(my_close(new_file,MYF(MY_WME)));
- if (change_to_newfile(share->filename,MI_NAME_IEXT,INDEX_TMP_EXT,0,
+ if (change_to_newfile(share->index_file_name,MI_NAME_IEXT,INDEX_TMP_EXT,0,
MYF(0)) ||
mi_open_keyfile(share))
goto err2;
@@ -1614,9 +1631,14 @@ err:
} /* sort_one_index */
- /* Change to use new file */
- /* Copy stats from old file to new file, deletes orginal and */
- /* changes new file name to old file name */
+ /*
+ Let temporary file replace old file.
+ This assumes that the new file was created in the same
+ directory as given by realpath(filename).
+ This will ensure that any symlinks that are used will still work.
+ Copy stats from old file to new file, deletes orignal and
+ changes new file name to old file name
+ */
int change_to_newfile(const char * filename, const char * old_ext,
const char * new_ext,
@@ -1631,8 +1653,10 @@ int change_to_newfile(const char * filename, const char * old_ext,
raid_chunks,
MYF(MY_WME | MY_LINK_WARNING | MyFlags));
#endif
- return my_redel(fn_format(old_filename,filename,"",old_ext,2+4),
- fn_format(new_filename,filename,"",new_ext,2+4),
+ /* Get real path to filename */
+ (void) fn_format(old_filename,filename,"",old_ext,2+4+32);
+ return my_redel(old_filename,
+ fn_format(new_filename,old_filename,"",new_ext,2+4),
MYF(MY_WME | MY_LINK_WARNING | MyFlags));
} /* change_to_newfile */
@@ -1749,7 +1773,10 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
}
if (!rep_quick)
{
- if ((new_file=my_raid_create(fn_format(param->temp_filename,name,"",
+ /* Get real path for data file */
+ fn_format(param->temp_filename,name,"", MI_NAME_DEXT,2+4+32);
+ if ((new_file=my_raid_create(fn_format(param->temp_filename,
+ param->temp_filename, "",
DATA_TMP_EXT,
2+4),
0,param->tmpfile_createflag,
@@ -1994,7 +2021,7 @@ err:
{
my_close(new_file,MYF(0));
info->dfile=new_file= -1;
- if (change_to_newfile(share->filename,MI_NAME_DEXT,
+ if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
DATA_TMP_EXT, share->base.raid_chunks,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
@@ -2846,7 +2873,6 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
MI_STATUS_INFO status_info;
uint unpack,key_parts;
ha_rows max_records;
- char name[FN_REFLEN];
ulonglong file_length,tmp_length;
MI_CREATE_INFO create_info;
@@ -2955,8 +2981,9 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
create_info.language = (param->language ? param->language :
share.state.header.language);
- if (mi_create(fn_format(name,filename,"",MI_NAME_IEXT,
- 4+ (param->opt_follow_links ? 16 : 0)),
+ /* We don't have to handle symlinks here because we are using
+ HA_DONT_TOUCH_DATA */
+ if (mi_create(filename,
share.base.keys - share.state.header.uniques,
keyinfo, share.base.fields, recdef,
share.state.header.uniques, uniquedef,
@@ -2966,7 +2993,7 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
mi_check_print_error(param,"Got error %d when trying to recreate indexfile",my_errno);
goto end;
}
- *org_info=mi_open(name,O_RDWR,
+ *org_info=mi_open(filename,O_RDWR,
(param->testflag & T_WAIT_FOREVER) ? HA_OPEN_WAIT_IF_LOCKED :
(param->testflag & T_DESCRIPT) ? HA_OPEN_IGNORE_IF_LOCKED :
HA_OPEN_ABORT_IF_LOCKED);
diff --git a/myisam/mi_create.c b/myisam/mi_create.c
index 6de13f8f84a..d3e5e819742 100644
--- a/myisam/mi_create.c
+++ b/myisam/mi_create.c
@@ -38,12 +38,13 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
register uint i,j;
File dfile,file;
int errpos,save_errno;
+ myf create_flag;
uint fields,length,max_key_length,packed,pointer,
key_length,info_length,key_segs,options,min_key_length_skipp,
base_pos,varchar_count,long_varchar_count,varchar_length,
max_key_block_length,unique_key_parts,offset;
ulong reclength, real_reclength,min_pack_length;
- char buff[FN_REFLEN];
+ char filename[FN_REFLEN],linkname[FN_REFLEN], *linkname_ptr;
ulong pack_reclength;
ulonglong tot_length,max_rows;
enum en_fieldtype type;
@@ -163,6 +164,9 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
if (packed || (flags & HA_PACK_RECORD))
options|=HA_OPTION_PACK_RECORD; /* Must use packed records */
+ /* We can't use checksum with static length rows */
+ if (!(options & HA_OPTION_PACK_RECORD))
+ options&= ~HA_OPTION_CHECKSUM;
if (options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD))
min_pack_length+=varchar_count; /* Min length to pack */
else
@@ -444,7 +448,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
share.base.records=ci->max_rows;
share.base.reloc= ci->reloc_rows;
share.base.reclength=real_reclength;
- share.base.pack_reclength=reclength+ test(options & HA_OPTION_CHECKSUM);;
+ share.base.pack_reclength=reclength+ test(options & HA_OPTION_CHECKSUM);
share.base.max_pack_length=pack_reclength;
share.base.min_pack_length=min_pack_length;
share.base.pack_bits=packed;
@@ -467,18 +471,41 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
MI_EXTEND_BLOCK_LENGTH;
if (! (flags & HA_DONT_TOUCH_DATA))
share.state.create_time= (long) time((time_t*) 0);
+
+ if (ci->index_file_name)
+ {
+ fn_format(filename, ci->index_file_name,"",MI_NAME_IEXT,4);
+ fn_format(linkname,name, "",MI_NAME_IEXT,4);
+ linkname_ptr=linkname;
+ /*
+ Don't create the table if the link or file exists to ensure that one
+ doesn't accidently destroy another table.
+ */
+ create_flag=0;
+ }
+ else
+ {
+ fn_format(filename,name,"",MI_NAME_IEXT,(4+ (flags & HA_DONT_TOUCH_DATA) ?
+ 32 : 0));
+ linkname_ptr=0;
+ /* Replace the current file */
+ create_flag=MY_DELETE_OLD;
+ }
- if ((file = my_create(fn_format(buff,name,"",MI_NAME_IEXT,4),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ if ((file= my_create_with_symlink(linkname_ptr,
+ filename,
+ 0, O_RDWR | O_TRUNC,
+ MYF(MY_WME | create_flag))) < 0)
goto err;
errpos=1;
- VOID(fn_format(buff,name,"",MI_NAME_DEXT,2+4));
+
if (!(flags & HA_DONT_TOUCH_DATA))
{
#ifdef USE_RAID
if (share.base.raid_type)
{
- if ((dfile=my_raid_create(buff,0,O_RDWR | O_TRUNC,
+ (void) fn_format(filename,name,"",MI_NAME_DEXT,2+4);
+ if ((dfile=my_raid_create(filename,0,O_RDWR | O_TRUNC,
share.base.raid_type,
share.base.raid_chunks,
share.base.raid_chunksize,
@@ -487,9 +514,26 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
}
else
#endif
- if ((dfile = my_create(buff,0,O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- goto err;
-
+ {
+ if (ci->data_file_name)
+ {
+ fn_format(filename, ci->data_file_name,"",MI_NAME_DEXT,4);
+ fn_format(linkname, name, "",MI_NAME_DEXT,4);
+ linkname_ptr=linkname;
+ create_flag=0;
+ }
+ else
+ {
+ fn_format(filename,name,"",MI_NAME_DEXT,4);
+ linkname_ptr=0;
+ create_flag=MY_DELETE_OLD;
+ }
+ if ((dfile=
+ my_create_with_symlink(linkname_ptr, filename,
+ 0,O_RDWR | O_TRUNC,
+ MYF(MY_WME | create_flag))) < 0)
+ goto err;
+ }
errpos=3;
}
@@ -508,14 +552,14 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
/* Write key and keyseg definitions */
for (i=0 ; i < share.base.keys - uniques; i++)
{
- uint ft_segs=(keydefs[i].flag & HA_FULLTEXT) ? FT_SEGS : 0; /* SerG */
+ uint ft_segs=(keydefs[i].flag & HA_FULLTEXT) ? FT_SEGS : 0;
if (mi_keydef_write(file, &keydefs[i]))
goto err;
for (j=0 ; j < keydefs[i].keysegs-ft_segs ; j++)
if (mi_keyseg_write(file, &keydefs[i].seg[j]))
goto err;
- for (j=0 ; j < ft_segs ; j++) /* SerG */
+ for (j=0 ; j < ft_segs ; j++)
{
MI_KEYSEG seg=ft_keysegs[j];
seg.language= keydefs[i].seg[0].language;
@@ -596,20 +640,16 @@ err:
VOID(my_close(dfile,MYF(0)));
/* fall through */
case 2:
- if (! (flags & HA_DONT_TOUCH_DATA))
- {
/* QQ: Tõnu should add a call to my_raid_delete() here */
- VOID(fn_format(buff,name,"",MI_NAME_DEXT,2+4));
- my_delete(buff,MYF(0));
- }
+ if (! (flags & HA_DONT_TOUCH_DATA))
+ my_delete_with_symlink(fn_format(filename,name,"",MI_NAME_DEXT,2+4),
+ MYF(0));
/* fall through */
case 1:
VOID(my_close(file,MYF(0)));
if (! (flags & HA_DONT_TOUCH_DATA))
- {
- VOID(fn_format(buff,name,"",MI_NAME_IEXT,2+4));
- my_delete(buff,MYF(0));
- }
+ my_delete_with_symlink(fn_format(filename,name,"",MI_NAME_IEXT,2+4),
+ MYF(0));
}
my_free((char*) rec_per_key_part, MYF(0));
DBUG_RETURN(my_errno=save_errno); /* return the fatal errno */
diff --git a/myisam/mi_dbug.c b/myisam/mi_dbug.c
index eda1aafecc8..8c532970dd9 100644
--- a/myisam/mi_dbug.c
+++ b/myisam/mi_dbug.c
@@ -162,7 +162,7 @@ my_bool check_table_is_closed(const char *name, const char *where)
{
MI_INFO *info=(MI_INFO*) pos->data;
MYISAM_SHARE *share=info->s;
- if (!strcmp(share->filename,filename))
+ if (!strcmp(share->unique_file_name,filename))
{
if (share->last_version)
{
diff --git a/myisam/mi_delete_table.c b/myisam/mi_delete_table.c
index 995106160ef..d8fff51acb6 100644
--- a/myisam/mi_delete_table.c
+++ b/myisam/mi_delete_table.c
@@ -50,12 +50,12 @@ int mi_delete_table(const char *name)
#endif /* USE_RAID */
fn_format(from,name,"",MI_NAME_IEXT,4);
- if (my_delete(from, MYF(MY_WME)))
+ if (my_delete_with_symlink(from, MYF(MY_WME)))
DBUG_RETURN(my_errno);
fn_format(from,name,"",MI_NAME_DEXT,4);
#ifdef USE_RAID
if (raid_type)
DBUG_RETURN(my_raid_delete(from, raid_chunks, MYF(MY_WME)) ? my_errno : 0);
#endif
- DBUG_RETURN(my_delete(from, MYF(MY_WME)) ? my_errno : 0);
+ DBUG_RETURN(my_delete_with_symlink(from, MYF(MY_WME)) ? my_errno : 0);
}
diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c
index 4c05f6c737d..e090498f3fe 100644
--- a/myisam/mi_dynrec.c
+++ b/myisam/mi_dynrec.c
@@ -1221,20 +1221,19 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
char temp_buff[IO_SIZE*2];
DBUG_ENTER("_mi_cmp_buffer");
- VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1));
while (length > IO_SIZE*2)
{
- if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
+ if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) ||
+ memcmp((byte*) buff,temp_buff,next_length))
goto err;
- if (memcmp((byte*) buff,temp_buff,IO_SIZE))
- DBUG_RETURN(1);
+ filepos+=next_length;
buff+=next_length;
length-= next_length;
next_length=IO_SIZE*2;
}
- if (my_read(file,temp_buff,length,MYF(MY_NABP)))
+ if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP)))
goto err;
DBUG_RETURN(memcmp((byte*) buff,temp_buff,length));
err:
diff --git a/myisam/mi_info.c b/myisam/mi_info.c
index 6e7abfc0914..867718de326 100644
--- a/myisam/mi_info.c
+++ b/myisam/mi_info.c
@@ -87,6 +87,8 @@ int mi_status(MI_INFO *info, register MI_ISAMINFO *x, uint flag)
x->raid_chunks= share->base.raid_chunks;
x->raid_chunksize= share->base.raid_chunksize;
x->key_map = share->state.key_map;
+ x->data_file_name = share->data_file_name;
+ x->index_file_name = share->index_file_name;
}
if ((flag & HA_STATUS_TIME) && !my_fstat(info->dfile,&state,MYF(0)))
x->update_time=state.st_mtime;
diff --git a/myisam/mi_open.c b/myisam/mi_open.c
index d4b677e12dd..4d8a5c2a1d6 100644
--- a/myisam/mi_open.c
+++ b/myisam/mi_open.c
@@ -49,7 +49,7 @@ static MI_INFO *test_if_reopen(char *filename)
{
MI_INFO *info=(MI_INFO*) pos->data;
MYISAM_SHARE *share=info->s;
- if (!strcmp(share->filename,filename) && share->last_version)
+ if (!strcmp(share->unique_file_name,filename) && share->last_version)
return info;
}
return 0;
@@ -69,7 +69,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
int lock_error,kfile,open_mode,save_errno;
uint i,j,len,errpos,head_length,base_pos,offset,info_length,extra,keys,
key_parts,unique_key_parts,tmp_length,uniques;
- char name_buff[FN_REFLEN],*disk_cache,*disk_pos;
+ char name_buff[FN_REFLEN], org_name [FN_REFLEN], index_name[FN_REFLEN],
+ data_name[FN_REFLEN];
+ char *disk_cache,*disk_pos;
MI_INFO info,*m_info,*old_info;
MYISAM_SHARE share_buff,*share;
ulong rec_per_key_part[MI_MAX_POSSIBLE_KEY*MI_MAX_KEY_SEG];
@@ -84,7 +86,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
head_length=sizeof(share_buff.state.header);
bzero((byte*) &info,sizeof(info));
- VOID(fn_format(name_buff,name,"",MI_NAME_IEXT,4+16+32));
+ my_realpath(name_buff, fn_format(org_name,name,"",MI_NAME_IEXT,4),MYF(0));
pthread_mutex_lock(&THR_LOCK_myisam);
if (!(old_info=test_if_reopen(name_buff)))
{
@@ -128,6 +130,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
my_errno=HA_ERR_OLD_FILE;
goto err;
}
+ /* Don't call realpath() if the name can't be a link */
+ if (strcmp(name_buff, org_name))
+ (void) my_readlink(index_name, org_name, MYF(0));
+ else
+ (void) strmov(index_name, org_name);
+ (void) fn_format(data_name,org_name,"",MI_NAME_DEXT,2+4+16);
+
info_length=mi_uint2korr(share->state.header.header_length);
base_pos=mi_uint2korr(share->state.header.base_pos);
if (!(disk_cache=(char*) my_alloca(info_length)))
@@ -250,7 +259,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
&share->rec,
(share->base.fields+1)*sizeof(MI_COLUMNDEF),
&share->blobs,sizeof(MI_BLOB)*share->base.blobs,
- &share->filename,strlen(name_buff)+1,
+ &share->unique_file_name,strlen(name_buff)+1,
+ &share->index_file_name,strlen(index_name)+1,
+ &share->data_file_name,strlen(data_name)+1,
&share->state.key_root,keys*sizeof(my_off_t),
&share->state.key_del,
(share->state.header.max_block_size*sizeof(my_off_t)),
@@ -268,7 +279,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
memcpy((char*) share->state.key_del,
(char*) key_del, (sizeof(my_off_t) *
share->state.header.max_block_size));
- strmov(share->filename,name_buff);
+ strmov(share->unique_file_name, name_buff);
+ strmov(share->index_file_name, index_name);
+ strmov(share->data_file_name, data_name);
share->blocksize=min(IO_SIZE,myisam_block_size);
{
@@ -438,12 +451,12 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
&info.buff,(share->base.max_key_block_length*2+
share->base.max_key_length),
&info.lastkey,share->base.max_key_length*3+1,
- &info.filename,strlen(name)+1,
+ &info.filename,strlen(org_name)+1,
NullS))
goto err;
errpos=6;
- strmov(info.filename,name);
+ strmov(info.filename,org_name);
memcpy(info.blobs,share->blobs,sizeof(MI_BLOB)*share->base.blobs);
info.lastkey2=info.lastkey+share->base.max_key_length;
@@ -515,7 +528,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
pthread_mutex_unlock(&THR_LOCK_myisam);
if (myisam_log_file >= 0)
{
- intern_filename(name_buff,share->filename);
+ intern_filename(name_buff,share->index_file_name);
_myisam_log(MI_LOG_OPEN,m_info,name_buff,(uint) strlen(name_buff));
}
DBUG_RETURN(m_info);
@@ -1001,13 +1014,10 @@ char *mi_recinfo_read(char *ptr, MI_COLUMNDEF *recinfo)
int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share)
{
- char name_buff[FN_REFLEN];
- (void) fn_format(name_buff, share->filename,"",MI_NAME_DEXT, 2+4);
-
#ifdef USE_RAID
if (share->base.raid_type)
{
- if ((info->dfile=my_raid_open(name_buff,
+ if ((info->dfile=my_raid_open(share->data_file_name,
share->mode | O_SHARE,
share->base.raid_type,
share->base.raid_chunks,
@@ -1017,7 +1027,7 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share)
}
else
#endif
- if ((info->dfile=my_open(name_buff, share->mode | O_SHARE,
+ if ((info->dfile=my_open(share->data_file_name, share->mode | O_SHARE,
MYF(MY_WME))) < 0)
return 1;
return 0;
@@ -1026,7 +1036,7 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share)
int mi_open_keyfile(MYISAM_SHARE *share)
{
- if ((share->kfile=my_open(share->filename, share->mode | O_SHARE,
+ if ((share->kfile=my_open(share->unique_file_name, share->mode | O_SHARE,
MYF(MY_WME))) < 0)
return 1;
return 0;
diff --git a/myisam/mi_packrec.c b/myisam/mi_packrec.c
index b6a9435ee3d..be7f9dcae0a 100644
--- a/myisam/mi_packrec.c
+++ b/myisam/mi_packrec.c
@@ -1010,7 +1010,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BLOCK_INFO *info, File file,
{
ref_length=myisam->s->pack.ref_length;
/*
- We can't use my_pread() here because mi_rad_pack_record assumes
+ We can't use my_pread() here because mi_read_rnd_pack_record assumes
position is ok
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
diff --git a/myisam/mi_rename.c b/myisam/mi_rename.c
index 5c92db3f7ce..4d6250f58f4 100644
--- a/myisam/mi_rename.c
+++ b/myisam/mi_rename.c
@@ -51,7 +51,7 @@ int mi_rename(const char *old_name, const char *new_name)
fn_format(from,old_name,"",MI_NAME_IEXT,4);
fn_format(to,new_name,"",MI_NAME_IEXT,4);
- if (my_rename(from, to, MYF(MY_WME)))
+ if (my_rename_with_symlink(from, to, MYF(MY_WME)))
DBUG_RETURN(my_errno);
fn_format(from,old_name,"",MI_NAME_DEXT,4);
fn_format(to,new_name,"",MI_NAME_DEXT,4);
@@ -60,5 +60,5 @@ int mi_rename(const char *old_name, const char *new_name)
DBUG_RETURN(my_raid_rename(from, to, raid_chunks, MYF(MY_WME)) ? my_errno :
0);
#endif
- DBUG_RETURN(my_rename(from, to,MYF(MY_WME)) ? my_errno : 0);
+ DBUG_RETURN(my_rename_with_symlink(from, to,MYF(MY_WME)) ? my_errno : 0);
}
diff --git a/myisam/mi_search.c b/myisam/mi_search.c
index f90f6fa6467..18d8ea8a4b0 100644
--- a/myisam/mi_search.c
+++ b/myisam/mi_search.c
@@ -256,6 +256,7 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
DBUG_RETURN(flag);
} /* _mi_seq_search */
+
int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
uchar *key, uint key_len, uint nextflag, uchar **ret_pos,
uchar *buff, my_bool *last_key)
@@ -274,6 +275,13 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
uint saved_length=0, saved_prefix_len=0;
DBUG_ENTER("_mi_prefix_search");
+ LINT_INIT(length);
+ LINT_INIT(prefix_len);
+ LINT_INIT(seg_len_pack);
+ LINT_INIT(saved_from);
+ LINT_INIT(saved_to);
+ LINT_INIT(saved_vseg);
+
t_buff[0]=0; /* Avoid bugs */
if (!(nextflag & (SEARCH_FIND | SEARCH_NO_FIND | SEARCH_LAST)))
key_len=USE_WHOLE_KEY;
@@ -286,7 +294,7 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
uint lenght_pack;
get_key_pack_length(kseg_len,lenght_pack,kseg);
key_len_skip=lenght_pack+kseg_len;
- key_len_left=key_len-key_len_skip;
+ key_len_left=(int) key_len- (int) key_len_skip;
cmplen=(key_len_left>=0) ? kseg_len : key_len-lenght_pack;
DBUG_PRINT("info",("key: '%.*s'",kseg_len,kseg));
}
@@ -407,11 +415,11 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
*/
if (len < cmplen)
{
- my_flag=-1;
+ my_flag= -1;
}
else if (len > cmplen)
{
- if(my_flag = !(nextflag & SEARCH_PREFIX) || key_len_left>0)
+ if ((my_flag= (!(nextflag & SEARCH_PREFIX) || key_len_left>0)))
break;
goto fix_flag;
}
diff --git a/myisam/mi_statrec.c b/myisam/mi_statrec.c
index 05ff40d8921..e0fce6d3e1c 100644
--- a/myisam/mi_statrec.c
+++ b/myisam/mi_statrec.c
@@ -27,17 +27,16 @@ int _mi_write_static_record(MI_INFO *info, const byte *record)
{
my_off_t filepos=info->s->state.dellink;
info->rec_cache.seek_not_done=1; /* We have done a seek */
- VOID(my_seek(info->dfile,info->s->state.dellink+1,MY_SEEK_SET,MYF(0)));
-
- if (my_read(info->dfile,(char*) &temp[0],info->s->base.rec_reflength,
- MYF(MY_NABP)))
+ if (my_pread(info->dfile,(char*) &temp[0],info->s->base.rec_reflength,
+ info->s->state.dellink+1,
+ MYF(MY_NABP)))
goto err;
info->s->state.dellink= _mi_rec_pos(info->s,temp);
info->state->del--;
info->state->empty-=info->s->base.pack_reclength;
- VOID(my_seek(info->dfile,filepos,MY_SEEK_SET,MYF(0)));
- if (my_write(info->dfile, (char*) record, info->s->base.reclength,
- MYF(MY_NABP)))
+ if (my_pwrite(info->dfile, (char*) record, info->s->base.reclength,
+ filepos,
+ MYF(MY_NABP)))
goto err;
}
else
@@ -64,16 +63,18 @@ int _mi_write_static_record(MI_INFO *info, const byte *record)
else
{
info->rec_cache.seek_not_done=1; /* We have done a seek */
- VOID(my_seek(info->dfile,info->state->data_file_length,
- MY_SEEK_SET,MYF(0)));
- if (my_write(info->dfile,(char*) record,info->s->base.reclength,
- info->s->write_flag))
+ if (my_pwrite(info->dfile,(char*) record,info->s->base.reclength,
+ info->state->data_file_length,
+ info->s->write_flag))
goto err;
if (info->s->base.pack_reclength != info->s->base.reclength)
{
uint length=info->s->base.pack_reclength - info->s->base.reclength;
bzero((char*) temp,length);
- if (my_write(info->dfile, (byte*) temp,length, info->s->write_flag))
+ if (my_pwrite(info->dfile, (byte*) temp,length,
+ info->state->data_file_length+
+ info->s->base.reclength,
+ info->s->write_flag))
goto err;
}
}
@@ -88,9 +89,10 @@ int _mi_write_static_record(MI_INFO *info, const byte *record)
int _mi_update_static_record(MI_INFO *info, my_off_t pos, const byte *record)
{
info->rec_cache.seek_not_done=1; /* We have done a seek */
- VOID(my_seek(info->dfile,pos,MY_SEEK_SET,MYF(0)));
- return (my_write(info->dfile,(char*) record,info->s->base.reclength,
- MYF(MY_NABP)) != 0);
+ return (my_pwrite(info->dfile,
+ (char*) record,info->s->base.reclength,
+ pos,
+ MYF(MY_NABP)) != 0);
}
@@ -104,9 +106,8 @@ int _mi_delete_static_record(MI_INFO *info)
_mi_dpointer(info,temp+1,info->s->state.dellink);
info->s->state.dellink = info->lastpos;
info->rec_cache.seek_not_done=1;
- VOID(my_seek(info->dfile,info->lastpos,MY_SEEK_SET,MYF(0)));
- return (my_write(info->dfile,(byte*) temp, 1+info->s->rec_reflength,
- MYF(MY_NABP)) != 0);
+ return (my_pwrite(info->dfile,(byte*) temp, 1+info->s->rec_reflength,
+ info->lastpos, MYF(MY_NABP)) != 0);
}
@@ -129,9 +130,9 @@ int _mi_cmp_static_record(register MI_INFO *info, register const byte *old)
if ((info->opt_flag & READ_CHECK_USED))
{ /* If check isn't disabled */
info->rec_cache.seek_not_done=1; /* We have done a seek */
- VOID(my_seek(info->dfile,info->lastpos,MY_SEEK_SET,MYF(0)));
- if (my_read(info->dfile, (char*) info->rec_buff, info->s->base.reclength,
- MYF(MY_NABP)))
+ if (my_pread(info->dfile, (char*) info->rec_buff, info->s->base.reclength,
+ info->lastpos,
+ MYF(MY_NABP)))
DBUG_RETURN(-1);
if (memcmp((byte*) info->rec_buff, (byte*) old,
(uint) info->s->base.reclength))
@@ -152,9 +153,8 @@ int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def,
DBUG_ENTER("_mi_cmp_static_unique");
info->rec_cache.seek_not_done=1; /* We have done a seek */
- VOID(my_seek(info->dfile,pos,MY_SEEK_SET,MYF(0)));
- if (my_read(info->dfile, (char*) info->rec_buff, info->s->base.reclength,
- MYF(MY_NABP)))
+ if (my_pread(info->dfile, (char*) info->rec_buff, info->s->base.reclength,
+ pos, MYF(MY_NABP)))
DBUG_RETURN(-1);
DBUG_RETURN(mi_unique_comp(def, record, info->rec_buff,
def->null_are_equal));
diff --git a/myisam/mi_test_all.sh b/myisam/mi_test_all.sh
index dfa2e1d0fdd..ccc9c39c64e 100755
--- a/myisam/mi_test_all.sh
+++ b/myisam/mi_test_all.sh
@@ -1,5 +1,10 @@
+#!/bin/sh
+#
+# Execute some simple basic test on MyISAM libary to check if things
+# works at all.
+
silent="-s"
-suffix=$MACH
+if test -f mi_test1$MACH ; then suffix=$MACH else suffix=""; fi
mi_test1$suffix $silent
myisamchk$suffix -se test1
mi_test1$suffix $silent -N -S
diff --git a/myisam/myisamchk.c b/myisam/myisamchk.c
index cff5f781538..e5ea58148e3 100644
--- a/myisam/myisamchk.c
+++ b/myisam/myisamchk.c
@@ -183,7 +183,6 @@ static struct option long_options[] =
{"information", no_argument, 0, 'i'},
{"keys-used", required_argument, 0, 'k'},
{"medium-check", no_argument, 0, 'm'},
- {"no-symlinks", no_argument, 0, 'l'},
{"quick", no_argument, 0, 'q'},
{"read-only", no_argument, 0, 'T'},
{"recover", no_argument, 0, 'r'},
@@ -207,7 +206,7 @@ static struct option long_options[] =
static void print_version(void)
{
- printf("%s Ver 1.45 for %s at %s\n",my_progname,SYSTEM_TYPE,
+ printf("%s Ver 1.47 for %s at %s\n",my_progname,SYSTEM_TYPE,
MACHINE_TYPE);
}
@@ -240,7 +239,8 @@ static void usage(void)
-F, --fast Check only tables that hasn't been closed properly\n\
-C, --check-only-changed\n\
Check only tables that has changed since last check\n\
- -f, --force Restart with -r if there are any errors in the table\n\
+ -f, --force Restart with -r if there are any errors in the table.\n\
+ States will be updated as with --update-state\n\
-i, --information Print statistics information about table that is checked\n\
-m, --medium-check Faster than extended-check, but only finds 99.99% of\n\
all errors. Should be good enough for most cases\n\
@@ -365,7 +365,7 @@ static void get_options(register int *argc,register char ***argv)
break;
case 'f':
check_param.tmpfile_createflag= O_RDWR | O_TRUNC;
- check_param.testflag|=T_FORCE_CREATE;
+ check_param.testflag|= T_FORCE_CREATE | T_UPDATE_STATE;
break;
case 'F':
check_param.testflag|=T_FAST;
@@ -373,9 +373,6 @@ static void get_options(register int *argc,register char ***argv)
case 'k':
check_param.keys_in_use= (ulonglong) strtoll(optarg,NULL,10);
break;
- case 'l':
- check_param.opt_follow_links=0;
- break;
case 'm':
check_param.testflag|= T_MEDIUM; /* Medium check */
break;
@@ -498,7 +495,6 @@ static int myisamchk(MI_CHECK *param, my_string filename)
uint raid_chunks;
MI_INFO *info;
File datafile;
- char fixed_name[FN_REFLEN];
char llbuff[22],llbuff2[22];
my_bool state_updated=0;
MYISAM_SHARE *share;
@@ -675,9 +671,6 @@ static int myisamchk(MI_CHECK *param, my_string filename)
if (tmp != share->state.key_map)
info->update|=HA_STATE_CHANGED;
}
- VOID(fn_format(fixed_name,filename,"",MI_NAME_IEXT,
- 4+ (param->opt_follow_links ? 16 : 0)));
-
if (rep_quick && chk_del(&check_param, info,
param->testflag & ~T_VERBOSE))
{
@@ -702,11 +695,11 @@ static int myisamchk(MI_CHECK *param, my_string filename)
info->s->state.key_map,
check_param.force_sort))
{
- error=mi_repair_by_sort(&check_param,info,fixed_name,rep_quick);
+ error=mi_repair_by_sort(&check_param,info,filename,rep_quick);
state_updated=1;
}
else if (param->testflag & (T_REP | T_REP_BY_SORT))
- error=mi_repair(&check_param, info,fixed_name,rep_quick);
+ error=mi_repair(&check_param, info,filename,rep_quick);
}
if (!error && param->testflag & T_SORT_RECORDS)
{
@@ -718,7 +711,7 @@ static int myisamchk(MI_CHECK *param, my_string filename)
if (param->out_flag & O_NEW_DATA)
{ /* Change temp file to org file */
VOID(my_close(info->dfile,MYF(MY_WME))); /* Close new file */
- error|=change_to_newfile(fixed_name,MI_NAME_DEXT,DATA_TMP_EXT,
+ error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT,
raid_chunks,
MYF(0));
if (mi_open_datafile(info,info->s))
@@ -739,7 +732,7 @@ static int myisamchk(MI_CHECK *param, my_string filename)
if (share->keyinfo[key].flag & HA_BINARY_PACK_KEY)
update_index=0;
- error=mi_sort_records(param,info,fixed_name,param->opt_sort_key,
+ error=mi_sort_records(param,info,filename,param->opt_sort_key,
(my_bool) !(param->testflag & T_REP),
update_index);
datafile=info->dfile; /* This is now locked */
@@ -747,12 +740,12 @@ static int myisamchk(MI_CHECK *param, my_string filename)
{
if (check_param.verbose)
puts("Table had a compressed index; We must now recreate the index");
- error=mi_repair_by_sort(&check_param,info,fixed_name,1);
+ error=mi_repair_by_sort(&check_param,info,filename,1);
}
}
}
if (!error && param->testflag & T_SORT_INDEX)
- error=mi_sort_index(param,info,fixed_name);
+ error=mi_sort_index(param,info,filename);
if (!error)
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
@@ -849,12 +842,12 @@ end2:
if (error == 0)
{
if (param->out_flag & O_NEW_DATA)
- error|=change_to_newfile(fixed_name,MI_NAME_DEXT,DATA_TMP_EXT,
+ error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT,
raid_chunks,
((param->testflag & T_BACKUP_DATA) ?
MYF(MY_REDEL_MAKE_BACKUP) : MYF(0)));
if (param->out_flag & O_NEW_INDEX)
- error|=change_to_newfile(fixed_name,MI_NAME_IEXT,INDEX_TMP_EXT,0,
+ error|=change_to_newfile(filename,MI_NAME_IEXT,INDEX_TMP_EXT,0,
MYF(0));
}
VOID(fflush(stdout)); VOID(fflush(stderr));
@@ -1212,7 +1205,9 @@ static int mi_sort_records(MI_CHECK *param,
mi_check_print_error(param,"Not enough memory for record");
goto err;
}
- new_file=my_raid_create(fn_format(param->temp_filename,name,"",
+ fn_format(param->temp_filename,name,"", MI_NAME_DEXT,2+4+32);
+ new_file=my_raid_create(fn_format(param->temp_filename,
+ param->temp_filename,"",
DATA_TMP_EXT,2+4),
0,param->tmpfile_createflag,
share->base.raid_type,
diff --git a/myisam/myisamdef.h b/myisam/myisamdef.h
index 427ccba71ed..865c47fb7ea 100644
--- a/myisam/myisamdef.h
+++ b/myisam/myisamdef.h
@@ -160,7 +160,9 @@ typedef struct st_mi_isam_share { /* Shared between opens */
MI_COLUMNDEF *rec; /* Pointer to field information */
MI_PACK pack; /* Data about packed records */
MI_BLOB *blobs; /* Pointer to blobs */
- char *filename; /* Name of indexfile */
+ char *unique_file_name; /* realpath() of index file */
+ char *data_file_name, /* Resolved path names from symlinks */
+ *index_file_name;
byte *file_map; /* mem-map of file if possible */
ulong this_process; /* processid */
ulong last_process; /* For table-change-check */
diff --git a/myisam/myisamlog.c b/myisam/myisamlog.c
index 36817ac2842..78c3faa72ed 100644
--- a/myisam/myisamlog.c
+++ b/myisam/myisamlog.c
@@ -56,7 +56,7 @@ extern int main(int argc,char * *argv);
static void get_options(int *argc,char ***argv);
static int examine_log(my_string file_name,char **table_names);
static int read_string(IO_CACHE *file,gptr *to,uint length);
-static int file_info_compare(void *a,void *b);
+static int file_info_compare(void *cmp_arg, void *a,void *b);
static int test_if_open(struct file_info *key,element_count count,
struct test_if_open_param *param);
static void fix_blob_pointers(MI_INFO *isam,byte *record);
@@ -698,7 +698,8 @@ static int read_string(IO_CACHE *file, register gptr *to, register uint length)
} /* read_string */
-static int file_info_compare(void *a, void *b)
+static int file_info_compare(void* cmp_arg __attribute__((unused)),
+ void *a, void *b)
{
long lint;
diff --git a/myisam/myisampack.c b/myisam/myisampack.c
index 3037f00250b..ee8ef0bb360 100644
--- a/myisam/myisampack.c
+++ b/myisam/myisampack.c
@@ -894,7 +894,8 @@ static int get_statistic(MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
DBUG_RETURN(error != HA_ERR_END_OF_FILE);
}
-static int compare_huff_elements(void *not_used, byte *a, byte *b)
+static int compare_huff_elements(void* cmp_arg __attribute__((unused)),
+ byte *a, byte *b)
{
return *((my_off_t*) a) < *((my_off_t*) b) ? -1 :
(*((my_off_t*) a) == *((my_off_t*) b) ? 0 : 1);
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index d98c10a29a9..91333dfad9b 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -59,6 +59,8 @@ SUFFIXES = .sh
-e 's!@''libexecdir''@!$(libexecdir)!g' \
-e 's!@''PERL''@!@PERL@!' \
-e 's!@''VERSION''@!@VERSION@!' \
+ -e 's!@''MYSQL_BASE_VERSION''@!@MYSQL_BASE_VERSION@!' \
+ -e 's!@''MYSQL_NO_DASH_VERSION''@!@MYSQL_NO_DASH_VERSION@!' \
-e 's!@''MYSQL_SERVER_SUFFIX''@!@MYSQL_SERVER_SUFFIX@!' \
$< > $@-t
@CHMOD@ +x $@-t
diff --git a/mysql-test/install_test_db.sh b/mysql-test/install_test_db.sh
index 049ac6b1cd7..34df311e683 100644
--- a/mysql-test/install_test_db.sh
+++ b/mysql-test/install_test_db.sh
@@ -30,7 +30,7 @@ else
fi
mdata=$data/mysql
-
+EXTRA_ARG=""
if test ! -x $execdir/mysqld
then
@@ -57,9 +57,7 @@ if [ x$BINARY_DIST = x1 ] ; then
basedir=..
else
basedir=.
-rm -rf share
-mkdir share
-ln -sf ../../sql/share share/mysql
+EXTRA_ARG="--language=../sql/share/english/"
fi
# Initialize variables
@@ -193,7 +191,7 @@ then
fi
if $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables \
- --basedir=$basedir --datadir=$ldata --skip-innodb --skip-bdb --skip-gemini << END_OF_DATA
+ --basedir=$basedir --datadir=$ldata --skip-innodb --skip-bdb --skip-gemini $EXTRA_ARG << END_OF_DATA
use mysql;
$c_d
$i_d
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 49012799103..4d508883496 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -99,8 +99,9 @@ export MYSQL_TEST_DIR
STD_DATA=$MYSQL_TEST_DIR/std_data
hostname=`hostname` # Installed in the mysql privilege table
-TESTDIR="$MYSQL_TEST_DIR/t/"
+TESTDIR="$MYSQL_TEST_DIR/t"
TESTSUFFIX=test
+TOT_SKIP=0
TOT_PASS=0
TOT_FAIL=0
TOT_TEST=0
@@ -124,6 +125,7 @@ USE_RUNNING_SERVER=1
DO_GCOV=""
DO_GDB=""
DO_DDD=""
+DO_CLIENT_GDB=""
SLEEP_TIME=2
DBUSER=""
@@ -142,6 +144,8 @@ while test $# -gt 0; do
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-bdb"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-bdb" ;;
--skip-rpl) NO_SLAVE=1 ;;
+ --skip-test=*) SKIP_TEST=`$ECHO "$1" | $SED -e "s;--skip-test=;;"`;;
+ --do-test=*) DO_TEST=`$ECHO "$1" | $SED -e "s;--do-test=;;"`;;
--record)
RECORD=1;
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1" ;;
@@ -149,10 +153,17 @@ while test $# -gt 0; do
DO_BENCH=1
NO_SLAVE=1
;;
+ --big*) # Actually --big-test
+ EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1" ;;
--sleep=*)
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1"
SLEEP_TIME=`$ECHO "$1" | $SED -e "s;--sleep=;;"`
;;
+ --mysqld=*)
+ TMP=`$ECHO "$1" | $SED -e "s;--mysqld-=;"`
+ EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $TMP"
+ EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT $TMP"
+ ;;
--gcov )
if [ x$BINARY_DIST = x1 ] ; then
$ECHO "Cannot do coverage test without the source - please use source dist"
@@ -165,15 +176,23 @@ while test $# -gt 0; do
;;
--gdb )
if [ x$BINARY_DIST = x1 ] ; then
- $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with -gdb option"
+ $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --gdb option"
fi
DO_GDB=1
+ USE_RUNNING_SERVER=""
+ ;;
+ --client-gdb )
+ if [ x$BINARY_DIST = x1 ] ; then
+ $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --client-gdb option"
+ fi
+ DO_CLIENT_GDB=1
;;
--ddd )
if [ x$BINARY_DIST = x1 ] ; then
- $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with -gdb option"
+ $ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --ddd option"
fi
DO_DDD=1
+ USE_RUNNING_SERVER=""
;;
--skip-*)
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1"
@@ -232,7 +251,7 @@ fi
[ -z "$COLUMNS" ] && COLUMNS=80
E=`$EXPR $COLUMNS - 8`
-#DASH72=`expr substr '------------------------------------------------------------------------' 1 $E`
+#DASH72=`$EXPR substr '------------------------------------------------------------------------' 1 $E`
DASH72=`$ECHO '------------------------------------------------------------------------'|$CUT -c 1-$E`
# on source dist, we pick up freshly build executables
@@ -242,6 +261,7 @@ if [ x$SOURCE_DIST = x1 ] ; then
MYSQL_TEST="$BASEDIR/client/mysqltest"
MYSQLADMIN="$BASEDIR/client/mysqladmin"
MYSQL="$BASEDIR/client/mysql"
+ LANGUAGE="$BASEDIR/sql/share/english/"
INSTALL_DB="./install_test_db"
else
MYSQLD="$BASEDIR/bin/mysqld"
@@ -249,6 +269,12 @@ else
MYSQLADMIN="$BASEDIR/bin/mysqladmin"
MYSQL="$BASEDIR/bin/mysql"
INSTALL_DB="./install_test_db -bin"
+ if test -d "$BASEDIR/share/mysql/english"
+ then
+ LANGUAGE="$BASEDIR/share/mysql/english/"
+ else
+ LANGUAGE="$BASEDIR/share/english/"
+ fi
fi
# If we should run all tests cases, we will use a local server for that
@@ -273,7 +299,10 @@ then
fi
-MYSQL_TEST="$MYSQL_TEST --no-defaults --socket=$MASTER_MYSOCK --database=$DB --user=$DBUSER --password=$DBPASSWD --silent -v --tmpdir=$MYSQL_TMP_DIR"
+MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB --user=$DBUSER --password=$DBPASSWD --silent -v --tmpdir=$MYSQL_TMP_DIR"
+MYSQL_TEST_BIN=$MYSQL_TEST
+MYSQL_TEST="$MYSQL_TEST $MYSQL_TEST_ARGS"
+GDB_CLIENT_INIT=$MYSQL_TMP_DIR/gdbinit.client
GDB_MASTER_INIT=$MYSQL_TMP_DIR/gdbinit.master
GDB_SLAVE_INIT=$MYSQL_TMP_DIR/gdbinit.slave
GCOV_MSG=$MYSQL_TMP_DIR/mysqld-gcov.out
@@ -304,17 +333,34 @@ show_failed_diff ()
{
reject_file=r/$1.reject
result_file=r/$1.result
+ eval_file=r/$1.eval
+
+ if [ -f $eval_file ]
+ then
+ result_file=$eval_file
+ fi
+
if [ -x "$DIFF" ] && [ -f $reject_file ]
then
echo "Below are the diffs between actual and expected results:"
echo "-------------------------------------------------------"
- $DIFF -c $result_file $reject_file
+ $DIFF -c -a $result_file $reject_file
echo "-------------------------------------------------------"
- echo "Please e-mail the above, along with the output of mysqlbug"
- echo "and any other relevant info to bugs@lists.mysql.com"
+ echo "Please follow the instructions outlined at"
+ echo "http://www.mysql.com/doc/R/e/Reporting_mysqltest_bugs.html"
+ echo "to find the reason to this problem and how to report this."
fi
}
+do_gdb_test ()
+{
+ mysql_test_args="$MYSQL_TEST_ARGS $1"
+ $ECHO "set args $mysql_test_args < $2" > $GDB_CLIENT_INIT
+ echo "Set breakpoints ( if needed) and type 'run' in gdb window"
+ #this xterm should not be backgrounded
+ xterm -title "Client" -e gdb -x $GDB_CLIENT_INIT $MYSQL_TEST_BIN
+}
+
error () {
$ECHO "Error: $1"
exit 1
@@ -446,9 +492,8 @@ start_master()
--pid-file=$MASTER_MYPID \
--socket=$MASTER_MYSOCK \
--log=$MASTER_MYLOG --default-character-set=latin1 \
- --core \
--tmpdir=$MYSQL_TMP_DIR \
- --language=english \
+ --language=$LANGUAGE \
--innodb_data_file_path=ibdata1:50M \
$SMALL_SERVER \
$EXTRA_MASTER_OPT $EXTRA_MASTER_MYSQLD_OPT"
@@ -462,7 +507,7 @@ start_master()
--default-character-set=latin1 \
--core \
--tmpdir=$MYSQL_TMP_DIR \
- --language=english \
+ --language=$LANGUAGE \
--innodb_data_file_path=ibdata1:50M \
$SMALL_SERVER \
$EXTRA_MASTER_OPT $EXTRA_MASTER_MYSQLD_OPT"
@@ -518,8 +563,10 @@ start_slave()
--log=$SLAVE_MYLOG --default-character-set=latin1 \
--core \
--tmpdir=$MYSQL_TMP_DIR \
- --language=english \
+ --language=$LANGUAGE \
--skip-innodb --skip-slave-start \
+ --report-host=127.0.0.1 --report-user=root \
+ --report-port=$SLAVE_MYPORT \
$SMALL_SERVER \
$EXTRA_SLAVE_OPT $EXTRA_SLAVE_MYSQLD_OPT"
if [ x$DO_DDD = x1 ]
@@ -628,6 +675,22 @@ run_testcase ()
slave_init_script=$TESTDIR/$tname-slave.sh
slave_master_info_file=$TESTDIR/$tname-slave-master-info.opt
SKIP_SLAVE=`$EXPR \( $tname : rpl \) = 0`
+ if [ -n "$SKIP_TEST" ] ; then
+ SKIP_THIS_TEST=`$EXPR \( $tname : "$SKIP_TEST" \) != 0`
+ if [ x$SKIP_THIS_TEST = x1 ] ;
+ then
+ return;
+ fi
+ fi
+
+ if [ -n "$DO_TEST" ] ; then
+ DO_THIS_TEST=`$EXPR \( $tname : "$DO_TEST" \) != 0`
+ if [ x$DO_THIS_TEST = x0 ] ;
+ then
+ return;
+ fi
+ fi
+
if [ x${NO_SLAVE}x$SKIP_SLAVE = x1x0 ] ;
then
@@ -635,9 +698,9 @@ run_testcase ()
SYST=" ...."
REALT=" ...."
timestr="$USERT $SYST $REALT"
- pname=`$ECHO "$tname "|$CUT -c 1-16`
- RES="$pname $timestr"
- pass_inc
+ pname=`$ECHO "$tname "|$CUT -c 1-24`
+ RES="$pname $timestr"
+ skip_inc
$ECHO "$RES$RES_SPACE [ skipped ]"
return
fi
@@ -691,8 +754,13 @@ run_testcase ()
if [ -f $tf ] ; then
$RM -f r/$tname.*reject
- mytime=`$TIME -p $MYSQL_TEST -R r/$tname.result $EXTRA_MYSQL_TEST_OPT \
- < $tf 2> $TIMEFILE`
+ mysql_test_args="-R r/$tname.result $EXTRA_MYSQL_TEST_OPT"
+ if [ -z "$DO_CLIENT_GDB" ] ; then
+ mytime=`$TIME -p $MYSQL_TEST $mysql_test_args < $tf 2> $TIMEFILE`
+ else
+ do_gdb_test "$mysql_test_args" "$tf"
+ fi
+
res=$?
if [ $res = 0 ]; then
@@ -711,8 +779,8 @@ run_testcase ()
fi
timestr="$USERT $SYST $REALT"
- pname=`$ECHO "$tname "|$CUT -c 1-16`
- RES="$pname $timestr"
+ pname=`$ECHO "$tname "|$CUT -c 1-24`
+ RES="$pname $timestr"
if [ $res = 0 ]; then
total_inc
@@ -744,7 +812,7 @@ run_testcase ()
$ECHO "Resuming Tests"
$ECHO ""
else
-# pass_inc
+ skip_inc
$ECHO "$RES$RES_SPACE [ skipped ]"
fi
fi
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index 4f12f71c7ce..dbdbb7f57a9 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -27,3 +27,8 @@ n
12
Table Op Msg_type Msg_text
test.t1 optimize status OK
+i
+1
+2
+3
+4
diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result
index 5e227313e4a..c88b7375aec 100644
--- a/mysql-test/r/bdb.result
+++ b/mysql-test/r/bdb.result
@@ -509,3 +509,5 @@ id id3
1 1
2 2
100 2
+KINMU_DATE
+KINMU_DATE
diff --git a/mysql-test/r/big_test.require b/mysql-test/r/big_test.require
new file mode 100644
index 00000000000..001b903496b
--- /dev/null
+++ b/mysql-test/r/big_test.require
@@ -0,0 +1,2 @@
+using_big_test
+1
diff --git a/mysql-test/r/bigint.result b/mysql-test/r/bigint.result
index 08e21b279bc..46ce0fda2c1 100644
--- a/mysql-test/r/bigint.result
+++ b/mysql-test/r/bigint.result
@@ -5,5 +5,11 @@
+9999999999999999999 -9999999999999999999
10000000000000000000 -10000000000000000000
a
+18446744073709551614
18446744073709551615
+a
+18446744073709551615
+a
18446744073709551615
+a
+18446744073709551614
diff --git a/mysql-test/r/check.result b/mysql-test/r/check.result
new file mode 100644
index 00000000000..694d7429a14
--- /dev/null
+++ b/mysql-test/r/check.result
@@ -0,0 +1,2 @@
+Table Op Msg_type Msg_text
+test.t1 check status OK
diff --git a/mysql-test/r/count_distinct.result b/mysql-test/r/count_distinct.result
index 6fc10f590ec..97d7b57f249 100644
--- a/mysql-test/r/count_distinct.result
+++ b/mysql-test/r/count_distinct.result
@@ -7,3 +7,5 @@ isbn city libname a
isbn city libname a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
+f1 count(distinct t2.f2) count(distinct 1,NULL)
+1 0 0
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index f32c9b0bc80..7940d51868a 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -8,3 +8,7 @@ b
1 10000000001
a$1 $b c$
1 2 3
+table type possible_keys key key_len ref rows Extra
+t2 ref B B 21 const 1 where used
+a B
+3 world
diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result
index 67923fe903c..741fc6bba85 100644
--- a/mysql-test/r/drop.result
+++ b/mysql-test/r/drop.result
@@ -1,2 +1,11 @@
n
1
+n
+4
+Database
+foo
+mysql
+test
+Database
+mysql
+test
diff --git a/mysql-test/r/flush.result b/mysql-test/r/flush.result
index 76862936a16..fca84de710c 100644
--- a/mysql-test/r/flush.result
+++ b/mysql-test/r/flush.result
@@ -1,2 +1,6 @@
n
3
+n
+23
+n
+345
diff --git a/mysql-test/r/func_test.result b/mysql-test/r/func_test.result
index 3dc0fc19848..5d2211baf50 100644
--- a/mysql-test/r/func_test.result
+++ b/mysql-test/r/func_test.result
@@ -34,3 +34,5 @@ this is a 2 2.0
1 1
1 and 0 or 2 2 or 1 and 0
1 1
+sum(if(num is null,0.00,num))
+144.54
diff --git a/mysql-test/r/have_symlink.require b/mysql-test/r/have_symlink.require
new file mode 100644
index 00000000000..55ad9437034
--- /dev/null
+++ b/mysql-test/r/have_symlink.require
@@ -0,0 +1,2 @@
+Variable_name Value
+have_symlink YES
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index 3ad645a6511..3598b15eb0a 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -144,7 +144,7 @@ test.t1 optimize error The handler for the table doesn't support check/repair
a
2
Table Op Msg_type Msg_text
-test.t1 check error The handler for the table doesn't support check/repair
+test.t1 check status OK
a b
2 testing
Table Op Msg_type Msg_text
diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result
new file mode 100644
index 00000000000..6358e2a81cd
--- /dev/null
+++ b/mysql-test/r/multi_update.result
@@ -0,0 +1,22 @@
+Table Op Msg_type Msg_text
+test.t1 check status OK
+test.t2 check status OK
+test.t3 check status OK
+count(*)
+0
+count(*)
+0
+count(*)
+0
+count(*)
+0
+count(*)
+0
+count(*)
+0
+count(*)
+0
+count(*)
+0
+count(*)
+0
diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result
index 74c8bd53af2..a47fc950f0e 100644
--- a/mysql-test/r/order_by.result
+++ b/mysql-test/r/order_by.result
@@ -111,3 +111,119 @@ DateOfAction TransactionID
member_id nickname voornaam
1
2
+table type possible_keys key key_len ref rows Extra
+t1 range a a 20 NULL 2 where used; Using index
+a b c
+1 NULL b
+table type possible_keys key key_len ref rows Extra
+t1 range a a 4 NULL 10 where used; Using index
+a b c
+2 3 c
+2 2 b
+2 2 a
+2 1 b
+2 1 a
+1 3 b
+1 1 b
+1 1 b
+1 1 NULL
+1 NULL b
+1 NULL NULL
+table type possible_keys key key_len ref rows Extra
+t1 ref a a 4 const 5 where used; Using index; Using filesort
+a b c
+1 3 b
+1 1 NULL
+1 1 b
+1 1 b
+1 NULL NULL
+1 NULL b
+table type possible_keys key key_len ref rows Extra
+t1 ref a a 9 const,const 2 where used; Using index; Using filesort
+a b c
+1 NULL NULL
+1 NULL b
+table type possible_keys key key_len ref rows Extra
+t1 range a a 9 NULL 8 where used; Using index; Using filesort
+table type possible_keys key key_len ref rows Extra
+t1 range a a 9 NULL 5 where used; Using index
+table type possible_keys key key_len ref rows Extra
+t1 ref a a 9 const,const 1 where used; Using index; Using filesort
+table type possible_keys key key_len ref rows Extra
+t1 range a a 9 NULL 6 where used; Using index
+table type possible_keys key key_len ref rows Extra
+t1 range a a 9 NULL 5 where used; Using index
+table type possible_keys key key_len ref rows Extra
+t1 range a a 9 NULL 2 where used; Using index; Using filesort
+table type possible_keys key key_len ref rows Extra
+t1 index NULL a 18 NULL 11 Using index
+a b c
+1 0
+1 0 b
+1 1
+1 1 b
+1 1 b
+1 3 b
+2 1 a
+2 1 b
+2 2 a
+2 2 b
+2 3 c
+table type possible_keys key key_len ref rows Extra
+t1 index NULL a 18 NULL 11 Using index
+a b c
+2 3 c
+2 2 b
+2 2 a
+2 1 b
+2 1 a
+1 3 b
+1 1 b
+1 1 b
+1 1
+1 0 b
+1 0
+table type possible_keys key key_len ref rows Extra
+t1 range a a 18 NULL 3 where used; Using index
+a b c
+1 1 b
+1 1 b
+table type possible_keys key key_len ref rows Extra
+t1 range a a 4 NULL 6 where used; Using index
+a b c
+1 1 b
+1 1 b
+1 1
+1 0 b
+1 0
+count(*)
+9
+a b c
+2 3 c
+2 2 b
+2 2 a
+2 1 b
+2 1 a
+1 3 b
+1 1 b
+1 1 b
+1 1
+table type possible_keys key key_len ref rows Extra
+t1 range a a 8 NULL 10 where used; Using index
+a b c
+2 1 b
+2 1 a
+1 1 b
+1 1 b
+1 1
+1 0 b
+1 0
+table type possible_keys key key_len ref rows Extra
+t1 range a a 4 NULL 5 where used; Using index
+a b c
+1 3 b
+1 1 b
+1 1 b
+1 1
+1 0 b
+1 0
diff --git a/mysql-test/r/order_fill_sortbuf.result b/mysql-test/r/order_fill_sortbuf.result
new file mode 100644
index 00000000000..cb3349cc433
--- /dev/null
+++ b/mysql-test/r/order_fill_sortbuf.result
@@ -0,0 +1,2 @@
+count(*)
+4000
diff --git a/mysql-test/r/rpl000002.result b/mysql-test/r/rpl000002.result
index a68ef517708..7f518a7339e 100644
--- a/mysql-test/r/rpl000002.result
+++ b/mysql-test/r/rpl000002.result
@@ -2,6 +2,8 @@ n
2000
2001
2002
+Server_id Host Port
+2 127.0.0.1 9307
id created
1 1970-01-01 06:25:45
id created
diff --git a/mysql-test/r/rpl000009.result b/mysql-test/r/rpl000009.result
index d5b4cdf3bee..74452473d0c 100644
--- a/mysql-test/r/rpl000009.result
+++ b/mysql-test/r/rpl000009.result
@@ -1,2 +1,32 @@
n m
4 15
+Database
+bar
+foo
+mysql
+test
+Database
+mysql
+test
+Database
+bar
+foo
+mysql
+test
+Tables_in_foo
+Tables_in_bar
+t1
+t2
+n s
+1 one bar
+2 two bar
+3 three bar
+n s
+11 eleven bar
+12 twelve bar
+13 thirteen bar
+n s
+1 one bar
+2 two bar
+3 three bar
+4 four bar
diff --git a/mysql-test/r/rpl000014.result b/mysql-test/r/rpl000014.result
index a47c3c91c1d..cdafc7575ab 100644
--- a/mysql-test/r/rpl000014.result
+++ b/mysql-test/r/rpl000014.result
@@ -1,15 +1,15 @@
File Position Binlog_do_db Binlog_ignore_db
-master-bin.001 73
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 1 master-bin.001 73 No 0 0
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 1 master-bin.001 173 Yes 0 0
+master-bin.001 79
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 1 master-bin.001 79 Yes 0 0 1
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 1 master-bin.001 73 No 0 0 1
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 1 master-bin.001 73 Yes 0 0 1
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 1 master-bin.001 173 Yes 0 0 1
File Position Binlog_do_db Binlog_ignore_db
-master-bin.001 73
+master-bin.001 79
n
1
2
diff --git a/mysql-test/r/rpl000015.result b/mysql-test/r/rpl000015.result
index 58487af27f8..0aef660905b 100644
--- a/mysql-test/r/rpl000015.result
+++ b/mysql-test/r/rpl000015.result
@@ -1,13 +1,13 @@
File Position Binlog_do_db Binlog_ignore_db
-master-bin.001 73
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
- 0 0 0 No 0 0
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 test 9998 60 4 No 0 0
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 60 4 No 0 0
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 60 master-bin.001 73 Yes 0 0
+master-bin.001 79
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+ 0 0 0 No 0 0 0
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 test 9998 60 4 No 0 0 0
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 60 4 No 0 0 0
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 60 master-bin.001 79 Yes 0 0 1
n
10
45
diff --git a/mysql-test/r/rpl000016.result b/mysql-test/r/rpl000016.result
index abe4275a124..65f260bd575 100644
--- a/mysql-test/r/rpl000016.result
+++ b/mysql-test/r/rpl000016.result
@@ -1,5 +1,5 @@
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 60 master-bin.001 216 Yes 0 0
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 60 master-bin.001 234 Yes 0 0 3
s
Could not break slave
Tried hard
@@ -9,8 +9,8 @@ master-bin.002
master-bin.003
Log_name
master-bin.003
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 60 master-bin.003 184 Yes 0 0
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 60 master-bin.003 202 Yes 0 0 3
m
34
65
@@ -23,8 +23,8 @@ master-bin.004
master-bin.005
master-bin.006
File Position Binlog_do_db Binlog_ignore_db
-master-bin.006 131
-Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
-127.0.0.1 root 9999 60 master-bin.006 131 Yes 0 0
+master-bin.006 720
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root 9999 60 master-bin.006 720 Yes 0 0 11
count(*)
100
diff --git a/mysql-test/r/rpl_log.result b/mysql-test/r/rpl_log.result
new file mode 100644
index 00000000000..35a91d03489
--- /dev/null
+++ b/mysql-test/r/rpl_log.result
@@ -0,0 +1,57 @@
+Log_name Pos Event_type Server_id Log_seq Info
+master-bin.001 4 Start 1 1 Server ver: $VERSION, Binlog ver: 2
+master-bin.001 79 Query 1 2 use test; create table t1(n int not null auto_increment primary key)
+master-bin.001 172 Intvar 1 3 INSERT_ID=1
+master-bin.001 200 Query 1 4 use test; insert into t1 values (NULL)
+master-bin.001 263 Query 1 5 use test; drop table t1
+master-bin.001 311 Query 1 6 use test; create table t1 (word char(20) not null)
+master-bin.001 386 Load 1 7 use test; LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 FIELDS TERMINATED BY '\\t' ESCAPED BY '\\\\' LINES TERMINATED BY '\\n' (word)
+master-bin.001 468 Query 1 8 use test; drop table t1
+Log_name Pos Event_type Server_id Log_seq Info
+master-bin.001 79 Query 1 2 use test; create table t1(n int not null auto_increment primary key)
+Log_name Pos Event_type Server_id Log_seq Info
+master-bin.001 79 Query 1 2 use test; create table t1(n int not null auto_increment primary key)
+master-bin.001 172 Intvar 1 3 INSERT_ID=1
+Log_name Pos Event_type Server_id Log_seq Info
+master-bin.001 200 Query 1 4 use test; insert into t1 values (NULL)
+Log_name Pos Event_type Server_id Log_seq Info
+master-bin.001 4 Start 1 1 Server ver: $VERSION, Binlog ver: 2
+master-bin.001 79 Query 1 2 use test; create table t1(n int not null auto_increment primary key)
+master-bin.001 172 Intvar 1 3 INSERT_ID=1
+master-bin.001 200 Query 1 4 use test; insert into t1 values (NULL)
+master-bin.001 263 Query 1 5 use test; drop table t1
+master-bin.001 311 Query 1 6 use test; create table t1 (word char(20) not null)
+master-bin.001 386 Load 1 7 use test; LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 FIELDS TERMINATED BY '\\t' ESCAPED BY '\\\\' LINES TERMINATED BY '\\n' (word)
+master-bin.001 468 Query 1 8 use test; drop table t1
+master-bin.001 516 Rotate 1 9 master-bin.002;pos=4
+master-bin.001 557 Stop 1 10
+Log_name Pos Event_type Server_id Log_seq Info
+master-bin.002 4 Start 1 1 Server ver: $VERSION, Binlog ver: 2
+master-bin.002 79 Query 1 2 use test; create table t1 (n int)
+master-bin.002 137 Query 1 3 use test; insert into t1 values (1)
+master-bin.002 197 Query 1 4 use test; drop table t1
+Log_name
+master-bin.001
+master-bin.002
+Log_name
+slave-bin.001
+slave-bin.002
+Log_name Pos Event_type Server_id Log_seq Info
+slave-bin.001 4 Start 2 1 Server ver: $VERSION, Binlog ver: 2
+slave-bin.001 79 Slave 2 2 host=127.0.0.1,port=$MASTER_MYPORT,log=master-bin.001,pos=4
+slave-bin.001 132 Query 1 2 use test; create table t1(n int not null auto_increment primary key)
+slave-bin.001 225 Intvar 1 3 INSERT_ID=1
+slave-bin.001 253 Query 1 4 use test; insert into t1 values (NULL)
+slave-bin.001 316 Query 1 5 use test; drop table t1
+slave-bin.001 364 Query 1 6 use test; create table t1 (word char(20) not null)
+slave-bin.001 439 Query 1 8 use test; drop table t1
+slave-bin.001 487 Rotate 2 3 slave-bin.002;pos=4; forced by master
+slave-bin.001 527 Stop 2 4
+Log_name Pos Event_type Server_id Log_seq Info
+slave-bin.002 4 Start 2 1 Server ver: $VERSION, Binlog ver: 2
+slave-bin.002 79 Slave 2 2 host=127.0.0.1,port=$MASTER_MYPORT,log=master-bin.002,pos=4
+slave-bin.002 132 Query 1 2 use test; create table t1 (n int)
+slave-bin.002 190 Query 1 3 use test; insert into t1 values (1)
+slave-bin.002 250 Query 1 4 use test; drop table t1
+Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter Last_log_seq
+127.0.0.1 root $MASTER_MYPORT 1 master-bin.002 245 Yes 0 0 4
diff --git a/mysql-test/r/rpl_magic.result b/mysql-test/r/rpl_magic.result
new file mode 100644
index 00000000000..449a6bca68c
--- /dev/null
+++ b/mysql-test/r/rpl_magic.result
@@ -0,0 +1,22 @@
+n
+1
+2
+3
+4
+5
+n
+1
+2
+3
+4
+n
+1
+2
+3
+4
+n
+1
+2
+3
+4
+5
diff --git a/mysql-test/r/rpl_sporadic_master.result b/mysql-test/r/rpl_sporadic_master.result
new file mode 100644
index 00000000000..414468f0998
--- /dev/null
+++ b/mysql-test/r/rpl_sporadic_master.result
@@ -0,0 +1,7 @@
+n
+1
+2
+3
+4
+5
+6
diff --git a/mysql-test/r/select_found.result b/mysql-test/r/select_found.result
index fcbe8958748..b3fa281e76d 100644
--- a/mysql-test/r/select_found.result
+++ b/mysql-test/r/select_found.result
@@ -22,7 +22,7 @@ b
FOUND_ROWS()
6
b c
-2 1
+5 3
FOUND_ROWS()
6
a b a b
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index ce2e5d4f58d..099ea2fa109 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -80,3 +80,13 @@ t1 CREATE TABLE `t1` (
`test_set` set('val1','val2','val3') NOT NULL default '',
`name` char(20) default 'O''Brien'
) TYPE=MyISAM COMMENT='it''s a table'
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL default '0',
+ UNIQUE KEY `aa` (`a`)
+) TYPE=MyISAM
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL default '0',
+ PRIMARY KEY (`a`)
+) TYPE=MyISAM
diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result
new file mode 100644
index 00000000000..71e8f79b890
--- /dev/null
+++ b/mysql-test/r/symlink.result
@@ -0,0 +1,23 @@
+Table Op Msg_type Msg_text
+test.t9 check status OK
+Table Op Msg_type Msg_text
+test.t9 optimize status OK
+Table Op Msg_type Msg_text
+test.t9 repair status OK
+Table Create Table
+t9 CREATE TABLE `t9` (
+ `a` int(11) NOT NULL auto_increment,
+ `b` char(16) NOT NULL default '',
+ `c` int(11) NOT NULL default '0',
+ PRIMARY KEY (`a`)
+) TYPE=MyISAM
+count(*)
+16724
+Table Create Table
+t9 CREATE TABLE `t9` (
+ `a` int(11) NOT NULL auto_increment,
+ `b` char(16) NOT NULL default '',
+ `c` int(11) NOT NULL default '0',
+ `d` int(11) NOT NULL default '0',
+ PRIMARY KEY (`a`)
+) TYPE=MyISAM
diff --git a/mysql-test/r/type_float.result b/mysql-test/r/type_float.result
index 93a38e9967f..30a2f884557 100644
--- a/mysql-test/r/type_float.result
+++ b/mysql-test/r/type_float.result
@@ -2,6 +2,8 @@
10 10.0 10 10 10
6e-05 -6e-05 --6e-05 -6e-05+1.000000
6e-05 -6e-05 6e-05 0.99994
+1e1 1.e1 1.0e1 1e+1 1.e+1 1.0e+1 1e-1 1.e-1 1.0e-1
+10 10 10 10 10 10 0.1 0.1 0.1
Field Type Null Key Default Extra Privileges
f1 float YES NULL select,insert,update,references
f2 double YES NULL select,insert,update,references
diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result
index ee04e437bb7..f852378e6a1 100644
--- a/mysql-test/r/variables.result
+++ b/mysql-test/r/variables.result
@@ -1,7 +1,7 @@
@test @`select` @TEST @not_used
1 2 3 NULL
@test_int @test_double @test_string @test_string2 @select
-10 0.00 abcdeghi abcdefghij NULL
+10 1e-10 abcdeghi abcdefghij NULL
@test_int @test_double @test_string @test_string2
hello hello hello hello
@test_int @test_double @test_string @test_string2
@@ -10,3 +10,5 @@ hellohello hellohello hellohello hellohello
NULL NULL NULL NULL
@t1:=(@t2:=1)+@t3:=4 @t1 @t2 @t3
5 5 1 4
+@t5
+1.23456
diff --git a/mysql-test/std_data/master-bin.001 b/mysql-test/std_data/master-bin.001
index fa30d8e5302..2ec2397acdd 100644
--- a/mysql-test/std_data/master-bin.001
+++ b/mysql-test/std_data/master-bin.001
Binary files differ
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index 377e8257457..681e3d36cca 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -71,7 +71,6 @@ ALTER TABLE t1 ADD Column new_col int not null;
UNLOCK TABLES;
OPTIMIZE TABLE t1;
DROP TABLE t1;
-drop table if exists t1;
#
# ALTER TABLE ... ENABLE/DISABLE KEYS
@@ -92,3 +91,13 @@ while ($1)
}
alter table t1 enable keys;
drop table t1;
+
+#
+# Drop and add an auto_increment column
+#
+
+create table t1 (i int unsigned not null auto_increment primary key);
+insert into t1 values (null),(null),(null),(null);
+alter table t1 drop i,add i int unsigned not null auto_increment, drop primary key, add primary key (i);
+select * from t1;
+drop table t1;
diff --git a/mysql-test/t/bdb-crash.test b/mysql-test/t/bdb-crash.test
index 05ab7260d23..e2d2cd42be2 100644
--- a/mysql-test/t/bdb-crash.test
+++ b/mysql-test/t/bdb-crash.test
@@ -1,3 +1,4 @@
+-- source include/have_bdb.inc
# test for bug reported by Mark Steele
drop table if exists tblChange;
diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test
index 96296e238fd..d9e08ed33dc 100644
--- a/mysql-test/t/bdb.test
+++ b/mysql-test/t/bdb.test
@@ -686,7 +686,7 @@ drop table t1;
create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) type=bdb;
insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL');
LOCK TABLES t1 WRITE;
---error 690
+--error 1062
insert into t1 values (99,1,2,'D'),(1,1,2,'D');
select id from t1;
select id from t1;
@@ -697,7 +697,7 @@ create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(3
insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL');
LOCK TABLES t1 WRITE;
begin;
---error 690
+--error 1062
insert into t1 values (99,1,2,'D'),(1,1,2,'D');
select id from t1;
insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D');
@@ -705,3 +705,13 @@ commit;
select id,id3 from t1;
UNLOCK TABLES;
DROP TABLE t1;
+
+#
+# Test with empty tables (crashed with lock error)
+#
+
+CREATE TABLE t1 (SYAIN_NO char(5) NOT NULL default '', KINMU_DATE char(6) NOT NULL default '', PRIMARY KEY (SYAIN_NO,KINMU_DATE)) TYPE=BerkeleyDB;
+CREATE TABLE t2 ( SYAIN_NO char(5) NOT NULL default '',STR_DATE char(8) NOT NULL default '',PRIMARY KEY (SYAIN_NO,STR_DATE) ) TYPE=BerkeleyDB;
+select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO;
+select T1.KINMU_DATE from t1 T1 ,t2 T2 where T1.SYAIN_NO = '12345' and T1.KINMU_DATE = '200106' and T2.SYAIN_NO = T1.SYAIN_NO;
+DROP TABLE t1,t2;
diff --git a/mysql-test/t/bigint.test b/mysql-test/t/bigint.test
index 9a819463f3f..6470b6f6a30 100644
--- a/mysql-test/t/bigint.test
+++ b/mysql-test/t/bigint.test
@@ -6,7 +6,11 @@ select 9223372036854775807,-009223372036854775808;
select +9999999999999999999,-9999999999999999999;
drop table if exists t1;
-create table t1 (a bigint unsigned);
-insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFF);
+create table t1 (a bigint unsigned not null, primary key(a));
+insert into t1 values (18446744073709551615), (0xFFFFFFFFFFFFFFFE);
+select * from t1;
+select * from t1 where a=18446744073709551615;
+select * from t1 where a='18446744073709551615';
+delete from t1 where a=18446744073709551615;
select * from t1;
drop table t1;
diff --git a/mysql-test/t/check.test b/mysql-test/t/check.test
new file mode 100644
index 00000000000..6296b31d65d
--- /dev/null
+++ b/mysql-test/t/check.test
@@ -0,0 +1,19 @@
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+connection con1;
+drop table if exists t1;
+#add a lot of keys to slow down check
+create table t1(n int not null, key(n), key(n), key(n), key(n));
+let $1=10000;
+while ($1)
+{
+ eval insert into t1 values ($1);
+ dec $1;
+}
+send check table t1 type=extended;
+connection con2;
+insert into t1 values (200000);
+connection con1;
+reap;
+drop table t1;
+
diff --git a/mysql-test/t/compare.test b/mysql-test/t/compare.test
index b5596784f35..450d9c0961c 100644
--- a/mysql-test/t/compare.test
+++ b/mysql-test/t/compare.test
@@ -2,6 +2,7 @@
# Bug when using comparions of strings and integers.
#
+drop table if exists t1;
CREATE TABLE t1 (id CHAR(12) not null, PRIMARY KEY (id));
insert into t1 values ('000000000001'),('000000000002');
explain select * from t1 where id=000000000001;
diff --git a/mysql-test/t/count_distinct.test b/mysql-test/t/count_distinct.test
index 1afb548c2ad..3d795d44821 100644
--- a/mysql-test/t/count_distinct.test
+++ b/mysql-test/t/count_distinct.test
@@ -32,3 +32,13 @@ insert into t1 values ('NYC Lib','New York');
select t2.isbn,city,t1.libname,count(t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city,t1.libname;
select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct t1.libname) > 1;
drop table t1, t2, t3;
+
+#
+# Problem with LEFT JOIN
+#
+
+create table t1 (f1 int);
+insert into t1 values (1);
+create table t2 (f1 int,f2 int);
+select t1.f1,count(distinct t2.f2),count(distinct 1,NULL) from t1 left join t2 on t1.f1=t2.f1 group by t1.f1;
+drop table t1,t2;
diff --git a/mysql-test/t/count_distinct2.test b/mysql-test/t/count_distinct2.test
index 2447a7c3611..33d4cf54278 100644
--- a/mysql-test/t/count_distinct2.test
+++ b/mysql-test/t/count_distinct2.test
@@ -45,7 +45,7 @@ select count(distinct n2), n1 from t1 group by n1;
drop table t1;
# test the converstion from tree to MyISAM
-create table t1 (n int);
+create table t1 (n int default NULL);
let $1=5000;
while ($1)
{
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index a5224cd0318..d45d013c9fb 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -2,6 +2,7 @@
# Check some special create statements.
#
+drop table if exists t1,t2;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -57,3 +58,14 @@ select a$1, $b, c$ from test_$1.$test1;
create table test_$1.test2$ (a int);
drop table test_$1.test2$;
drop database test_$1;
+
+#
+# Test of CREATE ... SELECT with indexes
+#
+
+create table t1 (a int auto_increment not null primary key, B CHAR(20));
+insert into t1 (b) values ("hello"),("my"),("world");
+create table t2 (key (b)) select * from t1;
+explain select * from t2 where b="world";
+select * from t2 where b="world";
+drop table t1,t2;
diff --git a/mysql-test/t/drop.test b/mysql-test/t/drop.test
index 1de387f6e4c..2a45fe8253b 100644
--- a/mysql-test/t/drop.test
+++ b/mysql-test/t/drop.test
@@ -10,3 +10,34 @@ insert into t1 values(2);
create table t1(n int);
drop table t1;
select * from t1;
+
+#now test for a bug in drop database - it is important that the name
+#of the table is the same as the name of the database - in the original
+#code this triggered a bug
+drop database if exists foo;
+create database foo;
+drop database if exists foo;
+create database foo;
+create table foo.foo (n int);
+insert into foo.foo values (4);
+select * from foo.foo;
+drop database if exists foo;
+create database foo;
+drop database foo;
+
+# test drop/create database and FLUSH TABLES WITH READ LOCK
+drop database if exists foo;
+flush tables with read lock;
+--error 1209
+create database foo;
+unlock tables;
+create database foo;
+show databases;
+flush tables with read lock;
+--error 1208
+drop database foo;
+unlock tables;
+drop database foo;
+show databases;
+
+
diff --git a/mysql-test/t/err000001.test b/mysql-test/t/err000001.test
index 9957cefff9b..d9898054a83 100644
--- a/mysql-test/t/err000001.test
+++ b/mysql-test/t/err000001.test
@@ -10,7 +10,8 @@ create table t1 (a int);
!$1054 select count(test.t1.b) from t1;
!$1109 select count(not_existing_database.t1) from t1;
!$1109 select count(not_existing_database.t1.a) from t1;
-!$1044 select count(not_existing_database.t1.a) from not_existing_database.t1;
+--error 1044,1146
+select count(not_existing_database.t1.a) from not_existing_database.t1;
!$1054 select 1 from t1 order by 2;
!$1054 select 1 from t1 group by 2;
!$1054 select 1 from t1 order by t1.b;
diff --git a/mysql-test/t/flush.test b/mysql-test/t/flush.test
index 94586519c66..4491de1f82b 100644
--- a/mysql-test/t/flush.test
+++ b/mysql-test/t/flush.test
@@ -32,3 +32,28 @@ connection con2;
unlock tables;
connection con1;
reap;
+
+#test if drop database will wait until we release the global read lock
+connection con1;
+drop database if exists foo;
+create database foo;
+create table foo.t1(n int);
+insert into foo.t1 values (23);
+flush tables with read lock;
+connection con2;
+send drop database foo;
+connection con1;
+select * from foo.t1;
+unlock tables;
+connection con2;
+reap;
+
+# test if dirty close releases global read lock
+connection con1;
+create table t1 (n int);
+flush tables with read lock;
+dirty_close con1;
+connection con2;
+insert into t1 values (345);
+select * from t1;
+drop table t1;
diff --git a/mysql-test/t/func_test.test b/mysql-test/t/func_test.test
index 9562ae5f77b..0439a96f077 100644
--- a/mysql-test/t/func_test.test
+++ b/mysql-test/t/func_test.test
@@ -24,3 +24,13 @@ select -1.49 or -1.49,0.6 or 0.6;
select 5 between 0 and 10 between 0 and 1,(5 between 0 and 10) between 0 and 1;
select 1 and 2 between 2 and 10, 2 between 2 and 10 and 1;
select 1 and 0 or 2, 2 or 1 and 0;
+
+#
+# Problem with IF()
+#
+
+drop table if exists t1;
+create table t1 (num double(12,2));
+insert into t1 values (144.54);
+select sum(if(num is null,0.00,num)) from t1;
+drop table t1;
diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test
index cef53ce8165..84b861a9c47 100644
--- a/mysql-test/t/innodb.test
+++ b/mysql-test/t/innodb.test
@@ -437,7 +437,7 @@ drop table t1;
create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(30),primary key (id,id2),index index_id3 (id3)) type=innodb;
insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL');
LOCK TABLES t1 WRITE;
---error 690
+--error 1062
insert into t1 values (99,1,2,'D'),(1,1,2,'D');
select id from t1;
select id from t1;
@@ -448,7 +448,7 @@ create table t1 (id int NOT NULL,id2 int NOT NULL,id3 int NOT NULL,dummy1 char(3
insert into t1 values (0,0,0,'ABCDEFGHIJ'),(2,2,2,'BCDEFGHIJK'),(1,1,1,'CDEFGHIJKL');
LOCK TABLES t1 WRITE;
begin;
---error 690
+--error 1062
insert into t1 values (99,1,2,'D'),(1,1,2,'D');
select id from t1;
insert ignore into t1 values (100,1,2,'D'),(1,1,99,'D');
diff --git a/mysql-test/t/insert.test b/mysql-test/t/insert.test
index cf6f41d454d..270b1cd4c79 100644
--- a/mysql-test/t/insert.test
+++ b/mysql-test/t/insert.test
@@ -10,3 +10,15 @@ insert into t1 values (a+3);
insert into t1 values (4),(a+5);
select * from t1;
drop table t1;
+
+#
+# Test of duplicate key values with packed keys
+#
+
+create table t1 (id int not null auto_increment primary key, username varchar(32) not null, unique (username));
+insert into t1 values (0,"mysql");
+insert into t1 values (0,"mysql ab");
+insert into t1 values (0,"mysql a");
+insert into t1 values (0,"r1manic");
+insert into t1 values (0,"r1man");
+drop table t1;
diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test
new file mode 100644
index 00000000000..7959b67ee9c
--- /dev/null
+++ b/mysql-test/t/multi_update.test
@@ -0,0 +1,51 @@
+#
+# Only run the test if we are using --big-test, because this test takes a
+# long time
+#
+-- require r/big_test.require
+eval select $BIG_TEST as using_big_test;
+
+drop table if exists t1,t2,t3;
+create table t1(id1 int not null auto_increment primary key, t char(12));
+create table t2(id2 int not null, t char(12));
+create table t3(id3 int not null, t char(12), index(id3));
+let $1 = 10000;
+while ($1)
+ {
+ let $2 = 5;
+ eval insert into t1(t) values ('$1');
+ while ($2)
+ {
+ eval insert into t2(id2,t) values ($1,'$2');
+ let $3 = 10;
+ while ($3)
+ {
+ eval insert into t3(id3,t) values ($1,'$2');
+ dec $3;
+ }
+ dec $2;
+ }
+ dec $1;
+ }
+
+delete t1.*, t2.*, t3.* from t1,t2,t3 where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 9500;
+
+check table t1, t2, t3;
+
+select count(*) from t1 where id1 > 9500;
+select count(*) from t2 where id2 > 9500;
+select count(*) from t3 where id3 > 9500;
+
+delete t1, t2, t3 from t1,t2,t3 where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 500;
+select count(*) from t1 where id1 > 500;
+select count(*) from t2 where id2 > 500;
+select count(*) from t3 where id3 > 500;
+
+delete t1, t2, t3 from t1,t2,t3 where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 0;
+
+# These queries will force a scan of the table
+select count(*) from t1 where id1;
+select count(*) from t2 where id2;
+select count(*) from t3 where id3;
+
+drop table t1,t2,t3;
diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test
index 4e5cee0d0ff..08d26413761 100644
--- a/mysql-test/t/order_by.test
+++ b/mysql-test/t/order_by.test
@@ -168,8 +168,8 @@ drop table t1,t2,t3;
#bug reported by Wouter de Jong
-drop table if exists members;
-CREATE TABLE members (
+drop table if exists t1;
+CREATE TABLE t1 (
member_id int(11) NOT NULL auto_increment,
inschrijf_datum varchar(20) NOT NULL default '',
lastchange_datum varchar(20) NOT NULL default '',
@@ -200,8 +200,57 @@ CREATE TABLE members (
PRIMARY KEY (member_id)
) TYPE=MyISAM PACK_KEYS=1;
-insert into members (member_id) values (1),(2),(3);
-select member_id, nickname, voornaam FROM members
+insert into t1 (member_id) values (1),(2),(3);
+select member_id, nickname, voornaam FROM t1
ORDER by lastchange_datum DESC LIMIT 2;
-drop table members;
+drop table t1;
+
+#
+# Test optimization of ORDER BY DESC
+#
+
+create table t1 (a int not null, b int, c varchar(10), key (a, b, c));
+insert into t1 values (1, NULL, NULL), (1, NULL, 'b'), (1, 1, NULL), (1, 1, 'b'), (1, 1, 'b'), (2, 1, 'a'), (2, 1, 'b'), (2, 2, 'a'), (2, 2, 'b'), (2, 3, 'c'),(1,3,'b');
+
+explain select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
+select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
+explain select * from t1 where a >= 1 and a < 3 order by a desc;
+select * from t1 where a >= 1 and a < 3 order by a desc;
+explain select * from t1 where a = 1 order by a desc, b desc;
+select * from t1 where a = 1 order by a desc, b desc;
+explain select * from t1 where a = 1 and b is null order by a desc, b desc;
+select * from t1 where a = 1 and b is null order by a desc, b desc;
+explain select * from t1 where a >= 1 and a < 3 and b >0 order by a desc,b desc;
+explain select * from t1 where a = 2 and b >0 order by a desc,b desc;
+explain select * from t1 where a = 2 and b is null order by a desc,b desc;
+explain select * from t1 where a = 2 and (b is null or b > 0) order by a
+desc,b desc;
+explain select * from t1 where a = 2 and b > 0 order by a desc,b desc;
+explain select * from t1 where a = 2 and b < 2 order by a desc,b desc;
+
+#
+# Test things when we don't have NULL keys
+#
+
+alter table t1 modify b int not null, modify c varchar(10) not null;
+explain select * from t1 order by a, b, c;
+select * from t1 order by a, b, c;
+explain select * from t1 order by a desc, b desc, c desc;
+select * from t1 order by a desc, b desc, c desc;
+# test multiple ranges, NO_MAX_RANGE and EQ_RANGE
+explain select * from t1 where (a = 1 and b = 1 and c = 'b') or (a > 2) order by a desc;
+select * from t1 where (a = 1 and b = 1 and c = 'b') or (a > 2) order by a desc;
+# test NEAR_MAX, NO_MIN_RANGE
+explain select * from t1 where a < 2 and b <= 1 order by a desc, b desc;
+select * from t1 where a < 2 and b <= 1 order by a desc, b desc;
+select count(*) from t1 where a < 5 and b > 0;
+select * from t1 where a < 5 and b > 0 order by a desc,b desc;
+# test HA_READ_AFTER_KEY (at the end of the file), NEAR_MIN
+explain select * from t1 where a between 1 and 3 and b <= 1 order by a desc, b desc;
+select * from t1 where a between 1 and 3 and b <= 1 order by a desc, b desc;
+# test HA_READ_AFTER_KEY (in the middle of the file)
+explain select * from t1 where a between 0 and 1 order by a desc, b desc;
+select * from t1 where a between 0 and 1 order by a desc, b desc;
+drop table t1;
+
diff --git a/mysql-test/t/order_fill_sortbuf-master.opt b/mysql-test/t/order_fill_sortbuf-master.opt
new file mode 100644
index 00000000000..af4e7d33143
--- /dev/null
+++ b/mysql-test/t/order_fill_sortbuf-master.opt
@@ -0,0 +1 @@
+-O sort_buffer=0
diff --git a/mysql-test/t/order_fill_sortbuf.test b/mysql-test/t/order_fill_sortbuf.test
new file mode 100644
index 00000000000..a64ffce08e3
--- /dev/null
+++ b/mysql-test/t/order_fill_sortbuf.test
@@ -0,0 +1,20 @@
+#
+# This test does a create-select with ORDER BY, where there is so many
+# rows MySQL needs to use a merge during the sort phase.
+#
+
+drop table if exists t1,t2;
+CREATE TABLE `t1` (
+ `id` int(11) NOT NULL default '0',
+ `id2` int(11) NOT NULL default '0',
+ `id3` int(11) NOT NULL default '0');
+let $1=4000;
+while ($1)
+ {
+ eval insert into t1 (id,id2,id3) values ($1,$1,$1);
+ dec $1;
+ }
+
+create table t2 select id2 from t1 order by id3;
+select count(*) from t2;
+drop table t1,t2;
diff --git a/mysql-test/t/overflow.test b/mysql-test/t/overflow.test
index 7a652257bac..6619a87cabb 100644
--- a/mysql-test/t/overflow.test
+++ b/mysql-test/t/overflow.test
@@ -1,4 +1,4 @@
connect (con1,localhost,boo,,);
connection con1;
--- error 1064;
+-- error 1064,1102
drop database AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA;
diff --git a/mysql-test/t/rpl000002.test b/mysql-test/t/rpl000002.test
index 0c490e6316d..865aa5e5bab 100644
--- a/mysql-test/t/rpl000002.test
+++ b/mysql-test/t/rpl000002.test
@@ -11,6 +11,7 @@ use test;
sync_with_master;
select * from t1;
connection master;
+show slave hosts;
drop table t1;
save_master_pos;
connection slave;
diff --git a/mysql-test/t/rpl000009.test b/mysql-test/t/rpl000009.test
index 768c6c151b4..208e6f0b037 100644
--- a/mysql-test/t/rpl000009.test
+++ b/mysql-test/t/rpl000009.test
@@ -31,3 +31,56 @@ connection slave;
sync_with_master;
drop database if exists bar;
drop database if exists foo;
+
+#now let's test load data from master
+
+#first create some databases and tables on the master
+connection master;
+set sql_log_bin = 0;
+create database foo;
+create database bar;
+show databases;
+create table foo.t1(n int, s char(20));
+create table foo.t2(n int, s text);
+insert into foo.t1 values (1, 'one'), (2, 'two'), (3, 'three');
+insert into foo.t2 values (11, 'eleven'), (12, 'twelve'), (13, 'thirteen');
+
+create table bar.t1(n int, s char(20));
+create table bar.t2(n int, s text);
+insert into bar.t1 values (1, 'one bar'), (2, 'two bar'), (3, 'three bar');
+insert into bar.t2 values (11, 'eleven bar'), (12, 'twelve bar'),
+ (13, 'thirteen bar');
+set sql_log_bin = 1;
+save_master_pos;
+connection slave;
+sync_with_master;
+
+#this should show that the slave is empty at this point
+show databases;
+load data from master;
+
+#now let's check if we have the right tables and the right data in them
+show databases;
+use foo;
+show tables;
+use bar;
+show tables;
+select * from bar.t1;
+select * from bar.t2;
+
+#now let's see if replication works
+connection master;
+insert into bar.t1 values (4, 'four bar');
+save_master_pos;
+connection slave;
+sync_with_master;
+select * from bar.t1;
+
+#now time for cleanup
+connection master;
+drop database bar;
+drop database foo;
+save_master_pos;
+connection slave;
+sync_with_master;
+
diff --git a/mysql-test/t/rpl000014.test b/mysql-test/t/rpl000014.test
index b501d63b10e..604e614b3a8 100644
--- a/mysql-test/t/rpl000014.test
+++ b/mysql-test/t/rpl000014.test
@@ -25,7 +25,7 @@ create table foo (n int);
insert into foo values (1),(2),(3);
save_master_pos;
connection slave;
-change master to master_log_pos=73;
+change master to master_log_pos=79;
sync_with_master;
select * from foo;
connection master;
diff --git a/mysql-test/t/rpl000017-slave.sh b/mysql-test/t/rpl000017-slave.sh
index 4415f093aad..2ead2021416 100755
--- a/mysql-test/t/rpl000017-slave.sh
+++ b/mysql-test/t/rpl000017-slave.sh
@@ -6,4 +6,5 @@ replicate
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
9306
1
+0
EOF
diff --git a/mysql-test/t/rpl_log.test b/mysql-test/t/rpl_log.test
new file mode 100644
index 00000000000..9ac6e7ab6b8
--- /dev/null
+++ b/mysql-test/t/rpl_log.test
@@ -0,0 +1,38 @@
+source include/master-slave.inc;
+eval_result; #result depends on some server specific params
+
+#clean up slave binlogs
+connection slave;
+slave stop;
+reset master;
+reset slave;
+
+connection master;
+reset master;
+drop table if exists t1;
+create table t1(n int not null auto_increment primary key);
+insert into t1 values (NULL);
+drop table t1;
+create table t1 (word char(20) not null);
+load data infile '../../std_data/words.dat' into table t1;
+drop table t1;
+show binlog events;
+show binlog events from 79 limit 1;
+show binlog events from 79 limit 2;
+show binlog events from 79 limit 2,1;
+flush logs;
+create table t1 (n int);
+insert into t1 values (1);
+drop table t1;
+show binlog events;
+show binlog events in 'master-bin.002';
+show master logs;
+save_master_pos;
+connection slave;
+let $VERSION=`select version()`;
+slave start;
+sync_with_master;
+show master logs;
+show binlog events in 'slave-bin.001' from 4;
+show binlog events in 'slave-bin.002' from 4;
+show slave status;
diff --git a/mysql-test/t/rpl_magic.test b/mysql-test/t/rpl_magic.test
new file mode 100644
index 00000000000..18f1cea34a3
--- /dev/null
+++ b/mysql-test/t/rpl_magic.test
@@ -0,0 +1,30 @@
+source include/master-slave.inc;
+
+#first, make sure the slave has had enough time to register
+connection master;
+save_master_pos;
+connection slave;
+sync_with_master;
+
+#discover slaves
+connection master;
+rpl_probe;
+
+#turn on master/slave query direction auto-magic
+enable_rpl_parse;
+drop table if exists t1;
+create table t1 ( n int);
+insert into t1 values (1),(2),(3),(4);
+disable_rpl_parse;
+save_master_pos;
+enable_rpl_parse;
+connection slave;
+sync_with_master;
+insert into t1 values(5);
+connection master;
+select * from t1;
+select * from t1;
+disable_rpl_parse;
+select * from t1;
+connection slave;
+select * from t1;
diff --git a/mysql-test/t/rpl_sporadic_master-master.opt b/mysql-test/t/rpl_sporadic_master-master.opt
new file mode 100644
index 00000000000..6d5b66bed61
--- /dev/null
+++ b/mysql-test/t/rpl_sporadic_master-master.opt
@@ -0,0 +1 @@
+--sporadic-binlog-dump-fail --max-binlog-dump-events=2
diff --git a/mysql-test/t/rpl_sporadic_master.test b/mysql-test/t/rpl_sporadic_master.test
new file mode 100644
index 00000000000..ce6568f659e
--- /dev/null
+++ b/mysql-test/t/rpl_sporadic_master.test
@@ -0,0 +1,24 @@
+#test to see if replication can continue when master sporadically fails on
+# COM_BINLOG_DUMP and additionally limits the number of events per dump
+source include/master-slave.inc;
+connection master;
+drop table if exists t1;
+create table t1(n int not null auto_increment primary key);
+insert into t1 values (NULL),(NULL);
+delete from t1;
+insert into t1 values (NULL),(NULL);
+insert into t1 values (NULL),(NULL);
+flush logs;
+delete from t1;
+insert into t1 values (NULL),(NULL);
+insert into t1 values (NULL),(NULL);
+insert into t1 values (NULL),(NULL);
+save_master_pos;
+connection slave;
+sync_with_master;
+select * from t1;
+connection master;
+drop table t1;
+save_master_pos;
+connection slave;
+sync_with_master;
diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test
index 10079ba2549..cdb6ee57e0f 100644
--- a/mysql-test/t/select.test
+++ b/mysql-test/t/select.test
@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou
#
select sum(Period)/count(*) from t1;
-select companynr,count(price) as "count",sum(price) as "sum" ,sum(price)/count(price)-avg(price) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
+select companynr,count(price) as "count",sum(price) as "sum" ,abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
#
diff --git a/mysql-test/t/select_found.test b/mysql-test/t/select_found.test
index d86ef1e0907..52c8039b2f5 100644
--- a/mysql-test/t/select_found.test
+++ b/mysql-test/t/select_found.test
@@ -13,7 +13,7 @@ select SQL_CALC_FOUND_ROWS * from t1 order by b desc limit 1;
select found_rows();
select SQL_CALC_FOUND_ROWS distinct b from t1 limit 1;
select found_rows();
-select SQL_CALC_FOUND_ROWS b,count(*) as c from t1 group by b order by c limit 1;
+select SQL_CALC_FOUND_ROWS b,count(*) as c from t1 group by b order by c desc limit 1;
select found_rows();
select SQL_CALC_FOUND_ROWS * from t1 left join t1 as t2 on (t1.b=t2.a) limit 2,1;
select found_rows();
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index f4f58c8c885..476d8dcdf0d 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -65,3 +65,10 @@ create table t1 (
) comment = 'it\'s a table' ;
show create table t1 ;
drop table t1;
+
+create table t1 (a int not null, unique aa (a));
+show create table t1;
+drop table t1;
+create table t1 (a int not null, primary key (a));
+show create table t1;
+drop table t1;
diff --git a/mysql-test/t/status.test b/mysql-test/t/status.test
index 70a7a3ab584..bdfeb26073f 100644
--- a/mysql-test/t/status.test
+++ b/mysql-test/t/status.test
@@ -6,7 +6,7 @@ show status like 'Table_lock%';
connection con1;
SET SQL_LOG_BIN=0;
drop table if exists t1;
-create table t1(n int);
+create table t1(n int) type=myisam;
insert into t1 values(1);
connection con2;
lock tables t1 read;
diff --git a/mysql-test/t/symlink.test b/mysql-test/t/symlink.test
new file mode 100644
index 00000000000..5e2b8232844
--- /dev/null
+++ b/mysql-test/t/symlink.test
@@ -0,0 +1,82 @@
+-- require r/have_symlink.require
+show variables like "have_symlink";
+
+#
+# First create little data to play with
+#
+drop table if exists t1,t2,t7,t8,t9;
+create table t1 (a int not null auto_increment, b char(16) not null, primary key (a));
+create table t2 (a int not null auto_increment, b char(16) not null, primary key (a));
+insert into t1 (b) values ("test"),("test1"),("test2"),("test3");
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+insert into t2 (b) select b from t1;
+insert into t1 (b) select b from t2;
+drop table t2;
+
+#
+# Start the test
+# We use t9 here to not crash with tables generated by the backup test
+#
+
+eval create table t9 (a int not null auto_increment, b char(16) not null, primary key (a)) type=myisam data directory="$MYSQL_TEST_DIR/var/tmp" index directory="$MYSQL_TEST_DIR/var/run";
+insert into t9 select * from t1;
+check table t9;
+optimize table t9;
+repair table t9;
+alter table t9 add column c int not null;
+show create table t9;
+
+# Test renames
+alter table t9 rename t8, add column d int not null;
+alter table t8 rename t7;
+rename table t7 to t9;
+# Drop old t1 table, keep t9
+drop table t1;
+
+#
+# Test error handling
+# Note that we are using the above table t9 here!
+#
+
+--error 1103
+create table t1 (a int not null auto_increment, b char(16) not null, primary key (a)) type=myisam data directory="tmp";
+
+# Check that we cannot link over a table from another database.
+
+drop database if exists test_mysqltest;
+create database test_mysqltest;
+
+--error 1
+create table test_mysqltest.t9 (a int not null auto_increment, b char(16) not null, primary key (a)) type=myisam index directory="/this-dir-does-not-exist";
+
+--error 1103
+create table test_mysqltest.t9 (a int not null auto_increment, b char(16) not null, primary key (a)) type=myisam index directory="not-hard-path";
+
+--error 1
+eval create table test_mysqltest.t9 (a int not null auto_increment, b char(16) not null, primary key (a)) type=myisam index directory="$MYSQL_TEST_DIR/var/run";
+
+--error 1
+eval create table test_mysqltest.t9 (a int not null auto_increment, b char(16) not null, primary key (a)) type=myisam data directory="$MYSQL_TEST_DIR/var/tmp";
+
+# Check moving table t9 from default database to test_mysqltest;
+# In this case the symlinks should be removed.
+
+alter table t9 rename test_mysqltest.t9;
+select count(*) from test_mysqltest.t9;
+show create table test_mysqltest.t9;
+drop database test_mysqltest;
diff --git a/mysql-test/t/type_float.test b/mysql-test/t/type_float.test
index 1496170a256..23941ad2913 100644
--- a/mysql-test/t/type_float.test
+++ b/mysql-test/t/type_float.test
@@ -3,7 +3,8 @@
# Numeric floating point.
SELECT 10,10.0,10.,.1e+2,100.0e-1;
-select 6e-05, -6e-05, --6e-05, -6e-05+1.000000;
+SELECT 6e-05, -6e-05, --6e-05, -6e-05+1.000000;
+SELECT 1e1,1.e1,1.0e1,1e+1,1.e+1,1.0e+1,1e-1,1.e-1,1.0e-1;
drop table if exists t1;
create table t1 (f1 float(24),f2 float(52));
diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test
index 1067559b759..d5ff64d199b 100644
--- a/mysql-test/t/variables.test
+++ b/mysql-test/t/variables.test
@@ -1,7 +1,7 @@
#
# test variables
#
-set @`test`=1,@TEST=3,@select=2;
+set @`test`=1,@TEST=3,@select=2,@t5=1.23456;
select @test,@`select`,@TEST,@not_used;
set @test_int=10,@test_double=1e-10,@test_string="abcdeghi",@test_string2="abcdefghij",@select=NULL;
select @test_int,@test_double,@test_string,@test_string2,@select;
@@ -12,3 +12,5 @@ select @test_int,@test_double,@test_string,@test_string2;
set @test_int=null,@test_double=null,@test_string=null,@test_string2=null;
select @test_int,@test_double,@test_string,@test_string2;
select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3;
+select @t5;
+
diff --git a/mysys/Makefile.am b/mysys/Makefile.am
index 5a7293bc680..6dd9bb06fe9 100644
--- a/mysys/Makefile.am
+++ b/mysys/Makefile.am
@@ -33,6 +33,7 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c\
my_alloc.c safemalloc.c my_fopen.c my_fstream.c \
my_error.c errors.c my_div.c my_messnc.c \
mf_format.c mf_same.c mf_dirname.c mf_fn_ext.c \
+ my_symlink.c my_symlink2.c \
mf_pack.c mf_pack2.c mf_unixpath.c mf_stripp.c \
mf_casecnv.c mf_soundex.c mf_wcomp.c mf_wfile.c \
mf_qsort.c mf_qsort2.c mf_sort.c \
diff --git a/mysys/errors.c b/mysys/errors.c
index 6e9f1fabab0..77e52c2f0b3 100644
--- a/mysys/errors.c
+++ b/mysys/errors.c
@@ -46,6 +46,9 @@ const char * NEAR globerrs[GLOBERRS]=
"Can't create directory '%s' (Errcode: %d)",
"Character set '%s' is not a compiled character set and is not specified in the '%s' file",
"Out of resources when opening file '%s' (Errcode: %d)",
+ "Can't read value for symlink '%s' (Error %d)",
+ "Can't create symlink '%s' pointing at '%s' (Error %d)",
+ "Error on realpath() on '%s' (Error %d)",
};
void init_glob_errs(void)
@@ -81,6 +84,9 @@ void init_glob_errs()
EE(EE_DISK_FULL) = "Disk is full writing '%s'. Waiting for someone to free space...";
EE(EE_CANT_MKDIR) ="Can't create directory '%s' (Errcode: %d)";
EE(EE_UNKNOWN_CHARSET)= "Character set is not a compiled character set and is not specified in the %s file";
- EE(EE_OUT_OF_FILERESOURCES)="Out of resources when opening file '%s' (Errcode: %d)",
+ EE(EE_OUT_OF_FILERESOURCES)="Out of resources when opening file '%s' (Errcode: %d)";
+ EE(EE_CANT_READLINK)="Can't read value for symlink '%s' (Error %d)";
+ EE(EE_CANT_SYMLINK)="Can't create symlink '%s' pointing at '%s' (Error %d)";
+ EE(EE_REALPATH)="Error on realpath() on '%s' (Error %d)";
}
#endif
diff --git a/mysys/getvar.c b/mysys/getvar.c
index e0f60b207b7..90ab599244d 100644
--- a/mysys/getvar.c
+++ b/mysys/getvar.c
@@ -101,7 +101,7 @@ my_bool set_changeable_var(my_string str,CHANGEABLE_VAR *vars)
}
if (num < (longlong) found->min_value)
num=(longlong) found->min_value;
- else if (num > (longlong) (ulong) found->max_value)
+ else if (num > 0 && (ulonglong) num > (ulonglong) (ulong) found->max_value)
num=(longlong) (ulong) found->max_value;
num=((num- (longlong) found->sub_size) / (ulonglong) found->block_size);
(*found->varptr)= (long) (num*(ulonglong) found->block_size);
diff --git a/mysys/mf_brkhant.c b/mysys/mf_brkhant.c
index 4e4bc2410f9..debf5d9a712 100644
--- a/mysys/mf_brkhant.c
+++ b/mysys/mf_brkhant.c
@@ -24,17 +24,15 @@
/* Set variable that we can't break */
+#if !defined(THREAD)
void dont_break(void)
{
-#if !defined(THREAD)
my_dont_interrupt=1;
-#endif
return;
} /* dont_break */
void allow_break(void)
{
-#if !defined(THREAD)
{
reg1 int index;
@@ -54,8 +52,8 @@ void allow_break(void)
_my_signals=0;
}
}
-#endif
} /* dont_break */
+#endif
/* Set old status */
diff --git a/mysys/mf_cache.c b/mysys/mf_cache.c
index ff29926ac50..4b8fc6fed17 100644
--- a/mysys/mf_cache.c
+++ b/mysys/mf_cache.c
@@ -28,7 +28,8 @@
this, just remember the file name for later removal
*/
-static my_bool cache_remove_open_tmp(IO_CACHE *cache, const char *name)
+static my_bool cache_remove_open_tmp(IO_CACHE *cache __attribute__((unused)),
+ const char *name)
{
#if O_TEMPORARY == 0
#if !defined(CANT_DELETE_OPEN_FILES)
diff --git a/mysys/mf_format.c b/mysys/mf_format.c
index c4425806e01..f90e646289f 100644
--- a/mysys/mf_format.c
+++ b/mysys/mf_format.c
@@ -17,10 +17,6 @@
#include "mysys_priv.h"
#include <m_string.h>
-#ifdef HAVE_REALPATH
-#include <sys/param.h>
-#include <sys/stat.h>
-#endif
/* format a filename with replace of library and extension */
/* params to and name may be identicall */
@@ -33,21 +29,12 @@
/* 32 Resolve filename to full path */
/* 64 Return NULL if too long path */
-#ifdef SCO
-#define BUFF_LEN 4097
-#else
-#ifdef MAXPATHLEN
-#define BUFF_LEN MAXPATHLEN
-#else
-#define BUFF_LEN FN_LEN
-#endif
-#endif
my_string fn_format(my_string to, const char *name, const char *dsk,
const char *form, int flag)
{
reg1 uint length;
- char dev[FN_REFLEN], buff[BUFF_LEN], *pos, *startpos;
+ char dev[FN_REFLEN], buff[FN_REFLEN], *pos, *startpos;
const char *ext;
DBUG_ENTER("fn_format");
DBUG_PRINT("enter",("name: %s dsk: %s form: %s flag: %d",
@@ -109,18 +96,13 @@ my_string fn_format(my_string to, const char *name, const char *dsk,
#endif
(void) strmov(pos,ext); /* Don't convert extension */
}
- /* Purify gives a lot of UMR errors when using realpath */
-#if defined(HAVE_REALPATH) && !defined(HAVE_purify) && !defined(HAVE_BROKEN_REALPATH)
- if (flag & 16)
+ if (flag & 32)
+ (void) my_realpath(to, to, MYF(flag & 32 ? 0 : MY_RESOLVE_LINK));
+ else if (flag & 16)
{
- struct stat stat_buff;
- if (flag & 32 || (!lstat(to,&stat_buff) && S_ISLNK(stat_buff.st_mode)))
- {
- if (realpath(to,buff))
- strmake(to,buff,FN_REFLEN-1);
- }
+ strmov(buff,to);
+ (void) my_readlink(to, buff, MYF(0));
}
-#endif
DBUG_RETURN (to);
} /* fn_format */
diff --git a/mysys/mf_pack.c b/mysys/mf_pack.c
index c18d37888b8..b442af7e9e5 100644
--- a/mysys/mf_pack.c
+++ b/mysys/mf_pack.c
@@ -236,11 +236,16 @@ void symdirget(char *dir)
*pos++=temp; *pos=0; /* Restore old filename */
if (fp)
{
- if (fgets(buff, sizeof(buff), fp))
+ if (fgets(buff, sizeof(buff)-1, fp))
{
for (pos=strend(buff);
pos > buff && (iscntrl(pos[-1]) || isspace(pos[-1])) ;
pos --);
+
+ /* Ensure that the symlink ends with the directory symbol */
+ if (pos == buff || pos[-1] != FN_LIBCHAR)
+ *pos++=FN_LIBCHAR;
+
strmake(dir,buff, (uint) (pos-buff));
}
my_fclose(fp,MYF(0));
diff --git a/mysys/mf_same.c b/mysys/mf_same.c
index 5b8c5ecf970..c1a5cae11cb 100644
--- a/mysys/mf_same.c
+++ b/mysys/mf_same.c
@@ -20,19 +20,22 @@
#include "mysys_priv.h"
#include <m_string.h>
- /* Formaterar ett filnamn i avsende p} ett annat namn */
- /* Klarar {ven to = name */
- /* Denna funktion r|r inte p} utg}ngsnamnet */
+ /*
+ Copy directory and/or extension between filenames.
+ (For the meaning of 'flag', check mf_format.c)
+ 'to' may be equal to 'name'.
+ Returns 'to'.
+ */
-my_string fn_same(my_string toname, const char *name, int flag)
+my_string fn_same(char *to, const char *name, int flag)
{
char dev[FN_REFLEN];
const char *ext;
DBUG_ENTER("fn_same");
- DBUG_PRINT("mfunkt",("to: %s name: %s flag: %d",toname,name,flag));
+ DBUG_PRINT("enter",("to: %s name: %s flag: %d",to,name,flag));
if ((ext=strrchr(name+dirname_part(dev,name),FN_EXTCHAR)) == 0)
ext="";
- DBUG_RETURN(fn_format(toname,toname,dev,ext,flag));
+ DBUG_RETURN(fn_format(to,to,dev,ext,flag));
} /* fn_same */
diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c
index b82ff965dfb..ffbed381226 100644
--- a/mysys/my_alloc.c
+++ b/mysys/my_alloc.c
@@ -100,41 +100,34 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size)
#endif
}
+/* Mark all data in blocks free for reuse */
+
static inline void mark_blocks_free(MEM_ROOT* root)
{
- reg1 USED_MEM *next,*last = 0;
+ reg1 USED_MEM *next;
+ reg2 USED_MEM **last;
- /* iterate through (partially) free blocks, mark them fully free */
- for(next = root->free; next; next = next->next )
- {
- last = next;
- next->left = next->size - ALIGN_SIZE(sizeof(USED_MEM));
- }
- /* if free block list was not empty, point the next of the
- last free block to the beginning of the used list */
- next = root->used; /* a little optimization to avoid dereferencing root
- twice - we will shortly start iterating through used
- list */
- if(last)
- last->next = next;
- else /* if free list is empty, just point it to the current used*/
- root->free = next;
-
- /* now go through the current used list, and mark each block
- as fully free. Note that because of our optimization, we do not
- need to initialize next here - see above
- */
- for(;next; next = next->next)
- next->left = next->size - ALIGN_SIZE(sizeof(USED_MEM));
-
- /* Now everything is set - we just need to indicate that nothing is used
- anymore
- */
- root->used = 0;
+ /* iterate through (partially) free blocks, mark them free */
+ last= &root->free;
+ for (next= root->free; next; next= *(last= &next->next))
+ next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM));
+
+ /* Combine the free and the used list */
+ *last= next=root->used;
+
+ /* now go through the used blocks and mark them free */
+ for (; next; next= next->next)
+ next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM));
+
+ /* Now everything is set; Indicate that nothing is used anymore */
+ root->used= 0;
}
- /* deallocate everything used by alloc_root or just move
- used blocks to free list if called with MY_USED_TO_FREE */
+
+/*
+ Deallocate everything used by alloc_root or just move
+ used blocks to free list if called with MY_USED_TO_FREE
+*/
void free_root(MEM_ROOT *root, myf MyFlags)
{
@@ -143,23 +136,23 @@ void free_root(MEM_ROOT *root, myf MyFlags)
if (!root)
DBUG_VOID_RETURN; /* purecov: inspected */
- if(MyFlags & MY_MARK_BLOCKS_FREE)
- {
- mark_blocks_free(root);
- DBUG_VOID_RETURN;
- }
+ if (MyFlags & MY_MARK_BLOCKS_FREE)
+ {
+ mark_blocks_free(root);
+ DBUG_VOID_RETURN;
+ }
if (!(MyFlags & MY_KEEP_PREALLOC))
root->pre_alloc=0;
- for ( next=root->used; next ;)
+ for (next=root->used; next ;)
{
old=next; next= next->next ;
if (old != root->pre_alloc)
my_free((gptr) old,MYF(0));
}
- for (next= root->free ; next ; )
+ for (next=root->free ; next ;)
{
- old=next; next= next->next ;
+ old=next; next= next->next;
if (old != root->pre_alloc)
my_free((gptr) old,MYF(0));
}
diff --git a/mysys/my_compress.c b/mysys/my_compress.c
index d1e32234135..9d94a400f48 100644
--- a/mysys/my_compress.c
+++ b/mysys/my_compress.c
@@ -15,7 +15,7 @@
Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA */
-/* Written by Sinisa Milivojevic <sinisa@coresinc.com> */
+/* Written by Sinisa Milivojevic <sinisa@mysql.com> */
#include <global.h>
#ifdef HAVE_COMPRESS
diff --git a/mysys/my_copy.c b/mysys/my_copy.c
index bfd7e957585..be131873118 100644
--- a/mysys/my_copy.c
+++ b/mysys/my_copy.c
@@ -54,7 +54,7 @@ int my_copy(const char *from, const char *to, myf MyFlags)
if (MyFlags & MY_HOLD_ORIGINAL_MODES) /* Copy stat if possible */
new_file_stat=stat((char*) to, &new_stat_buff);
- if ((from_file=my_open(from,O_RDONLY,MyFlags)) >= 0)
+ if ((from_file=my_open(from,O_RDONLY | O_SHARE,MyFlags)) >= 0)
{
if (stat(from,&stat_buff))
{
@@ -64,7 +64,7 @@ int my_copy(const char *from, const char *to, myf MyFlags)
if (MyFlags & MY_HOLD_ORIGINAL_MODES && !new_file_stat)
stat_buff=new_stat_buff;
if ((to_file= my_create(to,(int) stat_buff.st_mode,
- O_WRONLY | O_TRUNC | O_BINARY,
+ O_WRONLY | O_TRUNC | O_BINARY | O_SHARE,
MyFlags)) < 0)
goto err;
diff --git a/mysys/my_delete.c b/mysys/my_delete.c
index 77d5f311418..dc1e292a893 100644
--- a/mysys/my_delete.c
+++ b/mysys/my_delete.c
@@ -16,7 +16,6 @@
MA 02111-1307, USA */
#include "mysys_priv.h"
-
#include "mysys_err.h"
int my_delete(const char *name, myf MyFlags)
diff --git a/mysys/my_pread.c b/mysys/my_pread.c
index 4e0de71bcf5..5c7d0be5854 100644
--- a/mysys/my_pread.c
+++ b/mysys/my_pread.c
@@ -66,11 +66,11 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
my_filename(Filedes),my_errno);
}
if ((int) readbytes == -1 || (MyFlags & (MY_FNABP | MY_NABP)))
- DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
+ DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
}
if (MyFlags & (MY_NABP | MY_FNABP))
- DBUG_RETURN(0); /* Ok vid l{sning */
- DBUG_RETURN(readbytes); /* purecov: inspected */
+ DBUG_RETURN(0); /* Read went ok; Return 0 */
+ DBUG_RETURN(readbytes); /* purecov: inspected */
}
} /* my_pread */
diff --git a/mysys/my_static.c b/mysys/my_static.c
index f1339877273..3fc68455841 100644
--- a/mysys/my_static.c
+++ b/mysys/my_static.c
@@ -97,4 +97,5 @@ int (*fatal_error_handler_hook)(uint error,const char *str,myf MyFlags)=
my_bool NEAR my_disable_locking=0;
my_bool NEAR my_disable_async_io=0;
my_bool NEAR my_disable_flush_key_blocks=0;
+my_bool NEAR my_disable_symlinks=0;
my_bool NEAR mysys_uses_curses=0;
diff --git a/mysys/my_symlink.c b/mysys/my_symlink.c
new file mode 100644
index 00000000000..65d165fc026
--- /dev/null
+++ b/mysys/my_symlink.c
@@ -0,0 +1,138 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA */
+
+#include "mysys_priv.h"
+#include "mysys_err.h"
+#include <m_string.h>
+#include <errno.h>
+#ifdef HAVE_REALPATH
+#include <sys/param.h>
+#include <sys/stat.h>
+#endif
+
+/*
+ Reads the content of a symbolic link
+ If the file is not a symbolic link, return the original file name in to.
+ Returns: 0 if table was a symlink,
+ 1 if table was a normal file
+ -1 on error.
+*/
+
+int my_readlink(char *to, const char *filename, myf MyFlags)
+{
+#ifndef HAVE_READLINK
+ strmov(to,filename);
+ return 1;
+#else
+ int result=0;
+ int length;
+ DBUG_ENTER("my_readlink");
+
+ if ((length=readlink(filename, to, FN_REFLEN-1)) < 0)
+ {
+ /* Don't give an error if this wasn't a symlink */
+ if ((my_errno=errno) == EINVAL)
+ {
+ result= 1;
+ strmov(to,filename);
+ }
+ else
+ {
+ if (MyFlags & MY_WME)
+ my_error(EE_CANT_READLINK, MYF(0), filename, errno);
+ result= -1;
+ }
+ }
+ else
+ to[length]=0;
+ DBUG_RETURN(result);
+#endif /* HAVE_READLINK */
+}
+
+
+/* Create a symbolic link */
+
+int my_symlink(const char *content, const char *linkname, myf MyFlags)
+{
+#ifndef HAVE_READLINK
+ return 0;
+#else
+ int result;
+ DBUG_ENTER("my_symlink");
+
+ result= 0;
+ if (symlink(content, linkname))
+ {
+ result= -1;
+ my_errno=errno;
+ if (MyFlags & MY_WME)
+ my_error(EE_CANT_SYMLINK, MYF(0), linkname, content, errno);
+ }
+ DBUG_RETURN(result);
+#endif /* HAVE_READLINK */
+}
+
+/*
+ Resolve all symbolic links in path
+ 'to' may be equal to 'filename'
+
+ Because purify gives a lot of UMR errors when using realpath(),
+ this code is disabled when using purify.
+
+ If MY_RESOLVE_LINK is given, only do realpath if the file is a link.
+*/
+
+#if defined(SCO)
+#define BUFF_LEN 4097
+#elif defined(MAXPATHLEN)
+#define BUFF_LEN MAXPATHLEN
+#else
+#define BUFF_LEN FN_LEN
+#endif
+
+int my_realpath(char *to, const char *filename, myf MyFlags)
+{
+#if defined(HAVE_REALPATH) && !defined(HAVE_purify) && !defined(HAVE_BROKEN_REALPATH)
+ int result=0;
+ char buff[BUFF_LEN];
+ struct stat stat_buff;
+ DBUG_ENTER("my_realpath");
+
+ if (!(MyFlags & MY_RESOLVE_LINK) ||
+ (!lstat(filename,&stat_buff) && S_ISLNK(stat_buff.st_mode)))
+ {
+ char *ptr;
+ if ((ptr=realpath(filename,buff)))
+ strmake(to,ptr,FN_REFLEN-1);
+ else
+ {
+ /* Realpath didn't work; Use original name */
+ my_errno=errno;
+ if (MyFlags & MY_WME)
+ my_error(EE_REALPATH, MYF(0), filename, my_errno);
+ if (to != filename)
+ strmov(to,filename);
+ result= -1;
+ }
+ }
+ DBUG_RETURN(result);
+#else
+ if (to != filename)
+ strmov(to,filename);
+ return 0;
+#endif
+}
diff --git a/mysys/my_symlink2.c b/mysys/my_symlink2.c
new file mode 100644
index 00000000000..e77815df12a
--- /dev/null
+++ b/mysys/my_symlink2.c
@@ -0,0 +1,155 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA */
+
+/*
+ Advanced symlink handling.
+ This is used in MyISAM to let users symlink tables to a different disk.
+ The main idea with these functions is to automatically create, delete and
+ rename files and symlinks as if they were one unit.
+*/
+
+#include "mysys_priv.h"
+#include "mysys_err.h"
+#include <m_string.h>
+
+File my_create_with_symlink(const char *linkname, const char *filename,
+ int createflags, int access_flags, myf MyFlags)
+{
+ File file;
+ int tmp_errno;
+ /* Test if we should create a link */
+ int create_link=(linkname && strcmp(linkname,filename));
+ DBUG_ENTER("my_create_with_symlink");
+
+ if (!(MyFlags & MY_DELETE_OLD))
+ {
+ if (!access(filename,F_OK))
+ {
+ my_error(EE_CANTCREATEFILE, MYF(0), filename, EEXIST);
+ DBUG_RETURN(-1);
+ }
+ if (create_link && !access(linkname,F_OK))
+ {
+ my_error(EE_CANTCREATEFILE, MYF(0), linkname, EEXIST);
+ DBUG_RETURN(-1);
+ }
+ }
+
+ if ((file=my_create(filename, createflags, access_flags, MyFlags)) >= 0)
+ {
+ if (create_link)
+ {
+ /* Delete old link/file */
+ if (MyFlags & MY_DELETE_OLD)
+ my_delete(linkname, MYF(0));
+ /* Create link */
+ if (my_symlink(filename, linkname, MyFlags))
+ {
+ /* Fail, remove everything we have done */
+ tmp_errno=my_errno;
+ my_close(file,MYF(0));
+ my_delete(filename, MYF(0));
+ file= -1;
+ my_errno=tmp_errno;
+ }
+ }
+ }
+ DBUG_RETURN(file);
+}
+
+/*
+ If the file was a symlink, delete both symlink and the file which the
+ symlink pointed to.
+*/
+
+int my_delete_with_symlink(const char *name, myf MyFlags)
+{
+ char link_name[FN_REFLEN];
+ int was_symlink= (!my_disable_symlinks &&
+ !my_readlink(link_name, name, MYF(0)));
+ int result;
+ DBUG_ENTER("my_delete_with_symlink");
+
+ if (!(result=my_delete(name, MyFlags)))
+ {
+ if (was_symlink)
+ result=my_delete(link_name, MyFlags);
+ }
+ DBUG_RETURN(result);
+}
+
+/*
+ If the file is a normal file, just rename it.
+ If the file is a symlink:
+ - Create a new file with the name 'to' that points at
+ symlink_dir/basename(to)
+ - Rename the symlinked file to symlink_dir/basename(to)
+ - Delete 'from'
+ If something goes wrong, restore everything.
+*/
+
+int my_rename_with_symlink(const char *from, const char *to, myf MyFlags)
+{
+#ifndef HAVE_READLINK
+ return my_rename(from, to, MyFlags);
+#else
+ char link_name[FN_REFLEN], tmp_name[FN_REFLEN];
+ int was_symlink= (!my_disable_symlinks &&
+ !my_readlink(link_name, from, MYF(0)));
+ int result=0;
+ DBUG_ENTER("my_rename_with_symlink");
+
+ if (!was_symlink)
+ DBUG_RETURN(my_rename(from, to, MyFlags));
+
+ /* Change filename that symlink pointed to */
+ strmov(tmp_name, to);
+ fn_same(tmp_name,link_name,1); /* Copy dir */
+
+ /* Create new symlink */
+ if (my_symlink(tmp_name, to, MyFlags))
+ DBUG_RETURN(1);
+
+ /*
+ Rename symlinked file if the base name didn't change.
+ This can happen if you use this function where 'from' and 'to' has
+ the same basename and different directories.
+ */
+
+ if (strcmp(link_name, tmp_name) && my_rename(link_name, tmp_name, MyFlags))
+ {
+ int save_errno=my_errno;
+ my_delete(to, MyFlags); /* Remove created symlink */
+ my_errno=save_errno;
+ DBUG_RETURN(1);
+ }
+
+ /* Remove original symlink */
+ if (my_delete(from, MyFlags))
+ {
+ int save_errno=my_errno;
+ /* Remove created link */
+ my_delete(to, MyFlags);
+ /* Rename file back */
+ if (strcmp(link_name, tmp_name))
+ (void) my_rename(tmp_name, link_name, MyFlags);
+ my_errno=save_errno;
+ result= 1;
+ }
+ DBUG_RETURN(result);
+#endif /* HAVE_READLINK */
+}
diff --git a/mysys/tree.c b/mysys/tree.c
index 1ea7e48a790..7100b72345c 100644
--- a/mysys/tree.c
+++ b/mysys/tree.c
@@ -312,6 +312,8 @@ int tree_delete(TREE *tree, void *key)
}
if (remove_colour == BLACK)
rb_delete_fixup(tree,parent);
+ if (tree->free)
+ (*tree->free)(ELEMENT_KEY(tree,element));
my_free((gptr) element,MYF(0));
tree->elements_in_tree--;
return 0;
diff --git a/pstack/bucomm.c b/pstack/bucomm.c
index 6c491d80bb5..d3231e71747 100644
--- a/pstack/bucomm.c
+++ b/pstack/bucomm.c
@@ -212,7 +212,7 @@ make_tempname (filename)
{
tmpname = xmalloc (sizeof (template));
strcpy (tmpname, template);
- mktemp (tmpname);
+ mkstemp (tmpname);
}
return tmpname;
}
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 45fbe275476..d2cca98ce63 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -44,7 +44,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \
mysqlhotcopy.sh \
mysqldumpslow.sh \
mysqld_multi.sh \
- safe_mysqld.sh
+ mysqld_safe.sh
EXTRA_DIST = $(EXTRA_SCRIPTS) \
mysqlaccess.conf \
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 84dac59018b..1848123fd8e 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -11,30 +11,29 @@ export machine system version
SOURCE=`pwd`
CP="cp -p"
-# Debug option must come first
+STRIP=1
DEBUG=0
-if test x$1 = x"--debug"
-then
- DEBUG=1
- shift 1
-fi
-
-# Save temporary distribution here (must be full path)
+SILENT=0
TMP=/tmp
-if test $# -gt 0
-then
- TMP=$1
- shift 1
-fi
-
-# Get optional suffix for distribution
SUFFIX=""
-if test $# -gt 0
-then
- SUFFIX=$1
- shift 1
-fi
+parse_arguments() {
+ for arg do
+ case "$arg" in
+ --debug) DEBUG=1;;
+ --tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;;
+ --suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
+ --no-strip) STRIP=0 ;;
+ --silent) SILENT=1 ;;
+ *)
+ echo "Unknown argument '$arg'"
+ exit 1
+ ;;
+ esac
+ done
+}
+
+parse_arguments "$@"
#make
@@ -68,14 +67,18 @@ for i in extra/comp_err extra/replace extra/perror extra/resolveip \
client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \
client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \
client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \
- client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest
+ client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest \
+ client/.libs/mysqlcheck
do
if [ -f $i ]
then
$CP $i $BASE/bin
fi
done
-strip $BASE/bin/*
+
+if [ x$STRIP = x1 ] ; then
+ strip $BASE/bin/*
+fi
for i in sql/mysqld.sym.gz
do
@@ -114,7 +117,7 @@ rm -f $BASE/bin/Makefile* $BASE/bin/*.in $BASE/bin/*.sh $BASE/bin/mysql_install_
$BASE/bin/replace \@localstatedir\@ ./data \@bindir\@ ./bin \@scriptdir\@ ./bin \@libexecdir\@ ./bin \@sbindir\@ ./bin \@prefix\@ . \@HOSTNAME\@ @HOSTNAME@ < $SOURCE/scripts/mysql_install_db.sh > $BASE/scripts/mysql_install_db
$BASE/bin/replace \@prefix\@ /usr/local/mysql \@bindir\@ ./bin \@MYSQLD_USER\@ root \@localstatedir\@ /usr/local/mysql/data < $SOURCE/support-files/mysql.server.sh > $BASE/support-files/mysql.server
-$BASE/bin/replace /my/gnu/bin/hostname /bin/hostname -- $BASE/bin/safe_mysqld
+$BASE/bin/replace /my/gnu/bin/hostname /bin/hostname -- $BASE/bin/mysqld_safe
mv $BASE/support-files/binary-configure $BASE/configure
chmod a+x $BASE/bin/* $BASE/scripts/* $BASE/support-files/mysql-* $BASE/configure
@@ -190,7 +193,13 @@ fi
echo "Using $tar to create archive"
cd $TMP
-$tar cvf $SOURCE/$NEW_NAME.tar $NEW_NAME
+
+OPT=cvf
+if [ x$SILENT = x1 ] ; then
+ OPT=cf
+fi
+
+$tar $OPT $SOURCE/$NEW_NAME.tar $NEW_NAME
cd $SOURCE
echo "Compressing archive"
gzip -9 $NEW_NAME.tar
diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh
index 09f81c70a1f..ed344f4b1e3 100644
--- a/scripts/mysql_config.sh
+++ b/scripts/mysql_config.sh
@@ -45,7 +45,7 @@ EOF
exit 1
}
-if ! test $# -gt 0; then usage; fi
+if test $# -le 0; then usage; fi
while test $# -gt 0; do
case $1 in
diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh
index f7324668bd2..7e232692ba1 100644
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@ -333,7 +333,7 @@ then
if test "$IN_RPM" -eq 0
then
echo "You can start the MySQL daemon with:"
- echo "cd @prefix@ ; $bindir/safe_mysqld &"
+ echo "cd @prefix@ ; $bindir/mysqld_safe &"
echo
echo "You can test the MySQL daemon with the benchmarks in the 'sql-bench' directory:"
echo "cd sql-bench ; run-all-tests"
diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh
index 1adaa458271..656894f0e37 100644
--- a/scripts/mysqld_multi.sh
+++ b/scripts/mysqld_multi.sh
@@ -457,12 +457,12 @@ sub example
# directory, that you have (just change the socket, -S=...)
# See more detailed information from chapter:
# '6 The MySQL Access Privilege System' from the MySQL manual.
-# 2.pid-file is very important, if you are using safe_mysqld to start mysqld
-# (e.g. --mysqld=safe_mysqld) Every mysqld should have it's own pid-file.
-# The advantage using safe_mysqld instead of mysqld directly here is, that
-# safe_mysqld 'guards' every mysqld process and will restart it, if mysqld
+# 2.pid-file is very important, if you are using mysqld_safe to start mysqld
+# (e.g. --mysqld=mysqld_safe) Every mysqld should have it's own pid-file.
+# The advantage using mysqld_safe instead of mysqld directly here is, that
+# mysqld_safe 'guards' every mysqld process and will restart it, if mysqld
# process fails due to signal kill -9, or similar. (Like segmentation fault,
-# which MySQL should never do, of course ;) Please note that safe_mysqld
+# which MySQL should never do, of course ;) Please note that mysqld_safe
# script may require that you start it from a certain place. This means that
# you may have to CD to a certain directory, before you start the
# mysqld_multi. If you have problems starting, please see the script.
@@ -497,7 +497,7 @@ sub example
# give you extra performance in a threaded system!
#
[mysqld_multi]
-mysqld = @bindir@/safe_mysqld
+mysqld = @bindir@/mysqld_safe
mysqladmin = @bindir@/mysqladmin
user = multi_admin
password = multipass
@@ -591,9 +591,9 @@ Options:
Using: $opt_log
--mysqladmin=... mysqladmin binary to be used for a server shutdown.
Using: $mysqladmin
---mysqld=... mysqld binary to be used. Note that you can give safe_mysqld
+--mysqld=... mysqld binary to be used. Note that you can give mysqld_safe
to this option also. The options are passed to mysqld. Just
- make sure you have mysqld in your PATH or fix safe_mysqld.
+ make sure you have mysqld in your PATH or fix mysqld_safe.
Using: $mysqld
--no-log Print to stdout instead of the log file. By default the log
file is turned on.
diff --git a/scripts/safe_mysqld-watch.sh b/scripts/mysqld_safe-watch.sh
index 30f95fd7a86..c59b3b2614d 100644
--- a/scripts/safe_mysqld-watch.sh
+++ b/scripts/mysqld_safe-watch.sh
@@ -8,7 +8,7 @@
# binary installation that has other paths than you are using.
#
# mysql.server works by first doing a cd to the base directory and from there
-# executing safe_mysqld
+# executing mysqld_safe
# Check if we are starting this relative (for the binary release)
if test -f ./data/mysql/db.frm -a -f ./share/mysql/english/errmsg.sys -a \
diff --git a/scripts/safe_mysqld.sh b/scripts/mysqld_safe.sh
index 6c006e96768..ac33c9ad1f2 100644
--- a/scripts/safe_mysqld.sh
+++ b/scripts/mysqld_safe.sh
@@ -8,7 +8,7 @@
# binary installation that has other paths than you are using.
#
# mysql.server works by first doing a cd to the base directory and from there
-# executing safe_mysqld
+# executing mysqld_safe
trap '' 1 2 3 15 # we shouldn't let anyone kill us
@@ -38,12 +38,12 @@ parse_arguments() {
--pid-file=*) pid_file=`echo "$arg" | sed -e "s;--pid-file=;;"` ;;
--user=*) user=`echo "$arg" | sed -e "s;--user=;;"` ;;
- # these two might have been set in a [safe_mysqld] section of my.cnf
- # they get passed via environment variables to safe_mysqld
+ # these two might have been set in a [mysqld_safe] section of my.cnf
+ # they get passed via environment variables to mysqld_safe
--socket=*) MYSQL_UNIX_PORT=`echo "$arg" | sed -e "s;--socket=;;"` ;;
--port=*) MYSQL_TCP_PORT=`echo "$arg" | sed -e "s;--port=;;"` ;;
- # safe_mysqld-specific options - must be set in my.cnf ([safe_mysqld])!
+ # mysqld_safe-specific options - must be set in my.cnf ([mysqld_safe])!
--ledir=*) ledir=`echo "$arg" | sed -e "s;--ledir=;;"` ;;
--err-log=*) err_log=`echo "$arg" | sed -e "s;--err-log=;;"` ;;
# QQ The --open-files should be removed
@@ -114,7 +114,7 @@ fi
pid_file=
err_log=
-# Get first arguments from the my.cfg file, groups [mysqld] and [safe_mysqld]
+# Get first arguments from the my.cnf file, groups [mysqld] and [mysqld_safe]
# and then merge with the command line arguments
if test -x ./bin/my_print_defaults
then
@@ -130,7 +130,7 @@ else
fi
args=
-parse_arguments `$print_defaults $defaults mysqld server safe_mysqld`
+parse_arguments `$print_defaults $defaults mysqld server mysqld_safe safe_mysqld`
parse_arguments PICK-ARGS-FROM-ARGV "$@"
if test ! -x $ledir/$MYSQLD
@@ -138,7 +138,7 @@ then
echo "The file $ledir/$MYSQLD doesn't exist or is not executable"
echo "Please do a cd to the mysql installation directory and restart"
echo "this script from there as follows:"
- echo "./bin/safe_mysqld".
+ echo "./bin/mysqld_safe".
exit 1
fi
diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark
index a51752a5023..c52a53699e0 100644
--- a/sql-bench/Comments/postgres.benchmark
+++ b/sql-bench/Comments/postgres.benchmark
@@ -1,56 +1,77 @@
-# This file describes how to run MySQL benchmarks with Postgres
-#
+# This file describes how to run MySQL benchmark suite with PostgreSQL
+#
+# WARNING:
+#
+# Don't run the --fast test on a PostgreSQL 7.1.1 database on
+# which you have any critical data; During one of our test runs
+# PostgreSQL got a corrupted database and all data was destroyed!
+# When we tried to restart postmaster, It died with a
+# 'no such file or directory' error and never recovered from that!
+#
+# Another time vacuum() filled our system disk with had 6G free
+# while vaccuming a table of 60 M.
+#
+# WARNING
# The test was run on a Intel Xeon 2x 550 Mzh machine with 1G memory,
-# 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP
+# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.2 compiled with SMP
# support
# Both the perl client and the database server is run
# on the same machine. No other cpu intensive process was used during
# the benchmark.
-
-#
-#
-# First, install postgresql-7.0.2.tar.gz
#
+# During the test we run PostgreSQL with -o -F, not async mode (not ACID safe)
+# because when we started postmaster without -o -F, PostgreSQL log files
+# filled up a 9G disk until postmaster crashed.
+# We did however notice that with -o -F, PostgreSQL was a magnitude slower
+# than when not using -o -F.
#
-# Start by adding the following lines to your ~/.bash_profile or
+# First, install postgresql-7.1.2.tar.gz
+
+# Adding the following lines to your ~/.bash_profile or
# corresponding file. If you are using csh, use ´setenv´.
-#
-export POSTGRES_INCLUDE=/usr/local/pgsql/include
-export POSTGRES_LIB=/usr/local/pgsql/lib
+export POSTGRES_INCLUDE=/usr/local/pg/include
+export POSTGRES_LIB=/usr/local/pg/lib
-PATH=$PATH:/usr/local/pgsql/bin
-MANPATH=$MANPATH:/usr/local/pgsql/man
+PATH=$PATH:/usr/local/pg/bin
+MANPATH=$MANPATH:/usr/local/pg/man
#
# Add the following line to /etc/ld.so.conf:
#
-/usr/local/pgsql/lib
-and run ldconfig.
+/usr/local/pg/lib
-#
-# untar the postgres source distribution and cd to src/
-# run the following commands:
-#
+# and run:
+
+ldconfig
-./configure
+# untar the postgres source distribution, cd to postgresql-*
+# and run the following commands:
+
+CFLAGS=-O3 ./configure
gmake
gmake install
-mkdir /usr/local/pgsql/data
-chown postgres /usr/local/pgsql/data
+mkdir /usr/local/pg/data
+chown postgres /usr/local/pg/data
su - postgres
-/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
-su postgres -c "/usr/local/pgsql/bin/postmaster -o -F -D /usr/local/pgsql/data" &
-su postgres -c "/usr/local/pgsql/bin/createdb test"
+/usr/local/pg/bin/initdb -D /usr/local/pg/data
+/usr/local/pg/bin/postmaster -o -F -D /usr/local/pg/data &
+/usr/local/pg/bin/createdb test
+exit
#
-# Second, install packages DBD-Pg-0.95.tar.gz and DBI-1.14.tar.gz,
+# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.18.tar.gz,
# available from http://www.perl.com/CPAN/
-#
+
+export POSTGRES_LIB=/usr/local/pg/lib/
+export POSTGRES_INCLUDE=/usr/local/pg/include/postgresql
+perl Makefile.PL
+make
+make install
#
# Now we run the test that can be found in the sql-bench directory in the
@@ -59,17 +80,28 @@ su postgres -c "/usr/local/pgsql/bin/createdb test"
# We did run two tests:
# The standard test
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
+
+# When running with --fast we run the following vacuum commands on
+# the database between each major update of the tables:
+# vacuum anlyze table
+# vacuum table
+# or
+# vacuum analyze
+# vacuum
-# and a test where we do a vacuum() after each update.
-# (The time for vacuum() is counted in the book-keeping() column)
+# The time for vacuum() is accounted for in the book-keeping() column, not
+# in the test that updates the database.
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
# If you want to store the results in a output/RUN-xxx file, you should
# repeate the benchmark with the extra option --log --use-old-result
# This will create a the RUN file based of the previous results
-#
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512MG, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
+
+# Between running the different tests we dropped and recreated the PostgreSQL
+# database to ensure that PostgreSQL should get a clean start,
+# independent of the previous runs.
diff --git a/sql-bench/Makefile.am b/sql-bench/Makefile.am
index 73ba070717f..5712373c405 100644
--- a/sql-bench/Makefile.am
+++ b/sql-bench/Makefile.am
@@ -21,7 +21,7 @@ benchdir_root= $(prefix)
benchdir = $(benchdir_root)/sql-bench
bench_SCRIPTS = test-ATIS test-connect test-create test-insert \
test-big-tables test-select test-wisconsin \
- test-alter-table \
+ test-alter-table graph-compare-results \
bench-init.pl compare-results run-all-tests \
server-cfg crash-me copy-db bench-count-distinct
CLEANFILES = $(bench_SCRIPTS)
@@ -30,7 +30,7 @@ EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \
test-alter-table.sh test-wisconsin.sh \
bench-init.pl.sh compare-results.sh server-cfg.sh \
run-all-tests.sh crash-me.sh copy-db.sh \
- bench-count-distinct.sh
+ bench-count-distinct.sh graph-compare-results.sh
EXTRA_DIST = $(EXTRA_SCRIPTS)
dist-hook:
diff --git a/sql-bench/README b/sql-bench/README
index 6096c5cc1e8..6b6a5fc95c0 100755
--- a/sql-bench/README
+++ b/sql-bench/README
@@ -11,7 +11,7 @@ In this directory are the queries and raw data files used to populate
the MySQL benchmarks. In order to run the benchmarks you should normally
execute a command like the following:
-run-all-tests --server=msyql --cmp=mysql,pg,solid --user=test --password=test --log
+run-all-tests --server=mysql --cmp=mysql,pg,solid --user=test --password=test --log
The above means that one wants to run the benchmark with MySQL. The limits
should be taken from all of mysql,PostgreSQL and Solid. Login name and
diff --git a/sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 74ba392c93b..00000000000
--- a/sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,19 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:46:54
-
-ATIS table test
-
-Creating tables
-Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (9768): 2 wallclock secs ( 0.49 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data
-Time for select_simple_join (500): 2 wallclock secs ( 0.63 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_join (200): 15 wallclock secs ( 4.21 usr 2.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_distinct (800): 12 wallclock secs ( 1.70 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (2600): 12 wallclock secs ( 1.43 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Removing tables
-Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index e932e9ca0ce..00000000000
--- a/sql-bench/Results/ATIS-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,19 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:10:55
-
-ATIS table test
-
-Creating tables
-Time for create_table (28): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (9768): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data
-Time for select_simple_join (500): 1 wallclock secs ( 0.64 usr 0.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_join (200): 16 wallclock secs ( 4.21 usr 2.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_distinct (800): 11 wallclock secs ( 1.74 usr 0.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (2600): 13 wallclock secs ( 1.34 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Removing tables
-Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 42 wallclock secs ( 7.93 usr 3.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index a94e920b55d..00000000000
--- a/sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,19 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 5:18:45
-
-ATIS table test
-
-Creating tables
-Time for create_table (28): 0 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (9768): 9 wallclock secs ( 2.88 usr 0.35 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data
-Time for select_simple_join (500): 3 wallclock secs ( 0.69 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_join (200): 14 wallclock secs ( 5.18 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_distinct (800): 17 wallclock secs ( 2.21 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (2600): 45 wallclock secs ( 1.73 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Removing tables
-Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 89 wallclock secs (12.72 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..748dd16a42e
--- /dev/null
+++ b/sql-bench/Results/ATIS-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,20 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:14:34
+
+ATIS table test
+
+Creating tables
+Time for create_table (28): 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+
+Inserting data
+Time to insert (9768): 8 wallclock secs ( 2.78 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.29 CPU)
+
+Retrieving data
+Time for select_simple_join (500): 3 wallclock secs ( 0.73 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.83 CPU)
+Time for select_join (100): 4 wallclock secs ( 0.67 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.76 CPU)
+Time for select_key_prefix_join (100): 12 wallclock secs ( 4.43 usr 0.15 sys + 0.00 cusr 0.00 csys = 4.58 CPU)
+Time for select_distinct (800): 22 wallclock secs ( 2.10 usr 0.26 sys + 0.00 cusr 0.00 csys = 2.36 CPU)
+Time for select_group (2600): 55 wallclock secs ( 1.75 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.81 CPU)
+
+Removing tables
+Time to drop_table (28): 0 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU)
+Total time: 104 wallclock secs (12.54 usr 1.17 sys + 0.00 cusr 0.00 csys = 13.71 CPU)
diff --git a/sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index befe150bdc4..00000000000
--- a/sql-bench/Results/ATIS-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,28 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:08:29
-
-ATIS table test
-
-Creating tables
-Time for create_table (28): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (9768): 9 wallclock secs ( 2.85 usr 0.37 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data
-Time for select_simple_join (500): 3 wallclock secs ( 0.79 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_join (200): 13 wallclock secs ( 4.77 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_distinct (800): 17 wallclock secs ( 2.06 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (2600): 41 wallclock secs ( 1.51 usr 0.15 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Removing tables
-Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 87 wallclock secs (12.00 usr 0.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..628ddd42784
--- /dev/null
+++ b/sql-bench/Results/ATIS-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,26 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 17:53:03
+
+ATIS table test
+
+Creating tables
+Time for create_table (28): 1 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Inserting data
+Time to insert (9768): 8 wallclock secs ( 2.90 usr 0.28 sys + 0.00 cusr 0.00 csys = 3.18 CPU)
+
+Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Retrieving data
+Time for select_simple_join (500): 4 wallclock secs ( 0.71 usr 0.18 sys + 0.00 cusr 0.00 csys = 0.89 CPU)
+Time for select_join (100): 4 wallclock secs ( 0.59 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.69 CPU)
+Time for select_key_prefix_join (100): 12 wallclock secs ( 4.47 usr 0.12 sys + 0.00 cusr 0.00 csys = 4.59 CPU)
+Time for select_distinct (800): 23 wallclock secs ( 1.91 usr 0.34 sys + 0.00 cusr 0.00 csys = 2.25 CPU)
+Time for select_group (2600): 51 wallclock secs ( 1.48 usr 0.12 sys + 0.00 cusr 0.00 csys = 1.60 CPU)
+
+Removing tables
+Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Total time: 105 wallclock secs (12.13 usr 1.14 sys + 0.00 cusr 0.00 csys = 13.27 CPU)
diff --git a/sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index b3f8ad6f63f..00000000000
--- a/sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,75 +0,0 @@
-Benchmark DBD suite: 2.9
-Date of test: 2000-08-17 19:09:48
-Running tests on: Linux 2.2.14-my-SMP i686
-Arguments:
-Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M
-Limits from: mysql,pg
-Server version: MySQL 3.23.22 beta
-
-ATIS: Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-alter-table: Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-big-tables: Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-connect: Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-create: Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-insert: Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-select: Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-wisconsin: Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-All 8 test executed successfully
-
-Totals per operation:
-Operation seconds usr sys cpu tests
-alter_table_add 252.00 0.20 0.02 0.00 992
-connect 10.00 6.60 1.51 0.00 10000
-connect+select_1_row 13.00 7.08 2.47 0.00 10000
-connect+select_simple 13.00 7.36 2.24 0.00 10000
-count 46.00 0.07 0.00 0.00 100
-count_distinct 124.00 0.65 0.16 0.00 1000
-count_distinct_big 623.00 69.07 56.00 0.00 1020
-count_distinct_group 77.00 0.94 0.33 0.00 1000
-count_distinct_group_on_key 64.00 0.37 0.07 0.00 1000
-count_distinct_group_on_key_parts 77.00 0.93 0.45 0.00 1000
-count_group_on_key_parts 61.00 1.09 0.27 0.00 1000
-count_on_key 574.00 16.11 3.17 0.00 50100
-create+drop 26.00 2.10 0.81 0.00 10000
-create_MANY_tables 32.00 1.97 0.49 0.00 10000
-create_index 4.00 0.00 0.00 0.00 8
-create_key+drop 40.00 3.64 0.72 0.00 10000
-create_table 0.00 0.00 0.00 0.00 31
-delete_big 21.00 0.00 0.00 0.00 13
-delete_big_many_keys 120.00 0.00 0.00 0.00 2
-delete_key 4.00 0.50 0.47 0.00 10000
-drop_index 4.00 0.00 0.00 0.00 8
-drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 9.00 0.44 0.49 0.00 10000
-insert 130.00 20.73 12.97 0.00 350768
-insert_duplicates 113.00 18.31 11.27 0.00 300000
-insert_key 159.00 8.91 4.08 0.00 100000
-insert_many_fields 8.00 0.29 0.08 0.00 2000
-min_max 31.00 0.03 0.00 0.00 60
-min_max_on_key 213.00 25.00 4.86 0.00 85000
-order_by 47.00 19.72 16.45 0.00 10
-order_by_key 31.00 19.75 10.54 0.00 10
-select_1_row 3.00 0.74 0.62 0.00 10000
-select_2_rows 3.00 0.45 0.58 0.00 10000
-select_big 37.00 23.09 11.64 0.00 10080
-select_column+column 3.00 0.52 0.59 0.00 10000
-select_diff_key 210.00 0.28 0.07 0.00 500
-select_distinct 12.00 1.70 0.68 0.00 800
-select_group 70.00 1.49 0.40 0.00 2711
-select_group_when_MANY_tables 14.00 0.68 0.63 0.00 10000
-select_join 15.00 4.21 2.20 0.00 200
-select_key 129.00 66.05 14.03 0.00 200000
-select_key_prefix 130.00 67.36 13.74 0.00 200000
-select_many_fields 22.00 7.89 6.66 0.00 2000
-select_range 21.00 7.00 1.72 0.00 25420
-select_range_prefix 18.00 6.07 1.50 0.00 25010
-select_simple 2.00 0.52 0.49 0.00 10000
-select_simple_join 2.00 0.63 0.32 0.00 500
-update_big 65.00 0.01 0.00 0.00 500
-update_of_key 25.00 2.51 2.23 0.00 500
-update_of_key_big 33.00 0.06 0.00 0.00 501
-update_of_primary_key_many_keys 67.00 0.00 0.01 0.00 256
-update_with_key 109.00 13.71 11.48 0.00 100000
-wisc_benchmark 4.00 1.75 0.68 0.00 114
-TOTALS 3920.00 438.58 200.19 0.00 1594242
diff --git a/sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 88b36fc1e52..00000000000
--- a/sql-bench/Results/RUN-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,74 +0,0 @@
-Benchmark DBD suite: 2.9
-Date of test: 2000-08-17 20:19:45
-Running tests on: Linux 2.2.14-my-SMP i686
-Arguments: --fast
-Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M
-Limits from: mysql,pg
-Server version: MySQL 3.23.22 beta
-
-ATIS: Total time: 42 wallclock secs ( 7.93 usr 3.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-alter-table: Total time: 260 wallclock secs ( 0.26 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-big-tables: Total time: 31 wallclock secs ( 8.32 usr 6.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-connect: Total time: 54 wallclock secs (26.60 usr 10.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-create: Total time: 122 wallclock secs ( 8.51 usr 3.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-insert: Total time: 1332 wallclock secs (254.96 usr 103.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-select: Total time: 1696 wallclock secs (113.17 usr 64.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-wisconsin: Total time: 6 wallclock secs ( 1.67 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-All 8 test executed successfully
-
-Totals per operation:
-Operation seconds usr sys cpu tests
-alter_table_add 252.00 0.20 0.10 0.00 992
-connect 11.00 6.50 1.98 0.00 10000
-connect+select_1_row 14.00 7.49 2.28 0.00 10000
-connect+select_simple 12.00 7.41 2.24 0.00 10000
-count 46.00 0.02 0.00 0.00 100
-count_distinct 124.00 0.57 0.12 0.00 1000
-count_distinct_big 629.00 70.62 55.60 0.00 1020
-count_distinct_group 77.00 1.14 0.31 0.00 1000
-count_distinct_group_on_key 65.00 0.35 0.14 0.00 1000
-count_distinct_group_on_key_parts 77.00 1.07 0.35 0.00 1000
-count_group_on_key_parts 61.00 1.03 0.31 0.00 1000
-count_on_key 573.00 16.47 3.19 0.00 50100
-create+drop 26.00 2.17 1.03 0.00 10000
-create_MANY_tables 35.00 1.84 0.58 0.00 10000
-create_index 4.00 0.00 0.00 0.00 8
-create_key+drop 40.00 3.68 0.86 0.00 10000
-create_table 1.00 0.01 0.00 0.00 31
-delete_big 18.00 0.00 0.00 0.00 13
-delete_big_many_keys 1.00 0.00 0.00 0.00 2
-delete_key 3.00 0.44 0.38 0.00 10000
-drop_index 4.00 0.00 0.00 0.00 8
-drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 15.00 0.10 0.01 0.00 10000
-insert 87.00 18.67 12.96 0.00 350768
-insert_duplicates 82.00 17.82 12.50 0.00 300000
-insert_key 91.00 8.12 4.12 0.00 100000
-insert_many_fields 9.00 0.46 0.10 0.00 2000
-min_max 32.00 0.05 0.00 0.00 60
-min_max_on_key 210.00 24.91 5.21 0.00 85000
-order_by 48.00 20.14 16.88 0.00 10
-order_by_key 31.00 20.12 10.64 0.00 10
-select_1_row 2.00 0.54 0.83 0.00 10000
-select_2_rows 4.00 0.55 0.65 0.00 10000
-select_big 37.00 23.14 12.09 0.00 10080
-select_column+column 3.00 0.51 0.73 0.00 10000
-select_diff_key 205.00 0.19 0.03 0.00 500
-select_distinct 11.00 1.74 0.60 0.00 800
-select_group 71.00 1.38 0.66 0.00 2711
-select_group_when_MANY_tables 6.00 0.71 0.53 0.00 10000
-select_join 16.00 4.21 2.17 0.00 200
-select_key 125.00 67.84 14.15 0.00 200000
-select_key_prefix 127.00 65.92 14.67 0.00 200000
-select_many_fields 22.00 7.85 6.78 0.00 2000
-select_range 20.00 7.27 1.62 0.00 25420
-select_range_prefix 19.00 6.09 1.82 0.00 25010
-select_simple 2.00 0.47 0.54 0.00 10000
-select_simple_join 1.00 0.64 0.24 0.00 500
-update_big 65.00 0.00 0.00 0.00 500
-update_of_key 77.00 2.84 2.32 0.00 756
-update_of_key_big 33.00 0.02 0.01 0.00 501
-update_with_key 97.00 14.16 13.03 0.00 100000
-wisc_benchmark 4.00 1.66 0.72 0.00 114
-TOTALS 3625.00 439.13 206.08 0.00 1594242
diff --git a/sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 0cb843d77ba..00000000000
--- a/sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,88 +0,0 @@
-Benchmark DBD suite: 2.10
-Date of test: 2000-12-05 5:18:45
-Running tests on: Linux 2.2.14-my-SMP i686
-Arguments:
-Comments: Intel Xeon, 2x550 Mhz 500 Mb, pg started with -o -F
-Limits from: mysql,pg
-Server version: PostgreSQL version ???
-
-ATIS: Total time: 89 wallclock secs (12.72 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-alter-table: Total time: 29 wallclock secs ( 0.71 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-big-tables: Total time: 1248 wallclock secs ( 9.27 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-connect: Total time: 472 wallclock secs (48.80 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-create: Total time: 8968 wallclock secs (35.76 usr 5.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-insert: Estimated total time: 110214 wallclock secs (659.27 usr 91.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-select: Estimated total time: 8255 wallclock secs (54.76 usr 6.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-wisconsin: Total time: 813 wallclock secs (12.05 usr 2.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-All 8 test executed successfully
-Tests with estimated time have a + at end of line
-
-Totals per operation:
-Operation seconds usr sys cpu tests
-alter_table_add 28.00 0.41 0.03 0.00 992
-connect 125.00 9.11 3.79 0.00 10000
-connect+select_1_row 173.00 12.56 5.56 0.00 10000
-connect+select_simple 140.00 12.15 5.74 0.00 10000
-count 130.00 0.01 0.03 0.00 100
-count_distinct 235.00 0.76 0.12 0.00 2000
-count_distinct_big 200.00 8.26 0.30 0.00 120
-count_distinct_group 271.00 1.27 0.10 0.00 1000
-count_distinct_group_on_key 174.00 0.44 0.11 0.00 1000
-count_distinct_group_on_key_parts 270.00 1.43 0.07 0.00 1000
-count_group_on_key_parts 242.00 1.19 0.05 0.00 1000
-count_on_key 2544.00 16.73 2.42 0.00 50100 +
-create+drop 2954.00 11.24 1.81 0.00 10000
-create_MANY_tables 448.00 7.42 0.95 0.00 10000
-create_index 1.00 0.00 0.00 0.00 8
-create_key+drop 4055.00 10.98 1.30 0.00 10000
-create_table 1.00 0.03 0.01 0.00 31
-delete_all 341.00 0.00 0.00 0.00 12
-delete_all_many_keys 31.00 0.07 0.00 0.00 1
-delete_big 0.00 0.00 0.00 0.00 1
-delete_big_many_keys 30.00 0.07 0.00 0.00 128
-delete_key 283.00 2.91 0.52 0.00 10000
-drop_index 0.00 0.00 0.00 0.00 8
-drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 1324.00 3.41 0.51 0.00 10000
-insert 8542.00 109.96 19.42 0.00 350768
-insert_duplicates 3055.00 60.75 8.53 0.00 100000
-insert_key 3693.00 33.29 5.64 0.00 100000
-insert_many_fields 357.00 1.18 0.13 0.00 2000
-insert_select_1_key 49.00 0.00 0.00 0.00 1
-insert_select_2_keys 43.00 0.00 0.00 0.00 1
-min_max 58.00 0.02 0.01 0.00 60
-min_max_on_key 11172.00 24.56 3.60 0.00 85000 ++
-order_by_big 121.00 21.92 0.67 0.00 10
-order_by_big_key 115.00 22.06 0.67 0.00 10
-order_by_big_key2 118.00 22.07 0.53 0.00 10
-order_by_big_key_desc 116.00 22.15 0.66 0.00 10
-order_by_big_key_diff 126.00 22.20 0.79 0.00 10
-order_by_key 15.00 1.09 0.06 0.00 500
-order_by_key2_diff 19.00 2.00 0.06 0.00 500
-order_by_range 16.00 1.21 0.02 0.00 500
-select_1_row 7.00 3.10 0.50 0.00 10000
-select_2_rows 6.00 2.75 0.54 0.00 10000
-select_big 64.00 25.86 1.65 0.00 10080
-select_column+column 9.00 2.41 0.31 0.00 10000
-select_diff_key 13.00 0.24 0.01 0.00 500
-select_distinct 17.00 2.21 0.07 0.00 800
-select_group 285.00 1.76 0.11 0.00 2711
-select_group_when_MANY_tables 187.00 2.71 0.68 0.00 10000
-select_join 14.00 5.18 0.20 0.00 200
-select_key 4967.00 68.44 12.65 0.00 200000 +
-select_key2 4933.00 67.48 11.08 0.00 200000 +
-select_key_prefix 4938.00 67.63 10.85 0.00 200000 +
-select_many_fields 891.00 8.07 0.66 0.00 2000
-select_range 35.00 0.87 0.02 0.00 410
-select_range_key2 26862.00 7.62 1.08 0.00 25000 ++
-select_range_prefix 24419.00 9.69 0.80 0.00 25000 ++
-select_simple 4.00 2.96 0.45 0.00 10000
-select_simple_join 3.00 0.69 0.04 0.00 500
-update_big 1894.00 0.02 0.00 0.00 10
-update_of_key 2460.00 15.33 3.09 0.00 50000
-update_of_key_big 444.00 0.20 0.00 0.00 501
-update_of_primary_key_many_keys 1164.00 0.08 0.01 0.00 256
-update_with_key 14806.00 89.73 16.29 0.00 300000
-wisc_benchmark 18.00 3.04 0.25 0.00 114
-TOTALS 130055.00 832.98 125.55 0.00 1844991 ++++++++++
diff --git a/sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..4025af26afd
--- /dev/null
+++ b/sql-bench/Results/RUN-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,103 @@
+Benchmark DBD suite: 2.13
+Date of test: 2001-06-12 18:11:16
+Running tests on: Linux 2.4.2-64GB-SMP i686
+Arguments:
+Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F
+Limits from: mysql,pg
+Server version: PostgreSQL version 7.1.2
+
+ATIS: Total time: 104 wallclock secs (12.54 usr 1.17 sys + 0.00 cusr 0.00 csys = 13.71 CPU)
+alter-table: Total time: 50 wallclock secs ( 0.58 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.66 CPU)
+big-tables: Total time: 1355 wallclock secs ( 8.68 usr 0.69 sys + 0.00 cusr 0.00 csys = 9.37 CPU)
+connect: Total time: 547 wallclock secs (50.45 usr 14.25 sys + 0.00 cusr 0.00 csys = 64.70 CPU)
+create: Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU)
+insert: Estimated total time: 288864 wallclock secs (887.56 usr 201.43 sys + 0.00 cusr 0.00 csys = 1088.99 CPU)
+select: Estimated total time: 13160 wallclock secs (70.90 usr 7.35 sys + 0.00 cusr 0.00 csys = 78.25 CPU)
+wisconsin: Total time: 55 wallclock secs (12.69 usr 2.29 sys + 0.00 cusr 0.00 csys = 14.98 CPU)
+
+All 8 test executed successfully
+Tests with estimated time have a + at end of line
+
+Totals per operation:
+Operation seconds usr sys cpu tests
+alter_table_add 48.00 0.31 0.04 0.35 992
+connect 141.00 7.82 1.62 9.44 10000
+connect+select_1_row 192.00 10.79 2.47 13.26 10000
+connect+select_simple 154.00 10.43 2.60 13.03 10000
+count 131.00 0.06 0.00 0.06 100
+count_distinct 131.00 0.29 0.02 0.31 1000
+count_distinct_2 213.00 0.43 0.06 0.49 1000
+count_distinct_big 268.00 8.51 0.17 8.68 120
+count_distinct_group 384.00 1.12 0.07 1.19 1000
+count_distinct_group_on_key 485.00 0.38 0.03 0.41 1000
+count_distinct_group_on_key_parts 381.00 1.23 0.05 1.28 1000
+count_distinct_key_prefix 188.00 0.33 0.05 0.38 1000
+count_group_on_key_parts 332.00 1.20 0.04 1.24 1000
+count_on_key 1809.00 15.49 2.15 17.65 50100 +
+create+drop 2924.00 11.10 2.41 13.51 10000
+create_MANY_tables 194.00 6.27 5.72 11.99 5000
+create_index 1.00 0.00 0.00 0.00 8
+create_key+drop 5464.00 11.00 2.30 13.30 10000
+create_table 1.00 0.05 0.00 0.05 31
+delete_all 3191.00 0.01 0.00 0.01 12
+delete_all_many_keys 118.00 0.05 0.04 0.09 1
+delete_big 0.00 0.00 0.00 0.00 1
+delete_big_many_keys 118.00 0.05 0.04 0.09 128
+delete_key 136.00 3.08 0.59 3.67 10000
+drop_index 0.00 0.00 0.00 0.00 8
+drop_table 0.00 0.02 0.00 0.02 28
+drop_table_when_MANY_tables 599.00 1.39 0.38 1.77 5000
+insert 353.00 103.74 26.20 129.94 350768
+insert_duplicates 120.00 29.00 13.77 42.77 100000
+insert_key 907.00 45.53 60.49 106.02 100000
+insert_many_fields 529.00 1.04 0.19 1.23 2000
+insert_select_1_key 111.00 0.00 0.00 0.00 1
+insert_select_2_keys 180.00 0.00 0.00 0.00 1
+min_max 59.00 0.03 0.00 0.03 60
+min_max_on_key 9941.00 25.90 4.02 29.92 85000 ++
+order_by_big 146.00 22.57 0.64 23.21 10
+order_by_big_key 145.00 26.12 1.23 27.35 10
+order_by_big_key2 133.00 22.62 0.93 23.55 10
+order_by_big_key_desc 145.00 25.80 1.41 27.21 10
+order_by_big_key_diff 139.00 22.46 0.67 23.13 10
+order_by_big_key_prefix 132.00 22.46 0.83 23.29 10
+order_by_key2_diff 7.00 2.09 0.04 2.13 500
+order_by_key_prefix 4.00 1.12 0.06 1.18 500
+order_by_range 4.00 1.11 0.04 1.15 500
+outer_join 4093.00 0.00 0.00 0.00 10
+outer_join_found 4086.00 0.00 0.00 0.00 10
+outer_join_not_found 203500.00 0.00 0.00 0.00 500 +
+outer_join_on_key 3961.00 0.00 0.00 0.00 10
+select_1_row 6.00 2.56 0.45 3.01 10000
+select_2_rows 6.00 2.68 0.45 3.13 10000
+select_big 62.00 22.48 3.33 25.81 80
+select_big_str 35.00 10.82 5.73 16.55 10000
+select_column+column 8.00 2.73 0.39 3.12 10000
+select_diff_key 0.00 0.16 0.02 0.18 500
+select_distinct 22.00 2.10 0.26 2.36 800
+select_group 327.00 1.78 0.06 1.84 2711
+select_group_when_MANY_tables 14.00 1.46 0.28 1.74 5000
+select_join 4.00 0.67 0.09 0.76 100
+select_key 245.00 69.03 8.64 77.67 200000
+select_key2 209.00 67.94 8.08 76.02 200000
+select_key2_return_key 201.00 63.19 8.05 71.24 200000
+select_key2_return_prim 204.00 64.84 7.89 72.73 200000
+select_key_prefix 210.00 67.51 8.60 76.11 200000
+select_key_prefix_join 12.00 4.43 0.15 4.58 100
+select_key_return_key 240.00 67.26 8.61 75.87 200000
+select_many_fields 825.00 7.63 0.50 8.13 2000
+select_query_cache 2623.00 3.22 0.37 3.59 10000
+select_query_cache2 2622.00 2.73 0.47 3.20 10000
+select_range 491.00 11.40 0.50 11.90 410
+select_range_key2 21975.00 5.82 0.10 5.92 25010 ++
+select_range_prefix 21993.00 6.20 0.48 6.68 25010 ++
+select_simple 5.00 2.59 0.54 3.13 10000
+select_simple_join 3.00 0.73 0.10 0.83 500
+update_big 6612.00 0.00 0.00 0.00 10
+update_of_key 119.00 16.20 10.81 27.01 50000
+update_of_key_big 333.00 0.21 0.21 0.42 501
+update_of_primary_key_many_keys 6813.00 0.13 0.02 0.15 256
+update_with_key 567.00 90.20 25.08 115.28 300000
+update_with_key_prefix 244.00 29.03 5.64 34.67 100000
+wisc_benchmark 16.00 3.54 1.02 4.56 114
+TOTALS 313344.00 1074.27 238.29 1312.57 2551551 ++++++++
diff --git a/sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 97f6abfa8a7..00000000000
--- a/sql-bench/Results/RUN-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,77 +0,0 @@
-Benchmark DBD suite: 2.8
-Date of test: 2000-08-17 11:51:48
-Running tests on: Linux 2.2.14-my-SMP i686
-Arguments: --fast
-Comments: Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F
-Limits from: mysql,pg
-Server version: PostgreSQL version 7.0.2
-
-ATIS: Total time: 87 wallclock secs (12.00 usr 0.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-alter-table: Total time: 29 wallclock secs ( 0.58 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-big-tables: Total time: 1247 wallclock secs ( 8.78 usr 0.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-connect: Total time: 484 wallclock secs (47.96 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-create: Total time: 8745 wallclock secs (32.62 usr 4.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-insert: Estimated total time: 16506 wallclock secs (446.80 usr 59.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-select: Estimated total time: 5187 wallclock secs (127.12 usr 9.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-wisconsin: Total time: 60 wallclock secs (12.14 usr 1.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-All 8 test executed successfully
-Tests with estimated time have a + at end of line
-
-Totals per operation:
-Operation seconds usr sys cpu tests
-alter_table_add 27.00 0.32 0.05 0.00 992
-book-keeping 2680.00 0.04 0.03 0.00 25
-connect 129.00 9.33 3.59 0.00 10000
-connect+select_1_row 176.00 12.21 5.95 0.00 10000
-connect+select_simple 142.00 11.69 5.72 0.00 10000
-count 119.00 0.00 0.00 0.00 100
-count_distinct 185.00 0.71 0.16 0.00 1000
-count_distinct_big 667.00 82.38 2.86 0.00 1020
-count_distinct_group 246.00 1.12 0.06 0.00 1000
-count_distinct_group_on_key 145.00 0.33 0.07 0.00 1000
-count_distinct_group_on_key_parts 246.00 1.09 0.05 0.00 1000
-count_group_on_key_parts 216.00 1.37 0.02 0.00 1000
-count_on_key 1213.00 15.61 2.51 0.00 50100 +
-create+drop 3022.00 10.18 1.71 0.00 10000
-create_MANY_tables 455.00 8.09 1.12 0.00 10000
-create_index 1.00 0.00 0.00 0.00 8
-create_key+drop 3752.00 8.40 1.09 0.00 10000
-create_table 1.00 0.01 0.00 0.00 31
-delete_big 102.00 0.00 0.00 0.00 13
-delete_big_many_keys 2.00 0.00 0.00 0.00 2
-delete_key 15.00 2.84 0.49 0.00 10000
-drop_index 0.00 0.00 0.00 0.00 8
-drop_table 0.00 0.00 0.00 0.00 28
-drop_table_when_MANY_tables 1328.00 2.91 0.56 0.00 10000
-insert 375.00 103.83 16.23 0.00 350768
-insert_duplicates 321.00 88.94 13.94 0.00 300000
-insert_key 1367.00 32.13 5.30 0.00 100000
-insert_many_fields 356.00 1.12 0.19 0.00 2000
-min_max 53.00 0.02 0.00 0.00 60
-min_max_on_key 8723.00 25.11 3.76 0.00 85000 ++
-order_by 103.00 22.63 0.73 0.00 10
-order_by_key 103.00 22.46 0.65 0.00 10
-select_1_row 6.00 2.47 0.51 0.00 10000
-select_2_rows 7.00 3.12 0.44 0.00 10000
-select_big 61.00 26.33 1.34 0.00 10080
-select_column+column 8.00 2.78 0.39 0.00 10000
-select_diff_key 1.00 0.23 0.02 0.00 500
-select_distinct 17.00 2.06 0.13 0.00 800
-select_group 264.00 1.55 0.15 0.00 2711
-select_group_when_MANY_tables 188.00 3.03 0.46 0.00 10000
-select_join 13.00 4.77 0.26 0.00 200
-select_key 188.00 65.70 9.45 0.00 200000
-select_key_prefix 188.00 65.88 9.55 0.00 200000
-select_many_fields 886.00 7.63 0.72 0.00 2000
-select_range 66.00 7.49 0.74 0.00 25420
-select_range_prefix 44.00 6.28 0.79 0.00 25010
-select_simple 4.00 2.62 0.47 0.00 10000
-select_simple_join 3.00 0.79 0.07 0.00 500
-update_big 1832.00 0.00 0.00 0.00 500
-update_of_key 97.00 14.01 2.17 0.00 500
-update_of_key_big 559.00 0.21 0.01 0.00 501
-update_of_primary_key_many_keys 1491.00 0.07 0.01 0.00 256
-update_with_key 449.00 91.48 14.02 0.00 100000
-wisc_benchmark 15.00 3.21 0.28 0.00 114
-TOTALS 32657.00 776.58 108.82 0.00 1594267 +++
diff --git a/sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..8326a1864c3
--- /dev/null
+++ b/sql-bench/Results/RUN-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,104 @@
+Benchmark DBD suite: 2.13
+Date of test: 2001-06-12 18:14:29
+Running tests on: Linux 2.4.2-64GB-SMP i686
+Arguments: --fast
+Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F
+Limits from: mysql,pg
+Server version: PostgreSQL version 7.1.2
+
+ATIS: Total time: 105 wallclock secs (12.13 usr 1.14 sys + 0.00 cusr 0.00 csys = 13.27 CPU)
+alter-table: Total time: 51 wallclock secs ( 0.63 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.69 CPU)
+big-tables: Total time: 1356 wallclock secs ( 8.41 usr 0.76 sys + 0.00 cusr 0.00 csys = 9.17 CPU)
+connect: Total time: 550 wallclock secs (52.92 usr 14.30 sys + 0.00 cusr 0.00 csys = 67.22 CPU)
+create: Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU)
+insert: Estimated total time: 21187 wallclock secs (884.26 usr 225.15 sys + 0.00 cusr 0.00 csys = 1109.40 CPU)
+select: Estimated total time: 12852 wallclock secs (74.09 usr 9.62 sys + 0.00 cusr 0.00 csys = 83.71 CPU)
+wisconsin: Total time: 64 wallclock secs (13.06 usr 3.32 sys + 0.00 cusr 0.00 csys = 16.38 CPU)
+
+All 8 test executed successfully
+Tests with estimated time have a + at end of line
+
+Totals per operation:
+Operation seconds usr sys cpu tests
+alter_table_add 48.00 0.32 0.03 0.35 992
+book-keeping 3262.00 0.03 0.00 0.03 25
+connect 140.00 7.94 1.85 9.79 10000
+connect+select_1_row 190.00 10.78 2.23 13.01 10000
+connect+select_simple 155.00 10.57 2.71 13.28 10000
+count 132.00 0.04 0.00 0.04 100
+count_distinct 131.00 0.34 0.05 0.39 1000
+count_distinct_2 213.00 0.38 0.03 0.41 1000
+count_distinct_big 269.00 8.53 0.27 8.80 120
+count_distinct_group 385.00 1.14 0.09 1.23 1000
+count_distinct_group_on_key 209.00 0.35 0.09 0.44 1000
+count_distinct_group_on_key_parts 382.00 1.16 0.06 1.22 1000
+count_distinct_key_prefix 188.00 0.38 0.02 0.40 1000
+count_group_on_key_parts 332.00 1.14 0.03 1.17 1000
+count_on_key 1774.00 14.24 1.80 16.04 50100 +
+create+drop 2924.00 11.10 2.41 13.51 10000
+create_MANY_tables 194.00 6.27 5.72 11.99 5000
+create_index 0.00 0.00 0.00 0.00 8
+create_key+drop 5464.00 11.00 2.30 13.30 10000
+create_table 1.00 0.03 0.00 0.03 31
+delete_all 11.00 0.01 0.01 0.02 12
+delete_all_many_keys 3.00 0.05 0.00 0.05 1
+delete_big 2.00 0.01 0.00 0.01 1
+delete_big_many_keys 3.00 0.05 0.00 0.05 128
+delete_key 11.00 3.02 0.37 3.39 10000
+drop_index 1.00 0.00 0.00 0.00 8
+drop_table 0.00 0.00 0.00 0.00 28
+drop_table_when_MANY_tables 599.00 1.39 0.38 1.77 5000
+insert 359.00 104.39 28.15 132.54 350768
+insert_duplicates 111.00 28.41 9.26 37.67 100000
+insert_key 895.00 45.94 68.46 114.40 100000
+insert_many_fields 525.00 1.01 0.18 1.19 2000
+insert_select_1_key 45.00 0.00 0.00 0.00 1
+insert_select_2_keys 77.00 0.01 0.00 0.01 1
+min_max 58.00 0.01 0.00 0.01 60
+min_max_on_key 9948.00 29.82 5.49 35.30 85000 ++
+order_by_big 147.00 22.48 0.61 23.09 10
+order_by_big_key 150.00 25.91 1.24 27.15 10
+order_by_big_key2 137.00 22.59 0.71 23.30 10
+order_by_big_key_desc 147.00 25.81 1.23 27.04 10
+order_by_big_key_diff 143.00 22.68 0.55 23.23 10
+order_by_big_key_prefix 133.00 22.64 0.62 23.26 10
+order_by_key2_diff 7.00 2.07 0.04 2.11 500
+order_by_key_prefix 3.00 1.48 0.03 1.51 500
+order_by_range 4.00 1.04 0.04 1.08 500
+outer_join 253.00 0.00 0.00 0.00 10
+outer_join_found 243.00 0.00 0.00 0.00 10
+outer_join_not_found 242.00 0.00 0.01 0.01 500
+outer_join_on_key 238.00 0.00 0.00 0.00 10
+select_1_row 6.00 2.65 0.43 3.08 10000
+select_2_rows 7.00 2.81 0.40 3.21 10000
+select_big 56.00 22.70 2.29 24.99 80
+select_big_str 37.00 12.51 5.97 18.48 10000
+select_column+column 8.00 2.90 0.33 3.23 10000
+select_diff_key 1.00 0.21 0.00 0.21 500
+select_distinct 23.00 1.91 0.34 2.25 800
+select_group 318.00 1.54 0.12 1.66 2711
+select_group_when_MANY_tables 14.00 1.46 0.28 1.74 5000
+select_join 4.00 0.59 0.10 0.69 100
+select_key 213.00 67.07 8.38 75.45 200000
+select_key2 192.00 67.06 8.24 75.30 200000
+select_key2_return_key 183.00 63.93 8.32 72.25 200000
+select_key2_return_prim 188.00 64.56 8.71 73.27 200000
+select_key_prefix 192.00 67.39 7.56 74.95 200000
+select_key_prefix_join 12.00 4.47 0.12 4.59 100
+select_key_return_key 208.00 65.98 8.96 74.94 200000
+select_many_fields 823.00 7.36 0.55 7.91 2000
+select_query_cache 2643.00 3.20 0.43 3.63 10000
+select_query_cache2 2642.00 3.26 0.43 3.69 10000
+select_range 481.00 11.87 1.04 12.91 410
+select_range_key2 47.00 6.56 0.67 7.23 25010
+select_range_prefix 48.00 6.63 0.65 7.28 25010
+select_simple 5.00 2.74 0.38 3.12 10000
+select_simple_join 4.00 0.71 0.18 0.89 500
+update_big 3883.00 0.01 0.00 0.01 10
+update_of_key 90.00 14.87 5.98 20.85 50000
+update_of_key_big 647.00 0.12 0.06 0.18 501
+update_of_primary_key_many_keys 835.00 0.10 0.09 0.19 256
+update_with_key 470.00 87.85 41.80 129.65 300000
+update_with_key_prefix 170.00 31.13 15.28 46.41 100000
+wisc_benchmark 18.00 3.58 0.20 3.78 114
+TOTALS 45356.00 1076.29 265.36 1341.64 2551576 +++
diff --git a/sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 67da8f8a33a..00000000000
--- a/sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:47:38
-
-Testing of ALTER TABLE
-Testing with 1000 columns and 1000 rows in 20 steps
-Insert data into the table
-Time for insert (1000) 0 wallclock secs ( 0.06 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for alter_table_add (992): 252 wallclock secs ( 0.20 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 9a85f9a1754..00000000000
--- a/sql-bench/Results/alter-table-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:11:37
-
-Testing of ALTER TABLE
-Testing with 1000 columns and 1000 rows in 20 steps
-Insert data into the table
-Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for alter_table_add (992): 252 wallclock secs ( 0.20 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 260 wallclock secs ( 0.26 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index d225f1fddaa..00000000000
--- a/sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 5:20:15
-
-Testing of ALTER TABLE
-Testing with 1000 columns and 1000 rows in 20 steps
-Insert data into the table
-Time for insert (1000) 0 wallclock secs ( 0.28 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for alter_table_add (992): 28 wallclock secs ( 0.41 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 29 wallclock secs ( 0.71 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..06c5236dca0
--- /dev/null
+++ b/sql-bench/Results/alter-table-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,14 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:16:18
+
+Testing of ALTER TABLE
+Testing with 1000 columns and 1000 rows in 20 steps
+Insert data into the table
+Time for insert (1000) 0 wallclock secs ( 0.27 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.31 CPU)
+
+Time for alter_table_add (992): 48 wallclock secs ( 0.31 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.35 CPU)
+
+Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Total time: 50 wallclock secs ( 0.58 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.66 CPU)
diff --git a/sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 18b03b3ae0a..00000000000
--- a/sql-bench/Results/alter-table-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:09:56
-
-Testing of ALTER TABLE
-Testing with 1000 columns and 1000 rows in 20 steps
-Insert data into the table
-Time for insert (1000) 1 wallclock secs ( 0.26 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for alter_table_add (992): 27 wallclock secs ( 0.32 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 29 wallclock secs ( 0.58 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..bb492b6b989
--- /dev/null
+++ b/sql-bench/Results/alter-table-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,14 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 17:54:48
+
+Testing of ALTER TABLE
+Testing with 1000 columns and 1000 rows in 20 steps
+Insert data into the table
+Time for insert (1000) 1 wallclock secs ( 0.30 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.33 CPU)
+
+Time for alter_table_add (992): 48 wallclock secs ( 0.32 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.35 CPU)
+
+Time for create_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time for drop_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Total time: 51 wallclock secs ( 0.63 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.69 CPU)
diff --git a/sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index a4ff1e655ac..00000000000
--- a/sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,19 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:51:59
-
-Testing of some unusual tables
-All tests are done 1000 times with 1000 fields
-
-Testing table with 1000 fields
-Testing select * from table with 1 record
-Time to select_many_fields(1000): 9 wallclock secs ( 4.07 usr 3.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select all_fields from table with 1 record
-Time to select_many_fields(1000): 13 wallclock secs ( 3.82 usr 3.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert VALUES()
-Time to insert_many_fields(1000): 3 wallclock secs ( 0.23 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert (all_fields) VALUES()
-Time to insert_many_fields(1000): 5 wallclock secs ( 0.06 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index b5532123a9b..00000000000
--- a/sql-bench/Results/big-tables-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,19 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:15:57
-
-Testing of some unusual tables
-All tests are done 1000 times with 1000 fields
-
-Testing table with 1000 fields
-Testing select * from table with 1 record
-Time to select_many_fields(1000): 9 wallclock secs ( 3.76 usr 3.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select all_fields from table with 1 record
-Time to select_many_fields(1000): 13 wallclock secs ( 4.09 usr 3.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert VALUES()
-Time to insert_many_fields(1000): 3 wallclock secs ( 0.41 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert (all_fields) VALUES()
-Time to insert_many_fields(1000): 6 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 31 wallclock secs ( 8.32 usr 6.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 10e17dea64a..00000000000
--- a/sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,19 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 5:20:45
-
-Testing of some unusual tables
-All tests are done 1000 times with 1000 fields
-
-Testing table with 1000 fields
-Testing select * from table with 1 record
-Time to select_many_fields(1000): 402 wallclock secs ( 3.75 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select all_fields from table with 1 record
-Time to select_many_fields(1000): 489 wallclock secs ( 4.32 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert VALUES()
-Time to insert_many_fields(1000): 144 wallclock secs ( 0.38 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert (all_fields) VALUES()
-Time to insert_many_fields(1000): 213 wallclock secs ( 0.80 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 1248 wallclock secs ( 9.27 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..4ae51da87c6
--- /dev/null
+++ b/sql-bench/Results/big-tables-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,19 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:17:10
+
+Testing of some unusual tables
+All tests are done 1000 times with 1000 fields
+
+Testing table with 1000 fields
+Testing select * from table with 1 record
+Time to select_many_fields(1000): 354 wallclock secs ( 3.70 usr 0.19 sys + 0.00 cusr 0.00 csys = 3.89 CPU)
+
+Testing select all_fields from table with 1 record
+Time to select_many_fields(1000): 471 wallclock secs ( 3.93 usr 0.31 sys + 0.00 cusr 0.00 csys = 4.24 CPU)
+
+Testing insert VALUES()
+Time to insert_many_fields(1000): 230 wallclock secs ( 0.34 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.46 CPU)
+
+Testing insert (all_fields) VALUES()
+Time to insert_many_fields(1000): 299 wallclock secs ( 0.70 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.77 CPU)
+
+Total time: 1355 wallclock secs ( 8.68 usr 0.69 sys + 0.00 cusr 0.00 csys = 9.37 CPU)
diff --git a/sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index a5c7613be55..00000000000
--- a/sql-bench/Results/big-tables-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,28 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:10:25
-
-Testing of some unusual tables
-All tests are done 1000 times with 1000 fields
-
-Testing table with 1000 fields
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select * from table with 1 record
-Time to select_many_fields(1000): 398 wallclock secs ( 3.66 usr 0.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select all_fields from table with 1 record
-Time to select_many_fields(1000): 488 wallclock secs ( 3.97 usr 0.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert VALUES()
-Time to insert_many_fields(1000): 143 wallclock secs ( 0.41 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert (all_fields) VALUES()
-Time to insert_many_fields(1000): 213 wallclock secs ( 0.71 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 1247 wallclock secs ( 8.78 usr 0.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..1758aac5e38
--- /dev/null
+++ b/sql-bench/Results/big-tables-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,25 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 17:55:39
+
+Testing of some unusual tables
+All tests are done 1000 times with 1000 fields
+
+Testing table with 1000 fields
+Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Testing select * from table with 1 record
+Time to select_many_fields(1000): 353 wallclock secs ( 3.56 usr 0.31 sys + 0.00 cusr 0.00 csys = 3.87 CPU)
+
+Testing select all_fields from table with 1 record
+Time to select_many_fields(1000): 470 wallclock secs ( 3.80 usr 0.24 sys + 0.00 cusr 0.00 csys = 4.04 CPU)
+
+Testing insert VALUES()
+Time to insert_many_fields(1000): 229 wallclock secs ( 0.38 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.45 CPU)
+
+Time for book-keeping (1): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing insert (all_fields) VALUES()
+Time to insert_many_fields(1000): 296 wallclock secs ( 0.63 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.74 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Total time: 1356 wallclock secs ( 8.41 usr 0.76 sys + 0.00 cusr 0.00 csys = 9.17 CPU)
diff --git a/sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index b8317ca9ddb..00000000000
--- a/sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,30 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:52:30
-
-Testing the speed of connecting to the server and sending of data
-All tests are done 10000 times
-
-Testing connection/disconnect
-Time to connect (10000): 10 wallclock secs ( 6.60 usr 1.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test connect/simple select/disconnect
-Time for connect+select_simple (10000): 13 wallclock secs ( 7.36 usr 2.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test simple select
-Time for select_simple (10000): 2 wallclock secs ( 0.52 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing connect/select 1 row from table/disconnect
-Time to connect+select_1_row (10000): 13 wallclock secs ( 7.08 usr 2.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 1 row from table
-Time to select_1_row (10000): 3 wallclock secs ( 0.74 usr 0.62 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 2 rows from table
-Time to select_2_rows (10000): 3 wallclock secs ( 0.45 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test select with aritmetic (+)
-Time for select_column+column (10000): 3 wallclock secs ( 0.52 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing retrieval of big records (7000 bytes)
-Time to select_big (10000): 6 wallclock secs ( 2.98 usr 1.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 6084f81343f..00000000000
--- a/sql-bench/Results/connect-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,30 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:16:28
-
-Testing the speed of connecting to the server and sending of data
-All tests are done 10000 times
-
-Testing connection/disconnect
-Time to connect (10000): 11 wallclock secs ( 6.50 usr 1.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test connect/simple select/disconnect
-Time for connect+select_simple (10000): 12 wallclock secs ( 7.41 usr 2.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test simple select
-Time for select_simple (10000): 2 wallclock secs ( 0.47 usr 0.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing connect/select 1 row from table/disconnect
-Time to connect+select_1_row (10000): 14 wallclock secs ( 7.49 usr 2.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 1 row from table
-Time to select_1_row (10000): 2 wallclock secs ( 0.54 usr 0.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 2 rows from table
-Time to select_2_rows (10000): 4 wallclock secs ( 0.55 usr 0.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test select with aritmetic (+)
-Time for select_column+column (10000): 3 wallclock secs ( 0.51 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing retrieval of big records (7000 bytes)
-Time to select_big (10000): 6 wallclock secs ( 3.12 usr 1.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 54 wallclock secs (26.60 usr 10.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 31a92939c56..00000000000
--- a/sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,30 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 5:41:34
-
-Testing the speed of connecting to the server and sending of data
-All tests are done 10000 times
-
-Testing connection/disconnect
-Time to connect (10000): 125 wallclock secs ( 9.11 usr 3.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test connect/simple select/disconnect
-Time for connect+select_simple (10000): 140 wallclock secs (12.15 usr 5.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test simple select
-Time for select_simple (10000): 4 wallclock secs ( 2.96 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing connect/select 1 row from table/disconnect
-Time to connect+select_1_row (10000): 173 wallclock secs (12.56 usr 5.56 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 1 row from table
-Time to select_1_row (10000): 7 wallclock secs ( 3.10 usr 0.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 2 rows from table
-Time to select_2_rows (10000): 6 wallclock secs ( 2.75 usr 0.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test select with aritmetic (+)
-Time for select_column+column (10000): 9 wallclock secs ( 2.41 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing retrieval of big records (7000 bytes)
-Time to select_big (10000): 8 wallclock secs ( 3.74 usr 0.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 472 wallclock secs (48.80 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..2cdf15596f5
--- /dev/null
+++ b/sql-bench/Results/connect-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,30 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:39:45
+
+Testing the speed of connecting to the server and sending of data
+All tests are done 10000 times
+
+Testing connection/disconnect
+Time to connect (10000): 141 wallclock secs ( 7.82 usr 1.62 sys + 0.00 cusr 0.00 csys = 9.44 CPU)
+
+Test connect/simple select/disconnect
+Time for connect+select_simple (10000): 154 wallclock secs (10.43 usr 2.60 sys + 0.00 cusr 0.00 csys = 13.03 CPU)
+
+Test simple select
+Time for select_simple (10000): 5 wallclock secs ( 2.59 usr 0.54 sys + 0.00 cusr 0.00 csys = 3.13 CPU)
+
+Testing connect/select 1 row from table/disconnect
+Time to connect+select_1_row (10000): 192 wallclock secs (10.79 usr 2.47 sys + 0.00 cusr 0.00 csys = 13.26 CPU)
+
+Testing select 1 row from table
+Time to select_1_row (10000): 6 wallclock secs ( 2.56 usr 0.45 sys + 0.00 cusr 0.00 csys = 3.01 CPU)
+
+Testing select 2 rows from table
+Time to select_2_rows (10000): 6 wallclock secs ( 2.68 usr 0.45 sys + 0.00 cusr 0.00 csys = 3.13 CPU)
+
+Test select with aritmetic (+)
+Time for select_column+column (10000): 8 wallclock secs ( 2.73 usr 0.39 sys + 0.00 cusr 0.00 csys = 3.12 CPU)
+
+Testing retrieval of big records (65000 bytes)
+Time to select_big_str (10000): 35 wallclock secs (10.82 usr 5.73 sys + 0.00 cusr 0.00 csys = 16.55 CPU)
+
+Total time: 547 wallclock secs (50.45 usr 14.25 sys + 0.00 cusr 0.00 csys = 64.70 CPU)
diff --git a/sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 21556d6f7ce..00000000000
--- a/sql-bench/Results/connect-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,42 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:31:12
-
-Testing the speed of connecting to the server and sending of data
-All tests are done 10000 times
-
-Testing connection/disconnect
-Time to connect (10000): 129 wallclock secs ( 9.33 usr 3.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test connect/simple select/disconnect
-Time for connect+select_simple (10000): 142 wallclock secs (11.69 usr 5.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test simple select
-Time for select_simple (10000): 4 wallclock secs ( 2.62 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing connect/select 1 row from table/disconnect
-Time to connect+select_1_row (10000): 176 wallclock secs (12.21 usr 5.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 1 row from table
-Time to select_1_row (10000): 6 wallclock secs ( 2.47 usr 0.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing select 2 rows from table
-Time to select_2_rows (10000): 7 wallclock secs ( 3.12 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test select with aritmetic (+)
-Time for select_column+column (10000): 8 wallclock secs ( 2.78 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing retrieval of big records (7000 bytes)
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time to select_big (10000): 8 wallclock secs ( 3.71 usr 0.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 484 wallclock secs (47.96 usr 17.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..00ea04c49a3
--- /dev/null
+++ b/sql-bench/Results/connect-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,38 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 18:18:15
+
+Testing the speed of connecting to the server and sending of data
+All tests are done 10000 times
+
+Testing connection/disconnect
+Time to connect (10000): 140 wallclock secs ( 7.94 usr 1.85 sys + 0.00 cusr 0.00 csys = 9.79 CPU)
+
+Test connect/simple select/disconnect
+Time for connect+select_simple (10000): 155 wallclock secs (10.57 usr 2.71 sys + 0.00 cusr 0.00 csys = 13.28 CPU)
+
+Test simple select
+Time for select_simple (10000): 5 wallclock secs ( 2.74 usr 0.38 sys + 0.00 cusr 0.00 csys = 3.12 CPU)
+
+Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing connect/select 1 row from table/disconnect
+Time to connect+select_1_row (10000): 190 wallclock secs (10.78 usr 2.23 sys + 0.00 cusr 0.00 csys = 13.01 CPU)
+
+Testing select 1 row from table
+Time to select_1_row (10000): 6 wallclock secs ( 2.65 usr 0.43 sys + 0.00 cusr 0.00 csys = 3.08 CPU)
+
+Testing select 2 rows from table
+Time to select_2_rows (10000): 7 wallclock secs ( 2.81 usr 0.40 sys + 0.00 cusr 0.00 csys = 3.21 CPU)
+
+Test select with aritmetic (+)
+Time for select_column+column (10000): 8 wallclock secs ( 2.90 usr 0.33 sys + 0.00 cusr 0.00 csys = 3.23 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing retrieval of big records (65000 bytes)
+Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time to select_big_str (10000): 37 wallclock secs (12.51 usr 5.97 sys + 0.00 cusr 0.00 csys = 18.48 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Total time: 550 wallclock secs (52.92 usr 14.30 sys + 0.00 cusr 0.00 csys = 67.22 CPU)
diff --git a/sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 731c1569794..00000000000
--- a/sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,18 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:53:24
-
-Testing the speed of creating and droping tables
-Testing with 10000 tables and 10000 loop count
-
-Testing create of tables
-Time for create_MANY_tables (10000): 32 wallclock secs ( 1.97 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Accessing tables
-Time to select_group_when_MANY_tables (10000): 14 wallclock secs ( 0.68 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing drop
-Time for drop_table_when_MANY_tables (10000): 9 wallclock secs ( 0.44 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing create+drop
-Time for create+drop (10000): 26 wallclock secs ( 2.10 usr 0.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for create_key+drop (10000): 40 wallclock secs ( 3.64 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 5b90c4778c6..00000000000
--- a/sql-bench/Results/create-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,18 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:17:23
-
-Testing the speed of creating and droping tables
-Testing with 10000 tables and 10000 loop count
-
-Testing create of tables
-Time for create_MANY_tables (10000): 35 wallclock secs ( 1.84 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Accessing tables
-Time to select_group_when_MANY_tables (10000): 6 wallclock secs ( 0.71 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing drop
-Time for drop_table_when_MANY_tables (10000): 15 wallclock secs ( 0.10 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing create+drop
-Time for create+drop (10000): 26 wallclock secs ( 2.17 usr 1.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for create_key+drop (10000): 40 wallclock secs ( 3.68 usr 0.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 122 wallclock secs ( 8.51 usr 3.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 1272418baab..00000000000
--- a/sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,18 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 5:49:26
-
-Testing the speed of creating and droping tables
-Testing with 10000 tables and 10000 loop count
-
-Testing create of tables
-Time for create_MANY_tables (10000): 448 wallclock secs ( 7.42 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Accessing tables
-Time to select_group_when_MANY_tables (10000): 187 wallclock secs ( 2.71 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing drop
-Time for drop_table_when_MANY_tables (10000): 1324 wallclock secs ( 3.41 usr 0.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing create+drop
-Time for create+drop (10000): 2954 wallclock secs (11.24 usr 1.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for create_key+drop (10000): 4055 wallclock secs (10.98 usr 1.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 8968 wallclock secs (35.76 usr 5.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..d4ed9d43980
--- /dev/null
+++ b/sql-bench/Results/create-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,18 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:48:52
+
+Testing the speed of creating and droping tables
+Testing with 5000 tables and 10000 loop count
+
+Testing create of tables
+Time for create_MANY_tables (5000): 194 wallclock secs ( 6.27 usr 5.72 sys + 0.00 cusr 0.00 csys = 11.99 CPU)
+
+Accessing tables
+Time to select_group_when_MANY_tables (5000): 14 wallclock secs ( 1.46 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.74 CPU)
+
+Testing drop
+Time for drop_table_when_MANY_tables (5000): 599 wallclock secs ( 1.39 usr 0.38 sys + 0.00 cusr 0.00 csys = 1.77 CPU)
+
+Testing create+drop
+Time for create+drop (10000): 2924 wallclock secs (11.10 usr 2.41 sys + 0.00 cusr 0.00 csys = 13.51 CPU)
+Time for create_key+drop (10000): 5464 wallclock secs (11.00 usr 2.30 sys + 0.00 cusr 0.00 csys = 13.30 CPU)
+Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU)
diff --git a/sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index c22ceeb6781..00000000000
--- a/sql-bench/Results/create-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,18 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-15 17:09:50
-
-Testing the speed of creating and droping tables
-Testing with 10000 tables and 10000 loop count
-
-Testing create of tables
-Time for create_MANY_tables (10000): 455 wallclock secs ( 8.09 usr 1.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Accessing tables
-Time to select_group_when_MANY_tables (10000): 188 wallclock secs ( 3.03 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing drop
-Time for drop_table_when_MANY_tables (10000): 1328 wallclock secs ( 2.91 usr 0.56 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing create+drop
-Time for create+drop (10000): 3022 wallclock secs (10.18 usr 1.71 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for create_key+drop (10000): 3752 wallclock secs ( 8.40 usr 1.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 8745 wallclock secs (32.62 usr 4.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..d4ed9d43980
--- /dev/null
+++ b/sql-bench/Results/create-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,18 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:48:52
+
+Testing the speed of creating and droping tables
+Testing with 5000 tables and 10000 loop count
+
+Testing create of tables
+Time for create_MANY_tables (5000): 194 wallclock secs ( 6.27 usr 5.72 sys + 0.00 cusr 0.00 csys = 11.99 CPU)
+
+Accessing tables
+Time to select_group_when_MANY_tables (5000): 14 wallclock secs ( 1.46 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.74 CPU)
+
+Testing drop
+Time for drop_table_when_MANY_tables (5000): 599 wallclock secs ( 1.39 usr 0.38 sys + 0.00 cusr 0.00 csys = 1.77 CPU)
+
+Testing create+drop
+Time for create+drop (10000): 2924 wallclock secs (11.10 usr 2.41 sys + 0.00 cusr 0.00 csys = 13.51 CPU)
+Time for create_key+drop (10000): 5464 wallclock secs (11.00 usr 2.30 sys + 0.00 cusr 0.00 csys = 13.30 CPU)
+Total time: 9195 wallclock secs (31.22 usr 11.10 sys + 0.00 cusr 0.00 csys = 42.32 CPU)
diff --git a/sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index e1dfc3171b9..00000000000
--- a/sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,58 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:55:26
-
-Testing the speed of inserting data into 1 table and do some selects on it.
-The tests are done with a table that has 100000 rows.
-
-Generating random keys
-Creating tables
-Inserting 100000 rows in order
-Inserting 100000 rows in reverse order
-Inserting 100000 rows in random order
-Time for insert (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for insert_duplicates (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data from the table
-Time for select_big (10:3000000): 30 wallclock secs (19.98 usr 10.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key (10:3000000): 31 wallclock secs (19.75 usr 10.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by (10:3000000): 47 wallclock secs (19.72 usr 16.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_diff_key (500:1000): 210 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range_prefix (5010:42084): 10 wallclock secs ( 2.48 usr 0.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (5010:42084): 11 wallclock secs ( 2.61 usr 0.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key_prefix (200000): 130 wallclock secs (67.36 usr 13.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key (200000): 129 wallclock secs (66.05 usr 14.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test of compares with simple ranges
-Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.59 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (20000:43500): 8 wallclock secs ( 3.74 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (111): 58 wallclock secs ( 0.06 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max_on_key (15000): 8 wallclock secs ( 4.40 usr 0.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max (60): 31 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (100): 56 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count (100): 46 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (20): 64 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys with functions
-Time for update_of_key (500): 25 wallclock secs ( 2.51 usr 2.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for update_of_key_big (501): 33 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update with key
-Time for update_with_key (100000): 109 wallclock secs (13.71 usr 11.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of all rows
-Time for update_big (500): 65 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing delete
-Time for delete_key (10000): 4 wallclock secs ( 0.50 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for delete_big (12): 20 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Insert into table with 16 keys and with a primary key with 16 parts
-Time for insert_key (100000): 159 wallclock secs ( 8.91 usr 4.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys
-Time for update_of_primary_key_many_keys (256): 67 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Deleting everything from table
-Time for delete_big_many_keys (2): 120 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 2ecb653e7f7..00000000000
--- a/sql-bench/Results/insert-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,58 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:19:26
-
-Testing the speed of inserting data into 1 table and do some selects on it.
-The tests are done with a table that has 100000 rows.
-
-Generating random keys
-Creating tables
-Inserting 100000 rows in order
-Inserting 100000 rows in reverse order
-Inserting 100000 rows in random order
-Time for insert (300000): 82 wallclock secs (17.82 usr 12.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for insert_duplicates (300000): 82 wallclock secs (17.82 usr 12.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data from the table
-Time for select_big (10:3000000): 31 wallclock secs (19.92 usr 10.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key (10:3000000): 31 wallclock secs (20.12 usr 10.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by (10:3000000): 48 wallclock secs (20.14 usr 16.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_diff_key (500:1000): 205 wallclock secs ( 0.19 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range_prefix (5010:42084): 11 wallclock secs ( 2.64 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (5010:42084): 10 wallclock secs ( 2.77 usr 0.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key_prefix (200000): 127 wallclock secs (65.92 usr 14.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key (200000): 125 wallclock secs (67.84 usr 14.15 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test of compares with simple ranges
-Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.45 usr 1.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (20000:43500): 7 wallclock secs ( 3.71 usr 0.90 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (111): 58 wallclock secs ( 0.04 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max_on_key (15000): 8 wallclock secs ( 4.68 usr 1.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max (60): 32 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (100): 56 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count (100): 46 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (20): 63 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys with functions
-Time for update_of_key (500): 23 wallclock secs ( 2.80 usr 2.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for update_of_key_big (501): 33 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update with key
-Time for update_with_key (100000): 97 wallclock secs (14.16 usr 13.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of all rows
-Time for update_big (500): 65 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing delete
-Time for delete_key (10000): 3 wallclock secs ( 0.44 usr 0.38 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for delete_big (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Insert into table with 16 keys and with a primary key with 16 parts
-Time for insert_key (100000): 91 wallclock secs ( 8.12 usr 4.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys
-Time for update_of_primary_key_many_keys (256): 54 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Deleting everything from table
-Time for delete_big_many_keys (2): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 1332 wallclock secs (254.96 usr 103.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index da6ee641174..00000000000
--- a/sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,98 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 8:18:54
-
-Testing the speed of inserting data into 1 table and do some selects on it.
-The tests are done with a table that has 100000 rows.
-
-Generating random keys
-Creating tables
-Inserting 100000 rows in order
-Inserting 100000 rows in reverse order
-Inserting 100000 rows in random order
-Time for insert (300000): 7486 wallclock secs (94.98 usr 16.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing insert of duplicates
-Time for insert_duplicates (100000): 3055 wallclock secs (60.75 usr 8.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data from the table
-Time for select_big (10:3000000): 54 wallclock secs (21.95 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key (10:3000000): 115 wallclock secs (22.06 usr 0.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key_desc (10:3000000): 116 wallclock secs (22.15 usr 0.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key2 (10:3000000): 118 wallclock secs (22.07 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big_key_diff (10:3000000): 126 wallclock secs (22.20 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_big (10:3000000): 121 wallclock secs (21.92 usr 0.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_range (500:125750): 16 wallclock secs ( 1.21 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key (500:125750): 15 wallclock secs ( 1.09 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key2_diff (500:250500): 19 wallclock secs ( 2.00 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_diff_key (500:1000): 13 wallclock secs ( 0.24 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-180 queries in 180 loops of 5000 loops took 653 seconds
-Estimated time for select_range_prefix (5000:1512): 18138 wallclock secs ( 5.00 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-165 queries in 165 loops of 5000 loops took 614 seconds
-Estimated time for select_range_key2 (5000:1386): 18606 wallclock secs ( 3.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-24340 queries in 12170 loops of 100000 loops took 601 seconds
-Estimated time for select_key_prefix (200000): 4938 wallclock secs (67.63 usr 10.85 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-24198 queries in 12099 loops of 100000 loops took 601 seconds
-Estimated time for select_key (200000): 4967 wallclock secs (68.44 usr 12.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-24362 queries in 12181 loops of 100000 loops took 601 seconds
-Estimated time for select_key2 (200000): 4933 wallclock secs (67.48 usr 11.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test of compares with simple ranges
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-1920 queries in 48 loops of 500 loops took 603 seconds
-Estimated time for select_range_prefix (20000:4176): 6281 wallclock secs ( 4.69 usr 0.52 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-1480 queries in 37 loops of 500 loops took 611 seconds
-Estimated time for select_range_key2 (20000:3219): 8256 wallclock secs ( 4.59 usr 1.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (111): 240 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-1314 queries in 219 loops of 2500 loops took 603 seconds
-Estimated time for min_max_on_key (15000): 6883 wallclock secs ( 4.00 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max (60): 58 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (100): 120 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count (100): 130 wallclock secs ( 0.01 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (20): 143 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys with functions
-Time for update_of_key (50000): 2460 wallclock secs (15.33 usr 3.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for update_of_key_big (501): 444 wallclock secs ( 0.20 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update with key
-Time for update_with_key (300000): 14806 wallclock secs (89.73 usr 16.29 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of all rows
-Time for update_big (10): 1894 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing INSERT INTO ... SELECT
-Time for insert_select_1_key (1): 49 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for insert_select_2_keys (1): 43 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for drop table(2): 20 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing delete
-Time for delete_key (10000): 283 wallclock secs ( 2.91 usr 0.52 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for delete_all (12): 341 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Insert into table with 16 keys and with a primary key with 16 parts
-Time for insert_key (100000): 3693 wallclock secs (33.29 usr 5.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys
-Time for update_of_primary_key_many_keys (256): 1164 wallclock secs ( 0.08 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Deleting rows from the table
-Time for delete_big_many_keys (128): 30 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Deleting everything from table
-Time for delete_all_many_keys (1): 31 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Estimated total time: 110214 wallclock secs (659.27 usr 91.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..5cbb52e1ddc
--- /dev/null
+++ b/sql-bench/Results/insert-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,103 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 4:22:08
+
+Testing the speed of inserting data into 1 table and do some selects on it.
+The tests are done with a table that has 100000 rows.
+
+Generating random keys
+Creating tables
+Inserting 100000 rows in order
+Inserting 100000 rows in reverse order
+Inserting 100000 rows in random order
+Time for insert (300000): 304 wallclock secs (88.91 usr 24.12 sys + 0.00 cusr 0.00 csys = 113.03 CPU)
+
+Testing insert of duplicates
+Time for insert_duplicates (100000): 120 wallclock secs (29.00 usr 13.77 sys + 0.00 cusr 0.00 csys = 42.77 CPU)
+
+Retrieving data from the table
+Time for select_big (10:3000000): 61 wallclock secs (22.36 usr 3.32 sys + 0.00 cusr 0.00 csys = 25.68 CPU)
+Time for order_by_big_key (10:3000000): 145 wallclock secs (26.12 usr 1.23 sys + 0.00 cusr 0.00 csys = 27.35 CPU)
+Time for order_by_big_key_desc (10:3000000): 145 wallclock secs (25.80 usr 1.41 sys + 0.00 cusr 0.00 csys = 27.21 CPU)
+Time for order_by_big_key_prefix (10:3000000): 132 wallclock secs (22.46 usr 0.83 sys + 0.00 cusr 0.00 csys = 23.29 CPU)
+Time for order_by_big_key2 (10:3000000): 133 wallclock secs (22.62 usr 0.93 sys + 0.00 cusr 0.00 csys = 23.55 CPU)
+Time for order_by_big_key_diff (10:3000000): 139 wallclock secs (22.46 usr 0.67 sys + 0.00 cusr 0.00 csys = 23.13 CPU)
+Time for order_by_big (10:3000000): 146 wallclock secs (22.57 usr 0.64 sys + 0.00 cusr 0.00 csys = 23.21 CPU)
+Time for order_by_range (500:125750): 4 wallclock secs ( 1.11 usr 0.04 sys + 0.00 cusr 0.00 csys = 1.15 CPU)
+Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 1.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.18 CPU)
+Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 2.09 usr 0.04 sys + 0.00 cusr 0.00 csys = 2.13 CPU)
+Time for select_diff_key (500:1000): 0 wallclock secs ( 0.16 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.18 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+195 queries in 195 loops of 5010 loops took 627 seconds
+Estimated time for select_range_prefix (5010:1638): 16109 wallclock secs ( 2.83 usr 0.00 sys + 0.00 cusr 0.00 csys = 2.83 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+195 queries in 195 loops of 5010 loops took 626 seconds
+Estimated time for select_range_key2 (5010:1638): 16083 wallclock secs ( 1.80 usr 0.00 sys + 0.00 cusr 0.00 csys = 1.80 CPU)
+Time for select_key_prefix (200000): 210 wallclock secs (67.51 usr 8.60 sys + 0.00 cusr 0.00 csys = 76.11 CPU)
+Time for select_key (200000): 245 wallclock secs (69.03 usr 8.64 sys + 0.00 cusr 0.00 csys = 77.67 CPU)
+Time for select_key_return_key (200000): 240 wallclock secs (67.26 usr 8.61 sys + 0.00 cusr 0.00 csys = 75.87 CPU)
+Time for select_key2 (200000): 209 wallclock secs (67.94 usr 8.08 sys + 0.00 cusr 0.00 csys = 76.02 CPU)
+Time for select_key2_return_key (200000): 201 wallclock secs (63.19 usr 8.05 sys + 0.00 cusr 0.00 csys = 71.24 CPU)
+Time for select_key2_return_prim (200000): 204 wallclock secs (64.84 usr 7.89 sys + 0.00 cusr 0.00 csys = 72.73 CPU)
+
+Test of compares with simple ranges
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+2080 queries in 52 loops of 500 loops took 612 seconds
+Estimated time for select_range_prefix (20000:4524): 5884 wallclock secs ( 3.37 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.85 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+2040 queries in 51 loops of 500 loops took 601 seconds
+Estimated time for select_range_key2 (20000:4437): 5892 wallclock secs ( 4.02 usr 0.10 sys + 0.00 cusr 0.00 csys = 4.12 CPU)
+Time for select_group (111): 272 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+1410 queries in 235 loops of 2500 loops took 602 seconds
+Estimated time for min_max_on_key (15000): 6404 wallclock secs ( 4.36 usr 0.96 sys + 0.00 cusr 0.00 csys = 5.32 CPU)
+Time for min_max (60): 59 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
+Time for count_on_key (100): 114 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+Time for count (100): 131 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.06 CPU)
+Time for count_distinct_big (20): 203 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Testing update of keys with functions
+Time for update_of_key (50000): 119 wallclock secs (16.20 usr 10.81 sys + 0.00 cusr 0.00 csys = 27.01 CPU)
+Time for update_of_key_big (501): 333 wallclock secs ( 0.21 usr 0.21 sys + 0.00 cusr 0.00 csys = 0.42 CPU)
+
+Testing update with key
+Time for update_with_key (300000): 567 wallclock secs (90.20 usr 25.08 sys + 0.00 cusr 0.00 csys = 115.28 CPU)
+Time for update_with_key_prefix (100000): 244 wallclock secs (29.03 usr 5.64 sys + 0.00 cusr 0.00 csys = 34.67 CPU)
+
+Testing update of all rows
+Time for update_big (10): 6612 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing left outer join
+Time for outer_join_on_key (10:10): 3961 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join (10:10): 4093 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join_found (10:10): 4086 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+2 queries in 2 loops of 500 loops took 814 seconds
+Estimated time for outer_join_not_found (500:500): 203500 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing INSERT INTO ... SELECT
+Time for insert_select_1_key (1): 111 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert_select_2_keys (1): 180 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for drop table(2): 18 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing delete
+Time for delete_key (10000): 136 wallclock secs ( 3.08 usr 0.59 sys + 0.00 cusr 0.00 csys = 3.67 CPU)
+Time for delete_all (12): 3191 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Insert into table with 16 keys and with a primary key with 16 parts
+Time for insert_key (100000): 907 wallclock secs (45.53 usr 60.49 sys + 0.00 cusr 0.00 csys = 106.02 CPU)
+
+Testing update of keys
+Time for update_of_primary_key_many_keys (256): 6813 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU)
+
+Deleting rows from the table
+Time for delete_big_many_keys (128): 118 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU)
+
+Deleting everything from table
+Time for delete_all_many_keys (1): 118 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU)
+
+Estimated total time: 288864 wallclock secs (887.56 usr 201.43 sys + 0.00 cusr 0.00 csys = 1088.99 CPU)
diff --git a/sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index d38cc95311e..00000000000
--- a/sql-bench/Results/insert-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,85 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 3:39:16
-
-Testing the speed of inserting data into 1 table and do some selects on it.
-The tests are done with a table that has 100000 rows.
-
-Generating random keys
-Creating tables
-Inserting 100000 rows in order
-Inserting 100000 rows in reverse order
-Inserting 100000 rows in random order
-Time for insert (300000): 315 wallclock secs (88.93 usr 13.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 6 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for insert_duplicates (300000): 321 wallclock secs (88.94 usr 13.94 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 3 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Retrieving data from the table
-Time for select_big (10:3000000): 52 wallclock secs (22.48 usr 0.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by_key (10:3000000): 103 wallclock secs (22.46 usr 0.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for order_by (10:3000000): 103 wallclock secs (22.63 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_diff_key (500:1000): 1 wallclock secs ( 0.23 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range_prefix (5010:42084): 30 wallclock secs ( 2.82 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (5010:42084): 29 wallclock secs ( 3.04 usr 0.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key_prefix (200000): 188 wallclock secs (65.88 usr 9.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_key (200000): 188 wallclock secs (65.70 usr 9.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Test of compares with simple ranges
-Time for select_range_prefix (20000:43500): 14 wallclock secs ( 3.46 usr 0.53 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (20000:43500): 13 wallclock secs ( 3.53 usr 0.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_group (111): 223 wallclock secs ( 0.04 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-1446 queries in 241 loops of 2500 loops took 602 seconds
-Estimated time for min_max_on_key (15000): 6244 wallclock secs ( 4.77 usr 0.83 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max (60): 53 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (100): 112 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count (100): 119 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (20): 138 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys with functions
-Time for update_of_key (500): 97 wallclock secs (14.01 usr 2.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 41 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for update_of_key_big (501): 559 wallclock secs ( 0.21 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 115 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update with key
-Time for update_with_key (100000): 449 wallclock secs (91.48 usr 14.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of all rows
-Time for update_big (500): 1832 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing delete
-Time for delete_key (10000): 15 wallclock secs ( 2.84 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for delete_big (12): 100 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Insert into table with 16 keys and with a primary key with 16 parts
-Time for insert_key (100000): 1367 wallclock secs (32.13 usr 5.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 8 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing update of keys
-Time for update_of_primary_key_many_keys (256): 1491 wallclock secs ( 0.07 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 2489 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Deleting everything from table
-Time for delete_big_many_keys (2): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Estimated total time: 16506 wallclock secs (446.80 usr 59.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..7cf90e5b34b
--- /dev/null
+++ b/sql-bench/Results/insert-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,104 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 18:27:26
+
+Testing the speed of inserting data into 1 table and do some selects on it.
+The tests are done with a table that has 100000 rows.
+
+Generating random keys
+Creating tables
+Inserting 100000 rows in order
+Inserting 100000 rows in reverse order
+Inserting 100000 rows in random order
+Time for insert (300000): 296 wallclock secs (89.01 usr 24.43 sys + 0.00 cusr 0.00 csys = 113.44 CPU)
+
+Time for book-keeping (1): 8 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing insert of duplicates
+Time for insert_duplicates (100000): 111 wallclock secs (28.41 usr 9.26 sys + 0.00 cusr 0.00 csys = 37.67 CPU)
+
+Retrieving data from the table
+Time for select_big (10:3000000): 55 wallclock secs (22.58 usr 2.28 sys + 0.00 cusr 0.00 csys = 24.86 CPU)
+Time for order_by_big_key (10:3000000): 150 wallclock secs (25.91 usr 1.24 sys + 0.00 cusr 0.00 csys = 27.15 CPU)
+Time for order_by_big_key_desc (10:3000000): 147 wallclock secs (25.81 usr 1.23 sys + 0.00 cusr 0.00 csys = 27.04 CPU)
+Time for order_by_big_key_prefix (10:3000000): 133 wallclock secs (22.64 usr 0.62 sys + 0.00 cusr 0.00 csys = 23.26 CPU)
+Time for order_by_big_key2 (10:3000000): 137 wallclock secs (22.59 usr 0.71 sys + 0.00 cusr 0.00 csys = 23.30 CPU)
+Time for order_by_big_key_diff (10:3000000): 143 wallclock secs (22.68 usr 0.55 sys + 0.00 cusr 0.00 csys = 23.23 CPU)
+Time for order_by_big (10:3000000): 147 wallclock secs (22.48 usr 0.61 sys + 0.00 cusr 0.00 csys = 23.09 CPU)
+Time for order_by_range (500:125750): 4 wallclock secs ( 1.04 usr 0.04 sys + 0.00 cusr 0.00 csys = 1.08 CPU)
+Time for order_by_key_prefix (500:125750): 3 wallclock secs ( 1.48 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.51 CPU)
+Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 2.07 usr 0.04 sys + 0.00 cusr 0.00 csys = 2.11 CPU)
+Time for select_diff_key (500:1000): 1 wallclock secs ( 0.21 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.21 CPU)
+Time for select_range_prefix (5010:42084): 34 wallclock secs ( 2.90 usr 0.27 sys + 0.00 cusr 0.00 csys = 3.17 CPU)
+Time for select_range_key2 (5010:42084): 33 wallclock secs ( 2.72 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.02 CPU)
+Time for select_key_prefix (200000): 192 wallclock secs (67.39 usr 7.56 sys + 0.00 cusr 0.00 csys = 74.95 CPU)
+Time for select_key (200000): 213 wallclock secs (67.07 usr 8.38 sys + 0.00 cusr 0.00 csys = 75.45 CPU)
+Time for select_key_return_key (200000): 208 wallclock secs (65.98 usr 8.96 sys + 0.00 cusr 0.00 csys = 74.94 CPU)
+Time for select_key2 (200000): 192 wallclock secs (67.06 usr 8.24 sys + 0.00 cusr 0.00 csys = 75.30 CPU)
+Time for select_key2_return_key (200000): 183 wallclock secs (63.93 usr 8.32 sys + 0.00 cusr 0.00 csys = 72.25 CPU)
+Time for select_key2_return_prim (200000): 188 wallclock secs (64.56 usr 8.71 sys + 0.00 cusr 0.00 csys = 73.27 CPU)
+
+Test of compares with simple ranges
+Time for select_range_prefix (20000:43500): 14 wallclock secs ( 3.73 usr 0.38 sys + 0.00 cusr 0.00 csys = 4.11 CPU)
+Time for select_range_key2 (20000:43500): 14 wallclock secs ( 3.84 usr 0.37 sys + 0.00 cusr 0.00 csys = 4.21 CPU)
+Time for select_group (111): 267 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.06 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+1398 queries in 233 loops of 2500 loops took 601 seconds
+Estimated time for min_max_on_key (15000): 6448 wallclock secs ( 4.83 usr 0.54 sys + 0.00 cusr 0.00 csys = 5.36 CPU)
+Time for min_max (60): 58 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+Time for count_on_key (100): 115 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+Time for count (100): 132 wallclock secs ( 0.04 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.04 CPU)
+Time for count_distinct_big (20): 204 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Testing update of keys with functions
+Time for update_of_key (50000): 90 wallclock secs (14.87 usr 5.98 sys + 0.00 cusr 0.00 csys = 20.85 CPU)
+Time for book-keeping (1): 58 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time for update_of_key_big (501): 647 wallclock secs ( 0.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.18 CPU)
+
+Time for book-keeping (1): 236 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing update with key
+Time for update_with_key (300000): 470 wallclock secs (87.85 usr 41.80 sys + 0.00 cusr 0.00 csys = 129.65 CPU)
+Time for update_with_key_prefix (100000): 170 wallclock secs (31.13 usr 15.28 sys + 0.00 cusr 0.00 csys = 46.41 CPU)
+
+Testing update of all rows
+Time for update_big (10): 3883 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Testing left outer join
+Time for outer_join_on_key (10:10): 238 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join (10:10): 253 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join_found (10:10): 243 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for outer_join_not_found (500:10): 242 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Testing INSERT INTO ... SELECT
+Time for insert_select_1_key (1): 45 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for insert_select_2_keys (1): 77 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+Time for book-keeping (1): 1626 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+
+Testing delete
+Time for delete_key (10000): 11 wallclock secs ( 3.02 usr 0.37 sys + 0.00 cusr 0.00 csys = 3.39 CPU)
+Time for delete_all (12): 11 wallclock secs ( 0.01 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.02 CPU)
+
+Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Insert into table with 16 keys and with a primary key with 16 parts
+Time for insert_key (100000): 895 wallclock secs (45.94 usr 68.46 sys + 0.00 cusr 0.00 csys = 114.40 CPU)
+
+Time for book-keeping (1): 16 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Testing update of keys
+Time for update_of_primary_key_many_keys (256): 835 wallclock secs ( 0.10 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.19 CPU)
+
+Time for book-keeping (1): 1298 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Deleting rows from the table
+Time for delete_big_many_keys (128): 3 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+
+Deleting everything from table
+Time for delete_all_many_keys (1): 3 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Estimated total time: 21187 wallclock secs (884.26 usr 225.15 sys + 0.00 cusr 0.00 csys = 1109.40 CPU)
diff --git a/sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 45bb324ec79..00000000000
--- a/sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,23 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:22:00
-
-Testing the speed of selecting on keys that consist of many parts
-The test-table has 10000 rows and the test is done with 12 ranges.
-
-Creating table
-Inserting 10000 rows
-Time to insert (10000): 4 wallclock secs ( 0.81 usr 0.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing big selects on the table
-Time for select_big (70:17207): 1 wallclock secs ( 0.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (410:75949): 2 wallclock secs ( 0.65 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max_on_key (70000): 205 wallclock secs (20.60 usr 3.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (50000): 518 wallclock secs (16.08 usr 3.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for count_group_on_key_parts (1000:0): 61 wallclock secs ( 1.09 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Testing count(distinct) on the table
-Time for count_distinct (1000:2000): 124 wallclock secs ( 0.65 usr 0.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key (1000:6000): 64 wallclock secs ( 0.37 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key_parts (1000:100000): 77 wallclock secs ( 0.93 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group (1000:100000): 77 wallclock secs ( 0.94 usr 0.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (1000:10000000): 559 wallclock secs (69.04 usr 55.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 966d3010631..00000000000
--- a/sql-bench/Results/select-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,23 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 19:41:39
-
-Testing the speed of selecting on keys that consist of many parts
-The test-table has 10000 rows and the test is done with 12 ranges.
-
-Creating table
-Inserting 10000 rows
-Time to insert (10000): 4 wallclock secs ( 0.85 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing big selects on the table
-Time for select_big (70:17207): 0 wallclock secs ( 0.10 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (410:75949): 3 wallclock secs ( 0.79 usr 0.18 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for min_max_on_key (70000): 202 wallclock secs (20.23 usr 4.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_on_key (50000): 517 wallclock secs (16.44 usr 3.18 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for count_group_on_key_parts (1000:0): 61 wallclock secs ( 1.03 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Testing count(distinct) on the table
-Time for count_distinct (1000:2000): 124 wallclock secs ( 0.57 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key (1000:6000): 65 wallclock secs ( 0.35 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key_parts (1000:100000): 77 wallclock secs ( 1.07 usr 0.35 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group (1000:100000): 77 wallclock secs ( 1.14 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (1000:10000000): 566 wallclock secs (70.60 usr 55.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Total time: 1696 wallclock secs (113.17 usr 64.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 60e5348568b..00000000000
--- a/sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,29 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 20:00:31
-
-Testing the speed of selecting on keys that consist of many parts
-The test-table has 10000 rows and the test is done with 12 ranges.
-
-Creating table
-Inserting 10000 rows
-Time to insert (10000): 254 wallclock secs ( 3.11 usr 0.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing big selects on the table
-Time for select_big (70:17207): 2 wallclock secs ( 0.17 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (410:75949): 35 wallclock secs ( 0.87 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-9807 queries in 1401 loops of 10000 loops took 601 seconds
-Estimated time for min_max_on_key (70000): 4289 wallclock secs (20.56 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-12395 queries in 2479 loops of 10000 loops took 601 seconds
-Estimated time for count_on_key (50000): 2424 wallclock secs (16.70 usr 2.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for count_group_on_key_parts (1000:100000): 242 wallclock secs ( 1.19 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Testing count(distinct) on the table
-Time for count_distinct (2000:2000): 235 wallclock secs ( 0.76 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key (1000:6000): 174 wallclock secs ( 0.44 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key_parts (1000:100000): 270 wallclock secs ( 1.43 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group (1000:100000): 271 wallclock secs ( 1.27 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (100:1000000): 57 wallclock secs ( 8.24 usr 0.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Estimated total time: 8255 wallclock secs (54.76 usr 6.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..c53058af7bf
--- /dev/null
+++ b/sql-bench/Results/select-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,36 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 15:10:04
+
+Testing the speed of selecting on keys that consist of many parts
+The test-table has 10000 rows and the test is done with 500 ranges.
+
+Creating table
+Inserting 10000 rows
+Time to insert (10000): 9 wallclock secs ( 2.91 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.21 CPU)
+
+Test if the database has a query cache
+Time for select_query_cache (10000): 2623 wallclock secs ( 3.22 usr 0.37 sys + 0.00 cusr 0.00 csys = 3.59 CPU)
+
+Time for select_query_cache2 (10000): 2622 wallclock secs ( 2.73 usr 0.47 sys + 0.00 cusr 0.00 csys = 3.20 CPU)
+
+Testing big selects on the table
+Time for select_big (70:17207): 1 wallclock secs ( 0.12 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.13 CPU)
+Time for select_range (410:1057904): 491 wallclock secs (11.40 usr 0.50 sys + 0.00 cusr 0.00 csys = 11.90 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+11893 queries in 1699 loops of 10000 loops took 601 seconds
+Estimated time for min_max_on_key (70000): 3537 wallclock secs (21.54 usr 3.06 sys + 0.00 cusr 0.00 csys = 24.60 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+17720 queries in 3544 loops of 10000 loops took 601 seconds
+Estimated time for count_on_key (50000): 1695 wallclock secs (15.49 usr 2.14 sys + 0.00 cusr 0.00 csys = 17.64 CPU)
+
+Time for count_group_on_key_parts (1000:100000): 332 wallclock secs ( 1.20 usr 0.04 sys + 0.00 cusr 0.00 csys = 1.24 CPU)
+Testing count(distinct) on the table
+Time for count_distinct_key_prefix (1000:1000): 188 wallclock secs ( 0.33 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.38 CPU)
+Time for count_distinct (1000:1000): 131 wallclock secs ( 0.29 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.31 CPU)
+Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.43 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.49 CPU)
+Time for count_distinct_group_on_key (1000:6000): 485 wallclock secs ( 0.38 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.41 CPU)
+Time for count_distinct_group_on_key_parts (1000:100000): 381 wallclock secs ( 1.23 usr 0.05 sys + 0.00 cusr 0.00 csys = 1.28 CPU)
+Time for count_distinct_group (1000:100000): 384 wallclock secs ( 1.12 usr 0.07 sys + 0.00 cusr 0.00 csys = 1.19 CPU)
+Time for count_distinct_big (100:1000000): 65 wallclock secs ( 8.50 usr 0.17 sys + 0.00 cusr 0.00 csys = 8.67 CPU)
+Estimated total time: 13160 wallclock secs (70.90 usr 7.35 sys + 0.00 cusr 0.00 csys = 78.25 CPU)
diff --git a/sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 14c86c54550..00000000000
--- a/sql-bench/Results/select-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,38 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 6:40:22
-
-Testing the speed of selecting on keys that consist of many parts
-The test-table has 10000 rows and the test is done with 12 ranges.
-
-Creating table
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting 10000 rows
-Time to insert (10000): 12 wallclock secs ( 3.13 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Testing big selects on the table
-Time for select_big (70:17207): 1 wallclock secs ( 0.14 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for select_range (410:75949): 24 wallclock secs ( 0.92 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-16968 queries in 2424 loops of 10000 loops took 601 seconds
-Estimated time for min_max_on_key (70000): 2479 wallclock secs (20.34 usr 2.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Note: Query took longer then time-limit: 600
-Estimating end time based on:
-27270 queries in 5454 loops of 10000 loops took 601 seconds
-Estimated time for count_on_key (50000): 1101 wallclock secs (15.60 usr 2.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time for count_group_on_key_parts (1000:0): 216 wallclock secs ( 1.37 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Testing count(distinct) on the table
-Time for count_distinct (1000:2000): 185 wallclock secs ( 0.71 usr 0.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key (1000:6000): 145 wallclock secs ( 0.33 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group_on_key_parts (1000:100000): 246 wallclock secs ( 1.09 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_group (1000:100000): 246 wallclock secs ( 1.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time for count_distinct_big (1000:10000000): 529 wallclock secs (82.37 usr 2.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Estimated total time: 5187 wallclock secs (127.12 usr 9.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..4f624d80112
--- /dev/null
+++ b/sql-bench/Results/select-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,42 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-11 22:43:08
+
+Testing the speed of selecting on keys that consist of many parts
+The test-table has 10000 rows and the test is done with 500 ranges.
+
+Creating table
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Inserting 10000 rows
+Time to insert (10000): 16 wallclock secs ( 3.01 usr 0.33 sys + 0.00 cusr 0.00 csys = 3.34 CPU)
+
+Time for book-keeping (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Test if the database has a query cache
+Time for select_query_cache (10000): 2643 wallclock secs ( 3.20 usr 0.43 sys + 0.00 cusr 0.00 csys = 3.63 CPU)
+
+Time for select_query_cache2 (10000): 2642 wallclock secs ( 3.26 usr 0.43 sys + 0.00 cusr 0.00 csys = 3.69 CPU)
+
+Testing big selects on the table
+Time for select_big (70:17207): 1 wallclock secs ( 0.12 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.13 CPU)
+Time for select_range (410:1057904): 481 wallclock secs (11.87 usr 1.04 sys + 0.00 cusr 0.00 csys = 12.91 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+12019 queries in 1717 loops of 10000 loops took 601 seconds
+Estimated time for min_max_on_key (70000): 3500 wallclock secs (24.99 usr 4.95 sys + 0.00 cusr 0.00 csys = 29.94 CPU)
+Note: Query took longer then time-limit: 600
+Estimating end time based on:
+18105 queries in 3621 loops of 10000 loops took 601 seconds
+Estimated time for count_on_key (50000): 1659 wallclock secs (14.19 usr 1.80 sys + 0.00 cusr 0.00 csys = 15.99 CPU)
+
+Time for count_group_on_key_parts (1000:100000): 332 wallclock secs ( 1.14 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.17 CPU)
+Testing count(distinct) on the table
+Time for count_distinct_key_prefix (1000:1000): 188 wallclock secs ( 0.38 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.40 CPU)
+Time for count_distinct (1000:1000): 131 wallclock secs ( 0.34 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.39 CPU)
+Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.38 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.41 CPU)
+Time for count_distinct_group_on_key (1000:6000): 209 wallclock secs ( 0.35 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.44 CPU)
+Time for count_distinct_group_on_key_parts (1000:100000): 382 wallclock secs ( 1.16 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.22 CPU)
+Time for count_distinct_group (1000:100000): 385 wallclock secs ( 1.14 usr 0.09 sys + 0.00 cusr 0.00 csys = 1.23 CPU)
+Time for count_distinct_big (100:1000000): 65 wallclock secs ( 8.53 usr 0.26 sys + 0.00 cusr 0.00 csys = 8.79 CPU)
+Time for book-keeping (1): 2 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Estimated total time: 12852 wallclock secs (74.09 usr 9.62 sys + 0.00 cusr 0.00 csys = 83.71 CPU)
diff --git a/sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index c2eb314c1f4..00000000000
--- a/sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:50:12
-
-Wisconsin benchmark test
-
-Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (31000): 11 wallclock secs ( 1.12 usr 0.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Running actual benchmark
-Time for wisc_benchmark (114): 4 wallclock secs ( 1.75 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 6c2f9506a2c..00000000000
--- a/sql-bench/Results/wisconsin-mysql_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'MySQL 3.23.22 beta' at 2000-08-17 20:09:56
-
-Wisconsin benchmark test
-
-Time for create_table (3): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (31000): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Running actual benchmark
-Time for wisc_benchmark (114): 4 wallclock secs ( 1.66 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 6 wallclock secs ( 1.67 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 23f8f49f12c..00000000000
--- a/sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,14 +0,0 @@
-Testing server 'PostgreSQL version ???' at 2000-12-05 20:46:15
-
-Wisconsin benchmark test
-
-Time for create_table (3): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (31000): 793 wallclock secs ( 8.99 usr 1.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Running actual benchmark
-Time for wisc_benchmark (114): 18 wallclock secs ( 3.04 usr 0.25 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 813 wallclock secs (12.05 usr 2.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..9e5dceb2b76
--- /dev/null
+++ b/sql-bench/Results/wisconsin-pg-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,14 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 17:42:14
+
+Wisconsin benchmark test
+
+Time for create_table (3): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Inserting data
+Time to insert (31000): 32 wallclock secs ( 9.14 usr 1.27 sys + 0.00 cusr 0.00 csys = 10.41 CPU)
+Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Running actual benchmark
+Time for wisc_benchmark (114): 16 wallclock secs ( 3.54 usr 1.02 sys + 0.00 cusr 0.00 csys = 4.56 CPU)
+
+Total time: 55 wallclock secs (12.69 usr 2.29 sys + 0.00 cusr 0.00 csys = 14.98 CPU)
diff --git a/sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
deleted file mode 100644
index 58cc9c98970..00000000000
--- a/sql-bench/Results/wisconsin-pg_fast-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg
+++ /dev/null
@@ -1,26 +0,0 @@
-Testing server 'PostgreSQL version 7.0.2' at 2000-08-17 7:27:10
-
-Wisconsin benchmark test
-
-Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Inserting data
-Time to insert (31000): 39 wallclock secs ( 8.92 usr 1.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Time to delete_big (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Running actual benchmark
-Time for wisc_benchmark (114): 15 wallclock secs ( 3.21 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-NOTICE: Vacuum: table not found
-Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
-
-Total time: 60 wallclock secs (12.14 usr 1.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
diff --git a/sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg b/sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
new file mode 100644
index 00000000000..a58c22fc6f2
--- /dev/null
+++ b/sql-bench/Results/wisconsin-pg_fast-Linux_2.4.2_64GB_SMP_i686-cmp-mysql,pg
@@ -0,0 +1,22 @@
+Testing server 'PostgreSQL version 7.1.2' at 2001-06-12 1:11:23
+
+Wisconsin benchmark test
+
+Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Inserting data
+Time to insert (31000): 39 wallclock secs ( 9.47 usr 3.11 sys + 0.00 cusr 0.00 csys = 12.58 CPU)
+Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Time to delete_big (1): 2 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
+
+Time for book-keeping (1): 2 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Running actual benchmark
+Time for wisc_benchmark (114): 18 wallclock secs ( 3.58 usr 0.20 sys + 0.00 cusr 0.00 csys = 3.78 CPU)
+
+Time for book-keeping (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
+
+Total time: 64 wallclock secs (13.06 usr 3.32 sys + 0.00 cusr 0.00 csys = 16.38 CPU)
diff --git a/sql-bench/bench-init.pl.sh b/sql-bench/bench-init.pl.sh
index a30e9b3d9c9..adfa114f569 100644
--- a/sql-bench/bench-init.pl.sh
+++ b/sql-bench/bench-init.pl.sh
@@ -31,7 +31,7 @@
# $server Object for current server
# $limits Hash reference to limits for benchmark
-$benchmark_version="2.12";
+$benchmark_version="2.13";
use Getopt::Long;
require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n";
diff --git a/sql-bench/compare-results.sh b/sql-bench/compare-results.sh
index d8a358ed171..9e3a8f2add8 100644
--- a/sql-bench/compare-results.sh
+++ b/sql-bench/compare-results.sh
@@ -25,7 +25,7 @@ use Getopt::Long;
$opt_server="mysql";
$opt_dir="output";
-$opt_machine="";
+$opt_machine=$opt_cmp="";
$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=$opt_no_bars=$opt_verbose=0;
GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count","no-bars","html","verbose") || usage();
@@ -53,10 +53,6 @@ if ($#ARGV == -1)
@ARGV=glob($files);
$automatic_files=1;
}
-else
-{
- $opt_cmp="";
-}
foreach (@ARGV)
{
diff --git a/sql-bench/crash-me.sh b/sql-bench/crash-me.sh
index badbcc85288..f6985adc5c0 100644
--- a/sql-bench/crash-me.sh
+++ b/sql-bench/crash-me.sh
@@ -38,7 +38,7 @@
# as such, and clarify ones such as "mediumint" with comments such as
# "3-byte int" or "same as xxx".
-$version="1.56";
+$version="1.57";
use DBI;
use Getopt::Long;
@@ -1539,12 +1539,24 @@ report("insert INTO ... SELECT ...","insert_select",
"insert into crash_q (a) SELECT crash_me.a from crash_me",
"drop table crash_q $drop_attr");
-report_trans("transactions","transactions",
- [create_table("crash_q",["a integer not null"],[]),
- "insert into crash_q values (1)"],
- "select * from crash_q",
- "drop table crash_q $drop_attr"
- );
+if (!defined($limits{"transactions"}))
+{
+ my ($limit,$type);
+ $limit="transactions";
+ print "$limit: ";
+ foreach $type (('', 'type=bdb', 'type=innodb', 'type=gemini'))
+ {
+ undef($limits{$limit});
+ last if (!report_trans($limit,
+ [create_table("crash_q",["a integer not null"],[],
+ $type),
+ "insert into crash_q values (1)"],
+ "select * from crash_q",
+ "drop table crash_q $drop_attr"
+ ));
+ }
+ print "$limits{$limit}\n";
+}
report("atomic updates","atomic_updates",
create_table("crash_q",["a integer not null"],["primary key (a)"]),
@@ -2500,8 +2512,7 @@ sub report_result
sub report_trans
{
- my ($prompt,$limit,$queries,$check,$clear)=@_;
- print "$prompt: ";
+ my ($limit,$queries,$check,$clear)=@_;
if (!defined($limits{$limit}))
{
eval {undef($dbh->{AutoCommit})};
@@ -2518,7 +2529,6 @@ sub report_trans
safe_query($clear);
} else {
$dbh->{AutoCommit} = 1;
- safe_query($clear);
save_config_data($limit,"error",$prompt);
}
} else {
@@ -2532,8 +2542,7 @@ sub report_trans
}
safe_query($clear);
}
- print "$limits{$limit}\n";
- return $limits{$limit} ne "no";
+ return $limits{$limit} ne "yes";
}
@@ -2961,9 +2970,11 @@ sub sql_concat
sub create_table
{
- my($table_name,$fields,$index) = @_;
+ my($table_name,$fields,$index,$extra) = @_;
my($query,$nr,$parts,@queries,@index);
+ $extra="" if (!defined($extra));
+
$query="create table $table_name (";
$nr=0;
foreach $field (@$fields)
@@ -3015,7 +3026,7 @@ sub create_table
}
}
chop($query);
- $query.= ')';
+ $query.= ") $extra";
unshift(@queries,$query);
return @queries;
}
diff --git a/sql-bench/graph-compare-results.sh b/sql-bench/graph-compare-results.sh
new file mode 100644
index 00000000000..317ef583886
--- /dev/null
+++ b/sql-bench/graph-compare-results.sh
@@ -0,0 +1,660 @@
+####
+#### Hello ... this is a heavily hacked script by Luuk
+#### instead of printing the result it makes a nice gif
+#### when you want to look at the code ... beware of the
+#### ugliest code ever seen .... but it works ...
+#### and that's sometimes the only thing you want ... isn't it ...
+#### as the original script ... Hope you like it
+####
+#### Greetz..... Luuk de Boer 1997.
+####
+
+## if you want the seconds behind the bar printed or not ...
+## or only the one where the bar is too big for the graph ...
+## look at line 535 of this program and below ...
+## look in sub calculate for allmost all hard/soft settings :-)
+
+# a little program to generate a table of results
+# just read all the RUN-*.log files and format them nicely
+# Made by Luuk de Boer
+# Patched by Monty
+
+use Getopt::Long;
+use GD;
+
+$opt_server="mysql";
+$opt_cmp="mysql,pg,solid";
+$opt_cmp="msql,mysql,pg,solid";
+$opt_cmp="empress,mysql,pg,solid";
+$opt_dir="output";
+$opt_machine="";
+$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=0;
+
+GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count") || usage();
+
+usage() if ($opt_help || $opt_Information);
+
+if ($opt_same_server)
+{
+ $files="$opt_dir/RUN-$opt_server-*$opt_machine";
+}
+else
+{
+ $files="$opt_dir/RUN-*$opt_machine";
+}
+$files.= "-cmp-$opt_cmp" if (length($opt_cmp));
+
+$automatic_files=0;
+if ($#ARGV == -1)
+{
+ @ARGV=glob($files);
+ $automatic_files=1;
+}
+
+
+#
+# Go trough all RUN files and gather statistics.
+#
+
+foreach (@ARGV)
+{
+ $filename = $_;
+ next if (defined($found{$_})); # remove duplicates
+ $found{$_}=1;
+ /RUN-(.*)$/;
+ $prog = $1;
+ push(@key_order,$prog);
+ $next = 0;
+ open(TMP, "<$filename") || die "Can't open $filename: $!\n";
+ while (<TMP>)
+ {
+ chomp;
+ if ($next == 0) {
+ if (/Server version:\s+(\S+.*)/i)
+ {
+ $tot{$prog}{'server'} = $1;
+ }
+ elsif (/Arguments:\s+(.+)/i)
+ {
+ $tot{$prog}{'arguments'} = $1;
+ # Remove some standard, not informative arguments
+ $tot{$prog}{'arguments'} =~ s/--log|--use-old-results|--server=\S+|--cmp=\S+|--user=\S+|--pass=\S+|--machine=\S+//g;
+ $tot{$prog}{'arguments'} =~ s/\s+/ /g;
+ }
+ elsif (/Comments:\s+(.+)/i) {
+ $tot{$prog}{'comments'} = $1;
+ } elsif (/^(\S+):\s*(estimated\s|)total\stime:\s+(\d+)\s+secs/i)
+ {
+ $tmp = $1; $tmp =~ s/://;
+ $tot{$prog}{$tmp} = [ $3, (length($2) ? "+" : "")];
+ $op1{$tmp} = $tmp;
+ } elsif (/Totals per operation:/i) {
+ $next = 1;
+ next;
+ }
+ }
+ elsif ($next == 1)
+ {
+ if (/^(\S+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s*([+|?])*/)
+ {
+ $tot1{$prog}{$1} = [$2,$6,$7];
+ $op{$1} = $1;
+#print "TEST - $_ \n * $prog - $1 - $2 - $6 - $7 ****\n";
+# $prog - filename
+# $1 - operation
+# $2 - time in secs
+# $6 - number of loops
+# $7 - nothing / + / ? / * => estimated time ...
+ # get the highest value ....
+ $highest = ($2/$6) if (($highest < ($2/$6)) && ($1 !~/TOTALS/i));
+ $gifcount++;
+ $giftotal += ($2/$6);
+ }
+ }
+ }
+}
+
+if (!%op)
+{
+ print "Didn't find any files matching: '$files'\n";
+ print "Use the --cmp=server,server option to compare benchmarks\n";
+ exit 1;
+}
+
+
+# everything is loaded ...
+# now we have to create a fancy output :-)
+
+# I prefer to redirect scripts instead to force it to file ; Monty
+#
+# open(RES, ">$resultfile") || die "Can't write to $resultfile: $!\n";
+# select(RES)
+#
+
+#print <<EOF;
+#<cut for this moment>
+#
+#EOF
+
+if ($opt_relative)
+{
+# print "Column 1 is in seconds. All other columns are presented relative\n";
+# print "to this. 1.00 is the same, bigger numbers indicates slower\n\n";
+}
+
+#print "The result logs which where found and the options:\n";
+
+if ($automatic_files)
+{
+ if ($key_order[$i] =~ /^$opt_server/)
+ {
+ if ($key_order[$i] =~ /^$opt_server/)
+ {
+ unshift(@key_order,$key_order[$i]);
+ splice(@key_order,$i+1,1);
+ }
+ }
+}
+# extra for mysql and mysql_pgcc
+#$number1 = shift(@key_order);
+#$number2 = shift(@key_order);
+#unshift(@key_order,$number1);
+#unshift(@key_order,$number2);
+
+# Print header
+
+$column_count=0;
+foreach $key (@key_order)
+{
+ $column_count++;
+# printf "%2d %-40.40s: %s %s\n", $column_count, $key,
+# $tot{$key}{'server'}, $tot{$key}{'arguments'};
+# print "Comments: $tot{$key}{'comments'}\n"
+# if ($tot{$key}{'comments'} =~ /\w+/);
+}
+
+#print "\n";
+
+$namewidth=$opt_skip_count ? 20 :25;
+$colwidth= $opt_relative ? 9 : 6;
+
+print_sep("=");
+#printf "%-$namewidth.${namewidth}s|", "Operation";
+$count = 1;
+foreach $key (@key_order)
+{
+# printf "%${colwidth}d|", $count;
+ $count++;
+}
+#print "\n";
+#print_sep("-");
+#print_string("Results per test:");
+#print_sep("-");
+
+foreach $key (sort {$a cmp $b} keys %op1)
+{
+# printf "%-$namewidth.${namewidth}s|", $key;
+ $first=undef();
+ foreach $server (@key_order)
+ {
+ print_value($first,$tot{$server}{$key}->[0],$tot{$server}{$key}->[1]);
+ $first=$tot{$server}{$key}->[0] if (!defined($first));
+ }
+# print "\n";
+}
+
+print_sep("-");
+print_string("The results per operation:");
+print_sep("-");
+$luukcounter = 1;
+foreach $key (sort {$a cmp $b} keys %op)
+{
+ next if ($key =~ /TOTALS/i);
+ $tmp=$key;
+ $tmp.= " (" . $tot1{$key_order[0]}{$key}->[1] . ")" if (!$skip_count);
+# printf "%-$namewidth.${namewidth}s|", $tmp;
+ $first=undef();
+ foreach $server (@key_order)
+ {
+ print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]);
+ $first=$tot1{$server}{$key}->[0] if (!defined($first));
+ }
+# print "\n";
+ $luukcounter++;
+}
+
+#print_sep("-");
+$key="TOTALS";
+#printf "%-$namewidth.${namewidth}s|", $key;
+$first=undef();
+foreach $server (@key_order)
+{
+# print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]);
+ $first=$tot1{$server}{$key}->[0] if (!defined($first));
+}
+#print "\n";
+#print_sep("=");
+&make_gif;
+
+exit 0;
+
+#
+# some format functions;
+#
+
+sub print_sep
+{
+ my ($sep)=@_;
+# print $sep x ($namewidth + (($colwidth+1) * $column_count)+1),"\n";
+}
+
+
+sub print_value
+{
+ my ($first,$value,$flags)=@_;
+ my ($tmp);
+
+ if (defined($value))
+ {
+ if (!defined($first) || !$opt_relative)
+ {
+ $tmp=sprintf("%d",$value);
+ }
+ else
+ {
+ $first=1 if (!$first); # Assume that it took one second instead of 0
+ $tmp= sprintf("%.2f",$value/$first);
+ }
+ if (defined($flags))
+ {
+ $tmp="+".$tmp if ($flags =~ /\+/);
+ $tmp="?".$tmp if ($flags =~ /\?/);
+ }
+ }
+ else
+ {
+ $tmp="";
+ }
+ $tmp= " " x ($colwidth-length($tmp)) . $tmp if (length($tmp) < $colwidth);
+# print $tmp . "|";
+}
+
+
+sub print_string
+{
+ my ($str)=@_;
+ my ($width);
+ $width=$namewidth + ($colwidth+1)*$column_count;
+
+ $str=substr($str,1,$width) if (length($str) > $width);
+# print($str," " x ($width - length($str)),"|\n");
+}
+
+sub usage
+{
+ exit(0);
+}
+
+
+
+###########################################
+###########################################
+###########################################
+# making here a gif of the results ... (lets try it :-))
+# luuk .... 1997
+###########################################
+## take care that $highest / $giftotal / $gifcount / $luukcounter
+## are getting there value above ... so don't forget them while
+## copying the code to some other program ....
+
+sub make_gif {
+ &gd; # some base things ....
+ &legend; # make the nice legend
+ &lines; # yep sometimes you have to print some lines
+ &gif("gif/benchmark2-".$opt_cmp); # and finally we can print all to a gif file ...
+}
+##### mmm we are finished now ...
+
+
+# first we have to calculate some limits and some other stuff
+sub calculate {
+# here is the list which I have to know to make everything .....
+# the small border width ... $sm_border =
+# the border default $border =
+# the step default ... if it must be calculated then no value $step =
+# the highest number $highest =
+# the max length of the text of the x borders $max_len_lb=
+# the max length of a legend entry $max_len_le=
+# number of entries in the legend $num_legen =
+# the length of the color blocks for the legend $legend_block=
+# the width of the gif ...if it must be calculated - no value $width =
+# the height of the gif .. if it must be calculated - no value $height =
+# the width of the grey field ' ' ' ' $width_grey=
+# the height of the grey field ' ' ' ' $height_grey=
+# number of dashed lines $lines=
+# if bars must overlap how much they must be overlapped $overlap=
+# titlebar title of graph in two colors big $titlebar=
+# titlebar1 sub title of graph in small font in black $titlebar1=
+# xlabel $xlabel=
+# ylabel $ylabel=
+# the name of the gif ... $name=
+# then the following things must be knows .....
+# xlabel below or on the left side ?
+# legend yes/no?
+# where must the legend be placed?
+# must the xlabel be printed horizontal or vertical?
+# must the ylabel be printed horizontal or vertical?
+# must the graph be a line or a bar graph?
+# is a xlabel several different entries or some sub entries of one?
+# so xlabel 1 => test1=10, test2=15, test3=7 etc
+# or xlabel 1 => test1a=12, test1b=10, test1c=7 etc
+# must the bars overlap (only with the second example I think)
+# must the number be printed above or next to the bar?
+# when must the number be printed .... only when it extends the graph ...???
+# the space between the bars .... are that the same width of the bars ...
+# or is it a separate space ... defined ???
+# must the date printed below or some where else ....
+
+#calculate all space for text and other things ....
+ $sm_border = 8; # the grey border around ...
+ $border = 40; #default ...
+ $left_border = 2.75 * $border; #default ...
+ $right_border = $border; #default ...
+ $up_border = $border; #default ...
+ $down_border = $border; # default ...
+ $step = ($height - $up_border - $down_border)/ ($luukcounter + (($#key_order + 1) * $luukcounter));
+ # can set $step to get nice graphs ... and change the format ...
+ $step = 8; # set hard the step value
+
+ $gifavg = ($giftotal/$gifcount);
+ $highest = 2 * $gifavg;
+ $highest = 1; # set hard the highest value ...
+ $xhigh = int($highest + .5 * $highest);
+
+ # here to get the max lenght of the test entries ...
+ # so we can calculate the with of the left border
+ foreach $oper (sort keys (%op)) {
+ $max_len_lb = length($oper) if (length($oper) > $max_len_lb);
+# print "oper = $oper - $max_len_lb\n";
+ }
+ $max_len_lb = $max_len_lb * gdSmallFont->width;
+ $left_border = (3*$sm_border) + $max_len_lb;
+ $down_border = (4*$sm_border) + (gdSmallFont->width*(length($xhigh)+3)) + (gdSmallFont->height *2);
+ $right_border = (3*$sm_border) + 3 + (gdSmallFont->width*(length($highest)+5));
+
+ # calculate the space for the legend .....
+ foreach $key (@key_order) {
+ $tmp = $key;
+ $tmp =~ s/-cmp-$opt_cmp//i;
+ $giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'};
+ $max_len_le = length($giflegend) if (length($giflegend) > $max_len_le);
+ }
+ $max_len_le = $max_len_le * gdSmallFont->width;
+ $legend_block = 10; # the length of the block in the legend
+ $max_high_le = (($#key_order + 1)*(gdSmallFont->height+2)) + (2*$legend_block);
+ $down_border += $max_high_le;
+ $up_border = (5 * $sm_border) + gdSmallFont->height + gdLargeFont->height;
+
+ print "Here some things we already know ....\n";
+# print "luukcounter = $luukcounter (number of tests)\n";
+# print "gifcount = $gifcount (number of total entries)\n";
+# print "giftotal = $giftotal (total secs)\n";
+# print "gifavg = $gifavg\n";
+# print "highest = $highest\n";
+# print "xhigh = $xhigh\n";
+# print "step = $step -- $#key_order\n";
+# print "max_len_lb = $max_len_lb\n";
+# printf "Small- width %d - height %s\n",gdSmallFont->width,gdSmallFont->height;
+# printf "Tiny- width %d - height %s\n",gdTinyFont->width,gdTinyFont->height;
+}
+
+sub gd {
+ &calculate;
+ $width = 600; # the width ....
+ $height = 500; # the height ...
+ $width_greyfield = 430;
+ # when $step is set ... count the height ....????
+ $width = $width_greyfield + $left_border + $right_border;
+ $height = ($step * ($luukcounter + ($luukcounter * ($#key_order + 1)))) + $down_border + $up_border;
+ $b_width = $width - ($left_border + $right_border); # width within the grey field
+ $overlap = 0; # how far each colum can fall over each other ...nice :-)
+
+ # make the gif image ....
+ $im = new GD::Image($width,$height);
+
+ # allocate the colors to use ...
+ $white = $im->colorAllocate(255,255,255);
+ $black = $im->colorAllocate(0,0,0);
+ $paper_white = $im->colorAllocate(220, 220, 220);
+ $grey1 = $im->colorAllocate(240, 240, 240);
+ $grey4 = $im->colorAllocate(229, 229, 229);
+ $grey2 = $im->colorAllocate(102, 102, 102);
+ $grey3 = $im->colorAllocate(153, 153, 153);
+
+ $red = $im->colorAllocate(205,0,0); # msql
+ $lred = $im->colorAllocate(255,0,0);
+ $blue = $im->colorAllocate(0,0,205); # mysql
+ $lblue = $im->colorAllocate(0,0,255); # mysql_pgcc
+ $green = $im->colorAllocate(0, 205, 0); # postgres
+ $lgreen = $im->colorAllocate(0, 255, 0); # pg_fast
+ $orange = $im->colorAllocate(205,133, 0); # solid
+ $lorange = $im->colorAllocate(255, 165, 0); # Adabas
+ $yellow = $im->colorAllocate(205,205,0); # empress
+ $lyellow = $im->colorAllocate(255,255,0);
+ $magenta = $im->colorAllocate(255,0,255); # oracle
+ $lmagenta = $im->colorAllocate(255,200,255);
+ $cyan = $im->colorAllocate(0,205,205); # sybase
+ $lcyan = $im->colorAllocate(0,255,255);
+ $sienna = $im->colorAllocate(139,71,38); # db2
+ $lsienna = $im->colorAllocate(160,82,45);
+ $coral = $im->colorAllocate(205,91,69); # Informix
+ $lcoral = $im->colorAllocate(255,114,86);
+ $peach = $im->colorAllocate(205,175,149);
+ $lpeach = $im->colorAllocate(255,218,185);
+
+ @colors = ($red, $blue, $green, $orange, $yellow, $magenta, $cyan, $sienna, $coral, $peach);
+ @lcolors = ($lred, $lblue, $lgreen, $lorange, $lyellow, $lmagenta, $lcyan, $lsienna, $lcoral, $lpeach);
+
+ # set a color per server so in every result it has the same color ....
+ foreach $key (@key_order) {
+ if ($tot{$key}{'server'} =~ /mysql/i) {
+ if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i || $key =~ /mysql_fast/i) {
+ $tot{$key}{'color'} = $lblue;
+ } else {
+ $tot{$key}{'color'} = $blue;
+ }
+ } elsif ($tot{$key}{'server'} =~ /msql/i) {
+ $tot{$key}{'color'} = $lred;
+ } elsif ($tot{$key}{'server'} =~ /postgres/i) {
+ if ($key =~ /pg_fast/i) {
+ $tot{$key}{'color'} = $lgreen;
+ } else {
+ $tot{$key}{'color'} = $green;
+ }
+ } elsif ($tot{$key}{'server'} =~ /solid/i) {
+ $tot{$key}{'color'} = $lorange;
+ } elsif ($tot{$key}{'server'} =~ /empress/i) {
+ $tot{$key}{'color'} = $lyellow;
+ } elsif ($tot{$key}{'server'} =~ /oracle/i) {
+ $tot{$key}{'color'} = $magenta;
+ } elsif ($tot{$key}{'server'} =~ /sybase/i) {
+ $tot{$key}{'color'} = $cyan;
+ } elsif ($tot{$key}{'server'} =~ /db2/i) {
+ $tot{$key}{'color'} = $sienna;
+ } elsif ($tot{$key}{'server'} =~ /informix/i) {
+ $tot{$key}{'color'} = $coral;
+ } elsif ($tot{$key}{'server'} =~ /microsoft/i) {
+ $tot{$key}{'color'} = $peach;
+ } elsif ($tot{$key}{'server'} =~ /access/i) {
+ $tot{$key}{'color'} = $lpeach;
+ } elsif ($tot{$key}{'server'} =~ /adabas/i) {
+ $tot{$key}{'color'} = $lorange;
+ }
+ }
+
+ # make the nice little borders
+ # left bar
+ $poly0 = new GD::Polygon;
+ $poly0->addPt(0,0);
+ $poly0->addPt($sm_border,$sm_border);
+ $poly0->addPt($sm_border,($height - $sm_border));
+ $poly0->addPt(0,$height);
+ $im->filledPolygon($poly0,$grey1);
+ $im->polygon($poly0, $grey4);
+ # upper bar
+ $poly3 = new GD::Polygon;
+ $poly3->addPt(0,0);
+ $poly3->addPt($sm_border,$sm_border);
+ $poly3->addPt(($width - $sm_border),$sm_border);
+ $poly3->addPt($width,0);
+ $im->polygon($poly3, $grey4);
+ $tmptime = localtime(time);
+ $im->string(gdSmallFont,($width - $sm_border - (gdSmallFont->width * length($tmptime))),($height - ($sm_border) - gdSmallFont->height), $tmptime, $grey3);
+
+ # right bar
+ $poly1 = new GD::Polygon;
+ $poly1->addPt($width,0);
+ $poly1->addPt(($width - $sm_border),$sm_border);
+ $poly1->addPt(($width - $sm_border),($height - $sm_border));
+ $poly1->addPt($width,$height);
+ $im->filledPolygon($poly1, $grey3);
+ $im->stringUp(gdSmallFont,($width - 10),($height - (2 * $sm_border)), "Made by Luuk de Boer - 1997 (c)", $blue);
+ #below bar
+ $poly2 = new GD::Polygon;
+ $poly2->addPt(0,$height);
+ $poly2->addPt($sm_border,($height - $sm_border));
+ $poly2->addPt(($width - $sm_border),($height - $sm_border));
+ $poly2->addPt($width,$height);
+ $im->filledPolygon($poly2, $grey2);
+
+ # do the black line around where in you will print ... (must be done at last
+ # but is hard to develop with ... but the filled grey must be done first :-)
+ $im->filledRectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$grey4);
+
+
+ # print the nice title ...
+ $titlebar = "MySQL Benchmark results"; # head title ...
+ $titlebar1 = "Compare $opt_cmp "; # sub title
+ $header2 = "seconds/test"; # header value
+ $center = ($width / 2) - ((gdLargeFont->width * length($titlebar)) / 2);
+ $center1 = ($width / 2) - ((gdSmallFont->width * length($titlebar1)) / 2);
+ $center2 = ($width_greyfield/2) - ((gdSmallFont->width*length($header2))/2);
+ $bovenkant = $sm_border * 3;
+ $bovenkant1 = $bovenkant + gdLargeFont->height + (.5*$sm_border);
+ $bovenkant2 = $height - $down_border + (1*$sm_border) + (gdSmallFont->width*(length($xhigh)+3));
+ $im->string(gdLargeFont,($center),($bovenkant + 1), $titlebar, $grey3);
+ $im->string(gdLargeFont,($center),($bovenkant), $titlebar, $red);
+ $im->string(gdSmallFont,($center1),($bovenkant1), $titlebar1, $black);
+ $im->string(gdSmallFont,($left_border + $center2),($bovenkant2), $header2, $black);
+
+ $xlength = $width - $left_border - $right_border;
+ $lines = 10; # hard coded number of dashed lines
+ $xverh = $xlength / $xhigh;
+# print " de verhouding ===> $xverh --- $xlength -- $xhigh \n";
+
+ $xstep = ($xhigh / $lines) * $xverh;
+ $teller = 0;
+ # make the nice dashed lines and print the values ...
+ for ($i = 0; $i <= $lines; $i++) {
+ $st2 = ($left_border) + ($i * $xstep);
+ $im->dashedLine($st2,($height-$down_border),$st2,($up_border), $grey3);
+ if (($i != 0) && ($teller == 2)) {
+ $st3 = sprintf("%.2f", $i*($xhigh/$lines));
+ $im->stringUp(gdTinyFont,($st2 - (gdSmallFont->height/2)),($height - $down_border +(.5*$sm_border) + (gdSmallFont->width*(length($xhigh)+3))), $st3, $black);
+ $teller = 0;
+ }
+ $teller++;
+ }
+ $im->rectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$black);
+}
+
+sub legend {
+ # make the legend ...
+ $legxbegin = $left_border;
+
+ $legybegin = $height - $down_border + (2*$sm_border) + (gdSmallFont->width * (length($xhigh) + 3)) + gdSmallFont->height;
+ $legxend = $legxbegin + $max_len_le + (4*$legend_block);
+ $legxend = $legxbegin + $width_greyfield;
+ $legyend = $legybegin + $max_high_le;
+ $im->filledRectangle($legxbegin,$legybegin,$legxend,$legyend,$grey4);
+ $im->rectangle($legxbegin,$legybegin,$legxend,$legyend,$black);
+ # calculate the space for the legend .....
+ $c = 0; $i = 1;
+ $legybegin += $legend_block;
+ foreach $key (@key_order) {
+ $xtmp = $legxbegin + $legend_block;
+ $ytmp = $legybegin + ($c * (gdSmallFont->height +2));
+ $xtmp1 = $xtmp + $legend_block;
+ $ytmp1 = $ytmp + gdSmallFont->height;
+ $im->filledRectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$tot{$key}{'color'});
+ $im->rectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$black);
+ $tmp = $key;
+ $tmp =~ s/-cmp-$opt_cmp//i;
+ $giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'};
+ $xtmp2 = $xtmp1 + $legend_block;
+ $im->string(gdSmallFont,$xtmp2,$ytmp,"$giflegend",$black);
+ $c++;
+ $i++;
+# print "$c $i -> $giflegend\n";
+ }
+
+}
+
+sub lines {
+
+ $g = 0;
+ $i = 0;
+ $ybegin = $up_border + ((($#key_order + 2)/2)*$step);
+ $xbegin = $left_border;
+ foreach $key (sort {$a cmp $b} keys %op) {
+ next if ($key =~ /TOTALS/i);
+ $c = 0;
+# print "key - $key\n";
+ foreach $server (@key_order) {
+ $tot1{$server}{$key}->[1] = 1 if ($tot1{$server}{$key}->[1] == 0);
+ $entry = $tot1{$server}{$key}->[0]/$tot1{$server}{$key}->[1];
+ $ytmp = $ybegin + ($i * $step) ;
+ $xtmp = $xbegin + ($entry * $xverh) ;
+ $ytmp1 = $ytmp + $step;
+# print "$server -- $entry --x $xtmp -- y $ytmp - $c\n";
+ $entry1 = sprintf("%.2f", $entry);
+ if ($entry < $xhigh) {
+ $im->filledRectangle($xbegin, $ytmp, $xtmp, $ytmp1, $tot{$server}{'color'});
+ $im->rectangle($xbegin, $ytmp, $xtmp, $ytmp1, $black);
+# print the seconds behind the bar (look below for another entry)
+# this entry is for the bars that are not greater then the max width
+# of the grey field ...
+# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$black));
+# if you want the seconds in the color of the bar just uncomment it (below)
+# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$tot{$server}{'color'}));
+ } else {
+ $im->filledRectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $tot{$server}{'color'});
+ $im->rectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $black);
+
+# print the seconds behind the bar (look below for another entry)
+# here is the seconds printed behind the bar is the bar is too big for
+# the graph ... (seconds is greater then xhigh ...)
+ $im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$black);
+# if you want the seconds in the color of the bar just uncomment it (below)
+# $im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$colors[$c]);
+ }
+ $c++;
+ $i++;
+ }
+ # see if we can center the text between the bars ...
+ $ytmp2 = $ytmp1 - (((($c)*$step) + gdSmallFont->height)/2);
+ $im->string(gdSmallFont,($sm_border*2),$ytmp2,$key, $black);
+ $i++;
+ }
+}
+
+
+sub gif {
+ my ($name) = @_;
+ $name_gif = $name . ".gif";
+ print "name --> $name_gif\n";
+ open (GIF, "> $name_gif") || die "Can't open $name_gif: $!\n";
+ print GIF $im->gif;
+ close (GIF);
+}
+
diff --git a/sql-bench/limits/mysql-3.23.cfg b/sql-bench/limits/mysql-3.23.cfg
index 19bb3c67cc1..a496bd7bf4c 100644
--- a/sql-bench/limits/mysql-3.23.cfg
+++ b/sql-bench/limits/mysql-3.23.cfg
@@ -1,4 +1,4 @@
-#This file is automaticly generated by crash-me 1.54
+#This file is automaticly generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
@@ -36,7 +36,7 @@ constraint_check=no # Column constraints
constraint_check_table=no # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
-crash_me_version=1.54 # crash me version
+crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=no # default value function for column
create_if_not_exists=yes # create table if not exists
@@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=1048565 # constant string size in SELECT
select_table_update=no # Update with sub select
select_without_from=yes # SELECT without FROM
-server_version=MySQL 3.23.29 gamma # server version
+server_version=MySQL 3.23.39 debug # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=no # subqueries
@@ -402,7 +402,7 @@ table_alias=yes # Table alias
table_name_case=no # case independent table names
table_wildcard=yes # Select table_name.*
temporary_table=yes # temporary tables
-transactions=no # transactions
+transactions=yes # transactions
truncate_table=yes # truncate
type_extra_abstime=no # Type abstime
type_extra_bfile=no # Type bfile
diff --git a/sql-bench/limits/mysql.cfg b/sql-bench/limits/mysql.cfg
index 19bb3c67cc1..a496bd7bf4c 100644
--- a/sql-bench/limits/mysql.cfg
+++ b/sql-bench/limits/mysql.cfg
@@ -1,4 +1,4 @@
-#This file is automaticly generated by crash-me 1.54
+#This file is automaticly generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
@@ -36,7 +36,7 @@ constraint_check=no # Column constraints
constraint_check_table=no # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
-crash_me_version=1.54 # crash me version
+crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=no # default value function for column
create_if_not_exists=yes # create table if not exists
@@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=1048565 # constant string size in SELECT
select_table_update=no # Update with sub select
select_without_from=yes # SELECT without FROM
-server_version=MySQL 3.23.29 gamma # server version
+server_version=MySQL 3.23.39 debug # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=no # subqueries
@@ -402,7 +402,7 @@ table_alias=yes # Table alias
table_name_case=no # case independent table names
table_wildcard=yes # Select table_name.*
temporary_table=yes # temporary tables
-transactions=no # transactions
+transactions=yes # transactions
truncate_table=yes # truncate
type_extra_abstime=no # Type abstime
type_extra_bfile=no # Type bfile
diff --git a/sql-bench/limits/pg.cfg b/sql-bench/limits/pg.cfg
index 7e4d20b052a..9cb42f86b8a 100644
--- a/sql-bench/limits/pg.cfg
+++ b/sql-bench/limits/pg.cfg
@@ -1,10 +1,10 @@
-#This file is automaticly generated by crash-me 1.54
+#This file is automaticly generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
alter_add_col=yes # Alter table add column
-alter_add_constraint=no # Alter table add constraint
-alter_add_foreign_key=yes # Alter table add foreign key
+alter_add_constraint=yes # Alter table add constraint
+alter_add_foreign_key=no # Alter table add foreign key
alter_add_multi_col=no # Alter table add many columns
alter_add_primary_key=no # Alter table add primary key
alter_add_unique=no # Alter table add unique
@@ -29,21 +29,22 @@ columns_in_order_by=+64 # number of columns in order by
comment_#=no # # as comment
comment_--=yes # -- as comment (ANSI)
comment_/**/=yes # /* */ as comment
-comment_//=no # // as comment (ANSI)
+comment_//=no # // as comment
compute=no # Compute
connections=32 # Simultaneous connections (installation default)
constraint_check=yes # Column constraints
constraint_check_table=yes # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
-crash_me_version=1.54 # crash me version
+crash_me_version=1.57 # crash me version
create_default=yes # default value for column
-create_default_func=no # default value function for column
+create_default_func=yes # default value function for column
create_if_not_exists=no # create table if not exists
create_index=yes # create index
create_schema=no # Create SCHEMA
create_table_select=with AS # create table from select
cross_join=yes # cross join (same as from a,b)
+date_as_string=yes # String functions on date columns
date_infinity=no # Supports 'infinity dates
date_last=yes # Supports 9999-12-31 dates
date_one=yes # Supports 0001-01-01 dates
@@ -58,16 +59,16 @@ drop_requires_cascade=no # drop table require cascade/restrict
drop_restrict=no # drop table with cascade/restrict
end_colon=yes # allows end ';'
except=yes # except
-except_all=no # except all
+except_all=yes # except all
except_all_incompat=no # except all (incompatible lists)
except_incompat=no # except (incompatible lists)
float_int_expr=yes # mixing of integer and float in expression
foreign_key=yes # foreign keys
foreign_key_syntax=yes # foreign key syntax
-full_outer_join=no # full outer join
+full_outer_join=yes # full outer join
func_extra_!=no # Function NOT as '!' in SELECT
func_extra_%=yes # Function MOD as %
-func_extra_&=no # Function & (bitwise and)
+func_extra_&=yes # Function & (bitwise and)
func_extra_&&=no # Function AND as '&&'
func_extra_<>=yes # Function <> in SELECT
func_extra_==yes # Function =
@@ -79,12 +80,12 @@ func_extra_atn2=no # Function ATN2
func_extra_auto_num2string=no # Function automatic num->string convert
func_extra_auto_string2num=yes # Function automatic string->num convert
func_extra_between=yes # Function BETWEEN in SELECT
-func_extra_binary_shifts=no # Function << and >> (bitwise shifts)
+func_extra_binary_shifts=yes # Function << and >> (bitwise shifts)
func_extra_bit_count=no # Function BIT_COUNT
func_extra_ceil=yes # Function CEIL
func_extra_charindex=no # Function CHARINDEX
-func_extra_chr=no # Function CHR
-func_extra_concat_as_+=no # Function concatenation with +
+func_extra_chr=yes # Function CHR
+func_extra_concat_as_+=error # Function concatenation with +
func_extra_concat_list=no # Function CONCAT(list)
func_extra_convert=no # Function CONVERT
func_extra_cosh=no # Function COSH
@@ -103,7 +104,7 @@ func_extra_getdate=no # Function GETDATE
func_extra_greatest=no # Function GREATEST
func_extra_if=no # Function IF
func_extra_in_num=yes # Function IN on numbers in SELECT
-func_extra_in_str=no # Function IN on strings in SELECT
+func_extra_in_str=yes # Function IN on strings in SELECT
func_extra_initcap=yes # Function INITCAP
func_extra_instr=no # Function LOCATE as INSTR
func_extra_instr_oracle=no # Function INSTR (Oracle syntax)
@@ -114,7 +115,7 @@ func_extra_last_insert_id=no # Function LAST_INSERT_ID
func_extra_least=no # Function LEAST
func_extra_lengthb=no # Function LENGTHB
func_extra_like=yes # Function LIKE in SELECT
-func_extra_like_escape=no # Function LIKE ESCAPE in SELECT
+func_extra_like_escape=yes # Function LIKE ESCAPE in SELECT
func_extra_ln=no # Function LN
func_extra_log(m_n)=yes # Function LOG(m,n)
func_extra_logn=no # Function LOGN
@@ -160,7 +161,7 @@ func_extra_unix_timestamp=no # Function UNIX_TIMESTAMP
func_extra_userenv=no # Function USERENV
func_extra_version=yes # Function VERSION
func_extra_weekday=no # Function WEEKDAY
-func_extra_|=no # Function | (bitwise or)
+func_extra_|=yes # Function | (bitwise or)
func_extra_||=no # Function OR as '||'
func_extra_~*=yes # Function ~* (case insensitive compare)
func_odbc_abs=yes # Function ABS
@@ -192,7 +193,7 @@ func_odbc_ifnull=no # Function IFNULL
func_odbc_insert=no # Function INSERT
func_odbc_lcase=no # Function LCASE
func_odbc_left=no # Function LEFT
-func_odbc_length=no # Function REAL LENGTH
+func_odbc_length=yes # Function REAL LENGTH
func_odbc_length_without_space=no # Function ODBC LENGTH
func_odbc_locate_2=no # Function LOCATE(2 arg)
func_odbc_locate_3=no # Function LOCATE(3 arg)
@@ -220,7 +221,7 @@ func_odbc_sin=yes # Function SIN
func_odbc_soundex=no # Function SOUNDEX
func_odbc_space=no # Function SPACE
func_odbc_sqrt=no # Function SQRT
-func_odbc_substring=no # Function ODBC SUBSTRING
+func_odbc_substring=yes # Function ODBC SUBSTRING
func_odbc_tan=yes # Function TAN
func_odbc_timestampadd=no # Function TIMESTAMPADD
func_odbc_timestampdiff=no # Function TIMESTAMPDIFF
@@ -246,8 +247,8 @@ func_sql_localtime=no # Function LOCALTIME
func_sql_localtimestamp=no # Function LOCALTIMESTAMP
func_sql_lower=yes # Function LOWER
func_sql_nullif_num=yes # Function NULLIF with numbers
-func_sql_nullif_string=no # Function NULLIF with strings
-func_sql_octet_length=no # Function OCTET_LENGTH
+func_sql_nullif_string=yes # Function NULLIF with strings
+func_sql_octet_length=yes # Function OCTET_LENGTH
func_sql_position=yes # Function POSITION
func_sql_searched_case=yes # Function searched CASE
func_sql_session_user=yes # Function SESSION_USER
@@ -264,7 +265,7 @@ func_where_eq_some=yes # Function = SOME
func_where_exists=yes # Function EXISTS
func_where_in_num=yes # Function IN on numbers
func_where_like=yes # Function LIKE
-func_where_like_escape=no # Function LIKE ESCAPE
+func_where_like_escape=yes # Function LIKE ESCAPE
func_where_match=no # Function MATCH
func_where_match_unique=no # Function MATCH UNIQUE
func_where_matches=no # Function MATCHES
@@ -283,8 +284,8 @@ group_func_extra_bit_and=no # Group function BIT_AND
group_func_extra_bit_or=no # Group function BIT_OR
group_func_extra_count_distinct_list=no # Group function COUNT(DISTINCT expr,expr,...)
group_func_extra_std=no # Group function STD
-group_func_extra_stddev=no # Group function STDDEV
-group_func_extra_variance=no # Group function VARIANCE
+group_func_extra_stddev=yes # Group function STDDEV
+group_func_extra_variance=yes # Group function VARIANCE
group_func_sql_any=no # Group function ANY
group_func_sql_avg=yes # Group function AVG
group_func_sql_count_*=yes # Group function COUNT (*)
@@ -315,37 +316,37 @@ insert_multi_value=no # INSERT with Value lists
insert_select=yes # insert INTO ... SELECT ...
insert_with_set=no # INSERT with set syntax
intersect=yes # intersect
-intersect_all=no # intersect all
+intersect_all=yes # intersect all
intersect_all_incompat=no # intersect all (incompatible lists)
intersect_incompat=no # intersect (incompatible lists)
join_tables=+64 # tables in join
-left_outer_join=no # left outer join
-left_outer_join_using=no # left outer join using
+left_outer_join=yes # left outer join
+left_outer_join_using=yes # left outer join using
like_with_column=yes # column LIKE column
like_with_number=yes # LIKE on numbers
lock_tables=yes # lock table
logical_value=1 # Value of logical operation (1=1)
max_big_expressions=10 # big expressions
-max_char_size=8104 # max char() size
+max_char_size=+8000000 # max char() size
max_column_name=+512 # column name length
max_columns=1600 # Columns in table
max_conditions=19994 # OR and AND in WHERE
max_expressions=9999 # simple expressions
max_index=+64 # max index
-max_index_length=2704 # index length
+max_index_length=+8192 # index length
max_index_name=+512 # index name length
-max_index_part_length=2704 # max index part length
+max_index_part_length=235328 # max index part length
max_index_parts=16 # index parts
-max_index_varchar_part_length=2704 # index varchar part length
-max_row_length=7949 # max table row length (without blobs)
-max_row_length_with_null=7949 # table row length with nulls (without blobs)
+max_index_varchar_part_length=235328 # index varchar part length
+max_row_length=64519 # max table row length (without blobs)
+max_row_length_with_null=64519 # table row length with nulls (without blobs)
max_select_alias_name=+512 # select alias name length
max_stack_expression=+2000 # stacked expressions
max_table_alias_name=+512 # table alias name length
max_table_name=+512 # table name length
-max_text_size=8104 # max text or blob size
+max_text_size=+8000000 # max text or blob size
max_unique_index=+64 # unique indexes
-max_varchar_size=8104 # max varchar() size
+max_varchar_size=+8000000 # max varchar() size
minus=no # minus
minus_incompat=no # minus (incompatible lists)
minus_neg=no # Calculate 1--1
@@ -356,7 +357,7 @@ multi_table_delete=no # DELETE FROM table1,table2...
multi_table_update=no # Update with many tables
natural_join=yes # natural join
natural_join_incompat=yes # natural join (incompatible lists)
-natural_left_outer_join=no # natural left outer join
+natural_left_outer_join=yes # natural left outer join
no_primary_key=yes # Tables without primary key
null_concat_expr=yes # Is 'a' || NULL = NULL
null_in_index=yes # null in index
@@ -364,7 +365,7 @@ null_in_unique=yes # null in unique index
null_num_expr=yes # Is 1+NULL = NULL
nulls_in_unique=yes # null combination in unique index
odbc_left_outer_join=no # left outer join odbc style
-operating_system=Linux 2.2.14-5.0 i686 # crash-me tested on
+operating_system=Linux 2.4.0-64GB-SMP i686 # crash-me tested on
order_by=yes # Order by
order_by_alias=yes # Order by alias
order_by_function=yes # Order by function
@@ -386,7 +387,7 @@ remember_end_space=no # Remembers end space in char()
remember_end_space_varchar=yes # Remembers end space in varchar()
rename_table=no # rename table
repeat_string_size=+8000000 # return string size from function
-right_outer_join=no # right outer join
+right_outer_join=yes # right outer join
rowid=oid # Type for row id
select_constants=yes # Select constants
select_limit=with LIMIT # LIMIT number of rows
@@ -394,7 +395,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=16777207 # constant string size in SELECT
select_table_update=yes # Update with sub select
select_without_from=yes # SELECT without FROM
-server_version=PostgreSQL version 7.0.2 # server version
+server_version=PostgreSQL version 7.1.1 # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=yes # subqueries
@@ -466,7 +467,7 @@ type_extra_timespan=yes # Type timespan
type_extra_uint=no # Type uint
type_extra_varchar2(1_arg)=no # Type varchar2(1 arg)
type_extra_year=no # Type year
-type_odbc_bigint=no # Type bigint
+type_odbc_bigint=yes # Type bigint
type_odbc_binary(1_arg)=no # Type binary(1 arg)
type_odbc_datetime=yes # Type datetime
type_odbc_tinyint=no # Type tinyint
@@ -519,4 +520,4 @@ union_incompat=yes # union (incompatible lists)
unique_in_create=yes # unique in create table
unique_null_in_create=yes # unique null in create
views=yes # views
-where_string_size=16777182 # constant string size in where
+where_string_size=16777181 # constant string size in where
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index 0ed6926a297..8c290a634f7 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -122,53 +122,49 @@ sub new
$self->{'vacuum'} = 1; # When using with --fast
$self->{'drop_attr'} = "";
- $limits{'max_conditions'} = 9999; # (Actually not a limit)
- $limits{'max_columns'} = 2000; # Max number of columns in table
- # Windows can't handle that many files in one directory
- $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
- $limits{'max_text_size'} = 65000; # Max size with default buffers.
- $limits{'query_size'} = 1000000; # Max size with default buffers.
- $limits{'max_index'} = 16; # Max number of keys
- $limits{'max_index_parts'} = 16; # Max segments/key
- $limits{'max_column_name'} = 64; # max table and column name
-
- $limits{'join_optimizer'} = 1; # Can optimize FROM tables
- $limits{'load_data_infile'} = 1; # Has load data infile
- $limits{'lock_tables'} = 1; # Has lock tables
- $limits{'functions'} = 1; # Has simple functions (+/-)
- $limits{'group_functions'} = 1; # Have group functions
- $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
- $limits{'group_distinct_functions'}= 1; # Have count(distinct)
- $limits{'select_without_from'}= 1; # Can do 'select 1';
- $limits{'multi_drop'} = 1; # Drop table can take many tables
- $limits{'subqueries'} = 0; # Doesn't support sub-queries.
- $limits{'left_outer_join'} = 1; # Supports left outer joins
- $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
- $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
- $limits{'having_with_group'} = 1; # Can use group functions in HAVING
- $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
- $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
- $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
- $limits{'alter_table'} = 1; # Have ALTER TABLE
+ $limits{'NEG'} = 1; # Supports -id
$limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
+ $limits{'alter_table'} = 1; # Have ALTER TABLE
$limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column
- $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
-
- $limits{'group_func_extra_std'} = 1; # Have group function std().
-
- $limits{'func_odbc_mod'} = 1; # Have function mod.
+ $limits{'column_alias'} = 1; # Alias for fields in select statement.
$limits{'func_extra_%'} = 1; # Has % as alias for mod()
- $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
$limits{'func_extra_if'} = 1; # Have function if.
- $limits{'column_alias'} = 1; # Alias for fields in select statement.
- $limits{'NEG'} = 1; # Supports -id
$limits{'func_extra_in_num'} = 1; # Has function in
- $limits{'limit'} = 1; # supports the limit attribute
- $limits{'unique_index'} = 1; # Unique index works or not
+ $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
+ $limits{'func_odbc_mod'} = 1; # Have function mod.
+ $limits{'functions'} = 1; # Has simple functions (+/-)
+ $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
+ $limits{'group_distinct_functions'}= 1; # Have count(distinct)
+ $limits{'group_func_extra_std'} = 1; # Have group function std().
+ $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
+ $limits{'group_functions'} = 1; # Have group functions
+ $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
+ $limits{'having_with_group'} = 1; # Can use group functions in HAVING
+ $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
$limits{'insert_select'} = 1;
- $limits{'working_blobs'} = 1; # If big varchar/blobs works
+ $limits{'join_optimizer'} = 1; # Can optimize FROM tables
+ $limits{'left_outer_join'} = 1; # Supports left outer joins
+ $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
+ $limits{'limit'} = 1; # supports the limit attribute
+ $limits{'load_data_infile'} = 1; # Has load data infile
+ $limits{'lock_tables'} = 1; # Has lock tables
+ $limits{'max_column_name'} = 64; # max table and column name
+ $limits{'max_columns'} = 2000; # Max number of columns in table
+ $limits{'max_conditions'} = 9999; # (Actually not a limit)
+ $limits{'max_index'} = 16; # Max number of keys
+ $limits{'max_index_parts'} = 16; # Max segments/key
+ $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
+ $limits{'max_text_size'} = 1000000; # Good enough for tests
+ $limits{'multi_drop'} = 1; # Drop table can take many tables
+ $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
$limits{'order_by_unused'} = 1;
+ $limits{'query_size'} = 1000000; # Max size with default buffers.
+ $limits{'select_without_from'}= 1; # Can do 'select 1';
+ $limits{'subqueries'} = 0; # Doesn't support sub-queries.
+ $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
+ $limits{'unique_index'} = 1; # Unique index works or not
$limits{'working_all_fields'} = 1;
+ $limits{'working_blobs'} = 1; # If big varchar/blobs works
$smds{'time'} = 1;
$smds{'q1'} = 'b'; # with time not supp by mysql ('')
@@ -569,12 +565,12 @@ sub new
$self->{'drop_attr'} = "";
$self->{"vacuum"} = 1;
$limits{'join_optimizer'} = 1; # Can optimize FROM tables
- $limits{'load_data_infile'} = 0; # Is this true ?
+ $limits{'load_data_infile'} = 0;
- $limits{'NEG'} = 1; # Can't handle -id
- $limits{'alter_table'} = 1; # alter ??
+ $limits{'NEG'} = 1;
$limits{'alter_add_multi_col'}= 0; # alter_add_multi_col ?
- $limits{'alter_table_dropcol'}= 0; # alter_drop_col ?
+ $limits{'alter_table'} = 1;
+ $limits{'alter_table_dropcol'}= 0;
$limits{'column_alias'} = 1;
$limits{'func_extra_%'} = 1;
$limits{'func_extra_if'} = 0;
@@ -583,33 +579,33 @@ sub new
$limits{'func_odbc_mod'} = 1; # Has %
$limits{'functions'} = 1;
$limits{'group_by_position'} = 1;
+ $limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'group_func_extra_std'} = 0;
$limits{'group_func_sql_min_str'}= 1; # Can execute MIN() and MAX() on strings
$limits{'group_functions'} = 1;
- $limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'having_with_alias'} = 0;
$limits{'having_with_group'} = 1;
- $limits{'left_outer_join'} = 0;
+ $limits{'insert_select'} = 1;
+ $limits{'left_outer_join'} = 1;
$limits{'like_with_column'} = 1;
$limits{'lock_tables'} = 0; # in ATIS gives this a problem
+ $limits{'max_column_name'} = 128;
+ $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
+ $limits{'max_conditions'} = 9999; # This makes Pg real slow
+ $limits{'max_index'} = 64; # Big enough
+ $limits{'max_index_parts'} = 16;
+ $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
+ $limits{'max_text_size'} = 65000; # Good enough for test
$limits{'multi_drop'} = 1;
$limits{'order_by_position'} = 1;
+ $limits{'order_by_unused'} = 1;
+ $limits{'query_size'} = 16777216;
$limits{'select_without_from'}= 1;
$limits{'subqueries'} = 1;
$limits{'table_wildcard'} = 1;
- $limits{'max_column_name'} = 32; # Is this true
- $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
- $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
- $limits{'max_conditions'} = 30; # This makes Pg real slow
- $limits{'max_index'} = 64; # Is this true ?
- $limits{'max_index_parts'} = 16; # Is this true ?
- $limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
- $limits{'query_size'} = 16777216;
$limits{'unique_index'} = 1; # Unique index works or not
- $limits{'insert_select'} = 1;
- $limits{'working_blobs'} = 1; # If big varchar/blobs works
- $limits{'order_by_unused'} = 1;
$limits{'working_all_fields'} = 1;
+ $limits{'working_blobs'} = 1; # If big varchar/blobs works
# the different cases per query ...
$smds{'q1'} = 'b'; # with time
@@ -640,7 +636,7 @@ sub new
sub version
{
my ($version,$dir);
- foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/my/local/pgsql/")
+ foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
{
if ($dir && -e "$dir/PG_VERSION")
{
@@ -804,18 +800,29 @@ sub reconnect_on_errors
sub vacuum
{
- my ($self,$full_vacuum,$dbh_ref)=@_;
- my ($loop_time,$end_time,$dbh);
+ my ($self,$full_vacuum,$dbh_ref,@tables)=@_;
+ my ($loop_time,$end_time,$dbh,$table);
if (defined($full_vacuum))
{
$$dbh_ref->disconnect; $$dbh_ref= $self->connect();
}
$dbh=$$dbh_ref;
$loop_time=new Benchmark;
- $dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
- $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
- $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
- $dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
+ if ($#tables >= 0)
+ {
+ foreach $table (@tables)
+ {
+ $dbh->do("vacuum analyze $table") || die "Got error: $DBI::errstr when executing 'vacuum analyze $table'\n";
+ $dbh->do("vacuum $table") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
+ }
+ }
+ else
+ {
+# $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
+# $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
+ $dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum analyze'\n";
+ $dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
+ }
$end_time=new Benchmark;
print "Time for book-keeping (1): " .
Benchmark::timestr(Benchmark::timediff($end_time, $loop_time),"all") . "\n\n";
diff --git a/sql-bench/test-connect.sh b/sql-bench/test-connect.sh
index cddb32e2775..862161e3a03 100644
--- a/sql-bench/test-connect.sh
+++ b/sql-bench/test-connect.sh
@@ -266,7 +266,7 @@ for ($i=0 ; $i < $opt_loop_count ; $i++)
}
$end_time=new Benchmark;
-print "Time to select_big ($opt_loop_count): " .
+print "Time to select_big_str ($opt_loop_count): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
$sth = $dbh->do("drop table bench1" . $server->{'drop_attr'})
diff --git a/sql-bench/test-insert.sh b/sql-bench/test-insert.sh
index 82ffcd83487..b8f20b123a5 100644
--- a/sql-bench/test-insert.sh
+++ b/sql-bench/test-insert.sh
@@ -250,10 +250,6 @@ if ($limits->{'unique_index'})
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
}
-#if ($opt_fast && defined($server->{vacuum}))
-#{
-# $server->vacuum(1,\$dbh);
-#}
####
#### Do some selects on the table
@@ -1410,10 +1406,6 @@ if ($limits->{'insert_multi_value'})
print "Time for multiple_value_insert (" . ($opt_loop_count) . "): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
- if ($opt_fast && defined($server->{vacuum}))
- {
- $server->vacuum(1,\$dbh);
- }
if ($opt_lock_tables)
{
$sth = $dbh->do("UNLOCK TABLES ") || die $DBI::errstr;
diff --git a/sql/Makefile.am b/sql/Makefile.am
index c4ed5c05cd3..70415be03a4 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -36,11 +36,11 @@ LDADD = ../isam/libnisam.a \
../myisam/libmyisam.a \
../myisammrg/libmyisammrg.a \
../heap/libheap.a \
+ ../vio/libvio.a \
../mysys/libmysys.a \
../dbug/libdbug.a \
../regex/libregex.a \
../strings/libmystrings.a
- #../vio/libvio.a
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@bdb_libs@ @innodb_libs@ @pstack_libs@ \
@@ -64,7 +64,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
thr_malloc.cc item_create.cc \
field.cc key.cc sql_class.cc sql_list.cc \
- net_serv.cc violite.c net_pkg.cc lock.cc my_lock.c \
+ net_serv.cc net_pkg.cc lock.cc my_lock.c \
sql_string.cc sql_manager.cc sql_map.cc \
mysqld.cc password.c hash_filo.cc hostname.cc \
convert.cc sql_parse.cc sql_yacc.yy \
@@ -83,10 +83,10 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
slave.cc sql_repl.cc \
mini_client.cc mini_client_errors.c \
- md5.c stacktrace.c
+ md5.c stacktrace.c sql_unions.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
-mysqlbinlog_SOURCES = mysqlbinlog.cc mini_client.cc net_serv.cc violite.c \
+mysqlbinlog_SOURCES = mysqlbinlog.cc mini_client.cc net_serv.cc \
mini_client_errors.c password.c
mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS) $(mysqld_LDADD)
diff --git a/sql/field.cc b/sql/field.cc
index 1f1f00b161b..78f57c5ceb5 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1593,7 +1593,7 @@ double Field_longlong::val_real(void)
else
#endif
longlongget(j,ptr);
- return unsigned_flag ? ulonglong2double(j) : (double) j;
+ return unsigned_flag ? ulonglong2double((ulonglong) j) : (double) j;
}
longlong Field_longlong::val_int(void)
@@ -4087,6 +4087,59 @@ const char *Field_blob::unpack(char *to, const char *from)
}
+#ifdef HAVE_GEMINI_DB
+/* Blobs in Gemini tables are stored separately from the rows which contain
+** them (except for tiny blobs, which are stored in the row). For all other
+** blob types (blob, mediumblob, longblob), the row contains the length of
+** the blob data and a blob id. These methods (pack_id, get_id, and
+** unpack_id) handle packing and unpacking blob fields in Gemini rows.
+*/
+char *Field_blob::pack_id(char *to, const char *from, ulonglong id, uint max_length)
+{
+ char *save=ptr;
+ ptr=(char*) from;
+ ulong length=get_length(); // Length of from string
+ if (length > max_length)
+ {
+ ptr=to;
+ length=max_length;
+ store_length(length); // Store max length
+ ptr=(char*) from;
+ }
+ else
+ memcpy(to,from,packlength); // Copy length
+ if (length)
+ {
+ int8store(to+packlength, id);
+ }
+ ptr=save; // Restore org row pointer
+ return to+packlength+sizeof(id);
+}
+
+
+ulonglong Field_blob::get_id(const char *from)
+{
+ ulonglong id = 0;
+ ulong length=get_length(from);
+ if (length)
+ longlongget(id, from+packlength);
+ return id;
+}
+
+
+const char *Field_blob::unpack_id(char *to, const char *from, const char *bdata)
+{
+ memcpy(to,from,packlength);
+ ulong length=get_length(from);
+ from+=packlength;
+ if (length)
+ memcpy_fixed(to+packlength, &bdata, sizeof(bdata));
+ else
+ bzero(to+packlength,sizeof(bdata));
+ return from+sizeof(ulonglong);
+}
+#endif /* HAVE_GEMINI_DB */
+
/* Keys for blobs are like keys on varchars */
int Field_blob::pack_cmp(const char *a, const char *b, uint key_length)
diff --git a/sql/field.h b/sql/field.h
index 2f03d849c9b..b5d7c613701 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -869,6 +869,13 @@ public:
}
char *pack(char *to, const char *from, uint max_length= ~(uint) 0);
const char *unpack(char *to, const char *from);
+#ifdef HAVE_GEMINI_DB
+ char *pack_id(char *to, const char *from, ulonglong id,
+ uint max_length= ~(uint) 0);
+ ulonglong get_id(const char *from);
+ const char *unpack_id(char *to, const char *from, const char *bdata);
+ enum_field_types blobtype() { return (packlength == 1 ? FIELD_TYPE_TINY_BLOB : FIELD_TYPE_BLOB);}
+#endif
char *pack_key(char *to, const char *from, uint max_length);
char *pack_key_from_key_image(char* to, const char *from, uint max_length);
int pack_cmp(const char *a, const char *b, uint key_length);
diff --git a/sql/filesort.cc b/sql/filesort.cc
index e5e6c7d97c8..3b59a0c09bb 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -53,11 +53,19 @@ static int merge_index(SORTPARAM *param,uchar *sort_buffer,
static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count);
static uint sortlength(SORT_FIELD *sortorder,uint length);
- /* Makes a indexfil of recordnumbers of a sorted database */
- /* outfile is reset before data is written to it, if it wasn't
- open a new file is opened */
+ /*
+ Creates a set of pointers that can be used to read the rows
+ in sorted order. This should be done with the functions
+ in records.cc
-ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
+ Before calling filesort, one must have done
+ table->file->info(HA_STATUS_VARIABLE)
+
+ The result set is stored in table->io_cache or
+ table->record_pointers
+ */
+
+ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length,
SQL_SELECT *select, ha_rows special, ha_rows max_rows,
ha_rows *examined_rows)
{
@@ -69,19 +77,20 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
IO_CACHE tempfile,*selected_records_file,*outfile;
SORTPARAM param;
DBUG_ENTER("filesort");
- DBUG_EXECUTE("info",TEST_filesort(table,sortorder,s_length,special););
+ DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length,special););
#ifdef SKIPP_DBUG_IN_FILESORT
DBUG_PUSH(""); /* No DBUG here */
#endif
- outfile= table[0]->io_cache;
+ outfile= table->io_cache;
my_b_clear(&tempfile);
buffpek= (BUFFPEK *) NULL; sort_keys= (uchar **) NULL; error= 1;
maxbuffer=1;
- param.ref_length= table[0]->file->ref_length;
+ param.ref_length= table->file->ref_length;
param.sort_length=sortlength(sortorder,s_length)+ param.ref_length;
param.max_rows= max_rows;
param.examined_rows=0;
+ param.unique_buff=0;
if (select && select->quick)
{
@@ -106,17 +115,14 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
#ifdef CAN_TRUST_RANGE
else if (select && select->quick && select->quick->records > 0L)
{
- /* Get record-count */
- table[0]->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
- table[0]->file->records)+EXTRA_RECORDS;
+ table->file->records)+EXTRA_RECORDS;
selected_records_file=0;
}
#endif
else
{
- table[0]->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);/* Get record-count */
- records=table[0]->file->estimate_number_of_rows();
+ records=table->file->estimate_number_of_rows();
selected_records_file= 0;
}
if (param.sort_length == param.ref_length && records > param.max_rows)
@@ -170,7 +176,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
my_error(ER_OUTOFMEMORY,MYF(ME_ERROR+ME_WAITTANG),sortbuff_size);
goto err;
}
- param.sort_form= table[0];
+ param.sort_form= table;
param.end=(param.local_sortorder=sortorder)+s_length;
if ((records=find_all_keys(&param,select,sort_keys,buffpek,&maxbuffer,
&tempfile, selected_records_file)) ==
@@ -674,23 +680,22 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
int error;
uint sort_length,offset;
ulong maxcount;
- ha_rows count,max_rows;
+ ha_rows max_rows,org_max_rows;
my_off_t to_start_filepos;
uchar *strpos;
BUFFPEK *buffpek,**refpek;
QUEUE queue;
- volatile bool *killed= &current_thd->killed;
qsort2_cmp cmp;
DBUG_ENTER("merge_buffers");
statistic_increment(filesort_merge_passes, &LOCK_status);
- count=error=0;
+ error=0;
offset=(sort_length=param->sort_length)-param->ref_length;
maxcount=(ulong) (param->keys/((uint) (Tb-Fb) +1));
to_start_filepos=my_b_tell(to_file);
strpos=(uchar*) sort_buffer;
- max_rows=param->max_rows;
+ org_max_rows=max_rows=param->max_rows;
if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0,
(int (*) (void *, byte *,byte*))
@@ -698,7 +703,6 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
DBUG_RETURN(1); /* purecov: inspected */
for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
{
- count+= buffpek->count;
buffpek->base= strpos;
buffpek->max_keys=maxcount;
strpos+= (uint) (error=(int) read_to_buffer(from_file,buffpek,
@@ -724,22 +728,23 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
{
error=1; goto err; /* purecov: inspected */
}
+ buffpek->key+=sort_length;
+ buffpek->mem_count--;
+ max_rows--;
+ queue_replaced(&queue); // Top element has been used
}
else
cmp=0; // Not unique
while (queue.elements > 1)
{
- if (*killed)
- {
- error=1; goto err; /* purecov: inspected */
- }
for (;;)
{
buffpek=(BUFFPEK*) queue_top(&queue);
if (cmp) // Remove duplicates
{
- if (!cmp(&sort_length, param->unique_buff, (uchar*) buffpek->key))
+ if (!(*cmp)(&sort_length, &(param->unique_buff),
+ (uchar**) &buffpek->key))
goto skip_duplicate;
memcpy(param->unique_buff, (uchar*) buffpek->key,sort_length);
}
@@ -793,7 +798,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
break; /* One buffer have been removed */
}
else if (error == -1)
- goto err; /* purecov: inspected */
+ goto err; /* purecov: inspected */
}
queue_replaced(&queue); /* Top element has been replaced */
}
@@ -801,6 +806,20 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
buffpek=(BUFFPEK*) queue_top(&queue);
buffpek->base= sort_buffer;
buffpek->max_keys=param->keys;
+
+ /*
+ As we know all entries in the buffer are unique, we only have to
+ check if the first one is the same as the last one we wrote
+ */
+ if (cmp)
+ {
+ if (!(*cmp)(&sort_length, &(param->unique_buff), (uchar**) &buffpek->key))
+ {
+ buffpek->key+=sort_length; // Remove duplicate
+ --buffpek->mem_count;
+ }
+ }
+
do
{
if ((ha_rows) buffpek->mem_count > max_rows)
@@ -808,6 +827,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
buffpek->mem_count=(uint) max_rows;
buffpek->count=0; /* Don't read more */
}
+ max_rows-=buffpek->mem_count;
if (flag == 0)
{
if (my_b_write(to_file,(byte*) buffpek->key,
@@ -832,7 +852,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
!= -1 && error != 0);
end:
- lastbuff->count=min(count,param->max_rows);
+ lastbuff->count=min(org_max_rows-max_rows,param->max_rows);
lastbuff->file_pos=to_start_filepos;
err:
delete_queue(&queue);
diff --git a/sql/ha_gemini.cc b/sql/ha_gemini.cc
index 73241c60be7..c95a348f238 100644
--- a/sql/ha_gemini.cc
+++ b/sql/ha_gemini.cc
@@ -21,8 +21,7 @@
#include "mysql_priv.h"
#ifdef HAVE_GEMINI_DB
-
-#include "my_pthread.h"
+#include "ha_gemini.h"
#include "dbconfig.h"
#include "dsmpub.h"
#include "recpub.h"
@@ -30,11 +29,22 @@
#include <m_ctype.h>
#include <myisampack.h>
+#include <m_string.h>
#include <assert.h>
#include <hash.h>
#include <stdarg.h>
#include "geminikey.h"
-#include "ha_gemini.h"
+
+#define gemini_msg MSGD_CALLBACK
+
+pthread_mutex_t gem_mutex;
+
+static HASH gem_open_tables;
+static GEM_SHARE *get_share(const char *table_name, TABLE *table);
+static int free_share(GEM_SHARE *share, bool mutex_is_locked);
+static byte* gem_get_key(GEM_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)));
+static void gemini_lock_table_overflow_error(dsmContext_t *pcontext);
const char *ha_gemini_ext=".gmd";
const char *ha_gemini_idx_ext=".gmi";
@@ -48,6 +58,7 @@ long gemini_locktablesize;
long gemini_lock_wait_timeout;
long gemini_spin_retries;
long gemini_connection_limit;
+char *gemini_basedir;
const char gemini_dbname[] = "gemini";
dsmContext_t *pfirstContext = NULL;
@@ -61,7 +72,7 @@ TYPELIB gemini_recovery_typelib= {array_elements(gemini_recovery_names),"",
const int start_of_name = 2; /* Name passed as ./<db>/<table-name>
and we're not interested in the ./ */
-static const int keyBufSize = MYMAXKEYSIZE * 2;
+static const int keyBufSize = MAXKEYSZ + FULLKEYHDRSZ + MAX_REF_PARTS + 16;
static int gemini_tx_begin(THD *thd);
static void print_msg(THD *thd, const char *table_name, const char *op_name,
@@ -87,40 +98,56 @@ bool gemini_init(void)
goto badret;
}
+ /* dsmContextCreate and dsmContextSetString(DSM_TAGDB_DBNAME) must
+ ** be the first DSM calls we make so that we can log any errors which
+ ** occur in subsequent DSM calls. DO NOT INSERT ANY DSM CALLS IN
+ ** BETWEEN THIS COMMENT AND THE COMMENT THAT SAYS "END OF CODE..."
+ */
/* Gotta connect to the database regardless of the operation */
rc = dsmContextCreate(&pfirstContext);
if( rc != 0 )
{
- printf("dsmContextCreate failed %ld\n",rc);
+ gemini_msg(pfirstContext, "dsmContextCreate failed %l",rc);
goto badret;
}
+ /* This call will also open the log file */
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_DBNAME,
strlen(gemini_dbname), (TEXT *)gemini_dbname);
if( rc != 0 )
{
- printf("Dbname tag failed %ld\n", rc);
+ gemini_msg(pfirstContext, "Dbname tag failed %l", rc);
goto badret;
}
+ /* END OF CODE NOT TO MESS WITH */
fn_format(pmsgsfile, GEM_MSGS_FILE, language, ".db", 2 | 4);
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_MSGS_FILE,
strlen(pmsgsfile), (TEXT *)pmsgsfile);
if( rc != 0 )
{
- printf("MSGS_DIR tag failed %ld\n", rc);
+ gemini_msg(pfirstContext, "MSGS_DIR tag failed %l", rc);
+ goto badret;
+ }
+
+ strxmov(pmsgsfile, gemini_basedir, GEM_SYM_FILE, NullS);
+ rc = dsmContextSetString(pfirstContext, DSM_TAGDB_SYMFILE,
+ strlen(pmsgsfile), (TEXT *)pmsgsfile);
+ if( rc != 0 )
+ {
+ gemini_msg(pfirstContext, "SYMFILE tag failed %l", rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_STARTUP);
if ( rc != 0 )
{
- printf("ACCESS TAG set failed %ld\n",rc);
+ gemini_msg(pfirstContext, "ACCESS TAG set failed %l",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_ENV, DSM_SQL_ENGINE);
if( rc != 0 )
{
- printf("ACCESS_ENV set failed %ld",rc);
+ gemini_msg(pfirstContext, "ACCESS_ENV set failed %l",rc);
goto badret;
}
@@ -129,7 +156,7 @@ bool gemini_init(void)
(TEXT *)mysql_real_data_home);
if( rc != 0 )
{
- printf("Datadir tag failed %ld\n", rc);
+ gemini_msg(pfirstContext, "Datadir tag failed %l", rc);
goto badret;
}
@@ -137,7 +164,7 @@ bool gemini_init(void)
gemini_connection_limit);
if(rc != 0)
{
- printf("MAX_USERS tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "MAX_USERS tag set failed %l",rc);
goto badret;
}
@@ -145,7 +172,7 @@ bool gemini_init(void)
gemini_lock_wait_timeout);
if(rc != 0)
{
- printf("MAX_LOCK_ENTRIES tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "MAX_LOCK_ENTRIES tag set failed %l",rc);
goto badret;
}
@@ -153,7 +180,7 @@ bool gemini_init(void)
gemini_locktablesize);
if(rc != 0)
{
- printf("MAX_LOCK_ENTRIES tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "MAX_LOCK_ENTRIES tag set failed %l",rc);
goto badret;
}
@@ -161,7 +188,7 @@ bool gemini_init(void)
gemini_spin_retries);
if(rc != 0)
{
- printf("SPIN_AMOUNT tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "SPIN_AMOUNT tag set failed %l",rc);
goto badret;
}
@@ -172,22 +199,22 @@ bool gemini_init(void)
gemini_buffer_cache);
if(rc != 0)
{
- printf("DB_BUFFERS tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "DB_BUFFERS tag set failed %l",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_FLUSH_AT_COMMIT,
- ((gemini_options & GEMOPT_FLUSH_LOG) ? 1 : 0));
+ ((gemini_options & GEMOPT_FLUSH_LOG) ? 0 : 1));
if(rc != 0)
{
- printf("FLush_Log_At_Commit tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "FLush_Log_At_Commit tag set failed %l",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_DIRECT_IO,
((gemini_options & GEMOPT_UNBUFFERED_IO) ? 1 : 0));
if(rc != 0)
{
- printf("DIRECT_IO tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "DIRECT_IO tag set failed %l",rc);
goto badret;
}
@@ -195,10 +222,20 @@ bool gemini_init(void)
((gemini_recovery_options & GEMINI_RECOVERY_FULL) ? 1 : 0));
if(rc != 0)
{
- printf("CRASH_PROTECTION tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "CRASH_PROTECTION tag set failed %l",rc);
goto badret;
}
+ if (gemini_recovery_options & GEMINI_RECOVERY_FORCE)
+ {
+ rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_FORCE_ACCESS, 1);
+ if(rc != 0)
+ {
+ gemini_msg(pfirstContext, "FORCE_ACCESS tag set failed %l",rc);
+ goto badret;
+ }
+ }
+
/* cluster size will come in bytes, need to convert it to
16 K units. */
gemini_log_cluster_size = (gemini_log_cluster_size + 16383) / 16384;
@@ -207,7 +244,7 @@ bool gemini_init(void)
if(rc != 0)
{
- printf("CRASH_PROTECTION tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "CRASH_PROTECTION tag set failed %l",rc);
goto badret;
}
@@ -215,12 +252,20 @@ bool gemini_init(void)
DSM_DB_OPENDB | DSM_DB_OPENFILE);
if( rc != 0 )
{
- printf("dsmUserConnect failed rc = %ld\n",rc);
+ /* Message is output in dbenv() */
goto badret;
}
/* Set access to shared for subsequent user connects */
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_SHARED);
+
rc = gemini_helper_threads(pfirstContext);
+
+
+ (void) hash_init(&gem_open_tables,32,0,0,
+ (hash_get_key) gem_get_key,0,0);
+ pthread_mutex_init(&gem_mutex,NULL);
+
+
DBUG_RETURN(0);
badret:
@@ -231,30 +276,40 @@ badret:
static int gemini_helper_threads(dsmContext_t *pContext)
{
int rc = 0;
+ int i;
+ pthread_attr_t thr_attr;
+
pthread_t hThread;
DBUG_ENTER("gemini_helper_threads");
- rc = pthread_create (&hThread, 0, gemini_watchdog, (void *)pContext);
+
+ (void) pthread_attr_init(&thr_attr);
+#if !defined(HAVE_DEC_3_2_THREADS)
+ pthread_attr_setscope(&thr_attr,PTHREAD_SCOPE_SYSTEM);
+ (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
+ pthread_attr_setstacksize(&thr_attr,32768);
+#endif
+ rc = pthread_create (&hThread, &thr_attr, gemini_watchdog, (void *)pContext);
if (rc)
{
- printf("Can't create gemini watchdog thread");
+ gemini_msg(pContext, "Can't Create gemini watchdog thread");
goto done;
}
if(!gemini_io_threads)
goto done;
- rc = pthread_create(&hThread, 0, gemini_rl_writer, (void *)pContext);
+ rc = pthread_create(&hThread, &thr_attr, gemini_rl_writer, (void *)pContext);
if(rc)
{
- printf("Can't create gemini recovery log writer thread");
+ gemini_msg(pContext, "Can't create Gemini recovery log writer thread");
goto done;
}
- for( int i = gemini_io_threads - 1;i;i--)
+ for(i = gemini_io_threads - 1;i;i--)
{
- rc = pthread_create(&hThread, 0, gemini_apw, (void *)pContext);
+ rc = pthread_create(&hThread, &thr_attr, gemini_apw, (void *)pContext);
if(rc)
{
- printf("Can't create gemini page writer thread");
+ gemini_msg(pContext, "Can't create Gemini database page writer thread");
goto done;
}
}
@@ -273,7 +328,7 @@ pthread_handler_decl(gemini_watchdog,arg )
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed for watchdog %d\n",rc);
+ gemini_msg(pcontext, "dsmContextCopy failed for Gemini watchdog %d",rc);
return 0;
}
@@ -281,7 +336,7 @@ pthread_handler_decl(gemini_watchdog,arg )
if( rc != 0 )
{
- printf("dsmUserConnect failed for watchdog %d\n",rc);
+ gemini_msg(pcontext, "dsmUserConnect failed for Gemini watchdog %d",rc);
return 0;
}
@@ -311,7 +366,7 @@ pthread_handler_decl(gemini_rl_writer,arg )
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed for recovery log writer %d\n",rc);
+ gemini_msg(pcontext, "dsmContextCopy failed for Gemini recovery log writer %d",rc);
return 0;
}
@@ -319,7 +374,7 @@ pthread_handler_decl(gemini_rl_writer,arg )
if( rc != 0 )
{
- printf("dsmUserConnect failed for recovery log writer %d\n",rc);
+ gemini_msg(pcontext, "dsmUserConnect failed for Gemini recovery log writer %d",rc);
return 0;
}
@@ -348,7 +403,7 @@ pthread_handler_decl(gemini_apw,arg )
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed for gemini page writer %d\n",rc);
+ gemini_msg(pcontext, "dsmContextCopy failed for Gemini page writer %d",rc);
my_thread_end();
return 0;
}
@@ -356,7 +411,7 @@ pthread_handler_decl(gemini_apw,arg )
if( rc != 0 )
{
- printf("dsmUserConnect failed for gemini page writer %d\n",rc);
+ gemini_msg(pcontext, "dsmUserConnect failed for Gemini page writer %d",rc);
my_thread_end();
return 0;
}
@@ -388,7 +443,7 @@ int gemini_set_option_long(int optid, long optval)
}
if (rc)
{
- printf("SPIN_AMOUNT tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "SPIN_AMOUNT tag set failed %l",rc);
}
else
{
@@ -410,7 +465,7 @@ static int gemini_connect(THD *thd)
DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed %ld\n",rc);
+ gemini_msg(pfirstContext, "dsmContextCopy failed %l",rc);
return(rc);
}
@@ -418,7 +473,7 @@ static int gemini_connect(THD *thd)
if( rc != 0 )
{
- printf("dsmUserConnect failed %ld\n",rc);
+ gemini_msg(pfirstContext, "dsmUserConnect failed %l",rc);
return(rc);
}
@@ -444,6 +499,9 @@ bool gemini_end(void)
THD *thd;
DBUG_ENTER("gemini_end");
+
+ hash_free(&gem_open_tables);
+ pthread_mutex_destroy(&gem_mutex);
if(pfirstContext)
{
rc = dsmShutdownSet(pfirstContext, DSM_SHUTDOWN_NORMAL);
@@ -534,6 +592,24 @@ int gemini_rollback_to_savepoint(THD *thd)
DBUG_RETURN(rc);
}
+int gemini_recovery_logging(THD *thd, bool on)
+{
+ int error;
+ int noLogging;
+
+ if(!thd->gemini.context)
+ return 0;
+
+ if(on)
+ noLogging = 0;
+ else
+ noLogging = 1;
+
+ error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
+ DSM_TAGCONTEXT_NO_LOGGING,noLogging);
+ return error;
+}
+
/* gemDataType - translates from mysql data type constant to gemini
key services data type contstant */
int gemDataType ( int mysqlType )
@@ -599,8 +675,13 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_gemini::open");
thd = current_thd;
- thr_lock_init(&alock);
- thr_lock_data_init(&alock,&lock,(void*)0);
+ /* Init shared structure */
+ if (!(share=get_share(name,table)))
+ {
+ DBUG_RETURN(1); /* purecov: inspected */
+ }
+ thr_lock_data_init(&share->lock,&lock,(void*) 0);
+
ref_length = sizeof(dsmRecid_t);
if(thd->gemini.context == NULL)
@@ -610,7 +691,7 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
if(rc)
return rc;
}
- if (!(rec_buff=my_malloc(table->rec_buff_length,
+ if (!(rec_buff=(byte*)my_malloc(table->rec_buff_length,
MYF(MY_WME))))
{
DBUG_RETURN(1);
@@ -635,6 +716,12 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
rc = dsmObjectNameToNum((dsmContext_t *)thd->gemini.context,
(dsmText_t *)name_buff,
&tableId);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to find table number for %s", name_buff);
+ DBUG_RETURN(rc);
+ }
}
tableNumber = tableId;
@@ -649,8 +736,33 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
crashed while being in the midst of a repair operation */
rc = dsmTableStatus((dsmContext_t *)thd->gemini.context,
tableNumber,&tableStatus);
- if(tableStatus)
+ if(tableStatus == DSM_OBJECT_IN_REPAIR)
tableStatus = HA_ERR_CRASHED;
+
+ pthread_mutex_lock(&share->mutex);
+ share->use_count++;
+ pthread_mutex_unlock(&share->mutex);
+
+ if (table->blob_fields)
+ {
+ /* Allocate room for the blob ids from an unpacked row. Note that
+ ** we may not actually need all of this space because tiny blobs
+ ** are stored in the packed row, not in a separate storage object
+ ** like larger blobs. But we allocate an entry for all blobs to
+ ** keep the code simpler.
+ */
+ pBlobDescs = (gemBlobDesc_t *)my_malloc(
+ table->blob_fields * sizeof(gemBlobDesc_t),
+ MYF(MY_WME | MY_ZEROFILL));
+ }
+ else
+ {
+ pBlobDescs = 0;
+ }
+
+ get_index_stats(thd);
+ info(HA_STATUS_CONST);
+
DBUG_RETURN (rc);
}
@@ -680,6 +792,12 @@ int ha_gemini::index_open(char *tableName)
rc = dsmObjectNameToNum((dsmContext_t *)thd->gemini.context,
(dsmText_t *)tableName,
&objectNumber);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to find Index number for %s", tableName);
+ DBUG_RETURN(rc);
+ }
pindexNumbers[i] = objectNumber;
}
}
@@ -692,12 +810,22 @@ int ha_gemini::index_open(char *tableName)
int ha_gemini::close(void)
{
DBUG_ENTER("ha_gemini::close");
- thr_lock_delete(&alock);
- my_free(rec_buff,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)rec_buff,MYF(MY_ALLOW_ZERO_PTR));
rec_buff = 0;
my_free((char *)pindexNumbers,MYF(MY_ALLOW_ZERO_PTR));
pindexNumbers = 0;
- DBUG_RETURN(0);
+
+ if (pBlobDescs)
+ {
+ for (uint i = 0; i < table->blob_fields; i++)
+ {
+ my_free((char*)pBlobDescs[i].pBlob, MYF(MY_ALLOW_ZERO_PTR));
+ }
+ my_free((char *)pBlobDescs, MYF(0));
+ pBlobDescs = 0;
+ }
+
+ DBUG_RETURN(free_share(share, 0));
}
@@ -709,7 +837,7 @@ int ha_gemini::write_row(byte * record)
DBUG_ENTER("write_row");
- if(tableStatus)
+ if(tableStatus == HA_ERR_CRASHED)
DBUG_RETURN(tableStatus);
thd = current_thd;
@@ -737,10 +865,11 @@ int ha_gemini::write_row(byte * record)
/* A set insert-id statement so set the auto-increment value if this
value is higher than it's current value */
error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
- tableNumber, (ULONG64 *)&nr);
+ tableNumber, (ULONG64 *)&nr,1);
if(thd->next_insert_id > nr)
{
- error = dsmTableAutoIncrementSet((dsmContext_t *)thd->gemini.context,tableNumber,
+ error = dsmTableAutoIncrementSet((dsmContext_t *)thd->gemini.context,
+ tableNumber,
(ULONG64)thd->next_insert_id);
}
}
@@ -749,11 +878,13 @@ int ha_gemini::write_row(byte * record)
}
dsmRecord.table = tableNumber;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
if ((error=pack_row((byte **)&dsmRecord.pbuffer, (int *)&dsmRecord.recLength,
- record)))
+ record, FALSE)))
+ {
DBUG_RETURN(error);
+ }
error = dsmRecordCreate((dsmContext_t *)thd->gemini.context,
&dsmRecord,0);
@@ -769,6 +900,8 @@ int ha_gemini::write_row(byte * record)
thd->gemini.needSavepoint = 1;
}
}
+ if(error == DSM_S_RQSTREJ)
+ error = HA_ERR_LOCK_WAIT_TIMEOUT;
DBUG_RETURN(error);
}
@@ -777,10 +910,17 @@ longlong ha_gemini::get_auto_increment()
{
longlong nr;
int error;
+ int update;
THD *thd=current_thd;
+ if(thd->lex.sql_command == SQLCOM_SHOW_TABLES)
+ update = 0;
+ else
+ update = 1;
+
error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
- tableNumber, (ULONG64 *)&nr);
+ tableNumber, (ULONG64 *)&nr,
+ update);
return nr;
}
@@ -828,8 +968,8 @@ int ha_gemini::handleIndexEntry(const byte * record, dsmRecid_t recid,
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
- theKey.akey.keyLen = (COUNT)keyStringLen - 3;
- theKey.akey.unknown_comp = thereIsAnull;
+ theKey.akey.keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
+ theKey.akey.unknown_comp = (dsmBoolean_t)thereIsAnull;
theKey.akey.word_index = 0;
theKey.akey.descending_key =0;
if(option == KEY_CREATE)
@@ -880,6 +1020,7 @@ int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
int componentLen;
int fieldType;
int isNull;
+ uint key_part_length;
KEY_PART_INFO *key_part;
@@ -892,21 +1033,35 @@ int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
unsigned char *pos;
key_part = pkeyinfo->key_part + i;
+ key_part_length = key_part->length;
fieldType = gemDataType(key_part->field->type());
- if(fieldType == GEM_CHAR)
+ switch (fieldType)
{
+ case GEM_CHAR:
+ {
/* Save the current ptr to the field in case we're building a key
to remove an old key value when an indexed character column
gets updated. */
char *ptr = key_part->field->ptr;
key_part->field->ptr = (char *)record + key_part->offset;
- key_part->field->sort_string(rec_buff, key_part->length);
+ key_part->field->sort_string((char*)rec_buff, key_part->length);
key_part->field->ptr = ptr;
pos = (unsigned char *)rec_buff;
- }
- else
- {
+ }
+ break;
+
+ case GEM_TINYBLOB:
+ case GEM_BLOB:
+ case GEM_MEDIUMBLOB:
+ case GEM_LONGBLOB:
+ ((Field_blob*)key_part->field)->get_ptr((char**)&pos);
+ key_part_length = ((Field_blob*)key_part->field)->get_length(
+ (char*)record + key_part->offset);
+ break;
+
+ default:
pos = (unsigned char *)record + key_part->offset;
+ break;
}
isNull = record[key_part->null_offset] & key_part->null_bit;
@@ -914,7 +1069,7 @@ int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
*thereIsAnull = true;
rc = gemFieldToIdxComponent(pos,
- (unsigned long) key_part->length,
+ (unsigned long) key_part_length,
fieldType,
isNull ,
key_part->field->flags & UNSIGNED_FLAG,
@@ -951,7 +1106,7 @@ int ha_gemini::update_row(const byte * old_record, byte * new_record)
}
for (uint keynr=0 ; keynr < table->keys ; keynr++)
{
- if(key_cmp(keynr,old_record, new_record))
+ if(key_cmp(keynr,old_record, new_record,false))
{
error = handleIndexEntry(old_record,lastRowid,KEY_DELETE,keynr);
if(error)
@@ -973,10 +1128,10 @@ int ha_gemini::update_row(const byte * old_record, byte * new_record)
dsmRecord.table = tableNumber;
dsmRecord.recid = lastRowid;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
if ((error=pack_row((byte **)&dsmRecord.pbuffer, (int *)&dsmRecord.recLength,
- new_record)))
+ new_record, TRUE)))
{
DBUG_RETURN(error);
}
@@ -992,6 +1147,7 @@ int ha_gemini::delete_row(const byte * record)
int error = 0;
dsmRecord_t dsmRecord;
THD *thd = current_thd;
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
DBUG_ENTER("delete_row");
statistic_increment(ha_delete_count,&LOCK_status);
@@ -999,9 +1155,7 @@ int ha_gemini::delete_row(const byte * record)
if(thd->gemini.needSavepoint)
{
thd->gemini.savepoint++;
- error = dsmTransaction((dsmContext_t *)thd->gemini.context,
- &thd->gemini.savepoint,
- DSMTXN_SAVE, 0, 0);
+ error = dsmTransaction(pcontext, &thd->gemini.savepoint, DSMTXN_SAVE, 0, 0);
if (error)
DBUG_RETURN(error);
thd->gemini.needSavepoint = 0;
@@ -1013,8 +1167,27 @@ int ha_gemini::delete_row(const byte * record)
error = handleIndexEntries(record, dsmRecord.recid,KEY_DELETE);
if(!error)
{
- error = dsmRecordDelete((dsmContext_t *)thd->gemini.context,
- &dsmRecord, 0, NULL);
+ error = dsmRecordDelete(pcontext, &dsmRecord, 0, NULL);
+ }
+
+ /* Delete any blobs associated with this row */
+ if (table->blob_fields)
+ {
+ dsmBlob_t gemBlob;
+
+ gemBlob.areaType = DSMOBJECT_BLOB;
+ gemBlob.blobObjNo = tableNumber;
+ for (uint i = 0; i < table->blob_fields; i++)
+ {
+ if (pBlobDescs[i].blobId)
+ {
+ gemBlob.blobId = pBlobDescs[i].blobId;
+ my_free((char *)pBlobDescs[i].pBlob, MYF(MY_ALLOW_ZERO_PTR));
+ dsmBlobStart(pcontext, &gemBlob);
+ dsmBlobDelete(pcontext, &gemBlob, NULL);
+ /* according to DSM doc, no need to call dsmBlobEnd() */
+ }
+ }
}
DBUG_RETURN(error);
@@ -1023,7 +1196,6 @@ int ha_gemini::delete_row(const byte * record)
int ha_gemini::index_init(uint keynr)
{
int error = 0;
- int keyStringLen;
THD *thd;
DBUG_ENTER("index_init");
thd = current_thd;
@@ -1046,19 +1218,9 @@ int ha_gemini::index_init(uint keynr)
}
pbracketBase->index = 0;
pbracketLimit->index = (dsmIndex_t)pindexNumbers[keynr];
- pbracketLimit->keycomps = 1;
- keyStringLen = 0;
- error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
- pbracketLimit->index);
-
- /* We have to subtract three here since cxKeyPrepare
- expects that the three lead bytes of the header are
- not counted in this length -- But cxKeyPrepare also
- expects that these three bytes are present in the keystr */
- pbracketLimit->keyLen = (COUNT)keyStringLen - 3;
-
pbracketBase->descending_key = pbracketLimit->descending_key = 0;
pbracketBase->ksubstr = pbracketLimit->ksubstr = 0;
+ pbracketLimit->keycomps = pbracketBase->keycomps = 1;
pfoundKey = (dsmKey_t *)my_malloc(sizeof(dsmKey_t) + keyBufSize,MYF(MY_WME));
if(!pfoundKey)
@@ -1130,6 +1292,7 @@ int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
{
uint offset=0;
unsigned char *pos;
+ uint key_part_length = key_part->length;
int fieldType;
if (key_part->null_bit)
@@ -1141,7 +1304,7 @@ int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
key_ptr+= key_part->store_length;
rc = gemFieldToIdxComponent(
(unsigned char *)key_ptr + offset,
- (unsigned long) key_part->length,
+ (unsigned long) key_part_length,
0,
1 , /* Tells it to build a null component */
key_part->field->flags & UNSIGNED_FLAG,
@@ -1153,20 +1316,31 @@ int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
}
}
fieldType = gemDataType(key_part->field->type());
- if(fieldType == GEM_CHAR)
+ switch (fieldType)
{
- key_part->field->store(key_ptr + offset, key_part->length);
- key_part->field->sort_string(rec_buff, key_part->length);
+ case GEM_CHAR:
+ key_part->field->store((char*)key_ptr + offset, key_part->length);
+ key_part->field->sort_string((char*)rec_buff, key_part->length);
pos = (unsigned char *)rec_buff;
- }
- else
- {
+ break;
+
+ case GEM_TINYBLOB:
+ case GEM_BLOB:
+ case GEM_MEDIUMBLOB:
+ case GEM_LONGBLOB:
+ ((Field_blob*)key_part->field)->get_ptr((char**)&pos);
+ key_part_length = ((Field_blob*)key_part->field)->get_length(
+ (char*)key_ptr + offset);
+ break;
+
+ default:
pos = (unsigned char *)key_ptr + offset;
+ break;
}
rc = gemFieldToIdxComponent(
pos,
- (unsigned long) key_part->length,
+ (unsigned long) key_part_length,
fieldType,
0 ,
key_part->field->flags & UNSIGNED_FLAG,
@@ -1189,7 +1363,7 @@ void ha_gemini::unpack_key(char *record, dsmKey_t *key, uint index)
int fieldIsNull, fieldType;
int rc = 0;
- char unsigned *pos= &key->keystr[7];
+ char unsigned *pos= &key->keystr[FULLKEYHDRSZ+4/* 4 for the index number*/];
for ( ; key_part != end; key_part++)
{
@@ -1202,7 +1376,8 @@ void ha_gemini::unpack_key(char *record, dsmKey_t *key, uint index)
}
rc = gemIdxComponentToField(pos, fieldType,
(unsigned char *)record + key_part->field->offset(),
- key_part->field->field_length,
+ //key_part->field->field_length,
+ key_part->length,
key_part->field->decimals(),
&fieldIsNull);
if(fieldIsNull)
@@ -1266,12 +1441,12 @@ int ha_gemini::index_read(byte * buf, const byte * key,
pbracketLimit->keyLen = componentLen;
}
- /* We have to subtract three here since cxKeyPrepare
+ /* We have to subtract the header size here since cxKeyPrepare
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
- pbracketBase->keyLen -= 3;
- pbracketLimit->keyLen -= 3;
+ pbracketBase->keyLen -= FULLKEYHDRSZ;
+ pbracketLimit->keyLen -= FULLKEYHDRSZ;
thd = current_thd;
@@ -1294,7 +1469,7 @@ int ha_gemini::index_next(byte * buf)
dsmMask_t findMode;
DBUG_ENTER("index_next");
- if(tableStatus)
+ if(tableStatus == HA_ERR_CRASHED)
DBUG_RETURN(tableStatus);
thd = current_thd;
@@ -1304,9 +1479,12 @@ int ha_gemini::index_next(byte * buf)
error = gemKeyLow(pbracketBase->keystr, &keyStringLen,
pbracketLimit->index);
- pbracketBase->keyLen = (COUNT)keyStringLen - 3;
+ pbracketBase->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
pbracketBase->index = pbracketLimit->index;
- pbracketBase->keycomps = 1;
+ error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
+ pbracketLimit->index);
+ pbracketLimit->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
+
findMode = DSMFINDFIRST;
}
else
@@ -1369,24 +1547,20 @@ int ha_gemini::index_last(byte * buf)
error = gemKeyLow(pbracketBase->keystr, &keyStringLen,
pbracketLimit->index);
- if(error)
- goto errorReturn;
- pbracketBase->keyLen = (COUNT)keyStringLen - 3;
+ pbracketBase->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
pbracketBase->index = pbracketLimit->index;
- pbracketBase->keycomps = 1;
+ error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
+ pbracketLimit->index);
+ pbracketLimit->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
error = findRow(thd,DSMFINDLAST,buf);
-errorReturn:
if (error == DSM_S_ENDLOOP)
error = HA_ERR_END_OF_FILE;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
-
- table->status = error ? STATUS_NOT_FOUND : 0;
- DBUG_RETURN(error);
}
int ha_gemini::rnd_init(bool scan)
@@ -1414,7 +1588,7 @@ int ha_gemini::rnd_next(byte *buf)
DBUG_ENTER("rnd_next");
- if(tableStatus)
+ if(tableStatus == HA_ERR_CRASHED)
DBUG_RETURN(tableStatus);
thd = current_thd;
@@ -1429,7 +1603,7 @@ int ha_gemini::rnd_next(byte *buf)
dsmRecord.recid = lastRowid;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
error = dsmTableScan((dsmContext_t *)thd->gemini.context,
&dsmRecord, DSMFINDNEXT, lockMode, 0);
@@ -1437,17 +1611,23 @@ int ha_gemini::rnd_next(byte *buf)
if(!error)
{
lastRowid = dsmRecord.recid;
- unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
+ error = unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
}
if(!error)
;
- else if (error == DSM_S_ENDLOOP)
- error = HA_ERR_END_OF_FILE;
- else if (error == DSM_S_RQSTREJ)
- error = HA_ERR_LOCK_WAIT_TIMEOUT;
- else if (error == DSM_S_LKTBFULL)
- error = HA_ERR_LOCK_TABLE_FULL;
-
+ else
+ {
+ lastRowid = 0;
+ if (error == DSM_S_ENDLOOP)
+ error = HA_ERR_END_OF_FILE;
+ else if (error == DSM_S_RQSTREJ)
+ error = HA_ERR_LOCK_WAIT_TIMEOUT;
+ else if (error == DSM_S_LKTBFULL)
+ {
+ error = HA_ERR_LOCK_TABLE_FULL;
+ gemini_lock_table_overflow_error((dsmContext_t *)thd->gemini.context);
+ }
+ }
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
@@ -1500,14 +1680,14 @@ int ha_gemini::fetch_row(void *gemini_context,const byte *buf)
dsmRecord.recid = lastRowid;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
rc = dsmRecordGet((dsmContext_t *)gemini_context,
&dsmRecord, 0);
if(!rc)
{
- unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
+ rc = unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
}
DBUG_RETURN(rc);
@@ -1544,7 +1724,7 @@ int ha_gemini::findRow(THD *thd, dsmMask_t findMode, byte *buf)
if(key_read)
{
- unpack_key(buf, pkey, active_index);
+ unpack_key((char*)buf, pkey, active_index);
}
if(!key_read) /* unpack_key may have turned off key_read */
{
@@ -1554,10 +1734,17 @@ int ha_gemini::findRow(THD *thd, dsmMask_t findMode, byte *buf)
errorReturn:
if(!rc)
;
- else if(rc == DSM_S_RQSTREJ)
- rc = HA_ERR_LOCK_WAIT_TIMEOUT;
- else if (rc == DSM_S_LKTBFULL)
- rc = HA_ERR_LOCK_TABLE_FULL;
+ else
+ {
+ lastRowid = 0;
+ if(rc == DSM_S_RQSTREJ)
+ rc = HA_ERR_LOCK_WAIT_TIMEOUT;
+ else if (rc == DSM_S_LKTBFULL)
+ {
+ rc = HA_ERR_LOCK_TABLE_FULL;
+ gemini_lock_table_overflow_error((dsmContext_t *)thd->gemini.context);
+ }
+ }
DBUG_RETURN(rc);
}
@@ -1578,25 +1765,47 @@ void ha_gemini::info(uint flag)
dsmStatus_t error;
ULONG64 rows;
+ if(thd->gemini.context == NULL)
+ {
+ /* Need to get this thread a connection into the database */
+ error = gemini_connect(thd);
+ if(error)
+ DBUG_VOID_RETURN;
+ }
+
error = dsmRowCount((dsmContext_t *)thd->gemini.context,tableNumber,&rows);
records = (ha_rows)rows;
deleted = 0;
}
- else if ((flag & HA_STATUS_CONST))
+ if ((flag & HA_STATUS_CONST))
{
- ;
+ ha_rows *rec_per_key = share->rec_per_key;
+ for (uint i = 0; i < table->keys; i++)
+ for(uint k=0;
+ k < table->key_info[i].key_parts; k++,rec_per_key++)
+ table->key_info[i].rec_per_key[k] = *rec_per_key;
}
- else if ((flag & HA_STATUS_ERRKEY))
+ if ((flag & HA_STATUS_ERRKEY))
{
errkey=last_dup_key;
}
- else if ((flag & HA_STATUS_TIME))
+ if ((flag & HA_STATUS_TIME))
{
;
}
- else if ((flag & HA_STATUS_AUTO))
+ if ((flag & HA_STATUS_AUTO))
{
- ;
+ THD *thd = current_thd;
+ dsmStatus_t error;
+
+ error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
+ tableNumber,
+ (ULONG64 *)&auto_increment_value,
+ 0);
+ /* Should return the next auto-increment value that
+ will be given -- so we need to increment the one dsm
+ currently reports. */
+ auto_increment_value++;
}
DBUG_VOID_RETURN;
@@ -1658,7 +1867,22 @@ int ha_gemini::external_lock(THD *thd, int lock_type)
thd->gemini.lock_count = 1;
thd->gemini.tx_isolation = thd->tx_isolation;
}
-
+ // lockMode has already been set in store_lock
+ // If the statement about to be executed calls for
+ // exclusive locks and we're running at read uncommitted
+ // isolation level then raise an error.
+ if(thd->gemini.tx_isolation == ISO_READ_UNCOMMITTED)
+ {
+ if(lockMode == DSM_LK_EXCL)
+ {
+ DBUG_RETURN(HA_ERR_READ_ONLY_TRANSACTION);
+ }
+ else
+ {
+ lockMode = DSM_LK_NOLOCK;
+ }
+ }
+
if(thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
@@ -1678,6 +1902,8 @@ int ha_gemini::external_lock(THD *thd, int lock_type)
rc = dsmObjectLock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,DSMOBJECT_TABLE,0,
lockMode, 1, 0);
+ if(rc == DSM_S_RQSTREJ)
+ rc = HA_ERR_LOCK_WAIT_TIMEOUT;
}
}
else /* lock_type == F_UNLK */
@@ -1703,18 +1929,24 @@ THR_LOCK_DATA **ha_gemini::store_lock(THD *thd, THR_LOCK_DATA **to,
!thd->in_lock_tables)
lock_type = TL_WRITE_ALLOW_WRITE;
lock.type=lock_type;
-
- if(thd->gemini.tx_isolation == ISO_READ_UNCOMMITTED)
- lockMode = DSM_LK_NOLOCK;
- else if(table->reginfo.lock_type > TL_WRITE_ALLOW_READ)
- lockMode = DSM_LK_EXCL;
- else
- lockMode = DSM_LK_SHARE;
}
+ if(table->reginfo.lock_type > TL_WRITE_ALLOW_READ)
+ lockMode = DSM_LK_EXCL;
+ else
+ lockMode = DSM_LK_SHARE;
+
*to++= &lock;
return to;
}
+void ha_gemini::update_create_info(HA_CREATE_INFO *create_info)
+{
+ table->file->info(HA_STATUS_AUTO | HA_STATUS_CONST);
+ if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value=auto_increment_value;
+ }
+}
int ha_gemini::create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info)
@@ -1777,7 +2009,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)"gemini_data_area");
if( rc != 0 )
{
- printf("dsmAreaNew failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmAreaNew failed %l",rc);
return(rc);
}
@@ -1787,7 +2019,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)&name_buff[start_of_name]);
if( rc != 0 )
{
- printf("dsmExtentCreate failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmExtentCreate failed %l",rc);
return(rc);
}
@@ -1805,6 +2037,20 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)&name_buff[start_of_name],
&dummy,&dummy);
+ if (rc == 0 && table->blob_fields)
+ {
+ /* create a storage object record for blob fields */
+ rc = dsmObjectCreate(pcontext, areaNumber, &tableNumber,
+ DSMOBJECT_BLOB,0,0,0,
+ (dsmText_t *)&name_buff[start_of_name],
+ &dummy,&dummy);
+ if( rc != 0 )
+ {
+ gemini_msg(pcontext, "dsmObjectCreate for blob object failed %l",rc);
+ return(rc);
+ }
+ }
+
if(rc == 0 && form->keys)
{
fn_format(name_buff, name, "", ha_gemini_idx_ext, 2 | 4);
@@ -1814,7 +2060,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)"gemini_index_area");
if( rc != 0 )
{
- printf("dsmAreaNew failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmAreaNew failed %l",rc);
return(rc);
}
/* Create an extent */
@@ -1823,7 +2069,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)&name_buff[start_of_name]);
if( rc != 0 )
{
- printf("dsmExtentCreate failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmExtentCreate failed %l",rc);
return(rc);
}
@@ -1859,10 +2105,11 @@ int ha_gemini::create(const char *name, register TABLE *form,
}
}
- rc = dsmTableAutoIncrementSet(pcontext,tableNumber,
- create_info->auto_increment_value);
-
-
+ /* The auto_increment value is the next one to be given
+ out so give dsm one less than this value */
+ if(create_info->auto_increment_value)
+ rc = dsmTableAutoIncrementSet(pcontext,tableNumber,
+ create_info->auto_increment_value-1);
/* Get a table lock on this table in case this table is being
created as part of an alter table statement. We don't want
@@ -1950,26 +2197,25 @@ int ha_gemini::delete_table(const char *pname)
(dsmObject_t *)&tableNum);
if (rc)
{
- printf("Cound not find table number for %s with string %s, %ld\n",
- pname,name_buff,rc);
+ gemini_msg(pcontext, "Unable to find table number for %s", name_buff);
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
- rc = dsmObjectInfo(pcontext, tableNum, DSMOBJECT_MIXTABLE, &tableArea,
- &objectAttr, &associate, &associateType, &block, &root);
+ rc = dsmObjectInfo(pcontext, tableNum, DSMOBJECT_MIXTABLE, tableNum,
+ &tableArea, &objectAttr, &associateType, &block, &root);
if (rc)
{
- printf("Failed to get area number for table %d, %s, return %ld\n",
+ gemini_msg(pcontext, "Failed to get area number for table %d, %s, return %l",
tableNum, pname, rc);
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
}
@@ -1979,14 +2225,14 @@ int ha_gemini::delete_table(const char *pname)
rc = dsmObjectDeleteAssociate(pcontext, tableNum, &indexArea);
if (rc)
{
- printf("Error deleting storage objects for table number %d, return %ld\n",
+ gemini_msg(pcontext, "Error deleting storage objects for table number %d, return %l",
(int)tableNum, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
@@ -1994,33 +2240,33 @@ int ha_gemini::delete_table(const char *pname)
if (indexArea != DSMAREA_INVALID)
{
/* Delete the extents for both Index and Table */
- rc = dsmExtentDelete(pcontext, indexArea, 0);
+ rc = dsmExtentDelete(pcontext, indexArea);
rc = dsmAreaDelete(pcontext, indexArea);
if (rc)
{
- printf("Error deleting Index Area %ld, return %ld\n", indexArea, rc);
+ gemini_msg(pcontext, "Error deleting Index Area %l, return %l", indexArea, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
}
- rc = dsmExtentDelete(pcontext, tableArea, 0);
+ rc = dsmExtentDelete(pcontext, tableArea);
rc = dsmAreaDelete(pcontext, tableArea);
if (rc)
{
- printf("Error deleting table Area %ld, name %s, return %ld\n",
+ gemini_msg(pcontext, "Error deleting table Area %l, name %s, return %l",
tableArea, pname, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
@@ -2030,7 +2276,7 @@ int ha_gemini::delete_table(const char *pname)
rc = gemini_commit(thd);
if (rc)
{
- printf("Failed to commit transaction %ld\n",rc);
+ gemini_msg(pcontext, "Failed to commit transaction %l",rc);
}
@@ -2047,7 +2293,6 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
THD *thd;
dsmContext_t *pcontext;
dsmStatus_t rc;
- char tabname_buff[FN_REFLEN];
char dbname_buff[FN_REFLEN];
char name_buff[FN_REFLEN];
char newname_buff[FN_REFLEN];
@@ -2056,6 +2301,7 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
unsigned i, nameLen;
dsmObject_t tableNum;
dsmArea_t indexArea = 0;
+ dsmArea_t tableArea = 0;
DBUG_ENTER("ha_gemini::rename_table");
@@ -2068,7 +2314,7 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
{
if (gemini_is_vst(name_buff))
{
- return 0;
+ return DSM_S_CANT_RENAME_VST;
}
}
}
@@ -2113,21 +2359,51 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
rc = dsmObjectNameToNum(pcontext, (dsmText_t *)name_buff, &tableNum);
if (rc)
+ {
+ gemini_msg(pcontext, "Unable to file Table number for %s", name_buff);
goto errorReturn;
+ }
rc = dsmObjectRename(pcontext, tableNum,
(dsmText_t *)newname_buff,
(dsmText_t *)&newidxextname_buff[start_of_name],
(dsmText_t *)&newextname_buff[start_of_name],
- &indexArea);
+ &indexArea, &tableArea);
if (rc)
+ {
+ gemini_msg(pcontext, "Failed to rename %s to %s",name_buff,newname_buff);
goto errorReturn;
+ }
+
+ /* Rename the physical table and index files (if necessary).
+ ** Close the file, rename it, and reopen it (have to do it this
+ ** way so rename works on Windows).
+ */
+ if (!(rc = dsmAreaClose(pcontext, tableArea)))
+ {
+ if (!(rc = rename_file_ext(pfrom, pto, ha_gemini_ext)))
+ {
+ rc = dsmAreaOpen(pcontext, tableArea, 0);
+ if (rc)
+ {
+ gemini_msg(pcontext, "Failed to reopen area %d",tableArea);
+ }
+ }
+ }
- /* rename the physical table and index files (if necessary) */
- rc = rename_file_ext(pfrom, pto, ha_gemini_ext);
if (!rc && indexArea)
{
- rc = rename_file_ext(pfrom, pto, ha_gemini_idx_ext);
+ if (!(rc = dsmAreaClose(pcontext, indexArea)))
+ {
+ if (!(rc = rename_file_ext(pfrom, pto, ha_gemini_idx_ext)))
+ {
+ rc = dsmAreaOpen(pcontext, indexArea, 0);
+ if (rc)
+ {
+ gemini_msg(pcontext, "Failed to reopen area %d",tableArea);
+ }
+ }
+ }
}
errorReturn:
@@ -2143,17 +2419,38 @@ errorReturn:
double ha_gemini::scan_time()
{
- return records / (gemini_blocksize / table->reclength);
+ return (double)records /
+ (double)((gemini_blocksize / (double)table->reclength));
}
-int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
+int ha_gemini::analyze(THD* thd, HA_CHECK_OPT* check_opt)
{
int error;
+ uint saveIsolation;
+ dsmMask_t saveLockMode;
+
+ check_opt->quick = true;
+ check_opt->optimize = true; // Tells check not to get table lock
+ saveLockMode = lockMode;
+ saveIsolation = thd->gemini.tx_isolation;
+ thd->gemini.tx_isolation = ISO_READ_UNCOMMITTED;
+ lockMode = DSM_LK_NOLOCK;
+ error = check(thd,check_opt);
+ lockMode = saveLockMode;
+ thd->gemini.tx_isolation = saveIsolation;
+ return (error);
+}
+
+int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ int error = 0;
int checkStatus = HA_ADMIN_OK;
ha_rows indexCount;
- byte *buf = 0, *indexBuf = 0;
+ byte *buf = 0, *indexBuf = 0, *prevBuf = 0;
int errorCount = 0;
+ info(HA_STATUS_VARIABLE); // Makes sure row count is up to date
+
/* Get a shared table lock */
if(thd->gemini.needSavepoint)
{
@@ -2167,23 +2464,33 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
return(error);
thd->gemini.needSavepoint = 0;
}
- buf = my_malloc(table->rec_buff_length,MYF(MY_WME));
- indexBuf = my_malloc(table->rec_buff_length,MYF(MY_WME));
+ buf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME));
+ indexBuf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME));
+ prevBuf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME |MY_ZEROFILL ));
+
/* Lock the table */
- error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
- (dsmObject_t)tableNumber,
- DSMOBJECT_TABLE,0,
- DSM_LK_SHARE, 1, 0);
+ if (!check_opt->optimize)
+ error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
+ (dsmObject_t)tableNumber,
+ DSMOBJECT_TABLE,0,
+ DSM_LK_SHARE, 1, 0);
if(error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Failed to lock table %d, error %d",tableNumber, error);
return error;
+ }
- info(HA_STATUS_VARIABLE);
-
+ ha_rows *rec_per_key = share->rec_per_key;
/* If quick option just scan along index converting and counting entries */
for (uint i = 0; i < table->keys; i++)
{
- key_read = 1;
+ key_read = 1; // Causes data to be extracted from the keys
indexCount = 0;
+ // Clear the cardinality stats for this index
+ memset(table->key_info[i].rec_per_key,0,
+ sizeof(table->key_info[0].rec_per_key[0]) *
+ table->key_info[i].key_parts);
error = index_init(i);
error = index_first(indexBuf);
while(!error)
@@ -2195,8 +2502,12 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
error = fetch_row(thd->gemini.context,buf);
if(!error)
{
- if(key_cmp(i,buf,indexBuf))
+ if(key_cmp(i,buf,indexBuf,false))
{
+
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Check Error! Key does not match row for rowid %d for index %s",
+ lastRowid,table->key_info[i].name);
print_msg(thd,table->real_name,"check","error",
"Key does not match row for rowid %d for index %s",
lastRowid,table->key_info[i].name);
@@ -2209,6 +2520,9 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
{
errorCount++;
checkStatus = HA_ADMIN_CORRUPT;
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Check Error! Key does not have a valid row pointer %d for index %s",
+ lastRowid,table->key_info[i].name);
print_msg(thd,table->real_name,"check","error",
"Key does not have a valid row pointer %d for index %s",
lastRowid,table->key_info[i].name);
@@ -2218,10 +2532,27 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
}
}
}
+
+ key_cmp(i,indexBuf,prevBuf,true);
+ bcopy((void *)indexBuf,(void *)prevBuf,table->rec_buff_length);
+
if(!error)
error = index_next(indexBuf);
}
-
+
+ for(uint j=1; j < table->key_info[i].key_parts; j++)
+ {
+ table->key_info[i].rec_per_key[j] += table->key_info[i].rec_per_key[j-1];
+ }
+ for(uint k=0; k < table->key_info[i].key_parts; k++)
+ {
+ if (table->key_info[i].rec_per_key[k])
+ table->key_info[i].rec_per_key[k] =
+ records / table->key_info[i].rec_per_key[k];
+ *rec_per_key = table->key_info[i].rec_per_key[k];
+ rec_per_key++;
+ }
+
if(error == HA_ERR_END_OF_FILE)
{
/* Check count of rows */
@@ -2231,6 +2562,10 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
/* Number of index entries does not agree with the number of
rows in the index. */
checkStatus = HA_ADMIN_CORRUPT;
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Check Error! Total rows %d does not match total index entries %d for %s",
+ records, indexCount,
+ table->key_info[i].name);
print_msg(thd,table->real_name,"check","error",
"Total rows %d does not match total index entries %d for %s",
records, indexCount,
@@ -2248,23 +2583,61 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
{
/* Now scan the table and for each row generate the keys
and find them in the index */
- error = fullCheck(thd, buf);\
+ error = fullCheck(thd, buf);
if(error)
checkStatus = error;
}
+ // Store the key distribution information
+ error = saveKeyStats(thd);
error_return:
- my_free(buf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)buf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)indexBuf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)prevBuf,MYF(MY_ALLOW_ZERO_PTR));
+
index_end();
key_read = 0;
- error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
- (dsmObject_t)tableNumber,
- DSMOBJECT_TABLE,0,
- DSM_LK_SHARE,0);
+ if(!check_opt->optimize)
+ {
+ error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
+ (dsmObject_t)tableNumber,
+ DSMOBJECT_TABLE,0,
+ DSM_LK_SHARE,0);
+ if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to unlock table %d", tableNumber);
+ }
+ }
return checkStatus;
}
+int ha_gemini::saveKeyStats(THD *thd)
+{
+ dsmStatus_t rc = 0;
+
+ /* Insert a row in the indexStats table for each column of
+ each index of the table */
+
+ for(uint i = 0; i < table->keys; i++)
+ {
+ for (uint j = 0; j < table->key_info[i].key_parts && !rc ;j++)
+ {
+ rc = dsmIndexStatsPut((dsmContext_t *)thd->gemini.context,
+ tableNumber, pindexNumbers[i],
+ j, (LONG64)table->key_info[i].rec_per_key[j]);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Failed to update index stats for table %d, index %d",
+ tableNumber, pindexNumbers[i]);
+ }
+ }
+ }
+ return rc;
+}
+
int ha_gemini::fullCheck(THD *thd,byte *buf)
{
int error;
@@ -2319,7 +2692,12 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Error setting savepoint number %d, error %d",
+ thd->gemini.savepoint++, error);
return(error);
+ }
thd->gemini.needSavepoint = 0;
}
@@ -2330,7 +2708,11 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
DSMOBJECT_TABLE,0,
DSM_LK_EXCL, 1, 0);
if(error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Failed to lock table %d, error %d",tableNumber, error);
return error;
+ }
error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
DSM_TAGCONTEXT_NO_LOGGING,1);
@@ -2338,13 +2720,18 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
error = dsmTableReset((dsmContext_t *)thd->gemini.context,
(dsmTable_t)tableNumber, table->keys,
pindexNumbers);
+ if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "dsmTableReset failed for table %d, error %d",tableNumber, error);
+ }
- buf = my_malloc(table->rec_buff_length,MYF(MY_WME));
+ buf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME));
dsmRecord.table = tableNumber;
dsmRecord.recid = 0;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
while(!error)
{
error = dsmTableScan((dsmContext_t *)thd->gemini.context,
@@ -2352,13 +2739,15 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
1);
if(!error)
{
- unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
- error = handleIndexEntries(buf,dsmRecord.recid,KEY_CREATE);
- if(error == HA_ERR_FOUND_DUPP_KEY)
+ if (!(error = unpack_row((char *)buf,(char *)dsmRecord.pbuffer)))
{
- /* We don't want to stop on duplicate keys -- we're repairing
- here so let's get as much repaired as possible. */
- error = 0;
+ error = handleIndexEntries(buf,dsmRecord.recid,KEY_CREATE);
+ if(error == HA_ERR_FOUND_DUPP_KEY)
+ {
+ /* We don't want to stop on duplicate keys -- we're repairing
+ here so let's get as much repaired as possible. */
+ error = 0;
+ }
}
}
}
@@ -2366,7 +2755,13 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
(dsmObject_t)tableNumber,
DSMOBJECT_TABLE,0,
DSM_LK_EXCL,0);
- my_free(buf,MYF(MY_ALLOW_ZERO_PTR));
+ if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to unlock table %d", tableNumber);
+ }
+
+ my_free((char*)buf,MYF(MY_ALLOW_ZERO_PTR));
error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
DSM_TAGCONTEXT_NO_LOGGING,0);
@@ -2374,6 +2769,313 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
return error;
}
+
+int ha_gemini::restore(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ char* backup_dir = thd->lex.backup_dir;
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ char* table_name = table->real_name;
+ int error = 0;
+ int errornum;
+ const char* errmsg = "";
+ dsmArea_t tableArea = 0;
+ dsmObjectAttr_t objectAttr;
+ dsmObject_t associate;
+ dsmObjectType_t associateType;
+ dsmDbkey_t block, root;
+ dsmStatus_t rc;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXTABLE, tableNumber,
+ &tableArea, &objectAttr, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaFlush(pcontext, tableArea, FLUSH_BUFFERS | FLUSH_SYNC);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaFlush (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaClose(pcontext, tableArea);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaClose (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Restore the data file */
+ if (!fn_format(src_path, table_name, backup_dir, ha_gemini_ext, 4 + 64))
+ {
+ return HA_ADMIN_INVALID;
+ }
+
+ if (my_copy(src_path, fn_format(dst_path, table->path, "",
+ ha_gemini_ext, 4), MYF(MY_WME)))
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in my_copy (.gmd) (Error %d)";
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaFlush(pcontext, tableArea, FREE_BUFFERS);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaFlush (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaOpen(pcontext, tableArea, 1);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaOpen (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+#ifdef GEMINI_BACKUP_IDX
+ dsmArea_t indexArea = 0;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXINDEX, &indexArea,
+ &objectAttr, &associate, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaClose(pcontext, indexArea);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaClose (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Restore the index file */
+ if (!fn_format(src_path, table_name, backup_dir, ha_gemini_idx_ext, 4 + 64))
+ {
+ return HA_ADMIN_INVALID;
+ }
+
+ if (my_copy(src_path, fn_format(dst_path, table->path, "",
+ ha_gemini_idx_ext, 4), MYF(MY_WME)))
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in my_copy (.gmi) (Error %d)";
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaOpen(pcontext, indexArea, 1);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaOpen (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ return HA_ADMIN_OK;
+#else /* #ifdef GEMINI_BACKUP_IDX */
+ HA_CHECK_OPT tmp_check_opt;
+ tmp_check_opt.init();
+ /* The following aren't currently implemented in ha_gemini::repair
+ ** tmp_check_opt.quick = 1;
+ ** tmp_check_opt.flags |= T_VERY_SILENT;
+ */
+ return (repair(thd, &tmp_check_opt));
+#endif /* #ifdef GEMINI_BACKUP_IDX */
+
+ err:
+ {
+#if 0
+ /* mi_check_print_error is in ha_myisam.cc, so none of the informative
+ ** error messages above is currently being printed
+ */
+ MI_CHECK param;
+ myisamchk_init(&param);
+ param.thd = thd;
+ param.op_name = (char*)"restore";
+ param.table_name = table->table_name;
+ param.testflag = 0;
+ mi_check_print_error(&param,errmsg, errornum);
+#endif
+ return error;
+ }
+}
+
+
+int ha_gemini::backup(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ char* backup_dir = thd->lex.backup_dir;
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ char* table_name = table->real_name;
+ int error = 0;
+ int errornum;
+ const char* errmsg = "";
+ dsmArea_t tableArea = 0;
+ dsmObjectAttr_t objectAttr;
+ dsmObject_t associate;
+ dsmObjectType_t associateType;
+ dsmDbkey_t block, root;
+ dsmStatus_t rc;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXTABLE, tableNumber,
+ &tableArea, &objectAttr, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmd) (Error %d)";
+ errornum = rc;
+ goto err;
+ }
+
+ /* Flush the buffers before backing up the table */
+ dsmAreaFlush((dsmContext_t *)thd->gemini.context, tableArea,
+ FLUSH_BUFFERS | FLUSH_SYNC);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaFlush (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Backup the .FRM file */
+ if (!fn_format(dst_path, table_name, backup_dir, reg_ext, 4 + 64))
+ {
+ errmsg = "Failed in fn_format() for .frm file: errno = %d";
+ error = HA_ADMIN_INVALID;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ if (my_copy(fn_format(src_path, table->path,"", reg_ext, 4),
+ dst_path,
+ MYF(MY_WME | MY_HOLD_ORIGINAL_MODES )))
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed copying .frm file: errno = %d";
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Backup the data file */
+ if (!fn_format(dst_path, table_name, backup_dir, ha_gemini_ext, 4 + 64))
+ {
+ errmsg = "Failed in fn_format() for .GMD file: errno = %d";
+ error = HA_ADMIN_INVALID;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ if (my_copy(fn_format(src_path, table->path,"", ha_gemini_ext, 4),
+ dst_path,
+ MYF(MY_WME | MY_HOLD_ORIGINAL_MODES )) )
+ {
+ errmsg = "Failed copying .GMD file: errno = %d";
+ error= HA_ADMIN_FAILED;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+#ifdef GEMINI_BACKUP_IDX
+ dsmArea_t indexArea = 0;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXINDEX, &indexArea,
+ &objectAttr, &associate, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Backup the index file */
+ if (!fn_format(dst_path, table_name, backup_dir, ha_gemini_idx_ext, 4 + 64))
+ {
+ errmsg = "Failed in fn_format() for .GMI file: errno = %d";
+ error = HA_ADMIN_INVALID;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ if (my_copy(fn_format(src_path, table->path,"", ha_gemini_idx_ext, 4),
+ dst_path,
+ MYF(MY_WME | MY_HOLD_ORIGINAL_MODES )) )
+ {
+ errmsg = "Failed copying .GMI file: errno = %d";
+ error= HA_ADMIN_FAILED;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+#endif /* #ifdef GEMINI_BACKUP_IDX */
+
+ return HA_ADMIN_OK;
+
+ err:
+ {
+#if 0
+ /* mi_check_print_error is in ha_myisam.cc, so none of the informative
+ ** error messages above is currently being printed
+ */
+ MI_CHECK param;
+ myisamchk_init(&param);
+ param.thd = thd;
+ param.op_name = (char*)"backup";
+ param.table_name = table->table_name;
+ param.testflag = 0;
+ mi_check_print_error(&param,errmsg, errornum);
+#endif
+ return error;
+ }
+}
+
+
+int ha_gemini::optimize(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ return HA_ADMIN_ALREADY_DONE;
+}
+
+
ha_rows ha_gemini::records_in_range(int keynr,
const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
@@ -2412,7 +3114,7 @@ ha_rows ha_gemini::records_in_range(int keynr,
pbracketBase->keyLen = componentLen;
}
- pbracketBase->keyLen -= 3;
+ pbracketBase->keyLen -= FULLKEYHDRSZ;
if(end_key)
{
@@ -2431,9 +3133,10 @@ ha_rows ha_gemini::records_in_range(int keynr,
pbracketLimit->keyLen = componentLen;
}
- pbracketLimit->keyLen -= 3;
+ pbracketLimit->keyLen -= FULLKEYHDRSZ;
error = dsmIndexRowsInRange((dsmContext_t *)current_thd->gemini.context,
pbracketBase,pbracketLimit,
+ tableNumber,
&pctInrange);
if(pctInrange >= 1)
rows = (ha_rows)pctInrange;
@@ -2457,32 +3160,82 @@ ha_rows ha_gemini::records_in_range(int keynr,
may only happen in rows with blobs, as the default row length is
pre-allocated.
*/
-int ha_gemini::pack_row(byte **pprow, int *ppackedLength, const byte *record)
+int ha_gemini::pack_row(byte **pprow, int *ppackedLength, const byte *record,
+ bool update)
{
+ THD *thd = current_thd;
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ gemBlobDesc_t *pBlobDesc = pBlobDescs;
+
if (fixed_length_row)
{
*pprow = (byte *)record;
*ppackedLength=(int)table->reclength;
return 0;
}
- if (table->blob_fields)
- {
- return HA_ERR_WRONG_COMMAND;
- }
/* Copy null bits */
memcpy(rec_buff, record, table->null_bytes);
byte *ptr=rec_buff + table->null_bytes;
for (Field **field=table->field ; *field ; field++)
- ptr=(byte*) (*field)->pack((char*) ptr,record + (*field)->offset());
+ {
+#ifdef GEMINI_TINYBLOB_IN_ROW
+ /* Tiny blobs (255 bytes or less) are stored in the row; larger
+ ** blobs are stored in a separate storage object (see ha_gemini::create).
+ */
+ if ((*field)->type() == FIELD_TYPE_BLOB &&
+ ((Field_blob*)*field)->blobtype() != FIELD_TYPE_TINY_BLOB)
+#else
+ if ((*field)->type() == FIELD_TYPE_BLOB)
+#endif
+ {
+ dsmBlob_t gemBlob;
+ char *blobptr;
+
+ gemBlob.areaType = DSMOBJECT_BLOB;
+ gemBlob.blobObjNo = tableNumber;
+ gemBlob.blobId = 0;
+ gemBlob.totLength = gemBlob.segLength =
+ ((Field_blob*)*field)->get_length((char*)record + (*field)->offset());
+ ((Field_blob*)*field)->get_ptr((char**) &blobptr);
+ gemBlob.pBuffer = (dsmBuffer_t *)blobptr;
+ gemBlob.blobContext.blobOffset = 0;
+ if (gemBlob.totLength)
+ {
+ dsmBlobStart(pcontext, &gemBlob);
+ if (update && pBlobDesc->blobId)
+ {
+ gemBlob.blobId = pBlobDesc->blobId;
+ dsmBlobUpdate(pcontext, &gemBlob, NULL);
+ }
+ else
+ {
+ dsmBlobPut(pcontext, &gemBlob, NULL);
+ }
+ dsmBlobEnd(pcontext, &gemBlob);
+ }
+ ptr = (byte*)((Field_blob*)*field)->pack_id((char*) ptr,
+ (char*)record + (*field)->offset(), (longlong)gemBlob.blobId);
+
+ pBlobDesc++;
+ }
+ else
+ {
+ ptr=(byte*) (*field)->pack((char*) ptr, (char*)record + (*field)->offset());
+ }
+ }
*pprow=rec_buff;
*ppackedLength= (ptr - rec_buff);
return 0;
}
-void ha_gemini::unpack_row(char *record, char *prow)
+int ha_gemini::unpack_row(char *record, char *prow)
{
+ THD *thd = current_thd;
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ gemBlobDesc_t *pBlobDesc = pBlobDescs;
+
if (fixed_length_row)
{
/* If the table is a VST, the row is in Gemini internal format.
@@ -2568,38 +3321,129 @@ void ha_gemini::unpack_row(char *record, char *prow)
const char *ptr= (const char*) prow;
memcpy(record, ptr, table->null_bytes);
ptr+=table->null_bytes;
+
for (Field **field=table->field ; *field ; field++)
- ptr= (*field)->unpack(record + (*field)->offset(), ptr);
+ {
+#ifdef GEMINI_TINYBLOB_IN_ROW
+ /* Tiny blobs (255 bytes or less) are stored in the row; larger
+ ** blobs are stored in a separate storage object (see ha_gemini::create).
+ */
+ if ((*field)->type() == FIELD_TYPE_BLOB &&
+ ((Field_blob*)*field)->blobtype() != FIELD_TYPE_TINY_BLOB)
+#else
+ if ((*field)->type() == FIELD_TYPE_BLOB)
+#endif
+ {
+ dsmBlob_t gemBlob;
+
+ gemBlob.areaType = DSMOBJECT_BLOB;
+ gemBlob.blobObjNo = tableNumber;
+ gemBlob.blobId = (dsmBlobId_t)(((Field_blob*)*field)->get_id(ptr));
+ if (gemBlob.blobId)
+ {
+ gemBlob.totLength =
+ gemBlob.segLength = ((Field_blob*)*field)->get_length(ptr);
+ /* Allocate memory to store the blob. This memory is freed
+ ** the next time unpack_row is called for this table.
+ */
+ gemBlob.pBuffer = (dsmBuffer_t *)my_malloc(gemBlob.totLength,
+ MYF(0));
+ if (!gemBlob.pBuffer)
+ {
+ return HA_ERR_OUT_OF_MEM;
+ }
+ gemBlob.blobContext.blobOffset = 0;
+ dsmBlobStart(pcontext, &gemBlob);
+ dsmBlobGet(pcontext, &gemBlob, NULL);
+ dsmBlobEnd(pcontext, &gemBlob);
+ }
+ else
+ {
+ gemBlob.pBuffer = 0;
+ }
+ ptr = ((Field_blob*)*field)->unpack_id(record + (*field)->offset(),
+ ptr, (char *)gemBlob.pBuffer);
+ pBlobDesc->blobId = gemBlob.blobId;
+ my_free((char*)pBlobDesc->pBlob, MYF(MY_ALLOW_ZERO_PTR));
+ pBlobDesc->pBlob = gemBlob.pBuffer;
+ pBlobDesc++;
+ }
+ else
+ {
+ ptr= (*field)->unpack(record + (*field)->offset(), ptr);
+ }
+ }
}
+
+ return 0;
}
int ha_gemini::key_cmp(uint keynr, const byte * old_row,
- const byte * new_row)
+ const byte * new_row, bool updateStats)
{
KEY_PART_INFO *key_part=table->key_info[keynr].key_part;
KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts;
- for ( ; key_part != end ; key_part++)
+ for ( uint i = 0 ; key_part != end ; key_part++, i++)
{
if (key_part->null_bit)
{
if ((old_row[key_part->null_offset] & key_part->null_bit) !=
(new_row[key_part->null_offset] & key_part->null_bit))
+ {
+ if(updateStats)
+ table->key_info[keynr].rec_per_key[i]++;
return 1;
+ }
+ else if((old_row[key_part->null_offset] & key_part->null_bit) &&
+ (new_row[key_part->null_offset] & key_part->null_bit))
+ /* Both are null */
+ continue;
}
if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH))
{
-
- if (key_part->field->cmp_binary(old_row + key_part->offset,
- new_row + key_part->offset,
+ if (key_part->field->cmp_binary((char*)(old_row + key_part->offset),
+ (char*)(new_row + key_part->offset),
(ulong) key_part->length))
+ {
+ if(updateStats)
+ table->key_info[keynr].rec_per_key[i]++;
return 1;
+ }
}
else
{
if (memcmp(old_row+key_part->offset, new_row+key_part->offset,
key_part->length))
+ {
+ /* Check for special case of -0 which causes table check
+	   to find an invalid key when comparing the index
+ value of 0 to the -0 stored in the row */
+ if(key_part->field->type() == FIELD_TYPE_DECIMAL)
+ {
+ double fieldValue;
+ char *ptr = key_part->field->ptr;
+
+ key_part->field->ptr = (char *)old_row + key_part->offset;
+ fieldValue = key_part->field->val_real();
+ if(fieldValue == 0)
+ {
+ key_part->field->ptr = (char *)new_row + key_part->offset;
+ fieldValue = key_part->field->val_real();
+ if(fieldValue == 0)
+ {
+ key_part->field->ptr = ptr;
+ continue;
+ }
+ }
+ key_part->field->ptr = ptr;
+ }
+ if(updateStats)
+ {
+ table->key_info[keynr].rec_per_key[i]++;
+ }
return 1;
+ }
}
}
return 0;
@@ -2612,13 +3456,13 @@ int gemini_parse_table_name(const char *fullname, char *dbname, char *tabname)
/* separate out the name of the table and the database
*/
- namestart = strchr(fullname + start_of_name, '/');
+ namestart = (char *)strchr(fullname + start_of_name, '/');
if (!namestart)
{
/* if on Windows, slashes go the other way */
- namestart = strchr(fullname + start_of_name, '\\');
+ namestart = (char *)strchr(fullname + start_of_name, '\\');
}
- nameend = strchr(fullname + start_of_name, '.');
+ nameend = (char *)strchr(fullname + start_of_name, '.');
/* sometimes fullname has an extension, sometimes it doesn't */
if (!nameend)
{
@@ -2680,4 +3524,105 @@ static void print_msg(THD *thd, const char *table_name, const char *op_name,
thd->killed=1;
}
+/* Load shared area with rows per key statistics */
+void
+ha_gemini::get_index_stats(THD *thd)
+{
+ dsmStatus_t rc = 0;
+ ha_rows *rec_per_key = share->rec_per_key;
+
+ for(uint i = 0; i < table->keys && !rc; i++)
+ {
+ for (uint j = 0; j < table->key_info[i].key_parts && !rc;j++)
+ {
+ LONG64 rows_per_key;
+ rc = dsmIndexStatsGet((dsmContext_t *)thd->gemini.context,
+ tableNumber, pindexNumbers[i],(int)j,
+ &rows_per_key);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Index Statistics faild for table %d index %d, error %d",
+ tableNumber, pindexNumbers[i], rc);
+ }
+ *rec_per_key = (ha_rows)rows_per_key;
+ rec_per_key++;
+ }
+ }
+ return;
+}
+
+/****************************************************************************
+ Handling the shared GEM_SHARE structure that is needed to provide
+ a global in memory storage location of the rec_per_key stats used
+ by the optimizer.
+****************************************************************************/
+
+static byte* gem_get_key(GEM_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length=share->table_name_length;
+ return (byte*) share->table_name;
+}
+
+static GEM_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ GEM_SHARE *share;
+
+ pthread_mutex_lock(&gem_mutex);
+ uint length=(uint) strlen(table_name);
+ if (!(share=(GEM_SHARE*) hash_search(&gem_open_tables, (byte*) table_name,
+ length)))
+ {
+ ha_rows *rec_per_key;
+ char *tmp_name;
+
+ if ((share=(GEM_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &rec_per_key, table->key_parts * sizeof(ha_rows),
+ &tmp_name, length+1,
+ NullS)))
+ {
+ share->rec_per_key = rec_per_key;
+ share->table_name = tmp_name;
+ share->table_name_length=length;
+ strcpy(share->table_name,table_name);
+ if (hash_insert(&gem_open_tables, (byte*) share))
+ {
+ pthread_mutex_unlock(&gem_mutex);
+ my_free((gptr) share,0);
+ return 0;
+ }
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex,NULL);
+ }
+ }
+ pthread_mutex_unlock(&gem_mutex);
+ return share;
+}
+
+static int free_share(GEM_SHARE *share, bool mutex_is_locked)
+{
+ pthread_mutex_lock(&gem_mutex);
+ if (mutex_is_locked)
+ pthread_mutex_unlock(&share->mutex);
+ if (!--share->use_count)
+ {
+ hash_delete(&gem_open_tables, (byte*) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&gem_mutex);
+ return 0;
+}
+
+static void gemini_lock_table_overflow_error(dsmContext_t *pcontext)
+{
+ gemini_msg(pcontext, "The total number of locks exceeds the lock table size");
+ gemini_msg(pcontext, "Either increase gemini_lock_table_size or use a");
+ gemini_msg(pcontext, "different transaction isolation level");
+}
+
#endif /* HAVE_GEMINI_DB */
diff --git a/sql/ha_gemini.h b/sql/ha_gemini.h
index 7e6e8f26588..495dc2fd1c9 100644
--- a/sql/ha_gemini.h
+++ b/sql/ha_gemini.h
@@ -19,17 +19,26 @@
#pragma interface /* gcc class implementation */
#endif
+#include "gem_global.h"
#include "dstd.h"
#include "dsmpub.h"
/* class for the the gemini handler */
enum enum_key_string_options{KEY_CREATE,KEY_DELETE,KEY_CHECK};
-
-#define READ_UNCOMMITED 0
-#define READ_COMMITED 1
-#define REPEATABLE_READ 2
-#define SERIALIZEABLE 3
+typedef struct st_gemini_share {
+ ha_rows *rec_per_key;
+ THR_LOCK lock;
+ pthread_mutex_t mutex;
+ char *table_name;
+ uint table_name_length,use_count;
+} GEM_SHARE;
+
+typedef struct gemBlobDesc
+{
+ dsmBlobId_t blobId;
+ dsmBuffer_t *pBlob;
+} gemBlobDesc_t;
class ha_gemini: public handler
{
@@ -38,7 +47,7 @@ class ha_gemini: public handler
uint int_option_flag;
int tableNumber;
dsmIndex_t *pindexNumbers; // dsm object numbers for the indexes on this table
- unsigned long lastRowid;
+ dsmRecid_t lastRowid;
uint last_dup_key;
bool fixed_length_row, key_read, using_ignore;
byte *rec_buff;
@@ -46,10 +55,12 @@ class ha_gemini: public handler
dsmKey_t *pbracketLimit;
dsmKey_t *pfoundKey;
dsmMask_t tableStatus; // Crashed/repair status
+ gemBlobDesc_t *pBlobDescs;
int index_open(char *tableName);
- int pack_row(byte **prow, int *ppackedLength, const byte *record);
- void unpack_row(char *record, char *prow);
+ int pack_row(byte **prow, int *ppackedLength, const byte *record,
+ bool update);
+ int unpack_row(char *record, char *prow);
int findRow(THD *thd, dsmMask_t findMode, byte *buf);
int fetch_row(void *gemini_context, const byte *buf);
int handleIndexEntries(const byte * record, dsmRecid_t recid,
@@ -70,24 +81,28 @@ class ha_gemini: public handler
void unpack_key(char *record, dsmKey_t *key, uint index);
int key_cmp(uint keynr, const byte * old_row,
- const byte * new_row);
+ const byte * new_row, bool updateStats);
+ int saveKeyStats(THD *thd);
+ void get_index_stats(THD *thd);
short cursorId; /* cursorId of active index cursor if any */
dsmMask_t lockMode; /* Shared or exclusive */
/* FIXFIX Don't know why we need this because I don't know what
store_lock method does but we core dump without this */
- THR_LOCK alock;
THR_LOCK_DATA lock;
+ GEM_SHARE *share;
+
public:
ha_gemini(TABLE *table): handler(table), file(0),
int_option_flag(HA_READ_NEXT | HA_READ_PREV |
HA_REC_NOT_IN_SEQ |
HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
HA_LONGLONG_KEYS | HA_NULL_KEY | HA_HAVE_KEY_READ_ONLY |
- HA_NO_BLOBS | HA_NO_TEMP_TABLES |
- /* HA_BLOB_KEY | */ /*HA_NOT_EXACT_COUNT | */
+ HA_BLOB_KEY |
+ HA_NO_TEMP_TABLES | HA_NO_FULLTEXT_KEY |
+ /*HA_NOT_EXACT_COUNT | */
/*HA_KEY_READ_WRONG_STR |*/ HA_DROP_BEFORE_CREATE),
pbracketBase(0),pbracketLimit(0),pfoundKey(0),
cursorId(0)
@@ -100,7 +115,7 @@ class ha_gemini: public handler
uint max_record_length() const { return MAXRECSZ; }
uint max_keys() const { return MAX_KEY-1; }
uint max_key_parts() const { return MAX_REF_PARTS; }
- uint max_key_length() const { return MAXKEYSZ; }
+ uint max_key_length() const { return MAXKEYSZ / 2; }
bool fast_key_read() { return 1;}
bool has_transactions() { return 1;}
@@ -129,8 +144,12 @@ class ha_gemini: public handler
void info(uint);
int extra(enum ha_extra_function operation);
int reset(void);
+ int analyze(THD* thd, HA_CHECK_OPT* check_opt);
int check(THD* thd, HA_CHECK_OPT* check_opt);
int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ int restore(THD* thd, HA_CHECK_OPT* check_opt);
+ int backup(THD* thd, HA_CHECK_OPT* check_opt);
+ int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int external_lock(THD *thd, int lock_type);
virtual longlong get_auto_increment();
void position(byte *record);
@@ -139,7 +158,7 @@ class ha_gemini: public handler
enum ha_rkey_function start_search_flag,
const byte *end_key,uint end_key_len,
enum ha_rkey_function end_search_flag);
-
+ void update_create_info(HA_CREATE_INFO *create_info);
int create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
@@ -167,6 +186,7 @@ extern long gemini_locktablesize;
extern long gemini_lock_wait_timeout;
extern long gemini_spin_retries;
extern long gemini_connection_limit;
+extern char *gemini_basedir;
extern TYPELIB gemini_recovery_typelib;
extern ulong gemini_recovery_options;
@@ -175,12 +195,13 @@ bool gemini_end(void);
bool gemini_flush_logs(void);
int gemini_commit(THD *thd);
int gemini_rollback(THD *thd);
+int gemini_recovery_logging(THD *thd, bool on);
void gemini_disconnect(THD *thd);
int gemini_rollback_to_savepoint(THD *thd);
int gemini_parse_table_name(const char *fullname, char *dbname, char *tabname);
int gemini_is_vst(const char *pname);
int gemini_set_option_long(int optid, long optval);
-const int gemini_blocksize = 8192;
-const int gemini_recbits = 7;
+const int gemini_blocksize = BLKSIZE;
+const int gemini_recbits = DEFAULT_RECBITS;
diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc
index fa44cebe19d..184c97837db 100644
--- a/sql/ha_innobase.cc
+++ b/sql/ha_innobase.cc
@@ -679,7 +679,7 @@ ha_innobase::bas_ext() const
/* out: file extension strings, currently not
used */
{
- static const char* ext[] = {".not_used", NullS};
+ static const char* ext[] = {".InnoDB", NullS};
return(ext);
}
@@ -779,6 +779,13 @@ ha_innobase::open(
if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
+ fprintf(stderr, "\
+Cannot find table %s from the internal data dictionary\n\
+of InnoDB though the .frm file for the table exists. Maybe you have deleted\n\
+and created again an InnoDB database but forgotten to delete the\n\
+corresponding .frm files of old InnoDB tables?\n",
+ norm_name);
+
free_share(share);
my_free((char*) upd_buff, MYF(0));
my_errno = ENOENT;
@@ -1516,6 +1523,10 @@ ha_innobase::update_row(
DBUG_ENTER("ha_innobase::update_row");
+ if (table->time_stamp) {
+ update_timestamp(new_row + table->time_stamp - 1);
+ }
+
if (last_query_id != user_thd->query_id) {
prebuilt->sql_stat_start = TRUE;
last_query_id = user_thd->query_id;
@@ -2142,6 +2153,7 @@ ha_innobase::external_lock(
prebuilt->in_update_remember_pos = TRUE;
if (lock_type == F_WRLCK) {
+
/* If this is a SELECT, then it is in UPDATE TABLE ...
or SELECT ... FOR UPDATE */
prebuilt->select_lock_type = LOCK_X;
@@ -2153,13 +2165,27 @@ ha_innobase::external_lock(
}
trx->n_mysql_tables_in_use++;
+
+ if (prebuilt->select_lock_type != LOCK_NONE) {
+
+ trx->mysql_n_tables_locked++;
+ }
} else {
trx->n_mysql_tables_in_use--;
- if (trx->n_mysql_tables_in_use == 0 &&
- !(thd->options
- & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
- innobase_commit(thd, trx);
+ if (trx->n_mysql_tables_in_use == 0) {
+
+ trx->mysql_n_tables_locked = 0;
+
+ if (trx->has_search_latch) {
+
+ trx_search_latch_release_if_reserved(trx);
+ }
+
+ if (!(thd->options
+ & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
+ innobase_commit(thd, trx);
+ }
}
}
@@ -2690,6 +2716,39 @@ ha_innobase::info(
DBUG_VOID_RETURN;
}
+/***********************************************************************
+Tries to check that an InnoDB table is not corrupted. If corruption is
+noticed, prints to stderr information about it. In case of corruption
+may also assert a failure and crash the server. */
+
+int
+ha_innobase::check(
+/*===============*/
+ /* out: HA_ADMIN_CORRUPT or
+ HA_ADMIN_OK */
+ THD* thd, /* in: user thread handle */
+ HA_CHECK_OPT* check_opt) /* in: check options, currently
+ ignored */
+{
+ row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+ ulint ret;
+
+ if (prebuilt->mysql_template == NULL) {
+ /* Build the template; we will use a dummy template
+ in index scans done in checking */
+
+ build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
+ }
+
+ ret = row_check_table_for_mysql(prebuilt);
+
+ if (ret == DB_SUCCESS) {
+ return(HA_ADMIN_OK);
+ }
+
+ return(HA_ADMIN_CORRUPT);
+}
+
/*****************************************************************
Adds information about free space in the InnoDB tablespace to a
table comment which is printed out when a user calls SHOW TABLE STATUS. */
diff --git a/sql/ha_innobase.h b/sql/ha_innobase.h
index 258e34cbf86..d832ac93d0f 100644
--- a/sql/ha_innobase.h
+++ b/sql/ha_innobase.h
@@ -142,7 +142,7 @@ class ha_innobase: public handler
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
-
+ int check(THD* thd, HA_CHECK_OPT* check_opt);
char* update_table_comment(const char* comment);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 91c837e8023..63e2cf7c201 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -527,8 +527,8 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
int error=0;
uint extra_testflag=0;
bool optimize_done= !optimize, statistics_done=0;
- char fixed_name[FN_REFLEN];
const char *old_proc_info=thd->proc_info;
+ char fixed_name[FN_REFLEN];
MYISAM_SHARE* share = file->s;
ha_rows rows= file->state->records;
DBUG_ENTER("ha_myisam::repair");
@@ -540,8 +540,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
param.thd=thd;
param.tmpdir=mysql_tmpdir;
param.out_flag=0;
- VOID(fn_format(fixed_name,file->filename,"",MI_NAME_IEXT,
- 4+ (param.opt_follow_links ? 16 : 0)));
+ strmov(fixed_name,file->filename);
// Don't lock tables if we have used LOCK TABLE
if (!thd->locked_tables && mi_lock_database(file,F_WRLCK))
@@ -831,6 +830,8 @@ void ha_myisam::position(const byte* record)
void ha_myisam::info(uint flag)
{
MI_ISAMINFO info;
+ char name_buff[FN_REFLEN];
+
(void) mi_status(file,&info,flag);
if (flag & HA_STATUS_VARIABLE)
{
@@ -860,6 +861,18 @@ void ha_myisam::info(uint flag)
raid_type=info.raid_type;
raid_chunks=info.raid_chunks;
raid_chunksize=info.raid_chunksize;
+
+ /*
+ Set data_file_name and index_file_name to point at the symlink value
+ if table is symlinked (Ie; Real name is not same as generated name)
+ */
+ data_file_name=index_file_name=0;
+ fn_format(name_buff, file->filename, "", MI_NAME_DEXT, 2);
+ if (strcmp(name_buff, info.data_file_name))
+ data_file_name=info.data_file_name;
+ strmov(fn_ext(name_buff),MI_NAME_IEXT);
+ if (strcmp(name_buff, info.index_file_name))
+ index_file_name=info.index_file_name;
}
if (flag & HA_STATUS_ERRKEY)
{
@@ -915,6 +928,7 @@ THR_LOCK_DATA **ha_myisam::store_lock(THD *thd,
void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
{
+ MI_ISAMINFO info;
table->file->info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
@@ -926,6 +940,8 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
create_info->raid_chunks= raid_chunks;
create_info->raid_chunksize= raid_chunksize;
}
+ create_info->data_file_name=data_file_name;
+ create_info->index_file_name=index_file_name;
}
@@ -1097,8 +1113,10 @@ int ha_myisam::create(const char *name, register TABLE *form,
create_info.raid_type=info->raid_type;
create_info.raid_chunks=info->raid_chunks ? info->raid_chunks : RAID_DEFAULT_CHUNKS;
create_info.raid_chunksize=info->raid_chunksize ? info->raid_chunksize : RAID_DEFAULT_CHUNKSIZE;
+ create_info.data_file_name= info->data_file_name;
+ create_info.index_file_name=info->index_file_name;
- error=mi_create(fn_format(buff,name,"","",2+4+16),
+ error=mi_create(fn_format(buff,name,"","",2+4),
form->keys,keydef,
(uint) (recinfo_pos-recinfo), recinfo,
0, (MI_UNIQUEDEF*) 0,
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 21b7e5bbd39..ca5007228d8 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -38,6 +38,7 @@ class ha_myisam: public handler
{
MI_INFO *file;
uint int_option_flag,enable_activate_all_index;
+ char *data_file_name, *index_file_name;
int repair(THD *thd, MI_CHECK &param, bool optimize);
public:
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index b842c15cce0..e3e1d959438 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -232,7 +232,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info)
for (table=file->open_tables ; table != file->end_table ; table++)
{
- char *name=table->table->s->filename;
+ char *name=table->table->filename;
char buff[FN_REFLEN];
TABLE_LIST *ptr;
if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
@@ -278,7 +278,7 @@ void ha_myisammrg::append_create_info(String *packet)
for (first=table=file->open_tables ; table != file->end_table ; table++)
{
- char *name=table->table->s->filename;
+ char *name=table->table->filename;
fn_format(buff,name,"","",3);
if (table != first)
packet->append(',');
diff --git a/sql/handler.cc b/sql/handler.cc
index 212fcea11ae..5b5d6d4764c 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -694,6 +694,15 @@ void handler::print_error(int error, myf errflag)
case HA_ERR_RECORD_FILE_FULL:
textno=ER_RECORD_FILE_FULL;
break;
+ case HA_ERR_LOCK_WAIT_TIMEOUT:
+ textno=ER_LOCK_WAIT_TIMEOUT;
+ break;
+ case HA_ERR_LOCK_TABLE_FULL:
+ textno=ER_LOCK_TABLE_FULL;
+ break;
+ case HA_ERR_READ_ONLY_TRANSACTION:
+ textno=ER_READ_ONLY_TRANSACTION;
+ break;
default:
{
my_error(ER_GET_ERRNO,errflag,error);
@@ -757,6 +766,25 @@ int ha_commit_rename(THD *thd)
return error;
}
+/* Tell the handler to turn on or off logging to the handler's
+ recovery log
+*/
+int ha_recovery_logging(THD *thd, bool on)
+{
+ int error=0;
+
+ DBUG_ENTER("ha_recovery_logging");
+#ifdef USING_TRANSACTIONS
+ if (opt_using_transactions)
+ {
+#ifdef HAVE_GEMINI_DB
+ error = gemini_recovery_logging(thd, on);
+#endif
+ }
+#endif
+ DBUG_RETURN(error);
+}
+
int handler::index_next_same(byte *buf, const byte *key, uint keylen)
{
int error;
@@ -825,5 +853,5 @@ static int NEAR_F delete_file(const char *name,const char *ext,int extflag)
{
char buff[FN_REFLEN];
VOID(fn_format(buff,name,"",ext,extflag | 4));
- return(my_delete(buff,MYF(MY_WME)));
+ return(my_delete_with_symlink(buff,MYF(MY_WME)));
}
diff --git a/sql/handler.h b/sql/handler.h
index 076bf783f80..fc20e563f9f 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -74,6 +74,7 @@
#define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2)
#define HA_NO_TEMP_TABLES (HA_NOT_DELETE_WITH_CACHE*2)
#define HA_NO_PREFIX_CHAR_KEYS (HA_NO_TEMP_TABLES*2)
+#define HA_NO_FULLTEXT_KEY (HA_NO_PREFIX_CHAR_KEYS*2)
/* Parameters for open() (in register form->filestat) */
/* HA_GET_INFO does a implicit HA_ABORT_IF_LOCKED */
@@ -141,6 +142,7 @@ typedef struct st_ha_create_information
ulonglong max_rows,min_rows;
ulonglong auto_increment_value;
char *comment,*password;
+ char *data_file_name, *index_file_name;
uint options; /* OR of HA_CREATE_ options */
uint raid_type,raid_chunks;
ulong raid_chunksize;
@@ -353,3 +355,4 @@ int ha_autocommit_or_rollback(THD *thd, int error);
void ha_set_spin_retries(uint retries);
bool ha_flush_logs(void);
int ha_commit_rename(THD *thd);
+int ha_recovery_logging(THD *thd, bool on);
diff --git a/sql/item.cc b/sql/item.cc
index b268c5eb928..44bbf9a9cbc 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -561,7 +561,7 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables)
{
if (!ref)
{
- if (!(ref=find_item_in_list(this,thd->lex.item_list)))
+ if (!(ref=find_item_in_list(this,thd->lex.select->item_list)))
return 1;
max_length= (*ref)->max_length;
maybe_null= (*ref)->maybe_null;
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index e7a6c52dfd9..373aede7b6b 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -487,7 +487,7 @@ Item_func_if::fix_length_and_dec()
{
maybe_null=args[1]->maybe_null || args[2]->maybe_null;
max_length=max(args[1]->max_length,args[2]->max_length);
- decimals=max(args[0]->decimals,args[1]->decimals);
+ decimals=max(args[1]->decimals,args[2]->decimals);
enum Item_result arg1_type=args[1]->result_type();
enum Item_result arg2_type=args[2]->result_type();
if (arg1_type == STRING_RESULT || arg2_type == STRING_RESULT)
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 84bc972608e..8a2bd15ae6d 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1758,7 +1758,7 @@ Item_func_get_user_var::val_str(String *str)
return NULL;
switch (entry->type) {
case REAL_RESULT:
- str->set(*(double*) entry->value);
+ str->set(*(double*) entry->value,decimals);
break;
case INT_RESULT:
str->set(*(longlong*) entry->value);
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 80f72c30e57..9d69b713611 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -30,7 +30,6 @@
#ifdef HAVE_CRYPT_H
#include <crypt.h>
#endif
-
#include "md5.h"
String empty_string("");
@@ -66,13 +65,13 @@ String *Item_func_md5::val_str(String *str)
String * sptr= args[0]->val_str(str);
if (sptr)
{
- MD5_CTX context;
+ my_MD5_CTX context;
unsigned char digest[16];
null_value=0;
- MD5Init (&context);
- MD5Update (&context,(unsigned char *) sptr->ptr(), sptr->length());
- MD5Final (digest, &context);
+ my_MD5Init (&context);
+ my_MD5Update (&context,(unsigned char *) sptr->ptr(), sptr->length());
+ my_MD5Final (digest, &context);
str->alloc(32); // Ensure that memory is free
sprintf((char *) str->ptr(),
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 40fe52a12e5..8d025891877 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -811,7 +811,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2)
{
int res;
Field* f = *field;
- int len = f->field_length;
+ int len = f->pack_length();
switch((*field)->type())
{
case FIELD_TYPE_STRING:
@@ -839,7 +839,8 @@ int dump_leaf(byte* key, uint32 count __attribute__((unused)),
{
char* buf = item->table->record[0];
int error;
- memset(buf, 0xff, item->rec_offset); // make up for cheating in the tree
+ // the first item->rec_offset bytes are taken care of with
+ // restore_record(table,2) in setup()
memcpy(buf + item->rec_offset, key, item->tree.size_of_element);
if ((error = item->table->file->write_row(buf)))
{
@@ -874,8 +875,19 @@ bool Item_sum_count_distinct::setup(THD *thd)
List<Item> list;
/* Create a table with an unique key over all parameters */
for (uint i=0; i < arg_count ; i++)
- if (list.push_back(args[i]))
- return 1;
+ {
+ Item *item=args[i];
+ if (list.push_back(item))
+ return 1; // End of memory
+ if (item->const_item())
+ {
+ (void) item->val_int();
+ if (item->null_value)
+ always_null=1;
+ }
+ }
+ if (always_null)
+ return 0;
count_field_types(tmp_table_param,list,0);
if (table)
{
@@ -883,17 +895,22 @@ bool Item_sum_count_distinct::setup(THD *thd)
tmp_table_param->cleanup();
}
if (!(table=create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1,
- 0, 0, current_lex->options | thd->options)))
+ 0, 0, current_lex->select->options | thd->options)))
return 1;
table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows
table->no_rows=1;
+
if(table->db_type == DB_TYPE_HEAP) // no blobs, otherwise it would be
// MyISAM
{
qsort_cmp2 compare_key;
void* cmp_arg;
int key_len;
+
+ // to make things easier for dump_leaf if we ever have to dump to
+ // MyISAM
+ restore_record(table,2);
if(table->fields == 1) // if we have only one field, which is
// the most common use of count(distinct), it is much faster
@@ -915,20 +932,31 @@ bool Item_sum_count_distinct::setup(THD *thd)
compare_key = (qsort_cmp2)simple_raw_key_cmp;
break;
}
- cmp_arg = (void*)(key_len = field->field_length);
+ cmp_arg = (void*)(key_len = field->pack_length());
rec_offset = 1;
}
else // too bad, cannot cheat - there is more than one field
{
- cmp_arg = (void*)this;
- compare_key = (qsort_cmp2)composite_key_cmp;
+ bool all_binary = 1;
Field** field, **field_end;
field_end = (field = table->field) + table->fields;
for(key_len = 0; field < field_end; ++field)
{
- key_len += (*field)->field_length;
+ key_len += (*field)->pack_length();
+ if(!(*field)->binary())
+ all_binary = 0;
}
rec_offset = table->reclength - key_len;
+ if(all_binary)
+ {
+ compare_key = (qsort_cmp2)simple_raw_key_cmp;
+ cmp_arg = (void*)key_len;
+ }
+ else
+ {
+ compare_key = (qsort_cmp2)composite_key_cmp ;
+ cmp_arg = (void*)this;
+ }
}
init_tree(&tree, min(max_heap_table_size, sortbuff_size/16), 0,
@@ -940,7 +968,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
// but this has to be handled - otherwise someone can crash
// the server with a DoS attack
max_elements_in_tree = (key_len) ? max_heap_table_size/key_len :
- max_heap_table_size;
+ 1;
}
return 0;
@@ -960,20 +988,22 @@ int Item_sum_count_distinct::tree_to_myisam()
void Item_sum_count_distinct::reset()
{
- if(use_tree)
+ if (use_tree)
reset_tree(&tree);
- else
- {
- table->file->extra(HA_EXTRA_NO_CACHE);
- table->file->delete_all_rows();
- table->file->extra(HA_EXTRA_WRITE_CACHE);
- }
+ else if (table)
+ {
+ table->file->extra(HA_EXTRA_NO_CACHE);
+ table->file->delete_all_rows();
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
+ }
(void) add();
}
bool Item_sum_count_distinct::add()
{
int error;
+ if (always_null)
+ return 0;
copy_fields(tmp_table_param);
copy_funcs(tmp_table_param->funcs);
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 1aa7f78d786..753a9de8b48 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -148,19 +148,21 @@ class Item_sum_count_distinct :public Item_sum_int
bool fix_fields(THD *thd,TABLE_LIST *tables);
TMP_TABLE_PARAM *tmp_table_param;
TREE tree;
- uint max_elements_in_tree;
+
// calculated based on max_heap_table_size. If reached,
// walk the tree and dump it into MyISAM table
+ uint max_elements_in_tree;
+
+ // the first few bytes of record ( at least one)
+ // are just markers for deleted and NULLs. We want to skip them since
+ // they will just bloat the tree without providing any valuable info
+ int rec_offset;
- bool use_tree;
// If there are no blobs, we can use a tree, which
// is faster than heap table. In that case, we still use the table
// to help get things set up, but we insert nothing in it
-
- int rec_offset;
- // the first few bytes of record ( at least one)
- // are just markers for deleted and NULLs. We want to skip them since
- // they will just bloat the tree without providing any valuable info
+ bool use_tree;
+ bool always_null; // Set to 1 if the result is always NULL
int tree_to_myisam();
@@ -171,7 +173,7 @@ class Item_sum_count_distinct :public Item_sum_int
public:
Item_sum_count_distinct(List<Item> &list)
:Item_sum_int(list),table(0),used_table_cache(~(table_map) 0),
- tmp_table_param(0),use_tree(0)
+ tmp_table_param(0),use_tree(0),always_null(0)
{ quick_group=0; }
~Item_sum_count_distinct();
table_map used_tables() const { return used_table_cache; }
diff --git a/sql/lex.h b/sql/lex.h
index c29c4081787..e9ab150f5b2 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -70,6 +70,7 @@ static SYMBOL symbols[] = {
{ "BIGINT", SYM(BIGINT),0,0},
{ "BIT", SYM(BIT_SYM),0,0},
{ "BINARY", SYM(BINARY),0,0},
+ { "BINLOG", SYM(BINLOG_SYM),0,0},
{ "BLOB", SYM(BLOB_SYM),0,0},
{ "BOOL", SYM(BOOL_SYM),0,0},
{ "BOTH", SYM(BOTH),0,0},
@@ -113,6 +114,7 @@ static SYMBOL symbols[] = {
{ "DELETE", SYM(DELETE_SYM),0,0},
{ "DESC", SYM(DESC),0,0},
{ "DESCRIBE", SYM(DESCRIBE),0,0},
+ { "DIRECTORY", SYM(DIRECTORY_SYM),0,0},
{ "DISABLE", SYM(DISABLE_SYM),0,0},
{ "DISTINCT", SYM(DISTINCT),0,0},
{ "DISTINCTROW", SYM(DISTINCT),0,0}, /* Access likes this */
@@ -127,6 +129,7 @@ static SYMBOL symbols[] = {
{ "ENABLE", SYM(ENABLE_SYM),0,0},
{ "ENCLOSED", SYM(ENCLOSED),0,0},
{ "ENUM", SYM(ENUM),0,0},
+ { "EVENTS", SYM(EVENTS_SYM),0,0},
{ "EXPLAIN", SYM(DESCRIBE),0,0},
{ "EXISTS", SYM(EXISTS),0,0},
{ "EXTENDED", SYM(EXTENDED_SYM),0,0},
@@ -167,6 +170,7 @@ static SYMBOL symbols[] = {
{ "IGNORE", SYM(IGNORE_SYM),0,0},
{ "IN", SYM(IN_SYM),0,0},
{ "INDEX", SYM(INDEX),0,0},
+ { "INDEXES", SYM(INDEXES),0,0},
{ "INFILE", SYM(INFILE),0,0},
{ "INNER", SYM(INNER_SYM),0,0},
{ "INNOBASE", SYM(INNOBASE_SYM),0,0},
diff --git a/sql/lock.cc b/sql/lock.cc
index 23f81c9c164..1d9aca66e74 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -35,6 +35,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count,
bool unlock, TABLE **write_locked);
static int lock_external(TABLE **table,uint count);
static int unlock_external(THD *thd, TABLE **table,uint count);
+static void print_lock_error(int error);
MYSQL_LOCK *mysql_lock_tables(THD *thd,TABLE **tables,uint count)
@@ -154,7 +155,7 @@ static int lock_external(TABLE **tables,uint count)
(*tables)->file->external_lock(thd, F_UNLCK);
(*tables)->current_lock=F_UNLCK;
}
- my_error(ER_CANT_LOCK,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error);
+ print_lock_error(error);
DBUG_RETURN(error);
}
else
@@ -325,7 +326,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
}
}
if (error_code)
- my_error(ER_CANT_LOCK,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error_code);
+ print_lock_error(error_code);
DBUG_RETURN(error_code);
}
@@ -480,3 +481,24 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
}
DBUG_RETURN(result);
}
+
+static void print_lock_error(int error)
+{
+ int textno;
+ DBUG_ENTER("print_lock_error");
+
+ switch (error) {
+ case HA_ERR_LOCK_WAIT_TIMEOUT:
+ textno=ER_LOCK_WAIT_TIMEOUT;
+ break;
+ case HA_ERR_READ_ONLY_TRANSACTION:
+ textno=ER_READ_ONLY_TRANSACTION;
+ break;
+ default:
+ textno=ER_CANT_LOCK;
+ break;
+ }
+ my_error(textno,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error);
+ DBUG_VOID_RETURN;
+}
+
diff --git a/sql/log.cc b/sql/log.cc
index 4cd93261973..40e5d5673be 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -81,7 +81,7 @@ static int find_uniq_filename(char *name)
MYSQL_LOG::MYSQL_LOG(): last_time(0), query_start(0),index_file(-1),
name(0), log_type(LOG_CLOSED),write_error(0),
- inited(0), no_rotate(0)
+ inited(0), log_seq(1), no_rotate(0)
{
/*
We don't want to intialize LOCK_Log here as the thread system may
@@ -230,8 +230,11 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
if ((do_magic && my_b_write(&log_file, (byte*) BINLOG_MAGIC, 4)) ||
open_index(O_APPEND | O_RDWR | O_CREAT))
goto err;
+
+ log_seq = 1;
Start_log_event s;
bool error;
+ s.set_log_seq(0, this);
s.write(&log_file);
flush_io_cache(&log_file);
pthread_mutex_lock(&LOCK_index);
@@ -531,6 +534,14 @@ void MYSQL_LOG::new_file()
to change base names at some point.
*/
Rotate_log_event r(new_name+dirname_length(new_name));
+ THD* thd = current_thd;
+ r.set_log_seq(0, this);
+ // this log rotation could have been initiated by a master of
+ // the slave running with log-bin
+ // we set the flag on rotate event to prevent infinite log rotation
+ // loop
+ if(thd && slave_thd && thd == slave_thd)
+ r.flags |= LOG_EVENT_FORCED_ROTATE_F;
r.write(&log_file);
VOID(pthread_cond_broadcast(&COND_binlog_update));
}
@@ -626,6 +637,21 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command,
/* Write to binary log in a format to be used for replication */
+bool MYSQL_LOG::write(Slave_log_event* event_info)
+{
+ bool error;
+ if (!inited) // Can't use mutex if not init
+ return 0;
+ VOID(pthread_mutex_lock(&LOCK_log));
+ if(!event_info->log_seq)
+ event_info->set_log_seq(current_thd, this);
+ error = event_info->write(&log_file);
+ flush_io_cache(&log_file);
+ VOID(pthread_mutex_unlock(&LOCK_log));
+ return error;
+}
+
+
bool MYSQL_LOG::write(Query_log_event* event_info)
{
/* In most cases this is only called if 'is_open()' is true */
@@ -638,8 +664,12 @@ bool MYSQL_LOG::write(Query_log_event* event_info)
if (is_open())
{
THD *thd=event_info->thd;
+#ifdef USING_TRANSACTIONS
IO_CACHE *file = (event_info->cache_stmt ? &thd->transaction.trans_log :
&log_file);
+#else
+ IO_CACHE *file = &log_file;
+#endif
if ((!(thd->options & OPTION_BIN_LOG) &&
(thd->master_access & PROCESS_ACL)) ||
!db_ok(event_info->db, binlog_do_db, binlog_ignore_db))
@@ -652,12 +682,18 @@ bool MYSQL_LOG::write(Query_log_event* event_info)
if (thd->last_insert_id_used)
{
Intvar_log_event e((uchar)LAST_INSERT_ID_EVENT, thd->last_insert_id);
+ e.set_log_seq(thd, this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
if (e.write(file))
goto err;
}
if (thd->insert_id_used)
{
Intvar_log_event e((uchar)INSERT_ID_EVENT, thd->last_insert_id);
+ e.set_log_seq(thd, this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
if (e.write(file))
goto err;
}
@@ -670,10 +706,12 @@ bool MYSQL_LOG::write(Query_log_event* event_info)
// just in case somebody wants it later
thd->query_length = (uint)(p - buf);
Query_log_event e(thd, buf);
+ e.set_log_seq(thd, this);
if (e.write(file))
goto err;
thd->query_length = save_query_length; // clean up
}
+ event_info->set_log_seq(thd, this);
if (event_info->write(file) ||
file == &log_file && flush_io_cache(file))
goto err;
@@ -768,6 +806,7 @@ bool MYSQL_LOG::write(Load_log_event* event_info)
if ((thd->options & OPTION_BIN_LOG) ||
!(thd->master_access & PROCESS_ACL))
{
+ event_info->set_log_seq(thd, this);
if (event_info->write(&log_file) || flush_io_cache(&log_file))
{
if (!write_error)
@@ -919,6 +958,7 @@ void MYSQL_LOG::close(bool exiting)
if (log_type == LOG_BIN)
{
Stop_log_event s;
+ s.set_log_seq(0, this);
s.write(&log_file);
VOID(pthread_cond_broadcast(&COND_binlog_update));
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index ac985c266c8..5538e6c0b7f 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -20,6 +20,7 @@
#pragma implementation // gcc: Class implementation
#endif
#include "mysql_priv.h"
+#include "slave.h"
#endif /* MYSQL_CLIENT */
@@ -31,6 +32,7 @@ static void pretty_print_char(FILE* file, int c)
case '\r': fprintf(file, "\\r"); break;
case '\\': fprintf(file, "\\\\"); break;
case '\b': fprintf(file, "\\b"); break;
+ case '\t': fprintf(file, "\\t"); break;
case '\'': fprintf(file, "\\'"); break;
case 0 : fprintf(file, "\\0"); break;
default:
@@ -40,6 +42,220 @@ static void pretty_print_char(FILE* file, int c)
fputc('\'', file);
}
+#ifndef MYSQL_CLIENT
+
+static void pretty_print_char(String* packet, int c)
+{
+ packet->append('\'');
+ switch(c) {
+ case '\n': packet->append( "\\n"); break;
+ case '\r': packet->append( "\\r"); break;
+ case '\\': packet->append( "\\\\"); break;
+ case '\b': packet->append( "\\b"); break;
+ case '\t': packet->append( "\\t"); break;
+ case '\'': packet->append( "\\'"); break;
+ case 0 : packet->append( "\\0"); break;
+ default:
+ packet->append((char)c);
+ break;
+ }
+ packet->append('\'');
+}
+
+#endif
+
+const char* Log_event::get_type_str()
+{
+ switch(get_type_code())
+ {
+ case START_EVENT: return "Start";
+ case STOP_EVENT: return "Stop";
+ case QUERY_EVENT: return "Query";
+ case ROTATE_EVENT: return "Rotate";
+ case INTVAR_EVENT: return "Intvar";
+ case LOAD_EVENT: return "Load";
+ case SLAVE_EVENT: return "Slave";
+ default: /* impossible */ return "Unknown";
+ }
+}
+
+#ifndef MYSQL_CLIENT
+
+void Log_event::pack_info(String* packet)
+{
+ net_store_data(packet, "", 0);
+}
+
+void Query_log_event::pack_info(String* packet)
+{
+ String tmp;
+ if(db && db_len)
+ {
+ tmp.append("use ");
+ tmp.append(db, db_len);
+ tmp.append("; ", 2);
+ }
+
+ if(query && q_len)
+ tmp.append(query, q_len);
+ net_store_data(packet, (char*)tmp.ptr(), tmp.length());
+}
+
+void Start_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+
+ tmp.append("Server ver: ");
+ tmp.append(server_version);
+ tmp.append(", Binlog ver: ");
+ tmp.append(llstr(binlog_version, buf));
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Load_log_event::pack_info(String* packet)
+{
+ String tmp;
+ if(db && db_len)
+ {
+ tmp.append("use ");
+ tmp.append(db, db_len);
+ tmp.append("; ", 2);
+ }
+
+ tmp.append("LOAD DATA INFILE '");
+ tmp.append(fname);
+ tmp.append("' ", 2);
+ if(sql_ex.opt_flags && REPLACE_FLAG )
+ tmp.append(" REPLACE ");
+ else if(sql_ex.opt_flags && IGNORE_FLAG )
+ tmp.append(" IGNORE ");
+
+ tmp.append("INTO TABLE ");
+ tmp.append(table_name);
+ if (!(sql_ex.empty_flags & FIELD_TERM_EMPTY))
+ {
+ tmp.append(" FIELDS TERMINATED BY ");
+ pretty_print_char(&tmp, sql_ex.field_term);
+ }
+
+ if (!(sql_ex.empty_flags & ENCLOSED_EMPTY))
+ {
+ if (sql_ex.opt_flags && OPT_ENCLOSED_FLAG )
+ tmp.append(" OPTIONALLY ");
+ tmp.append( " ENCLOSED BY ");
+ pretty_print_char(&tmp, sql_ex.enclosed);
+ }
+
+ if (!(sql_ex.empty_flags & ESCAPED_EMPTY))
+ {
+ tmp.append( " ESCAPED BY ");
+ pretty_print_char(&tmp, sql_ex.escaped);
+ }
+
+ if (!(sql_ex.empty_flags & LINE_TERM_EMPTY))
+ {
+ tmp.append(" LINES TERMINATED BY ");
+ pretty_print_char(&tmp, sql_ex.line_term);
+ }
+
+ if (!(sql_ex.empty_flags & LINE_START_EMPTY))
+ {
+ tmp.append(" LINES STARTING BY ");
+ pretty_print_char(&tmp, sql_ex.line_start);
+ }
+
+ if ((int)skip_lines > 0)
+ tmp.append( " IGNORE %ld LINES ", (long) skip_lines);
+
+ if (num_fields)
+ {
+ uint i;
+ const char* field = fields;
+ tmp.append(" (");
+ for(i = 0; i < num_fields; i++)
+ {
+ if(i)
+ tmp.append(" ,");
+ tmp.append( field);
+
+ field += field_lens[i] + 1;
+ }
+ tmp.append(')');
+ }
+
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Rotate_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+ tmp.append(new_log_ident, ident_len);
+ tmp.append(";pos=");
+ tmp.append(llstr(pos,buf));
+ if(flags & LOG_EVENT_FORCED_ROTATE_F)
+ tmp.append("; forced by master");
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Intvar_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+ tmp.append(get_var_type_name());
+ tmp.append('=');
+ tmp.append(llstr(val, buf));
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Slave_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+ tmp.append("host=");
+ tmp.append(master_host);
+ tmp.append(",port=");
+ tmp.append(llstr(master_port,buf));
+ tmp.append(",log=");
+ tmp.append(master_log);
+ tmp.append(",pos=");
+ tmp.append(llstr(master_pos,buf));
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+
+void Log_event::init_show_field_list(List<Item>* field_list)
+{
+ field_list->push_back(new Item_empty_string("Log_name", 20));
+ field_list->push_back(new Item_empty_string("Pos", 20));
+ field_list->push_back(new Item_empty_string("Event_type", 20));
+ field_list->push_back(new Item_empty_string("Server_id", 20));
+ field_list->push_back(new Item_empty_string("Log_seq", 20));
+ field_list->push_back(new Item_empty_string("Info", 20));
+}
+
+int Log_event::net_send(THD* thd, const char* log_name, ulong pos)
+{
+ String* packet = &thd->packet;
+ const char* p = strrchr(log_name, FN_LIBCHAR);
+ const char* event_type;
+ if (p)
+ log_name = p + 1;
+
+ packet->length(0);
+ net_store_data(packet, log_name, strlen(log_name));
+ net_store_data(packet, (longlong)pos);
+ event_type = get_type_str();
+ net_store_data(packet, event_type, strlen(event_type));
+ net_store_data(packet, server_id);
+ net_store_data(packet, log_seq);
+ pack_info(packet);
+ return my_net_write(&thd->net, (char*)packet->ptr(), packet->length());
+}
+
+#endif
+
int Query_log_event::write(IO_CACHE* file)
{
return query ? Log_event::write(file) : -1;
@@ -52,7 +268,6 @@ int Log_event::write(IO_CACHE* file)
int Log_event::write_header(IO_CACHE* file)
{
- // make sure to change this when the header gets bigger
char buf[LOG_EVENT_HEADER_LEN];
char* pos = buf;
int4store(pos, when); // timestamp
@@ -63,6 +278,10 @@ int Log_event::write_header(IO_CACHE* file)
long tmp=get_data_size() + LOG_EVENT_HEADER_LEN;
int4store(pos, tmp);
pos += 4;
+ int4store(pos, log_seq);
+ pos += 4;
+ int2store(pos, flags);
+ pos += 2;
return (my_b_write(file, (byte*) buf, (uint) (pos - buf)));
}
@@ -115,91 +334,51 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
Log_event* Log_event::read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock)
{
- time_t timestamp;
- uint32 server_id;
-
- char buf[LOG_EVENT_HEADER_LEN-4];
+ char head[LOG_EVENT_HEADER_LEN];
if(log_lock) pthread_mutex_lock(log_lock);
- if (my_b_read(file, (byte *) buf, sizeof(buf)))
+ if (my_b_read(file, (byte *) head, sizeof(head)))
{
if (log_lock) pthread_mutex_unlock(log_lock);
- return NULL;
- }
- timestamp = uint4korr(buf);
- server_id = uint4korr(buf + 5);
-
- switch(buf[EVENT_TYPE_OFFSET])
- {
- case QUERY_EVENT:
- {
- Query_log_event* q = new Query_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- if (!q->query)
- {
- delete q;
- q=NULL;
- }
- return q;
- }
-
- case LOAD_EVENT:
- {
- Load_log_event* l = new Load_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- if (!l->table_name)
- {
- delete l;
- l=NULL;
- }
- return l;
+ return 0;
}
+ uint data_len = uint4korr(head + EVENT_LEN_OFFSET);
+ char* buf = 0;
+ const char* error = 0;
+ Log_event* res = 0;
- case ROTATE_EVENT:
+ if (data_len > max_allowed_packet)
{
- Rotate_log_event* r = new Rotate_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
-
- if (!r->new_log_ident)
- {
- delete r;
- r=NULL;
- }
- return r;
+ error = "Event too big";
+ goto err;
}
- case INTVAR_EVENT:
+ if (data_len < LOG_EVENT_HEADER_LEN)
{
- Intvar_log_event* e = new Intvar_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
-
- if (e->type == INVALID_INT_EVENT)
- {
- delete e;
- e=NULL;
- }
- return e;
+ error = "Event too small";
+ goto err;
}
- case START_EVENT:
- {
- Start_log_event* e = new Start_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- return e;
- }
- case STOP_EVENT:
- {
- Stop_log_event* e = new Stop_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- return e;
- }
- default:
- break;
+ if (!(buf = my_malloc(data_len, MYF(MY_WME))))
+ {
+ error = "Out of memory";
+ goto err;
}
- // default
+ memcpy(buf, head, LOG_EVENT_HEADER_LEN);
+ if(my_b_read(file, (byte*) buf + LOG_EVENT_HEADER_LEN,
+ data_len - LOG_EVENT_HEADER_LEN))
+ {
+ error = "read error";
+ goto err;
+ }
+ res = read_log_event(buf, data_len);
+err:
if (log_lock) pthread_mutex_unlock(log_lock);
- return NULL;
+ if(error)
+ sql_print_error(error);
+ my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
+ return res;
}
Log_event* Log_event::read_log_event(const char* buf, int event_len)
@@ -245,6 +424,17 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len)
return r;
}
+ case SLAVE_EVENT:
+ {
+ Slave_log_event* s = new Slave_log_event(buf, event_len);
+ if (!s->master_host)
+ {
+ delete s;
+ return NULL;
+ }
+
+ return s;
+ }
case START_EVENT: return new Start_log_event(buf);
case STOP_EVENT: return new Stop_log_event(buf);
case INTVAR_EVENT: return new Intvar_log_event(buf);
@@ -305,6 +495,7 @@ void Stop_log_event::print(FILE* file, bool short_form, char* last_db)
void Rotate_log_event::print(FILE* file, bool short_form, char* last_db)
{
+ char buf[22];
if (short_form)
return;
@@ -313,51 +504,25 @@ void Rotate_log_event::print(FILE* file, bool short_form, char* last_db)
if (new_log_ident)
my_fwrite(file, (byte*) new_log_ident, (uint)ident_len,
MYF(MY_NABP | MY_WME));
- fprintf(file, "\n");
+ fprintf(file, "pos=%s\n", llstr(pos, buf));
fflush(file);
}
-Rotate_log_event::Rotate_log_event(IO_CACHE* file, time_t when_arg,
- uint32 server_id):
- Log_event(when_arg, 0, 0, server_id),new_log_ident(NULL),alloced(0)
-{
- char *tmp_ident;
- char buf[4];
-
- if (my_b_read(file, (byte*) buf, sizeof(buf)))
- return;
- ulong event_len;
- event_len = uint4korr(buf);
- if (event_len < ROTATE_EVENT_OVERHEAD)
- return;
-
- ident_len = (uchar)(event_len - ROTATE_EVENT_OVERHEAD);
- if (!(tmp_ident = (char*) my_malloc((uint)ident_len, MYF(MY_WME))))
- return;
- if (my_b_read( file, (byte*) tmp_ident, (uint) ident_len))
- {
- my_free((gptr) tmp_ident, MYF(0));
- return;
- }
-
- new_log_ident = tmp_ident;
- alloced = 1;
-}
-
Start_log_event::Start_log_event(const char* buf) :Log_event(buf)
{
- buf += EVENT_LEN_OFFSET + 4; // skip even length
- binlog_version = uint2korr(buf);
- memcpy(server_version, buf + 2, sizeof(server_version));
- created = uint4korr(buf + 2 + sizeof(server_version));
+ binlog_version = uint2korr(buf + LOG_EVENT_HEADER_LEN +
+ ST_BINLOG_VER_OFFSET);
+ memcpy(server_version, buf + ST_SERVER_VER_OFFSET + LOG_EVENT_HEADER_LEN,
+ ST_SERVER_VER_LEN);
+ created = uint4korr(buf + ST_CREATED_OFFSET + LOG_EVENT_HEADER_LEN);
}
int Start_log_event::write_data(IO_CACHE* file)
{
- char buff[sizeof(server_version)+2+4];
- int2store(buff,binlog_version);
- memcpy(buff+2,server_version,sizeof(server_version));
- int4store(buff+2+sizeof(server_version),created);
+ char buff[START_HEADER_LEN];
+ int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
+ memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
+ int4store(buff + ST_CREATED_OFFSET,created);
return (my_b_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0);
}
@@ -369,8 +534,10 @@ Rotate_log_event::Rotate_log_event(const char* buf, int event_len):
if(event_len < ROTATE_EVENT_OVERHEAD)
return;
+ pos = uint8korr(buf + R_POS_OFFSET + LOG_EVENT_HEADER_LEN);
ident_len = (uchar)(event_len - ROTATE_EVENT_OVERHEAD);
- if (!(new_log_ident = (char*) my_memdup((byte*) buf + LOG_EVENT_HEADER_LEN,
+ if (!(new_log_ident = (char*) my_memdup((byte*) buf + R_IDENT_OFFSET
+ + LOG_EVENT_HEADER_LEN,
(uint) ident_len, MYF(MY_WME))))
return;
@@ -379,42 +546,10 @@ Rotate_log_event::Rotate_log_event(const char* buf, int event_len):
int Rotate_log_event::write_data(IO_CACHE* file)
{
- return my_b_write(file, (byte*) new_log_ident, (uint) ident_len) ? -1 :0;
-}
-
-Query_log_event::Query_log_event(IO_CACHE* file, time_t when_arg,
- uint32 server_id):
- Log_event(when_arg,0,0,server_id),data_buf(0),query(NULL),db(NULL)
-{
- char buf[QUERY_HEADER_LEN + 4];
- ulong data_len;
- if (my_b_read(file, (byte*) buf, sizeof(buf)))
- return; // query == NULL will tell the
- // caller there was a problem
- data_len = uint4korr(buf);
- if (data_len < QUERY_EVENT_OVERHEAD)
- return; // tear-drop attack protection :)
-
- data_len -= QUERY_EVENT_OVERHEAD;
- exec_time = uint4korr(buf + 8);
- db_len = (uint)buf[12];
- error_code = uint2korr(buf + 13);
-
- /* Allocate one byte extra for end \0 */
- if (!(data_buf = (char*) my_malloc(data_len+1, MYF(MY_WME))))
- return;
- if (my_b_read( file, (byte*) data_buf, data_len))
- {
- my_free((gptr) data_buf, MYF(0));
- data_buf = 0;
- return;
- }
-
- thread_id = uint4korr(buf + 4);
- db = data_buf;
- query=data_buf + db_len + 1;
- q_len = data_len - 1 - db_len;
- *((char*) query + q_len) = 0; // Safety
+ char buf[ROTATE_HEADER_LEN];
+ int8store(buf, pos + R_POS_OFFSET);
+ return my_b_write(file, (byte*)buf, ROTATE_HEADER_LEN) ||
+ my_b_write(file, (byte*)new_log_ident, (uint) ident_len);
}
Query_log_event::Query_log_event(const char* buf, int event_len):
@@ -423,19 +558,19 @@ Query_log_event::Query_log_event(const char* buf, int event_len):
if ((uint)event_len < QUERY_EVENT_OVERHEAD)
return;
ulong data_len;
- buf += EVENT_LEN_OFFSET;
data_len = event_len - QUERY_EVENT_OVERHEAD;
+
- exec_time = uint4korr(buf + 8);
- error_code = uint2korr(buf + 13);
+ exec_time = uint4korr(buf + LOG_EVENT_HEADER_LEN + Q_EXEC_TIME_OFFSET);
+ error_code = uint2korr(buf + LOG_EVENT_HEADER_LEN + Q_ERR_CODE_OFFSET);
if (!(data_buf = (char*) my_malloc(data_len + 1, MYF(MY_WME))))
return;
- memcpy(data_buf, buf + QUERY_HEADER_LEN + 4, data_len);
- thread_id = uint4korr(buf + 4);
+ memcpy(data_buf, buf + LOG_EVENT_HEADER_LEN + Q_DATA_OFFSET, data_len);
+ thread_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + Q_THREAD_ID_OFFSET);
db = data_buf;
- db_len = (uint)buf[12];
+ db_len = (uint)buf[LOG_EVENT_HEADER_LEN + Q_DB_LEN_OFFSET];
query=data_buf + db_len + 1;
q_len = data_len - 1 - db_len;
*((char*)query+q_len) = 0;
@@ -474,44 +609,38 @@ int Query_log_event::write_data(IO_CACHE* file)
if (!query) return -1;
char buf[QUERY_HEADER_LEN];
- char* pos = buf;
- int4store(pos, thread_id);
- pos += 4;
- int4store(pos, exec_time);
- pos += 4;
- *pos++ = (char)db_len;
- int2store(pos, error_code);
- pos += 2;
+ int4store(buf + Q_THREAD_ID_OFFSET, thread_id);
+ int4store(buf + Q_EXEC_TIME_OFFSET, exec_time);
+ buf[Q_DB_LEN_OFFSET] = (char)db_len;
+ int2store(buf + Q_ERR_CODE_OFFSET, error_code);
- return (my_b_write(file, (byte*) buf, (uint)(pos - buf)) ||
+ return (my_b_write(file, (byte*) buf, QUERY_HEADER_LEN) ||
my_b_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) ||
my_b_write(file, (byte*) query, q_len)) ? -1 : 0;
}
-Intvar_log_event:: Intvar_log_event(IO_CACHE* file, time_t when_arg,
- uint32 server_id)
- :Log_event(when_arg,0,0,server_id), type(INVALID_INT_EVENT)
+Intvar_log_event::Intvar_log_event(const char* buf):Log_event(buf)
{
- char buf[9+4];
- if (!my_b_read(file, (byte*) buf, sizeof(buf)))
- {
- type = buf[4];
- val = uint8korr(buf+1+4);
- }
+ buf += LOG_EVENT_HEADER_LEN;
+ type = buf[I_TYPE_OFFSET];
+ val = uint8korr(buf+I_VAL_OFFSET);
}
-Intvar_log_event::Intvar_log_event(const char* buf):Log_event(buf)
+const char* Intvar_log_event::get_var_type_name()
{
- buf += LOG_EVENT_HEADER_LEN;
- type = buf[0];
- val = uint8korr(buf+1);
+ switch(type)
+ {
+ case LAST_INSERT_ID_EVENT: return "LAST_INSERT_ID";
+ case INSERT_ID_EVENT: return "INSERT_ID";
+ default: /* impossible */ return "UNKNOWN";
+ }
}
int Intvar_log_event::write_data(IO_CACHE* file)
{
char buf[9];
- buf[0] = type;
- int8store(buf + 1, val);
+ buf[I_TYPE_OFFSET] = type;
+ int8store(buf + I_VAL_OFFSET, val);
return my_b_write(file, (byte*) buf, sizeof(buf));
}
@@ -542,12 +671,12 @@ void Intvar_log_event::print(FILE* file, bool short_form, char* last_db)
int Load_log_event::write_data(IO_CACHE* file)
{
char buf[LOAD_HEADER_LEN];
- int4store(buf, thread_id);
- int4store(buf + 4, exec_time);
- int4store(buf + 8, skip_lines);
- buf[12] = (char)table_name_len;
- buf[13] = (char)db_len;
- int4store(buf + 14, num_fields);
+ int4store(buf + L_THREAD_ID_OFFSET, thread_id);
+ int4store(buf + L_EXEC_TIME_OFFSET, exec_time);
+ int4store(buf + L_SKIP_LINES_OFFSET, skip_lines);
+ buf[L_TBL_LEN_OFFSET] = (char)table_name_len;
+ buf[L_DB_LEN_OFFSET] = (char)db_len;
+ int4store(buf + L_NUM_FIELDS_OFFSET, num_fields);
if(my_b_write(file, (byte*)buf, sizeof(buf)) ||
my_b_write(file, (byte*)&sql_ex, sizeof(sql_ex)))
@@ -566,52 +695,33 @@ int Load_log_event::write_data(IO_CACHE* file)
return 0;
}
-Load_log_event::Load_log_event(IO_CACHE* file, time_t when, uint32 server_id):
- Log_event(when,0,0,server_id),data_buf(0),num_fields(0),
- fields(0),field_lens(0),field_block_len(0),
- table_name(0),db(0),fname(0)
-{
- char buf[LOAD_HEADER_LEN + 4];
- ulong data_len;
- if (my_b_read(file, (byte*)buf, sizeof(buf)) ||
- my_b_read(file, (byte*)&sql_ex, sizeof(sql_ex)))
- return;
-
- data_len = uint4korr(buf) - LOAD_EVENT_OVERHEAD;
- if (!(data_buf = (char*)my_malloc(data_len + 1, MYF(MY_WME))))
- return;
- if (my_b_read(file, (byte*)data_buf, data_len))
- return;
- copy_log_event(buf,data_len);
-}
-
Load_log_event::Load_log_event(const char* buf, int event_len):
Log_event(buf),data_buf(0),num_fields(0),fields(0),
field_lens(0),field_block_len(0),
table_name(0),db(0),fname(0)
{
- ulong data_len;
-
+ uint data_len;
if((uint)event_len < (LOAD_EVENT_OVERHEAD + LOG_EVENT_HEADER_LEN))
return;
- buf += EVENT_LEN_OFFSET;
- memcpy(&sql_ex, buf + LOAD_HEADER_LEN + 4, sizeof(sql_ex));
- data_len = event_len;
-
+ memcpy(&sql_ex, buf + LOAD_HEADER_LEN + LOG_EVENT_HEADER_LEN,
+ sizeof(sql_ex));
+ data_len = event_len - LOAD_HEADER_LEN - LOG_EVENT_HEADER_LEN -
+ sizeof(sql_ex);
if(!(data_buf = (char*)my_malloc(data_len + 1, MYF(MY_WME))))
return;
- memcpy(data_buf, buf + 22 + sizeof(sql_ex), data_len);
+ memcpy(data_buf, buf +LOG_EVENT_HEADER_LEN + LOAD_HEADER_LEN
+ + sizeof(sql_ex), data_len);
copy_log_event(buf, data_len);
}
void Load_log_event::copy_log_event(const char *buf, ulong data_len)
{
- thread_id = uint4korr(buf+4);
- exec_time = uint4korr(buf+8);
- skip_lines = uint4korr(buf + 12);
- table_name_len = (uint)buf[16];
- db_len = (uint)buf[17];
- num_fields = uint4korr(buf + 18);
+ thread_id = uint4korr(buf + L_THREAD_ID_OFFSET + LOG_EVENT_HEADER_LEN);
+ exec_time = uint4korr(buf + L_EXEC_TIME_OFFSET + LOG_EVENT_HEADER_LEN);
+ skip_lines = uint4korr(buf + L_SKIP_LINES_OFFSET + LOG_EVENT_HEADER_LEN);
+ table_name_len = (uint)buf[L_TBL_LEN_OFFSET + LOG_EVENT_HEADER_LEN];
+ db_len = (uint)buf[L_DB_LEN_OFFSET + LOG_EVENT_HEADER_LEN];
+ num_fields = uint4korr(buf + L_NUM_FIELDS_OFFSET + LOG_EVENT_HEADER_LEN);
if (num_fields > data_len) // simple sanity check against corruption
return;
@@ -717,6 +827,12 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db)
#ifndef MYSQL_CLIENT
+void Log_event::set_log_seq(THD* thd, MYSQL_LOG* log)
+ {
+ log_seq = (thd && thd->log_seq) ? thd->log_seq++ : log->log_seq++;
+ }
+
+
void Load_log_event::set_fields(List<Item> &fields)
{
uint i;
@@ -729,4 +845,92 @@ void Load_log_event::set_fields(List<Item> &fields)
}
+Slave_log_event::Slave_log_event(THD* thd_arg,MASTER_INFO* mi):
+ Log_event(thd_arg->start_time, 0, 1, thd_arg->server_id),
+ mem_pool(0),master_host(0)
+{
+ if(!mi->inited)
+ return;
+ pthread_mutex_lock(&mi->lock);
+ master_host_len = strlen(mi->host);
+ master_log_len = strlen(mi->log_file_name);
+ // on OOM, just do not initialize the structure and print the error
+ if((mem_pool = (char*)my_malloc(get_data_size() + 1,
+ MYF(MY_WME))))
+ {
+ master_host = mem_pool + SL_MASTER_HOST_OFFSET ;
+ memcpy(master_host, mi->host, master_host_len + 1);
+ master_log = master_host + master_host_len + 1;
+ memcpy(master_log, mi->log_file_name, master_log_len + 1);
+ master_port = mi->port;
+ master_pos = mi->pos;
+ }
+ else
+ sql_print_error("Out of memory while recording slave event");
+ pthread_mutex_unlock(&mi->lock);
+}
+
+
#endif
+
+
+Slave_log_event::~Slave_log_event()
+{
+ my_free(mem_pool, MYF(MY_ALLOW_ZERO_PTR));
+}
+
+void Slave_log_event::print(FILE* file, bool short_form = 0,
+ char* last_db = 0)
+{
+ char llbuff[22];
+ if(short_form)
+ return;
+ print_header(file);
+ fputc('\n', file);
+ fprintf(file, "Slave: master_host='%s' master_port=%d \
+ master_log=%s master_pos=%s\n", master_host, master_port, master_log,
+ llstr(master_pos, llbuff));
+}
+
+int Slave_log_event::get_data_size()
+{
+ return master_host_len + master_log_len + 1 + SL_MASTER_HOST_OFFSET;
+}
+
+int Slave_log_event::write_data(IO_CACHE* file)
+{
+ int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos);
+ int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port);
+ // log and host are already there
+ return my_b_write(file, (byte*)mem_pool, get_data_size());
+}
+
+void Slave_log_event::init_from_mem_pool(int data_size)
+{
+ master_pos = uint8korr(mem_pool + SL_MASTER_POS_OFFSET);
+ master_port = uint2korr(mem_pool + SL_MASTER_PORT_OFFSET);
+ master_host = mem_pool + SL_MASTER_HOST_OFFSET;
+ master_host_len = strlen(master_host);
+ // safety
+ master_log = master_host + master_host_len + 1;
+ if(master_log > mem_pool + data_size)
+ {
+ master_host = 0;
+ return;
+ }
+
+ master_log_len = strlen(master_log);
+}
+
+Slave_log_event::Slave_log_event(const char* buf, int event_len):
+ Log_event(buf),mem_pool(0),master_host(0)
+{
+ event_len -= LOG_EVENT_HEADER_LEN;
+ if(event_len < 0)
+ return;
+ if(!(mem_pool = (char*)my_malloc(event_len + 1, MYF(MY_WME))))
+ return;
+ memcpy(mem_pool, buf + LOG_EVENT_HEADER_LEN, event_len);
+ mem_pool[event_len] = 0;
+ init_from_mem_pool(event_len);
+}
diff --git a/sql/log_event.h b/sql/log_event.h
index 41f847e8d92..f38ddef05a2 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -34,40 +34,110 @@
#define LOG_READ_TOO_LARGE -7
#define LOG_EVENT_OFFSET 4
-#define BINLOG_VERSION 1
+#define BINLOG_VERSION 2
+
+/* we could have used SERVER_VERSION_LENGTH, but this introduces an
+ obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH
+ this would have broke the replication protocol
+*/
+#define ST_SERVER_VER_LEN 50
+
+/* Binary log consists of events. Each event has a fixed length header,
+ followed by possibly variable ( depending on the type of event) length
+ data body. The data body consists of an optional fixed length segment
+ (post-header), and an optional variable length segment. See #defines and
+ comments below for the format specifics
+*/
+
+/* event-specific post-header sizes */
+#define LOG_EVENT_HEADER_LEN 19
+#define QUERY_HEADER_LEN (4 + 4 + 1 + 2)
+#define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4)
+#define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4)
+#define ROTATE_HEADER_LEN 8
+
+/* event header offsets */
-#define LOG_EVENT_HEADER_LEN 13
-#define QUERY_HEADER_LEN (sizeof(uint32) + sizeof(uint32) + \
- sizeof(uchar) + sizeof(uint16))
-#define LOAD_HEADER_LEN (sizeof(uint32) + sizeof(uint32) + \
- + sizeof(uint32) + 2 + sizeof(uint32))
-#define EVENT_LEN_OFFSET 9
#define EVENT_TYPE_OFFSET 4
+#define SERVER_ID_OFFSET 5
+#define EVENT_LEN_OFFSET 9
+#define LOG_SEQ_OFFSET 13
+#define FLAGS_OFFSET 17
+
+/* start event post-header */
+
+#define ST_BINLOG_VER_OFFSET 0
+#define ST_SERVER_VER_OFFSET 2
+#define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN)
+
+/* slave event post-header */
+
+#define SL_MASTER_PORT_OFFSET 8
+#define SL_MASTER_POS_OFFSET 0
+#define SL_MASTER_HOST_OFFSET 10
+
+/* query event post-header */
+
+#define Q_THREAD_ID_OFFSET 0
+#define Q_EXEC_TIME_OFFSET 4
+#define Q_DB_LEN_OFFSET 8
+#define Q_ERR_CODE_OFFSET 9
+#define Q_DATA_OFFSET QUERY_HEADER_LEN
+
+/* Intvar event post-header */
+
+#define I_TYPE_OFFSET 0
+#define I_VAL_OFFSET 1
+
+/* Load event post-header */
+
+#define L_THREAD_ID_OFFSET 0
+#define L_EXEC_TIME_OFFSET 4
+#define L_SKIP_LINES_OFFSET 8
+#define L_DB_LEN_OFFSET 12
+#define L_TBL_LEN_OFFSET 13
+#define L_NUM_FIELDS_OFFSET 14
+#define L_DATA_OFFSET LOAD_HEADER_LEN
+
+/* Rotate event post-header */
+
+#define R_POS_OFFSET 0
+#define R_IDENT_OFFSET 8
+
#define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN)
-#define ROTATE_EVENT_OVERHEAD LOG_EVENT_HEADER_LEN
+#define QUERY_DATA_OFFSET (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN)
+#define ROTATE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+ROTATE_HEADER_LEN)
#define LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+LOAD_HEADER_LEN+sizeof(sql_ex_info))
#define BINLOG_MAGIC "\xfe\x62\x69\x6e"
+#define LOG_EVENT_TIME_F 0x1
+#define LOG_EVENT_FORCED_ROTATE_F 0x2
+
enum Log_event_type { START_EVENT = 1, QUERY_EVENT =2,
STOP_EVENT=3, ROTATE_EVENT = 4, INTVAR_EVENT=5,
- LOAD_EVENT=6};
+ LOAD_EVENT=6, SLAVE_EVENT=7, FILE_EVENT=8};
enum Int_event_type { INVALID_INT_EVENT = 0, LAST_INSERT_ID_EVENT = 1, INSERT_ID_EVENT = 2
};
#ifndef MYSQL_CLIENT
class String;
+class MYSQL_LOG;
+class THD;
#endif
extern uint32 server_id;
+struct st_master_info;
+
class Log_event
{
public:
time_t when;
ulong exec_time;
- int valid_exec_time; // if false, the exec time setting is bogus
uint32 server_id;
+ uint32 log_seq;
+ uint16 flags;
static void *operator new(size_t size)
{
@@ -84,17 +154,22 @@ public:
virtual int write_data(IO_CACHE* file __attribute__((unused))) { return 0; }
virtual Log_event_type get_type_code() = 0;
Log_event(time_t when_arg, ulong exec_time_arg = 0,
- int valid_exec_time_arg = 0, uint32 server_id_arg = 0):
+ int valid_exec_time = 0, uint32 server_id_arg = 0,
+ uint32 log_seq_arg = 0, uint16 flags_arg = 0):
when(when_arg), exec_time(exec_time_arg),
- valid_exec_time(valid_exec_time_arg)
+ log_seq(log_seq_arg),flags(0)
{
server_id = server_id_arg ? server_id_arg : (::server_id);
+ if(valid_exec_time)
+ flags |= LOG_EVENT_TIME_F;
}
- Log_event(const char* buf): valid_exec_time(0)
+ Log_event(const char* buf)
{
when = uint4korr(buf);
- server_id = uint4korr(buf + 5);
+ server_id = uint4korr(buf + SERVER_ID_OFFSET);
+ log_seq = uint4korr(buf + LOG_SEQ_OFFSET);
+ flags = uint2korr(buf + FLAGS_OFFSET);
}
virtual ~Log_event() {}
@@ -108,10 +183,15 @@ public:
// if mutex is 0, the read will proceed without mutex
static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock);
static Log_event* read_log_event(const char* buf, int event_len);
+ const char* get_type_str();
#ifndef MYSQL_CLIENT
static int read_log_event(IO_CACHE* file, String* packet,
pthread_mutex_t* log_lock);
+ void set_log_seq(THD* thd, MYSQL_LOG* log);
+ virtual void pack_info(String* packet);
+ int net_send(THD* thd, const char* log_name, ulong pos);
+ static void init_show_field_list(List<Item>* field_list);
#endif
};
@@ -134,7 +214,8 @@ public:
THD* thd;
bool cache_stmt;
Query_log_event(THD* thd_arg, const char* query_arg, bool using_trans=0):
- Log_event(thd_arg->start_time,0,1,thd_arg->server_id), data_buf(0),
+ Log_event(thd_arg->start_time,0,1,thd_arg->server_id,thd_arg->log_seq),
+ data_buf(0),
query(query_arg), db(thd_arg->db), q_len(thd_arg->query_length),
error_code(thd_arg->killed ? ER_SERVER_SHUTDOWN: thd_arg->net.last_errno),
thread_id(thd_arg->thread_id), thd(thd_arg),
@@ -146,9 +227,10 @@ public:
exec_time = (ulong) (end_time - thd->start_time);
db_len = (db) ? (uint32) strlen(db) : 0;
}
+
+ void pack_info(String* packet);
#endif
- Query_log_event(IO_CACHE* file, time_t when, uint32 server_id_arg);
Query_log_event(const char* buf, int event_len);
~Query_log_event()
{
@@ -172,6 +254,33 @@ public:
void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
+class Slave_log_event: public Log_event
+{
+protected:
+ char* mem_pool;
+ void init_from_mem_pool(int data_size);
+public:
+ char* master_host;
+ int master_host_len;
+ uint16 master_port;
+ char* master_log;
+ int master_log_len;
+ ulonglong master_pos;
+
+#ifndef MYSQL_CLIENT
+ Slave_log_event(THD* thd_arg, struct st_master_info* mi);
+ void pack_info(String* packet);
+#endif
+
+ Slave_log_event(const char* buf, int event_len);
+ ~Slave_log_event();
+ int get_data_size();
+ Log_event_type get_type_code() { return SLAVE_EVENT; }
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
+ int write_data(IO_CACHE* file );
+
+};
+
#define DUMPFILE_FLAG 0x1
#define OPT_ENCLOSED_FLAG 0x2
#define REPLACE_FLAG 0x4
@@ -234,7 +343,6 @@ public:
time_t end_time;
time(&end_time);
exec_time = (ulong) (end_time - thd->start_time);
- valid_exec_time = 1;
db_len = (db) ? (uint32) strlen(db) : 0;
table_name_len = (table_name) ? (uint32) strlen(table_name) : 0;
fname_len = (fname) ? (uint) strlen(fname) : 0;
@@ -288,9 +396,9 @@ public:
fields = fields_buf.ptr();
}
void set_fields(List<Item> &fields_arg);
+ void pack_info(String* packet);
#endif
- Load_log_event(IO_CACHE * file, time_t when, uint32 server_id_arg);
Load_log_event(const char* buf, int event_len);
~Load_log_event()
{
@@ -322,23 +430,12 @@ class Start_log_event: public Log_event
public:
uint32 created;
uint16 binlog_version;
- char server_version[50];
+ char server_version[ST_SERVER_VER_LEN];
Start_log_event() :Log_event(time(NULL)),binlog_version(BINLOG_VERSION)
{
created = (uint32) when;
- memcpy(server_version, ::server_version, sizeof(server_version));
- }
- Start_log_event(IO_CACHE* file, time_t when_arg, uint32 server_id_arg) :
- Log_event(when_arg, 0, 0, server_id_arg)
- {
- char buf[sizeof(server_version) + 2 + 4 + 4];
- if (my_b_read(file, (byte*) buf, sizeof(buf)))
- return;
- binlog_version = uint2korr(buf+4);
- memcpy(server_version, buf + 6, sizeof(server_version));
- server_version[sizeof(server_version)-1]=0;
- created = uint4korr(buf + 6 + sizeof(server_version));
+ memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
}
Start_log_event(const char* buf);
@@ -347,9 +444,11 @@ public:
int write_data(IO_CACHE* file);
int get_data_size()
{
- // sizeof(binlog_version) + sizeof(server_version) sizeof(created)
- return 2 + sizeof(server_version) + 4;
+ return START_HEADER_LEN;
}
+#ifndef MYSQL_CLIENT
+ void pack_info(String* packet);
+#endif
void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
@@ -361,12 +460,15 @@ public:
Intvar_log_event(uchar type_arg, ulonglong val_arg)
:Log_event(time(NULL)),val(val_arg),type(type_arg)
{}
- Intvar_log_event(IO_CACHE* file, time_t when, uint32 server_id_arg);
Intvar_log_event(const char* buf);
~Intvar_log_event() {}
Log_event_type get_type_code() { return INTVAR_EVENT;}
+ const char* get_var_type_name();
int get_data_size() { return sizeof(type) + sizeof(val);}
int write_data(IO_CACHE* file);
+#ifndef MYSQL_CLIENT
+ void pack_info(String* packet);
+#endif
void print(FILE* file, bool short_form = 0, char* last_db = 0);
@@ -377,12 +479,6 @@ class Stop_log_event: public Log_event
public:
Stop_log_event() :Log_event(time(NULL))
{}
- Stop_log_event(IO_CACHE* file, time_t when_arg, uint32 server_id_arg):
- Log_event(when_arg,0,0,server_id_arg)
- {
- byte skip[4];
- my_b_read(file, skip, sizeof(skip)); // skip the event length
- }
Stop_log_event(const char* buf):Log_event(buf)
{
}
@@ -396,16 +492,18 @@ class Rotate_log_event: public Log_event
public:
const char* new_log_ident;
uchar ident_len;
+ ulonglong pos;
bool alloced;
- Rotate_log_event(const char* new_log_ident_arg, uint ident_len_arg = 0) :
+ Rotate_log_event(const char* new_log_ident_arg, uint ident_len_arg = 0,
+ ulonglong pos_arg = 4) :
Log_event(time(NULL)),
new_log_ident(new_log_ident_arg),
- ident_len(ident_len_arg ? ident_len_arg : (uint) strlen(new_log_ident_arg)),
+ ident_len(ident_len_arg ? ident_len_arg :
+ (uint) strlen(new_log_ident_arg)), pos(pos_arg),
alloced(0)
{}
- Rotate_log_event(IO_CACHE* file, time_t when, uint32 server_id_arg) ;
Rotate_log_event(const char* buf, int event_len);
~Rotate_log_event()
{
@@ -413,10 +511,16 @@ public:
my_free((gptr) new_log_ident, MYF(0));
}
Log_event_type get_type_code() { return ROTATE_EVENT;}
- int get_data_size() { return ident_len;}
+ int get_data_size() { return ident_len + ROTATE_HEADER_LEN;}
int write_data(IO_CACHE* file);
void print(FILE* file, bool short_form = 0, char* last_db = 0);
+#ifndef MYSQL_CLIENT
+ void pack_info(String* packet);
+#endif
};
#endif
+
+
+
diff --git a/sql/md5.c b/sql/md5.c
index 0775ba3bd1a..a19f8639f3a 100644
--- a/sql/md5.c
+++ b/sql/md5.c
@@ -108,7 +108,7 @@ Rotation is separate from addition to prevent recomputation.
/* MD5 initialization. Begins an MD5 operation, writing a new context.
*/
-void MD5Init (MD5_CTX *context) /* context */
+void my_MD5Init (my_MD5_CTX *context) /* context */
{
context->count[0] = context->count[1] = 0;
/* Load magic initialization constants.
@@ -123,8 +123,8 @@ void MD5Init (MD5_CTX *context) /* context */
operation, processing another message block, and updating the
context.
*/
-void MD5Update (context, input, inputLen)
-MD5_CTX *context; /* context */
+void my_MD5Update (context, input, inputLen)
+my_MD5_CTX *context; /* context */
unsigned char *input; /* input block */
unsigned int inputLen; /* length of input block */
{
@@ -164,9 +164,9 @@ unsigned int inputLen; /* length of input block */
/* MD5 finalization. Ends an MD5 message-digest operation, writing the
the message digest and zeroizing the context.
*/
-void MD5Final (digest, context)
+void my_MD5Final (digest, context)
unsigned char digest[16]; /* message digest */
-MD5_CTX *context; /* context */
+my_MD5_CTX *context; /* context */
{
unsigned char bits[8];
unsigned int idx, padLen;
@@ -178,10 +178,10 @@ MD5_CTX *context; /* context */
*/
idx = (unsigned int)((context->count[0] >> 3) & 0x3f);
padLen = (idx < 56) ? (56 - idx) : (120 - idx);
- MD5Update (context, PADDING, padLen);
+ my_MD5Update (context, PADDING, padLen);
/* Append length (before padding) */
- MD5Update (context, bits, 8);
+ my_MD5Update (context, bits, 8);
/* Store state in digest */
Encode (digest, context->state, 16);
diff --git a/sql/md5.h b/sql/md5.h
index 862129391f1..6fe4e543bb0 100644
--- a/sql/md5.h
+++ b/sql/md5.h
@@ -57,22 +57,20 @@ If using PROTOTYPES, then PROTO_LIST returns the list, otherwise it
#else
#define PROTO_LIST(list) ()
#endif
-
-
/* MD5 context. */
typedef struct {
UINT4 state[4]; /* state (ABCD) */
UINT4 count[2]; /* number of bits, modulo 2^64 (lsb first) */
unsigned char buffer[64]; /* input buffer */
-} MD5_CTX;
+} my_MD5_CTX;
#ifdef __cplusplus
extern "C" {
#endif
- void MD5Init PROTO_LIST ((MD5_CTX *));
- void MD5Update PROTO_LIST
- ((MD5_CTX *, unsigned char *, unsigned int));
- void MD5Final PROTO_LIST ((unsigned char [16], MD5_CTX *));
+ void my_MD5Init PROTO_LIST ((my_MD5_CTX *));
+ void my_MD5Update PROTO_LIST
+ ((my_MD5_CTX *, unsigned char *, unsigned int));
+ void my_MD5Final PROTO_LIST ((unsigned char [16], my_MD5_CTX *));
#ifdef __cplusplus
}
diff --git a/sql/mini_client.cc b/sql/mini_client.cc
index 38180c0c6c8..8966b303000 100644
--- a/sql/mini_client.cc
+++ b/sql/mini_client.cc
@@ -28,6 +28,8 @@
#include <odbcinst.h>
#endif
#include <global.h>
+#include <mysql_com.h>
+#include <violite.h>
#include <my_sys.h>
#include <mysys_err.h>
#include <m_string.h>
@@ -37,7 +39,6 @@
#include "mysql_version.h"
#include "mysqld_error.h"
#include "errmsg.h"
-#include <violite.h>
extern "C" { // Because of SCO 3.2V4.2
#include <sys/stat.h>
@@ -69,9 +70,22 @@ extern "C" { // Because of SCO 3.2V4.2
}
+static void mc_free_rows(MYSQL_DATA *cur);
+static MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
+ my_bool default_value,
+ my_bool long_flag_protocol);
+
static void mc_end_server(MYSQL *mysql);
static int mc_sock_connect(File s, const struct sockaddr *name, uint namelen, uint to);
static void mc_free_old_query(MYSQL *mysql);
+static int mc_send_file_to_server(MYSQL *mysql, const char *filename);
+static my_ulonglong mc_net_field_length_ll(uchar **packet);
+static ulong mc_net_field_length(uchar **packet);
+static int mc_read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row,
+ ulong *lengths);
+static MYSQL_DATA *mc_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
+ uint fields);
+
#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | CLIENT_LOCAL_FILES)
@@ -735,18 +749,18 @@ mc_mysql_connect(MYSQL *mysql,const char *host, const char *user,
#ifdef HAVE_OPENSSL
/* Oops.. are we careful enough to not send ANY information */
/* without encryption? */
- if (client_flag & CLIENT_SSL)
+/* if (client_flag & CLIENT_SSL)
{
if (my_net_write(net,buff,(uint) (2)) || net_flush(net))
- goto error;
+ goto error;*/
/* Do the SSL layering. */
- DBUG_PRINT("info", ("IO layer change in progress..."));
+ /* DBUG_PRINT("info", ("IO layer change in progress..."));
VioSSLConnectorFd* connector_fd = (VioSSLConnectorFd*)
(mysql->connector_fd);
VioSocket* vio_socket = (VioSocket*)(mysql->net.vio);
VioSSL* vio_ssl = connector_fd->connect(vio_socket);
mysql->net.vio = (NetVio*)(vio_ssl);
- }
+ }*/
#endif /* HAVE_OPENSSL */
int3store(buff+2,max_allowed_packet);
@@ -816,11 +830,506 @@ mc_mysql_close(MYSQL *mysql)
bzero((char*) &mysql->options,sizeof(mysql->options));
mysql->net.vio = 0;
#ifdef HAVE_OPENSSL
- ((VioConnectorFd*)(mysql->connector_fd))->delete();
- mysql->connector_fd = 0;
+/* ((VioConnectorFd*)(mysql->connector_fd))->delete();
+ mysql->connector_fd = 0;*/
#endif /* HAVE_OPENSSL */
if (mysql->free_me)
my_free((gptr) mysql,MYF(0));
}
DBUG_VOID_RETURN;
}
+
+void STDCALL mc_mysql_free_result(MYSQL_RES *result)
+{
+ DBUG_ENTER("mc_mysql_free_result");
+ DBUG_PRINT("enter",("mysql_res: %lx",result));
+ if (result)
+ {
+ if (result->handle && result->handle->status == MYSQL_STATUS_USE_RESULT)
+ {
+ DBUG_PRINT("warning",("Not all rows in set were read; Ignoring rows"));
+ for (;;)
+ {
+ uint pkt_len;
+ if ((pkt_len=(uint) mc_net_safe_read(result->handle)) == packet_error)
+ break;
+ if (pkt_len == 1 && result->handle->net.read_pos[0] == 254)
+ break; /* End of data */
+ }
+ result->handle->status=MYSQL_STATUS_READY;
+ }
+ mc_free_rows(result->data);
+ if (result->fields)
+ free_root(&result->field_alloc,MYF(0));
+ if (result->row)
+ my_free((gptr) result->row,MYF(0));
+ my_free((gptr) result,MYF(0));
+ }
+ DBUG_VOID_RETURN;
+}
+
+static void mc_free_rows(MYSQL_DATA *cur)
+{
+ if (cur)
+ {
+ free_root(&cur->alloc,MYF(0));
+ my_free((gptr) cur,MYF(0));
+ }
+}
+
+static MYSQL_FIELD *
+mc_unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
+ my_bool default_value, my_bool long_flag_protocol)
+{
+ MYSQL_ROWS *row;
+ MYSQL_FIELD *field,*result;
+ DBUG_ENTER("unpack_fields");
+
+ field=result=(MYSQL_FIELD*) alloc_root(alloc,sizeof(MYSQL_FIELD)*fields);
+ if (!result)
+ DBUG_RETURN(0);
+
+ for (row=data->data; row ; row = row->next,field++)
+ {
+ field->table= strdup_root(alloc,(char*) row->data[0]);
+ field->name= strdup_root(alloc,(char*) row->data[1]);
+ field->length= (uint) uint3korr(row->data[2]);
+ field->type= (enum enum_field_types) (uchar) row->data[3][0];
+ if (long_flag_protocol)
+ {
+ field->flags= uint2korr(row->data[4]);
+ field->decimals=(uint) (uchar) row->data[4][2];
+ }
+ else
+ {
+ field->flags= (uint) (uchar) row->data[4][0];
+ field->decimals=(uint) (uchar) row->data[4][1];
+ }
+ if (INTERNAL_NUM_FIELD(field))
+ field->flags|= NUM_FLAG;
+ if (default_value && row->data[5])
+ field->def=strdup_root(alloc,(char*) row->data[5]);
+ else
+ field->def=0;
+ field->max_length= 0;
+ }
+ mc_free_rows(data); /* Free old data */
+ DBUG_RETURN(result);
+}
+
+int STDCALL
+mc_mysql_send_query(MYSQL* mysql, const char* query, uint length)
+{
+ return mc_simple_command(mysql, COM_QUERY, query, length, 1);
+}
+
+int STDCALL mc_mysql_read_query_result(MYSQL *mysql)
+{
+ uchar *pos;
+ ulong field_count;
+ MYSQL_DATA *fields;
+ uint length;
+ DBUG_ENTER("mc_mysql_read_query_result");
+
+ if ((length = mc_net_safe_read(mysql)) == packet_error)
+ DBUG_RETURN(-1);
+ mc_free_old_query(mysql); /* Free old result */
+get_info:
+ pos=(uchar*) mysql->net.read_pos;
+ if ((field_count= mc_net_field_length(&pos)) == 0)
+ {
+ mysql->affected_rows= mc_net_field_length_ll(&pos);
+ mysql->insert_id= mc_net_field_length_ll(&pos);
+ if (mysql->server_capabilities & CLIENT_TRANSACTIONS)
+ {
+ mysql->server_status=uint2korr(pos); pos+=2;
+ }
+ if (pos < mysql->net.read_pos+length && mc_net_field_length(&pos))
+ mysql->info=(char*) pos;
+ DBUG_RETURN(0);
+ }
+ if (field_count == NULL_LENGTH) /* LOAD DATA LOCAL INFILE */
+ {
+ int error=mc_send_file_to_server(mysql,(char*) pos);
+ if ((length=mc_net_safe_read(mysql)) == packet_error || error)
+ DBUG_RETURN(-1);
+ goto get_info; /* Get info packet */
+ }
+ if (!(mysql->server_status & SERVER_STATUS_AUTOCOMMIT))
+ mysql->server_status|= SERVER_STATUS_IN_TRANS;
+
+ mysql->extra_info= mc_net_field_length_ll(&pos); /* Maybe number of rec */
+ if (!(fields=mc_read_rows(mysql,(MYSQL_FIELD*) 0,5)))
+ DBUG_RETURN(-1);
+ if (!(mysql->fields=mc_unpack_fields(fields,&mysql->field_alloc,
+ (uint) field_count,0,
+ (my_bool) test(mysql->server_capabilities &
+ CLIENT_LONG_FLAG))))
+ DBUG_RETURN(-1);
+ mysql->status=MYSQL_STATUS_GET_RESULT;
+ mysql->field_count=field_count;
+ DBUG_RETURN(0);
+}
+
+int STDCALL mc_mysql_query(MYSQL *mysql, const char *query, uint length)
+{
+ DBUG_ENTER("mysql_real_query");
+ DBUG_PRINT("enter",("handle: %lx",mysql));
+ DBUG_PRINT("query",("Query = \"%s\"",query));
+ if(!length)
+ length = strlen(query);
+ if (mc_simple_command(mysql,COM_QUERY,query,length,1))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(mc_mysql_read_query_result(mysql));
+}
+
+static int mc_send_file_to_server(MYSQL *mysql, const char *filename)
+{
+ int fd, readcount;
+ char buf[IO_SIZE*15],*tmp_name;
+ DBUG_ENTER("send_file_to_server");
+
+ fn_format(buf,filename,"","",4); /* Convert to client format */
+ if (!(tmp_name=my_strdup(buf,MYF(0))))
+ {
+ strmov(mysql->net.last_error, ER(mysql->net.last_errno=CR_OUT_OF_MEMORY));
+ DBUG_RETURN(-1);
+ }
+ if ((fd = my_open(tmp_name,O_RDONLY, MYF(0))) < 0)
+ {
+ mysql->net.last_errno=EE_FILENOTFOUND;
+ sprintf(buf,EE(mysql->net.last_errno),tmp_name,errno);
+ strmake(mysql->net.last_error,buf,sizeof(mysql->net.last_error)-1);
+ my_net_write(&mysql->net,"",0); net_flush(&mysql->net);
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ while ((readcount = (int) my_read(fd,buf,sizeof(buf),MYF(0))) > 0)
+ {
+ if (my_net_write(&mysql->net,buf,readcount))
+ {
+ mysql->net.last_errno=CR_SERVER_LOST;
+ strmov(mysql->net.last_error,ER(mysql->net.last_errno));
+ DBUG_PRINT("error",("Lost connection to MySQL server during LOAD DATA of local file"));
+ (void) my_close(fd,MYF(0));
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+ }
+ (void) my_close(fd,MYF(0));
+ /* Send empty packet to mark end of file */
+ if (my_net_write(&mysql->net,"",0) || net_flush(&mysql->net))
+ {
+ mysql->net.last_errno=CR_SERVER_LOST;
+ sprintf(mysql->net.last_error,ER(mysql->net.last_errno),errno);
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+ if (readcount < 0)
+ {
+ mysql->net.last_errno=EE_READ; /* the errmsg for not entire file read */
+ sprintf(buf,EE(mysql->net.last_errno),tmp_name,errno);
+ strmake(mysql->net.last_error,buf,sizeof(mysql->net.last_error)-1);
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+/* Get the length of next field. Change parameter to point at fieldstart */
+static ulong mc_net_field_length(uchar **packet)
+{
+ reg1 uchar *pos= *packet;
+ if (*pos < 251)
+ {
+ (*packet)++;
+ return (ulong) *pos;
+ }
+ if (*pos == 251)
+ {
+ (*packet)++;
+ return NULL_LENGTH;
+ }
+ if (*pos == 252)
+ {
+ (*packet)+=3;
+ return (ulong) uint2korr(pos+1);
+ }
+ if (*pos == 253)
+ {
+ (*packet)+=4;
+ return (ulong) uint3korr(pos+1);
+ }
+ (*packet)+=9; /* Must be 254 when here */
+ return (ulong) uint4korr(pos+1);
+}
+
+/* Same as above, but returns ulonglong values */
+
+static my_ulonglong mc_net_field_length_ll(uchar **packet)
+{
+ reg1 uchar *pos= *packet;
+ if (*pos < 251)
+ {
+ (*packet)++;
+ return (my_ulonglong) *pos;
+ }
+ if (*pos == 251)
+ {
+ (*packet)++;
+ return (my_ulonglong) NULL_LENGTH;
+ }
+ if (*pos == 252)
+ {
+ (*packet)+=3;
+ return (my_ulonglong) uint2korr(pos+1);
+ }
+ if (*pos == 253)
+ {
+ (*packet)+=4;
+ return (my_ulonglong) uint3korr(pos+1);
+ }
+ (*packet)+=9; /* Must be 254 when here */
+#ifdef NO_CLIENT_LONGLONG
+ return (my_ulonglong) uint4korr(pos+1);
+#else
+ return (my_ulonglong) uint8korr(pos+1);
+#endif
+}
+
+/* Read all rows (fields or data) from server */
+
+static MYSQL_DATA *mc_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
+ uint fields)
+{
+ uint field,pkt_len;
+ ulong len;
+ uchar *cp;
+ char *to;
+ MYSQL_DATA *result;
+ MYSQL_ROWS **prev_ptr,*cur;
+ NET *net = &mysql->net;
+ DBUG_ENTER("mc_read_rows");
+
+ if ((pkt_len=(uint) mc_net_safe_read(mysql)) == packet_error)
+ DBUG_RETURN(0);
+ if (!(result=(MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA),
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ net->last_errno=CR_OUT_OF_MEMORY;
+ strmov(net->last_error,ER(net->last_errno));
+ DBUG_RETURN(0);
+ }
+ init_alloc_root(&result->alloc,8192,0); /* Assume rowlength < 8192 */
+ result->alloc.min_malloc=sizeof(MYSQL_ROWS);
+ prev_ptr= &result->data;
+ result->rows=0;
+ result->fields=fields;
+
+ while (*(cp=net->read_pos) != 254 || pkt_len != 1)
+ {
+ result->rows++;
+ if (!(cur= (MYSQL_ROWS*) alloc_root(&result->alloc,
+ sizeof(MYSQL_ROWS))) ||
+ !(cur->data= ((MYSQL_ROW)
+ alloc_root(&result->alloc,
+ (fields+1)*sizeof(char *)+pkt_len))))
+ {
+ mc_free_rows(result);
+ net->last_errno=CR_OUT_OF_MEMORY;
+ strmov(net->last_error,ER(net->last_errno));
+ DBUG_RETURN(0);
+ }
+ *prev_ptr=cur;
+ prev_ptr= &cur->next;
+ to= (char*) (cur->data+fields+1);
+ for (field=0 ; field < fields ; field++)
+ {
+ if ((len=(ulong) mc_net_field_length(&cp)) == NULL_LENGTH)
+ { /* null field */
+ cur->data[field] = 0;
+ }
+ else
+ {
+ cur->data[field] = to;
+ memcpy(to,(char*) cp,len); to[len]=0;
+ to+=len+1;
+ cp+=len;
+ if (mysql_fields)
+ {
+ if (mysql_fields[field].max_length < len)
+ mysql_fields[field].max_length=len;
+ }
+ }
+ }
+ cur->data[field]=to; /* End of last field */
+ if ((pkt_len=mc_net_safe_read(mysql)) == packet_error)
+ {
+ mc_free_rows(result);
+ DBUG_RETURN(0);
+ }
+ }
+ *prev_ptr=0; /* last pointer is null */
+ DBUG_PRINT("exit",("Got %d rows",result->rows));
+ DBUG_RETURN(result);
+}
+
+
+/*
+** Read one row. Uses packet buffer as storage for fields.
+** When next packet is read, the previous field values are destroyed
+*/
+
+
+static int mc_read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row,
+ ulong *lengths)
+{
+ uint field;
+ ulong pkt_len,len;
+ uchar *pos,*prev_pos;
+
+ if ((pkt_len=(uint) mc_net_safe_read(mysql)) == packet_error)
+ return -1;
+ if (pkt_len == 1 && mysql->net.read_pos[0] == 254)
+ return 1; /* End of data */
+ prev_pos= 0; /* allowed to write at packet[-1] */
+ pos=mysql->net.read_pos;
+ for (field=0 ; field < fields ; field++)
+ {
+ if ((len=(ulong) mc_net_field_length(&pos)) == NULL_LENGTH)
+ { /* null field */
+ row[field] = 0;
+ *lengths++=0;
+ }
+ else
+ {
+ row[field] = (char*) pos;
+ pos+=len;
+ *lengths++=len;
+ }
+ if (prev_pos)
+ *prev_pos=0; /* Terminate prev field */
+ prev_pos=pos;
+ }
+ row[field]=(char*) prev_pos+1; /* End of last field */
+ *prev_pos=0; /* Terminate last field */
+ return 0;
+}
+
+my_ulonglong STDCALL mc_mysql_num_rows(MYSQL_RES *res)
+{
+ return res->row_count;
+}
+
+unsigned int STDCALL mc_mysql_num_fields(MYSQL_RES *res)
+{
+ return res->field_count;
+}
+
+void STDCALL mc_mysql_data_seek(MYSQL_RES *result, my_ulonglong row)
+{
+ MYSQL_ROWS *tmp=0;
+ DBUG_PRINT("info",("mysql_data_seek(%ld)",(long) row));
+ if (result->data)
+ for (tmp=result->data->data; row-- && tmp ; tmp = tmp->next) ;
+ result->current_row=0;
+ result->data_cursor = tmp;
+}
+
+MYSQL_ROW STDCALL mc_mysql_fetch_row(MYSQL_RES *res)
+{
+ DBUG_ENTER("mc_mysql_fetch_row");
+ if (!res->data)
+ { /* Unbufferred fetch */
+ if (!res->eof)
+ {
+ if (!(mc_read_one_row(res->handle,res->field_count,res->row,
+ res->lengths)))
+ {
+ res->row_count++;
+ DBUG_RETURN(res->current_row=res->row);
+ }
+ else
+ {
+ DBUG_PRINT("info",("end of data"));
+ res->eof=1;
+ res->handle->status=MYSQL_STATUS_READY;
+ }
+ }
+ DBUG_RETURN((MYSQL_ROW) NULL);
+ }
+ {
+ MYSQL_ROW tmp;
+ if (!res->data_cursor)
+ {
+ DBUG_PRINT("info",("end of data"));
+ DBUG_RETURN(res->current_row=(MYSQL_ROW) NULL);
+ }
+ tmp = res->data_cursor->data;
+ res->data_cursor = res->data_cursor->next;
+ DBUG_RETURN(res->current_row=tmp);
+ }
+}
+
+int STDCALL mc_mysql_select_db(MYSQL *mysql, const char *db)
+{
+ int error;
+ DBUG_ENTER("mysql_select_db");
+ DBUG_PRINT("enter",("db: '%s'",db));
+
+ if ((error=mc_simple_command(mysql,COM_INIT_DB,db,(uint) strlen(db),0)))
+ DBUG_RETURN(error);
+ my_free(mysql->db,MYF(MY_ALLOW_ZERO_PTR));
+ mysql->db=my_strdup(db,MYF(MY_WME));
+ DBUG_RETURN(0);
+}
+
+
+MYSQL_RES * STDCALL mc_mysql_store_result(MYSQL *mysql)
+{
+ MYSQL_RES *result;
+ DBUG_ENTER("mysql_store_result");
+
+ if (!mysql->fields)
+ DBUG_RETURN(0);
+ if (mysql->status != MYSQL_STATUS_GET_RESULT)
+ {
+ strmov(mysql->net.last_error,
+ ER(mysql->net.last_errno=CR_COMMANDS_OUT_OF_SYNC));
+ DBUG_RETURN(0);
+ }
+ mysql->status=MYSQL_STATUS_READY; /* server is ready */
+ if (!(result=(MYSQL_RES*) my_malloc(sizeof(MYSQL_RES)+
+ sizeof(ulong)*mysql->field_count,
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ mysql->net.last_errno=CR_OUT_OF_MEMORY;
+ strmov(mysql->net.last_error, ER(mysql->net.last_errno));
+ DBUG_RETURN(0);
+ }
+ result->eof=1; /* Marker for buffered */
+ result->lengths=(ulong*) (result+1);
+ if (!(result->data=mc_read_rows(mysql,mysql->fields,mysql->field_count)))
+ {
+ my_free((gptr) result,MYF(0));
+ DBUG_RETURN(0);
+ }
+ mysql->affected_rows= result->row_count= result->data->rows;
+ result->data_cursor= result->data->data;
+ result->fields= mysql->fields;
+ result->field_alloc= mysql->field_alloc;
+ result->field_count= mysql->field_count;
+ result->current_field=0;
+ result->current_row=0; /* Must do a fetch first */
+ mysql->fields=0; /* fields is now in result */
+ DBUG_RETURN(result); /* Data fetched */
+}
+
+
+
+
+
+
+
+
diff --git a/sql/mini_client.h b/sql/mini_client.h
index f7d95a1b66e..22cdb31f846 100644
--- a/sql/mini_client.h
+++ b/sql/mini_client.h
@@ -42,6 +42,17 @@ char * STDCALL mc_mysql_error(MYSQL *mysql);
int STDCALL mc_mysql_errno(MYSQL *mysql);
my_bool STDCALL mc_mysql_reconnect(MYSQL* mysql);
+int STDCALL mc_mysql_send_query(MYSQL* mysql, const char* query, uint length);
+int STDCALL mc_mysql_read_query_result(MYSQL *mysql);
+int STDCALL mc_mysql_query(MYSQL *mysql, const char *query, uint length);
+MYSQL_RES * STDCALL mc_mysql_store_result(MYSQL *mysql);
+void STDCALL mc_mysql_free_result(MYSQL_RES *result);
+void STDCALL mc_mysql_data_seek(MYSQL_RES *result, my_ulonglong row);
+my_ulonglong STDCALL mc_mysql_num_rows(MYSQL_RES *res);
+unsigned int STDCALL mc_mysql_num_fields(MYSQL_RES *res);
+MYSQL_ROW STDCALL mc_mysql_fetch_row(MYSQL_RES *res);
+int STDCALL mc_mysql_select_db(MYSQL *mysql, const char *db);
+
#endif
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index d00eb09a363..63c9478d236 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -26,7 +26,6 @@
#include <thr_lock.h>
#include <my_base.h> /* Needed by field.h */
#include <my_bitmap.h>
-#include <violite.h>
#undef write // remove pthread.h macro definition for EMX
@@ -35,6 +34,7 @@ typedef ulong key_map; /* Used for finding keys */
typedef ulong key_part_map; /* Used for finding key parts */
#include "mysql_com.h"
+#include <violite.h>
#include "unireg.h"
void init_sql_alloc(MEM_ROOT *root, uint block_size, uint pre_alloc_size);
@@ -147,7 +147,7 @@ void kill_one_thread(THD *thd, ulong id);
#define SELECT_BIG_RESULT 16
#define OPTION_FOUND_ROWS 32
#define SELECT_HIGH_PRIORITY 64 /* Intern */
-#define SELECT_USE_CACHE 256 /* Intern */
+#define SELECT_NO_JOIN_CACHE 256 /* Intern */
#define OPTION_BIG_TABLES 512 /* for SQL OPTION */
#define OPTION_BIG_SELECTS 1024 /* for SQL OPTION */
@@ -223,7 +223,7 @@ inline THD *_current_thd(void)
#include "opt_range.h"
-void mysql_create_db(THD *thd, char *db, uint create_info);
+int mysql_create_db(THD *thd, char *db, uint create_info);
void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags);
int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists);
int quick_rm_table(enum db_type base,const char *db,
@@ -232,6 +232,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list);
bool mysql_change_db(THD *thd,const char *name);
void mysql_parse(THD *thd,char *inBuf,uint length);
void mysql_init_select(LEX *lex);
+void mysql_new_select(LEX *lex);
void init_max_user_conn(void);
void free_max_user_conn(void);
pthread_handler_decl(handle_one_connection,arg);
@@ -245,7 +246,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char* packet, uint packet_length);
bool check_stack_overrun(THD *thd,char *dummy);
bool reload_acl_and_cache(THD *thd, uint options, TABLE_LIST *tables);
-void mysql_rm_db(THD *thd,char *db,bool if_exists);
+int mysql_rm_db(THD *thd,char *db,bool if_exists);
void table_cache_init(void);
void table_cache_free(void);
uint cached_tables(void);
@@ -304,6 +305,7 @@ int mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &list,COND *conds,
List<Item_func_match> &ftfuncs,
ORDER *order, ORDER *group,Item *having,ORDER *proc_param,
uint select_type,select_result *result);
+int mysql_union(THD *thd,LEX *lex, uint no);
Field *create_tmp_field(TABLE *table,Item *item, Item::Type type,
Item_result_field ***copy_func, Field **from_field,
bool group,bool modify_item);
@@ -473,8 +475,7 @@ pthread_handler_decl(handle_manager, arg);
#ifndef DBUG_OFF
void print_where(COND *cond,const char *info);
void print_cached_tables(void);
-void TEST_filesort(TABLE **form,SORT_FIELD *sortorder,uint s_length,
- ha_rows special);
+void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special);
#endif
void mysql_print_status(THD *thd);
/* key.cc */
@@ -520,7 +521,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_grant, LOCK_error_log, LOCK_delayed_insert,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
- LOCK_binlog_update, LOCK_slave, LOCK_server_id;
+ LOCK_binlog_update, LOCK_slave, LOCK_server_id, LOCK_slave_list;
extern pthread_cond_t COND_refresh,COND_thread_count, COND_binlog_update,
COND_slave_stopped, COND_slave_start;
extern pthread_attr_t connection_attrib;
@@ -548,7 +549,7 @@ extern ulong keybuff_size,sortbuff_size,max_item_sort_length,table_cache_size,
binlog_cache_size, max_binlog_cache_size;
extern ulong specialflag, current_pid;
extern bool low_priority_updates, using_update_log;
-extern bool opt_sql_bin_update, opt_safe_show_db;
+extern bool opt_sql_bin_update, opt_safe_show_db, opt_warnings;
extern char language[LIBLEN],reg_ext[FN_EXTLEN],blob_newline;
extern const char **errmesg; /* Error messages */
extern const char *default_tx_isolation_name;
@@ -616,7 +617,7 @@ void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
SQL_SELECT *select,
int use_record_cache, bool print_errors);
void end_read_record(READ_RECORD *info);
-ha_rows filesort(TABLE **form,struct st_sort_field *sortorder, uint s_length,
+ha_rows filesort(TABLE *form,struct st_sort_field *sortorder, uint s_length,
SQL_SELECT *select, ha_rows special,ha_rows max_rows,
ha_rows *examined_rows);
void change_double_for_sort(double nr,byte *to);
@@ -667,7 +668,7 @@ extern int sql_cache_hit(THD *thd, char *inBuf, uint length);
inline bool add_item_to_list(Item *item)
{
- return current_lex->item_list.push_back(item);
+ return current_lex->select->item_list.push_back(item);
}
inline bool add_value_to_list(Item *value)
{
@@ -675,11 +676,11 @@ inline bool add_value_to_list(Item *value)
}
inline bool add_order_to_list(Item *item,bool asc)
{
- return add_to_list(current_lex->order_list,item,asc);
+ return add_to_list(current_lex->select->order_list,item,asc);
}
inline bool add_group_to_list(Item *item,bool asc)
{
- return add_to_list(current_lex->group_list,item,asc);
+ return add_to_list(current_lex->select->group_list,item,asc);
}
inline void mark_as_null_row(TABLE *table)
{
diff --git a/sql/mysqlbinlog.cc b/sql/mysqlbinlog.cc
index f0a9692cc2d..5edfe6e0591 100644
--- a/sql/mysqlbinlog.cc
+++ b/sql/mysqlbinlog.cc
@@ -108,7 +108,7 @@ static void die(const char* fmt, ...)
static void print_version()
{
- printf("%s Ver 1.3 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
+ printf("%s Ver 1.4 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
}
@@ -132,7 +132,7 @@ the mysql command line client\n\n");
-s, --short-form Just show the queries, no extra info\n\
-o, --offset=N Skip the first N entries\n\
-h, --host=server Get the binlog from server\n\
--P, --port=port Use port to connect to the remove server\n\
+-P, --port=port Use port to connect to the remote server\n\
-u, --user=username Connect to the remove server as username\n\
-p, --password=password Password to connect to remote server\n\
-r, --result-file=file Direct output to a given file\n\
@@ -303,14 +303,12 @@ static void dump_remote_log_entries(const char* logname)
uint len;
NET* net = &mysql->net;
if(!position) position = 4; // protect the innocent from spam
- if(position < 4)
- {
- position = 4;
- // warn the guity
- fprintf(stderr,
- "Warning: with the position so small you would hit the magic number\n\
-Unfortunately, no sweepstakes today, adjusted position to 4\n");
- }
+ if (position < 4)
+ {
+ position = 4;
+ // warn the guity
+ sql_print_error("Warning: The position in the binary log can't be less than 4.\nStarting from position 4\n");
+ }
int4store(buf, position);
int2store(buf + 4, binlog_flags);
len = (uint) strlen(logname);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 91fde2b9131..6eb4625224e 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -20,6 +20,7 @@
#include <my_dir.h>
#include "sql_acl.h"
#include "slave.h"
+#include "sql_repl.h"
#include "stacktrace.h"
#ifdef HAVE_BERKELEY_DB
#include "ha_berkeley.h"
@@ -34,7 +35,6 @@
#include <nisam.h>
#include <thr_alarm.h>
#include <ft_global.h>
-#include "vio.h"
#ifndef DBUG_OFF
#define ONE_THREAD
@@ -95,17 +95,16 @@ int deny_severity = LOG_WARNING;
#include <sys/mman.h>
#endif
+#ifdef _AIX41
+int initgroups(const char *,unsigned int);
+#endif
+
#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
#include <ieeefp.h>
#ifdef HAVE_FP_EXCEPT // Fix type conflict
typedef fp_except fp_except_t;
#endif
-#ifdef _AIX41
-extern "C" int initgroups(const char *,int);
-#endif
-
-
/* We can't handle floating point expections with threads, so disable
this on freebsd
*/
@@ -207,6 +206,7 @@ SHOW_COMP_OPTION have_ssl=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_ssl=SHOW_OPTION_NO;
#endif
+SHOW_COMP_OPTION have_symlink=SHOW_OPTION_YES;
static bool opt_skip_slave_start = 0; // if set, slave is not autostarted
@@ -220,9 +220,10 @@ static char mysql_home[FN_REFLEN],pidfile_name[FN_REFLEN];
static pthread_t select_thread;
static bool opt_log,opt_update_log,opt_bin_log,opt_slow_log,opt_noacl,
opt_disable_networking=0, opt_bootstrap=0,opt_skip_show_db=0,
- opt_ansi_mode=0,opt_myisam_log=0,
+ opt_ansi_mode=0,opt_myisam_log=0,
opt_large_files=sizeof(my_off_t) > 4;
-bool opt_sql_bin_update = 0, opt_log_slave_updates = 0, opt_safe_show_db=0;
+bool opt_sql_bin_update = 0, opt_log_slave_updates = 0, opt_safe_show_db=0,
+ opt_show_slave_auth_info = 0;
FILE *bootstrap_file=0;
int segfaulted = 0; // ensure we do not enter SIGSEGV handler twice
extern MASTER_INFO glob_mi;
@@ -244,7 +245,7 @@ static char *opt_ssl_key = 0;
static char *opt_ssl_cert = 0;
static char *opt_ssl_ca = 0;
static char *opt_ssl_capath = 0;
-static struct st_VioSSLAcceptorFd * ssl_acceptor_fd = 0;
+struct st_VioSSLAcceptorFd * ssl_acceptor_fd = 0;
#endif /* HAVE_OPENSSL */
@@ -277,15 +278,18 @@ volatile ulong cached_thread_count=0;
// replication parameters, if master_host is not NULL, we are a slave
my_string master_user = (char*) "test", master_password = 0, master_host=0,
master_info_file = (char*) "master.info";
+my_string report_user = 0, report_password = 0, report_host=0;
+
const char *localhost=LOCAL_HOST;
const char *delayed_user="DELAYED";
uint master_port = MYSQL_PORT, master_connect_retry = 60;
+uint report_port = MYSQL_PORT;
ulong max_tmp_tables,max_heap_table_size;
ulong bytes_sent = 0L, bytes_received = 0L;
bool opt_endinfo,using_udf_functions,low_priority_updates, locked_in_memory;
-bool opt_using_transactions, using_update_log;
+bool opt_using_transactions, using_update_log, opt_warnings=0;
bool volatile abort_loop,select_thread_in_use,grant_option;
bool volatile ready_to_exit,shutdown_in_progress;
ulong refresh_version=1L,flush_version=1L; /* Increments on each reload */
@@ -341,7 +345,7 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_binlog_update, LOCK_slave, LOCK_server_id,
- LOCK_user_conn;
+ LOCK_user_conn, LOCK_slave_list;
pthread_cond_t COND_refresh,COND_thread_count,COND_binlog_update,
COND_slave_stopped, COND_slave_start;
@@ -695,6 +699,7 @@ void clean_up(bool print_message)
bitmap_free(&temp_pool);
free_max_user_conn();
end_slave();
+ end_slave_list();
#ifndef __WIN__
if (!opt_bootstrap)
(void) my_delete(pidfile_name,MYF(0)); // This may not always exist
@@ -1200,12 +1205,12 @@ Some pointers may be invalid and cause the dump to abort...\n");
fprintf(stderr, "\n
Successfully dumped variables, if you ran with --log, take a look at the\n\
details of what thread %ld did to cause the crash. In some cases of really\n\
-bad corruption, the above values may be invalid\n\n",
+bad corruption, the values shown above may be invalid\n\n",
thd->thread_id);
}
fprintf(stderr, "\
-Please use the information above to create a repeatable test case for the\n\
-crash, and send it to bugs@lists.mysql.com\n");
+The manual page at http://www.mysql.com/doc/C/r/Crashing.html contains\n\
+information that should help you find out what is causing the crash\n");
fflush(stderr);
#endif /* HAVE_STACKTRACE */
@@ -1685,7 +1690,8 @@ int main(int argc, char **argv)
randominit(&sql_rand,(ulong) start_time,(ulong) start_time/2);
reset_floating_point_exceptions();
init_thr_lock();
-
+ init_slave_list();
+
/* Fix varibles that are base 1024*1024 */
myisam_max_temp_length= (my_off_t) min(((ulonglong) myisam_max_sort_file_size)*1024*1024, (ulonglong) MAX_FILE_SIZE);
myisam_max_extra_temp_length= (my_off_t) min(((ulonglong) myisam_max_extra_sort_file_size)*1024*1024, (ulonglong) MAX_FILE_SIZE);
@@ -2473,14 +2479,16 @@ enum options {
OPT_INNODB_LOG_ARCH_DIR,
OPT_INNODB_LOG_ARCHIVE,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
- OPT_INNODB_UNIX_FILE_FLUSH_METHOD,
+ OPT_INNODB_FLUSH_METHOD,
OPT_SAFE_SHOW_DB,
OPT_GEMINI_SKIP, OPT_INNODB_SKIP,
OPT_TEMP_POOL, OPT_DO_PSTACK, OPT_TX_ISOLATION,
OPT_GEMINI_FLUSH_LOG, OPT_GEMINI_RECOVER,
OPT_GEMINI_UNBUFFERED_IO, OPT_SKIP_SAFEMALLOC,
- OPT_SKIP_STACK_TRACE
-};
+ OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINK, OPT_REPORT_HOST,
+ OPT_REPORT_USER, OPT_REPORT_PASSWORD, OPT_REPORT_PORT,
+ OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
+ OPT_SHOW_SLAVE_AUTH_INFO};
static struct option long_options[] = {
{"ansi", no_argument, 0, 'a'},
@@ -2536,8 +2544,8 @@ static struct option long_options[] = {
OPT_INNODB_LOG_ARCHIVE},
{"innodb_flush_log_at_trx_commit", optional_argument, 0,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT},
- {"innodb_unix_file_flush_method", required_argument, 0,
- OPT_INNODB_UNIX_FILE_FLUSH_METHOD},
+ {"innodb_flush_method", required_argument, 0,
+ OPT_INNODB_FLUSH_METHOD},
#endif
{"help", no_argument, 0, '?'},
{"init-file", required_argument, 0, (int) OPT_INIT_FILE},
@@ -2565,6 +2573,10 @@ static struct option long_options[] = {
(int) OPT_DISCONNECT_SLAVE_EVENT_COUNT},
{"abort-slave-event-count", required_argument, 0,
(int) OPT_ABORT_SLAVE_EVENT_COUNT},
+ {"max-binlog-dump-events", required_argument, 0,
+ (int) OPT_MAX_BINLOG_DUMP_EVENTS},
+ {"sporadic-binlog-dump-fail", no_argument, 0,
+ (int) OPT_SPORADIC_BINLOG_DUMP_FAIL},
{"safemalloc-mem-limit", required_argument, 0, (int)
OPT_SAFEMALLOC_MEM_LIMIT},
{"new", no_argument, 0, 'n'},
@@ -2587,11 +2599,19 @@ static struct option long_options[] = {
(int) OPT_REPLICATE_WILD_IGNORE_TABLE},
{"replicate-rewrite-db", required_argument, 0,
(int) OPT_REPLICATE_REWRITE_DB},
+ // In replication, we may need to tell the other servers how to connect
+ // to us
+ {"report-host", required_argument, 0, (int) OPT_REPORT_HOST},
+ {"report-user", required_argument, 0, (int) OPT_REPORT_USER},
+ {"report-password", required_argument, 0, (int) OPT_REPORT_PASSWORD},
+ {"report-port", required_argument, 0, (int) OPT_REPORT_PORT},
{"safe-mode", no_argument, 0, (int) OPT_SAFE},
{"safe-show-database", no_argument, 0, (int) OPT_SAFE_SHOW_DB},
{"socket", required_argument, 0, (int) OPT_SOCKET},
{"server-id", required_argument, 0, (int) OPT_SERVER_ID},
{"set-variable", required_argument, 0, 'O'},
+ {"show-slave-auth-info", no_argument, 0,
+ (int) OPT_SHOW_SLAVE_AUTH_INFO},
{"skip-bdb", no_argument, 0, (int) OPT_BDB_SKIP},
{"skip-innodb", no_argument, 0, (int) OPT_INNODB_SKIP},
{"skip-gemini", no_argument, 0, (int) OPT_GEMINI_SKIP},
@@ -2607,6 +2627,7 @@ static struct option long_options[] = {
{"skip-show-database", no_argument, 0, (int) OPT_SKIP_SHOW_DB},
{"skip-slave-start", no_argument, 0, (int) OPT_SKIP_SLAVE_START},
{"skip-stack-trace", no_argument, 0, (int) OPT_SKIP_STACK_TRACE},
+ {"skip-symlink", no_argument, 0, (int) OPT_SKIP_SYMLINK},
{"skip-thread-priority", no_argument, 0, (int) OPT_SKIP_PRIOR},
{"sql-bin-update-same", no_argument, 0, (int) OPT_SQL_BIN_UPDATE_SAME},
#include "sslopt-longopts.h"
@@ -2622,6 +2643,7 @@ static struct option long_options[] = {
#endif
{"user", required_argument, 0, 'u'},
{"version", no_argument, 0, 'V'},
+ {"warnings", no_argument, 0, 'W'},
{0, 0, 0, 0}
};
@@ -2817,6 +2839,7 @@ struct show_var_st init_vars[]= {
{"have_innodb", (char*) &have_innodb, SHOW_HAVE},
{"have_isam", (char*) &have_isam, SHOW_HAVE},
{"have_raid", (char*) &have_raid, SHOW_HAVE},
+ {"have_symlink", (char*) &have_symlink, SHOW_HAVE},
{"have_ssl", (char*) &have_ssl, SHOW_HAVE},
{"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR},
#ifdef HAVE_INNOBASE_DB
@@ -2826,7 +2849,7 @@ struct show_var_st init_vars[]= {
{"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR},
{"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
- {"innodb_unix_file_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
+ {"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
#endif
{"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG},
{"join_buffer_size", (char*) &join_buff_size, SHOW_LONG},
@@ -3038,6 +3061,8 @@ static void usage(void)
-O, --set-variable var=option\n\
Give a variable an value. --help lists variables\n\
--safe-mode Skip some optimize stages (for testing)\n\
+ --safe-show-database Don't show databases for which the user has no\n\
+ privileges\n\
--skip-concurrent-insert\n\
Don't use concurrent insert with MyISAM\n\
--skip-delay-key-write\n\
@@ -3054,6 +3079,7 @@ static void usage(void)
/* We have to break the string here because of VC++ limits */
puts("\
--skip-stack-trace Don't print a stack trace on failure\n\
+ --skip-symlink Don't allow symlinking of tables\n\
--skip-show-database Don't allow 'SHOW DATABASE' commands\n\
--skip-thread-priority\n\
Don't give threads different priorities.\n\
@@ -3063,14 +3089,19 @@ static void usage(void)
Default transaction isolation level\n\
--temp-pool Use a pool of temporary files\n\
-u, --user=user_name Run mysqld daemon as user\n\
- -V, --version output version information and exit");
+ -V, --version output version information and exit\n\
+ -W, --warnings Log some not critical warnings to the log file\n");
#ifdef __WIN__
puts("NT and Win32 specific options:\n\
--console Don't remove the console window\n\
--install Install mysqld as a service (NT)\n\
--remove Remove mysqld from the service list (NT)\n\
- --standalone Dummy option to start as a standalone program (NT)\n\
+ --standalone Dummy option to start as a standalone program (NT)\
");
+#ifdef USE_SYMDIR
+ puts("--use-symbolic-links Enable symbolic link support");
+#endif
+ puts("");
#endif
#ifdef HAVE_BERKELEY_DB
puts("\
@@ -3099,6 +3130,7 @@ static void usage(void)
puts("\
--innodb_data_home_dir=dir The common part for Innodb table spaces\n\
--innodb_data_file_path=dir Path to individual files and their sizes\n\
+ --innodb_flush_method=# With which method to flush data\n\
--innodb_flush_log_at_trx_commit[=#]\n\
Set to 0 if you don't want to flush logs\n\
--innodb_log_arch_dir=dir Where full logs should be archived\n\
@@ -3192,7 +3224,9 @@ static void get_options(int argc,char **argv)
int c,option_index=0;
myisam_delay_key_write=1; // Allow use of this
- while ((c=getopt_long(argc,argv,"ab:C:h:#::T::?l::L:O:P:sS::t:u:noVvI?",
+ my_use_symdir=1; // Use internal symbolic links
+
+ while ((c=getopt_long(argc,argv,"ab:C:h:#::T::?l::L:O:P:sS::t:u:noVvWI?",
long_options, &option_index)) != EOF)
{
switch(c) {
@@ -3202,6 +3236,9 @@ static void get_options(int argc,char **argv)
#endif
opt_endinfo=1; /* unireg: memory allocation */
break;
+ case 'W':
+ opt_warnings=1;
+ break;
case 'a':
opt_ansi_mode=1;
thd_startup_options|=OPTION_ANSI_MODE;
@@ -3241,6 +3278,9 @@ static void get_options(int argc,char **argv)
safemalloc_mem_limit = atoi(optarg);
#endif
break;
+ case OPT_SHOW_SLAVE_AUTH_INFO:
+ opt_show_slave_auth_info = 1;
+ break;
case OPT_SOCKET:
mysql_unix_port= optarg;
break;
@@ -3305,6 +3345,17 @@ static void get_options(int argc,char **argv)
abort_slave_event_count = atoi(optarg);
#endif
break;
+ case (int)OPT_SPORADIC_BINLOG_DUMP_FAIL:
+#ifndef DBUG_OFF
+ opt_sporadic_binlog_dump_fail = 1;
+#endif
+ break;
+ case (int)OPT_MAX_BINLOG_DUMP_EVENTS:
+#ifndef DBUG_OFF
+ max_binlog_dump_events = atoi(optarg);
+#endif
+ break;
+
case (int) OPT_LOG_SLAVE_UPDATES:
opt_log_slave_updates = 1;
break;
@@ -3432,6 +3483,9 @@ static void get_options(int argc,char **argv)
myisam_delay_key_write=0;
myisam_concurrent_insert=0;
myisam_recover_options= HA_RECOVER_NONE;
+ my_disable_symlinks=1;
+ my_use_symdir=0;
+ have_symlink=SHOW_OPTION_DISABLED;
ha_open_options&= ~HA_OPEN_ABORT_IF_CRASHED;
break;
case (int) OPT_SAFE:
@@ -3488,6 +3542,11 @@ static void get_options(int argc,char **argv)
case (int) OPT_SKIP_STACK_TRACE:
test_flags|=TEST_NO_STACKTRACE;
break;
+ case (int) OPT_SKIP_SYMLINK:
+ my_disable_symlinks=1;
+ my_use_symdir=0;
+ have_symlink=SHOW_OPTION_DISABLED;
+ break;
case (int) OPT_BIND_ADDRESS:
if (optarg && isdigit(optarg[0]))
{
@@ -3673,7 +3732,7 @@ static void get_options(int argc,char **argv)
case OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT:
innobase_flush_log_at_trx_commit= optarg ? test(atoi(optarg)) : 1;
break;
- case OPT_INNODB_UNIX_FILE_FLUSH_METHOD:
+ case OPT_INNODB_FLUSH_METHOD:
innobase_unix_file_flush_method=optarg;
break;
#endif /* HAVE_INNOBASE_DB */
@@ -3715,6 +3774,18 @@ static void get_options(int argc,char **argv)
case OPT_MASTER_PORT:
master_port= atoi(optarg);
break;
+ case OPT_REPORT_HOST:
+ report_host=optarg;
+ break;
+ case OPT_REPORT_USER:
+ report_user=optarg;
+ break;
+ case OPT_REPORT_PASSWORD:
+ report_password=optarg;
+ break;
+ case OPT_REPORT_PORT:
+ report_port= atoi(optarg);
+ break;
case OPT_MASTER_CONNECT_RETRY:
master_connect_retry= atoi(optarg);
break;
diff --git a/sql/net_pkg.cc b/sql/net_pkg.cc
index 073c716d793..0b50b34c7bd 100644
--- a/sql/net_pkg.cc
+++ b/sql/net_pkg.cc
@@ -140,7 +140,7 @@ net_printf(NET *net, uint errcode, ...)
void
send_ok(NET *net,ha_rows affected_rows,ulonglong id,const char *message)
{
- if(net->no_send_ok)
+ if (net->no_send_ok) // hack for re-parsing queries
return;
char buff[MYSQL_ERRMSG_SIZE+10],*pos;
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index a5600dffa4c..cde27d4933a 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -31,6 +31,7 @@
#include <winsock.h>
#endif
#include <global.h>
+#include <mysql_com.h>
#include <violite.h>
#include <my_sys.h>
#include <m_string.h>
@@ -39,7 +40,6 @@
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
-#include <violite.h>
#include <assert.h>
extern "C" {
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index b95b97d670f..0b3ac27d1f6 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -33,6 +33,7 @@
#include <m_ctype.h>
#include <nisam.h>
#include "sql_select.h"
+#include <assert.h>
#ifndef EXTRA_DEBUG
@@ -289,7 +290,6 @@ typedef struct st_qsel_param {
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
} PARAM;
-
static SEL_TREE * get_mm_parts(PARAM *param,Field *field,
Item_func::Functype type,Item *value,
Item_result cmp_type);
@@ -382,7 +382,7 @@ SQL_SELECT::~SQL_SELECT()
#undef index // Fix for Unixware 7
QUICK_SELECT::QUICK_SELECT(TABLE *table,uint key_nr,bool no_alloc)
- :error(0),index(key_nr),max_used_key_length(0),head(table),
+ :dont_free(0),error(0),index(key_nr),max_used_key_length(0),head(table),
it(ranges),range(0)
{
if (!no_alloc)
@@ -399,8 +399,11 @@ QUICK_SELECT::QUICK_SELECT(TABLE *table,uint key_nr,bool no_alloc)
QUICK_SELECT::~QUICK_SELECT()
{
- file->index_end();
- free_root(&alloc,MYF(0));
+ if (!dont_free)
+ {
+ file->index_end();
+ free_root(&alloc,MYF(0));
+ }
}
int QUICK_SELECT::init()
@@ -2455,8 +2458,8 @@ int QUICK_SELECT::get_next()
if ((error=file->index_first(record)))
DBUG_RETURN(error); // Empty table
if (cmp_next(range) == 0)
- DBUG_RETURN(0); // No matching records
- range=0; // To next range
+ DBUG_RETURN(0);
+ range=0; // No matching records; go to next range
continue;
}
if ((result = file->index_read(record,(byte*) range->min_key,
@@ -2516,6 +2519,223 @@ int QUICK_SELECT::cmp_next(QUICK_RANGE *range)
return (range->flag & NEAR_MAX) ? 1 : 0; // Exact match
}
+
+/*
+ * This is a hack: we inherit from QUICK_SELECT so that we can use the
+ * get_next() interface, but we have to hold a pointer to the original
+ * QUICK_SELECT because its data are used all over the place. What
+ * should be done is to factor out the data that is needed into a base
+ * class (QUICK_SELECT), and then have two subclasses (_ASC and _DESC)
+ * which handle the ranges and implement the get_next() function. But
+ * for now, this seems to work right at least.
+ */
+
+QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts)
+ : QUICK_SELECT(*q), rev_it(rev_ranges)
+{
+ bool not_read_after_key = file->option_flag() & HA_NOT_READ_AFTER_KEY;
+ for (QUICK_RANGE *r = it++; r; r = it++)
+ {
+ rev_ranges.push_front(r);
+ if (not_read_after_key && range_reads_after_key(r) ||
+ test_if_null_range(r,used_key_parts))
+ {
+ it.rewind(); // Reset range
+ error = HA_ERR_UNSUPPORTED;
+ dont_free=1; // Don't free memory from 'q'
+ return;
+ }
+ }
+ /* Remove EQ_RANGE flag for keys that are not using the full key */
+ for (QUICK_RANGE *r = rev_it++; r; r = rev_it++)
+ {
+ if ((r->flag & EQ_RANGE) &&
+ head->key_info[index].key_length != r->max_length)
+ r->flag&= ~EQ_RANGE;
+ }
+ rev_it.rewind();
+ q->dont_free=1; // Don't free shared mem
+ delete q;
+}
+
+
+int QUICK_SELECT_DESC::get_next()
+{
+ DBUG_ENTER("QUICK_SELECT_DESC::get_next");
+
+ /* The max key is handled as follows:
+ * - if there is NO_MAX_RANGE, start at the end and move backwards
+ * - if it is an EQ_RANGE, which means that max key covers the entire
+ * key, go directly to the key and read through it (sorting backwards is
+ * same as sorting forwards)
+ * - if it is NEAR_MAX, go to the key or next, step back once, and
+ * move backwards
+ * - otherwise (not NEAR_MAX == include the key), go after the key,
+ * step back once, and move backwards
+ */
+
+ for (;;)
+ {
+ int result;
+ if (range)
+ { // Already read through key
+ result = ((range->flag & EQ_RANGE)
+ ? file->index_next_same(record, (byte*) range->min_key,
+ range->min_length) :
+ file->index_prev(record));
+ if (!result)
+ {
+ if (cmp_prev(*rev_it.ref()) == 0)
+ DBUG_RETURN(0);
+ }
+ else if (result != HA_ERR_END_OF_FILE)
+ DBUG_RETURN(result);
+ }
+
+ if (!(range=rev_it++))
+ DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used
+
+ if (range->flag & NO_MAX_RANGE) // Read last record
+ {
+ int error;
+ if ((error=file->index_last(record)))
+ DBUG_RETURN(error); // Empty table
+ if (cmp_prev(range) == 0)
+ DBUG_RETURN(0);
+ range=0; // No matching records; go to next range
+ continue;
+ }
+
+ if (range->flag & EQ_RANGE)
+ {
+ result = file->index_read(record, (byte*) range->max_key,
+ range->max_length, HA_READ_KEY_EXACT);
+ }
+ else
+ {
+ dbug_assert(range->flag & NEAR_MAX || range_reads_after_key(range));
+ /* Note: even if max_key is only a prefix, HA_READ_AFTER_KEY will
+ * do the right thing - go past all keys which match the prefix */
+ result=file->index_read(record, (byte*) range->max_key,
+ range->max_length,
+ ((range->flag & NEAR_MAX) ?
+ HA_READ_KEY_EXACT : HA_READ_AFTER_KEY));
+ result = file->index_prev(record);
+ }
+ if (result)
+ {
+ if (result != HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN(result);
+ range=0; // Not found, to next range
+ continue;
+ }
+ if (cmp_prev(range) == 0)
+ {
+ if (range->flag == (UNIQUE_RANGE | EQ_RANGE))
+ range = 0; // Stop searching
+ DBUG_RETURN(0); // Found key is in range
+ }
+ range = 0; // To next range
+ }
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+/*
+ * Returns 0 if found key is inside range (found key >= range->min_key).
+ */
+int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range)
+{
+ if (range->flag & NO_MIN_RANGE)
+ return (0); /* key can't be to small */
+
+ KEY_PART *key_part = key_parts;
+ for (char *key = range->min_key, *end = key + range->min_length;
+ key < end;
+ key += key_part++->part_length)
+ {
+ int cmp;
+ if (key_part->null_bit)
+ {
+ // this key part allows null values; NULL is lower than everything else
+ if (*key++)
+ {
+ // the range is expecting a null value
+ if (!key_part->field->is_null())
+ return 0; // not null -- still inside the range
+ continue; // null -- exact match, go to next key part
+ }
+ else if (key_part->field->is_null())
+ return 1; // null -- outside the range
+ }
+ if ((cmp = key_part->field->key_cmp((byte*) key,
+ key_part->part_length)) > 0)
+ return 0;
+ if (cmp < 0)
+ return 1;
+ }
+ return (range->flag & NEAR_MIN) ? 1 : 0; // Exact match
+}
+
+/*
+ * True if this range will require using HA_READ_AFTER_KEY
+ See comment in get_next() about this
+ */
+
+bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range)
+{
+ return ((range->flag & (NO_MAX_RANGE | NEAR_MAX)) ||
+ !(range->flag & EQ_RANGE) ||
+ head->key_info[index].key_length != range->max_length) ? 1 : 0;
+}
+
+/* True if we are reading over a key that may have a NULL value */
+
+bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range,
+ uint used_key_parts)
+{
+ uint offset,end;
+ KEY_PART *key_part = key_parts,
+ *key_part_end= key_part+used_key_parts;
+
+ for (offset= 0, end = min(range->min_length, range->max_length) ;
+ offset < end && key_part != key_part_end ;
+ offset += key_part++->part_length)
+ {
+ uint null_length=test(key_part->null_bit);
+ if (!memcmp((char*) range->min_key+offset, (char*) range->max_key+offset,
+ key_part->part_length + null_length))
+ {
+ offset+=null_length;
+ continue;
+ }
+ if (null_length && range->min_key[offset])
+ return 1; // min_key is null and max_key isn't
+ // Range doesn't cover NULL. This is ok if there is no more null parts
+ break;
+ }
+ /*
+ If the next min_range is > NULL, then we can use this, even if
+ it's a NULL key
+ Example: SELECT * FROM t1 WHERE a = 2 AND b >0 ORDER BY a DESC,b DESC;
+
+ */
+ if (key_part != key_part_end && key_part->null_bit)
+ {
+ if (offset >= range->min_length || range->min_key[offset])
+ return 1; // Could be null
+ key_part++;
+ }
+ /*
+ If any of the key parts used in the ORDER BY could be NULL, we can't
+ use the key to sort the data.
+ */
+ for (; key_part != key_part_end ; key_part++)
+ if (key_part->null_bit)
+ return 1; // Covers null part
+ return 0;
+}
+
+
/*****************************************************************************
** Print a quick range for debugging
** TODO:
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 247dd260817..50215b94be0 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -54,9 +54,10 @@ class QUICK_RANGE :public Sql_alloc {
{}
};
+
class QUICK_SELECT {
public:
- bool next;
+ bool next,dont_free;
int error;
uint index,max_used_key_length;
TABLE *head;
@@ -80,6 +81,21 @@ public:
bool unique_key_range();
};
+
+class QUICK_SELECT_DESC: public QUICK_SELECT
+{
+public:
+ QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts);
+ int get_next();
+private:
+ int cmp_prev(QUICK_RANGE *range);
+ bool range_reads_after_key(QUICK_RANGE *range);
+ bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts);
+ void reset(void) { next=0; rev_it.rewind(); }
+ List<QUICK_RANGE> rev_ranges;
+ List_iterator<QUICK_RANGE> rev_it;
+};
+
class SQL_SELECT :public Sql_alloc {
public:
QUICK_SELECT *quick; // If quick-select used
diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt
index 666d70c957a..a8d7c187ad3 100644
--- a/sql/share/czech/errmsg.txt
+++ b/sql/share/czech/errmsg.txt
@@ -215,3 +215,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt
index 9f1f6accc1f..57a6ad3d13f 100644
--- a/sql/share/danish/errmsg.txt
+++ b/sql/share/danish/errmsg.txt
@@ -209,3 +209,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt
index 8b44af7eb7b..b886ba43f6f 100644
--- a/sql/share/dutch/errmsg.txt
+++ b/sql/share/dutch/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt
index ff29fffe958..f0887f5b376 100644
--- a/sql/share/english/errmsg.txt
+++ b/sql/share/english/errmsg.txt
@@ -153,7 +153,7 @@
"You have an error in your SQL syntax",
"Delayed insert thread couldn't get requested lock for table %-.64s",
"Too many delayed threads in use",
-"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)",
+"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s) - see http://www.mysql.com/doc/C/o/Communication_errors.html",
"Got a packet bigger than 'max_allowed_packet'",
"Got a read error from the connection pipe",
"Got an error from fcntl()",
@@ -185,7 +185,7 @@
"Got error %d during ROLLBACK",
"Got error %d during FLUSH_LOGS",
"Got error %d during CHECKPOINT",
-"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)",
+"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s) - see http://www.mysql.com/doc/C/o/Communication_errors.html",
"The handler for the table does not support binary table dump",
"Binlog closed, cannot RESET MASTER",
"Failed rebuilding the index of dumped table '%-.64s'",
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt
index e807c8d4a64..77a7d2f7841 100644
--- a/sql/share/estonian/errmsg.txt
+++ b/sql/share/estonian/errmsg.txt
@@ -210,3 +210,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt
index 5cbcfe81b87..2e375bd5e15 100644
--- a/sql/share/french/errmsg.txt
+++ b/sql/share/french/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt
index 307ed7a00f4..85289b46967 100644
--- a/sql/share/german/errmsg.txt
+++ b/sql/share/german/errmsg.txt
@@ -2,7 +2,7 @@
This file is public domain and comes with NO WARRANTY of any kind
Dirk Munzinger (dmun@4t2.com)
- Version: 17.03.1999 */
+ Version: 07.06.2001 */
"hashchk",
"isamchk",
@@ -196,16 +196,24 @@
"Netzfehler beim Lesen vom Master",
"Netzfehler beim Schreiben zum Master",
"Kann keinen FULLTEXT-Index finden der der Spaltenliste entspricht",
-"Can't execute the given command because you have active locked tables or an active transaction",
-"Unknown system variable '%-.64'",
-"Table '%-.64s' is marked as crashed and should be repaired",
-"Table '%-.64s' is marked as crashed and last (automatic?) repair failed",
-"Warning: Some non-transactional changed tables couldn't be rolled back",
-"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again',
-"This operation cannot be performed with a running slave, run SLAVE STOP first",
-"This operation requires a running slave, configure slave and do SLAVE START",
-"The server is not configured as slave, fix in config file or with CHANGE MASTER TO",
-"Could not initialize master info structure, check permisions on master.info",
-"Could not create slave thread, check system resources",
-"User %-.64s has already more than 'max_user_connections' active connections",
-"You may only use constant expressions with SET",
+"Kann das aktuelle Kommando wegen aktiver Tabellensperre oder aktiver Transaktion nicht ausführen",
+"Unbekannte System-Variabel '%-.64'",
+"Tabelle '%-.64s' ist als defekt makiert und sollte repariert werden",
+"Tabelle '%-.64s' ist als defekt makiert und der letzte (automatische) Reparaturversuch schlug fehl.",
+"Warnung: Das Rollback konnte bei einigen Tabellen, die nicht mittels Transaktionen geändert wurden, nicht ausgeführt werden.",
+"Multi-Statement Transaktionen benötigen mehr als 'max_binlog_cache_size' Bytes An Speicher. Diese mysqld-Variabel vergrössern und nochmal versuchen.',
+"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Das Kommand SLAVE STOP muss zuerst ausgeführt werden.",
+"Diese Operationbenötigt einen aktiven Slave. Slave konfigurieren und mittels SLAVE START aktivieren.",
+"Der Server ist nicht als Slave konfigiriert. Im Konfigurations-File oder mittel CHANGE MASTER TO beheben.",
+"Konnte Master-Info-Struktur nicht initialisieren; Berechtigungen von master.info prüfen.",
+"Konnte keinen Slave-Thread starten. System-Resourcen überprüfen.",
+"Benutzer %-.64s hat mehr als 'max_user_connections' aktive Verbindungen",
+"Bei der Verwendung mit SET dürfen nur Constante Ausdrücke verwendet werden",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt
index 119de63b2a7..bdae260f2f8 100644
--- a/sql/share/greek/errmsg.txt
+++ b/sql/share/greek/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt
index 7e9b9e6a3bf..f2d45b94b50 100644
--- a/sql/share/hungarian/errmsg.txt
+++ b/sql/share/hungarian/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt
index d6c857d44a4..a46e712a6e6 100644
--- a/sql/share/italian/errmsg.txt
+++ b/sql/share/italian/errmsg.txt
@@ -199,10 +199,18 @@
"La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita",
"Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)",
"La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare',
-"This operation cannot be performed with a running slave, run SLAVE STOP first",
-"This operation requires a running slave, configure slave and do SLAVE START",
-"The server is not configured as slave, fix in config file or with CHANGE MASTER TO",
-"Could not initialize master info structure, check permisions on master.info",
-"Could not create slave thread, check system resources",
-"User %-.64s has already more than 'max_user_connections' active connections",
-"You may only use constant expressions with SET",
+"Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima SLAVE STOP",
+"Questa operazione richiede un database 'slave', configurarlo ed eseguire SLAVE START",
+"Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO",
+"Impossibile inizializzare la struttura 'master info', controllare i permessi sul file master.info",
+"Impossibile creare il thread 'slave', controllare le risorse di sistema",
+"L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive",
+"Si possono usare solo espressioni costanti con SET",
+"E' scaduto il timeout per l'attesa del lock",
+"Il numero totale di lock e' maggiore della grandezza della tabella di lock",
+"I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt
index a62f22c253d..6d5ab99f86d 100644
--- a/sql/share/japanese/errmsg.txt
+++ b/sql/share/japanese/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt
index c476ad8fa3c..5fa44f581bf 100644
--- a/sql/share/korean/errmsg.txt
+++ b/sql/share/korean/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt
index 2a57c93cc84..f45daa00449 100644
--- a/sql/share/norwegian-ny/errmsg.txt
+++ b/sql/share/norwegian-ny/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt
index cf23991eefa..951631cae75 100644
--- a/sql/share/norwegian/errmsg.txt
+++ b/sql/share/norwegian/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt
index 03e9d59dacd..79b420022bf 100644
--- a/sql/share/polish/errmsg.txt
+++ b/sql/share/polish/errmsg.txt
@@ -210,3 +210,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt
index 37f2bf9e7ac..cd3e948546e 100644
--- a/sql/share/portuguese/errmsg.txt
+++ b/sql/share/portuguese/errmsg.txt
@@ -1,208 +1,216 @@
/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
This file is public domain and comes with NO WARRANTY of any kind */
-
+/* Updated by Roberto M. Serqueira - martinsc@uol.com.br - 05.24.2001 */
"hashchk",
"isamchk",
-"NO",
-"YES",
-"Nao consegui criar o arquivo '%-.64s' (Erro: %d)",
-"Nao consegui criar a tabela '%-.64s' (Erro: %d)",
-"Nao consegui criar o banco de dados '%-.64s'. Erro %d",
-"Nao consegui criar o banco de dados '%-.64s'. Este banco ja existe",
-"Nao consegui deletar o banco de dados '%-.64s'. Este banco nao existe",
-"Erro deletando o banco de dados(Nao foi possivel deletar '%-.64s', erro %d)",
-"Erro deletando o banco de dados(Nao foi possivel remover o diretorio '%-.64s', erro %d)",
-"Erro ao deletar '%-.64s' (Erro: %d)",
-"Nao foi possivel ler o registro na tabela do sistema",
-"Nao foi possivel obter o status de '%-.64s' (Erro: %d)",
-"Nao foi possivel obter o diretorio corrente (Erro: %d)",
-"Nao foi possivel travar o arquivo (Erro: %d)",
-"Nao foi possivel abrir arquivo: '%-.64s'. (Erro: %d)",
-"Nao foi possivel encontrar arquivo: '%-.64s' (Erro: %d)",
-"Nao foi possivel ler o diretorio de '%-.64s' (Erro: %d)",
-"Nao foi possivel ir para o diretorio '%-.64s' (Erro: %d)",
-"Registro alterado apos a ultima leitura da tabela '%-.64s'",
-"Disco cheio (%s). Aguardando espaco livre....",
-"Nao foi possivel gravar, chave duplicada na tabela '%-.64s'",
-"Erro ao fechar '%-.64s' (Erro: %d)",
-"Erro lendo arquivo '%-.64s' (Erro: %d)",
-"Erro ao renomear '%-.64s' to '%-.64s' (Erro: %d)",
-"Error gravando arquivo '%-.64s' (Erro: %d)",
-"'%-.64s' esta travado contra alteracoes",
-"Ordenacao cancelada",
-"Visao '%-.64s' nao existe para '%-.64s'",
-"Erro %d do manipulador de tabelas",
-"Manipulador da tabela '%-.64s' nao suporta esta opcao",
-"Nao foi possivel encontrar o registro em '%-.64s'",
-"Informacao invalida no arquivo: '%-.64s'",
-"Arquivo de indice invalido na tabela: '%-.64s'. Tente conserta-lo!",
-"Arquivo de indice destaualizado na tabela '%-.64s'; Conserte-o!",
-"'%-.64s' esta disponivel somente para leitura",
-"Sem memoria. Renicie o programa e tente novamente (Necessita de %d bytes)",
-"Sem memoria para ordenacao. Aumente o espaco de memoria para ordenacao.",
-"Fim de arquivo inesperado enquanto lendo o arquivo '%-.64s' (Erro: %d)",
-"Excesso de conexoes",
-"Thread sem memoria disponivel",
-"Nao foi possivel obter o nome da maquina para este endereco IP",
-"Comunicacao invalida",
-"Acesso negado ao usuario : '%-.32s@%-.64s' ao banco de dados '%-.64s'",
-"Acesso negado ao usuario: '%-.32s@%-.64s' (usando a senha: %s)",
-"Nenhum banco de dados selecionado",
+"não",
+"sim",
+"Não pode criar arquivo '%-.64s' (erro no. %d)",
+"Não pode criar tabela '%-.64s' (erro no. %d)",
+"Não pode criar banco de dados '%-.64s' (erro no. %d)",
+"Não pode criar banco de dados '%-.64s'. Banco de dados já existe",
+"Não pode eliminar banco de dados '%-.64s'. Banco de dados não existe",
+"Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)",
+"Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)",
+"Erro na deleção de '%-.64s' (erro no. %d)",
+"Não pode ler registro em tabela do sistema",
+"Não pode obter status de '%-.64s' (erro no. %d)",
+"Não pode obter diretório corrente (erro no. %d)",
+"Não pode travar arquivo (erro no. %d)",
+"Não pode abrir arquivo '%-.64s' (erro no. %d)",
+"Não pode encontrar arquivo '%-.64s' (erro no. %d)",
+"Não pode ler diretório de '%-.64s' (erro no. %d)",
+"Não pode mudar para o diretório '%-.64s' (erro no. %d)",
+"Registro alterado desde a última leitura da tabela '%-.64s'",
+"Disco cheio (%s). Aguardando alguém liberar algum espaço....",
+"Não pode gravar. Chave duplicada na tabela '%-.64s'",
+"Erro ao fechar '%-.64s' (erro no. %d)",
+"Erro ao ler arquivo '%-.64s' (erro no. %d)",
+"Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)",
+"Erro ao gravar arquivo '%-.64s' (erro no. %d)",
+"'%-.64s' está com travamento contra alterações",
+"Ordenação abortada",
+"'View' '%-.64s' não existe para '%-.64s'",
+"Obteve erro %d no manipulador de tabelas",
+"Manipulador de tabela para '%-.64s' não tem esta opção",
+"Não pode encontrar registro em '%-.64s'",
+"Informação incorreta no arquivo '%-.64s'",
+"Arquivo chave incorreto para tabela '%-.64s'. Tente reparar",
+"Arquivo chave desatualizado para tabela '%-.64s'. Repare-o!",
+"Tabela '%-.64s' é somente para leitura",
+"Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)",
+"Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação",
+"Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)",
+"Excesso de conexões",
+"Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou se você pode adicionar mais área de 'swap'",
+"Não pode obter nome do 'host' para seu endereço",
+"Negociação de acesso falhou",
+"Acesso negado para o usuário '%-.32s@%-.64s' ao banco de dados '%-.64s'",
+"Acesso negado para o usuário '%-.32s@%-.64s' (uso de senha: %s)",
+"Nenhum banco de dados foi selecionado",
"Comando desconhecido",
-"Coluna '%-.64s' nao pode ser vazia",
+"Coluna '%-.64s' não pode ter NULL",
"Banco de dados '%-.64s' desconhecido",
-"Tabela '%-.64s' ja existe",
+"Tabela '%-.64s' já existe",
"Tabela '%-.64s' desconhecida",
-"Coluna: '%-.64s' em %s e ambigua",
-"Finalizacao do servidor em andamento",
-"Coluna '%-.64s' desconhecida em %s",
-"'%-.64s' utilizado nao esta em 'group by'",
-"Nao foi possivel agrupar em '%-.64s'",
-"Clausula contem funcoes de soma e colunas juntos",
-"Contagem de colunas nao confere com a contagem de valores",
-"Nome do identificador '%-.64s' muito grande",
+"Coluna '%-.64s' em '%-.64s' é ambígua",
+"'Shutdown' do servidor em andamento",
+"Coluna '%-.64s' desconhecida em '%-.64s'",
+"'%-.64s' não está em 'GROUP BY'",
+"Não pode agrupar em '%-.64s'",
+"Cláusula contém funções de soma e colunas juntos",
+"Contagem de colunas não confere com a contagem de valores",
+"Nome identificador '%-.100s' é longo demais",
"Nome da coluna '%-.64s' duplicado",
"Nome da chave '%-.64s' duplicado",
-"Inclusao de '%-.64s' duplicada para a chave %d",
-"Especificador de coluna invalido para a coluna '%-.64s'",
-"%s proximo de '%-.64s' a linha %d",
-"Selecao vazia",
-"Tabela/alias nao e unica: '%-.64s'",
-"Valor padrao invalido para '%-.64s'",
-"Mais de uma chave primaria definida",
-"Muitas chaves definidas. O maximo permitido sao %d chaves",
-"Muitas partes de chave definidas. O maximo permitido sao %d partes",
-"Chave especificada e muito longa. O comprimento maximo permitido e %d",
-"Coluna chave '%-.64s' nao existe na tabela",
-"Coluna binaria '%-.64s' nao pode ser utilizada na definicao de chaves",
-"Comprimento da coluna '%-.64s' muito grande(max = %d). Utilize o campo binario",
-"Somente e permitido um campo auto incrementado, e ele deve ser chave da tabela",
-"%s: pronto para conexoes\n",
-"%s: Finalizacao concluida normalmente\n",
-"%s: Recebeu o sinal %d. Cancelando!\n",
-"%s: Finalizacao concluida\n",
-"%s: Forcando a finalizacao da tarefa %ld usuario: '%-.64s'\n",
-"Nao foi possivel criar o socket IP",
-"Tabela '%-.64s' nao possui um indice criado por CREATE INDEX. Recrie a tabela",
-"O separador de campos nao esta conforme esperado. Confira no manual",
-"Nao e possivel utilizar comprimento de linha fixo com campos binarios. Favor usar 'fields terminated by'.",
-"O arquivo '%-.64s' precisa estar no diretorio do banco de dados, e sua leitura permitida a todos",
-"Arquivo '%-.64s' ja existe",
-"Registros: %ld Apagados: %ld Ignorados: %ld Avisos: %ld",
-"Registros: %ld Duplicados: %ld",
-"Parte da chave errada. A parte utilizada nao e um texto ou tem comprimento maior que o definido",
-"Nao e possivel retirar todas as colunas da tabela com ALTER TABLE. Use DROP TABLE",
-"Nao foi possivel DROP '%-.64s'. Confira se este campo/chave existe",
-"Registros: %ld Duplicados: %ld Avisos: %ld",
-"INSERT TABLE '%-.64s' nao e permitido em FROM lista de tabelas",
-"Tarefa desconhecida id: %lu",
-"Voce nao e o responsavel pela tarefa %lu",
-"Nenhuma tabela em uso",
-"Muitos textos para a coluna %s e SET",
-"Nao foi possivel um unico nome para o arquivo %s.(1-999)\n",
-"Tabela '%-.64s' esta travada para leitura, e nao pode ser atualizada",
-"Tabela '%-.64s' nao foi travada com LOCK TABLES",
-"Campo binario '%-.64s' nao pode ter um valor inicial",
-"Nome de banco de dados invalido: '%-.64s'",
-"Nome de tabela invalido: '%-.64s'",
-"O SELECT muitos registros, e possivelmente vai demorar. Confira sua clausula WHERE e utilize SET OPTION SQL_BIG_SELECTS=1 se o SELECT esta correto",
+"Entrada '%-.64s' duplicada para a chave %d",
+"Especificador de coluna incorreto para a coluna '%-.64s'",
+"%s próximo a '%-.80s' na linha %d",
+"'Query' estava vazia",
+"Tabela/alias '%-.64s' não única",
+"Valor 'default' inválido para '%-.64s'",
+"Definida mais de uma chave primária",
+"Especificadas chaves demais. O máximo permitido são %d chaves",
+"Especificadas partes de chave demais. O máximo permitido são %d partes",
+"Chave especificada longa demais. O comprimento máximo permitido é %d",
+"Coluna chave '%-.64s' não existe na tabela",
+"Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado",
+"Comprimento da coluna '%-.64s' grande demais (max = %d). Use BLOB em seu lugar",
+"Definição incorreta de tabela. Somente é permitido um campo auto-incrementado e ele tem que ser definido como chave",
+"%s: Pronto para conexões\n",
+"%s: 'Shutdown' normal\n",
+"%s: Obteve sinal %d. Abortando!\n",
+"%s: 'Shutdown' completo\n",
+"%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n",
+"Não pode criar 'socket' IP",
+"Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela",
+"Argumento separador de campos não é o esperado. Confira no manual",
+"Você não pode usar comprimento de linha fixo com BLOBs. Favor usar 'fields terminated by'",
+"Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura permitida para todos",
+"Arquivo '%-.80s' já existe",
+"Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld",
+"Registros: %ld - Duplicados: %ld",
+"Parte de chave incorreta. A parte de chave usada não é um 'string' ou o comprimento usado é maior do que a parte de chave",
+"Você não pode deletar todas as colunas com ALTER TABLE. Use DROP TABLE em seu lugar",
+"Não pode fazer DROP '%-.64s'. Confira se este campo/chave existe",
+"Registros: %ld - Duplicados: %ld - Avisos: %ld",
+"INSERT TABLE '%-.64s' não é permitido em lista de tabelas FROM",
+"'Id' de 'thread' %lu desconhecido",
+"Você não é proprietário da 'thread' %lu",
+"Nenhuma tabela usada",
+"'Strings' demais para coluna '%-.64s' e SET",
+"Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n",
+"Tabela '%-.64s' foi travada com trava de READ e não pode ser atualizada",
+"Tabela '%-.64s' não foi travada com LOCK TABLES",
+"Coluna BLOB '%-.64s' não pode ter um valor 'default'",
+"Nome de banco de dados '%-.100s' incorreto",
+"Nome de tabela '%-.100s' incorreto",
+"O SELECT examinaria registros demais e provavelmente tomaria um tempo muito longo. Confira sua cláusula WHERE e use SET OPTION SQL_BIG_SELECTS=1, se o SELECT estiver correto",
"Erro desconhecido",
-"Procedimento %s desconhecido",
-"Numero de parametros para o procedimento %s esta incorreto",
-"Parametro incorreto para o procedimento %s",
-"Tabela '%-.64s' descohecida em %s",
-"Campo '%-.64s' definido em duplicidade",
-"Invalid use of group function",
-"Table '%-.64s' uses a extension that doesn't exist in this MySQL version",
-"A table must have at least 1 column",
-"The table '%-.64s' is full",
-"Unknown character set: '%-.64s'",
-"Too many tables. MySQL can only use %d tables in a join",
-"Too many fields",
-"Too big row size. The maximum row size, not counting blobs, is %d. You have to change some fields to blobs",
-"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed",
-"Cross dependency found in OUTER JOIN. Examine your ON conditions",
-"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL",
-"Can't load function '%-.64s'",
-"Can't initialize function '%-.64s'; %-.80s",
-"No paths allowed for shared library",
-"Function '%-.64s' already exist",
-"Can't open shared library '%-.64s' (errno: %d %s)",
-"Can't find function '%-.64s' in library'",
-"Function '%-.64s' is not defined",
-"Host '%-.64s' is blocked because of many connection errors. Unblock with 'mysqladmin flush-hosts'",
-"Host '%-.64s' is not allowed to connect to this MySQL server",
-"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords",
-"You must have privileges to update tables in the mysql database to be able to change passwords for others",
-"Can't find any matching row in the user table",
-"Rows matched: %ld Changed: %ld Warnings: %ld",
-"Can't create a new thread (errno %d). If you are not out of available memory you can consult the manual for any possible OS dependent bug",
-"Column count doesn't match value count at row %ld",
-"Can't reopen table: '%-.64s',
-"Invalid use of NULL value",
-"Got error '%-.64s' from regexp",
-"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause",
-"There is no such grant defined for user '%-.32s' on host '%-.64s'",
-"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'",
-"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'",
-"Illegal GRANT/REVOKE command. Please consult the manual which privleges can be used.",
-"The host or user argument to GRANT is too long",
-"Table '%-64s.%s' doesn't exist",
-"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'",
-"The used command is not allowed with this MySQL version",
-"Something is wrong in your syntax",
-"Delayed insert thread couldn't get requested lock for table %-.64s",
-"Too many delayed threads in use",
-"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)",
-"Got a packet bigger than 'max_allowed_packet'",
-"Got a read error from the connection pipe",
-"Got an error from fcntl()",
-"Got packets out of order",
-"Couldn't uncompress communication packet",
-"Got an error reading communication packets"
-"Got timeout reading communication packets",
-"Got an error writing communication packets",
-"Got timeout writing communication packets",
-"Result string is longer than max_allowed_packet",
-"The used table type doesn't support BLOB/TEXT columns",
-"The used table type doesn't support AUTO_INCREMENT columns",
-"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES",
-"Incorrect column name '%-.100s'",
-"The used table handler can't index column '%-.64s'",
-"All tables in the MERGE table are not defined identically",
-"Can't write, because of unique constraint, to table '%-.64s'",
-"BLOB column '%-.64s' used in key specification without a key length",
-"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead",
-"Result consisted of more than one row",
-"This table type requires a primary key",
-"This version of MySQL is not compiled with RAID support",
-"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column",
-"Key '%-.64s' doesn't exist in table '%-.64s'",
-"Can't open table",
-"The handler for the table doesn't support check/repair",
-"You are not allowed to execute this command in a transaction",
-"Got error %d during COMMIT",
-"Got error %d during ROLLBACK",
-"Got error %d during FLUSH_LOGS",
-"Got error %d during CHECKPOINT",
-"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)",
-"The handler for the table does not support binary table dump",
-"Binlog closed while trying to FLUSH MASTER",
-"Failed rebuilding the index of dumped table '%-.64s'",
-"Error from master: '%-.64s'",
-"Net error reading from master",
-"Net error writing to master",
-"Can't find FULLTEXT index matching the column list",
-"Can't execute the given command because you have active locked tables or an active transaction",
-"Unknown system variable '%-.64'",
-"Table '%-.64s' is marked as crashed and should be repaired",
-"Table '%-.64s' is marked as crashed and last (automatic?) repair failed",
-"Warning: Some non-transactional changed tables couldn't be rolled back",
-"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again',
-"This operation cannot be performed with a running slave, run SLAVE STOP first",
-"This operation requires a running slave, configure slave and do SLAVE START",
-"The server is not configured as slave, fix in config file or with CHANGE MASTER TO",
-"Could not initialize master info structure, check permisions on master.info",
-"Could not create slave thread, check system resources",
-"User %-.64s has already more than 'max_user_connections' active connections",
-"You may only use constant expressions with SET",
+"'Procedure' '%-.64s' desconhecida",
+"Número de parâmetros incorreto para a 'procedure' '%-.64s'",
+"Parâmetros incorretos para a 'procedure' '%-.64s'",
+"Tabela '%-.64s' desconhecida em '%-.32s'",
+"Coluna '%-.64s' especificada duas vezes",
+"Uso inválido da função GROUP",
+"Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL",
+"Uma tabela tem que ter pelo menos uma (1) coluna",
+"Tabela '%-.64s' está cheia",
+"Conjunto de caracteres '%-.64s' desconhecido",
+"Tabelas demais. O MySQL pode usar somente %d tabelas em um JOIN",
+"Colunas demais",
+"Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é de %d. Você tem que mudar alguns campos para BLOBs",
+"Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld . Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário",
+"Dependência cruzada encontrada em OUTER JOIN. Examine suas condições ON",
+"Coluna '%-.64s' é usada com UNIQUE ou INDEX, mas não está definida como NOT NULL",
+"Não pode carregar a função '%-.64s'",
+"Não pode inicializar a função '%-.64s' - '%-.80s'",
+"Não é permitido caminho para biblioteca compartilhada",
+"Função '%-.64s' já existe",
+"Não pode abrir biblioteca compartilhada '%-.64s' (erro no. '%d' - '%-.64s')",
+"Não pode encontrar a função '%-.64s' na biblioteca",
+"Função '%-.64s' não está definida",
+"'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'",
+"'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL",
+"Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas",
+"Você tem que ter o privilégio para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros",
+"Não pode encontrar nenhuma linha que combine na tabela user",
+"Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld",
+"Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre uma possível falha dependente do sistema operacional",
+"Contagem de colunas não confere com a contagem de valores na linha %ld",
+"Não pode reabrir a tabela '%-.64s'",
+"Uso inválido do valor NULL",
+"Obteve erro '%-.64s' em regexp",
+"Mistura de colunas GROUP (MIN(),MAX(),COUNT()...) com colunas não GROUP é ilegal, se não existir cláusula GROUP BY",
+"Não existe tal 'grant' definido para o usuário '%-.32s' no 'host' '%-.64s'",
+"Comando '%-.16s' negado para o usuário '%-.32s@%-.64s' na tabela '%-.64s'",
+"Comando '%-.16s' negado para o usuário '%-.32s@%-.64s' na coluna '%-.64s', na tabela '%-.64s'",
+"Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados.",
+"Argumento de 'host' ou de usuário para o GRANT é longo demais",
+"Tabela '%-.64s.%-.64s' não existe",
+"Não existe tal 'grant' definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'",
+"Comando usado não é permitido para esta versão do MySQL",
+"Você tem um erro de sintaxe no seu SQL",
+"'Thread' de inserção retardada ('delayed') não conseguiu obter trava solicitada na tabela '%-.64s'",
+"Excesso de 'threads' retardadas ('delayed') em uso",
+"Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)",
+"Obteve um pacote maior do que 'max_allowed_packet'",
+"Obteve um erro de leitura no 'pipe' de conexão",
+"Obteve um erro em fcntl()",
+"Obteve pacotes fora de ordem",
+"Não conseguiu descomprimir pacote de comunicação",
+"Obteve um erro na leitura de pacotes de comunicação",
+"Obteve expiração de tempo ('timeout') na leitura de pacotes de comunicação",
+"Obteve um erro na gravação de pacotes de comunicação",
+"Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação",
+"'String' resultante é mais longa do que 'max_allowed_packet'",
+"Tipo de tabela usado não permite colunas BLOB/TEXT",
+"Tipo de tabela usado não permite colunas AUTO_INCREMENT",
+"INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque está travada com LOCK TABLES",
+"Nome de coluna '%-.100s' incorreto",
+"O manipulador de tabela usado não pode indexar a coluna '%-.64s'",
+"Tabelas no MERGE não estão todas definidas identicamente",
+"Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'",
+"Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave",
+"Todas as partes de uma PRIMARY KEY têm que ser NOT NULL. Se você precisar de NULL em uma chave, use UNIQUE em seu lugar",
+"O resultado consistiu em mais do que uma linha",
+"Este tipo de tabela requer uma chave primária",
+"Esta versão do MySQL não foi compilada com suporte a RAID",
+"Você está usando modo de atualização seguro e tentou atualizar uma tabela sem um WHERE que use uma coluna tipo KEY",
+"Chave '%-.64s' não existe na tabela '%-.64s'",
+"Não pode abrir a tabela",
+"O manipulador de tabela não suporta check/repair",
+"Não lhe é permitido executar este comando em uma 'transaction'",
+"Obteve erro %d durante COMMIT",
+"Obteve erro %d durante ROLLBACK",
+"Obteve erro %d durante FLUSH_LOGS",
+"Obteve erro %d durante CHECKPOINT",
+"Conexão %ld abortada ao banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')",
+"O manipulador de tabela não suporta DUMP binário de tabela",
+"Binlog fechado. Não pode fazer RESET MASTER",
+"Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'",
+"Erro no 'master' '%-.64s'",
+"Erro de rede na leitura do 'master'",
+"Erro de rede na gravação do 'master'",
+"Não pode encontrar índice FULLTEXT que combine com a lista de colunas",
+"Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma 'transaction' ativa",
+"Variável de sistema '%-.64s' desconhecida",
+"Tabela '%-.64s' está marcada como danificada e deve ser reparada",
+"Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou",
+"Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas ('rolled back')",
+"'Multi-statement transaction' requereu mais do que 'max_binlog_cache_size' bytes de armazenagem. Aumente o valor desta variável do mysqld e tente novamente",
+"Esta operação não pode ser realizada com um 'slave' em execução. Execute SLAVE STOP primeiro",
+"Esta operação requer um 'slave' em execução. Configure o 'slave' e execute SLAVE START",
+"O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO",
+"Não pode inicializar a estrutura de informação do 'master'. Verifique as permissões em 'master.info'",
+"Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema",
+"Usuário '%-.64s' já possui 'max_user_connections' conexões ativas",
+"Você pode usar apenas expressões de constante com SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt
index 6bc2695bed5..05362606c44 100644
--- a/sql/share/romanian/errmsg.txt
+++ b/sql/share/romanian/errmsg.txt
@@ -210,3 +210,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt
index 75d21dda888..64adb134c34 100644
--- a/sql/share/russian/errmsg.txt
+++ b/sql/share/russian/errmsg.txt
@@ -209,3 +209,11 @@
"îÅ ÍÏÇÕ ÓÏÚÄÁÔØ ÐÒÏÃÅÓÓ SLAVE, ÐÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt
index 673499f5105..f951e8f9435 100644
--- a/sql/share/slovak/errmsg.txt
+++ b/sql/share/slovak/errmsg.txt
@@ -214,3 +214,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt
index d470556fd58..3088c9b4ee1 100644
--- a/sql/share/spanish/errmsg.txt
+++ b/sql/share/spanish/errmsg.txt
@@ -207,3 +207,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD
index 672ce97c575..227a02ac873 100644
--- a/sql/share/swedish/errmsg.OLD
+++ b/sql/share/swedish/errmsg.OLD
@@ -206,3 +206,8 @@
"Kunde inte starta en tråd för replikering",
"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar",
"Du kan endast använda konstant-uttryck med SET",
+"Tiden att få ett lås var för lång",
+"Antal lås är större än vad som ryms i lock tabellen",
+"Du kan inte låsa tabeller/poster under READ UNCOMMITTED",
+"Fick fel vid inloggning till master: %-.128s",
+"Fick fel vid exekvering av fråga på master: %-.128s",
diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt
index 672ce97c575..b8ee9e62b03 100644
--- a/sql/share/swedish/errmsg.txt
+++ b/sql/share/swedish/errmsg.txt
@@ -206,3 +206,11 @@
"Kunde inte starta en tråd för replikering",
"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar",
"Du kan endast använda konstant-uttryck med SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/slave.cc b/sql/slave.cc
index 6b9c376a625..e8ffb15110b 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -20,6 +20,7 @@
#include <myisam.h>
#include "mini_client.h"
#include "slave.h"
+#include "sql_repl.h"
#include <thr_alarm.h>
#include <my_dir.h>
@@ -55,7 +56,7 @@ static int init_slave_thread(THD* thd);
static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi);
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi);
static int safe_sleep(THD* thd, int sec);
-static int request_table_dump(MYSQL* mysql, char* db, char* table);
+static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
static int create_table_from_dump(THD* thd, NET* net, const char* db,
const char* table_name);
inline char* rewrite_db(char* db);
@@ -314,28 +315,31 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
const char* table_name)
{
uint packet_len = my_net_read(net); // read create table statement
+ Vio* save_vio;
+ HA_CHECK_OPT check_opt;
TABLE_LIST tables;
- int error = 0;
+ int error= 1;
+ handler *file;
- if(packet_len == packet_error)
- {
- send_error(&thd->net, ER_MASTER_NET_READ);
- return 1;
- }
- if(net->read_pos[0] == 255) // error from master
- {
- net->read_pos[packet_len] = 0;
- net_printf(&thd->net, ER_MASTER, net->read_pos + 3);
- return 1;
- }
+ if (packet_len == packet_error)
+ {
+ send_error(&thd->net, ER_MASTER_NET_READ);
+ return 1;
+ }
+ if (net->read_pos[0] == 255) // error from master
+ {
+ net->read_pos[packet_len] = 0;
+ net_printf(&thd->net, ER_MASTER, net->read_pos + 3);
+ return 1;
+ }
thd->command = COM_TABLE_DUMP;
thd->query = sql_alloc(packet_len + 1);
- if(!thd->query)
- {
- sql_print_error("create_table_from_dump: out of memory");
- net_printf(&thd->net, ER_GET_ERRNO, "Out of memory");
- return 1;
- }
+ if (!thd->query)
+ {
+ sql_print_error("create_table_from_dump: out of memory");
+ net_printf(&thd->net, ER_GET_ERRNO, "Out of memory");
+ return 1;
+ }
memcpy(thd->query, net->read_pos, packet_len);
thd->query[packet_len] = 0;
thd->current_tablenr = 0;
@@ -344,15 +348,12 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
thd->proc_info = "Creating table from master dump";
// save old db in case we are creating in a different database
char* save_db = thd->db;
- thd->db = thd->last_nx_db;
+ thd->db = (char*)db;
mysql_parse(thd, thd->query, packet_len); // run create table
- thd->db = save_db; // leave things the way the were before
+ thd->db = save_db; // leave things the way the were before
- if(thd->query_error)
- {
- close_thread_tables(thd); // mysql_parse takes care of the error send
- return 1;
- }
+ if (thd->query_error)
+ goto err; // mysql_parse took care of the error send
bzero((char*) &tables,sizeof(tables));
tables.db = (char*)db;
@@ -361,83 +362,90 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
thd->proc_info = "Opening master dump table";
if (!open_ltable(thd, &tables, TL_WRITE))
{
- // open tables will send the error
+ send_error(&thd->net,0,0); // Send error from open_ltable
sql_print_error("create_table_from_dump: could not open created table");
- close_thread_tables(thd);
- return 1;
+ goto err;
}
- handler *file = tables.table->file;
+ file = tables.table->file;
thd->proc_info = "Reading master dump table data";
if (file->net_read_dump(net))
{
net_printf(&thd->net, ER_MASTER_NET_READ);
sql_print_error("create_table_from_dump::failed in\
handler::net_read_dump()");
- close_thread_tables(thd);
- return 1;
+ goto err;
}
- HA_CHECK_OPT check_opt;
check_opt.init();
check_opt.flags|= T_VERY_SILENT;
check_opt.quick = 1;
thd->proc_info = "Rebuilding the index on master dump table";
- Vio* save_vio = thd->net.vio;
// we do not want repair() to spam us with messages
// just send them to the error log, and report the failure in case of
// problems
+ save_vio = thd->net.vio;
thd->net.vio = 0;
- if (file->repair(thd,&check_opt ))
- {
- net_printf(&thd->net, ER_INDEX_REBUILD,tables.table->real_name );
- error = 1;
- }
+ error=file->repair(thd,&check_opt) != 0;
thd->net.vio = save_vio;
+ if (error)
+ net_printf(&thd->net, ER_INDEX_REBUILD,tables.table->real_name);
+
+err:
close_thread_tables(thd);
-
thd->net.no_send_ok = 0;
return error;
}
-int fetch_nx_table(THD* thd, MASTER_INFO* mi)
+int fetch_nx_table(THD* thd, const char* db_name, const char* table_name,
+ MASTER_INFO* mi, MYSQL* mysql)
{
- MYSQL* mysql = mc_mysql_init(NULL);
int error = 1;
int nx_errno = 0;
- if(!mysql)
- {
- sql_print_error("fetch_nx_table: Error in mysql_init()");
- nx_errno = ER_GET_ERRNO;
- goto err;
- }
-
- safe_connect(thd, mysql, mi);
- if(slave_killed(thd))
+ bool called_connected = (mysql != NULL);
+ if (!called_connected && !(mysql = mc_mysql_init(NULL)))
+ {
+ sql_print_error("fetch_nx_table: Error in mysql_init()");
+ nx_errno = ER_GET_ERRNO;
goto err;
+ }
- if(request_table_dump(mysql, thd->last_nx_db, thd->last_nx_table))
+ if (!called_connected)
+ {
+ if (connect_to_master(thd, mysql, mi))
{
- nx_errno = ER_GET_ERRNO;
- sql_print_error("fetch_nx_table: failed on table dump request ");
+ sql_print_error("Could not connect to master while fetching table\
+ '%-64s.%-64s'", db_name, table_name);
+ nx_errno = ER_CONNECT_TO_MASTER;
goto err;
}
+ }
+ if (slave_killed(thd))
+ goto err;
- if(create_table_from_dump(thd, &mysql->net, thd->last_nx_db,
- thd->last_nx_table))
- {
- // create_table_from_dump will have sent the error alread
- sql_print_error("fetch_nx_table: failed on create table ");
- goto err;
- }
+ if (request_table_dump(mysql, db_name, table_name))
+ {
+ nx_errno = ER_GET_ERRNO;
+ sql_print_error("fetch_nx_table: failed on table dump request ");
+ goto err;
+ }
+
+ if (create_table_from_dump(thd, &mysql->net, db_name,
+ table_name))
+ {
+ // create_table_from_dump will have sent the error alread
+ sql_print_error("fetch_nx_table: failed on create table ");
+ goto err;
+ }
error = 0;
err:
- if (mysql)
+ if (mysql && !called_connected)
mc_mysql_close(mysql);
if (nx_errno && thd->net.vio)
send_error(&thd->net, nx_errno, "Error in fetch_nx_table");
+ thd->net.no_send_ok = 0; // Clear up garbage after create_table_from_dump
return error;
}
@@ -460,7 +468,7 @@ int init_master_info(MASTER_INFO* mi)
MY_STAT stat_area;
char fname[FN_REFLEN+128];
const char *msg;
- fn_format(fname, master_info_file, mysql_data_home, "", 4+16+32);
+ fn_format(fname, master_info_file, mysql_data_home, "", 4+32);
// we need a mutex while we are changing master info parameters to
// keep other threads from reading bogus info
@@ -537,7 +545,9 @@ int init_master_info(MASTER_INFO* mi)
master_password) ||
init_intvar_from_file((int*)&mi->port, &mi->file, master_port) ||
init_intvar_from_file((int*)&mi->connect_retry, &mi->file,
- master_connect_retry))
+ master_connect_retry) ||
+ init_intvar_from_file((int*)&mi->last_log_seq, &mi->file, 0)
+ )
{
msg="Error reading master configuration";
goto error;
@@ -560,6 +570,44 @@ error:
return 1;
}
+int register_slave_on_master(MYSQL* mysql)
+{
+ String packet;
+ uint len;
+ char buf[4];
+
+ if(!report_host)
+ return 0;
+
+ int4store(buf, server_id);
+ packet.append(buf, 4);
+
+ net_store_data(&packet, report_host);
+ if(report_user)
+ net_store_data(&packet, report_user);
+ else
+ packet.append((char)0);
+
+ if(report_password)
+ net_store_data(&packet, report_user);
+ else
+ packet.append((char)0);
+
+ int2store(buf, (uint16)report_port);
+ packet.append(buf, 2);
+
+ if(mc_simple_command(mysql, COM_REGISTER_SLAVE, (char*)packet.ptr(),
+ packet.length(), 0))
+ {
+ sql_print_error("Error on COM_REGISTER_SLAVE: '%s'",
+ mc_mysql_error(mysql));
+ return 1;
+ }
+
+ return 0;
+}
+
+
int show_master_info(THD* thd)
{
DBUG_ENTER("show_master_info");
@@ -579,10 +627,12 @@ int show_master_info(THD* thd)
field_list.push_back(new Item_empty_string("Last_errno", 4));
field_list.push_back(new Item_empty_string("Last_error", 20));
field_list.push_back(new Item_empty_string("Skip_counter", 12));
+ field_list.push_back(new Item_empty_string("Last_log_seq", 12));
if(send_fields(thd, field_list, 1))
DBUG_RETURN(-1);
String* packet = &thd->packet;
+ uint32 last_log_seq;
packet->length(0);
pthread_mutex_lock(&glob_mi.lock);
@@ -591,7 +641,8 @@ int show_master_info(THD* thd)
net_store_data(packet, (uint32) glob_mi.port);
net_store_data(packet, (uint32) glob_mi.connect_retry);
net_store_data(packet, glob_mi.log_file_name);
- net_store_data(packet, (uint32) glob_mi.pos); // QQ: Should be fixed
+ net_store_data(packet, (longlong) glob_mi.pos);
+ last_log_seq = glob_mi.last_log_seq;
pthread_mutex_unlock(&glob_mi.lock);
pthread_mutex_lock(&LOCK_slave);
net_store_data(packet, slave_running ? "Yes":"No");
@@ -601,6 +652,7 @@ int show_master_info(THD* thd)
net_store_data(packet, (uint32)last_slave_errno);
net_store_data(packet, last_slave_error);
net_store_data(packet, slave_skip_counter);
+ net_store_data(packet, last_log_seq);
if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length()))
DBUG_RETURN(-1);
@@ -613,11 +665,13 @@ int flush_master_info(MASTER_INFO* mi)
{
IO_CACHE* file = &mi->file;
char lbuf[22];
+ char lbuf1[22];
my_b_seek(file, 0L);
- my_b_printf(file, "%s\n%s\n%s\n%s\n%s\n%d\n%d\n",
+ my_b_printf(file, "%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n",
mi->log_file_name, llstr(mi->pos, lbuf), mi->host, mi->user,
- mi->password, mi->port, mi->connect_retry);
+ mi->password, mi->port, mi->connect_retry,
+ llstr(mi->last_log_seq, lbuf1));
flush_io_cache(file);
return 0;
}
@@ -764,7 +818,7 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi)
return 0;
}
-static int request_table_dump(MYSQL* mysql, char* db, char* table)
+static int request_table_dump(MYSQL* mysql, const char* db, const char* table)
{
char buf[1024];
char * p = buf;
@@ -882,7 +936,10 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
thd->server_id = ev->server_id; // use the original server id for logging
thd->set_time(); // time the query
- if(!ev->when)
+ if(!thd->log_seq)
+ thd->log_seq = ev->log_seq;
+
+ if (!ev->when)
ev->when = time(NULL);
switch(type_code) {
@@ -901,7 +958,6 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
VOID(pthread_mutex_lock(&LOCK_thread_count));
thd->query_id = query_id++;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
- thd->last_nx_table = thd->last_nx_db = 0;
thd->query_error = 0; // clear error
thd->net.last_errno = 0;
thd->net.last_error[0] = 0;
@@ -909,36 +965,37 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
// sanity check to make sure the master did not get a really bad
// error on the query
- if(!check_expected_error(thd, (expected_error = qev->error_code)))
+ if (!check_expected_error(thd, (expected_error = qev->error_code)))
+ {
+ mysql_parse(thd, thd->query, q_len);
+ if (expected_error !=
+ (actual_error = thd->net.last_errno) && expected_error)
{
- mysql_parse(thd, thd->query, q_len);
- if (expected_error !=
- (actual_error = thd->net.last_errno) && expected_error)
- {
- const char* errmsg = "Slave: did not get the expected error\
- running query from master - expected: '%s', got '%s'";
- sql_print_error(errmsg, ER(expected_error),
- actual_error ? thd->net.last_error:"no error"
- );
- thd->query_error = 1;
- }
- else if (expected_error == actual_error)
- {
- thd->query_error = 0;
- *last_slave_error = 0;
- last_slave_errno = 0;
- }
+ const char* errmsg = "Slave: did not get the expected error\
+ running query from master - expected: '%s'(%d), got '%s'(%d)";
+ sql_print_error(errmsg, ER_SAFE(expected_error),
+ expected_error,
+ actual_error ? thd->net.last_error:"no error",
+ actual_error);
+ thd->query_error = 1;
}
- else // master could be inconsistent, abort and tell DBA to
- // check/fix it
+ else if (expected_error == actual_error)
{
- thd->db = thd->query = 0;
- thd->convert_set = 0;
- close_thread_tables(thd);
- free_root(&thd->mem_root,0);
- delete ev;
- return 1;
+ thd->query_error = 0;
+ *last_slave_error = 0;
+ last_slave_errno = 0;
}
+ }
+ else
+ {
+ // master could be inconsistent, abort and tell DBA to check/fix it
+ thd->db = thd->query = 0;
+ thd->convert_set = 0;
+ close_thread_tables(thd);
+ free_root(&thd->mem_root,0);
+ delete ev;
+ return 1;
+ }
}
thd->db = 0; // prevent db from being freed
thd->query = 0; // just to be sure
@@ -962,8 +1019,25 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
return 1;
}
free_root(&thd->mem_root,0);
+ mi->last_log_seq = ev->log_seq;
delete ev;
+ thd->log_seq = 0;
+ mi->inc_pos(event_len);
+ flush_master_info(mi);
+ break;
+ }
+ case SLAVE_EVENT:
+ {
+ if(mysql_bin_log.is_open())
+ {
+ Slave_log_event *sev = (Slave_log_event*)ev;
+ mysql_bin_log.write(sev);
+ }
+
+ mi->last_log_seq = ev->log_seq;
+ delete ev;
+ thd->log_seq = 0;
mi->inc_pos(event_len);
flush_master_info(mi);
break;
@@ -1076,7 +1150,9 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
return 1;
}
+ mi->last_log_seq = ev->log_seq;
delete ev;
+ thd->log_seq = 0;
free_root(&thd->mem_root,0);
if(thd->fatal_error)
@@ -1094,8 +1170,10 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
case START_EVENT:
close_temporary_tables(thd);
mi->inc_pos(event_len);
+ mi->last_log_seq = ev->log_seq;
flush_master_info(mi);
delete ev;
+ thd->log_seq = 0;
break;
case STOP_EVENT:
@@ -1105,24 +1183,56 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
mi->inc_pos(event_len);
flush_master_info(mi);
}
+ mi->last_log_seq = ev->log_seq;
delete ev;
+ thd->log_seq = 0;
break;
case ROTATE_EVENT:
{
Rotate_log_event* rev = (Rotate_log_event*)ev;
int ident_len = rev->ident_len;
+ bool rotate_binlog = 0, write_slave_event = 0;
+ char* log_name = mi->log_file_name;
pthread_mutex_lock(&mi->lock);
- memcpy(mi->log_file_name, rev->new_log_ident,ident_len );
- mi->log_file_name[ident_len] = 0;
- mi->pos = 4; // skip magic number
+
+ // rotate local binlog only if the name of remote has changed
+ if (!*log_name || !(log_name[ident_len] == 0 &&
+ !memcmp(log_name, rev->new_log_ident, ident_len)))
+ {
+ write_slave_event = (!(rev->flags & LOG_EVENT_FORCED_ROTATE_F)
+ && mysql_bin_log.is_open());
+ rotate_binlog = (*log_name && write_slave_event);
+ memcpy(log_name, rev->new_log_ident,ident_len );
+ log_name[ident_len] = 0;
+ }
+ mi->pos = rev->pos;
+ mi->last_log_seq = ev->log_seq;
pthread_cond_broadcast(&mi->cond);
pthread_mutex_unlock(&mi->lock);
- flush_master_info(mi);
#ifndef DBUG_OFF
- if(abort_slave_event_count)
+ if (abort_slave_event_count)
++events_till_abort;
-#endif
+#endif
+ if (rotate_binlog)
+ {
+ mi->last_log_seq = 0;
+ mysql_bin_log.new_file();
+ }
+ flush_master_info(mi);
+
+ if (write_slave_event)
+ {
+ Slave_log_event s(thd, mi);
+ if (s.master_host)
+ {
+ s.set_log_seq(0, &mysql_bin_log);
+ s.server_id = ::server_id;
+ mysql_bin_log.write(&s);
+ }
+ }
+
delete ev;
+ thd->log_seq = 0;
break;
}
@@ -1142,6 +1252,7 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
}
mi->inc_pending(event_len);
delete ev;
+ // do not reset log_seq
break;
}
}
@@ -1237,6 +1348,14 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
sql_print_error("Slave thread killed while connecting to master");
goto err;
}
+
+connected:
+
+ // register ourselves with the master
+ // if fails, this is not fatal - we just print the error message and go
+ // on with life
+ thd->proc_info = "Registering slave on master";
+ register_slave_on_master(mysql);
while (!slave_killed(thd))
{
@@ -1280,7 +1399,7 @@ try again, log '%s' at postion %s", RPL_LOG_NAME,
goto err;
}
- continue;
+ goto connected;
}
@@ -1331,8 +1450,9 @@ reconnecting to retry, log '%s' position %s", RPL_LOG_NAME,
reconnect done to recover from failed read");
goto err;
}
- break;
- }
+
+ goto connected;
+ } // if(event_len == packet_error)
thd->proc_info = "Processing master log event";
if(exec_event(thd, &mysql->net, &glob_mi, event_len))
@@ -1369,15 +1489,14 @@ the slave thread with \"mysqladmin start-slave\". We stopped at log \
{
// show a little mercy, allow slave to read one more event
// before cutting him off - otherwise he gets stuck
- // on Invar events, since they do not advance the offset
+ // on Intvar events, since they do not advance the offset
// immediately
if (++stuck_count > 2)
events_till_disconnect++;
}
#endif
-
- }
- }
+ } // while(!slave_killed(thd)) - read/exec loop
+ } // while(!slave_killed(thd)) - slave loop
// error = 0;
err:
diff --git a/sql/slave.h b/sql/slave.h
index 311368a4b82..d9131bb53be 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -1,6 +1,8 @@
#ifndef SLAVE_H
#define SLAVE_H
+#include "mysql.h"
+
typedef struct st_master_info
{
char log_file_name[FN_REFLEN];
@@ -13,11 +15,12 @@ typedef struct st_master_info
char password[HASH_PASSWORD_LENGTH+1];
uint port;
uint connect_retry;
+ uint32 last_log_seq; // log sequence number of last processed event
pthread_mutex_t lock;
pthread_cond_t cond;
bool inited;
- st_master_info():pending(0),fd(-1),inited(0)
+ st_master_info():pending(0),fd(-1),last_log_seq(0),inited(0)
{
host[0] = 0; user[0] = 0; password[0] = 0;
pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);
@@ -64,12 +67,16 @@ typedef struct st_table_rule_ent
#define TABLE_RULE_ARR_SIZE 16
int flush_master_info(MASTER_INFO* mi);
+int register_slave_on_master(MYSQL* mysql);
-int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd = -1);
+int mysql_table_dump(THD* thd, const char* db,
+ const char* tbl_name, int fd = -1);
// if fd is -1, dump to NET
-int fetch_nx_table(THD* thd, MASTER_INFO* mi);
+
+int fetch_nx_table(THD* thd, const char* db_name, const char* table_name,
+ MASTER_INFO* mi, MYSQL* mysql);
// retrieve non-exitent table from master
-// the caller must set thd->last_nx_table and thd->last_nx_db first
+
int show_master_info(THD* thd);
int show_binlog_info(THD* thd);
@@ -112,9 +119,9 @@ extern int disconnect_slave_event_count, abort_slave_event_count ;
#endif
// the master variables are defaults read from my.cnf or command line
-extern uint master_port, master_connect_retry;
+extern uint master_port, master_connect_retry, report_port;
extern my_string master_user, master_password, master_host,
- master_info_file;
+ master_info_file, report_user, report_host, report_password;
extern I_List<i_string> replicate_do_db, replicate_ignore_db;
extern I_List<i_string_pair> replicate_rewrite_db;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 37a14f02bcc..ea3d77c5158 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -384,6 +384,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
thd->in_lock_tables=1;
result=reopen_tables(thd,1,1);
thd->in_lock_tables=0;
+ /* Set version for table */
+ for (TABLE *table=thd->open_tables; table ; table=table->next)
+ table->version=refresh_version;
}
VOID(pthread_mutex_unlock(&LOCK_open));
if (if_wait_for_refresh)
@@ -501,11 +504,12 @@ void close_temporary(TABLE *table,bool delete_table)
void close_temporary_tables(THD *thd)
{
TABLE *table,*next;
- uint init_query_buf_size = 11, query_buf_size; // "drop table "
- char* query, *p;
+ char *query, *end;
+ const uint init_query_buf_size = 11; // "drop table "
+ uint query_buf_size;
bool found_user_tables = 0;
- LINT_INIT(p);
+ LINT_INIT(end);
query_buf_size = init_query_buf_size;
for (table=thd->temporary_tables ; table ; table=table->next)
@@ -513,37 +517,37 @@ void close_temporary_tables(THD *thd)
query_buf_size += table->key_length;
}
- if(query_buf_size == init_query_buf_size)
+ if (query_buf_size == init_query_buf_size)
return; // no tables to close
- if((query = alloc_root(&thd->mem_root, query_buf_size)))
- {
- memcpy(query, "drop table ", init_query_buf_size);
- p = query + init_query_buf_size;
- }
+ if ((query = alloc_root(&thd->mem_root, query_buf_size)))
+ {
+ memcpy(query, "drop table ", init_query_buf_size);
+ end = query + init_query_buf_size;
+ }
for (table=thd->temporary_tables ; table ; table=next)
{
- if(query) // we might be out of memory, but this is not fatal
+ if (query) // we might be out of memory, but this is not fatal
+ {
+ // skip temporary tables not created directly by the user
+ if (table->table_name[0] != '#')
{
- // skip temporary tables not created directly by the user
- if(table->table_name[0] != '#')
- {
- p = strxmov(p,table->table_cache_key,".",
- table->table_name,",", NullS);
- // here we assume table_cache_key always starts
- // with \0 terminated db name
- found_user_tables = 1;
- }
+ end = strxmov(end,table->table_cache_key,".",
+ table->table_name,",", NullS);
+ // here we assume table_cache_key always starts
+ // with \0 terminated db name
+ found_user_tables = 1;
}
+ }
next=table->next;
close_temporary(table);
}
if (query && found_user_tables && mysql_bin_log.is_open())
{
uint save_query_len = thd->query_length;
- *--p = 0;
- thd->query_length = (uint)(p-query);
+ *--end = 0; // Remove last ','
+ thd->query_length = (uint)(end-query);
Query_log_event qinfo(thd, query);
mysql_bin_log.write(&qinfo);
thd->query_length = save_query_len;
@@ -837,25 +841,6 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
!(table->table_cache_key=memdup_root(&table->mem_root,(char*) key,
key_length)))
{
- MEM_ROOT* glob_alloc;
- LINT_INIT(glob_alloc);
-
- if (errno == ENOENT &&
- (glob_alloc = my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC)))
- // Sasha: needed for replication
- // remember the name of the non-existent table
- // so we can try to download it from the master
- {
- int table_name_len = (uint) strlen(table_name);
- int db_len = (uint) strlen(db);
- thd->last_nx_db = alloc_root(glob_alloc,db_len + table_name_len + 2);
- if(thd->last_nx_db)
- {
- thd->last_nx_table = thd->last_nx_db + db_len + 1;
- memcpy(thd->last_nx_table, table_name, table_name_len + 1);
- memcpy(thd->last_nx_db, db, db_len + 1);
- }
- }
table->next=table->prev=table;
free_cache_entry(table);
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -1394,11 +1379,6 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type)
bool refresh;
DBUG_ENTER("open_ltable");
-#ifdef __WIN__
- /* Win32 can't drop a file that is open */
- if (lock_type == TL_WRITE_ALLOW_READ)
- lock_type= TL_WRITE;
-#endif
thd->proc_info="Opening table";
while (!(table=open_table(thd,table_list->db ? table_list->db : thd->db,
table_list->real_name,table_list->name,
@@ -1406,6 +1386,19 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type)
if (table)
{
int error;
+
+#ifdef __WIN__
+ /* Win32 can't drop a file that is open */
+ if (lock_type == TL_WRITE_ALLOW_READ
+#ifdef HAVE_GEMINI_DB
+ && table->db_type != DB_TYPE_GEMINI
+#endif /* HAVE_GEMINI_DB */
+ )
+ {
+ lock_type= TL_WRITE;
+ }
+#endif /* __WIN__ */
+
table_list->table=table;
table->grant= table_list->grant;
if (thd->locked_tables)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ffcb15b4c9b..f196e4ff852 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -49,6 +49,8 @@ template class List<Alter_drop>;
template class List_iterator<Alter_drop>;
template class List<Alter_column>;
template class List_iterator<Alter_column>;
+template class List<Set_option>;
+template class List_iterator<Set_option>;
#endif
/****************************************************************************
@@ -96,7 +98,7 @@ THD::THD():user_time(0),fatal_error(0),last_insert_id_used(0),
current_linfo = 0;
slave_thread = 0;
slave_proxy_id = 0;
- last_nx_table = last_nx_db = 0;
+ log_seq = 0;
cond_count=0;
convert_set=0;
mysys_var=0;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 603e4bdeeb9..14055a1444c 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -23,6 +23,7 @@
class Query_log_event;
class Load_log_event;
+class Slave_log_event;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY };
@@ -62,11 +63,15 @@ class MYSQL_LOG {
char time_buff[20],db[NAME_LEN+1];
char log_file_name[FN_REFLEN],index_file_name[FN_REFLEN];
bool write_error,inited;
+ uint32 log_seq; // current event sequence number
+ // needed this for binlog
bool no_rotate; // for binlog - if log name can never change
// we should not try to rotate it or write any rotation events
// the user should use FLUSH MASTER instead of FLUSH LOGS for
// purging
+ friend class Log_event;
+
public:
MYSQL_LOG();
~MYSQL_LOG();
@@ -83,6 +88,7 @@ public:
time_t query_start=0);
bool write(Query_log_event* event_info); // binary log write
bool write(Load_log_event* event_info);
+ bool write(Slave_log_event* event_info);
bool write(IO_CACHE *cache);
int generate_new_name(char *new_name,const char *old_name);
void make_log_name(char* buf, const char* log_ident);
@@ -241,9 +247,8 @@ public:
struct st_my_thread_var *mysys_var;
enum enum_server_command command;
uint32 server_id;
+ uint32 log_seq;
const char *where;
- char* last_nx_table; // last non-existent table, we need this for replication
- char* last_nx_db; // database of the last nx table
time_t start_time,time_after_lock,user_time;
time_t connect_time,thr_create_time; // track down slow pthread_create
thr_lock_type update_lock_default;
@@ -405,6 +410,8 @@ public:
** This is used to get result from a select
*/
+class JOIN;
+
class select_result :public Sql_alloc {
protected:
THD *thd;
@@ -414,6 +421,7 @@ public:
virtual int prepare(List<Item> &list) { return 0; }
virtual bool send_fields(List<Item> &list,uint flag)=0;
virtual bool send_data(List<Item> &items)=0;
+ virtual void initialize_tables (JOIN *join=0) {};
virtual void send_error(uint errcode,const char *err)=0;
virtual bool send_eof()=0;
virtual void abort() {}
@@ -466,8 +474,6 @@ public:
void send_error(uint errcode,const char *err);
bool send_eof();
};
-
-
class select_insert :public select_result {
protected:
TABLE *table;
@@ -580,19 +586,48 @@ class Unique :public Sql_alloc
public:
ulong elements;
- Unique(qsort_cmp2 comp_func, uint size, ulong max_in_memory_size_arg);
+ Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
+ uint size, ulong max_in_memory_size_arg);
~Unique();
inline bool Unique::unique_add(gptr ptr)
{
if (tree.elements_in_tree > max_elements && flush())
return 1;
- return tree_insert(&tree,ptr,0);
+ return !tree_insert(&tree,ptr,0);
}
bool get(TABLE *table);
- friend int unique_write_to_file(gptr key, Unique *unique,
- element_count count);
- friend int unique_write_to_ptrs(gptr key, Unique *unique,
- element_count count);
+ friend int unique_write_to_file(gptr key, element_count count, Unique *unique);
+ friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique);
};
+
+ class multi_delete : public select_result {
+ TABLE_LIST *delete_tables, *table_being_deleted;
+#ifdef SINISAS_STRIP
+ IO_CACHE **tempfiles;
+ byte *memory_lane;
+#else
+ Unique **tempfiles;
+#endif
+ THD *thd;
+ ha_rows deleted;
+ uint num_of_tables;
+ int error;
+ thr_lock_type lock_option;
+ bool do_delete;
+ public:
+ multi_delete(THD *thd, TABLE_LIST *dt, thr_lock_type lock_option_arg,
+ uint num_of_tables);
+ ~multi_delete();
+ int prepare(List<Item> &list);
+ bool send_fields(List<Item> &list,
+ uint flag) { return 0; }
+ bool send_data(List<Item> &items);
+ void initialize_tables (JOIN *join);
+ void send_error(uint errcode,const char *err);
+ int do_deletes (bool from_send_error);
+ bool send_eof();
+ };
+
+
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 5243498f7fc..85d3f0a344c 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -30,14 +30,41 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *path,
/* db-name is already validated when we come here */
-void mysql_create_db(THD *thd, char *db, uint create_options)
+int mysql_create_db(THD *thd, char *db, uint create_options)
{
char path[FN_REFLEN+16];
MY_DIR *dirp;
long result=1;
+ int error = 0;
DBUG_ENTER("mysql_create_db");
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
+ VOID(pthread_mutex_lock(&LOCK_open));
+
+ // do not create database if another thread is holding read lock
+ if (global_read_lock)
+ {
+ if (thd->global_read_lock)
+ {
+ net_printf(&thd->net, ER_CREATE_DB_WITH_READ_LOCK);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ goto exit;
+ }
+ while (global_read_lock && ! thd->killed)
+ {
+ (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
+ }
+
+ if (thd->killed)
+ {
+ net_printf(&thd->net, ER_SERVER_SHUTDOWN);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ goto exit;
+ }
+
+ }
+
+ VOID(pthread_mutex_unlock(&LOCK_open));
/* Check directory */
(void)sprintf(path,"%s/%s", mysql_data_home, db);
@@ -47,7 +74,9 @@ void mysql_create_db(THD *thd, char *db, uint create_options)
my_dirend(dirp);
if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS))
{
- net_printf(&thd->net,ER_DB_CREATE_EXISTS,db);
+ if(thd)
+ net_printf(&thd->net,ER_DB_CREATE_EXISTS,db);
+ error = 1;
goto exit;
}
result = 0;
@@ -57,34 +86,39 @@ void mysql_create_db(THD *thd, char *db, uint create_options)
strend(path)[-1]=0; // Remove last '/' from path
if (my_mkdir(path,0777,MYF(0)) < 0)
{
- net_printf(&thd->net,ER_CANT_CREATE_DB,db,my_errno);
+ if(thd)
+ net_printf(&thd->net,ER_CANT_CREATE_DB,db,my_errno);
+ error = 1;
goto exit;
}
}
- if (!thd->query)
- {
- thd->query = path;
- thd->query_length = (uint) (strxmov(path,"create database ", db, NullS)-
- path);
- }
+
+ if(thd)
{
- mysql_update_log.write(thd,thd->query, thd->query_length);
- if (mysql_bin_log.is_open())
+ if (!thd->query)
{
- Query_log_event qinfo(thd, thd->query);
- mysql_bin_log.write(&qinfo);
+ thd->query = path;
+ thd->query_length = (uint) (strxmov(path,"create database ", db, NullS)-
+ path);
}
+ {
+ mysql_update_log.write(thd,thd->query, thd->query_length);
+ if (mysql_bin_log.is_open())
+ {
+ Query_log_event qinfo(thd, thd->query);
+ mysql_bin_log.write(&qinfo);
+ }
+ }
+ if (thd->query == path)
+ {
+ thd->query = 0; // just in case
+ thd->query_length = 0;
+ }
+ send_ok(&thd->net, result);
}
- if (thd->query == path)
- {
- thd->query = 0; // just in case
- thd->query_length = 0;
- }
- send_ok(&thd->net, result);
-
exit:
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
const char *del_exts[]=
@@ -94,10 +128,14 @@ static TYPELIB deletable_extentions=
/* db-name is already validated when we come here */
-
-void mysql_rm_db(THD *thd,char *db,bool if_exists)
+/* If thd == 0, do not write any messages
+ This is useful in replication when we want to remove
+ a stale database before replacing it with the new one
+*/
+int mysql_rm_db(THD *thd,char *db,bool if_exists)
{
long deleted=0;
+ int error = 0;
char path[FN_REFLEN+16];
MY_DIR *dirp;
DBUG_ENTER("mysql_rm_db");
@@ -105,20 +143,44 @@ void mysql_rm_db(THD *thd,char *db,bool if_exists)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
VOID(pthread_mutex_lock(&LOCK_open));
+ // do not drop database if another thread is holding read lock
+ if (global_read_lock)
+ {
+ if (thd->global_read_lock)
+ {
+ net_printf(&thd->net, ER_DROP_DB_WITH_READ_LOCK);
+ goto exit;
+ }
+ while (global_read_lock && ! thd->killed)
+ {
+ (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
+ }
+
+ if (thd->killed)
+ {
+ net_printf(&thd->net, ER_SERVER_SHUTDOWN);
+ goto exit;
+ }
+ }
+
(void) sprintf(path,"%s/%s",mysql_data_home,db);
unpack_dirname(path,path); // Convert if not unix
/* See if the directory exists */
if (!(dirp = my_dir(path,MYF(MY_WME | MY_DONT_SORT))))
{
- if (!if_exists)
- net_printf(&thd->net,ER_DB_DROP_EXISTS,db);
- else
- send_ok(&thd->net,0);
+ if(thd)
+ {
+ if (!if_exists)
+ net_printf(&thd->net,ER_DB_DROP_EXISTS,db);
+ else
+ send_ok(&thd->net,0);
+ }
+ error = !if_exists;
goto exit;
}
remove_db_from_cache(db);
- if ((deleted=mysql_rm_known_files(thd, dirp, path,0)) >= 0)
+ if ((deleted=mysql_rm_known_files(thd, dirp, path,0)) >= 0 && thd)
{
if (!thd->query)
{
@@ -137,13 +199,14 @@ void mysql_rm_db(THD *thd,char *db,bool if_exists)
thd->query = 0; // just in case
thd->query_length = 0;
}
+
send_ok(&thd->net,(ulong) deleted);
}
exit:
VOID(pthread_mutex_unlock(&LOCK_open));
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
/*
@@ -151,6 +214,7 @@ exit:
are 2 digits (raid directories).
*/
+/* This one also needs to work with thd == 0 for replication */
static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
uint level)
{
@@ -162,7 +226,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
/* remove all files with known extensions */
for (uint idx=2 ;
- idx < (uint) dirp->number_off_files && !thd->killed ;
+ idx < (uint) dirp->number_off_files && (!thd || !thd->killed) ;
idx++)
{
FILEINFO *file=dirp->dir_entry+idx;
@@ -194,9 +258,10 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
}
strxmov(filePath,org_path,"/",file->name,NullS);
unpack_filename(filePath,filePath);
- if (my_delete(filePath,MYF(MY_WME)))
+ if (my_delete_with_symlink(filePath,MYF(MY_WME)))
{
- net_printf(&thd->net,ER_DB_DROP_DELETE,filePath,my_error);
+ if(thd)
+ net_printf(&thd->net,ER_DB_DROP_DELETE,filePath,my_error);
my_dirend(dirp);
DBUG_RETURN(-1);
}
@@ -205,7 +270,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
my_dirend(dirp);
- if (thd->killed)
+ if (thd && thd->killed)
{
send_error(&thd->net,ER_SERVER_SHUTDOWN);
DBUG_RETURN(-1);
@@ -229,7 +294,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
/* Don't give errors if we can't delete 'RAID' directory */
if (level)
DBUG_RETURN(deleted);
- send_error(&thd->net);
+ if(thd)
+ send_error(&thd->net);
DBUG_RETURN(-1);
}
path=filePath;
@@ -242,7 +308,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
/* Don't give errors if we can't delete 'RAID' directory */
if (rmdir(path) < 0 && !level)
{
- net_printf(&thd->net,ER_DB_DROP_RMDIR, path,errno);
+ if(thd)
+ net_printf(&thd->net,ER_DB_DROP_RMDIR, path,errno);
DBUG_RETURN(-1);
}
}
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 85b1c947460..0f70bd71ddd 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB & Sinisa
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
#include "mysql_priv.h"
#include "ha_innobase.h"
+#include "sql_select.h"
/*
Optimize delete of all rows by doing a full generate of the table
@@ -151,7 +152,7 @@ int mysql_delete(THD *thd,
#ifdef HAVE_INNOBASE_DB
/* We need to add code to not generate table based on the table type */
if (!innodb_skip)
- use_generate_table=0; // Innodb can't use re-generate table
+ use_generate_table=0; // Innobase can't use re-generate table
#endif
if (use_generate_table && ! thd->open_tables)
{
@@ -186,7 +187,7 @@ int mysql_delete(THD *thd,
/* If running in safe sql mode, don't allow updates without keys */
if (!table->quick_keys)
{
- thd->lex.options|=QUERY_NO_INDEX_USED;
+ thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
if ((thd->options & OPTION_SAFE_UPDATES) && limit == HA_POS_ERROR)
{
delete select;
@@ -214,7 +215,7 @@ int mysql_delete(THD *thd,
MYF(MY_FAE | MY_ZEROFILL));
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
- (table->found_records = filesort(&table, sortorder, length,
+ (table->found_records = filesort(table, sortorder, length,
(SQL_SELECT *) 0, 0L, HA_POS_ERROR,
&examined_rows))
== HA_POS_ERROR)
@@ -286,3 +287,666 @@ int mysql_delete(THD *thd,
}
+/***************************************************************************
+** delete multiple tables from join
+***************************************************************************/
+
+#define MEM_STRIP_BUF_SIZE sortbuff_size
+
+#ifndef SINISAS_STRIP
+int refposcmp2(void* arg, const void *a,const void *b)
+{
+ return memcmp(a,b,(int) arg);
+}
+#endif
+
+multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt,
+ thr_lock_type lock_option_arg,
+ uint num_of_tables_arg)
+ : delete_tables (dt), thd(thd_arg), deleted(0),
+ num_of_tables(num_of_tables_arg), error(0), lock_option(lock_option_arg),
+ do_delete(false)
+{
+ uint counter=0;
+#ifdef SINISAS_STRIP
+ tempfiles = (IO_CACHE **) sql_calloc(sizeof(IO_CACHE *)* num_of_tables);
+ memory_lane = (byte *)sql_alloc(MAX_REFLENGTH*MEM_STRIP_BUF_SIZE);
+#else
+ tempfiles = (Unique **) sql_calloc(sizeof(Unique *) * (num_of_tables-1));
+#endif
+
+ (void) dt->table->file->extra(HA_EXTRA_NO_READCHECK);
+ (void) dt->table->file->extra(HA_EXTRA_NO_KEYREAD);
+ /* Don't use key read with MULTI-TABLE-DELETE */
+ dt->table->used_keys=0;
+ for (dt=dt->next ; dt ; dt=dt->next,counter++)
+ {
+ TABLE *table=dt->table;
+ (void) dt->table->file->extra(HA_EXTRA_NO_READCHECK);
+ (void) dt->table->file->extra(HA_EXTRA_NO_KEYREAD);
+#ifdef SINISAS_STRIP
+ tempfiles[counter]=(IO_CACHE *) sql_alloc(sizeof(IO_CACHE));
+ if (open_cached_file(tempfiles[counter], mysql_tmpdir,TEMP_PREFIX,
+ DISK_BUFFER_SIZE, MYF(MY_WME)))
+ {
+ my_error(ER_CANT_OPEN_FILE,MYF(0),(tempfiles[counter])->file_name,errno);
+ thd->fatal_error=1;
+ return;
+ }
+#else
+ tempfiles[counter] = new Unique (refposcmp2,
+ (void *) table->file->ref_length,
+ table->file->ref_length,
+ MEM_STRIP_BUF_SIZE);
+#endif
+ }
+}
+
+
+int
+multi_delete::prepare(List<Item> &values)
+{
+ DBUG_ENTER("multi_delete::prepare");
+ do_delete = true;
+ thd->proc_info="deleting from main table";
+
+ if (thd->options & OPTION_SAFE_UPDATES)
+ {
+ TABLE_LIST *table_ref;
+ for (table_ref=delete_tables; table_ref; table_ref=table_ref->next)
+ {
+ TABLE *table=table_ref->table;
+ if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
+ {
+ my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+inline static void
+link_in_list(SQL_LIST *list,byte *element,byte **next)
+{
+ list->elements++;
+ (*list->next)=element;
+ list->next=next;
+ *next=0;
+}
+
+void
+multi_delete::initialize_tables(JOIN *join)
+{
+ SQL_LIST *new_list=(SQL_LIST *) sql_alloc(sizeof(SQL_LIST));
+ new_list->elements=0; new_list->first=0;
+ new_list->next= (byte**) &(new_list->first);
+ for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
+ tab < end;
+ tab++)
+ {
+ TABLE_LIST *walk;
+ for (walk=(TABLE_LIST*) delete_tables ; walk ; walk=walk->next)
+ if (!strcmp(tab->table->path,walk->table->path))
+ break;
+ if (walk) // Table need not be the one to be deleted
+ {
+ register TABLE_LIST *ptr = (TABLE_LIST *) sql_alloc(sizeof(TABLE_LIST));
+ memcpy(ptr,walk,sizeof(TABLE_LIST)); ptr->next=0;
+ link_in_list(new_list,(byte*) ptr,(byte**) &ptr->next);
+ }
+ }
+ delete_tables=(TABLE_LIST *)new_list->first;
+ return;
+}
+
+multi_delete::~multi_delete()
+{
+
+ /* Add back EXTRA_READCHECK; In 4.0.1 we shouldn't need this anymore */
+ for (table_being_deleted=delete_tables ;
+ table_being_deleted ;
+ table_being_deleted=table_being_deleted->next)
+ {
+ VOID(table_being_deleted->table->file->extra(HA_EXTRA_READCHECK));
+ }
+ for (uint counter = 0; counter < num_of_tables-1; counter++)
+ {
+ if (tempfiles[counter])
+ {
+#ifdef SINISAS_STRIP
+ end_io_cache(tempfiles[counter]);
+#else
+ delete tempfiles[counter];
+#endif
+ }
+ }
+}
+
+
+bool multi_delete::send_data(List<Item> &values)
+{
+ int secure_counter= -1;
+ for (table_being_deleted=delete_tables ;
+ table_being_deleted ;
+ table_being_deleted=table_being_deleted->next, secure_counter++)
+ {
+ TABLE *table=table_being_deleted->table;
+
+ /* Check if we are using outer join and we didn't find the row */
+ if (table->status & (STATUS_NULL_ROW | STATUS_DELETED))
+ continue;
+
+ table->file->position(table->record[0]);
+ int rl = table->file->ref_length;
+
+ if (secure_counter < 0)
+ {
+ table->status|= STATUS_DELETED;
+ if (!(error=table->file->delete_row(table->record[0])))
+ deleted++;
+ else
+ {
+ table->file->print_error(error,MYF(0));
+ return 1;
+ }
+ }
+ else
+ {
+#ifdef SINISAS_STRIP
+ error=my_b_write(tempfiles[secure_counter],table->file->ref,rl);
+#else
+ error=tempfiles[secure_counter]->unique_add(table->file->ref);
+#endif
+ if (error)
+ {
+ error=-1;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+
+#ifdef SINISAS_STRIP
+static inline int COMP (byte *ml,uint len,unsigned int left, unsigned int right)
+{
+ return memcmp(ml + left*len,ml + right*len,len);
+}
+
+#define EX(ML,LEN,LLLEFT,RRRIGHT) \
+ptr1 = ML + LLLEFT*LEN;\
+ptr2 = ML + RRRIGHT*LEN;\
+memcpy(tmp,ptr1,LEN);\
+memcpy(ptr1,ptr2,LEN);\
+memcpy(ptr2,tmp,LEN);\
+
+
+
+static void qsort_mem_pieces(byte *ml, uint length, unsigned short pivotP, unsigned int nElem)
+{
+ unsigned int leftP, rightP, pivotEnd, pivotTemp, leftTemp;
+ unsigned int lNum; byte tmp [MAX_REFLENGTH], *ptr1, *ptr2;
+ int retval;
+tailRecursion:
+ if (nElem <= 1) return;
+ if (nElem == 2)
+ {
+ if (COMP(ml,length,pivotP, rightP = pivotP + 1) > 0)
+ {
+ EX(ml,length,pivotP, rightP);
+ }
+ return;
+ }
+
+ rightP = (nElem - 1) + pivotP;
+ leftP = (nElem >> 1) + pivotP;
+
+/* sort the pivot, left, and right elements for "median of 3" */
+
+ if (COMP (ml,length,leftP, rightP) > 0)
+ {
+ EX (ml,length,leftP, rightP);
+ }
+ if (COMP (ml,length,leftP, pivotP) > 0)
+ {
+ EX (ml,length,leftP, pivotP);
+ }
+ else if (COMP (ml,length, pivotP, rightP) > 0)
+ {
+ EX (ml,length,pivotP, rightP);
+ }
+
+ if (nElem == 3) {
+ EX (ml,length,pivotP, leftP);
+ return;
+ }
+
+/* now for the classic Hoare algorithm */
+
+ leftP = pivotEnd = pivotP + 1;
+
+ do {
+ while ((retval = COMP (ml,length, leftP, pivotP)) <= 0)
+ {
+ if (retval == 0) {
+ EX(ml,length,leftP, pivotEnd);
+ pivotEnd++;
+ }
+ if (leftP < rightP)
+ leftP++;
+ else
+ goto qBreak;
+ }
+ while (leftP < rightP) {
+ if ((retval = COMP(ml,length,pivotP, rightP)) < 0)
+ rightP--;
+ else
+ {
+ EX (ml,length,leftP, rightP);
+ if (retval != 0) {
+ leftP++;
+ rightP--;
+ }
+ break;
+ }
+ }
+ } while (leftP < rightP);
+
+qBreak:
+
+ if (COMP(ml,length,leftP, pivotP) <= 0)
+ leftP++;
+
+ leftTemp = leftP - 1; pivotTemp = pivotP;
+
+ while ((pivotTemp < pivotEnd) && (leftTemp >= pivotEnd))
+ {
+ EX(ml,length,pivotTemp, leftTemp);
+ pivotTemp++; leftTemp--;
+ }
+
+ lNum = leftP - pivotEnd; nElem = (nElem + pivotP) - leftP;
+
+ /* Sort smaller partition first to reduce stack usage */
+ if (nElem < lNum)
+ {
+ qsort_mem_pieces(ml,length,leftP, nElem); nElem = lNum;
+ }
+ else
+ {
+ qsort_mem_pieces(ml,length,pivotP, lNum);
+ pivotP = leftP;
+ }
+ goto tailRecursion;
+}
+
+static byte * btree_search(byte *lane, byte *key,register int last, uint length)
+{
+ register int first = 0;
+ if (last == first)
+ {
+ if (!memcmp(lane,key,length)) return lane;
+ return (byte *)0;
+ }
+Recursion_is_too_slow:
+ if (last - first < 3)
+ {
+ if (!memcmp(lane + first*length,key,length)) return lane + first * length;
+ if (last == first + 1) return (byte *)0;
+ if (!memcmp(lane + last*length,key,length)) return lane + last * length;
+ return (byte *)0;
+ }
+ else
+ {
+ int half = first + (last - first)/2;
+ int result = memcmp(lane + half*length,key,length);
+ if (!result) return lane + half*length;
+ if (result < 0)
+ {
+ first = half + 1; goto Recursion_is_too_slow;
+ }
+ else
+ {
+ last = half + 1; goto Recursion_is_too_slow;
+ }
+ }
+}
+
+struct written_block {
+ byte first[MAX_REFLENGTH], last[MAX_REFLENGTH];
+ my_off_t offset;
+ uint how_many;
+};
+
+static IO_CACHE *strip_duplicates_from_temp (byte *memory_lane, IO_CACHE *ptr, uint ref_length, int *written)
+{
+ byte *mem_ptr; my_off_t off = 0;
+ int read_error, write_error, how_many_to_read, total_to_read = *written, pieces_in_memory = 0, mem_count,written_rows;
+ int offset = written_rows=*written=0;
+ int mem_pool_size = MEM_STRIP_BUF_SIZE * MAX_REFLENGTH / ref_length;
+ byte dup_record[MAX_REFLENGTH]; memset(dup_record,'\xFF',MAX_REFLENGTH);
+ if (reinit_io_cache(ptr,READ_CACHE,0L,0,0))
+ return ptr;
+ IO_CACHE *tempptr = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL));
+ if (open_cached_file(tempptr, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)))
+ {
+ my_free((gptr) tempptr, MYF (0));
+ return ptr;
+ }
+ DYNAMIC_ARRAY written_blocks;
+ VOID(init_dynamic_array(&written_blocks,sizeof(struct written_block),20,50));
+ for (;pieces_in_memory < total_to_read;)
+ {
+ how_many_to_read = total_to_read - pieces_in_memory; read_error=write_error=0;
+ if (how_many_to_read > mem_pool_size)
+ how_many_to_read = mem_pool_size;
+ if (my_b_read(ptr, memory_lane, (uint) how_many_to_read * ref_length))
+ {
+ read_error = 1;
+ break;
+ }
+ pieces_in_memory += how_many_to_read;
+ qsort_mem_pieces(memory_lane,0, how_many_to_read, ref_length);
+ byte *checking = dup_record, *cursor=NULL, *mem_end = memory_lane + how_many_to_read * ref_length;
+ int opt_unique_pieces, unique_pieces_in_memory=0; write_error=0;
+ for (mem_ptr=memory_lane; mem_ptr < mem_end ; mem_ptr += ref_length)
+ {
+ if (memcmp(mem_ptr,checking, ref_length))
+ {
+ if (cursor)
+ {
+ memmove(cursor,mem_ptr,mem_end - mem_ptr);
+ mem_end -= mem_ptr - cursor;
+ mem_ptr = cursor; cursor = NULL;
+ }
+ unique_pieces_in_memory++;
+ checking = mem_ptr;
+ }
+ else if (!cursor) cursor = mem_ptr;
+ }
+ opt_unique_pieces=unique_pieces_in_memory;
+ if (written_rows)
+ {
+ if (reinit_io_cache(tempptr,READ_CACHE,0L,0,0)) {write_error = -1; break;}
+ for (uint i=0 ; i < written_blocks.elements ; i++)
+ {
+ struct written_block *wbp=dynamic_element(&written_blocks,i,struct written_block*);
+ if ((memcmp(memory_lane,wbp->last,ref_length) == 1) || (memcmp(memory_lane + (unique_pieces_in_memory - 1) * ref_length, wbp->first, ref_length) == -1))
+ continue;
+ else
+ {
+ if (wbp->how_many < 3)
+ {
+ if ((mem_ptr=btree_search(memory_lane,wbp->first,unique_pieces_in_memory-1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ if (wbp->how_many == 2 && (mem_ptr=btree_search(memory_lane,wbp->last,unique_pieces_in_memory-1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ }
+ else
+ {
+ byte block[MAX_REFLENGTH * MEM_STRIP_BUF_SIZE]; // 16 K maximum and only temporary !!
+ if (my_b_read(tempptr, block, (uint) wbp->how_many * ref_length))
+ {
+ read_error = 1; goto skip_writting;
+ }
+ if (unique_pieces_in_memory < 3)
+ {
+ if ((mem_ptr=btree_search(block,memory_lane,wbp->how_many - 1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(memory_lane,dup_record,ref_length);
+ }
+ if (unique_pieces_in_memory == 2 && (mem_ptr=btree_search(block,memory_lane + ref_length,wbp->how_many - 1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ }
+ else
+ {
+ byte *cursor; bool do_check_past;
+ if (unique_pieces_in_memory < wbp->how_many)
+ {
+ do_check_past = (memcmp(memory_lane + (unique_pieces_in_memory - 1)*ref_length,wbp->last,ref_length) == 1);
+ for (cursor=memory_lane; cursor < memory_lane + unique_pieces_in_memory*ref_length; cursor += ref_length)
+ {
+ if ((mem_ptr=btree_search(block,cursor,wbp->how_many - 1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(cursor,dup_record,ref_length);
+ }
+ else if (do_check_past && (memcmp(cursor,wbp->last,ref_length) == 1)) break;
+ }
+ }
+ else
+ {
+ do_check_past = (memcmp(memory_lane + (unique_pieces_in_memory - 1)*ref_length,wbp->last,ref_length) == -1);
+ for (cursor=block; cursor < block + wbp->how_many*ref_length;cursor += ref_length)
+ {
+ if ((mem_ptr=btree_search(memory_lane,cursor,unique_pieces_in_memory-1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ else if (do_check_past && (memcmp(cursor,memory_lane + (unique_pieces_in_memory - 1)*ref_length,ref_length) == 1)) break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ reinit_io_cache(tempptr, WRITE_CACHE,off,0,0);
+ struct written_block wb; wb.offset = off; wb.how_many=opt_unique_pieces; byte *last;
+ if (opt_unique_pieces < unique_pieces_in_memory)
+ {
+ for (mem_count=0, mem_ptr=memory_lane; mem_count<unique_pieces_in_memory;mem_count++, mem_ptr += ref_length)
+ {
+ if (memcmp(mem_ptr,dup_record,ref_length))
+ {
+ if (my_b_write(tempptr,mem_ptr,ref_length))
+ {
+ if (write_error == 9 || write_error == -1) write_error = 0;
+ if (write_error) break;
+ }
+ if (!mem_count) memcpy(wb.first,mem_ptr,ref_length);
+ last = mem_ptr;
+ written_rows++;
+ }
+ }
+ memcpy(wb.last,last,ref_length);
+ }
+ else
+ {
+ memcpy(wb.first,memory_lane,ref_length); memcpy(wb.last,memory_lane + (unique_pieces_in_memory -1)*ref_length,ref_length);
+ if (my_b_write(tempptr, memory_lane,unique_pieces_in_memory * ref_length))
+ {
+ write_error = 1; break;
+ }
+ written_rows += unique_pieces_in_memory;
+ }
+ off = my_b_tell(tempptr);
+ VOID(push_dynamic(&written_blocks,(gptr) &wb));
+ skip_writting:
+ if (write_error || read_error) break;
+ }
+ delete_dynamic(&written_blocks);
+ if (read_error || write_error)
+ {
+ close_cached_file(tempptr); end_io_cache(tempptr);
+ return ptr;
+ }
+ else
+ {
+ close_cached_file(ptr); *written=written_rows; end_io_cache(ptr);
+ reinit_io_cache(tempptr,READ_CACHE,0L,0,0);
+ return tempptr;
+ }
+}
+
+#endif /* SINISAS_STRIP */
+
+/* Return true if some table is not transaction safe */
+
+static bool some_table_is_not_transaction_safe (TABLE_LIST *tl)
+{
+ for (; tl ; tl=tl->next)
+ {
+ if (!(tl->table->file->has_transactions()))
+ return true;
+ }
+ return false;
+}
+
+
+void multi_delete::send_error(uint errcode,const char *err)
+{
+ /* First send error what ever it is ... */
+ ::send_error(&thd->net,errcode,err);
+ /* If nothing deleted return */
+ if (!deleted)
+ return;
+ /* Below can happen when thread is killed early ... */
+ if (!table_being_deleted)
+ table_being_deleted=delete_tables;
+
+ /*
+ If rows from the first table only has been deleted and it is transactional,
+ just do rollback.
+ The same if all tables are transactional, regardless of where we are.
+ In all other cases do attempt deletes ...
+ */
+ if ((table_being_deleted->table->file->has_transactions() &&
+ table_being_deleted == delete_tables) ||
+ !some_table_is_not_transaction_safe(delete_tables->next))
+ ha_rollback(thd);
+ else if (do_delete)
+ VOID(do_deletes(true));
+}
+
+
+int multi_delete::do_deletes (bool from_send_error)
+{
+ int error = 0, counter = 0, count;
+
+ if (from_send_error)
+ {
+ /* Found out table number for 'table_being_deleted' */
+ for (TABLE_LIST *aux=delete_tables;
+ aux != table_being_deleted;
+ aux=aux->next)
+ counter++;
+ }
+ else
+ table_being_deleted = delete_tables;
+
+ do_delete = false;
+ for (table_being_deleted=table_being_deleted->next;
+ table_being_deleted ;
+ table_being_deleted=table_being_deleted->next, counter++)
+ {
+ TABLE *table = table_being_deleted->table;
+ int rl = table->file->ref_length;
+#ifdef SINISAS_STRIP
+ int num_of_positions = (int)my_b_tell(tempfiles[counter])/rl;
+ if (!num_of_positions) continue;
+ tempfiles[counter] = strip_duplicates_from_temp(memory_lane, tempfiles[counter],rl,&num_of_positions);
+ if (!num_of_positions)
+ {
+ error=1; break;
+ }
+#else
+ if (tempfiles[counter]->get(table))
+ {
+ error=1;
+ break;
+ }
+#endif
+
+#if USE_REGENERATE_TABLE
+ // nice little optimization ....
+ // but Monty has to fix generate_table...
+ // This will not work for transactional tables because for other types
+ // records is not absolute
+ if (num_of_positions == table->file->records)
+ {
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.name=table->table_name; table_list.real_name=table_being_deleted->real_name;
+ table_list.table=table;
+ table_list.grant=table->grant;
+ table_list.db = table_being_deleted->db;
+ error=generate_table(thd,&table_list,(TABLE *)0);
+ if (error <= 0) {error = 1; break;}
+ deleted += num_of_positions;
+ continue;
+ }
+#endif /* USE_REGENERATE_TABLE */
+
+ READ_RECORD info;
+ error=0;
+#ifdef SINISAS_STRIP
+ SQL_SELECT *select= new SQL_SELECT;
+ select->head=table;
+ select->file=*tempfiles[counter];
+ init_read_record(&info,thd,table,select,0,0);
+#else
+ init_read_record(&info,thd,table,NULL,0,0);
+#endif
+ bool not_trans_safe = some_table_is_not_transaction_safe(delete_tables);
+ while (!(error=info.read_record(&info)) &&
+ (!thd->killed || from_send_error || not_trans_safe))
+ {
+ error=table->file->delete_row(table->record[0]);
+ if (error)
+ {
+ table->file->print_error(error,MYF(0));
+ break;
+ }
+ else
+ deleted++;
+ }
+ end_read_record(&info);
+#ifdef SINISAS_STRIP
+ delete select;
+#endif
+ if (error == -1)
+ error = 0;
+ }
+ return error;
+}
+
+
+bool multi_delete::send_eof()
+{
+ thd->proc_info="deleting from reference tables";
+ int error = do_deletes(false);
+
+ thd->proc_info="end";
+ if (error && error != -1)
+ {
+ ::send_error(&thd->net);
+ return 1;
+ }
+
+ if (deleted &&
+ (error <= 0 || some_table_is_not_transaction_safe(delete_tables)))
+ {
+ mysql_update_log.write(thd,thd->query,thd->query_length);
+ Query_log_event qinfo(thd, thd->query);
+ if (mysql_bin_log.write(&qinfo) &&
+ !some_table_is_not_transaction_safe(delete_tables))
+ error=1; // Rollback
+ VOID(ha_autocommit_or_rollback(thd,error >= 0));
+ }
+ ::send_ok(&thd->net,deleted);
+ return 0;
+}
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index cfd16df5d17..19dc239c050 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -642,7 +642,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
/* Copy error message and abort */
thd->fatal_error=1;
strmov(thd->net.last_error,tmp->thd.net.last_error);
- thd->net.last_errno=thd->net.last_errno;
+ thd->net.last_errno=tmp->thd.net.last_errno;
}
tmp->unlock();
pthread_mutex_unlock(&LOCK_delayed_create);
@@ -1362,6 +1362,7 @@ select_create::prepare(List<Item> &values)
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ table->file->deactivate_non_unique_index((ha_rows) 0);
DBUG_RETURN(0);
}
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 0a1ee0649c4..3f6c09073e6 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -142,11 +142,11 @@ LEX *lex_start(THD *thd, uchar *buf,uint length)
lex->next_state=STATE_START;
lex->end_of_query=(lex->ptr=buf)+length;
lex->yylineno = 1;
- lex->create_refs=lex->in_comment=0;
+ lex->select->create_refs=lex->in_comment=0;
lex->length=0;
- lex->in_sum_expr=0;
- lex->expr_list.empty();
- lex->ftfunc_list.empty();
+ lex->select->in_sum_expr=0;
+ lex->select->expr_list.empty();
+ lex->select->ftfunc_list.empty();
lex->convert_set=(lex->thd=thd)->convert_set;
lex->yacc_yyss=lex->yacc_yyvs=0;
lex->ignore_space=test(thd->client_capabilities & CLIENT_IGNORE_SPACE);
@@ -155,7 +155,7 @@ LEX *lex_start(THD *thd, uchar *buf,uint length)
void lex_end(LEX *lex)
{
- lex->expr_list.delete_elements(); // If error when parsing sql-varargs
+ lex->select->expr_list.delete_elements(); // If error when parsing sql-varargs
x_free(lex->yacc_yyss);
x_free(lex->yacc_yyvs);
}
@@ -656,12 +656,9 @@ int yylex(void *arg)
if (c == 'e' || c == 'E')
{
c = yyGet();
- if (c != '-' && c != '+' && !isdigit(c))
- { // No exp sig found
- state= STATE_CHAR;
- break;
- }
- if (!isdigit(yyGet()))
+ if (c == '-' || c == '+')
+ c = yyGet(); // Skipp sign
+ if (!isdigit(c))
{ // No digit after sign
state= STATE_CHAR;
break;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 0df5bbebc37..e585ec65191 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -53,8 +53,10 @@ enum enum_sql_command {
SQLCOM_BEGIN, SQLCOM_LOAD_MASTER_TABLE, SQLCOM_CHANGE_MASTER,
SQLCOM_RENAME_TABLE, SQLCOM_BACKUP_TABLE, SQLCOM_RESTORE_TABLE,
SQLCOM_RESET, SQLCOM_PURGE, SQLCOM_SHOW_BINLOGS,
- SQLCOM_SHOW_OPEN_TABLES,
- SQLCOM_HA_OPEN, SQLCOM_HA_CLOSE, SQLCOM_HA_READ
+ SQLCOM_SHOW_OPEN_TABLES, SQLCOM_LOAD_MASTER_DATA,
+ SQLCOM_HA_OPEN, SQLCOM_HA_CLOSE, SQLCOM_HA_READ,
+ SQLCOM_SHOW_SLAVE_HOSTS, SQLCOM_MULTI_DELETE, SQLCOM_UNION_SELECT,
+ SQLCOM_SHOW_BINLOG_EVENTS
};
enum lex_states { STATE_START, STATE_CHAR, STATE_IDENT,
@@ -94,39 +96,70 @@ typedef struct st_lex_master_info
ulonglong pos;
} LEX_MASTER_INFO;
+
+enum sub_select_type {UNSPECIFIED_TYPE,UNION_TYPE, INTERSECT_TYPE, EXCEPT_TYPE};
+
+/* The state of the lex parsing for selects */
+
+typedef struct st_select_lex {
+ enum sub_select_type linkage;
+ uint select_number; /* For Item_select */
+ char *db,*db1,*table1,*db2,*table2; /* For outer join using .. */
+ Item *where,*having;
+ ha_rows select_limit,offset_limit;
+ ulong options;
+ List<List_item> expr_list;
+ List<List_item> when_list;
+ SQL_LIST order_list,table_list,group_list;
+ List<Item> item_list;
+ List<String> interval_list,use_index, *use_index_ptr, ignore_index, *ignore_index_ptr;
+ List<Item_func_match> ftfunc_list;
+ uint in_sum_expr, sort_default;
+ bool create_refs;
+ st_select_lex *next;
+} SELECT_LEX;
+
+
+class Set_option :public Sql_alloc {
+public:
+ const char *name;
+ Item *item;
+ uint name_length;
+ bool type; /* 1 if global */
+ Set_option(bool par_type, const char *par_name, uint length,
+ Item *par_item)
+ :name(par_name), item(par_item), name_length(length), type(par_type) {}
+};
+
+
/* The state of the lex parsing. This is saved in the THD struct */
typedef struct st_lex {
uint yylineno,yytoklen; /* Simulate lex */
LEX_YYSTYPE yylval;
+ SELECT_LEX select_lex, *select;
uchar *ptr,*tok_start,*tok_end,*end_of_query;
char *length,*dec,*change,*name;
- char *db,*db1,*table1,*db2,*table2; /* For outer join using .. */
char *backup_dir; /* For RESTORE/BACKUP */
char* to_log; /* For PURGE MASTER LOGS TO */
String *wild;
sql_exchange *exchange;
- ha_rows select_limit,offset_limit;
- List<List_item> expr_list;
- List<List_item> when_list;
- List<List_item> many_values;
List<key_part_spec> col_list;
List<Alter_drop> drop_list;
List<Alter_column> alter_list;
- List<String> interval_list,use_index,*use_index_ptr,
- ignore_index, *ignore_index_ptr;
+ List<String> interval_list;
List<st_lex_user> users_list;
List<LEX_COLUMN> columns;
List<Key> key_list;
List<create_field> create_list;
- List<Item> item_list,*insert_list,field_list,value_list;
- List<Item_func_match> ftfunc_list;
- SQL_LIST order_list,table_list,group_list,proc_list;
+ List<Item> *insert_list,field_list,value_list;
+ List<List_item> many_values;
+ List<Set_option> option_list;
+ SQL_LIST proc_list, auxilliary_table_list;
TYPELIB *interval;
create_field *last_field;
-
- Item *where,*having,*default_value;
+ Item *default_value;
CONVERT *convert_set;
LEX_USER *grant_user;
gptr yacc_yyss,yacc_yyvs;
@@ -136,7 +169,6 @@ typedef struct st_lex {
HA_CREATE_INFO create_info;
LEX_MASTER_INFO mi; // used by CHANGE MASTER
ulong thread_id,type;
- ulong options;
ulong gemini_spin_retries;
enum_sql_command sql_command;
enum lex_states next_state;
@@ -145,10 +177,10 @@ typedef struct st_lex {
enum enum_ha_read_modes ha_read_mode;
enum ha_rkey_function ha_rkey_mode;
enum enum_enable_or_disable alter_keys_onoff;
- uint in_sum_expr,grant,grant_tot_col,which_columns, sort_default;
+ uint grant,grant_tot_col,which_columns;
thr_lock_type lock_option;
- bool create_refs,drop_primary,drop_if_exists,local_file;
- bool in_comment,ignore_space,verbose,simple_alter;
+ bool drop_primary,drop_if_exists,local_file;
+ bool in_comment,ignore_space,verbose,simple_alter, option_type;
} LEX;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 3dda8d1cff7..928a62a397e 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -46,6 +46,8 @@ static bool check_dup(THD *thd,const char *db,const char *name,
static void mysql_init_query(THD *thd);
static void remove_escape(char *name);
static void refresh_status(void);
+static bool append_file_to_dir(char **filename_ptr, char *table_name);
+static int link_in_large_list_and_check_acl(THD *thd,LEX *lex,SQL_LIST *tables);
const char *any_db="*any*"; // Special symbol for check_access
@@ -53,13 +55,13 @@ const char *command_name[]={
"Sleep", "Quit", "Init DB", "Query", "Field List", "Create DB",
"Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist",
"Connect","Kill","Debug","Ping","Time","Delayed_insert","Change user",
- "Binlog Dump","Table Dump", "Connect Out"
+ "Binlog Dump","Table Dump", "Connect Out", "Register Slave"
};
bool volatile abort_slave = 0;
#ifdef HAVE_OPENSSL
-extern VioSSLAcceptorFd* ssl_acceptor_fd;
+extern struct st_VioSSLAcceptorFd * ssl_acceptor_fd;
#endif /* HAVE_OPENSSL */
#ifdef __WIN__
@@ -423,9 +425,7 @@ check_connections(THD *thd)
DBUG_PRINT("info", ("Agreed to change IO layer to SSL") );
/* Do the SSL layering. */
DBUG_PRINT("info", ("IO layer change in progress..."));
- VioSocket* vio_socket = my_reinterpret_cast(VioSocket*)(net->vio);
- VioSSL* vio_ssl = ssl_acceptor_fd->accept(vio_socket);
- net->vio = my_reinterpret_cast(NetVio*) (vio_ssl);
+ net->vio = sslaccept(ssl_acceptor_fd, net->vio);
DBUG_PRINT("info", ("Reading user information over SSL layer"));
if ((pkt_len=my_net_read(net)) == packet_error ||
pkt_len < NORMAL_HANDSHAKE_SIZE)
@@ -556,6 +556,7 @@ pthread_handler_decl(handle_one_connection,arg)
free_root(&thd->mem_root,MYF(0));
if (net->error && net->vio != 0)
{
+ if (!thd->killed && ! opt_warnings)
sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
thd->thread_id,(thd->db ? thd->db : "unconnected"),
thd->user ? thd->user : "unauthenticated",
@@ -760,12 +761,20 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thread_running++;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->set_time();
- thd->lex.options=0; // We store status here
+ thd->lex.select_lex.options=0; // We store status here
switch (command) {
case COM_INIT_DB:
if (!mysql_change_db(thd,packet))
mysql_log.write(thd,command,"%s",thd->db);
break;
+ case COM_REGISTER_SLAVE:
+ {
+ if(register_slave(thd, (uchar*)packet, packet_length))
+ send_error(&thd->net);
+ else
+ send_ok(&thd->net);
+ break;
+ }
case COM_TABLE_DUMP:
{
slow_command = TRUE;
@@ -1027,7 +1036,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->proc_info="logging slow query";
if ((ulong) (thd->start_time - thd->time_after_lock) > long_query_time ||
- ((thd->lex.options &
+ ((thd->lex.select_lex.options &
(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED)) &&
(specialflag & SPECIAL_LONG_LOG_FORMAT)))
{
@@ -1058,7 +1067,8 @@ mysql_execute_command(void)
int res=0;
THD *thd=current_thd;
LEX *lex= &thd->lex;
- TABLE_LIST *tables=(TABLE_LIST*) lex->table_list.first;
+ TABLE_LIST *tables=(TABLE_LIST*) lex->select->table_list.first;
+ SELECT_LEX *select_lex = lex->select;
DBUG_ENTER("mysql_execute_command");
if(table_rules_on && thd->slave_thread && tables && !tables_ok(thd,tables))
@@ -1070,7 +1080,7 @@ mysql_execute_command(void)
case SQLCOM_SELECT:
{
select_result *result;
- if (lex->options & SELECT_DESCRIBE)
+ if (select_lex->options & SELECT_DESCRIBE)
lex->exchange=0;
if (tables)
{
@@ -1088,12 +1098,12 @@ mysql_execute_command(void)
break; // Error message is given
}
- thd->offset_limit=lex->offset_limit;
- thd->select_limit=lex->select_limit+lex->offset_limit;
- if (thd->select_limit < lex->select_limit)
+ thd->offset_limit=select_lex->offset_limit;
+ thd->select_limit=select_lex->select_limit+select_lex->offset_limit;
+ if (thd->select_limit < select_lex->select_limit)
thd->select_limit= HA_POS_ERROR; // no limit
if (thd->select_limit == HA_POS_ERROR)
- lex->options&= ~OPTION_FOUND_ROWS;
+ select_lex->options&= ~OPTION_FOUND_ROWS;
if (lex->exchange)
{
@@ -1118,8 +1128,8 @@ mysql_execute_command(void)
{
res= -1;
#ifdef DELETE_ITEMS
- delete lex->having;
- delete lex->where;
+ delete select_lex->having;
+ delete select_lex->where;
#endif
break;
}
@@ -1137,22 +1147,22 @@ mysql_execute_command(void)
if (!(res=open_and_lock_tables(thd,tables)))
{
- res=mysql_select(thd,tables,lex->item_list,
- lex->where,
- lex->ftfunc_list,
- (ORDER*) lex->order_list.first,
- (ORDER*) lex->group_list.first,
- lex->having,
+ res=mysql_select(thd,tables,select_lex->item_list,
+ select_lex->where,
+ select_lex->ftfunc_list,
+ (ORDER*) select_lex->order_list.first,
+ (ORDER*) select_lex->group_list.first,
+ select_lex->having,
(ORDER*) lex->proc_list.first,
- lex->options | thd->options,
+ select_lex->options | thd->options,
result);
if (res)
result->abort();
}
delete result;
#ifdef DELETE_ITEMS
- delete lex->having;
- delete lex->where;
+ delete select_lex->having;
+ delete select_lex->where;
#endif
break;
}
@@ -1163,6 +1173,20 @@ mysql_execute_command(void)
res = purge_master_logs(thd, lex->to_log);
break;
}
+ case SQLCOM_SHOW_SLAVE_HOSTS:
+ {
+ if(check_access(thd, FILE_ACL, any_db))
+ goto error;
+ res = show_slave_hosts(thd);
+ break;
+ }
+ case SQLCOM_SHOW_BINLOG_EVENTS:
+ {
+ if(check_access(thd, FILE_ACL, any_db))
+ goto error;
+ res = show_binlog_events(thd);
+ break;
+ }
case SQLCOM_BACKUP_TABLE:
{
if (check_db_used(thd,tables) ||
@@ -1203,6 +1227,13 @@ mysql_execute_command(void)
res = show_binlog_info(thd);
break;
}
+
+ case SQLCOM_LOAD_MASTER_DATA: // sync with master
+ if(check_process_priv(thd))
+ goto error;
+ res = load_master_data(thd);
+ break;
+
case SQLCOM_LOAD_MASTER_TABLE:
if (!tables->db)
@@ -1222,23 +1253,11 @@ mysql_execute_command(void)
if (strlen(tables->name) > NAME_LEN)
{
net_printf(&thd->net,ER_WRONG_TABLE_NAME,tables->name);
- res=0;
break;
}
- thd->last_nx_table = tables->real_name;
- thd->last_nx_db = tables->db;
- if(fetch_nx_table(thd, &glob_mi))
- // fetch_nx_table is responsible for sending
- // the error
- {
- res = 0;
- thd->net.no_send_ok = 0; // easier to do it here
- // this way we make sure that when we are done, we are clean
- break;
- }
-
- res = 0;
+ if (fetch_nx_table(thd, tables->db, tables->real_name, &glob_mi, 0))
+ break; // fetch_nx_table did send the error to the client
send_ok(&thd->net);
break;
@@ -1266,7 +1285,14 @@ mysql_execute_command(void)
res=0;
break;
}
- if (lex->item_list.elements) // With select
+ /* Fix names if symlinked tables */
+ if (append_file_to_dir(&lex->create_info.data_file_name, tables->name) ||
+ append_file_to_dir(&lex->create_info.index_file_name, tables->name))
+ {
+ res=-1;
+ break;
+ }
+ if (select_lex->item_list.elements) // With select
{
select_result *result;
@@ -1284,9 +1310,9 @@ mysql_execute_command(void)
for (table = tables->next ; table ; table=table->next)
table->lock_type= lex->lock_option;
}
- thd->offset_limit=lex->offset_limit;
- thd->select_limit=lex->select_limit+lex->offset_limit;
- if (thd->select_limit < lex->select_limit)
+ thd->offset_limit=select_lex->offset_limit;
+ thd->select_limit=select_lex->select_limit+select_lex->offset_limit;
+ if (thd->select_limit < select_lex->select_limit)
thd->select_limit= HA_POS_ERROR; // No limit
if (!(res=open_and_lock_tables(thd,tables->next)))
@@ -1295,16 +1321,16 @@ mysql_execute_command(void)
tables->real_name, &lex->create_info,
lex->create_list,
lex->key_list,
- lex->item_list,lex->duplicates)))
+ select_lex->item_list,lex->duplicates)))
{
- res=mysql_select(thd,tables->next,lex->item_list,
- lex->where,
- lex->ftfunc_list,
- (ORDER*) lex->order_list.first,
- (ORDER*) lex->group_list.first,
- lex->having,
+ res=mysql_select(thd,tables->next,select_lex->item_list,
+ select_lex->where,
+ select_lex->ftfunc_list,
+ (ORDER*) select_lex->order_list.first,
+ (ORDER*) select_lex->group_list.first,
+ select_lex->having,
(ORDER*) lex->proc_list.first,
- lex->options | thd->options,
+ select_lex->options | thd->options,
result);
if (res)
result->abort();
@@ -1359,10 +1385,10 @@ mysql_execute_command(void)
}
if (!tables->db)
tables->db=thd->db;
- if (!lex->db)
- lex->db=tables->db;
+ if (!select_lex->db)
+ select_lex->db=tables->db;
if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege) ||
- check_access(thd,INSERT_ACL | CREATE_ACL,lex->db,&priv) ||
+ check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv) ||
check_merge_table_access(thd, tables->db,
(TABLE_LIST *)
lex->create_info.merge_list.first))
@@ -1378,21 +1404,23 @@ mysql_execute_command(void)
TABLE_LIST tmp_table;
bzero((char*) &tmp_table,sizeof(tmp_table));
tmp_table.real_name=lex->name;
- tmp_table.db=lex->db;
+ tmp_table.db=select_lex->db;
tmp_table.grant.privilege=priv;
if (check_grant(thd,INSERT_ACL | CREATE_ACL,tables))
goto error;
}
}
+ /* Don't yet allow changing of symlinks with ALTER TABLE */
+ lex->create_info.data_file_name=lex->create_info.index_file_name=0;
/* ALTER TABLE ends previous transaction */
if (end_active_trans(thd))
res= -1;
else
- res= mysql_alter_table(thd, lex->db, lex->name,
+ res= mysql_alter_table(thd, select_lex->db, lex->name,
&lex->create_info,
tables, lex->create_list,
lex->key_list, lex->drop_list, lex->alter_list,
- (ORDER *) lex->order_list.first,
+ (ORDER *) select_lex->order_list.first,
lex->drop_primary, lex->duplicates,
lex->alter_keys_onoff, lex->simple_alter);
break;
@@ -1512,22 +1540,22 @@ mysql_execute_command(void)
goto error;
if (grant_option && check_grant(thd,UPDATE_ACL,tables))
goto error;
- if (lex->item_list.elements != lex->value_list.elements)
+ if (select_lex->item_list.elements != lex->value_list.elements)
{
send_error(&thd->net,ER_WRONG_VALUE_COUNT);
DBUG_VOID_RETURN;
}
res = mysql_update(thd,tables,
- lex->item_list,
+ select_lex->item_list,
lex->value_list,
- lex->where,
- (ORDER *) lex->order_list.first,
- lex->select_limit,
+ select_lex->where,
+ (ORDER *) select_lex->order_list.first,
+ select_lex->select_limit,
lex->duplicates,
lex->lock_option);
#ifdef DELETE_ITEMS
- delete lex->where;
+ delete select_lex->where;
#endif
break;
case SQLCOM_INSERT:
@@ -1571,9 +1599,9 @@ mysql_execute_command(void)
}
select_result *result;
- thd->offset_limit=lex->offset_limit;
- thd->select_limit=lex->select_limit+lex->offset_limit;
- if (thd->select_limit < lex->select_limit)
+ thd->offset_limit=select_lex->offset_limit;
+ thd->select_limit=select_lex->select_limit+select_lex->offset_limit;
+ if (thd->select_limit < select_lex->select_limit)
thd->select_limit= HA_POS_ERROR; // No limit
if (check_dup(thd,tables->db,tables->real_name,tables->next))
@@ -1593,14 +1621,14 @@ mysql_execute_command(void)
lex->sql_command == SQLCOM_REPLACE_SELECT ?
DUP_REPLACE : DUP_IGNORE)))
{
- res=mysql_select(thd,tables->next,lex->item_list,
- lex->where,
- lex->ftfunc_list,
- (ORDER*) lex->order_list.first,
- (ORDER*) lex->group_list.first,
- lex->having,
+ res=mysql_select(thd,tables->next,select_lex->item_list,
+ select_lex->where,
+ select_lex->ftfunc_list,
+ (ORDER*) select_lex->order_list.first,
+ (ORDER*) select_lex->group_list.first,
+ select_lex->having,
(ORDER*) lex->proc_list.first,
- lex->options | thd->options,
+ select_lex->options | thd->options,
result);
delete result;
}
@@ -1608,14 +1636,14 @@ mysql_execute_command(void)
res= -1;
}
#ifdef DELETE_ITEMS
- delete lex->having;
- delete lex->where;
+ delete select_lex->having;
+ delete select_lex->where;
#endif
break;
}
case SQLCOM_TRUNCATE:
- lex->where=0;
- lex->select_limit=HA_POS_ERROR;
+ select_lex->where=0;
+ select_lex->select_limit=HA_POS_ERROR;
/* Fall through */
case SQLCOM_DELETE:
{
@@ -1629,8 +1657,100 @@ mysql_execute_command(void)
if (lex->sql_command == SQLCOM_TRUNCATE && end_active_trans(thd))
res= -1;
else
- res = mysql_delete(thd,tables, lex->where, (ORDER*)lex->order_list.first,
- lex->select_limit, lex->lock_option, lex->options);
+ res = mysql_delete(thd,tables, select_lex->where,
+ (ORDER*) select_lex->order_list.first,
+ select_lex->select_limit, lex->lock_option,
+ select_lex->options);
+ break;
+ }
+ case SQLCOM_MULTI_DELETE:
+ {
+ TABLE_LIST *aux_tables=(TABLE_LIST *)thd->lex.auxilliary_table_list.first;
+ TABLE_LIST *auxi;
+ uint table_count=0;
+ multi_delete *result;
+
+ /* sql_yacc guarantees that tables and aux_tables are not zero */
+ if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) ||
+ check_table_access(thd,SELECT_ACL, tables) ||
+ check_table_access(thd,DELETE_ACL, aux_tables))
+ goto error;
+ if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where)
+ {
+ send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE);
+ goto error;
+ }
+ for (auxi=(TABLE_LIST*) aux_tables ; auxi ; auxi=auxi->next)
+ {
+ table_count++;
+ /* All tables in aux_tables must be found in FROM PART */
+ TABLE_LIST *walk;
+ for (walk=(TABLE_LIST*) tables ; walk ; walk=walk->next)
+ {
+ if (!strcmp(auxi->real_name,walk->real_name) &&
+ !strcmp(walk->db,auxi->db))
+ break;
+ }
+ if (!walk)
+ {
+ net_printf(&thd->net,ER_NONUNIQ_TABLE,auxi->real_name);
+ goto error;
+ }
+ auxi->lock_type=walk->lock_type=TL_WRITE;
+ auxi->table= (TABLE *) walk; // Remember corresponding table
+ }
+ tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
+ if (add_item_to_list(new Item_null()))
+ {
+ res= -1;
+ break;
+ }
+ thd->proc_info="init";
+ if ((res=open_and_lock_tables(thd,tables)))
+ break;
+ /* Fix tables-to-be-deleted-from list to point at opened tables */
+ for (auxi=(TABLE_LIST*) aux_tables ; auxi ; auxi=auxi->next)
+ auxi->table= ((TABLE_LIST*) auxi->table)->table;
+ if ((result=new multi_delete(thd,aux_tables,lex->lock_option,
+ table_count)) && ! thd->fatal_error)
+ {
+ res=mysql_select(thd,tables,select_lex->item_list,
+ select_lex->where,select_lex->ftfunc_list,
+ (ORDER *)NULL,(ORDER *)NULL,(Item *)NULL,
+ (ORDER *)NULL,
+ select_lex->options | thd->options |
+ SELECT_NO_JOIN_CACHE,
+ result);
+ }
+ else
+ res= -1; // Error is not sent
+ delete result;
+ close_thread_tables(thd);
+ break;
+ }
+ case SQLCOM_UNION_SELECT:
+ {
+ SQL_LIST *total=(SQL_LIST *) thd->calloc(sizeof(SQL_LIST));
+ if (select_lex->options & SELECT_DESCRIBE)
+ lex->exchange=0;
+ if ((res = link_in_large_list_and_check_acl(thd,lex,total)) == -1)
+ {
+ res=0;
+ break;
+ }
+ if (res &&
+ (res=check_access(thd,
+ lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL,
+ any_db)))
+ {
+ res=0;
+ break;
+ }
+ if (!(res=open_and_lock_tables(thd,(TABLE_LIST *)total->first)))
+ {
+ res=mysql_union(thd,lex, select_lex->select_number+1);
+ if (res==-1) res=0;
+ }
break;
}
case SQLCOM_DROP_TABLE:
@@ -1657,7 +1777,7 @@ mysql_execute_command(void)
break;
case SQLCOM_SHOW_DATABASES:
#if defined(DONT_ALLOW_SHOW_COMMANDS)
- send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */
+ send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */
DBUG_VOID_RETURN;
#else
if ((specialflag & SPECIAL_SKIP_SHOW_DB) &&
@@ -1698,7 +1818,7 @@ mysql_execute_command(void)
DBUG_VOID_RETURN;
#else
{
- char *db=lex->db ? lex->db : thd->db;
+ char *db=select_lex->db ? select_lex->db : thd->db;
if (!db)
{
send_error(&thd->net,ER_NO_DB_ERROR); /* purecov: inspected */
@@ -1713,7 +1833,7 @@ mysql_execute_command(void)
if (check_access(thd,SELECT_ACL,db,&thd->col_access))
goto error; /* purecov: inspected */
/* grant is checked in mysqld_show_tables */
- if (lex->options & SELECT_DESCRIBE)
+ if (select_lex->options & SELECT_DESCRIBE)
res= mysqld_extend_show_tables(thd,db,
(lex->wild ? lex->wild->ptr() : NullS));
else
@@ -1778,7 +1898,7 @@ mysql_execute_command(void)
}
#endif
case SQLCOM_CHANGE_DB:
- mysql_change_db(thd,lex->db);
+ mysql_change_db(thd,select_lex->db);
break;
case SQLCOM_LOAD:
{
@@ -1802,10 +1922,10 @@ mysql_execute_command(void)
case SQLCOM_SET_OPTION:
{
uint org_options=thd->options;
- thd->options=lex->options;
+ thd->options=select_lex->options;
thd->update_lock_default= ((thd->options & OPTION_LOW_PRIORITY_UPDATES) ?
TL_WRITE_LOW_PRIORITY : TL_WRITE);
- thd->default_select_limit=lex->select_limit;
+ thd->default_select_limit=select_lex->select_limit;
thd->tx_isolation=lex->tx_isolation;
if (thd->gemini_spin_retries != lex->gemini_spin_retries)
{
@@ -1816,7 +1936,7 @@ mysql_execute_command(void)
thd->options,(long) thd->default_select_limit));
/* Check if auto_commit mode changed */
- if ((org_options ^ lex->options) & OPTION_NOT_AUTO_COMMIT)
+ if ((org_options ^ select_lex->options) & OPTION_NOT_AUTO_COMMIT)
{
if ((org_options & OPTION_NOT_AUTO_COMMIT))
{
@@ -1864,6 +1984,8 @@ mysql_execute_command(void)
}
if (check_db_used(thd,tables) || end_active_trans(thd))
goto error;
+ if (grant_option && check_grant(thd,SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL,tables))
+ goto error;
thd->in_lock_tables=1;
if (!(res=open_and_lock_tables(thd,tables)))
{
@@ -1924,7 +2046,7 @@ mysql_execute_command(void)
if (tables && !tables->db)
tables->db=thd->db;
if (check_access(thd, lex->grant | lex->grant_tot_col | GRANT_ACL,
- tables && tables->db ? tables->db : lex->db,
+ tables && tables->db ? tables->db : select_lex->db,
tables ? &tables->grant.privilege : 0,
tables ? 0 : 1))
goto error;
@@ -1976,7 +2098,7 @@ mysql_execute_command(void)
res=1;
}
else
- res = mysql_grant(thd, lex->db, lex->users_list, lex->grant,
+ res = mysql_grant(thd, select_lex->db, lex->users_list, lex->grant,
lex->sql_command == SQLCOM_REVOKE);
if(!res)
{
@@ -2024,8 +2146,8 @@ mysql_execute_command(void)
if (check_db_used(thd,tables) || check_table_access(thd,SELECT_ACL, tables))
goto error;
res = mysql_ha_read(thd, tables, lex->ha_read_mode, lex->backup_dir,
- lex->insert_list, lex->ha_rkey_mode, lex->where,
- lex->select_limit, lex->offset_limit);
+ lex->insert_list, lex->ha_rkey_mode, select_lex->where,
+ select_lex->select_limit, select_lex->offset_limit);
break;
case SQLCOM_BEGIN:
@@ -2286,13 +2408,13 @@ static void
mysql_init_query(THD *thd)
{
DBUG_ENTER("mysql_init_query");
- thd->lex.item_list.empty();
+ thd->lex.select_lex.item_list.empty();
thd->lex.value_list.empty();
- thd->lex.table_list.elements=0;
+ thd->lex.select_lex.table_list.elements=0;
thd->free_list=0;
-
- thd->lex.table_list.first=0;
- thd->lex.table_list.next= (byte**) &thd->lex.table_list.first;
+ thd->lex.select = &thd->lex.select_lex;
+ thd->lex.select_lex.table_list.first=0;
+ thd->lex.select_lex.table_list.next= (byte**) &thd->lex.select_lex.table_list.first;
thd->fatal_error=0; // Safety
thd->last_insert_id_used=thd->query_start_used=thd->insert_id_used=0;
thd->sent_row_count=thd->examined_row_count=0;
@@ -2302,19 +2424,35 @@ mysql_init_query(THD *thd)
void
mysql_init_select(LEX *lex)
{
- lex->where=lex->having=0;
- lex->select_limit=current_thd->default_select_limit;
- lex->offset_limit=0L;
- lex->options=0;
- lex->exchange = 0;
+ SELECT_LEX *select_lex = lex->select;
+ select_lex->where=select_lex->having=0;
+ select_lex->select_limit=current_thd->default_select_limit;
+ select_lex->offset_limit=0L;
+ select_lex->options=0; select_lex->linkage=UNSPECIFIED_TYPE;
+ select_lex->select_number = 0; lex->exchange = 0;
lex->proc_list.first=0;
- lex->order_list.elements=lex->group_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->group_list.first=0;
- lex->group_list.next= (byte**) &lex->group_list.first;
+ select_lex->order_list.elements=select_lex->group_list.elements=0;
+ select_lex->order_list.first=0;
+ select_lex->order_list.next= (byte**) &select_lex->order_list.first;
+ select_lex->group_list.first=0;
+ select_lex->group_list.next= (byte**) &select_lex->group_list.first;
+ select_lex->next = (SELECT_LEX *)NULL;
}
+void
+mysql_new_select(LEX *lex)
+{
+ uint select_no=lex->select->select_number;
+ SELECT_LEX *select_lex = (SELECT_LEX *)sql_calloc(sizeof(SELECT_LEX));
+ lex->select->next=select_lex;
+ lex->select=select_lex; lex->select->select_number = ++select_no;
+ lex->select->item_list = lex->select_lex.item_list;
+ lex->select->item_list.empty();
+ lex->select->table_list = lex->select_lex.table_list;
+ lex->select->table_list.elements=0;
+ lex->select->table_list.first=0;
+ lex->select->table_list.next= (byte**) &lex->select->table_list.first;
+}
void
mysql_parse(THD *thd,char *inBuf,uint length)
@@ -2732,7 +2870,7 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias,
if (flags != TL_IGNORE)
{
- for (TABLE_LIST *tables=(TABLE_LIST*) thd->lex.table_list.first ; tables ;
+ for (TABLE_LIST *tables=(TABLE_LIST*) thd->lex.select->table_list.first ; tables ;
tables=tables->next)
{
if (!strcmp(alias_str,tables->name) &&
@@ -2744,10 +2882,46 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias,
}
}
}
- link_in_list(&thd->lex.table_list,(byte*) ptr,(byte**) &ptr->next);
+ link_in_list(&thd->lex.select->table_list,(byte*) ptr,(byte**) &ptr->next);
DBUG_RETURN(ptr);
}
+static int link_in_large_list_and_check_acl(THD *thd,LEX *lex,SQL_LIST *tables)
+{
+ SELECT_LEX *sl; const char *current_db=thd->db ? thd->db : "";
+ for (sl=&lex->select_lex;sl;sl=sl->next)
+ {
+ if ((lex->sql_command == SQLCOM_UNION_SELECT) && (sl->order_list.first != (byte *)NULL) && (sl->next != (st_select_lex *)NULL))
+ {
+ net_printf(&thd->net,ER_ILLEGAL_GRANT_FOR_TABLE); // placeholder error message; only the last SELECT of a UNION may have ORDER BY
+ return -1;
+ }
+ if (sl->table_list.first == (byte *)NULL) continue;
+ TABLE_LIST *cursor,*aux=(TABLE_LIST*) sl->table_list.first;
+ if (aux)
+ {
+ if (check_table_access(thd, lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL , aux))
+ return -1;
+ for (;aux;aux=aux->next)
+ {
+ if (!aux->db)
+ aux->db=(char *)current_db;
+ for (cursor=(TABLE_LIST *)tables->first;cursor;cursor=cursor->next)
+ if (!strcmp(cursor->db,aux->db) && (!strcmp(cursor->real_name,aux->real_name)))
+ break;
+ if (!cursor || !tables->first)
+ {
+ aux->lock_type= lex->lock_option;
+ if (!tables->next)
+ tables->next= (byte**) &tables->first;
+ link_in_list(tables,(byte*)aux,(byte**) &aux->next);
+ }
+ }
+ }
+ }
+ return (tables->first) ? 0 : 1;
+}
+
void add_join_on(TABLE_LIST *b,Item *expr)
{
if (!b->on_expr)
@@ -2864,3 +3038,29 @@ static void refresh_status(void)
pthread_mutex_unlock(&LOCK_status);
pthread_mutex_unlock(&THR_LOCK_keycache);
}
+
+
+ /* If *filename_ptr is non-NULL, append table_name to the directory it names */
+
+static bool append_file_to_dir(char **filename_ptr, char *table_name)
+{
+ char buff[FN_REFLEN],*ptr;
+ if (!*filename_ptr)
+ return 0; // nothing to do
+
+ /* Check that the filename is not too long and it's a hard path */
+ if (strlen(*filename_ptr)+strlen(table_name) >= FN_REFLEN-1 ||
+ !test_if_hard_path(*filename_ptr))
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), *filename_ptr);
+ return 1;
+ }
+ /* Normalize to unix filename format, also on DOS */
+ strmov(buff,*filename_ptr);
+ convert_dirname(buff);
+ if (!(ptr=sql_alloc(strlen(buff)+strlen(table_name)+1)))
+ return 1; // End of memory
+ *filename_ptr=ptr;
+ strxmov(ptr,buff,table_name,NullS);
+ return 0;
+}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index e5039d118be..6153c4bd0f9 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -21,17 +21,54 @@
#include "sql_repl.h"
#include "sql_acl.h"
#include "log_event.h"
+#include "mini_client.h"
#include <thr_alarm.h>
#include <my_dir.h>
+#define SLAVE_LIST_CHUNK 128
+
extern const char* any_db;
extern pthread_handler_decl(handle_slave,arg);
+HASH slave_list;
+
+#ifndef DBUG_OFF
+int max_binlog_dump_events = 0; // unlimited
+bool opt_sporadic_binlog_dump_fail = 0;
+static int binlog_dump_count = 0;
+#endif
+
+static uint32* slave_list_key(SLAVE_INFO* si, uint* len,
+ my_bool not_used __attribute__((unused)))
+{
+ *len = 4;
+ return &si->server_id;
+}
+
+static void slave_info_free(void *s)
+{
+ my_free((byte*)s, MYF(MY_WME));
+}
+
+void init_slave_list()
+{
+ hash_init(&slave_list, SLAVE_LIST_CHUNK, 0, 0,
+ (hash_get_key) slave_list_key, slave_info_free, 0);
+ pthread_mutex_init(&LOCK_slave_list, MY_MUTEX_INIT_FAST);
+}
+
+void end_slave_list()
+{
+ pthread_mutex_lock(&LOCK_slave_list);
+ hash_free(&slave_list);
+ pthread_mutex_unlock(&LOCK_slave_list);
+ pthread_mutex_destroy(&LOCK_slave_list);
+}
static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
const char**errmsg)
{
- char header[LOG_EVENT_HEADER_LEN];
+ char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN];
memset(header, 0, 4); // when does not matter
header[EVENT_TYPE_OFFSET] = ROTATE_EVENT;
char* p = strrchr(log_file_name, FN_LIBCHAR);
@@ -42,10 +79,14 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
p = log_file_name;
uint ident_len = (uint) strlen(p);
- ulong event_len = ident_len + sizeof(header);
- int4store(header + EVENT_TYPE_OFFSET + 1, server_id);
+ ulong event_len = ident_len + ROTATE_EVENT_OVERHEAD;
+ int4store(header + SERVER_ID_OFFSET, server_id);
int4store(header + EVENT_LEN_OFFSET, event_len);
+ int2store(header + FLAGS_OFFSET, 0);
+ int4store(header + LOG_SEQ_OFFSET, 0);
packet->append(header, sizeof(header));
+ int8store(buf, 4); // tell slave to skip magic number
+ packet->append(buf, ROTATE_HEADER_LEN);
packet->append(p,ident_len);
if(my_net_write(net, (char*)packet->ptr(), packet->length()))
{
@@ -55,6 +96,55 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
return 0;
}
+int register_slave(THD* thd, uchar* packet, uint packet_length)
+{
+ uint len;
+ SLAVE_INFO* si, *old_si;
+ int res = 1;
+ uchar* p = packet, *p_end = packet + packet_length;
+
+ if(check_access(thd, FILE_ACL, any_db))
+ return 1;
+
+ if(!(si = (SLAVE_INFO*)my_malloc(sizeof(SLAVE_INFO), MYF(MY_WME))))
+ goto err;
+
+ si->server_id = uint4korr(p);
+ p += 4;
+ len = (uint)*p++;
+ if(p + len > p_end || len > sizeof(si->host) - 1)
+ goto err;
+ memcpy(si->host, p, len);
+ si->host[len] = 0;
+ p += len;
+ len = *p++;
+ if(p + len > p_end || len > sizeof(si->user) - 1)
+ goto err;
+ memcpy(si->user, p, len);
+ si->user[len] = 0;
+ p += len;
+ len = *p++;
+ if(p + len > p_end || len > sizeof(si->password) - 1)
+ goto err;
+ memcpy(si->password, p, len);
+ si->password[len] = 0;
+ p += len;
+ si->port = uint2korr(p);
+ pthread_mutex_lock(&LOCK_slave_list);
+
+ if((old_si = (SLAVE_INFO*)hash_search(&slave_list,
+ (byte*)&si->server_id, 4)))
+ hash_delete(&slave_list, (byte*)old_si);
+
+ res = hash_insert(&slave_list, (byte*)si);
+ pthread_mutex_unlock(&LOCK_slave_list);
+ return res;
+err:
+ if(si)
+ my_free((byte*)si, MYF(MY_WME));
+ return res;
+}
+
static int send_file(THD *thd)
{
@@ -265,8 +355,19 @@ void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags)
int error;
const char *errmsg = "Unknown error";
NET* net = &thd->net;
+#ifndef DBUG_OFF
+ int left_events = max_binlog_dump_events;
+#endif
DBUG_ENTER("mysql_binlog_send");
+#ifndef DBUG_OFF
+ if (opt_sporadic_binlog_dump_fail && (binlog_dump_count++ % 2))
+ {
+ errmsg = "Master failed COM_BINLOG_DUMP to test if slave can recover";
+ goto err;
+ }
+#endif
+
bzero((char*) &log,sizeof(log));
if(!mysql_bin_log.is_open())
@@ -297,10 +398,10 @@ void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags)
if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0)
goto err;
- if(pos < 4)
+ if (pos < 4)
{
- errmsg = "Congratulations! You have hit the magic number and can win \
-sweepstakes if you report the bug";
+ errmsg = "Client requested master to start replication from \
+impossible position";
goto err;
}
@@ -326,6 +427,14 @@ sweepstakes if you report the bug";
while (!(error = Log_event::read_log_event(&log, packet, log_lock)))
{
+#ifndef DBUG_OFF
+ if(max_binlog_dump_events && !left_events--)
+ {
+ net_flush(net);
+ errmsg = "Debugging binlog dump abort";
+ goto err;
+ }
+#endif
if (my_net_write(net, (char*)packet->ptr(), packet->length()) )
{
errmsg = "Failed on my_net_write()";
@@ -400,6 +509,15 @@ sweepstakes if you report the bug";
bool read_packet = 0, fatal_error = 0;
+#ifndef DBUG_OFF
+ if(max_binlog_dump_events && !left_events--)
+ {
+ net_flush(net);
+ errmsg = "Debugging binlog dump abort";
+ goto err;
+ }
+#endif
+
// no one will update the log while we are reading
// now, but we'll be quick and just read one record
pthread_mutex_lock(log_lock);
@@ -614,7 +732,7 @@ void reset_slave()
pthread_mutex_unlock(&LOCK_slave);
end_master_info(&glob_mi);
- fn_format(fname, master_info_file, mysql_data_home, "", 4+16+32);
+ fn_format(fname, master_info_file, mysql_data_home, "", 4+32);
if(my_stat(fname, &stat_area, MYF(0)))
if(my_delete(fname, MYF(MY_WME)))
return;
@@ -685,14 +803,18 @@ int change_master(THD* thd)
// if we change host or port, we must reset the postion
glob_mi.log_file_name[0] = 0;
glob_mi.pos = 4; // skip magic number
+ glob_mi.pending = 0;
}
if(lex_mi->log_file_name)
strmake(glob_mi.log_file_name, lex_mi->log_file_name,
sizeof(glob_mi.log_file_name));
if(lex_mi->pos)
+ {
glob_mi.pos = lex_mi->pos;
-
+ glob_mi.pending = 0;
+ }
+
if(lex_mi->host)
{
strmake(glob_mi.host, lex_mi->host, sizeof(glob_mi.host));
@@ -741,6 +863,149 @@ void reset_master()
}
+
+int show_binlog_events(THD* thd)
+{
+ DBUG_ENTER("show_binlog_events");
+ List<Item> field_list;
+ const char* errmsg = 0;
+ IO_CACHE log;
+ File file = -1;
+
+ Log_event::init_show_field_list(&field_list);
+ if (send_fields(thd, field_list, 1))
+ DBUG_RETURN(-1);
+
+ if (mysql_bin_log.is_open())
+ {
+ LOG_INFO linfo;
+ char search_file_name[FN_REFLEN];
+ LEX_MASTER_INFO* lex_mi = &thd->lex.mi;
+ uint event_count, limit_start, limit_end;
+ const char* log_file_name = lex_mi->log_file_name;
+ Log_event* ev;
+ ulong pos = (ulong) lex_mi->pos;
+
+ limit_start = thd->lex.select->offset_limit;
+ limit_end = thd->lex.select->select_limit + limit_start;
+
+ if (log_file_name)
+ mysql_bin_log.make_log_name(search_file_name, log_file_name);
+ else
+ search_file_name[0] = 0;
+
+ linfo.index_file_offset = 0;
+ thd->current_linfo = &linfo;
+
+ if (mysql_bin_log.find_first_log(&linfo, search_file_name))
+ {
+ errmsg = "Could not find target log";
+ goto err;
+ }
+
+ if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
+ goto err;
+
+ if (pos < 4)
+ {
+ errmsg = "Invalid log position";
+ goto err;
+ }
+
+ pthread_mutex_lock(mysql_bin_log.get_log_lock());
+
+ my_b_seek(&log, pos);
+
+ for (event_count = 0;
+ (ev = Log_event::read_log_event(&log, 0));)
+ {
+ if (event_count >= limit_start &&
+ ev->net_send(thd, linfo.log_file_name, pos))
+ {
+ errmsg = "Net error";
+ delete ev;
+ pthread_mutex_unlock(mysql_bin_log.get_log_lock());
+ goto err;
+ }
+
+ pos = my_b_tell(&log);
+ delete ev;
+
+ if (++event_count >= limit_end)
+ break;
+ }
+
+ if (event_count < limit_end && log.error)
+ {
+ errmsg = "Wrong offset or I/O error";
+ goto err;
+ }
+
+ pthread_mutex_unlock(mysql_bin_log.get_log_lock());
+ }
+
+err:
+ if (file >= 0)
+ {
+ end_io_cache(&log);
+ (void) my_close(file, MYF(MY_WME));
+ }
+
+ if (errmsg)
+ {
+ net_printf(&thd->net, ER_SHOW_BINLOG_EVENTS, errmsg);
+ DBUG_RETURN(1);
+ }
+
+ send_eof(&thd->net);
+ DBUG_RETURN(0);
+}
+
+
+int show_slave_hosts(THD* thd)
+{
+ DBUG_ENTER("show_slave_hosts");
+ List<Item> field_list;
+ field_list.push_back(new Item_empty_string("Server_id", 20));
+ field_list.push_back(new Item_empty_string("Host", 20));
+ if(opt_show_slave_auth_info)
+ {
+ field_list.push_back(new Item_empty_string("User",20));
+ field_list.push_back(new Item_empty_string("Password",20));
+ }
+ field_list.push_back(new Item_empty_string("Port",20));
+
+ if(send_fields(thd, field_list, 1))
+ DBUG_RETURN(-1);
+ String* packet = &thd->packet;
+ uint i;
+ NET* net = &thd->net;
+
+ pthread_mutex_lock(&LOCK_slave_list);
+
+ for(i = 0; i < slave_list.records; ++i)
+ {
+ SLAVE_INFO* si = (SLAVE_INFO*)hash_element(&slave_list, i);
+ packet->length(0);
+ net_store_data(packet, si->server_id);
+ net_store_data(packet, si->host);
+ if(opt_show_slave_auth_info)
+ {
+ net_store_data(packet, si->user);
+ net_store_data(packet, si->password);
+ }
+ net_store_data(packet, (uint)si->port);
+ if(my_net_write(net, (char*)packet->ptr(), packet->length()))
+ {
+ pthread_mutex_unlock(&LOCK_slave_list);
+ DBUG_RETURN(-1);
+ }
+ }
+ pthread_mutex_unlock(&LOCK_slave_list);
+ send_eof(net);
+ DBUG_RETURN(0);
+}
+
int show_binlog_info(THD* thd)
{
DBUG_ENTER("show_binlog_info");
@@ -845,5 +1110,221 @@ err:
return 1;
}
+/*
+  Open a mini-client connection to the master described by 'mi'.
+  On failure the client error text is written to the server error log.
+  Returns 0 on success, 1 on connect failure.
+  NOTE(review): 'thd' is not used in this function body - confirm it is
+  kept only for interface symmetry with the other replication calls.
+*/
+int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi)
+{
+ /* args after password: db=0, port from master info, no socket, no flags */
+ if(!mc_mysql_connect(mysql, mi->host, mi->user, mi->password, 0,
+ mi->port, 0, 0))
+ {
+ sql_print_error("Connection to master failed: %s",
+ mc_mysql_error(mysql));
+ return 1;
+ }
+ return 0;
+}
+
+/*
+  Free the per-database SHOW TABLES results from 'cur' backwards down to
+  'start' (skipping slots left at 0 for excluded databases), then free
+  the SHOW DATABASES result 'db_res'.
+  NOTE(review): callers may pass cur == start - 1 as a "nothing to free"
+  sentinel; the 'cur >= start' test handles that, but forming a pointer
+  before the array start is formally undefined behavior - worth cleaning
+  up if this code is ever revisited.
+*/
+static inline void cleanup_mysql_results(MYSQL_RES* db_res,
+ MYSQL_RES** cur, MYSQL_RES** start)
+{
+ for( ; cur >= start; --cur)
+ if(*cur)
+ mc_mysql_free_result(*cur);
+ mc_mysql_free_result(db_res);
+}
+
+/*
+  Download every table of database 'db' from the master over 'mysql'.
+  'table_res' is the stored result of SHOW TABLES for that database;
+  each row's first column is a table name.  Tables rejected by the
+  replication table rules (when table_rules_on) are skipped.
+  Returns 0 on success or the first non-zero error from
+  fetch_nx_table() (which reports the error itself).
+*/
+static inline int fetch_db_tables(THD* thd, MYSQL* mysql, const char* db,
+ MYSQL_RES* table_res)
+{
+ MYSQL_ROW row;
+
+ for( row = mc_mysql_fetch_row(table_res); row;
+ row = mc_mysql_fetch_row(table_res))
+ {
+ TABLE_LIST table;
+ const char* table_name = row[0];
+ int error;
+ if(table_rules_on)
+ {
+ /* build a minimal one-element TABLE_LIST just for the rule check */
+ table.next = 0;
+ table.db = (char*)db;
+ table.real_name = (char*)table_name;
+ table.updating = 1;
+ if(!tables_ok(thd, &table))
+ continue;
+ }
+
+ if((error = fetch_nx_table(thd, db, table_name, &glob_mi, mysql)))
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+  Implements LOAD DATA FROM MASTER:
+    1) stop the slave thread (if running) for the whole operation,
+    2) connect to the master and enumerate its databases,
+    3) FLUSH TABLES WITH READ LOCK + SHOW MASTER STATUS on the master,
+    4) for each database allowed by the replication rules: drop it,
+       recreate it and fetch all of its tables,
+    5) point the slave position at the master's current log position,
+    6) UNLOCK TABLES on the master and restart the slave if needed.
+  Returns 0 on success (and sends OK to the client), or a MySQL error
+  code that has already been reported via net_printf().
+  Fixes vs. previous revision: strmake() off-by-one, NULL row check on
+  SHOW MASTER STATUS, db_res leaks on early error paths, unused 'row'.
+*/
+int load_master_data(THD* thd)
+{
+ MYSQL mysql;
+ MYSQL_RES* master_status_res = 0;
+ bool slave_was_running = 0;
+ int error = 0;
+
+ mc_mysql_init(&mysql);
+
+ pthread_mutex_lock(&LOCK_slave);
+ // we do not want anyone messing with the slave at all for the entire
+ // duration of the data load;
+
+ // first, kill the slave
+ if((slave_was_running = slave_running))
+ {
+ abort_slave = 1;
+ thr_alarm_kill(slave_real_id);
+ thd->proc_info = "waiting for slave to die";
+ while(slave_running)
+ pthread_cond_wait(&COND_slave_stopped, &LOCK_slave); // wait until done
+ }
+
+
+ if(connect_to_master(thd, &mysql, &glob_mi))
+ {
+ net_printf(&thd->net, error = ER_CONNECT_TO_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+
+ // now that we are connected, get all database and tables in each
+ {
+ MYSQL_RES *db_res, **table_res, **table_res_end, **cur_table_res;
+ uint num_dbs;
+
+ if(mc_mysql_query(&mysql, "show databases", 0) ||
+ !(db_res = mc_mysql_store_result(&mysql)))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+
+ // in theory, the master could have no databases at all
+ // and run with skip-grant
+ if(!(num_dbs = mc_mysql_num_rows(db_res)))
+ {
+ mc_mysql_free_result(db_res); // do not leak the result on this path
+ goto err;
+ }
+
+ if(!(table_res = (MYSQL_RES**)thd->alloc(num_dbs * sizeof(MYSQL_RES*))))
+ {
+ net_printf(&thd->net, error = ER_OUTOFMEMORY);
+ mc_mysql_free_result(db_res); // do not leak the result on this path
+ goto err;
+ }
+
+ // this is a temporary solution until we have online backup
+ // capabilities - to be replaced once online backup is working
+ // we wait to issue FLUSH TABLES WITH READ LOCK for as long as we
+ // can to minimize the lock time
+ if(mc_mysql_query(&mysql, "FLUSH TABLES WITH READ LOCK", 0)
+ || mc_mysql_query(&mysql, "SHOW MASTER STATUS",0) ||
+ !(master_status_res = mc_mysql_store_result(&mysql)))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ mc_mysql_free_result(db_res); // do not leak the result on this path
+ goto err;
+ }
+
+ // go through every table in every database, and if the replication
+ // rules allow replicating it, get it
+
+ table_res_end = table_res + num_dbs;
+
+ for(cur_table_res = table_res; cur_table_res < table_res_end;
+ ++cur_table_res)
+ {
+ MYSQL_ROW row = mc_mysql_fetch_row(db_res);
+ // since we know how many rows we have, this can never be NULL
+
+ char* db = row[0];
+ int drop_error = 0;
+
+ // do not replicate databases excluded by rules
+ // also skip mysql database - in most cases the user will
+ // mess up and not exclude mysql database with the rules when
+ // he actually means to - in this case, he is up for a surprise if
+ // his priv tables get dropped and downloaded from master
+ // TO DO - add special option, not enabled
+ // by default, to allow inclusion of mysql database into load
+ // data from master
+ if(!db_ok(db, replicate_do_db, replicate_ignore_db) ||
+ !strcmp(db,"mysql"))
+ {
+ *cur_table_res = 0; // mark slot empty for cleanup_mysql_results()
+ continue;
+ }
+
+ if((drop_error = mysql_rm_db(0, db, 1)) ||
+ mysql_create_db(0, db, 0))
+ {
+ error = (drop_error) ? ER_DB_DROP_DELETE : ER_CANT_CREATE_DB;
+ // NOTE(review): 'my_error' is a function symbol, not an error
+ // string - verify the intended varargs argument here
+ net_printf(&thd->net, error, db, my_error);
+ cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
+ goto err;
+ }
+
+ if(mc_mysql_select_db(&mysql, db) ||
+ mc_mysql_query(&mysql, "show tables", 0) ||
+ !(*cur_table_res = mc_mysql_store_result(&mysql)))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
+ goto err;
+ }
+
+ if((error = fetch_db_tables(thd, &mysql, db, *cur_table_res)))
+ {
+ // we do not report the error - fetch_db_tables handles it
+ cleanup_mysql_results(db_res, cur_table_res, table_res);
+ goto err;
+ }
+ }
+
+ cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
+
+ // adjust position in the master
+ if(master_status_res)
+ {
+ MYSQL_ROW row = mc_mysql_fetch_row(master_status_res);
+
+ // we need this check because the master may not be running with
+ // log-bin, but it will still allow us to do all the steps
+ // of LOAD DATA FROM MASTER - no reason to forbid it, really,
+ // although it does not make much sense for the user to do it
+ // 'row' itself may be NULL when the result set is empty
+ if(row && row[0] && row[1])
+ {
+ // strmake() copies at most 'length' chars and then appends a nul,
+ // so pass sizeof - 1 to avoid a one-byte overflow
+ strmake(glob_mi.log_file_name, row[0],
+ sizeof(glob_mi.log_file_name) - 1);
+ glob_mi.pos = atoi(row[1]); // atoi() is ok, since offset is <= 1GB
+ if(glob_mi.pos < 4)
+ glob_mi.pos = 4; // don't hit the magic number
+ glob_mi.pending = 0;
+ flush_master_info(&glob_mi);
+ }
+
+ mc_mysql_free_result(master_status_res);
+ }
+
+ if(mc_mysql_query(&mysql, "UNLOCK TABLES", 0))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+ }
+err:
+ pthread_mutex_unlock(&LOCK_slave);
+ if(slave_was_running)
+ start_slave(0, 0);
+ mc_mysql_close(&mysql); // safe to call since we always do mc_mysql_init()
+ if(!error)
+ send_ok(&thd->net);
+
+ return error;
+}
+
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 68f2b4ba6c4..a988658ed68 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -3,20 +3,43 @@
#include "slave.h"
+/*
+  Registration record for one slave server, stored in the slave_list
+  hash (see register_slave()/show_slave_hosts()).  Buffers include room
+  for the terminating nul.
+*/
+typedef struct st_slave_info
+{
+ uint32 server_id; /* unique server id reported by the slave */
+ char host[HOSTNAME_LENGTH+1];
+ char user[USERNAME_LENGTH+1];
+ char password[HASH_PASSWORD_LENGTH+1];
+ uint16 port; /* presumably the port the slave serves on - confirm */
+} SLAVE_INFO;
+
+extern bool opt_show_slave_auth_info;
+extern HASH slave_list;
extern char* master_host;
extern my_string opt_bin_logname, master_info_file;
extern uint32 server_id;
extern bool server_id_supplied;
extern I_List<i_string> binlog_do_db, binlog_ignore_db;
+#ifndef DBUG_OFF
+extern int max_binlog_dump_events;
+extern bool opt_sporadic_binlog_dump_fail;
+#endif
+
File open_binlog(IO_CACHE *log, const char *log_file_name,
const char **errmsg);
int start_slave(THD* thd = 0, bool net_report = 1);
int stop_slave(THD* thd = 0, bool net_report = 1);
+int load_master_data(THD* thd);
+int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi);
int change_master(THD* thd);
+int show_slave_hosts(THD* thd);
+int show_binlog_events(THD* thd);
void reset_slave();
void reset_master();
+void init_slave_list();
+void end_slave_list();
+int register_slave(THD* thd, uchar* packet, uint packet_length);
int purge_master_logs(THD* thd, const char* to_log);
bool log_in_use(const char* log_name);
void adjust_linfo_offsets(my_off_t purge_offset);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 90b3c6eefaf..eff200e9bdf 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -385,6 +385,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
thd->fatal_error)
goto err;
thd->proc_info="preparing";
+ result->initialize_tables(&join);
if ((tmp=join_read_const_tables(&join)) > 0)
goto err;
if (tmp && !(select_options & SELECT_DESCRIBE))
@@ -403,7 +404,22 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
goto err; /* purecov: inspected */
}
if (join.const_tables && !thd->locked_tables)
+ {
+ TABLE **table, **end;
+ for (table=join.table, end=table + join.const_tables ;
+ table != end;
+ table++)
+ {
+ /* BDB tables require that we call index_end() before doing an unlock */
+ if ((*table)->key_read)
+ {
+ (*table)->key_read=0;
+ (*table)->file->extra(HA_EXTRA_NO_KEYREAD);
+ }
+ (*table)->file->index_end();
+ }
mysql_unlock_some_tables(thd, join.table,join.const_tables);
+ }
if (!conds && join.outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
@@ -484,8 +500,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
(group && order) ||
test(select_options & OPTION_BUFFER_RESULT)));
- make_join_readinfo(&join, (select_options & SELECT_DESCRIBE) |
- (ftfuncs.elements ? 0 : SELECT_USE_CACHE)); // No cache for MATCH
+ // No cache for MATCH
+ make_join_readinfo(&join,
+ (select_options & (SELECT_DESCRIBE |
+ SELECT_NO_JOIN_CACHE)) |
+ (ftfuncs.elements ? SELECT_NO_JOIN_CACHE : 0));
/* Need to tell Innobase that to play it safe, it should fetch all
columns of the tables: this is because MySQL
@@ -2465,7 +2484,7 @@ make_join_readinfo(JOIN *join,uint options)
** if previous table use cache
*/
table->status=STATUS_NO_RECORD;
- if (i != join->const_tables && (options & SELECT_USE_CACHE) &&
+ if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) &&
tab->use_quick != 2 && !tab->on_expr)
{
if ((options & SELECT_DESCRIBE) ||
@@ -2478,7 +2497,7 @@ make_join_readinfo(JOIN *join,uint options)
/* These init changes read_record */
if (tab->use_quick == 2)
{
- join->thd->lex.options|=QUERY_NO_GOOD_INDEX_USED;
+ join->thd->lex.select_lex.options|=QUERY_NO_GOOD_INDEX_USED;
tab->read_first_record= join_init_quick_read_record;
statistic_increment(select_range_check_count, &LOCK_status);
}
@@ -2493,7 +2512,7 @@ make_join_readinfo(JOIN *join,uint options)
}
else
{
- join->thd->lex.options|=QUERY_NO_INDEX_USED;
+ join->thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
statistic_increment(select_scan_count, &LOCK_status);
}
}
@@ -2505,7 +2524,7 @@ make_join_readinfo(JOIN *join,uint options)
}
else
{
- join->thd->lex.options|=QUERY_NO_INDEX_USED;
+ join->thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
statistic_increment(select_full_join_count, &LOCK_status);
}
}
@@ -2807,7 +2826,12 @@ return_zero_rows(select_result *result,TABLE_LIST *tables,List<Item> &fields,
if (send_row)
result->send_data(fields);
if (tables) // Not from do_select()
+ {
+ /* Close open cursors */
+ for (TABLE_LIST *table=tables; table ; table=table->next)
+ table->table->file->index_end();
result->send_eof(); // Should be safe
+ }
}
DBUG_RETURN(0);
}
@@ -3920,7 +3944,7 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
thd->proc_info="converting HEAP to MyISAM";
if (create_myisam_tmp_table(&new_table,param,
- thd->lex.options | thd->options))
+ thd->lex.select_lex.options | thd->options))
goto err2;
if (open_tmp_table(&new_table))
goto err1;
@@ -5130,9 +5154,11 @@ part_of_refkey(TABLE *table,Field *field)
** Returns: 1 if key is ok.
** 0 if key can't be used
** -1 if reverse key can be used
+** used_key_parts is set to key parts used if length != 0
*****************************************************************************/
-static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx)
+static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
+ uint *used_key_parts)
{
KEY_PART_INFO *key_part,*key_part_end;
key_part=table->key_info[idx].key_part;
@@ -5164,6 +5190,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx)
reverse=flag; // Remember if reverse
key_part++;
}
+ *used_key_parts= (uint) (key_part - table->key_info[idx].key_part);
return reverse;
}
@@ -5225,10 +5252,41 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
if (ref_key >= 0)
{
+ int order_direction;
+ uint used_key_parts;
/* Check if we get the rows in requested sorted order by using the key */
if ((usable_keys & ((key_map) 1 << ref_key)) &&
- test_if_order_by_key(order,table,ref_key) == 1)
+ (order_direction = test_if_order_by_key(order,table,ref_key,
+ &used_key_parts)))
+ {
+ if (order_direction == -1)
+ {
+ if (select && select->quick)
+ {
+ // ORDER BY ref_key DESC
+ QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
+ used_key_parts);
+ if (!tmp || tmp->error)
+ {
+ delete tmp;
+ DBUG_RETURN(0); // Reverse sort not supported
+ }
+ select->quick=tmp;
+ DBUG_RETURN(1);
+ }
+ if (tab->ref.key_parts < used_key_parts)
+ {
+ /*
+ SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
+ TODO:
+ Add a new traversal function to read last matching row and
+ traverse backwards.
+ */
+ DBUG_RETURN(0);
+ }
+ }
DBUG_RETURN(1); /* No need to sort */
+ }
}
else
{
@@ -5247,10 +5305,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
for (nr=0; keys ; keys>>=1, nr++)
{
+ uint not_used;
if (keys & 1)
{
int flag;
- if ((flag=test_if_order_by_key(order,table,nr)))
+ if ((flag=test_if_order_by_key(order, table, nr, &not_used)))
{
if (!no_changes)
{
@@ -5311,7 +5370,9 @@ create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit)
goto err;
}
}
- table->found_records=filesort(&table,sortorder,length,
+ if (table->tmp_table)
+ table->file->info(HA_STATUS_VARIABLE); // Get record count
+ table->found_records=filesort(table,sortorder,length,
select, 0L, select_limit, &examined_rows);
delete select; // filesort did select
tab->select=0;
@@ -6647,7 +6708,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
DBUG_ENTER("select_describe");
/* Don't log this into the slow query log */
- join->thd->lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
+ join->thd->lex.select_lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
field_list.push_back(new Item_empty_string("table",NAME_LEN));
field_list.push_back(new Item_empty_string("type",10));
field_list.push_back(item=new Item_empty_string("possible_keys",
@@ -6806,7 +6867,7 @@ static void describe_info(THD *thd, const char *info)
String *packet= &thd->packet;
/* Don't log this into the slow query log */
- thd->lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
+ thd->lex.select_lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
field_list.push_back(new Item_empty_string("Comment",80));
if (send_fields(thd,field_list,1))
return; /* purecov: inspected */
diff --git a/sql/sql_select.h b/sql/sql_select.h
index bb9bb374c76..0ec1854d641 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -138,8 +138,11 @@ class TMP_TABLE_PARAM {
}
inline void cleanup(void)
{
- delete [] copy_field;
- copy_field=0;
+ if (copy_field) /* Fix for Intel compiler */
+ {
+ delete [] copy_field;
+ copy_field=0;
+ }
}
};
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index ac89b7a2782..199d6a764e0 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -844,18 +844,22 @@ store_create_info(THD *thd, TABLE *table, String *packet)
for (uint i=0 ; i < table->keys ; i++,key_info++)
{
+ KEY_PART_INFO *key_part= key_info->key_part;
+ bool found_primary=0;
packet->append(",\n ", 4);
- KEY_PART_INFO *key_part= key_info->key_part;
- if (i == primary_key)
+ if (i == primary_key && !strcmp(key_info->name,"PRIMARY"))
+ {
+ found_primary=1;
packet->append("PRIMARY ", 8);
+ }
else if (key_info->flags & HA_NOSAME)
packet->append("UNIQUE ", 7);
else if (key_info->flags & HA_FULLTEXT)
packet->append("FULLTEXT ", 9);
packet->append("KEY ", 4);
- if (i != primary_key)
+ if (!found_primary)
append_identifier(thd,packet,key_info->name);
packet->append(" (", 2);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 95f5d4da24d..e91a9a83e73 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -59,9 +59,9 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists)
VOID(pthread_mutex_lock(&LOCK_open));
pthread_mutex_unlock(&thd->mysys_var->mutex);
- if(global_read_lock)
+ if (global_read_lock)
{
- if(thd->global_read_lock)
+ if (thd->global_read_lock)
{
my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE,MYF(0),
tables->real_name);
@@ -221,6 +221,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
db_options|=HA_OPTION_PACK_RECORD;
file=get_new_handler((TABLE*) 0, create_info->db_type);
+ if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
+ (file->option_flag() & HA_NO_TEMP_TABLES))
+ {
+ my_error(ER_ILLEGAL_HA,MYF(0),table_name);
+ DBUG_RETURN(-1);
+ }
+
/* Don't pack keys in old tables if the user has requested this */
while ((sql_field=it++))
@@ -423,6 +430,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
column->field_name);
DBUG_RETURN(-1);
}
+ if (key->type == Key::FULLTEXT &&
+ (file->option_flag() & HA_NO_FULLTEXT_KEY))
+ {
+ my_printf_error(ER_WRONG_KEY_COLUMN, ER(ER_WRONG_KEY_COLUMN), MYF(0),
+ column->field_name);
+ DBUG_RETURN(-1);
+ }
if (f_is_blob(sql_field->pack_flag))
{
if (!(file->option_flag() & HA_BLOB_KEY))
@@ -825,13 +839,13 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table)
int lock_retcode;
pthread_mutex_lock(&LOCK_open);
- if((lock_retcode = lock_table_name(thd, table)) < 0)
+ if ((lock_retcode = lock_table_name(thd, table)) < 0)
{
pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(-1);
}
- if(lock_retcode && wait_for_locked_table_names(thd, table))
+ if (lock_retcode && wait_for_locked_table_names(thd, table))
{
unlock_table_name(thd, table);
pthread_mutex_unlock(&LOCK_open);
@@ -839,7 +853,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table)
}
pthread_mutex_unlock(&LOCK_open);
- if(my_copy(src_path,
+ if (my_copy(src_path,
fn_format(dst_path, dst_path,"",
reg_ext, 4),
MYF(MY_WME)))
@@ -853,7 +867,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table)
// generate table will try to send OK which messes up the output
// for the client
- if(generate_table(thd, table, 0))
+ if (generate_table(thd, table, 0))
{
unlock_table_name(thd, table);
thd->net.no_send_ok = save_no_send_ok;
@@ -914,7 +928,7 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables,
// now we should be able to open the partially restored table
// to finish the restore in the handler later on
- if(!(table->table = reopen_name_locked_table(thd, table)))
+ if (!(table->table = reopen_name_locked_table(thd, table)))
unlock_table_name(thd, table);
}
@@ -1091,7 +1105,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
TABLE *table,*new_table;
int error;
char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN],
- *table_name,*db;
+ *table_name,*db;
+ char index_file[FN_REFLEN], data_file[FN_REFLEN];
bool use_timestamp=0;
ha_rows copied,deleted;
ulonglong next_insert_id;
@@ -1113,10 +1128,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
strmov(new_name_buff,new_name);
fn_same(new_name_buff,table_name,3);
+ // Check if name changed
#ifdef FN_LOWER_CASE
- if (!my_strcasecmp(new_name_buff,table_name))// Check if name changed
+ if (!strcmp(db,new_db) && !my_strcasecmp(new_name_buff,table_name))
#else
- if (!strcmp(new_name_buff,table_name)) // Check if name changed
+ if (!strcmp(db,new_db) && !strcmp(new_name_buff,table_name))
#endif
new_name=table_name; // No. Make later check easier
else
@@ -1233,7 +1249,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (drop->type == Alter_drop::COLUMN &&
!my_strcasecmp(field->field_name, drop->name))
+ {
+ /* Reset auto_increment value if it was dropped */
+ if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
+ !(create_info->used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value=0;
+ create_info->used_fields|=HA_CREATE_USED_AUTO;
+ }
break;
+ }
}
if (drop)
{
@@ -1440,6 +1465,53 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (table->tmp_table)
create_info->options|=HA_LEX_CREATE_TMP_TABLE;
+ /*
+ Handling of symlinked tables:
+ If no rename:
+ Create new data file and index file on the same disk as the
+ old data and index files.
+ Copy data.
+ Rename new data file over old data file and new index file over
+ old index file.
+ Symlinks are not changed.
+
+ If rename:
+ Create new data file and index file on the same disk as the
+ old data and index files. Create also symlinks to point at
+ the new tables.
+ Copy data.
+ At end, rename temporary tables and symlinks to temporary table
+ to final table name.
+ Remove old table and old symlinks
+
+ If rename is made to another database:
+ Create new tables in new database.
+ Copy data.
+ Remove old table and symlinks.
+ */
+
+ if (!strcmp(db, new_db)) // Ignore symlink if db changed
+ {
+ if (create_info->index_file_name)
+ {
+ /* Fix index_file_name to have 'tmp_name' as basename */
+ strmov(index_file, tmp_name);
+ create_info->index_file_name=fn_same(index_file,
+ create_info->index_file_name,
+ 1);
+ }
+ if (create_info->data_file_name)
+ {
+ /* Fix data_file_name to have 'tmp_name' as basename */
+ strmov(data_file, tmp_name);
+ create_info->data_file_name=fn_same(data_file,
+ create_info->data_file_name,
+ 1);
+ }
+ }
+ else
+ create_info->data_file_name=create_info->index_file_name=0;
+
if ((error=mysql_create_table(thd, new_db, tmp_name,
create_info,
create_list,key_list,1,1))) // no logging
@@ -1685,13 +1757,23 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
- (from->found_records = filesort(&from, sortorder, length,
- (SQL_SELECT *) 0, 0L, HA_POS_ERROR,
+ (from->found_records = filesort(from, sortorder, length,
+ (SQL_SELECT *) 0, 0L, HA_POS_ERROR,
&examined_rows))
== HA_POS_ERROR)
goto err;
};
+ /* Turn off recovery logging since rollback of an
+ alter table is to delete the new table so there
+ is no need to log the changes to it. */
+ error = ha_recovery_logging(thd,false);
+ if (error)
+ {
+ error = 1;
+ goto err;
+ }
+
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (handle_duplicates == DUP_IGNORE ||
handle_duplicates == DUP_REPLACE)
@@ -1737,6 +1819,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (to->file->activate_all_index(thd))
error=1;
+ tmp_error = ha_recovery_logging(thd,true);
/*
Ensure that the new table is saved properly to disk so that we
can do a rename
@@ -1748,6 +1831,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (to->file->external_lock(thd,F_UNLCK))
error=1;
err:
+ tmp_error = ha_recovery_logging(thd,true);
free_io_cache(from);
*copied= found_count;
*deleted=delete_count;
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 3edfdd3d5ef..d20bc74ecb2 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -96,8 +96,7 @@ void print_cached_tables(void)
}
-void TEST_filesort(TABLE **table,SORT_FIELD *sortorder,uint s_length,
- ha_rows special)
+void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special)
{
char buff[256],buff2[256];
String str(buff,sizeof(buff)),out(buff2,sizeof(buff2));
diff --git a/sql/sql_unions.cc b/sql/sql_unions.cc
new file mode 100644
index 00000000000..55aca0f5b68
--- /dev/null
+++ b/sql/sql_unions.cc
@@ -0,0 +1,34 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & Monty & Sinisa
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+/* Union of selects */
+
+#include "mysql_priv.h"
+
+/*
+ Do a union of selects
+*/
+
+
+/*
+  Placeholder for UNION execution: walks the SELECT_LEX chain starting
+  at lex->select_lex but performs no work yet, and always returns 0.
+  NOTE(review): stub implementation - 'thd' and 'no_of_selects' are
+  currently unused; real union handling is still to be written.
+*/
+int mysql_union(THD *thd,LEX *lex,uint no_of_selects)
+{
+ SELECT_LEX *sl;
+ for (sl=&lex->select_lex;sl;sl=sl->next)
+ {
+ }
+ return 0;
+}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b6b22ecbc99..a6ded7cef9c 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -128,7 +128,7 @@ int mysql_update(THD *thd,
/* If running in safe sql mode, don't allow updates without keys */
if (!table->quick_keys)
{
- thd->lex.options|=QUERY_NO_INDEX_USED;
+ thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
if ((thd->options & OPTION_SAFE_UPDATES) && limit == HA_POS_ERROR)
{
delete select;
@@ -184,7 +184,7 @@ int mysql_update(THD *thd,
MYF(MY_FAE | MY_ZEROFILL));
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
- (table->found_records = filesort(&table, sortorder, length,
+ (table->found_records = filesort(table, sortorder, length,
(SQL_SELECT *) 0, 0L,
HA_POS_ERROR, &examined_rows))
== HA_POS_ERROR)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 172fb0830fe..c013ebe1c8c 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -21,6 +21,7 @@
#define YYINITDEPTH 100
#define YYMAXDEPTH 3200 /* Because of 64K stack */
#define Lex current_lex
+#define Select Lex->select
#include "mysql_priv.h"
#include "slave.h"
#include "sql_acl.h"
@@ -129,6 +130,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token LOAD
%token LOCK_SYM
%token UNLOCK_SYM
+%token BINLOG_SYM
+%token EVENTS_SYM
%token ACTION
%token AGGREGATE_SYM
@@ -161,6 +164,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token DELAY_KEY_WRITE_SYM
%token DESC
%token DESCRIBE
+%token DIRECTORY_SYM
%token DISTINCT
%token DISABLE_SYM
%token DYNAMIC_SYM
@@ -193,6 +197,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token IDENT
%token IGNORE_SYM
%token INDEX
+%token INDEXES
%token INFILE
%token INNER_SYM
%token INNOBASE_SYM
@@ -464,12 +469,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
opt_escape
%type <string>
- text_string
+ text_string
%type <num>
type int_type real_type order_dir opt_field_spec set_option lock_option
udf_type if_exists opt_local opt_table_options table_options
- table_option opt_if_not_exists
+ table_option opt_if_not_exists
%type <ulong_num>
ULONG_NUM raid_types
@@ -526,7 +531,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
select_item_list select_item values_list no_braces
limit_clause delete_limit_clause fields opt_values values
procedure_list procedure_list2 procedure_item
- when_list2 expr_list2 handler
+ when_list2 expr_list2 handler
opt_precision opt_ignore opt_column opt_restrict
grant revoke set lock unlock string_list field_options field_option
field_opt_list opt_binary table_lock_list table_lock varchar
@@ -541,7 +546,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
opt_mi_check_type opt_to mi_check_types normal_join
table_to_table_list table_to_table opt_table_list opt_as
handler_rkey_function handler_rkey_mode handler_read_or_scan
- END_OF_INPUT
+ single_multi table_wild_list table_wild_one opt_wild union union_list
+ precision
+END_OF_INPUT
%type <NONE>
'-' '+' '*' '/' '%' '(' ')'
@@ -647,7 +654,6 @@ master_def:
}
-
/* create a table */
create:
@@ -671,36 +677,41 @@ create:
| CREATE opt_unique_or_fulltext INDEX ident ON table_ident
{
- Lex->sql_command= SQLCOM_CREATE_INDEX;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_CREATE_INDEX;
if (!add_table_to_list($6,NULL,1))
YYABORT;
- Lex->create_list.empty();
- Lex->key_list.empty();
- Lex->col_list.empty();
- Lex->change=NullS;
+ lex->create_list.empty();
+ lex->key_list.empty();
+ lex->col_list.empty();
+ lex->change=NullS;
}
'(' key_list ')'
{
- Lex->key_list.push_back(new Key($2,$4.str,Lex->col_list));
- Lex->col_list.empty();
+ LEX *lex=Lex;
+ lex->key_list.push_back(new Key($2,$4.str,lex->col_list));
+ lex->col_list.empty();
}
| CREATE DATABASE opt_if_not_exists ident
{
- Lex->sql_command=SQLCOM_CREATE_DB;
- Lex->name=$4.str;
- Lex->create_info.options=$3;
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_CREATE_DB;
+ lex->name=$4.str;
+ lex->create_info.options=$3;
}
| CREATE udf_func_type UDF_SYM ident
{
- Lex->sql_command = SQLCOM_CREATE_FUNCTION;
- Lex->udf.name=$4.str;
- Lex->udf.name_length=$4.length;
- Lex->udf.type= $2;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_CREATE_FUNCTION;
+ lex->udf.name=$4.str;
+ lex->udf.name_length=$4.length;
+ lex->udf.type= $2;
}
UDF_RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING
{
- Lex->udf.returns=(Item_result) $7;
- Lex->udf.dl=$9.str;
+ LEX *lex=Lex;
+ lex->udf.returns=(Item_result) $7;
+ lex->udf.dl=$9.str;
}
create2:
@@ -711,8 +722,9 @@ create3:
/* empty */ {}
| opt_duplicate opt_as SELECT_SYM
{
- Lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
- mysql_init_select(Lex);
+ LEX *lex=Lex;
+ lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
+ mysql_init_select(lex);
}
select_options select_item_list opt_select_from {}
@@ -762,15 +774,17 @@ create_table_option:
{
/* Move the union list to the merge_list */
LEX *lex=Lex;
- TABLE_LIST *table_list= (TABLE_LIST*) lex->table_list.first;
- lex->create_info.merge_list= lex->table_list;
+ TABLE_LIST *table_list= (TABLE_LIST*) lex->select->table_list.first;
+ lex->create_info.merge_list= lex->select->table_list;
lex->create_info.merge_list.elements--;
lex->create_info.merge_list.first= (byte*) (table_list->next);
- lex->table_list.elements=1;
- lex->table_list.next= (byte**) &(table_list->next);
+ lex->select->table_list.elements=1;
+ lex->select->table_list.next= (byte**) &(table_list->next);
table_list->next=0;
lex->create_info.used_fields|= HA_CREATE_USED_UNION;
}
+ | DATA_SYM DIRECTORY_SYM EQ TEXT_STRING { Lex->create_info.data_file_name= $4.str; }
+ | INDEX DIRECTORY_SYM EQ TEXT_STRING { Lex->create_info.index_file_name= $4.str; }
table_types:
ISAM_SYM { $$= DB_TYPE_ISAM; }
@@ -818,8 +832,9 @@ field_list_item:
}
| key_type opt_ident '(' key_list ')'
{
- Lex->key_list.push_back(new Key($1,$2,Lex->col_list));
- Lex->col_list.empty(); /* Alloced by sql_alloc */
+ LEX *lex=Lex;
+ lex->key_list.push_back(new Key($1,$2,lex->col_list));
+ lex->col_list.empty(); /* Alloced by sql_alloc */
}
| opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references
{
@@ -837,16 +852,18 @@ opt_constraint:
field_spec:
field_ident
{
- Lex->length=Lex->dec=0; Lex->type=0; Lex->interval=0;
- Lex->default_value=0;
+ LEX *lex=Lex;
+ lex->length=lex->dec=0; lex->type=0; lex->interval=0;
+ lex->default_value=0;
}
type opt_attribute
{
+ LEX *lex=Lex;
if (add_field_to_list($1.str,
(enum enum_field_types) $3,
- Lex->length,Lex->dec,Lex->type,
- Lex->default_value,Lex->change,
- Lex->interval))
+ lex->length,lex->dec,lex->type,
+ lex->default_value,lex->change,
+ lex->interval))
YYABORT;
}
@@ -898,12 +915,14 @@ type:
{ $$=FIELD_TYPE_DECIMAL;}
| ENUM {Lex->interval_list.empty();} '(' string_list ')'
{
- Lex->interval=typelib(Lex->interval_list);
+ LEX *lex=Lex;
+ lex->interval=typelib(lex->interval_list);
$$=FIELD_TYPE_ENUM;
}
| SET { Lex->interval_list.empty();} '(' string_list ')'
{
- Lex->interval=typelib(Lex->interval_list);
+ LEX *lex=Lex;
+ lex->interval=typelib(lex->interval_list);
$$=FIELD_TYPE_SET;
}
@@ -935,7 +954,14 @@ real_type:
float_options:
/* empty */ {}
| '(' NUM ')' { Lex->length=$2.str; }
- | '(' NUM ',' NUM ')' { Lex->length=$2.str; Lex->dec=$4.str; }
+ | precision {}
+
+precision:
+ '(' NUM ',' NUM ')'
+ {
+ LEX *lex=Lex;
+ lex->length=$2.str; lex->dec=$4.str;
+ }
field_options:
/* empty */ {}
@@ -955,7 +981,7 @@ opt_len:
opt_precision:
/* empty */ {}
- | '(' NUM ',' NUM ')' { Lex->length=$2.str; Lex->dec=$4.str; }
+ | precision {}
opt_attribute:
/* empty */ {}
@@ -1022,6 +1048,7 @@ key_or_index:
keys_or_index:
KEYS {}
| INDEX {}
+ | INDEXES {}
opt_unique_or_fulltext:
/* empty */ { $$= Key::MULTIPLE; }
@@ -1062,10 +1089,10 @@ alter:
lex->col_list.empty();
lex->drop_list.empty();
lex->alter_list.empty();
- lex->order_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->db=lex->name=0;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
+ lex->select->db=lex->name=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
lex->create_info.db_type= DB_TYPE_DEFAULT;
lex->alter_keys_onoff=LEAVE_AS_IS;
@@ -1083,40 +1110,70 @@ add_column:
alter_list_item:
add_column field_list_item opt_place { Lex->simple_alter=0; }
| add_column '(' field_list ')' { Lex->simple_alter=0; }
- | CHANGE opt_column field_ident { Lex->change= $3.str; Lex->simple_alter=0; }
+ | CHANGE opt_column field_ident
+ {
+ LEX *lex=Lex;
+ lex->change= $3.str; lex->simple_alter=0;
+ }
field_spec
| MODIFY_SYM opt_column field_ident
{
- Lex->length=Lex->dec=0; Lex->type=0; Lex->interval=0;
- Lex->default_value=0;
- Lex->simple_alter=0;
+ LEX *lex=Lex;
+ lex->length=lex->dec=0; lex->type=0; lex->interval=0;
+ lex->default_value=0;
+ lex->simple_alter=0;
}
type opt_attribute
{
+ LEX *lex=Lex;
if (add_field_to_list($3.str,
(enum enum_field_types) $5,
- Lex->length,Lex->dec,Lex->type,
- Lex->default_value, $3.str,
- Lex->interval))
+ lex->length,lex->dec,lex->type,
+ lex->default_value, $3.str,
+ lex->interval))
YYABORT;
- Lex->simple_alter=0;
+ lex->simple_alter=0;
}
| DROP opt_column field_ident opt_restrict
- { Lex->drop_list.push_back(new Alter_drop(Alter_drop::COLUMN,
- $3.str)); Lex->simple_alter=0; }
- | DROP PRIMARY_SYM KEY_SYM { Lex->drop_primary=1; Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->drop_list.push_back(new Alter_drop(Alter_drop::COLUMN,
+ $3.str)); lex->simple_alter=0;
+ }
+ | DROP PRIMARY_SYM KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->drop_primary=1; lex->simple_alter=0;
+ }
| DROP FOREIGN KEY_SYM opt_ident { Lex->simple_alter=0; }
| DROP key_or_index field_ident
- { Lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
- $3.str)); Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ $3.str));
+ lex->simple_alter=0;
+ }
| DISABLE_SYM KEYS { Lex->alter_keys_onoff=DISABLE; }
| ENABLE_SYM KEYS { Lex->alter_keys_onoff=ENABLE; }
| ALTER opt_column field_ident SET DEFAULT literal
- { Lex->alter_list.push_back(new Alter_column($3.str,$6)); Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->alter_list.push_back(new Alter_column($3.str,$6));
+ lex->simple_alter=0;
+ }
| ALTER opt_column field_ident DROP DEFAULT
- { Lex->alter_list.push_back(new Alter_column($3.str,(Item*) 0)); Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->alter_list.push_back(new Alter_column($3.str,(Item*) 0));
+ lex->simple_alter=0;
+ }
| RENAME opt_to table_alias table_ident
- { Lex->db=$4->db.str ; Lex->name= $4->table.str; Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->select->db=$4->db.str;
+ lex->name= $4->table.str;
+ lex->simple_alter=0;
+ }
| create_table_options { Lex->simple_alter=0; }
| order_clause { Lex->simple_alter=0; }
@@ -1146,14 +1203,16 @@ opt_to:
slave:
SLAVE START_SYM
{
- Lex->sql_command = SQLCOM_SLAVE_START;
- Lex->type = 0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_START;
+ lex->type = 0;
}
|
SLAVE STOP_SYM
{
- Lex->sql_command = SQLCOM_SLAVE_STOP;
- Lex->type = 0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_STOP;
+ lex->type = 0;
};
restore:
@@ -1179,8 +1238,9 @@ backup:
repair:
REPAIR table_or_tables
{
- Lex->sql_command = SQLCOM_REPAIR;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_REPAIR;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
@@ -1204,24 +1264,27 @@ mi_check_type:
analyze:
ANALYZE_SYM table_or_tables
{
- Lex->sql_command = SQLCOM_ANALYZE;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_ANALYZE;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
check:
CHECK_SYM table_or_tables
{
- Lex->sql_command = SQLCOM_CHECK;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_CHECK;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
optimize:
OPTIMIZE table_or_tables
{
- Lex->sql_command = SQLCOM_OPTIMIZE;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_OPTIMIZE;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
@@ -1252,11 +1315,11 @@ select:
SELECT_SYM
{
LEX *lex=Lex;
- lex->sql_command= SQLCOM_SELECT;
+ if (lex->sql_command!=SQLCOM_UNION_SELECT) lex->sql_command= SQLCOM_SELECT;
lex->lock_option=TL_READ;
mysql_init_select(lex);
}
- select_options select_item_list select_into select_lock_type
+ select_options select_item_list select_into select_lock_type union
select_into:
limit_clause {}
@@ -1277,13 +1340,13 @@ select_option_list:
| select_option
select_option:
- STRAIGHT_JOIN { Lex->options|= SELECT_STRAIGHT_JOIN; }
+ STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; }
| HIGH_PRIORITY { Lex->lock_option= TL_READ_HIGH_PRIORITY; }
- | DISTINCT { Lex->options|= SELECT_DISTINCT; }
- | SQL_SMALL_RESULT { Lex->options|= SELECT_SMALL_RESULT; }
- | SQL_BIG_RESULT { Lex->options|= SELECT_BIG_RESULT; }
- | SQL_BUFFER_RESULT { Lex->options|= OPTION_BUFFER_RESULT; }
- | SQL_CALC_FOUND_ROWS { Lex->options|= OPTION_FOUND_ROWS; }
+ | DISTINCT { Select->options|= SELECT_DISTINCT; }
+ | SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; }
+ | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; }
+ | SQL_BUFFER_RESULT { Select->options|= OPTION_BUFFER_RESULT; }
+ | SQL_CALC_FOUND_ROWS { Select->options|= OPTION_FOUND_ROWS; }
| ALL {}
select_lock_type:
@@ -1470,10 +1533,10 @@ simple_expr:
| '(' expr ')' { $$= $2; }
| '{' ident expr '}' { $$= $3; }
| MATCH '(' ident_list ')' AGAINST '(' expr ')'
- { Lex->ftfunc_list.push_back(
+ { Select->ftfunc_list.push_back(
(Item_func_match *)($$=new Item_func_match(*$3,$7))); }
| MATCH ident_list AGAINST '(' expr ')'
- { Lex->ftfunc_list.push_back(
+ { Select->ftfunc_list.push_back(
(Item_func_match *)($$=new Item_func_match(*$2,$5))); }
| BINARY expr %prec NEG { $$= new Item_func_binary($2); }
| CASE_SYM opt_expr WHEN_SYM when_list opt_else END
@@ -1704,30 +1767,30 @@ sum_expr:
{ $$=new Item_sum_sum($3); }
in_sum_expr:
- { Lex->in_sum_expr++ }
+ { Select->in_sum_expr++ }
expr
{
- Lex->in_sum_expr--;
+ Select->in_sum_expr--;
$$=$2;
}
expr_list:
- { Lex->expr_list.push_front(new List<Item>); }
+ { Select->expr_list.push_front(new List<Item>); }
expr_list2
- { $$= Lex->expr_list.pop(); }
+ { $$= Select->expr_list.pop(); }
expr_list2:
- expr { Lex->expr_list.head()->push_back($1); }
- | expr_list2 ',' expr { Lex->expr_list.head()->push_back($3); }
+ expr { Select->expr_list.head()->push_back($1); }
+ | expr_list2 ',' expr { Select->expr_list.head()->push_back($3); }
ident_list:
- { Lex->expr_list.push_front(new List<Item>); }
+ { Select->expr_list.push_front(new List<Item>); }
ident_list2
- { $$= Lex->expr_list.pop(); }
+ { $$= Select->expr_list.pop(); }
ident_list2:
- simple_ident { Lex->expr_list.head()->push_back($1); }
- | ident_list2 ',' simple_ident { Lex->expr_list.head()->push_back($3); }
+ simple_ident { Select->expr_list.head()->push_back($1); }
+ | ident_list2 ',' simple_ident { Select->expr_list.head()->push_back($3); }
opt_expr:
/* empty */ { $$= NULL; }
@@ -1738,20 +1801,22 @@ opt_else:
| ELSE expr { $$= $2; }
when_list:
- { Lex->when_list.push_front(new List<Item>) }
+ { Select->when_list.push_front(new List<Item>) }
when_list2
- { $$= Lex->when_list.pop(); }
+ { $$= Select->when_list.pop(); }
when_list2:
expr THEN_SYM expr
{
- Lex->when_list.head()->push_back($1);
- Lex->when_list.head()->push_back($3);
+ SELECT_LEX *sel=Select;
+ sel->when_list.head()->push_back($1);
+ sel->when_list.head()->push_back($3);
}
| when_list2 WHEN_SYM expr THEN_SYM expr
{
- Lex->when_list.head()->push_back($3);
- Lex->when_list.head()->push_back($5);
+ SELECT_LEX *sel=Select;
+ sel->when_list.head()->push_back($3);
+ sel->when_list.head()->push_back($5);
}
opt_pad:
@@ -1766,15 +1831,21 @@ join_table_list:
| join_table_list INNER_SYM JOIN_SYM join_table ON expr
{ add_join_on($4,$6); $$=$4; }
| join_table_list INNER_SYM JOIN_SYM join_table
- { Lex->db1=$1->db; Lex->table1=$1->name;
- Lex->db2=$4->db; Lex->table2=$4->name; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->db1=$1->db; sel->table1=$1->name;
+ sel->db2=$4->db; sel->table2=$4->name;
+ }
USING '(' using_list ')'
{ add_join_on($4,$8); $$=$4; }
| join_table_list LEFT opt_outer JOIN_SYM join_table ON expr
{ add_join_on($5,$7); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; }
| join_table_list LEFT opt_outer JOIN_SYM join_table
- { Lex->db1=$1->db; Lex->table1=$1->name;
- Lex->db2=$5->db; Lex->table2=$5->name; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->db1=$1->db; sel->table1=$1->name;
+ sel->db2=$5->db; sel->table2=$5->name;
+ }
USING '(' using_list ')'
{ add_join_on($5,$9); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; }
| join_table_list NATURAL LEFT opt_outer JOIN_SYM join_table
@@ -1782,8 +1853,11 @@ join_table_list:
| join_table_list RIGHT opt_outer JOIN_SYM join_table ON expr
{ add_join_on($1,$7); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$1; }
| join_table_list RIGHT opt_outer JOIN_SYM join_table
- { Lex->db1=$1->db; Lex->table1=$1->name;
- Lex->db2=$5->db; Lex->table2=$5->name; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->db1=$1->db; sel->table1=$1->name;
+ sel->db2=$5->db; sel->table2=$5->name;
+ }
USING '(' using_list ')'
{ add_join_on($1,$9); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$1; }
| join_table_list NATURAL RIGHT opt_outer JOIN_SYM join_table
@@ -1797,10 +1871,16 @@ normal_join:
| CROSS JOIN_SYM {}
join_table:
- { Lex->use_index_ptr=Lex->ignore_index_ptr=0; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->use_index_ptr=sel->ignore_index_ptr=0;
+ }
table_ident opt_table_alias opt_key_definition
- { if (!($$=add_table_to_list($2,$3,0,TL_UNLOCK, Lex->use_index_ptr,
- Lex->ignore_index_ptr))) YYABORT; }
+ {
+ SELECT_LEX *sel=Select;
+ if (!($$=add_table_to_list($2,$3,0,TL_UNLOCK, sel->use_index_ptr,
+ sel->ignore_index_ptr))) YYABORT;
+ }
| '{' ident join_table LEFT OUTER JOIN_SYM join_table ON expr '}'
{ add_join_on($7,$9); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; }
@@ -1811,30 +1891,41 @@ opt_outer:
opt_key_definition:
/* empty */ {}
| USE_SYM key_usage_list
- { Lex->use_index= *$2; Lex->use_index_ptr= &Lex->use_index; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->use_index= *$2;
+ sel->use_index_ptr= &sel->use_index;
+ }
| IGNORE_SYM key_usage_list
- { Lex->ignore_index= *$2; Lex->ignore_index_ptr= &Lex->ignore_index;}
+ {
+ SELECT_LEX *sel=Select;
+ sel->ignore_index= *$2;
+ sel->ignore_index_ptr= &sel->ignore_index;
+ }
key_usage_list:
- key_or_index { Lex->interval_list.empty() } '(' key_usage_list2 ')'
- { $$= &Lex->interval_list; }
+ key_or_index { Select->interval_list.empty() } '(' key_usage_list2 ')'
+ { $$= &Select->interval_list; }
key_usage_list2:
key_usage_list2 ',' ident
- { Lex->interval_list.push_back(new String((const char*) $3.str,$3.length)); }
+ { Select->interval_list.push_back(new String((const char*) $3.str,$3.length)); }
| ident
- { Lex->interval_list.push_back(new String((const char*) $1.str,$1.length)); }
+ { Select->interval_list.push_back(new String((const char*) $1.str,$1.length)); }
| PRIMARY_SYM
- { Lex->interval_list.push_back(new String("PRIMARY",7)); }
+ { Select->interval_list.push_back(new String("PRIMARY",7)); }
using_list:
ident
- { if (!($$= new Item_func_eq(new Item_field(Lex->db1,Lex->table1, $1.str), new Item_field(Lex->db2,Lex->table2,$1.str))))
+ {
+ SELECT_LEX *sel=Select;
+ if (!($$= new Item_func_eq(new Item_field(sel->db1,sel->table1, $1.str), new Item_field(sel->db2,sel->table2,$1.str))))
YYABORT;
}
| using_list ',' ident
{
- if (!($$= new Item_cond_and(new Item_func_eq(new Item_field(Lex->db1,Lex->table1,$3.str), new Item_field(Lex->db2,Lex->table2,$3.str)), $1)))
+ SELECT_LEX *sel=Select;
+ if (!($$= new Item_cond_and(new Item_func_eq(new Item_field(sel->db1,sel->table1,$3.str), new Item_field(sel->db2,sel->table2,$3.str)), $1)))
YYABORT;
}
@@ -1865,13 +1956,16 @@ opt_table_alias:
where_clause:
- /* empty */ { Lex->where= 0; }
- | WHERE expr { Lex->where= $2; }
+ /* empty */ { Select->where= 0; }
+ | WHERE expr { Select->where= $2; }
having_clause:
/* empty */
- | HAVING { Lex->create_refs=1; } expr
- { Lex->having= $3; Lex->create_refs=0; }
+ | HAVING { Select->create_refs=1; } expr
+ {
+ SELECT_LEX *sel=Select;
+ sel->having= $3; sel->create_refs=0;
+ }
opt_escape:
ESCAPE_SYM TEXT_STRING { $$= $2.str; }
@@ -1901,7 +1995,7 @@ opt_order_clause:
| order_clause
order_clause:
- ORDER_SYM BY { Lex->sort_default=1; } order_list
+ ORDER_SYM BY { Select->sort_default=1; } order_list
order_list:
order_list ',' order_ident order_dir
@@ -1911,39 +2005,46 @@ order_list:
order_dir:
/* empty */ { $$ = 1; }
- | ASC { $$ = Lex->sort_default=1; }
- | DESC { $$ = Lex->sort_default=0; }
+ | ASC { $$ = Select->sort_default=1; }
+ | DESC { $$ = Select->sort_default=0; }
limit_clause:
/* empty */
{
- Lex->select_limit= (Lex->sql_command == SQLCOM_HA_READ) ?
+ SELECT_LEX *sel=Select;
+ sel->select_limit= (Lex->sql_command == SQLCOM_HA_READ) ?
1 : current_thd->default_select_limit;
- Lex->offset_limit= 0L;
+ sel->offset_limit= 0L;
}
| LIMIT ULONG_NUM
- { Lex->select_limit= $2; Lex->offset_limit=0L; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->select_limit= $2; sel->offset_limit=0L;
+ }
| LIMIT ULONG_NUM ',' ULONG_NUM
- { Lex->select_limit= $4; Lex->offset_limit=$2; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->select_limit= $4; sel->offset_limit=$2;
+ }
delete_limit_clause:
/* empty */
{
- Lex->select_limit= HA_POS_ERROR;
+ Select->select_limit= HA_POS_ERROR;
}
| LIMIT ULONGLONG_NUM
- { Lex->select_limit= (ha_rows) $2; }
+ { Select->select_limit= (ha_rows) $2; }
ULONG_NUM:
- NUM { $$= strtoul($1.str,NULL,10); }
- | REAL_NUM { $$= strtoul($1.str,NULL,10); }
+ NUM { $$= strtoul($1.str,NULL,10); }
+ | REAL_NUM { $$= strtoul($1.str,NULL,10); }
| FLOAT_NUM { $$= strtoul($1.str,NULL,10); }
ULONGLONG_NUM:
- NUM { $$= (ulonglong) strtoul($1.str,NULL,10); }
- | LONG_NUM { $$= strtoull($1.str,NULL,10); }
- | REAL_NUM { $$= strtoull($1.str,NULL,10); }
+ NUM { $$= (ulonglong) strtoul($1.str,NULL,10); }
+ | LONG_NUM { $$= strtoull($1.str,NULL,10); }
+ | REAL_NUM { $$= strtoull($1.str,NULL,10); }
| FLOAT_NUM { $$= strtoull($1.str,NULL,10); }
procedure_clause:
@@ -1998,28 +2099,32 @@ opt_into:
drop:
DROP TABLE_SYM if_exists table_list opt_restrict
{
- Lex->sql_command = SQLCOM_DROP_TABLE;
- Lex->drop_if_exists = $3;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_DROP_TABLE;
+ lex->drop_if_exists = $3;
}
| DROP INDEX ident ON table_ident {}
{
- Lex->sql_command= SQLCOM_DROP_INDEX;
- Lex->drop_list.empty();
- Lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DROP_INDEX;
+ lex->drop_list.empty();
+ lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
$3.str));
if (!add_table_to_list($5,NULL, 1))
YYABORT;
}
| DROP DATABASE if_exists ident
{
- Lex->sql_command= SQLCOM_DROP_DB;
- Lex->drop_if_exists=$3;
- Lex->name=$4.str;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DROP_DB;
+ lex->drop_if_exists=$3;
+ lex->name=$4.str;
}
| DROP UDF_SYM ident
{
- Lex->sql_command = SQLCOM_DROP_FUNCTION;
- Lex->udf.name=$3.str;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_DROP_FUNCTION;
+ lex->udf.name=$3.str;
}
@@ -2062,17 +2167,19 @@ insert2:
insert_table:
table_name
{
- Lex->field_list.empty();
- Lex->many_values.empty();
- Lex->insert_list=0;
+ LEX *lex=Lex;
+ lex->field_list.empty();
+ lex->many_values.empty();
+ lex->insert_list=0;
}
insert_field_spec:
opt_field_spec insert_values {}
| SET
{
- if (!(Lex->insert_list = new List_item) ||
- Lex->many_values.push_back(Lex->insert_list))
+ LEX *lex=Lex;
+ if (!(lex->insert_list = new List_item) ||
+ lex->many_values.push_back(lex->insert_list))
YYABORT;
}
ident_eq_list
@@ -2110,8 +2217,9 @@ ident_eq_list:
ident_eq_value:
simple_ident equal expr
{
- if (Lex->field_list.push_back($1) ||
- Lex->insert_list->push_back($3))
+ LEX *lex=Lex;
+ if (lex->field_list.push_back($1) ||
+ lex->insert_list->push_back($3))
YYABORT;
}
@@ -2126,7 +2234,8 @@ no_braces:
}
opt_values ')'
{
- if (Lex->many_values.push_back(Lex->insert_list))
+ LEX *lex=Lex;
+ if (lex->many_values.push_back(lex->insert_list))
YYABORT;
}
@@ -2155,10 +2264,11 @@ update:
opt_order_clause
delete_limit_clause
{
- Lex->sql_command = SQLCOM_UPDATE;
- Lex->order_list.elements=0;
- Lex->order_list.first=0;
- Lex->order_list.next= (byte**) &Lex->order_list.first;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_UPDATE;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
}
update_list:
@@ -2182,22 +2292,64 @@ opt_low_priority:
delete:
DELETE_SYM
{
- Lex->sql_command= SQLCOM_DELETE; Lex->options=0;
- Lex->lock_option= current_thd->update_lock_default;
- Lex->order_list.elements=0;
- Lex->order_list.first=0;
- Lex->order_list.next= (byte**) &Lex->order_list.first;
- }
- opt_delete_options FROM table_name
- where_clause opt_order_clause delete_limit_clause
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DELETE; lex->select->options=0;
+ lex->lock_option= lex->thd->update_lock_default;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
+ }
+ opt_delete_options single_multi {}
+
+single_multi:
+ FROM table_name where_clause opt_order_clause delete_limit_clause {}
+ | table_wild_list
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_MULTI_DELETE;
+ mysql_init_select(lex);
+ lex->select->select_limit=HA_POS_ERROR;
+ lex->auxilliary_table_list.elements=0;
+ lex->auxilliary_table_list.first=0;
+ lex->auxilliary_table_list.next= (byte**) &(lex->auxilliary_table_list.first);
+ }
+ FROM
+ {
+ LEX *lex=Lex;
+ lex->auxilliary_table_list=lex->select_lex.table_list;
+ lex->select->table_list.elements=0;
+ lex->select->table_list.first=0;
+ lex->select->table_list.next= (byte**) &(lex->select->table_list.first);
+ } join_table_list where_clause
+
+
+table_wild_list:
+ table_wild_one {}
+ | table_wild_list ',' table_wild_one {}
+
+table_wild_one:
+ ident opt_wild
+ {
+ if (!add_table_to_list(new Table_ident($1),NULL,1,TL_WRITE))
+ YYABORT;
+ }
+ | ident '.' ident opt_wild
+ {
+ if (!add_table_to_list(new Table_ident($1,$3,0),NULL,1,TL_WRITE))
+ YYABORT;
+ }
+
+opt_wild:
+ /* empty */ {}
+ | '.' '*' {}
opt_delete_options:
- /* empty */ {}
+ /* empty */ {}
| opt_delete_option opt_delete_options {}
opt_delete_option:
- QUICK { Lex->options|= OPTION_QUICK; }
+ QUICK { Select->options|= OPTION_QUICK; }
| LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; }
truncate:
@@ -2205,17 +2357,16 @@ truncate:
{
LEX* lex = Lex;
lex->sql_command= SQLCOM_TRUNCATE;
- lex->options=0;
- lex->order_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
-
+ lex->select->options=0;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
lex->lock_option= current_thd->update_lock_default; }
opt_table_sym:
/* empty */
| TABLE_SYM
-
+
/* Show things */
show: SHOW { Lex->wild=0;} show_param
@@ -2224,18 +2375,26 @@ show_param:
DATABASES wild
{ Lex->sql_command= SQLCOM_SHOW_DATABASES; }
| TABLES opt_db wild
- { Lex->sql_command= SQLCOM_SHOW_TABLES; Lex->db= $2; Lex->options=0;}
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLES;
+ lex->select->db= $2; lex->select->options=0;
+ }
| TABLE_SYM STATUS_SYM opt_db wild
- { Lex->sql_command= SQLCOM_SHOW_TABLES;
- Lex->options|= SELECT_DESCRIBE;
- Lex->db= $3;
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLES;
+ lex->select->options|= SELECT_DESCRIBE;
+ lex->select->db= $3;
}
| OPEN_SYM TABLES opt_db wild
- { Lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
- Lex->db= $3;
- Lex->options=0;
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
+ lex->select->db= $3;
+ lex->select->options=0;
}
- | opt_full COLUMNS FROM table_ident opt_db wild
+ | opt_full COLUMNS from_or_in table_ident opt_db wild
{
Lex->sql_command= SQLCOM_SHOW_FIELDS;
if ($5)
@@ -2246,7 +2405,15 @@ show_param:
| MASTER_SYM LOGS_SYM
{
Lex->sql_command = SQLCOM_SHOW_BINLOGS;
- }
+ }
+ | SLAVE HOSTS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_SLAVE_HOSTS;
+ }
+ | BINLOG_SYM EVENTS_SYM binlog_in binlog_from limit_clause
+ {
+ Lex->sql_command = SQLCOM_SHOW_BINLOG_EVENTS;
+ }
| keys_or_index FROM table_ident opt_db
{
Lex->sql_command= SQLCOM_SHOW_KEYS;
@@ -2264,8 +2431,12 @@ show_param:
| LOGS_SYM
{ Lex->sql_command= SQLCOM_SHOW_LOGS; }
| GRANTS FOR_SYM user
- { Lex->sql_command= SQLCOM_SHOW_GRANTS;
- Lex->grant_user=$3; Lex->grant_user->password.str=NullS; }
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_GRANTS;
+ lex->grant_user=$3;
+ lex->grant_user->password.str=NullS;
+ }
| CREATE TABLE_SYM table_ident
{
Lex->sql_command = SQLCOM_SHOW_CREATE;
@@ -2283,7 +2454,7 @@ show_param:
opt_db:
/* empty */ { $$= 0; }
- | FROM ident { $$= $2.str; }
+ | from_or_in ident { $$= $2.str; }
wild:
/* empty */
@@ -2293,18 +2464,32 @@ opt_full:
/* empty */ { Lex->verbose=0; }
| FULL { Lex->verbose=1; }
+from_or_in:
+ FROM
+ | IN_SYM
+
+binlog_in:
+ /* empty */ { Lex->mi.log_file_name = 0; }
+ | IN_SYM TEXT_STRING { Lex->mi.log_file_name = $2.str; }
+
+binlog_from:
+ /* empty */ { Lex->mi.pos = 4; /* skip magic number */ }
+ | FROM ULONGLONG_NUM { Lex->mi.pos = $2; }
+
+
/* A Oracle compatible synonym for show */
describe:
describe_command table_ident
{
- Lex->wild=0;
- Lex->verbose=0;
- Lex->sql_command=SQLCOM_SHOW_FIELDS;
+ LEX *lex=Lex;
+ lex->wild=0;
+ lex->verbose=0;
+ lex->sql_command=SQLCOM_SHOW_FIELDS;
if (!add_table_to_list($2, NULL,0))
YYABORT;
}
opt_describe_column
- | describe_command select { Lex->options|= SELECT_DESCRIBE };
+ | describe_command select { Select->options|= SELECT_DESCRIBE };
describe_command:
@@ -2320,7 +2505,12 @@ opt_describe_column:
/* flush things */
flush:
- FLUSH_SYM {Lex->sql_command= SQLCOM_FLUSH; Lex->type=0; } flush_options
+ FLUSH_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_FLUSH; lex->type=0;
+ }
+ flush_options
flush_options:
flush_options ',' flush_option
@@ -2341,8 +2531,11 @@ opt_table_list:
| table_list {}
reset:
- RESET_SYM {Lex->sql_command= SQLCOM_RESET; Lex->type=0; } reset_options
-
+ RESET_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_RESET; lex->type=0;
+ } reset_options
reset_options:
reset_options ',' reset_option
| reset_option
@@ -2352,7 +2545,12 @@ reset_option:
| MASTER_SYM { Lex->type|= REFRESH_MASTER; }
purge:
- PURGE { Lex->sql_command = SQLCOM_PURGE; Lex->type=0;}
+ PURGE
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_PURGE;
+ lex->type=0;
+ }
MASTER_SYM LOGS_SYM TO_SYM TEXT_STRING
{
Lex->to_log = $6.str;
@@ -2363,29 +2561,34 @@ purge:
kill:
KILL_SYM expr
{
- if ($2->fix_fields(current_thd,0))
- {
- send_error(&current_thd->net, ER_SET_CONSTANTS_ONLY);
- YYABORT;
- }
- Lex->sql_command=SQLCOM_KILL;
- Lex->thread_id= (ulong) $2->val_int();
+ LEX *lex=Lex;
+ if ($2->fix_fields(lex->thd,0))
+ {
+ send_error(&lex->thd->net, ER_SET_CONSTANTS_ONLY);
+ YYABORT;
+ }
+ lex->sql_command=SQLCOM_KILL;
+ lex->thread_id= (ulong) $2->val_int();
}
/* change database */
use: USE_SYM ident
- { Lex->sql_command=SQLCOM_CHANGE_DB; Lex->db= $2.str; }
+ {
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_CHANGE_DB; lex->select->db= $2.str;
+ }
/* import, export of files */
-load: LOAD DATA_SYM opt_low_priority opt_local INFILE TEXT_STRING
+load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING
{
- Lex->sql_command= SQLCOM_LOAD;
- Lex->local_file= $4;
- if (!(Lex->exchange= new sql_exchange($6.str,0)))
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_LOAD;
+ lex->local_file= $4;
+ if (!(lex->exchange= new sql_exchange($6.str,0)))
YYABORT;
- Lex->field_list.empty();
+ lex->field_list.empty();
}
opt_duplicate INTO TABLE_SYM table_ident opt_field_term opt_line_term
opt_ignore_lines opt_field_spec
@@ -2401,6 +2604,11 @@ load: LOAD DATA_SYM opt_low_priority opt_local INFILE TEXT_STRING
YYABORT;
}
+ |
+ LOAD DATA_SYM FROM MASTER_SYM
+ {
+ Lex->sql_command = SQLCOM_LOAD_MASTER_DATA;
+ }
opt_local:
/* empty */ { $$=0;}
@@ -2428,7 +2636,11 @@ field_term_list:
field_term:
TERMINATED BY text_string { Lex->exchange->field_term= $3;}
| OPTIONALLY ENCLOSED BY text_string
- { Lex->exchange->enclosed= $4; Lex->exchange->opt_enclosed=1;}
+ {
+ LEX *lex=Lex;
+ lex->exchange->enclosed= $4;
+ lex->exchange->opt_enclosed=1;
+ }
| ENCLOSED BY text_string { Lex->exchange->enclosed= $3;}
| ESCAPED BY text_string { Lex->exchange->escaped= $3;}
@@ -2498,13 +2710,25 @@ order_ident:
simple_ident:
ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field(NullS,NullS,$1.str) : (Item*) new Item_ref(NullS,NullS,$1.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,NullS,$1.str) : (Item*) new Item_ref(NullS,NullS,$1.str);
+ }
| ident '.' ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$1.str,$3.str) : (Item*) new Item_ref(NullS,$1.str,$3.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$1.str,$3.str) : (Item*) new Item_ref(NullS,$1.str,$3.str);
+ }
| '.' ident '.' ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$2.str,$4.str) : (Item*) new Item_ref(NullS,$2.str,$4.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$2.str,$4.str) : (Item*) new Item_ref(NullS,$2.str,$4.str);
+ }
| ident '.' ident '.' ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str) : (Item*) new Item_ref((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str) : (Item*) new Item_ref((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str);
+ }
field_ident:
@@ -2521,10 +2745,11 @@ ident:
IDENT { $$=$1; }
| keyword
{
+ LEX *lex;
$$.str=sql_strmake($1.str,$1.length);
$$.length=$1.length;
- if (Lex->next_state != STATE_END)
- Lex->next_state=STATE_OPERATOR_OR_IDENT;
+ if ((lex=Lex)->next_state != STATE_END)
+ lex->next_state=STATE_OPERATOR_OR_IDENT;
}
ident_or_text:
@@ -2575,6 +2800,7 @@ keyword:
| DATETIME {}
| DATE_SYM {}
| DAY_SYM {}
+ | DIRECTORY_SYM {}
| DELAY_KEY_WRITE_SYM {}
| DISABLE_SYM {}
| DUMPFILE {}
@@ -2598,6 +2824,7 @@ keyword:
| HOSTS_SYM {}
| HOUR_SYM {}
| IDENTIFIED_SYM {}
+ | INDEXES {}
| ISOLATION {}
| ISAM_SYM {}
| INNOBASE_SYM {}
@@ -2675,12 +2902,14 @@ keyword:
set:
SET opt_option
{
- THD *thd=current_thd;
- Lex->sql_command= SQLCOM_SET_OPTION;
- Lex->options=thd->options;
- Lex->select_limit=thd->default_select_limit;
- Lex->gemini_spin_retries=thd->gemini_spin_retries;
- Lex->tx_isolation=thd->tx_isolation;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SET_OPTION;
+ lex->select->options=lex->thd->options;
+ lex->select->select_limit=lex->thd->default_select_limit;
+ lex->gemini_spin_retries=lex->thd->gemini_spin_retries;
+ lex->tx_isolation=lex->thd->tx_isolation;
+ lex->option_type=0;
+ lex->option_list.empty()
}
option_value_list
@@ -2690,36 +2919,41 @@ opt_option:
option_value_list:
option_value
+ | GLOBAL_SYM { Lex->option_type=1; } option_value
+ | LOCAL_SYM { Lex->option_type=0; } option_value
| option_value_list ',' option_value
option_value:
set_option equal NUM
{
+ SELECT_LEX *sel=Select;
if (atoi($3.str) == 0)
- Lex->options&= ~$1;
+ sel->options&= ~$1;
else
- Lex->options|= $1;
+ sel->options|= $1;
}
| set_isolation
| AUTOCOMMIT equal NUM
{
+ SELECT_LEX *sel=Select;
if (atoi($3.str) != 0) /* Test NOT AUTOCOMMIT */
- Lex->options&= ~(OPTION_NOT_AUTO_COMMIT);
+ sel->options&= ~(OPTION_NOT_AUTO_COMMIT);
else
- Lex->options|= OPTION_NOT_AUTO_COMMIT;
+ sel->options|= OPTION_NOT_AUTO_COMMIT;
}
| SQL_SELECT_LIMIT equal ULONG_NUM
{
- Lex->select_limit= $3;
+ Select->select_limit= $3;
}
| SQL_SELECT_LIMIT equal DEFAULT
{
- Lex->select_limit= HA_POS_ERROR;
+ Select->select_limit= HA_POS_ERROR;
}
| SQL_MAX_JOIN_SIZE equal ULONG_NUM
{
- current_thd->max_join_size= $3;
- Lex->options&= ~OPTION_BIG_SELECTS;
+ LEX *lex=Lex;
+ lex->thd->max_join_size= $3;
+ lex->select->options&= ~OPTION_BIG_SELECTS;
}
| SQL_MAX_JOIN_SIZE equal DEFAULT
{
@@ -2794,6 +3028,28 @@ option_value:
slave_skip_counter = $3;
pthread_mutex_unlock(&LOCK_slave);
}
+ | ident equal DEFAULT
+ {
+ LEX *lex=Lex;
+ lex->option_list.push_back(new Set_option(lex->option_type,
+ $1.str,$1.length,
+ (Item*) 0));
+ }
+ | ident equal expr
+ {
+ THD *thd=current_thd;
+ Item *item= $3;
+ if (item->fix_fields(current_thd,0))
+ {
+ send_error(&thd->net, ER_SET_CONSTANTS_ONLY);
+ YYABORT;
+ }
+ thd->lex.option_list.
+ push_back(new Set_option(thd->lex.option_type,
+ $1.str,$1.length,
+ item));
+ }
+
text_or_password:
TEXT_STRING { $$=$1.str;}
@@ -2842,7 +3098,10 @@ set_isolation:
default_tx_isolation_name=tx_isolation_typelib.type_names[default_tx_isolation];
}
| SESSION_SYM tx_isolation
- { current_thd->session_tx_isolation= Lex->tx_isolation= $2; }
+ {
+ LEX *lex=Lex;
+ lex->thd->session_tx_isolation= lex->tx_isolation= $2;
+ }
| tx_isolation
{ Lex->tx_isolation= $1; }
@@ -2926,8 +3185,9 @@ handler_rkey_function:
| LAST_SYM { Lex->ha_read_mode = RLAST; }
| handler_rkey_mode
{
- Lex->ha_read_mode = RKEY;
- if (!(Lex->insert_list = new List_item))
+ LEX *lex=Lex;
+ lex->ha_read_mode = RKEY;
+ if (!(lex->insert_list = new List_item))
YYABORT;
} '(' values ')' { }
@@ -2943,22 +3203,24 @@ handler_rkey_mode:
revoke:
REVOKE
{
- Lex->sql_command = SQLCOM_REVOKE;
- Lex->users_list.empty();
- Lex->columns.empty();
- Lex->grant= Lex->grant_tot_col=0;
- Lex->db=0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_REVOKE;
+ lex->users_list.empty();
+ lex->columns.empty();
+ lex->grant= lex->grant_tot_col=0;
+ lex->select->db=0;
}
grant_privileges ON opt_table FROM user_list
grant:
GRANT
{
- Lex->sql_command = SQLCOM_GRANT;
- Lex->users_list.empty();
- Lex->columns.empty();
- Lex->grant= Lex->grant_tot_col=0;
- Lex->db=0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_GRANT;
+ lex->users_list.empty();
+ lex->columns.empty();
+ lex->grant= lex->grant_tot_col=0;
+ lex->select->db=0;
}
grant_privileges ON opt_table TO_SYM user_list
grant_option
@@ -2998,43 +3260,47 @@ grant_privilege:
opt_table:
'*'
{
- Lex->db=current_thd->db;
- if (Lex->grant == UINT_MAX)
- Lex->grant = DB_ACLS & ~GRANT_ACL;
- else if (Lex->columns.elements)
+ LEX *lex=Lex;
+ lex->select->db=lex->thd->db;
+ if (lex->grant == UINT_MAX)
+ lex->grant = DB_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
{
- net_printf(&current_thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
+ net_printf(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
YYABORT;
- }
+ }
}
| ident '.' '*'
{
- Lex->db = $1.str;
- if (Lex->grant == UINT_MAX)
- Lex->grant = DB_ACLS & ~GRANT_ACL;
- else if (Lex->columns.elements)
+ LEX *lex=Lex;
+ lex->select->db = $1.str;
+ if (lex->grant == UINT_MAX)
+ lex->grant = DB_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
{
- net_printf(&current_thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
+ net_printf(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
YYABORT;
}
}
| '*' '.' '*'
{
- Lex->db = NULL;
- if (Lex->grant == UINT_MAX)
- Lex->grant = GLOBAL_ACLS & ~GRANT_ACL;
- else if (Lex->columns.elements)
+ LEX *lex=Lex;
+ lex->select->db = NULL;
+ if (lex->grant == UINT_MAX)
+ lex->grant = GLOBAL_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
{
- net_printf(&current_thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
+ net_printf(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
YYABORT;
}
}
| table_ident
{
+ LEX *lex=Lex;
if (!add_table_to_list($1,NULL,0))
YYABORT;
- if (Lex->grant == UINT_MAX)
- Lex->grant = TABLE_ACLS & ~GRANT_ACL;
+ if (lex->grant == UINT_MAX)
+ lex->grant = TABLE_ACLS & ~GRANT_ACL;
}
@@ -3065,7 +3331,11 @@ grant_user:
opt_column_list:
- /* empty */ { Lex->grant |= Lex->which_columns; }
+ /* empty */
+ {
+ LEX *lex=Lex;
+ lex->grant |= lex->which_columns;
+ }
| '(' column_list ')'
column_list:
@@ -3078,16 +3348,17 @@ column_list_id:
String *new_str = new String((const char*) $1.str,$1.length);
List_iterator <LEX_COLUMN> iter(Lex->columns);
class LEX_COLUMN *point;
+ LEX *lex=Lex;
while ((point=iter++))
{
if (!my_strcasecmp(point->column.ptr(),new_str->ptr()))
break;
}
- Lex->grant_tot_col|= Lex->which_columns;
+ lex->grant_tot_col|= lex->which_columns;
if (point)
- point->rights |= Lex->which_columns;
+ point->rights |= lex->which_columns;
else
- Lex->columns.push_back(new LEX_COLUMN (*new_str,Lex->which_columns));
+ lex->columns.push_back(new LEX_COLUMN (*new_str,lex->which_columns));
}
grant_option:
@@ -3106,3 +3377,23 @@ commit:
rollback:
ROLLBACK_SYM { Lex->sql_command = SQLCOM_ROLLBACK;}
+
+
+/*
+** UNIONS : glue selects together
+*/
+
+
+union:
+ /* empty */ {}
+ | union_list
+
+union_list:
+ UNION_SYM
+ {
+ LEX *lex=Lex;
+ if (lex->exchange) YYABORT; /* Only the last SELECT can have INTO...... */
+ lex->sql_command=SQLCOM_UNION_SELECT;
+ mysql_new_select(lex); lex->select->linkage=UNION_TYPE;
+ }
+ select
diff --git a/sql/structs.h b/sql/structs.h
index 36f503312c0..594432134b2 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -164,3 +164,4 @@ typedef struct st_lex_user {
#define STATUS_NOT_READ 8 /* Record isn't read */
#define STATUS_UPDATED 16 /* Record is updated by formula */
#define STATUS_NULL_ROW 32 /* table->null_row is set */
+#define STATUS_DELETED 64
diff --git a/sql/time.cc b/sql/time.cc
index 086977af72f..e0b74fc9d25 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -455,8 +455,8 @@ str_to_TIME(const char *str, uint length, TIME *l_time,bool fuzzy_date)
if ((date[i]=tmp_value))
date_used=1; // Found something
if (i == 2 && str != end && *str == 'T')
- str++; // ISO8601: CCYYMMDDThhmmss
- else
+ str++; // ISO8601: CCYYMMDDThhmmss
+ else if ( i != 5 ) // Skip inter-field delimiters
{
while (str != end && (ispunct(*str) || isspace(*str)))
{
diff --git a/sql/uniques.cc b/sql/uniques.cc
index becb3d8a3a5..5ef7ead276b 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -34,14 +34,18 @@
#include "mysql_priv.h"
#include "sql_sort.h"
-Unique::Unique(qsort_cmp2 comp_func, uint size, ulong max_in_memory_size_arg)
+
+Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
+ uint size, ulong max_in_memory_size_arg)
:max_in_memory_size(max_in_memory_size_arg),elements(0)
{
my_b_clear(&file);
- init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL, NULL);
+ init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL, comp_func_fixed_arg);
/* If the following fail's the next add will also fail */
init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size);
+ open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
+ MYF(MY_WME));
}
@@ -69,12 +73,12 @@ bool Unique::flush()
}
-int unique_write_to_file(gptr key, Unique *unique, element_count count)
+int unique_write_to_file(gptr key, element_count count, Unique *unique)
{
return my_b_write(&unique->file, key, unique->tree.size_of_element) ? 1 : 0;
}
-int unique_write_to_ptrs(gptr key, Unique *unique, element_count count)
+int unique_write_to_ptrs(gptr key, element_count count, Unique *unique)
{
memcpy(unique->record_pointers, key, unique->tree.size_of_element);
unique->record_pointers+=unique->tree.size_of_element;
@@ -92,7 +96,7 @@ bool Unique::get(TABLE *table)
SORTPARAM sort_param;
table->found_records=elements+tree.elements_in_tree;
- if (!my_b_inited(&file))
+ if (my_b_tell(&file) == 0)
{
/* Whole tree is in memory; Don't use disk if you don't need to */
if ((record_pointers=table->record_pointers= (byte*)
@@ -107,47 +111,47 @@ bool Unique::get(TABLE *table)
if (flush())
return 1;
- IO_CACHE *outfile=table->io_cache, tempfile;
+ IO_CACHE *outfile=table->io_cache;
BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
- uint maxbuffer= file_ptrs.elements;
+ uint maxbuffer= file_ptrs.elements - 1;
uchar *sort_buffer;
my_off_t save_pos;
bool error=1;
- my_b_clear(&tempfile);
-
/* Open cached file if it isn't open */
- if (! my_b_inited(outfile) &&
+ outfile=table->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
+ MYF(MY_ZEROFILL));
+
+ if (!outfile || ! my_b_inited(outfile) &&
open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
MYF(MY_WME)))
return 1;
reinit_io_cache(outfile,WRITE_CACHE,0L,0,0);
-
- sort_param.keys=elements;
+
+ sort_param.max_rows= elements;
sort_param.sort_form=table;
sort_param.sort_length=sort_param.ref_length=tree.size_of_element;
sort_param.keys= max_in_memory_size / sort_param.sort_length;
- if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *
- sort_param.sort_length,
- MYF(0))))
+ if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *
+ sort_param.sort_length,
+ MYF(0))))
return 1;
sort_param.unique_buff= sort_buffer+(sort_param.keys*
sort_param.sort_length);
/* Merge the buffers to one file, removing duplicates */
- if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,&tempfile))
+ if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,&file))
goto err;
- if (flush_io_cache(&tempfile) ||
- reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
+ if (flush_io_cache(&file) ||
+ reinit_io_cache(&file,READ_CACHE,0L,0,0))
goto err;
- if (merge_buffers(&sort_param, &tempfile, outfile, sort_buffer, file_ptr,
+ if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr,
file_ptr, file_ptr+maxbuffer,0))
goto err;
error=0;
err:
x_free((gptr) sort_buffer);
- close_cached_file(&tempfile);
if (flush_io_cache(outfile))
error=1;
diff --git a/sql/unireg.h b/sql/unireg.h
index 7ad3bac2eab..159832295fd 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -38,6 +38,8 @@
#endif
#define ER(X) errmesg[(X)-1000]
+#define ER_SAFE(X) (((X) >= 1000 && (X) < ER_ERROR_MESSAGES + 1000) ? ER(X) : "Invalid error code")
+
#define ERRMAPP 1 /* Errormap f|r my_error */
#define LIBLEN FN_REFLEN-FN_LEN /* Max l{ngd p} dev */
diff --git a/sql/violite.c b/sql/violite.c
deleted file mode 100644
index 902110ff072..00000000000
--- a/sql/violite.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA */
-
-/*
- Note that we can't have assertion on file descriptors; The reason for
- this is that during mysql shutdown, another thread can close a file
- we are working on. In this case we should just return read errors from
- the file descriptior.
-*/
-
-#include <global.h>
-
-#ifndef HAVE_VIO /* is Vio suppored by the Vio lib ? */
-
-#include <errno.h>
-#include <assert.h>
-#include <violite.h>
-#include <my_sys.h>
-#include <my_net.h>
-#include <m_string.h>
-#ifdef HAVE_POLL
-#include <sys/poll.h>
-#endif
-#ifdef HAVE_SYS_IOCTL_H
-#include <sys/ioctl.h>
-#endif
-
-#if defined(__EMX__)
-#define ioctlsocket ioctl
-#endif /* defined(__EMX__) */
-
-#if defined(MSDOS) || defined(__WIN__)
-#ifdef __WIN__
-#undef errno
-#undef EINTR
-#undef EAGAIN
-#define errno WSAGetLastError()
-#define EINTR WSAEINTR
-#define EAGAIN WSAEINPROGRESS
-#endif /* __WIN__ */
-#define O_NONBLOCK 1 /* For emulation of fcntl() */
-#endif
-#ifndef EWOULDBLOCK
-#define EWOULDBLOCK EAGAIN
-#endif
-
-#ifndef __WIN__
-#define HANDLE void *
-#endif
-
-struct st_vio
-{
- my_socket sd; /* my_socket - real or imaginary */
- HANDLE hPipe;
- my_bool localhost; /* Are we from localhost? */
- int fcntl_mode; /* Buffered fcntl(sd,F_GETFL) */
- struct sockaddr_in local; /* Local internet address */
- struct sockaddr_in remote; /* Remote internet address */
- enum enum_vio_type type; /* Type of connection */
- char desc[30]; /* String description */
-};
-
-typedef void *vio_ptr;
-typedef char *vio_cstring;
-
-/*
- * Helper to fill most of the Vio* with defaults.
- */
-
-static void vio_reset(Vio* vio, enum enum_vio_type type,
- my_socket sd, HANDLE hPipe,
- my_bool localhost)
-{
- bzero((char*) vio, sizeof(*vio));
- vio->type = type;
- vio->sd = sd;
- vio->hPipe = hPipe;
- vio->localhost= localhost;
-}
-
-/* Open the socket or TCP/IP connection and read the fnctl() status */
-
-Vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost)
-{
- Vio *vio;
- DBUG_ENTER("vio_new");
- DBUG_PRINT("enter", ("sd=%d", sd));
- if ((vio = (Vio*) my_malloc(sizeof(*vio),MYF(MY_WME))))
- {
- vio_reset(vio, type, sd, 0, localhost);
- sprintf(vio->desc,
- (vio->type == VIO_TYPE_SOCKET ? "socket (%d)" : "TCP/IP (%d)"),
- vio->sd);
-#if !defined(___WIN__) && !defined(__EMX__)
-#if !defined(NO_FCNTL_NONBLOCK)
- vio->fcntl_mode = fcntl(sd, F_GETFL);
-#elif defined(HAVE_SYS_IOCTL_H) /* hpux */
- /* Non blocking sockets doesn't work good on HPUX 11.0 */
- (void) ioctl(sd,FIOSNBIO,0);
-#endif
-#else /* !defined(__WIN__) && !defined(__EMX__) */
- {
- /* set to blocking mode by default */
- ulong arg=0, r;
- r = ioctlsocket(sd,FIONBIO,(void*) &arg, sizeof(arg));
- }
-#endif
- }
- DBUG_RETURN(vio);
-}
-
-
-#ifdef __WIN__
-
-Vio *vio_new_win32pipe(HANDLE hPipe)
-{
- Vio *vio;
- DBUG_ENTER("vio_new_handle");
- if ((vio = (Vio*) my_malloc(sizeof(Vio),MYF(MY_WME))))
- {
- vio_reset(vio, VIO_TYPE_NAMEDPIPE, 0, hPipe, TRUE);
- strmov(vio->desc, "named pipe");
- }
- DBUG_RETURN(vio);
-}
-
-#endif
-
-void vio_delete(Vio * vio)
-{
- /* It must be safe to delete null pointers. */
- /* This matches the semantics of C++'s delete operator. */
- if (vio)
- {
- if (vio->type != VIO_CLOSED)
- vio_close(vio);
- my_free((gptr) vio,MYF(0));
- }
-}
-
-int vio_errno(Vio *vio __attribute__((unused)))
-{
- return errno; /* On Win32 this mapped to WSAGetLastError() */
-}
-
-
-int vio_read(Vio * vio, gptr buf, int size)
-{
- int r;
- DBUG_ENTER("vio_read");
- DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size));
-#ifdef __WIN__
- if (vio->type == VIO_TYPE_NAMEDPIPE)
- {
- DWORD length;
- if (!ReadFile(vio->hPipe, buf, size, &length, NULL))
- DBUG_RETURN(-1);
- DBUG_RETURN(length);
- }
- r = recv(vio->sd, buf, size,0);
-#else
- errno=0; /* For linux */
- r = read(vio->sd, buf, size);
-#endif /* __WIN__ */
-#ifndef DBUG_OFF
- if (r < 0)
- {
- DBUG_PRINT("vio_error", ("Got error %d during read",errno));
- }
-#endif /* DBUG_OFF */
- DBUG_PRINT("exit", ("%d", r));
- DBUG_RETURN(r);
-}
-
-
-int vio_write(Vio * vio, const gptr buf, int size)
-{
- int r;
- DBUG_ENTER("vio_write");
- DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size));
-#ifdef __WIN__
- if ( vio->type == VIO_TYPE_NAMEDPIPE)
- {
- DWORD length;
- if (!WriteFile(vio->hPipe, (char*) buf, size, &length, NULL))
- DBUG_RETURN(-1);
- DBUG_RETURN(length);
- }
- r = send(vio->sd, buf, size,0);
-#else
- r = write(vio->sd, buf, size);
-#endif /* __WIN__ */
-#ifndef DBUG_OFF
- if (r < 0)
- {
- DBUG_PRINT("vio_error", ("Got error on write: %d",errno));
- }
-#endif /* DBUG_OFF */
- DBUG_PRINT("exit", ("%d", r));
- DBUG_RETURN(r);
-}
-
-
-int vio_blocking(Vio * vio, my_bool set_blocking_mode)
-{
- int r=0;
- DBUG_ENTER("vio_blocking");
- DBUG_PRINT("enter", ("set_blocking_mode: %d", (int) set_blocking_mode));
-
-#if !defined(___WIN__) && !defined(__EMX__)
-#if !defined(NO_FCNTL_NONBLOCK)
-
- if (vio->sd >= 0)
- {
- int old_fcntl=vio->fcntl_mode;
- if (set_blocking_mode)
- vio->fcntl_mode &= ~O_NONBLOCK; /* clear bit */
- else
- vio->fcntl_mode |= O_NONBLOCK; /* set bit */
- if (old_fcntl != vio->fcntl_mode)
- r = fcntl(vio->sd, F_SETFL, vio->fcntl_mode);
- }
-#endif /* !defined(NO_FCNTL_NONBLOCK) */
-#else /* !defined(__WIN__) && !defined(__EMX__) */
-#ifndef __EMX__
- if (vio->type != VIO_TYPE_NAMEDPIPE)
-#endif
- {
- ulong arg;
- int old_fcntl=vio->fcntl_mode;
- if (set_blocking_mode)
- {
- arg = 0;
- vio->fcntl_mode &= ~O_NONBLOCK; /* clear bit */
- }
- else
- {
- arg = 1;
- vio->fcntl_mode |= O_NONBLOCK; /* set bit */
- }
- if (old_fcntl != vio->fcntl_mode)
- r = ioctlsocket(vio->sd,FIONBIO,(void*) &arg, sizeof(arg));
- }
-#endif /* !defined(__WIN__) && !defined(__EMX__) */
- DBUG_RETURN(r);
-}
-
-my_bool
-vio_is_blocking(Vio * vio)
-{
- my_bool r;
- DBUG_ENTER("vio_is_blocking");
- r = !(vio->fcntl_mode & O_NONBLOCK);
- DBUG_PRINT("exit", ("%d", (int) r));
- DBUG_RETURN(r);
-}
-
-
-int vio_fastsend(Vio * vio __attribute__((unused)))
-{
- int r=0;
- DBUG_ENTER("vio_fastsend");
-
-#ifdef IPTOS_THROUGHPUT
- {
-#ifndef __EMX__
- int tos = IPTOS_THROUGHPUT;
- if (!setsockopt(vio->sd, IPPROTO_IP, IP_TOS, (void *) &tos, sizeof(tos)))
-#endif /* !__EMX__ */
- {
- int nodelay = 1;
- if (setsockopt(vio->sd, IPPROTO_TCP, TCP_NODELAY, (void *) &nodelay,
- sizeof(nodelay))) {
- DBUG_PRINT("warning",
- ("Couldn't set socket option for fast send"));
- r= -1;
- }
- }
- }
-#endif /* IPTOS_THROUGHPUT */
- DBUG_PRINT("exit", ("%d", r));
- DBUG_RETURN(r);
-}
-
-int vio_keepalive(Vio* vio, my_bool set_keep_alive)
-{
- int r=0;
- uint opt = 0;
- DBUG_ENTER("vio_keepalive");
- DBUG_PRINT("enter", ("sd=%d, set_keep_alive=%d", vio->sd, (int)
- set_keep_alive));
- if (vio->type != VIO_TYPE_NAMEDPIPE)
- {
- if (set_keep_alive)
- opt = 1;
- r = setsockopt(vio->sd, SOL_SOCKET, SO_KEEPALIVE, (char *) &opt,
- sizeof(opt));
- }
- DBUG_RETURN(r);
-}
-
-
-my_bool
-vio_should_retry(Vio * vio __attribute__((unused)))
-{
- int en = errno;
- return en == EAGAIN || en == EINTR || en == EWOULDBLOCK;
-}
-
-
-int vio_close(Vio * vio)
-{
- int r;
- DBUG_ENTER("vio_close");
-#ifdef __WIN__
- if (vio->type == VIO_TYPE_NAMEDPIPE)
- {
-#if defined(__NT__) && defined(MYSQL_SERVER)
- CancelIo(vio->hPipe);
- DisconnectNamedPipe(vio->hPipe);
-#endif
- r=CloseHandle(vio->hPipe);
- }
- else if (vio->type != VIO_CLOSED)
-#endif /* __WIN__ */
- {
- r=0;
- if (shutdown(vio->sd,2))
- r= -1;
- if (closesocket(vio->sd))
- r= -1;
- }
- if (r)
- {
- DBUG_PRINT("vio_error", ("close() failed, error: %d",errno));
- /* FIXME: error handling (not critical for MySQL) */
- }
- vio->type= VIO_CLOSED;
- vio->sd= -1;
- DBUG_RETURN(r);
-}
-
-
-const char *vio_description(Vio * vio)
-{
- return vio->desc;
-}
-
-enum enum_vio_type vio_type(Vio* vio)
-{
- return vio->type;
-}
-
-my_socket vio_fd(Vio* vio)
-{
- return vio->sd;
-}
-
-
-my_bool vio_peer_addr(Vio * vio, char *buf)
-{
- DBUG_ENTER("vio_peer_addr");
- DBUG_PRINT("enter", ("sd=%d", vio->sd));
- if (vio->localhost)
- {
- strmov(buf,"127.0.0.1");
- }
- else
- {
- size_socket addrLen = sizeof(struct sockaddr);
- if (getpeername(vio->sd, (struct sockaddr *) (& (vio->remote)),
- &addrLen) != 0)
- {
- DBUG_PRINT("exit", ("getpeername, error: %d", errno));
- DBUG_RETURN(1);
- }
- my_inet_ntoa(vio->remote.sin_addr,buf);
- }
- DBUG_PRINT("exit", ("addr=%s", buf));
- DBUG_RETURN(0);
-}
-
-
-void vio_in_addr(Vio *vio, struct in_addr *in)
-{
- DBUG_ENTER("vio_in_addr");
- if (vio->localhost)
- bzero((char*) in, sizeof(*in)); /* This should never be executed */
- else
- *in=vio->remote.sin_addr;
- DBUG_VOID_RETURN;
-}
-
-
-/* Return 0 if there is data to be read */
-
-my_bool vio_poll_read(Vio *vio,uint timeout)
-{
-#ifndef HAVE_POLL
- return 0;
-#else
- struct pollfd fds;
- int res;
- DBUG_ENTER("vio_poll");
- fds.fd=vio->sd;
- fds.events=POLLIN;
- fds.revents=0;
- if ((res=poll(&fds,1,(int) timeout*1000)) <= 0)
- {
- DBUG_RETURN(res < 0 ? 0 : 1); /* Don't return 1 on errors */
- }
- DBUG_RETURN(fds.revents & POLLIN ? 0 : 1);
-#endif
-}
-
-#endif /* HAVE_VIO */
diff --git a/support-files/binary-configure.sh b/support-files/binary-configure.sh
index 682ea570b25..107f468bffc 100644
--- a/support-files/binary-configure.sh
+++ b/support-files/binary-configure.sh
@@ -20,5 +20,5 @@ then
echo "Starting the mysqld server. You can test that it is up and running"
echo "with the command:"
echo "./bin/mysqladmin version"
- ./bin/safe_mysqld &
+ ./bin/mysqld_safe &
fi
diff --git a/support-files/mysql-max.spec.sh b/support-files/mysql-max.spec.sh
index 49f131154c0..5c4b16f0e9d 100644
--- a/support-files/mysql-max.spec.sh
+++ b/support-files/mysql-max.spec.sh
@@ -208,7 +208,7 @@ chmod -R og-rw $mysql_datadir/mysql
# Restart in the same way that mysqld will be started normally.
/etc/rc.d/init.d/mysql start
-# Allow safe_mysqld to start mysqld and print a message before we exit
+# Allow mysqld_safe to start mysqld and print a message before we exit
sleep 2
%preun
@@ -244,7 +244,7 @@ fi
%attr(755, root, root) /usr/bin/perror
%attr(755, root, root) /usr/bin/replace
%attr(755, root, root) /usr/bin/resolveip
-%attr(755, root, root) /usr/bin/safe_mysqld
+%attr(755, root, root) /usr/bin/mysqld_safe
%attr(755, root, root) /usr/bin/mysqld_multi
%attr(755, root, root) /usr/bin/my_print_defaults
diff --git a/support-files/mysql-multi.server.sh b/support-files/mysql-multi.server.sh
index af13009d038..31020029354 100644
--- a/support-files/mysql-multi.server.sh
+++ b/support-files/mysql-multi.server.sh
@@ -65,7 +65,7 @@ parse_arguments() {
done
}
-# Get arguments from the my.cfg file, groups [mysqld], [mysql_server],
+# Get arguments from the my.cnf file, groups [mysqld], [mysql_server],
# and mysql_multi_server
if test -x ./bin/my_print_defaults
then
@@ -133,14 +133,14 @@ case "$mode" in
'start')
# Start daemon
- if test -x $bindir/safe_mysqld
+ if test -x $bindir/mysqld_safe
then
# We only need to specify datadir and pid-file here and we
# get all other instance-specific config from $datadir/my.cnf.
# We have to explicitly pass --defaults-extra-file because it
# reads the config files before the command line options.
- # Also it must be first because of the way safe_mysqld works.
- $bindir/safe_mysqld --defaults-extra-file=$datadir/my.cnf \
+ # Also it must be first because of the way mysqld_safe works.
+ $bindir/mysqld_safe --defaults-extra-file=$datadir/my.cnf \
--datadir=$datadir --pid-file=$pid_file &
# Make lock for RedHat / SuSE
if test -d /var/lock/subsys
@@ -148,7 +148,7 @@ case "$mode" in
touch /var/lock/subsys/mysql
fi
else
- echo "Can't execute $bindir/safe_mysqld"
+ echo "Can't execute $bindir/mysqld_safe"
fi
;;
diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh
index 9307a2e3eb2..ee5a9adaf8b 100644
--- a/support-files/mysql.server.sh
+++ b/support-files/mysql.server.sh
@@ -53,7 +53,7 @@ parse_arguments() {
done
}
-# Get arguments from the my.cfg file, groups [mysqld] and [mysql_server]
+# Get arguments from the my.cnf file, groups [mysqld] and [mysql_server]
if test -x ./bin/my_print_defaults
then
print_defaults="./bin/my_print_defaults"
@@ -100,18 +100,18 @@ case "$mode" in
'start')
# Start daemon
- if test -x $bindir/safe_mysqld
+ if test -x $bindir/mysqld_safe
then
# Give extra arguments to mysqld with the my.cnf file. This script may
# be overwritten at next upgrade.
- $bindir/safe_mysqld --datadir=$datadir --pid-file=$pid_file &
+ $bindir/mysqld_safe --datadir=$datadir --pid-file=$pid_file &
# Make lock for RedHat / SuSE
if test -w /var/lock/subsys
then
touch /var/lock/subsys/mysql
fi
else
- echo "Can't execute $bindir/safe_mysqld"
+ echo "Can't execute $bindir/mysqld_safe"
fi
;;
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index a10b09c12be..1bea22fbad7 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -296,7 +296,7 @@ chmod -R og-rw $mysql_datadir/mysql
# Restart in the same way that mysqld will be started normally.
/etc/rc.d/init.d/mysql start
-# Allow safe_mysqld to start mysqld and print a message before we exit
+# Allow mysqld_safe to start mysqld and print a message before we exit
sleep 2
%post Max
@@ -342,7 +342,7 @@ fi
%attr(755, root, root) /usr/bin/perror
%attr(755, root, root) /usr/bin/replace
%attr(755, root, root) /usr/bin/resolveip
-%attr(755, root, root) /usr/bin/safe_mysqld
+%attr(755, root, root) /usr/bin/mysqld_safe
%attr(755, root, root) /usr/bin/mysqld_multi
%attr(755, root, root) /usr/bin/my_print_defaults
@@ -360,6 +360,7 @@ fi
%attr(755, root, root) /usr/bin/mysql
%attr(755, root, root) /usr/bin/mysqlaccess
%attr(755, root, root) /usr/bin/mysqladmin
+%attr(755, root, root) /usr/bin/mysqlcheck
%attr(755, root, root) /usr/bin/mysql_find_rows
%attr(755, root, root) /usr/bin/mysqldump
%attr(755, root, root) /usr/bin/mysqlimport
@@ -378,7 +379,7 @@ fi
%attr(644, root, man) %doc /usr/man/man1/mysqlshow.1*
%attr(644, root, man) %doc /usr/man/man1/perror.1*
%attr(644, root, man) %doc /usr/man/man1/replace.1*
-%attr(644, root, man) %doc /usr/man/man1/safe_mysqld.1*
+%attr(644, root, man) %doc /usr/man/man1/mysqld_safe.1*
%post shared
/sbin/ldconfig
diff --git a/tests/fork_big.pl b/tests/fork_big.pl
index 8f16db74793..4009a9da71b 100755
--- a/tests/fork_big.pl
+++ b/tests/fork_big.pl
@@ -88,6 +88,7 @@ for ($i=0 ; $i < $opt_threads ; $i ++)
{
test_select() if (($pid=fork()) == 0); $work{$pid}="select_key";
}
+test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
test_update() if (($pid=fork()) == 0); $work{$pid}="update";
test_flush() if (($pid=fork()) == 0); $work{$pid}= "flush";
@@ -214,6 +215,35 @@ sub test_select
}
#
+# Do big select count(distinct..) over the table
+#
+
+sub test_select_count
+{
+ my ($dbh, $i, $j, $count, $loop);
+
+ $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
+ $opt_user, $opt_password,
+ { PrintError => 0}) || die $DBI::errstr;
+
+ $count=0;
+ $i=0;
+ while (!test_if_abort($dbh))
+ {
+ for ($j=0 ; $j < $numtables ; $j++)
+ {
+ my ($table)= $testtables[$j]->[0];
+ simple_query($dbh, "select count(distinct marker),count(distinct id),count(distinct info) from $table");
+ $count++;
+ }
+ sleep(20); # This query is quite slow
+ }
+ $dbh->disconnect; $dbh=0;
+ print "Test_select: Executed $count select count(distinct) queries\n";
+ exit(0);
+}
+
+#
# Delete 1-5 rows from the first 2 tables.
# Test ends when the number of rows for table 3 didn't change during
# one loop
diff --git a/vio/Makefile.am b/vio/Makefile.am
index 9bb8691eee6..c1a69b26058 100644
--- a/vio/Makefile.am
+++ b/vio/Makefile.am
@@ -14,20 +14,12 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-INCLUDES = -I$(srcdir)/../include -I../include \
- @OPENSSL_INCLUDES@
-LDADD = libvio.la
-pkglib_LTLIBRARIES = libvio.la
+INCLUDES = -I$(srcdir)/../include -I../include $(openssl_includes)
+LDADD = libvio.a $(openssl_libs)
+pkglib_LIBRARIES = libvio.a
noinst_PROGRAMS =
noinst_HEADERS =
-libvio_la_SOURCES = \
- Vio.cc VioAcceptorFd.cc \
- VioConnectorFd.cc VioFd.cc \
- VioHandle.cc VioSSL.cc \
- VioSSLFactoriesFd.cc VioSocket.cc \
- auto.cc hostnamexx.cc \
- vdbug.cc version.cc \
- vmem.cc violitexx.cc
+libvio_a_SOURCES = vio.c viosocket.c viossl.c viosslfactories.c
OMIT_DEPENDENCIES = pthread.h stdio.h __stdio.h stdlib.h __stdlib.h math.h\
__math.h time.h __time.h unistd.h __unistd.h types.h \
diff --git a/vio/vio.c b/vio/vio.c
index 9775c002737..96cb0c31ef6 100644
--- a/vio/vio.c
+++ b/vio/vio.c
@@ -23,10 +23,11 @@
#define DONT_MAP_VIO
#include <global.h>
+#include <mysql_com.h>
+#include <violite.h>
#include <errno.h>
#include <assert.h>
-#include <vio.h>
#include <my_sys.h>
#include <my_net.h>
#include <m_string.h>
@@ -58,56 +59,68 @@
/*
- * Helper to fill most of the st_vio* with defaults.
+ * Helper to fill most of the Vio* with defaults.
*/
-void vio_reset(st_vio* vio, enum enum_vio_type type,
+void vio_reset(Vio* vio, enum enum_vio_type type,
my_socket sd, HANDLE hPipe,
my_bool localhost)
{
- bzero((char*) vio, sizeof(st_vio));
+ DBUG_ENTER("vio_reset");
+ DBUG_PRINT("enter", ("type=%d sd=%d localhost=%d", type, sd, localhost));
+
+ bzero((char*) vio, sizeof(*vio));
vio->type = type;
vio->sd = sd;
vio->hPipe = hPipe;
vio->localhost= localhost;
#ifdef HAVE_VIO
-if(type == VIO_TYPE_SSL){
- vio->viodelete =vio_ssl_delete;
- vio->vioerrno =vio_ssl_errno;
- vio->read =vio_ssl_read;
- vio->write =vio_ssl_write;
- vio->fastsend =vio_ssl_fastsend;
- vio->viokeepalive=vio_ssl_keepalive;
- vio->should_retry=vio_ssl_should_retry;
- vio->vioclose =vio_ssl_close;
- vio->peer_addr =vio_ssl_peer_addr;
- vio->in_addr =vio_ssl_in_addr;
- vio->poll_read =vio_ssl_poll_read;
-} else { /* default is VIO_TYPE_TCPIP */
- vio->viodelete =vio_delete;
- vio->vioerrno =vio_errno;
- vio->read =vio_read;
- vio->write =vio_write;
- vio->fastsend =vio_fastsend;
- vio->viokeepalive=vio_keepalive;
- vio->should_retry=vio_should_retry;
- vio->vioclose =vio_close;
- vio->peer_addr =vio_peer_addr;
- vio->in_addr =vio_in_addr;
- vio->poll_read =vio_poll_read;
-}
-
+#ifdef HAVE_OPENSSL
+ if (type == VIO_TYPE_SSL)
+ {
+ vio->viodelete =vio_ssl_delete;
+ vio->vioerrno =vio_ssl_errno;
+ vio->read =vio_ssl_read;
+ vio->write =vio_ssl_write;
+ vio->fastsend =vio_ssl_fastsend;
+ vio->viokeepalive =vio_ssl_keepalive;
+ vio->should_retry =vio_ssl_should_retry;
+ vio->vioclose =vio_ssl_close;
+ vio->peer_addr =vio_ssl_peer_addr;
+ vio->in_addr =vio_ssl_in_addr;
+ vio->poll_read =vio_ssl_poll_read;
+ vio->vioblocking =vio_blocking;
+ vio->is_blocking =vio_is_blocking;
+ }
+ else /* default is VIO_TYPE_TCPIP */
+#endif /* HAVE_OPENSSL */
+ {
+ vio->viodelete =vio_delete;
+ vio->vioerrno =vio_errno;
+ vio->read =vio_read;
+ vio->write =vio_write;
+ vio->fastsend =vio_fastsend;
+ vio->viokeepalive =vio_keepalive;
+ vio->should_retry =vio_should_retry;
+ vio->vioclose =vio_close;
+ vio->peer_addr =vio_peer_addr;
+ vio->in_addr =vio_in_addr;
+ vio->poll_read =vio_poll_read;
+ vio->vioblocking =vio_blocking;
+ vio->is_blocking =vio_is_blocking;
+ }
#endif /* HAVE_VIO */
+ DBUG_VOID_RETURN;
}
/* Open the socket or TCP/IP connection and read the fnctl() status */
-st_vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost)
+Vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost)
{
- st_vio *vio;
+ Vio *vio;
DBUG_ENTER("vio_new");
DBUG_PRINT("enter", ("sd=%d", sd));
- if ((vio = (st_vio*) my_malloc(sizeof(*vio),MYF(MY_WME))))
+ if ((vio = (Vio*) my_malloc(sizeof(*vio),MYF(MY_WME))))
{
vio_reset(vio, type, sd, 0, localhost);
sprintf(vio->desc,
@@ -134,11 +147,11 @@ st_vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost)
#ifdef __WIN__
-st_vio *vio_new_win32pipe(HANDLE hPipe)
+Vio *vio_new_win32pipe(HANDLE hPipe)
{
- st_vio *vio;
+ Vio *vio;
DBUG_ENTER("vio_new_handle");
- if ((vio = (st_vio*) my_malloc(sizeof(st_vio),MYF(MY_WME))))
+ if ((vio = (Vio*) my_malloc(sizeof(Vio),MYF(MY_WME))))
{
vio_reset(vio, VIO_TYPE_NAMEDPIPE, 0, hPipe, TRUE);
strmov(vio->desc, "named pipe");
@@ -147,5 +160,3 @@ st_vio *vio_new_win32pipe(HANDLE hPipe)
}
#endif
-
-
diff --git a/vio/viotcpip.c b/vio/viosocket.c
index f0dfc81cf4f..bf151c19928 100644
--- a/vio/viotcpip.c
+++ b/vio/viosocket.c
@@ -24,10 +24,11 @@
#define DONT_MAP_VIO
#include <global.h>
+#include <mysql_com.h>
#include <errno.h>
#include <assert.h>
-#include <vio.h>
+#include <violite.h>
#include <my_sys.h>
#include <my_net.h>
#include <m_string.h>
@@ -61,7 +62,7 @@
#define HANDLE void *
#endif
-void vio_delete(st_vio* vio)
+void vio_delete(Vio* vio)
{
/* It must be safe to delete null pointers. */
/* This matches the semantics of C++'s delete operator. */
@@ -73,13 +74,13 @@ void vio_delete(st_vio* vio)
}
}
-int vio_errno(st_vio *vio __attribute__((unused)))
+int vio_errno(Vio *vio __attribute__((unused)))
{
return errno; /* On Win32 this mapped to WSAGetLastError() */
}
-int vio_read(st_vio * vio, gptr buf, int size)
+int vio_read(Vio * vio, gptr buf, int size)
{
int r;
DBUG_ENTER("vio_read");
@@ -100,7 +101,7 @@ int vio_read(st_vio * vio, gptr buf, int size)
#ifndef DBUG_OFF
if (r < 0)
{
- DBUG_PRINT("error", ("Got error %d during read",errno));
+ DBUG_PRINT("vio_error", ("Got error %d during read",errno));
}
#endif /* DBUG_OFF */
DBUG_PRINT("exit", ("%d", r));
@@ -108,7 +109,7 @@ int vio_read(st_vio * vio, gptr buf, int size)
}
-int vio_write(st_vio * vio, const gptr buf, int size)
+int vio_write(Vio * vio, const gptr buf, int size)
{
int r;
DBUG_ENTER("vio_write");
@@ -128,7 +129,7 @@ int vio_write(st_vio * vio, const gptr buf, int size)
#ifndef DBUG_OFF
if (r < 0)
{
- DBUG_PRINT("error", ("Got error on write: %d",errno));
+ DBUG_PRINT("vio_error", ("Got error on write: %d",errno));
}
#endif /* DBUG_OFF */
DBUG_PRINT("exit", ("%d", r));
@@ -136,7 +137,7 @@ int vio_write(st_vio * vio, const gptr buf, int size)
}
-int vio_blocking(st_vio * vio, my_bool set_blocking_mode)
+int vio_blocking(Vio * vio, my_bool set_blocking_mode)
{
int r=0;
DBUG_ENTER("vio_blocking");
@@ -181,7 +182,7 @@ int vio_blocking(st_vio * vio, my_bool set_blocking_mode)
}
my_bool
-vio_is_blocking(st_vio * vio)
+vio_is_blocking(Vio * vio)
{
my_bool r;
DBUG_ENTER("vio_is_blocking");
@@ -191,7 +192,7 @@ vio_is_blocking(st_vio * vio)
}
-int vio_fastsend(st_vio * vio __attribute__((unused)))
+int vio_fastsend(Vio * vio __attribute__((unused)))
{
int r=0;
DBUG_ENTER("vio_fastsend");
@@ -217,7 +218,7 @@ int vio_fastsend(st_vio * vio __attribute__((unused)))
DBUG_RETURN(r);
}
-int vio_keepalive(st_vio* vio, my_bool set_keep_alive)
+int vio_keepalive(Vio* vio, my_bool set_keep_alive)
{
int r=0;
uint opt = 0;
@@ -236,14 +237,14 @@ int vio_keepalive(st_vio* vio, my_bool set_keep_alive)
my_bool
-vio_should_retry(st_vio * vio __attribute__((unused)))
+vio_should_retry(Vio * vio __attribute__((unused)))
{
int en = errno;
return en == EAGAIN || en == EINTR || en == EWOULDBLOCK;
}
-int vio_close(st_vio * vio)
+int vio_close(Vio * vio)
{
int r;
DBUG_ENTER("vio_close");
@@ -267,7 +268,7 @@ int vio_close(st_vio * vio)
}
if (r)
{
- DBUG_PRINT("error", ("close() failed, error: %d",errno));
+ DBUG_PRINT("vio_error", ("close() failed, error: %d",errno));
/* FIXME: error handling (not critical for MySQL) */
}
vio->type= VIO_CLOSED;
@@ -276,23 +277,23 @@ int vio_close(st_vio * vio)
}
-const char *vio_description(st_vio * vio)
+const char *vio_description(Vio * vio)
{
return vio->desc;
}
-enum enum_vio_type vio_type(st_vio* vio)
+enum enum_vio_type vio_type(Vio* vio)
{
return vio->type;
}
-my_socket vio_fd(st_vio* vio)
+my_socket vio_fd(Vio* vio)
{
return vio->sd;
}
-my_bool vio_peer_addr(st_vio * vio, char *buf)
+my_bool vio_peer_addr(Vio * vio, char *buf)
{
DBUG_ENTER("vio_peer_addr");
DBUG_PRINT("enter", ("sd=%d", vio->sd));
@@ -309,15 +310,14 @@ my_bool vio_peer_addr(st_vio * vio, char *buf)
DBUG_PRINT("exit", ("getpeername, error: %d", errno));
DBUG_RETURN(1);
}
- /* FIXME */
-/* my_inet_ntoa(vio->remote.sin_addr,buf); */
+ my_inet_ntoa(vio->remote.sin_addr,buf);
}
DBUG_PRINT("exit", ("addr=%s", buf));
DBUG_RETURN(0);
}
-void vio_in_addr(st_vio *vio, struct in_addr *in)
+void vio_in_addr(Vio *vio, struct in_addr *in)
{
DBUG_ENTER("vio_in_addr");
if (vio->localhost)
@@ -330,7 +330,7 @@ void vio_in_addr(st_vio *vio, struct in_addr *in)
/* Return 0 if there is data to be read */
-my_bool vio_poll_read(st_vio *vio,uint timeout)
+my_bool vio_poll_read(Vio *vio,uint timeout)
{
#ifndef HAVE_POLL
return 0;
@@ -348,4 +348,3 @@ my_bool vio_poll_read(st_vio *vio,uint timeout)
DBUG_RETURN(fds.revents & POLLIN ? 0 : 1);
#endif
}
-
diff --git a/vio/viossl.c b/vio/viossl.c
index 5600bc1a800..e4fe9d87228 100644
--- a/vio/viossl.c
+++ b/vio/viossl.c
@@ -23,10 +23,11 @@
*/
#include <global.h>
+#include <mysql_com.h>
#include <errno.h>
#include <assert.h>
-#include <vio.h>
+#include <violite.h>
#include <my_sys.h>
#include <my_net.h>
#include <m_string.h>
@@ -62,7 +63,27 @@
#ifdef HAVE_OPENSSL
-void vio_ssl_delete(st_vio * vio)
+
+static void
+report_errors()
+{
+ unsigned long l;
+ const char* file;
+ const char* data;
+ int line,flags;
+ DBUG_ENTER("report_errors");
+
+ while ((l=ERR_get_error_line_data(&file,&line,&data,&flags)) != 0)
+ {
+ char buf[200];
+ DBUG_PRINT("error", ("OpenSSL: %s:%s:%d:%s\n", ERR_error_string(l,buf),
+ file,line,(flags&ERR_TXT_STRING)?data:"")) ;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+void vio_ssl_delete(Vio * vio)
{
/* It must be safe to delete null pointers. */
/* This matches the semantics of C++'s delete operator. */
@@ -74,13 +95,13 @@ void vio_ssl_delete(st_vio * vio)
}
}
-int vio_ssl_errno(st_vio *vio __attribute__((unused)))
+int vio_ssl_errno(Vio *vio __attribute__((unused)))
{
return errno; /* On Win32 this mapped to WSAGetLastError() */
}
-int vio_ssl_read(st_vio * vio, gptr buf, int size)
+int vio_ssl_read(Vio * vio, gptr buf, int size)
{
int r;
DBUG_ENTER("vio_ssl_read");
@@ -96,7 +117,7 @@ int vio_ssl_read(st_vio * vio, gptr buf, int size)
}
-int vio_ssl_write(st_vio * vio, const gptr buf, int size)
+int vio_ssl_write(Vio * vio, const gptr buf, int size)
{
int r;
DBUG_ENTER("vio_ssl_write");
@@ -112,7 +133,7 @@ int vio_ssl_write(st_vio * vio, const gptr buf, int size)
}
-int vio_ssl_fastsend(st_vio * vio __attribute__((unused)))
+int vio_ssl_fastsend(Vio * vio __attribute__((unused)))
{
int r=0;
DBUG_ENTER("vio_ssl_fastsend");
@@ -138,7 +159,7 @@ int vio_ssl_fastsend(st_vio * vio __attribute__((unused)))
DBUG_RETURN(r);
}
-int vio_ssl_keepalive(st_vio* vio, my_bool set_keep_alive)
+int vio_ssl_keepalive(Vio* vio, my_bool set_keep_alive)
{
int r=0;
uint opt = 0;
@@ -157,14 +178,14 @@ int vio_ssl_keepalive(st_vio* vio, my_bool set_keep_alive)
my_bool
-vio_ssl_should_retry(st_vio * vio __attribute__((unused)))
+vio_ssl_should_retry(Vio * vio __attribute__((unused)))
{
int en = errno;
return en == EAGAIN || en == EINTR || en == EWOULDBLOCK;
}
-int vio_ssl_close(st_vio * vio)
+int vio_ssl_close(Vio * vio)
{
int r;
DBUG_ENTER("vio_ssl_close");
@@ -191,23 +212,23 @@ int vio_ssl_close(st_vio * vio)
}
-const char *vio_ssl_description(st_vio * vio)
+const char *vio_ssl_description(Vio * vio)
{
return vio->desc;
}
-enum enum_vio_type vio_ssl_type(st_vio* vio)
+enum enum_vio_type vio_ssl_type(Vio* vio)
{
return vio->type;
}
-my_socket vio_ssl_fd(st_vio* vio)
+my_socket vio_ssl_fd(Vio* vio)
{
return vio->sd;
}
-my_bool vio_ssl_peer_addr(st_vio * vio, char *buf)
+my_bool vio_ssl_peer_addr(Vio * vio, char *buf)
{
DBUG_ENTER("vio_ssl_peer_addr");
DBUG_PRINT("enter", ("sd=%d", vio->sd));
@@ -232,7 +253,7 @@ my_bool vio_ssl_peer_addr(st_vio * vio, char *buf)
}
-void vio_ssl_in_addr(st_vio *vio, struct in_addr *in)
+void vio_ssl_in_addr(Vio *vio, struct in_addr *in)
{
DBUG_ENTER("vio_ssl_in_addr");
if (vio->localhost)
@@ -245,7 +266,7 @@ void vio_ssl_in_addr(st_vio *vio, struct in_addr *in)
/* Return 0 if there is data to be read */
-my_bool vio_ssl_poll_read(st_vio *vio,uint timeout)
+my_bool vio_ssl_poll_read(Vio *vio,uint timeout)
{
#ifndef HAVE_POLL
return 0;
@@ -265,28 +286,10 @@ my_bool vio_ssl_poll_read(st_vio *vio,uint timeout)
}
-static void
-report_errors()
-{
- unsigned long l;
- const char* file;
- const char* data;
- int line,flags;
- DBUG_ENTER("report_errors");
-
- while ((l=ERR_get_error_line_data(&file,&line,&data,&flags)) != 0)
- {
- char buf[200];
- DBUG_PRINT("error", ("OpenSSL: %s:%s:%d:%s\n", ERR_error_string(l,buf),
- file,line,(flags&ERR_TXT_STRING)?data:"")) ;
- }
- DBUG_VOID_RETURN;
-}
-
/* FIXME: There are some duplicate code in
* sslaccept()/sslconnect() which maybe can be eliminated
*/
-struct st_vio *sslaccept(struct st_VioSSLAcceptorFd* ptr, struct st_vio* sd)
+Vio *sslaccept(struct st_VioSSLAcceptorFd* ptr, Vio* sd)
{
DBUG_ENTER("sslaccept");
DBUG_PRINT("enter", ("sd=%s ptr=%p", sd->desc,ptr));
@@ -319,13 +322,13 @@ struct st_vio *sslaccept(struct st_VioSSLAcceptorFd* ptr, struct st_vio* sd)
DBUG_RETURN(sd);
}
-struct st_vio *sslconnect(struct st_VioSSLConnectorFd* ptr, struct st_vio* sd)
+Vio *sslconnect(struct st_VioSSLConnectorFd* ptr, Vio* sd)
{
DBUG_ENTER("sslconnect");
DBUG_PRINT("enter", ("sd=%s ptr=%p ctx: %p", sd->desc,ptr,ptr->ssl_context_));
vio_reset(sd,VIO_TYPE_SSL,sd->sd,0,FALSE);
- ptr->bio_=0;
+ sd->bio_=0;
sd->ssl_=0;
sd->open_=FALSE;
assert(sd != 0);
@@ -338,7 +341,7 @@ struct st_vio *sslconnect(struct st_VioSSLConnectorFd* ptr, struct st_vio* sd)
report_errors();
DBUG_RETURN(sd);
}
- if (!(ptr->bio_ = BIO_new_socket(sd->sd, BIO_NOCLOSE)))
+ if (!(sd->bio_ = BIO_new_socket(sd->sd, BIO_NOCLOSE)))
{
DBUG_PRINT("error", ("BIO_new_socket failure"));
report_errors();
@@ -346,7 +349,7 @@ struct st_vio *sslconnect(struct st_VioSSLConnectorFd* ptr, struct st_vio* sd)
sd->ssl_=0;
DBUG_RETURN(sd);
}
- SSL_set_bio(sd->ssl_, ptr->bio_, ptr->bio_);
+ SSL_set_bio(sd->ssl_, sd->bio_, sd->bio_);
SSL_set_connect_state(sd->ssl_);
/* sprintf(ptr->desc_, "VioSSL(%d)", sd->sd);
sd->ssl_cip_ = SSL_get_cipher(sd->ssl_);*/
diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c
index 2b2eaf4fff5..4be956ed9ba 100644
--- a/vio/viosslfactories.c
+++ b/vio/viosslfactories.c
@@ -2,7 +2,8 @@
#include <global.h>
#include <my_sys.h>
-#include <vio.h>
+#include <mysql_com.h>
+#include <violite.h>
#ifdef HAVE_OPENSSL
@@ -185,7 +186,7 @@ struct st_VioSSLConnectorFd* new_VioSSLConnectorFd(const char* key_file,
DBUG_RETURN(ptr);
ctor_failure:
DBUG_PRINT("exit", ("there was an error"));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
@@ -266,7 +267,7 @@ new_VioSSLAcceptorFd(const char* key_file,
DBUG_RETURN(ptr);
ctor_failure:
DBUG_PRINT("exit", ("there was an error"));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}