author      unknown <monty@mysql.com>   2004-09-06 15:14:10 +0300
committer   unknown <monty@mysql.com>   2004-09-06 15:14:10 +0300
commit      4ad51359c1b7b5ed854f1af8df06fd6912068d28 (patch)
tree        d9ef905036723648a1d354d0523ef8124e4dee76
parent      fa3bfbe45796edd85cce30e62ff1fcfb81df745c (diff)
parent      9a63c8e0e468d7a64dcb7e23f4e5c344eebf635b (diff)
download    mariadb-git-4ad51359c1b7b5ed854f1af8df06fd6912068d28.tar.gz
Merge with 4.1
(Includes merge of arena code in 4.1 and 5.0)

BitKeeper/etc/ignore: auto-union
BitKeeper/etc/logging_ok: auto-union
VC++Files/sql/mysqld.dsp: Auto merged
client/mysql.cc: Auto merged
client/mysqltest.c: Auto merged
include/my_global.h: Auto merged
include/my_sys.h: Auto merged
include/mysql.h: Auto merged
include/mysql_com.h: Auto merged
innobase/row/row0sel.c: Auto merged
libmysql/client_settings.h: Auto merged
libmysql/libmysql.c: Auto merged
libmysqld/Makefile.am: Auto merged
libmysqld/examples/Makefile.am: Auto merged
libmysqld/lib_sql.cc: Auto merged
myisam/mi_check.c: Auto merged
myisam/myisamchk.c: Auto merged
myisam/sort.c: Auto merged
mysql-test/r/connect.result: Auto merged
mysql-test/r/ctype_recoding.result: Auto merged
mysql-test/r/ctype_ucs.result: Auto merged
mysql-test/r/func_in.result: Auto merged
mysql-test/r/func_like.result: Auto merged
mysql-test/r/gis.result: Auto merged
mysql-test/r/having.result: Auto merged
mysql-test/r/heap.result: Auto merged
mysql-test/r/join.result: Auto merged
mysql-test/r/key.result: Auto merged
mysql-test/r/lowercase_table.result: Auto merged
mysql-test/r/ndb_autodiscover.result: Auto merged
mysql-test/r/null.result: Auto merged
mysql-test/r/olap.result: Auto merged
mysql-test/r/order_by.result: Auto merged
mysql-test/r/ps_1general.result: Auto merged
mysql-test/r/ps_2myisam.result: Auto merged
mysql-test/r/ps_3innodb.result: Auto merged
mysql-test/r/ps_4heap.result: Auto merged
mysql-test/r/ps_5merge.result: Auto merged
mysql-test/r/ps_6bdb.result: Auto merged
mysql-test/r/range.result: Auto merged
mysql-test/r/rename.result: Auto merged
mysql-test/r/show_check.result: Auto merged
mysql-test/r/subselect.result: Auto merged
mysql-test/r/union.result: Auto merged
mysql-test/r/variables.result: Auto merged
mysql-test/t/alter_table.test: Auto merged
mysql-test/t/null.test: Auto merged
mysql-test/t/ps_1general.test: Auto merged
mysql-test/t/rpl_charset.test: Auto merged
mysql-test/t/rpl_heap.test: Auto merged
mysql-test/t/rpl_relayrotate.test: Auto merged
mysql-test/t/subselect.test: Auto merged
mysql-test/t/variables.test: Auto merged
netware/mysql_test_run.c: Auto merged
scripts/make_binary_distribution.sh: Auto merged
scripts/mysql_create_system_tables.sh: Auto merged
scripts/mysql_fix_privilege_tables.sql: Auto merged
scripts/mysql_install_db.sh: Auto merged
sql/ha_berkeley.cc: Auto merged
sql/ha_innodb.cc: Auto merged
sql/ha_innodb.h: Auto merged
sql/ha_myisam.cc: Auto merged
sql/handler.cc: Auto merged
sql/handler.h: Auto merged
sql/item.h: Auto merged
sql/item_cmpfunc.h: Auto merged
sql/item_create.cc: Auto merged
sql/item_create.h: Auto merged
sql/item_func.cc: Auto merged
sql/item_func.h: Auto merged
sql/item_timefunc.cc: Auto merged
sql/item_timefunc.h: Auto merged
sql/lex.h: Auto merged
sql/lock.cc: Auto merged
sql/log_event.cc: Auto merged
sql/mysql_priv.h: Auto merged
sql/opt_sum.cc: Auto merged
sql/protocol.cc: Auto merged
sql/records.cc: Auto merged
sql/repl_failsafe.cc: Auto merged
sql/set_var.cc: Auto merged
sql/set_var.h: Auto merged
sql/slave.cc: Auto merged
sql/sql_acl.cc: Auto merged
sql/sql_acl.h: Auto merged
sql/sql_db.cc: Auto merged
sql/sql_delete.cc: Auto merged
sql/sql_insert.cc: Auto merged
sql/sql_list.h: Auto merged
sql/sql_load.cc: Auto merged
sql/sql_rename.cc: Auto merged
sql/sql_select.h: Auto merged
sql/sql_show.cc: Auto merged
sql/sql_string.h: Auto merged
sql/table.cc: Auto merged
sql-common/client.c: Auto merged
client/mysqlbinlog.cc: Merge with 4.1
configure.in: Merge with 4.1
include/mysqld_error.h: Add new error message (1) from 4.1
mysql-test/mysql-test-run.sh: Merge with 4.1
mysql-test/r/func_gconcat.result: Merge with 4.1
mysql-test/r/func_if.result: Merge with 4.1
mysql-test/r/grant.result: Merge with 4.1
mysql-test/r/join_outer.result: Merge with 4.1
mysql-test/r/rpl_charset.result: Merge with 4.1 (This has to be fixed before pushing)
mysql-test/r/system_mysql_db.result: Merge with 4.1. Added collation to new privileges
mysql-test/t/grant.test: Merge with 4.1
mysql-test/t/grant_cache.test: Merge with 4.1
mysql-test/t/show_check.test: Merge with 4.1
sql/Makefile.am: Merge with 4.1
sql/item.cc: Merge with 4.1
sql/item_cmpfunc.cc: Merge with 4.1 (arena code)
sql/item_subselect.cc: Merge with 4.1
sql/item_subselect.h: Merge with 4.1
sql/item_sum.cc: Merge with 4.1
sql/item_sum.h: Merge with 4.1
sql/log.cc: Merge with 4.1 (Remove code that is not relevant for 5.0)
sql/mysqld.cc: Merge with 4.1
sql/opt_range.cc: Merge with 4.1
sql/share/czech/errmsg.txt: Merge with 4.1
sql/share/danish/errmsg.txt: Merge with 4.1
sql/share/dutch/errmsg.txt: Merge with 4.1
sql/share/english/errmsg.txt: Merge with 4.1
sql/share/estonian/errmsg.txt: Merge with 4.1
sql/share/french/errmsg.txt: Merge with 4.1
sql/share/german/errmsg.txt: Merge with 4.1
sql/share/greek/errmsg.txt: Merge with 4.1
sql/share/hungarian/errmsg.txt: Merge with 4.1
sql/share/italian/errmsg.txt: Merge with 4.1
sql/share/japanese/errmsg.txt: Merge with 4.1
sql/share/korean/errmsg.txt: Merge with 4.1
sql/share/norwegian-ny/errmsg.txt: Merge with 4.1
sql/share/norwegian/errmsg.txt: Merge with 4.1
sql/share/polish/errmsg.txt: Merge with 4.1
sql/share/portuguese/errmsg.txt: Merge with 4.1
sql/share/romanian/errmsg.txt: Merge with 4.1
sql/share/russian/errmsg.txt: Merge with 4.1
sql/share/serbian/errmsg.txt: Merge with 4.1
sql/share/slovak/errmsg.txt: Merge with 4.1
sql/share/spanish/errmsg.txt: Merge with 4.1
sql/share/swedish/errmsg.txt: Merge with 4.1
sql/share/ukrainian/errmsg.txt: Merge with 4.1
sql/sql_base.cc: Merge with 4.1
sql/sql_class.cc: Merge with 4.1 Use arena code from 4.1
sql/sql_class.h: Merge with 4.1 Use arena code from 4.1
sql/sql_derived.cc: Merge with 4.1
sql/sql_lex.cc: Merge with 4.1
sql/sql_lex.h: Merge with 4.1
sql/sql_parse.cc: Merge with 4.1
sql/sql_prepare.cc: Merge with 4.1
sql/sql_select.cc: Merge with 4.1
sql/sql_table.cc: Merge with 4.1
sql/sql_union.cc: Merge with 4.1
sql/sql_yacc.yy: Merge with 4.1
sql/tztime.cc: Merge with 4.1
tests/client_test.c: Merge with 4.1
-rw-r--r--.bzrignore122
-rw-r--r--BUILD/Makefile.am1
-rwxr-xr-xBUILD/compile-hpux11-parisc2-aCC80
-rwxr-xr-xBUILD/compile-irix-mips64-mipspro8
-rwxr-xr-xBUILD/compile-pentium-max5
-rwxr-xr-xBUILD/compile-pentium-valgrind-max2
-rw-r--r--BitKeeper/etc/logging_ok8
-rwxr-xr-xBuild-tools/Bootstrap54
-rwxr-xr-xBuild-tools/Do-compile6
-rwxr-xr-xBuild-tools/mysql-copyright34
-rw-r--r--Docs/Makefile.am7
-rw-r--r--Makefile.am11
-rw-r--r--VC++Files/sql/message.mc8
-rw-r--r--VC++Files/sql/mysqld.dsp87
-rw-r--r--VC++Files/winmysqladmin/mysql_com.h41
-rw-r--r--acconfig.h372
-rw-r--r--acinclude.m4310
-rw-r--r--bdb/dist/configure.ac2
-rw-r--r--bdb/dist/gen_inc.awk22
-rw-r--r--client/client_priv.h3
-rw-r--r--client/mysql.cc54
-rw-r--r--client/mysqladmin.c4
-rw-r--r--client/mysqlbinlog.cc275
-rw-r--r--client/mysqlcheck.c4
-rw-r--r--client/mysqldump.c5
-rw-r--r--client/mysqlimport.c4
-rw-r--r--client/mysqlshow.c4
-rw-r--r--client/mysqltest.c4
-rw-r--r--cmd-line-utils/Makefile.am17
-rw-r--r--cmd-line-utils/libedit/Makefile.am34
-rw-r--r--cmd-line-utils/libedit/chared.c282
-rw-r--r--cmd-line-utils/libedit/chared.h43
-rw-r--r--cmd-line-utils/libedit/common.c165
-rw-r--r--cmd-line-utils/libedit/config.h14
-rw-r--r--cmd-line-utils/libedit/el.c82
-rw-r--r--cmd-line-utils/libedit/el.h17
-rw-r--r--cmd-line-utils/libedit/emacs.c16
-rw-r--r--cmd-line-utils/libedit/hist.c63
-rw-r--r--cmd-line-utils/libedit/hist.h6
-rw-r--r--cmd-line-utils/libedit/histedit.h15
-rw-r--r--cmd-line-utils/libedit/history.c201
-rw-r--r--cmd-line-utils/libedit/key.c52
-rw-r--r--cmd-line-utils/libedit/key.h33
-rw-r--r--cmd-line-utils/libedit/makelist.sh4
-rw-r--r--cmd-line-utils/libedit/map.c339
-rw-r--r--cmd-line-utils/libedit/map.h2
-rw-r--r--cmd-line-utils/libedit/np/fgetln.c88
-rw-r--r--cmd-line-utils/libedit/np/strlcat.c75
-rw-r--r--cmd-line-utils/libedit/np/strlcpy.c75
-rw-r--r--cmd-line-utils/libedit/np/unvis.c322
-rw-r--r--cmd-line-utils/libedit/np/vis.c347
-rw-r--r--cmd-line-utils/libedit/np/vis.h96
-rw-r--r--cmd-line-utils/libedit/parse.c14
-rw-r--r--cmd-line-utils/libedit/prompt.c12
-rw-r--r--cmd-line-utils/libedit/read.c130
-rw-r--r--cmd-line-utils/libedit/read.h55
-rw-r--r--cmd-line-utils/libedit/readline.c240
-rw-r--r--cmd-line-utils/libedit/readline/readline.h24
-rw-r--r--cmd-line-utils/libedit/refresh.c51
-rw-r--r--cmd-line-utils/libedit/search.c142
-rw-r--r--cmd-line-utils/libedit/search.h6
-rw-r--r--cmd-line-utils/libedit/sig.c18
-rw-r--r--cmd-line-utils/libedit/sig.h7
-rw-r--r--cmd-line-utils/libedit/sys.h66
-rw-r--r--cmd-line-utils/libedit/term.c114
-rw-r--r--cmd-line-utils/libedit/tokenizer.c61
-rw-r--r--cmd-line-utils/libedit/tokenizer.h2
-rw-r--r--cmd-line-utils/libedit/tty.c25
-rw-r--r--cmd-line-utils/libedit/tty.h10
-rw-r--r--cmd-line-utils/libedit/vi.c735
-rw-r--r--configure.in345
-rw-r--r--extra/mysql_waitpid.c1
-rw-r--r--extra/perror.c4
-rw-r--r--heap/hp_hash.c110
-rw-r--r--include/config-win.h3
-rw-r--r--include/m_ctype.h7
-rw-r--r--include/m_string.h3
-rw-r--r--include/my_getopt.h16
-rw-r--r--include/my_global.h28
-rw-r--r--include/my_sys.h6
-rw-r--r--include/my_time.h19
-rw-r--r--include/myisampack.h357
-rw-r--r--include/mysql.h11
-rw-r--r--include/mysql_com.h41
-rw-r--r--include/mysql_time.h9
-rw-r--r--include/mysqld_error.h115
-rw-r--r--include/sql_common.h1
-rw-r--r--innobase/buf/buf0flu.c65
-rw-r--r--innobase/buf/buf0rea.c2
-rw-r--r--innobase/dict/dict0crea.c1
-rw-r--r--innobase/dict/dict0dict.c1
-rw-r--r--innobase/fil/fil0fil.c2
-rw-r--r--innobase/include/dyn0dyn.h3
-rw-r--r--innobase/include/mtr0log.h3
-rw-r--r--innobase/include/mtr0log.ic3
-rw-r--r--innobase/include/os0file.h4
-rw-r--r--innobase/include/page0page.ic15
-rw-r--r--innobase/include/srv0srv.h3
-rw-r--r--innobase/include/srv0start.h6
-rw-r--r--innobase/include/ut0dbg.h34
-rw-r--r--innobase/lock/lock0lock.c1
-rw-r--r--innobase/log/log0recv.c3
-rw-r--r--innobase/os/os0file.c69
-rw-r--r--innobase/row/row0mysql.c40
-rw-r--r--innobase/row/row0sel.c92
-rw-r--r--innobase/srv/srv0srv.c7
-rw-r--r--innobase/srv/srv0start.c64
-rw-r--r--innobase/ut/ut0dbg.c7
-rw-r--r--innobase/ut/ut0mem.c6
-rwxr-xr-xinstall-sh2
-rw-r--r--libmysql/Makefile.am17
-rw-r--r--libmysql/Makefile.shared39
-rw-r--r--libmysql/client_settings.h2
-rw-r--r--libmysql/conf_to_src.c9
-rw-r--r--libmysql/dll.c9
-rw-r--r--libmysql/errmsg.c9
-rw-r--r--libmysql/get_password.c9
-rw-r--r--libmysql/libmysql.c662
-rw-r--r--libmysql/manager.c9
-rw-r--r--libmysql_r/Makefile.am20
-rw-r--r--libmysqld/Makefile.am2
-rw-r--r--libmysqld/examples/Makefile.am2
-rw-r--r--libmysqld/lib_sql.cc59
-rwxr-xr-xlibmysqld/libmysqld.rc125
-rwxr-xr-xlibmysqld/resource.h15
-rw-r--r--man/Makefile.am2
-rw-r--r--man/mysqlman.1.in15
-rw-r--r--myisam/Makefile.am6
-rw-r--r--myisam/ftdefs.h2
-rw-r--r--myisam/mi_check.c68
-rw-r--r--myisam/mi_key.c90
-rw-r--r--myisam/mi_search.c72
-rw-r--r--myisam/myisam_ftdump.c2
-rw-r--r--myisam/myisamchk.c6
-rw-r--r--myisam/sort.c4
-rw-r--r--myisammrg/myrg_open.c137
-rw-r--r--mysql-test/Makefile.am1
-rw-r--r--mysql-test/include/have_csv.inc4
-rw-r--r--mysql-test/include/have_exampledb.inc4
-rw-r--r--mysql-test/mysql-test-run.sh32
-rw-r--r--mysql-test/ndb/ndb_config_2_node.ini59
-rw-r--r--mysql-test/ndb/ndbcluster.sh123
-rw-r--r--mysql-test/r/alter_table.result6
-rw-r--r--mysql-test/r/auto_increment.result18
-rw-r--r--mysql-test/r/binary.result28
-rw-r--r--mysql-test/r/connect.result3
-rw-r--r--mysql-test/r/create.result38
-rw-r--r--mysql-test/r/create_select_tmp.result8
-rw-r--r--mysql-test/r/csv.result4931
-rw-r--r--mysql-test/r/ctype_cp1251.result4
-rw-r--r--mysql-test/r/ctype_create.result8
-rw-r--r--mysql-test/r/ctype_recoding.result24
-rw-r--r--mysql-test/r/ctype_uca.result110
-rw-r--r--mysql-test/r/ctype_ucs.result16
-rw-r--r--mysql-test/r/ctype_utf8.result398
-rw-r--r--mysql-test/r/date_formats.result75
-rw-r--r--mysql-test/r/endspace.result43
-rw-r--r--mysql-test/r/exampledb.result6
-rw-r--r--mysql-test/r/flush_block_commit.result23
-rw-r--r--mysql-test/r/fulltext2.result9
-rw-r--r--mysql-test/r/func_gconcat.result51
-rw-r--r--mysql-test/r/func_if.result2
-rw-r--r--mysql-test/r/func_in.result10
-rw-r--r--mysql-test/r/func_like.result3
-rw-r--r--mysql-test/r/func_math.result32
-rw-r--r--mysql-test/r/func_set.result12
-rw-r--r--mysql-test/r/func_str.result5
-rw-r--r--mysql-test/r/gis.result19
-rw-r--r--mysql-test/r/grant.result19
-rw-r--r--mysql-test/r/have_csv.require2
-rw-r--r--mysql-test/r/have_exampledb.require2
-rw-r--r--mysql-test/r/having.result41
-rw-r--r--mysql-test/r/heap.result10
-rw-r--r--mysql-test/r/join.result6
-rw-r--r--mysql-test/r/join_outer.result71
-rw-r--r--mysql-test/r/key.result38
-rw-r--r--mysql-test/r/lowercase_table.result22
-rw-r--r--mysql-test/r/merge.result9
-rw-r--r--mysql-test/r/metadata.result2
-rw-r--r--mysql-test/r/myisam.result25
-rw-r--r--mysql-test/r/mysql_protocols.result9
-rw-r--r--mysql-test/r/mysqlbinlog2.result446
-rw-r--r--mysql-test/r/ndb_alter_table.result53
-rw-r--r--mysql-test/r/ndb_autodiscover.result4
-rw-r--r--mysql-test/r/ndb_basic.result195
-rw-r--r--mysql-test/r/ndb_blob.result324
-rw-r--r--mysql-test/r/ndb_index_ordered.result69
-rw-r--r--mysql-test/r/ndb_index_unique.result113
-rw-r--r--mysql-test/r/ndb_limit.result31
-rw-r--r--mysql-test/r/ndb_lock.result30
-rw-r--r--mysql-test/r/ndb_replace.result2
-rw-r--r--mysql-test/r/ndb_transaction.result257
-rw-r--r--mysql-test/r/ndb_truncate.result14
-rw-r--r--mysql-test/r/ndb_types.result36
-rw-r--r--mysql-test/r/negation_elimination.result13
-rw-r--r--mysql-test/r/null.result19
-rw-r--r--mysql-test/r/olap.result36
-rw-r--r--mysql-test/r/order_by.result73
-rw-r--r--mysql-test/r/ps.result51
-rw-r--r--mysql-test/r/ps_1general.result9
-rw-r--r--mysql-test/r/ps_2myisam.result2
-rw-r--r--mysql-test/r/ps_3innodb.result2
-rw-r--r--mysql-test/r/ps_4heap.result2
-rw-r--r--mysql-test/r/ps_5merge.result4
-rw-r--r--mysql-test/r/ps_6bdb.result2
-rw-r--r--mysql-test/r/range.result86
-rw-r--r--mysql-test/r/rename.result1
-rw-r--r--mysql-test/r/rpl_charset.result124
-rw-r--r--mysql-test/r/rpl_delete_all.result10
-rw-r--r--mysql-test/r/rpl_heap.result12
-rw-r--r--mysql-test/r/select_found.result5
-rw-r--r--mysql-test/r/show_check.result55
-rw-r--r--mysql-test/r/subselect.result79
-rw-r--r--mysql-test/r/subselect_gis.result8
-rw-r--r--mysql-test/r/system_mysql_db.result164
-rw-r--r--mysql-test/r/timezone2.result7
-rw-r--r--mysql-test/r/truncate.result21
-rw-r--r--mysql-test/r/type_blob.result20
-rw-r--r--mysql-test/r/type_date.result17
-rw-r--r--mysql-test/r/type_float.result14
-rw-r--r--mysql-test/r/type_ranges.result2
-rw-r--r--mysql-test/r/type_timestamp.result12
-rw-r--r--mysql-test/r/type_uint.result4
-rw-r--r--mysql-test/r/union.result8
-rw-r--r--mysql-test/r/variables.result16
-rw-r--r--mysql-test/t/alter_table.test11
-rw-r--r--mysql-test/t/auto_increment.test7
-rw-r--r--mysql-test/t/binary.test14
-rw-r--r--mysql-test/t/connect.test3
-rw-r--r--mysql-test/t/create.test34
-rw-r--r--mysql-test/t/create_select_tmp.test8
-rw-r--r--mysql-test/t/csv.test1315
-rw-r--r--mysql-test/t/ctype_create.test12
-rw-r--r--mysql-test/t/ctype_recoding.test14
-rw-r--r--mysql-test/t/ctype_uca.test2
-rw-r--r--mysql-test/t/ctype_ucs.test22
-rw-r--r--mysql-test/t/ctype_utf8.test329
-rw-r--r--mysql-test/t/date_formats.test19
-rw-r--r--mysql-test/t/endspace.test12
-rw-r--r--mysql-test/t/exampledb.test16
-rw-r--r--mysql-test/t/flush_block_commit.test52
-rw-r--r--mysql-test/t/fulltext2.test8
-rw-r--r--mysql-test/t/func_gconcat.test41
-rw-r--r--mysql-test/t/func_in.test7
-rw-r--r--mysql-test/t/func_like.test6
-rw-r--r--mysql-test/t/func_math.test4
-rw-r--r--mysql-test/t/func_set.test12
-rw-r--r--mysql-test/t/func_str.test6
-rw-r--r--mysql-test/t/gis.test18
-rw-r--r--mysql-test/t/grant.test16
-rw-r--r--mysql-test/t/grant_cache.test3
-rw-r--r--mysql-test/t/having.test47
-rw-r--r--mysql-test/t/heap.test14
-rw-r--r--mysql-test/t/join.test10
-rw-r--r--mysql-test/t/join_outer.test50
-rw-r--r--mysql-test/t/key.test26
-rw-r--r--mysql-test/t/lowercase_table.test20
-rw-r--r--mysql-test/t/merge.test14
-rw-r--r--mysql-test/t/myisam.test21
-rw-r--r--mysql-test/t/mysql_protocols.test10
-rw-r--r--mysql-test/t/mysqlbinlog2.test156
-rw-r--r--mysql-test/t/ndb_alter_table.test16
-rw-r--r--mysql-test/t/ndb_autodiscover.test2
-rw-r--r--mysql-test/t/ndb_basic.test72
-rw-r--r--mysql-test/t/ndb_blob.test275
-rw-r--r--mysql-test/t/ndb_index_ordered.test33
-rw-r--r--mysql-test/t/ndb_index_unique.test61
-rw-r--r--mysql-test/t/ndb_limit.test44
-rw-r--r--mysql-test/t/ndb_lock.test41
-rw-r--r--mysql-test/t/ndb_replace.test2
-rw-r--r--mysql-test/t/ndb_transaction.test297
-rw-r--r--mysql-test/t/ndb_truncate.test33
-rw-r--r--mysql-test/t/ndb_types.test47
-rw-r--r--mysql-test/t/negation_elimination.test4
-rw-r--r--mysql-test/t/null.test21
-rw-r--r--mysql-test/t/olap.test37
-rw-r--r--mysql-test/t/order_by.test45
-rw-r--r--mysql-test/t/ps.test72
-rw-r--r--mysql-test/t/ps_1general.test8
-rw-r--r--mysql-test/t/range.test45
-rw-r--r--mysql-test/t/rename.test2
-rw-r--r--mysql-test/t/rpl_charset.test36
-rw-r--r--mysql-test/t/rpl_delete_all.test6
-rw-r--r--mysql-test/t/rpl_heap.test6
-rw-r--r--mysql-test/t/rpl_relayrotate.test3
-rw-r--r--mysql-test/t/select_found.test14
-rw-r--r--mysql-test/t/show_check.test45
-rw-r--r--mysql-test/t/subselect.test61
-rw-r--r--mysql-test/t/subselect_gis.test15
-rw-r--r--mysql-test/t/timezone2.test12
-rw-r--r--mysql-test/t/truncate.test16
-rw-r--r--mysql-test/t/type_blob.test7
-rw-r--r--mysql-test/t/type_date.test19
-rw-r--r--mysql-test/t/type_float.test14
-rw-r--r--mysql-test/t/type_timestamp.test10
-rw-r--r--mysql-test/t/type_uint.test1
-rw-r--r--mysql-test/t/union.test8
-rw-r--r--mysql-test/t/variables.test11
-rw-r--r--mysys/Makefile.am3
-rw-r--r--mysys/hash.c4
-rw-r--r--mysys/list.c2
-rw-r--r--mysys/mf_iocache.c4
-rw-r--r--mysys/mf_keycache.c73
-rw-r--r--mysys/mf_keycaches.c4
-rw-r--r--mysys/mf_tempfile.c4
-rw-r--r--mysys/my_alloc.c6
-rw-r--r--mysys/my_fopen.c6
-rw-r--r--mysys/my_fstream.c8
-rw-r--r--mysys/my_getopt.c108
-rw-r--r--mysys/my_getwd.c2
-rw-r--r--mysys/my_handler.c4
-rw-r--r--mysys/my_lib.c13
-rw-r--r--mysys/my_lwrite.c2
-rw-r--r--mysys/my_malloc.c4
-rw-r--r--mysys/my_pread.c4
-rw-r--r--mysys/my_read.c2
-rw-r--r--mysys/my_realloc.c4
-rw-r--r--mysys/my_write.c2
-rw-r--r--mysys/safemalloc.c8
-rw-r--r--mysys/thr_alarm.c4
-rw-r--r--mysys/thr_lock.c18
-rw-r--r--mysys/thr_mutex.c4
-rw-r--r--mysys/tree.c4
-rw-r--r--mysys/typelib.c2
-rw-r--r--ndb/include/Makefile.am1
-rw-r--r--ndb/include/debugger/EventLogger.hpp7
-rw-r--r--ndb/include/kernel/GlobalSignalNumbers.h11
-rw-r--r--ndb/include/kernel/Interpreter.hpp6
-rw-r--r--ndb/include/kernel/NodeInfo.hpp9
-rw-r--r--ndb/include/kernel/NodeState.hpp21
-rw-r--r--ndb/include/kernel/kernel_config_parameters.h5
-rw-r--r--ndb/include/kernel/ndb_limits.h2
-rw-r--r--ndb/include/kernel/signaldata/CreateFragmentation.hpp2
-rw-r--r--ndb/include/kernel/signaldata/DictTabInfo.hpp6
-rw-r--r--ndb/include/kernel/signaldata/DropTab.hpp3
-rw-r--r--ndb/include/kernel/signaldata/DumpStateOrd.hpp1
-rw-r--r--ndb/include/kernel/signaldata/KeyInfo.hpp1
-rw-r--r--ndb/include/kernel/signaldata/PrepDropTab.hpp6
-rw-r--r--ndb/include/kernel/signaldata/ScanTab.hpp155
-rw-r--r--ndb/include/kernel/signaldata/SignalData.hpp16
-rw-r--r--ndb/include/kernel/signaldata/StartInfo.hpp2
-rw-r--r--ndb/include/kernel/signaldata/TcCommit.hpp1
-rw-r--r--ndb/include/kernel/signaldata/TcKeyReq.hpp1
-rw-r--r--ndb/include/kernel/signaldata/TupAccess.hpp174
-rw-r--r--ndb/include/mgmapi/mgmapi.h15
-rw-r--r--ndb/include/mgmapi/mgmapi_config_parameters.h3
-rw-r--r--ndb/include/mgmapi/mgmapi_debug.h25
-rw-r--r--ndb/include/mgmcommon/ConfigRetriever.hpp45
-rw-r--r--ndb/include/mgmcommon/LocalConfig.hpp (renamed from ndb/src/common/mgmcommon/LocalConfig.hpp)34
-rw-r--r--ndb/include/mgmcommon/MgmtErrorReporter.hpp6
-rw-r--r--ndb/include/mgmcommon/NdbConfig.h13
-rw-r--r--ndb/include/ndb_global.h12
-rw-r--r--ndb/include/ndbapi/Ndb.hpp42
-rw-r--r--ndb/include/ndbapi/NdbApi.hpp2
-rw-r--r--ndb/include/ndbapi/NdbBlob.hpp86
-rw-r--r--ndb/include/ndbapi/NdbConnection.hpp193
-rw-r--r--ndb/include/ndbapi/NdbCursorOperation.hpp73
-rw-r--r--ndb/include/ndbapi/NdbDictionary.hpp13
-rw-r--r--ndb/include/ndbapi/NdbIndexOperation.hpp2
-rw-r--r--ndb/include/ndbapi/NdbIndexScanOperation.hpp140
-rw-r--r--ndb/include/ndbapi/NdbOperation.hpp246
-rw-r--r--ndb/include/ndbapi/NdbRecAttr.hpp50
-rw-r--r--ndb/include/ndbapi/NdbReceiver.hpp67
-rw-r--r--ndb/include/ndbapi/NdbResultSet.hpp54
-rw-r--r--ndb/include/ndbapi/NdbScanOperation.hpp231
-rw-r--r--ndb/include/portlib/NdbTCP.h2
-rw-r--r--ndb/include/transporter/TransporterDefinitions.hpp1
-rw-r--r--ndb/include/transporter/TransporterRegistry.hpp83
-rw-r--r--ndb/include/util/BaseString.hpp2
-rw-r--r--ndb/include/util/Bitmask.hpp171
-rw-r--r--ndb/include/util/ConfigValues.hpp6
-rw-r--r--ndb/include/util/NdbSqlUtil.hpp126
-rw-r--r--ndb/include/util/Properties.hpp2
-rw-r--r--ndb/include/util/SocketAuthenticator.hpp39
-rw-r--r--ndb/include/util/SocketClient.hpp38
-rw-r--r--ndb/src/common/debugger/DebuggerNames.cpp15
-rw-r--r--ndb/src/common/debugger/EventLogger.cpp16
-rw-r--r--ndb/src/common/debugger/SignalLoggerManager.cpp25
-rw-r--r--ndb/src/common/debugger/signaldata/DictTabInfo.cpp2
-rw-r--r--ndb/src/common/debugger/signaldata/LCP.cpp3
-rw-r--r--ndb/src/common/debugger/signaldata/Makefile.am3
-rw-r--r--ndb/src/common/debugger/signaldata/ScanTab.cpp111
-rw-r--r--ndb/src/common/debugger/signaldata/SignalDataPrint.cpp12
-rw-r--r--ndb/src/common/debugger/signaldata/SignalNames.cpp4
-rw-r--r--ndb/src/common/debugger/signaldata/TupAccess.cpp131
-rw-r--r--ndb/src/common/logger/FileLogHandler.cpp4
-rw-r--r--ndb/src/common/logger/Logger.cpp1
-rw-r--r--ndb/src/common/mgmcommon/ConfigInfo.cpp994
-rw-r--r--ndb/src/common/mgmcommon/ConfigInfo.hpp7
-rw-r--r--ndb/src/common/mgmcommon/ConfigRetriever.cpp302
-rw-r--r--ndb/src/common/mgmcommon/IPCConfig.cpp19
-rw-r--r--ndb/src/common/mgmcommon/InitConfigFileParser.cpp45
-rw-r--r--ndb/src/common/mgmcommon/LocalConfig.cpp130
-rw-r--r--ndb/src/common/mgmcommon/NdbConfig.c112
-rw-r--r--ndb/src/common/portlib/Makefile.am2
-rw-r--r--ndb/src/common/portlib/NdbTCP.cpp (renamed from ndb/src/common/portlib/NdbTCP.c)5
-rw-r--r--ndb/src/common/transporter/Packer.cpp14
-rw-r--r--ndb/src/common/transporter/SHM_Transporter.cpp162
-rw-r--r--ndb/src/common/transporter/SHM_Transporter.hpp30
-rw-r--r--ndb/src/common/transporter/SHM_Transporter.unix.cpp124
-rw-r--r--ndb/src/common/transporter/TCP_Transporter.cpp250
-rw-r--r--ndb/src/common/transporter/TCP_Transporter.hpp83
-rw-r--r--ndb/src/common/transporter/Transporter.cpp188
-rw-r--r--ndb/src/common/transporter/Transporter.hpp109
-rw-r--r--ndb/src/common/transporter/TransporterRegistry.cpp450
-rw-r--r--ndb/src/common/util/BaseString.cpp3
-rw-r--r--ndb/src/common/util/ConfigValues.cpp26
-rw-r--r--ndb/src/common/util/Makefile.am3
-rw-r--r--ndb/src/common/util/NdbSqlUtil.cpp22
-rw-r--r--ndb/src/common/util/Parser.cpp1
-rw-r--r--ndb/src/common/util/Properties.cpp39
-rw-r--r--ndb/src/common/util/SocketAuthenticator.cpp91
-rw-r--r--ndb/src/common/util/SocketClient.cpp92
-rw-r--r--ndb/src/common/util/SocketServer.cpp16
-rw-r--r--ndb/src/common/util/socket_io.cpp4
-rw-r--r--ndb/src/cw/cpcd/APIService.cpp2
-rw-r--r--ndb/src/cw/cpcd/CPCD.cpp7
-rw-r--r--ndb/src/cw/cpcd/Makefile.am2
-rw-r--r--ndb/src/cw/cpcd/Monitor.cpp2
-rw-r--r--ndb/src/cw/cpcd/Process.cpp88
-rw-r--r--ndb/src/cw/cpcd/main.cpp1
-rw-r--r--ndb/src/kernel/Makefile.am2
-rw-r--r--ndb/src/kernel/blocks/ERROR_codes.txt6
-rw-r--r--ndb/src/kernel/blocks/backup/Backup.cpp5
-rw-r--r--ndb/src/kernel/blocks/backup/Backup.hpp3
-rw-r--r--ndb/src/kernel/blocks/backup/BackupInit.cpp3
-rw-r--r--ndb/src/kernel/blocks/backup/restore/Restore.cpp20
-rw-r--r--ndb/src/kernel/blocks/backup/restore/Restore.hpp5
-rw-r--r--ndb/src/kernel/blocks/backup/restore/main.cpp9
-rw-r--r--ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp102
-rw-r--r--ndb/src/kernel/blocks/dbacc/Dbacc.hpp5
-rw-r--r--ndb/src/kernel/blocks/dbacc/DbaccInit.cpp6
-rw-r--r--ndb/src/kernel/blocks/dbacc/DbaccMain.cpp203
-rw-r--r--ndb/src/kernel/blocks/dbdict/Dbdict.cpp99
-rw-r--r--ndb/src/kernel/blocks/dbdict/Dbdict.hpp2
-rw-r--r--ndb/src/kernel/blocks/dbdih/Dbdih.hpp4
-rw-r--r--ndb/src/kernel/blocks/dbdih/DbdihInit.cpp8
-rw-r--r--ndb/src/kernel/blocks/dbdih/DbdihMain.cpp251
-rw-r--r--ndb/src/kernel/blocks/dblqh/Dblqh.hpp13
-rw-r--r--ndb/src/kernel/blocks/dblqh/DblqhInit.cpp29
-rw-r--r--ndb/src/kernel/blocks/dblqh/DblqhMain.cpp552
-rw-r--r--ndb/src/kernel/blocks/dbtc/Dbtc.hpp282
-rw-r--r--ndb/src/kernel/blocks/dbtc/DbtcInit.cpp57
-rw-r--r--ndb/src/kernel/blocks/dbtc/DbtcMain.cpp2279
-rw-r--r--ndb/src/kernel/blocks/dbtup/Dbtup.hpp27
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp292
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp12
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp196
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupGen.cpp14
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp286
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp2
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp16
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp38
-rw-r--r--ndb/src/kernel/blocks/dbtux/Dbtux.hpp164
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp256
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp95
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp46
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp112
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp7
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp35
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp201
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp333
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp111
-rw-r--r--ndb/src/kernel/blocks/dbtux/Makefile.am1
-rw-r--r--ndb/src/kernel/blocks/dbtux/Times.txt47
-rw-r--r--ndb/src/kernel/blocks/dbutil/DbUtil.cpp2
-rw-r--r--ndb/src/kernel/blocks/grep/Grep.cpp7
-rw-r--r--ndb/src/kernel/blocks/grep/Grep.hpp15
-rw-r--r--ndb/src/kernel/blocks/grep/GrepInit.cpp2
-rw-r--r--ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp33
-rw-r--r--ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp6
-rw-r--r--ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp2
-rw-r--r--ndb/src/kernel/blocks/ndbfs/Filename.cpp11
-rw-r--r--ndb/src/kernel/blocks/ndbfs/Filename.hpp2
-rw-r--r--ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp3
-rw-r--r--ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp6
-rw-r--r--ndb/src/kernel/blocks/ndbfs/Pool.hpp1
-rw-r--r--ndb/src/kernel/blocks/qmgr/QmgrMain.cpp22
-rw-r--r--ndb/src/kernel/blocks/suma/Suma.cpp93
-rw-r--r--ndb/src/kernel/blocks/trix/Trix.cpp6
-rw-r--r--ndb/src/kernel/error/ErrorReporter.cpp39
-rw-r--r--ndb/src/kernel/error/ErrorReporter.hpp31
-rw-r--r--ndb/src/kernel/main.cpp (renamed from ndb/src/kernel/Main.cpp)194
-rw-r--r--ndb/src/kernel/vm/ArrayPool.hpp18
-rw-r--r--ndb/src/kernel/vm/Configuration.cpp152
-rw-r--r--ndb/src/kernel/vm/Configuration.hpp6
-rw-r--r--ndb/src/kernel/vm/DataBuffer.hpp2
-rw-r--r--ndb/src/kernel/vm/Emulator.cpp4
-rw-r--r--ndb/src/kernel/vm/MetaData.cpp2
-rw-r--r--ndb/src/kernel/vm/SimulatedBlock.cpp103
-rw-r--r--ndb/src/kernel/vm/SimulatedBlock.hpp28
-rw-r--r--ndb/src/kernel/vm/ThreadConfig.cpp2
-rw-r--r--ndb/src/kernel/vm/TransporterCallback.cpp25
-rw-r--r--ndb/src/mgmapi/Makefile.am2
-rw-r--r--ndb/src/mgmapi/mgmapi.cpp232
-rw-r--r--ndb/src/mgmclient/CommandInterpreter.cpp76
-rw-r--r--ndb/src/mgmclient/CpcClient.cpp8
-rw-r--r--ndb/src/mgmclient/main.cpp13
-rw-r--r--ndb/src/mgmsrv/CommandInterpreter.cpp12
-rw-r--r--ndb/src/mgmsrv/MgmtSrvr.cpp383
-rw-r--r--ndb/src/mgmsrv/MgmtSrvr.hpp33
-rw-r--r--ndb/src/mgmsrv/MgmtSrvrConfig.cpp7
-rw-r--r--ndb/src/mgmsrv/Services.cpp163
-rw-r--r--ndb/src/mgmsrv/Services.hpp5
-rw-r--r--ndb/src/mgmsrv/main.cpp196
-rw-r--r--ndb/src/ndbapi/ClusterMgr.cpp2
-rw-r--r--ndb/src/ndbapi/DictCache.cpp11
-rw-r--r--ndb/src/ndbapi/Makefile.am4
-rw-r--r--ndb/src/ndbapi/Makefile_old13
-rw-r--r--ndb/src/ndbapi/Ndb.cpp196
-rw-r--r--ndb/src/ndbapi/NdbApiSignal.cpp26
-rw-r--r--ndb/src/ndbapi/NdbBlob.cpp291
-rw-r--r--ndb/src/ndbapi/NdbConnection.cpp717
-rw-r--r--ndb/src/ndbapi/NdbConnectionScan.cpp545
-rw-r--r--ndb/src/ndbapi/NdbCursorOperation.cpp6
-rw-r--r--ndb/src/ndbapi/NdbDictionary.cpp99
-rw-r--r--ndb/src/ndbapi/NdbDictionaryImpl.cpp111
-rw-r--r--ndb/src/ndbapi/NdbDictionaryImpl.hpp13
-rw-r--r--ndb/src/ndbapi/NdbEventOperation.cpp2
-rw-r--r--ndb/src/ndbapi/NdbEventOperationImpl.cpp38
-rw-r--r--ndb/src/ndbapi/NdbImpl.hpp11
-rw-r--r--ndb/src/ndbapi/NdbIndexOperation.cpp19
-rw-r--r--ndb/src/ndbapi/NdbLinHash.hpp12
-rw-r--r--ndb/src/ndbapi/NdbOperation.cpp75
-rw-r--r--ndb/src/ndbapi/NdbOperationDefine.cpp121
-rw-r--r--ndb/src/ndbapi/NdbOperationExec.cpp392
-rw-r--r--ndb/src/ndbapi/NdbOperationInt.cpp46
-rw-r--r--ndb/src/ndbapi/NdbOperationScan.cpp587
-rw-r--r--ndb/src/ndbapi/NdbRecAttr.cpp84
-rw-r--r--ndb/src/ndbapi/NdbReceiver.cpp185
-rw-r--r--ndb/src/ndbapi/NdbResultSet.cpp59
-rw-r--r--ndb/src/ndbapi/NdbScanFilter.cpp10
-rw-r--r--ndb/src/ndbapi/NdbScanOperation.cpp1752
-rw-r--r--ndb/src/ndbapi/Ndbif.cpp425
-rw-r--r--ndb/src/ndbapi/Ndbinit.cpp7
-rw-r--r--ndb/src/ndbapi/Ndblist.cpp42
-rw-r--r--ndb/src/ndbapi/ObjectMap.hpp41
-rw-r--r--ndb/src/ndbapi/ScanOperation.txt46
-rw-r--r--ndb/src/ndbapi/TransporterFacade.cpp144
-rw-r--r--ndb/src/ndbapi/TransporterFacade.hpp9
-rw-r--r--ndb/src/ndbapi/ndberror.c7
-rw-r--r--ndb/test/include/HugoOperations.hpp54
-rw-r--r--ndb/test/include/HugoTransactions.hpp33
-rw-r--r--ndb/test/include/NDBT_Table.hpp8
-rw-r--r--ndb/test/include/NDBT_Tables.hpp3
-rw-r--r--ndb/test/include/NDBT_Test.hpp15
-rw-r--r--ndb/test/include/NdbRestarter.hpp5
-rw-r--r--ndb/test/include/UtilTransactions.hpp14
-rw-r--r--ndb/test/ndbapi/Makefile.am7
-rw-r--r--ndb/test/ndbapi/Makefile_old4
-rw-r--r--ndb/test/ndbapi/ScanFunctions.hpp143
-rw-r--r--ndb/test/ndbapi/ScanInterpretTest.hpp52
-rw-r--r--ndb/test/ndbapi/bank/Bank.cpp102
-rw-r--r--ndb/test/ndbapi/bank/BankLoad.cpp12
-rw-r--r--ndb/test/ndbapi/create_tab.cpp5
-rw-r--r--ndb/test/ndbapi/flexAsynch.cpp14
-rw-r--r--ndb/test/ndbapi/flexBench.cpp20
-rw-r--r--ndb/test/ndbapi/flexTT.cpp6
-rw-r--r--ndb/test/ndbapi/old_dirs/testBackup/Makefile1
-rw-r--r--ndb/test/ndbapi/old_dirs/testGrep/Makefile1
-rw-r--r--ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile1
-rw-r--r--ndb/test/ndbapi/testBackup.cpp8
-rw-r--r--ndb/test/ndbapi/testBasic.cpp48
-rw-r--r--ndb/test/ndbapi/testBlobs.cpp636
-rw-r--r--ndb/test/ndbapi/testDataBuffers.cpp10
-rw-r--r--ndb/test/ndbapi/testDeadlock.cpp514
-rw-r--r--ndb/test/ndbapi/testDict.cpp1
-rw-r--r--ndb/test/ndbapi/testGrep.cpp7
-rw-r--r--ndb/test/ndbapi/testIndex.cpp51
-rw-r--r--ndb/test/ndbapi/testNdbApi.cpp3
-rw-r--r--ndb/test/ndbapi/testNodeRestart.cpp2
-rw-r--r--ndb/test/ndbapi/testOIBasic.cpp1023
-rw-r--r--ndb/test/ndbapi/testRestartGci.cpp2
-rw-r--r--ndb/test/ndbapi/testScan.cpp193
-rw-r--r--ndb/test/ndbapi/testScanPerf.cpp (renamed from ndb/test/ndbapi/testScanPerf/testScanPerf.cpp)0
-rw-r--r--ndb/test/ndbapi/testScanPerf/Makefile9
-rw-r--r--ndb/test/ndbapi/testTimeout.cpp235
-rw-r--r--ndb/test/ndbapi/testTransactions.cpp16
-rw-r--r--ndb/test/run-test/Makefile.am16
-rw-r--r--ndb/test/run-test/daily-basic-tests.txt1008
-rw-r--r--ndb/test/run-test/daily-devel-tests.txt204
-rw-r--r--ndb/test/run-test/main.cpp74
-rw-r--r--ndb/test/src/HugoAsynchTransactions.cpp34
-rw-r--r--ndb/test/src/HugoCalculator.cpp10
-rw-r--r--ndb/test/src/HugoOperations.cpp269
-rw-r--r--ndb/test/src/HugoTransactions.cpp327
-rw-r--r--ndb/test/src/NDBT_Table.cpp29
-rw-r--r--ndb/test/src/NDBT_Tables.cpp176
-rw-r--r--ndb/test/src/NDBT_Test.cpp69
-rw-r--r--ndb/test/src/NdbBackup.cpp53
-rw-r--r--ndb/test/src/NdbRestarter.cpp32
-rw-r--r--ndb/test/src/NdbRestarts.cpp12
-rw-r--r--ndb/test/src/UtilTransactions.cpp282
-rw-r--r--ndb/test/tools/cpcc.cpp8
-rw-r--r--ndb/test/tools/create_index.cpp20
-rw-r--r--ndb/test/tools/hugoPkReadRecord.cpp9
-rw-r--r--ndb/tools/delete_all.cpp2
-rw-r--r--ndb/tools/desc.cpp5
-rw-r--r--ndb/tools/select_all.cpp64
-rw-r--r--ndb/tools/select_count.cpp27
-rw-r--r--ndb/tools/waiter.cpp34
-rw-r--r--netware/mysql_test_run.c2
-rw-r--r--scripts/make_binary_distribution.sh2
-rw-r--r--scripts/make_sharedlib_distribution.sh6
-rw-r--r--scripts/make_win_src_distribution.sh2
-rw-r--r--scripts/mysql_create_system_tables.sh51
-rw-r--r--scripts/mysql_fix_privilege_tables.sql36
-rw-r--r--scripts/mysqld_safe.sh17
-rw-r--r--sql-bench/server-cfg.sh1
-rw-r--r--sql-common/client.c15
-rw-r--r--sql-common/my_time.c167
-rw-r--r--sql/Makefile.am36
-rw-r--r--sql/examples/ha_archive.cc125
-rw-r--r--sql/examples/ha_archive.h8
-rw-r--r--sql/examples/ha_tina.cc851
-rw-r--r--sql/examples/ha_tina.h132
-rw-r--r--sql/field.cc144
-rw-r--r--sql/field.h15
-rw-r--r--sql/field_conv.cc4
-rw-r--r--sql/gen_lex_hash.cc2
-rw-r--r--sql/ha_berkeley.cc6
-rw-r--r--sql/ha_heap.cc5
-rw-r--r--sql/ha_innodb.cc83
-rw-r--r--sql/ha_innodb.h3
-rw-r--r--sql/ha_myisam.cc22
-rw-r--r--sql/ha_ndbcluster.cc1194
-rw-r--r--sql/ha_ndbcluster.h53
-rw-r--r--sql/handler.cc77
-rw-r--r--sql/handler.h5
-rw-r--r--sql/item.cc80
-rw-r--r--sql/item.h22
-rw-r--r--sql/item_cmpfunc.cc257
-rw-r--r--sql/item_cmpfunc.h11
-rw-r--r--sql/item_create.cc5
-rw-r--r--sql/item_create.h1
-rw-r--r--sql/item_func.cc71
-rw-r--r--sql/item_func.h9
-rw-r--r--sql/item_row.cc3
-rw-r--r--sql/item_strfunc.cc22
-rw-r--r--sql/item_subselect.cc225
-rw-r--r--sql/item_subselect.h2
-rw-r--r--sql/item_sum.cc35
-rw-r--r--sql/item_sum.h16
-rw-r--r--sql/item_timefunc.cc173
-rw-r--r--sql/item_timefunc.h16
-rw-r--r--sql/key.cc15
-rw-r--r--sql/lex.h2
-rw-r--r--sql/lock.cc73
-rw-r--r--sql/log.cc267
-rw-r--r--sql/log_event.cc4
-rw-r--r--sql/mysql_priv.h35
-rw-r--r--sql/mysqld.cc894
-rw-r--r--sql/opt_range.cc12
-rw-r--r--sql/protocol.cc10
-rw-r--r--sql/records.cc3
-rw-r--r--sql/repl_failsafe.cc9
-rw-r--r--sql/set_var.cc36
-rw-r--r--sql/set_var.h1
-rw-r--r--sql/share/charsets/cp852.xml2
-rw-r--r--sql/share/czech/errmsg.txt21
-rw-r--r--sql/share/danish/errmsg.txt22
-rw-r--r--sql/share/dutch/errmsg.txt23
-rw-r--r--sql/share/english/errmsg.txt22
-rw-r--r--sql/share/estonian/errmsg.txt24
-rw-r--r--sql/share/french/errmsg.txt22
-rw-r--r--sql/share/german/errmsg.txt25
-rw-r--r--sql/share/greek/errmsg.txt22
-rw-r--r--sql/share/hungarian/errmsg.txt25
-rw-r--r--sql/share/italian/errmsg.txt20
-rw-r--r--sql/share/japanese/errmsg.txt24
-rw-r--r--sql/share/korean/errmsg.txt22
-rw-r--r--sql/share/norwegian-ny/errmsg.txt22
-rw-r--r--sql/share/norwegian/errmsg.txt22
-rw-r--r--sql/share/polish/errmsg.txt23
-rw-r--r--sql/share/portuguese/errmsg.txt23
-rw-r--r--sql/share/romanian/errmsg.txt23
-rw-r--r--sql/share/russian/errmsg.txt25
-rw-r--r--sql/share/serbian/errmsg.txt3
-rw-r--r--sql/share/slovak/errmsg.txt22
-rw-r--r--sql/share/spanish/errmsg.txt24
-rw-r--r--sql/share/swedish/errmsg.OLD221
-rw-r--r--sql/share/swedish/errmsg.txt22
-rw-r--r--sql/share/ukrainian/errmsg.txt25
-rw-r--r--sql/slave.cc31
-rw-r--r--sql/sql_acl.cc46
-rw-r--r--sql/sql_acl.h3
-rw-r--r--sql/sql_analyse.cc3
-rw-r--r--sql/sql_base.cc241
-rw-r--r--sql/sql_class.cc151
-rw-r--r--sql/sql_class.h116
-rw-r--r--sql/sql_db.cc19
-rw-r--r--sql/sql_delete.cc8
-rw-r--r--sql/sql_insert.cc7
-rw-r--r--sql/sql_lex.cc17
-rw-r--r--sql/sql_lex.h13
-rw-r--r--sql/sql_list.h4
-rw-r--r--sql/sql_load.cc18
-rw-r--r--sql/sql_parse.cc148
-rw-r--r--sql/sql_prepare.cc207
-rw-r--r--sql/sql_rename.cc2
-rw-r--r--sql/sql_select.cc260
-rw-r--r--sql/sql_select.h1
-rw-r--r--sql/sql_show.cc18
-rw-r--r--sql/sql_string.h9
-rw-r--r--sql/sql_table.cc60
-rw-r--r--sql/sql_union.cc4
-rw-r--r--sql/sql_yacc.yy163
-rw-r--r--sql/table.cc11
-rw-r--r--sql/time.cc157
-rw-r--r--sql/tztime.cc332
-rw-r--r--sql/tztime.h12
-rw-r--r--sql/unireg.cc2
-rw-r--r--strings/ctype-big5.c3
-rw-r--r--strings/ctype-bin.c112
-rw-r--r--strings/ctype-euc_kr.c3
-rw-r--r--strings/ctype-gb2312.c3
-rw-r--r--strings/ctype-gbk.c3
-rw-r--r--strings/ctype-latin1.c3
-rw-r--r--strings/ctype-mb.c328
-rw-r--r--strings/ctype-simple.c17
-rw-r--r--strings/ctype-sjis.c3
-rw-r--r--strings/ctype-tis620.c3
-rw-r--r--strings/ctype-uca.c2
-rw-r--r--strings/ctype-ucs2.c3
-rw-r--r--strings/ctype-ujis.c3
-rw-r--r--strings/ctype-utf8.c324
-rw-r--r--strings/my_vsnprintf.c50
-rw-r--r--strings/strto.c8
-rw-r--r--strings/strtol.c9
-rw-r--r--strings/strtoll.c13
-rw-r--r--strings/strtoul.c9
-rw-r--r--strings/strtoull.c13
-rw-r--r--strings/utr11-dump.c112
-rw-r--r--support-files/Makefile.am3
-rw-r--r--support-files/MySQL-shared-compat.spec.sh4
-rw-r--r--support-files/mysql.spec.sh79
-rw-r--r--tests/client_test.c190
-rw-r--r--tools/Makefile.am22
-rw-r--r--zlib/Makefile.am29
739 files changed, 39155 insertions, 18168 deletions
diff --git a/.bzrignore b/.bzrignore
index 6f5cd2de56a..be7211af9e2 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -14,6 +14,7 @@
*/.pure
*~
.*.swp
+./config.h
.defs.mk
.depend
.depend.mk
@@ -77,6 +78,7 @@ Docs/mysql.xml
Docs/safe-mysql.xml
Docs/tex.fmt
Docs/texi2dvi.out
+EXCEPTIONS-CLIENT
INSTALL-SOURCE
INSTALL-WIN-SOURCE
Logs/*
@@ -336,6 +338,7 @@ libmysqld/derror.cc
libmysqld/discover.cc
libmysqld/errmsg.c
libmysqld/examples/client_test.c
+libmysqld/examples/client_test.cc
libmysqld/examples/completion_hash.cc
libmysqld/examples/completion_hash.h
libmysqld/examples/link_sources
@@ -441,6 +444,7 @@ libmysqld/sql_udf.cc
libmysqld/sql_union.cc
libmysqld/sql_unions.cc
libmysqld/sql_update.cc
+libmysqld/sql_view.cc
libmysqld/sql_yacc.cc
libmysqld/stacktrace.c
libmysqld/strfunc.cc
@@ -656,7 +660,71 @@ ndb/lib/libREP_API.so
ndb/lib/libndbclient.so
ndb/lib/libndbclient_extra.so
ndb/src/common/mgmcommon/printConfig/*.d
+ndb/src/cw/cpcd/ndb_cpcd
+ndb/src/kernel/blocks/backup/restore/ndb_restore
+ndb/src/kernel/ndbd
+ndb/src/mgmclient/ndb_mgm
ndb/src/mgmclient/test_cpcd/*.d
+ndb/src/mgmsrv/ndb_mgmd
+ndb/test/ndbapi/bank/bankCreator
+ndb/test/ndbapi/bank/bankMakeGL
+ndb/test/ndbapi/bank/bankSumAccounts
+ndb/test/ndbapi/bank/bankTimer
+ndb/test/ndbapi/bank/bankTransactionMaker
+ndb/test/ndbapi/bank/bankValidateAllGLs
+ndb/test/ndbapi/bank/testBank
+ndb/test/ndbapi/create_all_tabs
+ndb/test/ndbapi/create_tab
+ndb/test/ndbapi/drop_all_tabs
+ndb/test/ndbapi/flexAsynch
+ndb/test/ndbapi/flexBench
+ndb/test/ndbapi/flexHammer
+ndb/test/ndbapi/flexTT
+ndb/test/ndbapi/testBackup
+ndb/test/ndbapi/testBasic
+ndb/test/ndbapi/testBasicAsynch
+ndb/test/ndbapi/testBlobs
+ndb/test/ndbapi/testDataBuffers
+ndb/test/ndbapi/testDeadlock
+ndb/test/ndbapi/testDict
+ndb/test/ndbapi/testIndex
+ndb/test/ndbapi/testMgm
+ndb/test/ndbapi/testNdbApi
+ndb/test/ndbapi/testNodeRestart
+ndb/test/ndbapi/testOIBasic
+ndb/test/ndbapi/testOperations
+ndb/test/ndbapi/testRestartGci
+ndb/test/ndbapi/testScan
+ndb/test/ndbapi/testScanInterpreter
+ndb/test/ndbapi/testScanPerf
+ndb/test/ndbapi/testSystemRestart
+ndb/test/ndbapi/testTimeout
+ndb/test/ndbapi/testTransactions
+ndb/test/ndbapi/test_event
+ndb/test/run-test/atrt
+ndb/test/tools/copy_tab
+ndb/test/tools/create_index
+ndb/test/tools/hugoCalculator
+ndb/test/tools/hugoFill
+ndb/test/tools/hugoLoad
+ndb/test/tools/hugoLockRecords
+ndb/test/tools/hugoPkDelete
+ndb/test/tools/hugoPkRead
+ndb/test/tools/hugoPkReadRecord
+ndb/test/tools/hugoPkUpdate
+ndb/test/tools/hugoScanRead
+ndb/test/tools/hugoScanUpdate
+ndb/test/tools/ndb_cpcc
+ndb/test/tools/restart
+ndb/test/tools/verify_index
+ndb/tools/ndb_delete_all
+ndb/tools/ndb_desc
+ndb/tools/ndb_drop_index
+ndb/tools/ndb_drop_table
+ndb/tools/ndb_select_all
+ndb/tools/ndb_select_count
+ndb/tools/ndb_show_tables
+ndb/tools/ndb_waiter
pull.log
regex/re
repl-tests/test-repl-ts/repl-timestamp.master.reject
@@ -773,6 +841,7 @@ support-files/MacOSX/postinstall
support-files/MacOSX/preinstall
support-files/binary-configure
support-files/my-huge.cnf
+support-files/my-innodb-heavy-4G.cnf
support-files/my-large.cnf
support-files/my-medium.cnf
support-files/my-small.cnf
@@ -785,6 +854,57 @@ support-files/mysql-log-rotate
support-files/mysql.server
support-files/mysql.spec
tags
+test/ndbapi/bank/bankCreator
+test/ndbapi/bank/bankMakeGL
+test/ndbapi/bank/bankSumAccounts
+test/ndbapi/bank/bankTimer
+test/ndbapi/bank/bankTransactionMaker
+test/ndbapi/bank/bankValidateAllGLs
+test/ndbapi/bank/testBank
+test/ndbapi/create_all_tabs
+test/ndbapi/create_tab
+test/ndbapi/drop_all_tabs
+test/ndbapi/flexAsynch
+test/ndbapi/flexBench
+test/ndbapi/flexHammer
+test/ndbapi/flexTT
+test/ndbapi/testBackup
+test/ndbapi/testBasic
+test/ndbapi/testBasicAsynch
+test/ndbapi/testBlobs
+test/ndbapi/testDataBuffers
+test/ndbapi/testDeadlock
+test/ndbapi/testDict
+test/ndbapi/testIndex
+test/ndbapi/testMgm
+test/ndbapi/testNdbApi
+test/ndbapi/testNodeRestart
+test/ndbapi/testOIBasic
+test/ndbapi/testOperations
+test/ndbapi/testRestartGci
+test/ndbapi/testScan
+test/ndbapi/testScanInterpreter
+test/ndbapi/testScanPerf
+test/ndbapi/testSystemRestart
+test/ndbapi/testTimeout
+test/ndbapi/testTransactions
+test/ndbapi/test_event
+test/run-test/atrt
+test/tools/copy_tab
+test/tools/create_index
+test/tools/hugoCalculator
+test/tools/hugoFill
+test/tools/hugoLoad
+test/tools/hugoLockRecords
+test/tools/hugoPkDelete
+test/tools/hugoPkRead
+test/tools/hugoPkReadRecord
+test/tools/hugoPkUpdate
+test/tools/hugoScanRead
+test/tools/hugoScanUpdate
+test/tools/ndb_cpcc
+test/tools/restart
+test/tools/verify_index
test_xml
tests/client_test
tests/connect_test
@@ -799,5 +919,3 @@ vio/test-ssl
vio/test-sslclient
vio/test-sslserver
vio/viotest-ssl
-libmysqld/sql_view.cc
-libmysqld/examples/client_test.cc
diff --git a/BUILD/Makefile.am b/BUILD/Makefile.am
index 2414d4f3a44..9f3c55c20d5 100644
--- a/BUILD/Makefile.am
+++ b/BUILD/Makefile.am
@@ -38,6 +38,7 @@ EXTRA_DIST = FINISH.sh \
compile-solaris-sparc \
compile-solaris-sparc-debug \
compile-irix-mips64-mipspro \
+ compile-hpux11-parisc2-aCC \
compile-solaris-sparc-forte \
compile-solaris-sparc-purify
diff --git a/BUILD/compile-hpux11-parisc2-aCC b/BUILD/compile-hpux11-parisc2-aCC
new file mode 100755
index 00000000000..1bdef94e080
--- /dev/null
+++ b/BUILD/compile-hpux11-parisc2-aCC
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+if [ ! -f "sql/mysqld.cc" ]; then
+ echo "You must run this script from the MySQL top-level directory."
+ exit 1
+fi
+
+# -fast Expand into a set of compiler options to result in
+# improved application run-time. Options include: +O3,
+# +Onolooptransform, +Olibcalls, +FPD, +Oentryschedule,
+# +Ofastaccess.
+# +O4 Perform level 3 as well as doing link time optimizations.
+# Also sends +Oprocelim and +Ofastaccess to the linker
+# (see ld(1)).
+
+release_flags="-fast +O3"
+
+# -z Do not bind anything to address zero. This option
+# allows runtime detection of null pointers. See the
+# note on pointers below.
+cflags="-g -z +O0"
+cxxflags="-g0 -z +O0"
+debug_conigure_options="--with-debug"
+
+while [ "$#" != 0 ]; do
+ case "$1" in
+ --help)
+ echo "Usage: $0 [options]"
+ echo "Options:"
+ echo "--help print this message"
+ echo "--debug build debug binary [default] "
+ echo "--release build optimised binary"
+ echo "-32 build 32 bit binary [default]"
+ echo "-64 build 64 bit binary"
+ exit 0
+ ;;
+ --debug)
+ echo "Building debug binary"
+ ;;
+ --release)
+ echo "Building release binary"
+ cflags="$release_flags"
+ cxxflags="$release_flags"
+ debug_configure_options=""
+ ;;
+ -32)
+ echo "Building 32-bit binary"
+ ;;
+ -64)
+ echo "Building 64-bit binary"
+ cflags="$cflags +DA2.0W +DD64"
+ cxxflags="$cxxflags +DA2.0W +DD64"
+ ;;
+ *)
+ echo "$0: invalid option '$1'; use --help to show usage"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+
+set -x
+make distclean
+aclocal
+autoheader
+libtoolize --automake --force
+automake --force --add-missing
+autoconf
+
+(cd bdb/dist && sh s_all)
+(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
+
+CC=cc CXX=aCC CFLAGS="$cflags" CXXFLAGS="$cxxflags" \
+./configure --prefix=/usr/local/mysql --disable-shared \
+ --with-extra-charsets=complex --enable-thread-safe-client \
+ --without-extra-tools $debug_configure_options \
+ --disable-dependency-tracking
+
+gmake
diff --git a/BUILD/compile-irix-mips64-mipspro b/BUILD/compile-irix-mips64-mipspro
index d8107ad73c0..1987fa13b1f 100755
--- a/BUILD/compile-irix-mips64-mipspro
+++ b/BUILD/compile-irix-mips64-mipspro
@@ -6,7 +6,7 @@ if [ ! -f "sql/mysqld.cc" ]; then
fi
cflags="-64 -mips4"
-
+config_args=
if [ "$#" != 0 ]; then
case "$1" in
--help)
@@ -25,8 +25,7 @@ if [ "$#" != 0 ]; then
cflags=""
;;
*)
- echo "$0: invalid option '$1'; use --help to show usage"
- exit 1
+ config_args="$config_args $1"; shift
;;
esac
else
@@ -79,6 +78,7 @@ cxxflags="$cxxflags -LANG:libc_in_namespace_std=OFF"
CC=cc CXX=CC CFLAGS="$cflags" CXXFLAGS="$cxxflags" \
./configure --prefix=/usr/local/mysql --disable-shared \
--with-extra-charsets=complex --enable-thread-safe-client \
- --without-extra-tools --disable-dependency-tracking
+ --without-extra-tools --disable-dependency-tracking \
+ $config_args
make
diff --git a/BUILD/compile-pentium-max b/BUILD/compile-pentium-max
index 6eb71fcebb6..caf657a2049 100755
--- a/BUILD/compile-pentium-max
+++ b/BUILD/compile-pentium-max
@@ -7,11 +7,6 @@ extra_flags="$pentium_cflags $fast_cflags -g"
extra_configs="$pentium_configs"
#strip=yes
-#extra_configs="$extra_configs --with-innodb --with-berkeley-db \
-# --with-embedded-server --enable-thread-safe-client \
-# --with-openssl --with-vio --with-raid --with-ndbcluster"
-# removed per discussion with Brian and Sanja because it makes Bootstrap
-# fail
extra_configs="$extra_configs --with-innodb --with-berkeley-db \
--with-embedded-server --enable-thread-safe-client \
--with-openssl --with-vio --with-raid --with-ndbcluster"
diff --git a/BUILD/compile-pentium-valgrind-max b/BUILD/compile-pentium-valgrind-max
index ef035b3f023..fd9543163d6 100755
--- a/BUILD/compile-pentium-valgrind-max
+++ b/BUILD/compile-pentium-valgrind-max
@@ -9,7 +9,7 @@ cxx_warnings="$cxx_warnings $debug_extra_warnings"
extra_configs="$pentium_configs $debug_configs"
# We want to test isam when building with valgrind
-extra_configs="$extra_configs --with-berkeley-db --with-innodb --with-isam --with-embedded-server --with-openssl"
+extra_configs="$extra_configs --with-berkeley-db --with-innodb --with-isam --with-embedded-server --with-openssl --with-vio --with-raid --with-ndbcluster"
. "$path/FINISH.sh"
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index bd45ae65224..4ca73c35e40 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -61,6 +61,7 @@ hf@genie.(none)
igor@hundin.mysql.fi
igor@rurik.mysql.com
ingo@mysql.com
+jan@hundin.mysql.fi
jani@a80-186-24-72.elisa-laajakaista.fi
jani@dsl-jkl1657.dial.inet.fi
jani@dsl-kpogw4gb5.dial.inet.fi
@@ -80,10 +81,12 @@ jcole@mugatu.jcole.us
jcole@mugatu.spaceapes.com
jcole@sarvik.tfr.cafe.ee
jcole@tetra.spaceapes.com
+joerg@mysql.com
joreland@mysql.com
jorge@linux.jorge.mysql.com
jplindst@t41.(none)
kaj@work.mysql.com
+kent@mysql.com
konstantin@mysql.com
kostja@oak.local
lenz@kallisto.mysql.com
@@ -94,9 +97,11 @@ miguel@hegel.(none)
miguel@hegel.br
miguel@hegel.local
miguel@hegel.txg
+miguel@hegel.txg.br
miguel@light.
miguel@light.local
miguel@sartre.local
+mikron@c-fb0ae253.1238-1-64736c10.cust.bredbandsbolaget.se
mikron@mikael-ronstr-ms-dator.local
mmatthew@markslaptop.
monty@bitch.mysql.fi
@@ -127,6 +132,8 @@ mysql@home.(none)
mysqldev@build.mysql2.com
mysqldev@melody.local
mysqldev@mysql.com
+mysqldev@o2k.irixworld.net
+ndbdev@eel.hemma.oreland.se
ndbdev@ndbmaster.mysql.com
nick@mysql.com
nick@nick.leippe.com
@@ -153,6 +160,7 @@ ram@gw.udmsearch.izhnet.ru
ram@mysql.r18.ru
ram@ram.(none)
ranger@regul.home.lan
+rburnett@build.mysql.com
root@home.(none)
root@x3.internalnet
salle@banica.(none)
diff --git a/Build-tools/Bootstrap b/Build-tools/Bootstrap
index fa3c6344a05..8cad093bc5f 100755
--- a/Build-tools/Bootstrap
+++ b/Build-tools/Bootstrap
@@ -28,8 +28,8 @@ else
# Some predefined settings
$build_command= "BUILD/compile-pentium-max";
$PWD= cwd();
-$LOGFILE= $PWD . "/Bootstrap.log";
$opt_docdir= $PWD . "/mysqldoc";
+$opt_archive_log= undef;
$opt_build_command= undef;
$opt_changelog= undef;
$opt_delete= undef;
@@ -51,6 +51,7 @@ $version= "unknown";
$major=$minor=$release=0;
GetOptions(
+ "archive-log|a",
"build-command|b=s",
"changelog|c:s",
"directory|d=s",
@@ -73,6 +74,17 @@ GetOptions(
) || print_help("");
#
+# Override predefined build command
+#
+if (defined $opt_build_command)
+{
+ $build_command= $opt_build_command;
+}
+
+print_help("") if ($opt_help);
+defined($REPO=$ARGV[0]) || print_help("Please enter the BK repository to be used!");
+
+#
# Override predefined Log file name
#
if (defined $opt_log)
@@ -90,16 +102,7 @@ if (defined $opt_log)
}
}
-#
-# Override predefined build command
-#
-if (defined $opt_build_command)
-{
- $build_command= $opt_build_command;
-}
-
-print_help("") if ($opt_help);
-defined($REPO=$ARGV[0]) || print_help("Please enter the BK repository to be used!");
+$LOGFILE= $PWD . "/Bootstrap-" . $REPO . ".log" unless ($LOGFILE);
&logger("Starting build");
&abort("The directory \"$REPO\" could not be found!") if (!-d $REPO);
@@ -120,14 +123,16 @@ if (($opt_directory ne $PWD) && (!-d $opt_directory && !$opt_dry_run))
if ($opt_pull)
{
&logger("Updating BK tree $REPO to latest ChangeSet first");
- $command= "cd $REPO; bk pull; cd ..";
- &run_command($command, "Could not update $REPO!");
+ chdir ($REPO) or &abort("Could not chdir to $REPO!");
+ &run_command("bk pull", "Could not update $REPO!");
+ chdir ($PWD) or &abort("Could not chdir to $PWD!");
unless ($opt_skip_manual)
{
&logger("Updating manual tree in $opt_docdir");
- $command= "cd $opt_docdir; bk pull; cd ..";
- &run_command($command, "Could not update $opt_docdir!");
+ chdir ($opt_docdir) or &abort("Could not chdir to $opt_docdir!");
+ &run_command("bk pull", "Could not update $opt_docdir!");
+ chdir ($PWD) or &abort("Could not chdir to $PWD!");
}
}
@@ -351,6 +356,21 @@ if (!$opt_skip_check)
# All done when we came down here
#
&logger("SUCCESS: Build finished successfully.") if (!$opt_dry_run);
+
+#
+# Move the log file into the Log dir of the target dir
+#
+if ($opt_archive_log)
+{
+ my $logdir= $target_dir . "/Logs";
+ &logger("Moving $LOGFILE to $logdir");
+ mkdir "$logdir" if (! -d $logdir);
+ $command= "mv ";
+ $command.= "-v " if ($opt_verbose || defined $opt_log);
+ $command.= "$LOGFILE $logdir";
+ &run_command($command, "Could not move $LOGFILE to $logdir!");
+}
+
exit 0;
#
@@ -378,6 +398,8 @@ distribution check can be run before the source archive is being created.
Options:
+-a, --archive-log Move the log file into the Logs directory of
+ the exported tree after a successful build
-b, --build-command=<cmd> Use <cmd> to compile the sources before packing
the distribution.
(default is "$build_command")
@@ -398,7 +420,7 @@ Options:
do not build or test the source distribution
-h, --help Print this help message
-l, --log[=<filename>] Write a log file [to <filename>]
- (default is "$LOGFILE")
+ (default is "./Bootstrap-<bk repository>.log")
-m, --mail=<address> Mail a failure report to the given address (and
include a log file snippet, if logging is enabled)
Note that the \@-Sign needs to be quoted!
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index e6e71582c74..f3c20c81a9f 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -10,12 +10,13 @@ use Sys::Hostname;
$opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env="";
$opt_dbd_options=$opt_perl_options=$opt_config_options=$opt_make_options=$opt_suffix="";
$opt_tmp=$opt_version_suffix="";
-$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0;
+$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0;
$opt_innodb=$opt_bdb=$opt_raid=$opt_libwrap=$opt_clearlogs=0;
GetOptions(
"bdb",
"build-thread=i",
+ "bundled-zlib",
"config-env=s" => \@config_env,
"config-extra-env=s" => \@config_extra_env,
"config-options=s" => \@config_options,
@@ -255,6 +256,7 @@ if ($opt_stage <= 1)
log_system("$make clean") if ($opt_use_old_distribution);
$opt_config_options.= " --disable-shared" if (!$opt_enable_shared); # Default for binary versions
$opt_config_options.= " --with-berkeley-db" if ($opt_bdb);
+ $opt_config_options.= " --with-zlib-dir=bundled" if ($opt_bundled_zlib);
$opt_config_options.= " --with-client-ldflags=-all-static" if ($opt_static_client);
$opt_config_options.= " --with-debug" if ($opt_with_debug);
$opt_config_options.= " --with-libwrap" if ($opt_libwrap);
@@ -374,7 +376,7 @@ if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
log_timestamp();
system("mkdir $bench_tmpdir") if (! -d $bench_tmpdir);
safe_cd("${test_dir}/mysql-test");
- check_system("./mysql-test-run $flags --warnings --tmpdir=$bench_tmpdir --master_port=$mysql_tcp_port --slave_port=$slave_port --ndbcluster_port=$ndbcluster_port --manager-port=$manager_port --no-manager --sleep=10", "tests were successful");
+ check_system("./mysql-test-run $flags --tmpdir=$bench_tmpdir --master_port=$mysql_tcp_port --slave_port=$slave_port --ndbcluster_port=$ndbcluster_port --manager-port=$manager_port --no-manager --sleep=10", "tests were successful");
}
#
diff --git a/Build-tools/mysql-copyright b/Build-tools/mysql-copyright
index e1ee513e06d..0c091890e72 100755
--- a/Build-tools/mysql-copyright
+++ b/Build-tools/mysql-copyright
@@ -101,6 +101,7 @@ sub main
# on the toplevel of the directory instead. file 'PUBLIC' shouldn't
# exist in the new mysql distributions, but let's be sure..
unlink("$destdir/PUBLIC", "$destdir/README");
+ unlink("$destdir/COPYING", "$destdir/EXCEPTIONS-CLIENT");
copy("$WD/Docs/MySQLEULA.txt", "$destdir");
# remove readline, bdb subdirs and update 'configure'
@@ -114,6 +115,9 @@ sub main
# fix file copyrights
&fix_usage_copyright();
&add_copyright();
+
+ # fix LICENSE tag in include/mysql_version.h
+ &fix_mysql_version();
# rename the directory with new distribution name
chdir("$WD/$dir");
@@ -141,6 +145,28 @@ sub main
}
####
+#### This function will s/GPL/Commercial/ in include/mysql_version.h for the
+#### LICENSE tag.
+####
+sub fix_mysql_version
+{
+ chdir("$destdir");
+ my $header_file= (-f 'include/mysql_version.h.in')? 'include/mysql_version.h.in' : 'include/mysql_version.h';
+
+ open(MYSQL_VERSION,"<$header_file") or die "Unable to open $header_file for read: $!\n";
+ undef $/;
+ my $mysql_version= <MYSQL_VERSION>;
+ close(MYSQL_VERSION);
+
+ $mysql_version=~ s/\#define LICENSE[\s\t]+GPL/#define LICENSE Commercial/;
+
+ open(MYSQL_VERSION,">$header_file") or die "Unable to open $header_file for write: $!\n";
+ print MYSQL_VERSION $mysql_version;
+ close(MYSQL_VERSION);
+ chdir("$cwd");
+}
+
+####
#### This function will remove unwanted parts of a src tree for the mysqlcom
#### distributions.
####
@@ -150,11 +176,7 @@ sub trim_the_fat
my $cwd= getcwd();
system("rm -rf $destdir/${the_fat}");
- if ($win_flag)
- {
- chdir("$destdir") or die "Unable to change directory to $destdir!: $!\n";
- }
- else
+ if (!$win_flag)
{
chdir("$destdir");
unlink ("configure") or die "Can't delete $destdir/configure: $!\n";
@@ -182,7 +204,7 @@ sub trim_the_fat
open(CONFIGURE,">configure.in") or die "Unable to open configure.in for write: $!\n";
print CONFIGURE $configure;
close(CONFIGURE);
- `autoconf`;
+ `aclocal && autoheader && aclocal && automake && autoconf`;
die "'./configure' was not produced!" unless (-f "configure");
chdir("$cwd");
}
diff --git a/Docs/Makefile.am b/Docs/Makefile.am
index 19b2efd4cab..491302a082a 100644
--- a/Docs/Makefile.am
+++ b/Docs/Makefile.am
@@ -26,7 +26,7 @@ EXTRA_DIST = $(noinst_SCRIPTS) $(BUILT_SOURCES) mysqld_error.txt \
all: $(targets) txt_files
-txt_files: ../INSTALL-SOURCE ../COPYING ../INSTALL-WIN-SOURCE \
+txt_files: ../INSTALL-SOURCE ../COPYING ../INSTALL-WIN-SOURCE ../EXCEPTIONS-CLIENT \
INSTALL-BINARY ../support-files/MacOSX/ReadMe.txt
CLEAN_FILES: $(BUILD_SOURCES)
@@ -204,7 +204,10 @@ INSTALL-BINARY: mysql.info $(GT)
perl -w $(GT) mysql.info "Installing binary" "Installing source" > $@
../COPYING: mysql.info $(GT)
- perl -w $(GT) mysql.info "GPL license" "Function Index" > $@
+ perl -w $(GT) mysql.info "GPL license" "MySQL FLOSS License Exception" > $@
+
+../EXCEPTIONS-CLIENT: mysql.info $(GT)
+ perl -w $(GT) mysql.info "MySQL FLOSS License Exception" "Function Index" > $@
../support-files/MacOSX/ReadMe.txt: mysql.info $(GT)
perl -w $(GT) mysql.info "Mac OS X installation" "NetWare installation" > $@
diff --git a/Makefile.am b/Makefile.am
index f8efb247c95..7c2ed820a23 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -19,8 +19,15 @@
AUTOMAKE_OPTIONS = foreign
# These are built from source in the Docs directory
-EXTRA_DIST = INSTALL-SOURCE README COPYING zlib
-SUBDIRS = . include @docs_dirs@ \
+EXTRA_DIST = INSTALL-SOURCE README COPYING EXCEPTIONS-CLIENT
+SUBDIRS = . include @docs_dirs@ @zlib_dir@ \
+ @readline_topdir@ sql-common \
+ @thread_dirs@ pstack @sql_client_dirs@ \
+ @sql_server_dirs@ scripts man tests \
+ netware @libmysqld_dirs@ \
+ @bench_dirs@ support-files @fs_dirs@ @tools_dirs@
+
+DIST_SUBDIRS = . include @docs_dirs@ zlib \
@readline_topdir@ sql-common \
@thread_dirs@ pstack @sql_client_dirs@ \
@sql_server_dirs@ scripts man tests SSL\
diff --git a/VC++Files/sql/message.mc b/VC++Files/sql/message.mc
new file mode 100644
index 00000000000..a1a7c8cff7e
--- /dev/null
+++ b/VC++Files/sql/message.mc
@@ -0,0 +1,8 @@
+MessageId = 100
+Severity = Error
+Facility = Application
+SymbolicName = MSG_DEFAULT
+Language = English
+%1For more information, see Help and Support Center at http://www.mysql.com.
+
+
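
The new message.mc is the source that mc.exe compiles into message.h and message.rc; the custom build steps added to mysqld.dsp below run mc.exe for the NT configurations. A minimal sketch, assuming typical Win32 event-log usage, of how such a generated header is consumed: only MSG_DEFAULT comes from the generated message.h, while the event-source name "MySQL" and the helper function are illustrative, not taken from the patch.

    /* Sketch only: uses the header that mc.exe generates from message.mc. */
    #include <windows.h>
    #include "message.h"                  /* produced by: mc.exe message.mc */

    static void log_to_eventlog(const char *text)
    {
      HANDLE h= RegisterEventSourceA(NULL, "MySQL");   /* source name is an assumption */
      if (h)
      {
        LPCSTR strings[1];
        strings[0]= text;                 /* substituted for %1 in the message text */
        ReportEventA(h, EVENTLOG_ERROR_TYPE, 0, MSG_DEFAULT,
                     NULL, 1, 0, strings, NULL);
        DeregisterEventSource(h);
      }
    }
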
diff --git a/VC++Files/sql/mysqld.dsp b/VC++Files/sql/mysqld.dsp
index bbac49bfe2b..c4475427823 100644
--- a/VC++Files/sql/mysqld.dsp
+++ b/VC++Files/sql/mysqld.dsp
@@ -187,7 +187,7 @@ LINK32=xilink6.exe
# PROP Target_Dir ""
# ADD BASE CPP /nologo /G6 /MT /W3 /O2 /I "../include" /I "../regex" /I "../zlib" /D "DBUG_OFF" /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "_MBCS" /D "USE_SYMDIR" /D "HAVE_DLOPEN" /D "NDEBUG" /FD /c
# SUBTRACT BASE CPP /YX
-# ADD CPP /nologo /G6 /MT /W3 /O2 /I "../include" /I "../regex" /I "../zlib" /D LICENSE=Commercial /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "HAVE_DLOPEN" /D "DBUG_OFF" /D "_MBCS" /D "NDEBUG" /FD /c
+# ADD CPP /nologo /G6 /MT /W3 /O2 /I "../include" /I "../regex" /I "../zlib" /D LICENSE=Commercial /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "HAVE_DLOPEN" /D "DBUG_OFF" /D "_MBCS" /D "NDEBUG" /FD /D MYSQL_SERVER_SUFFIX=-classic /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -243,7 +243,7 @@ LINK32=xilink6.exe
# PROP Target_Dir ""
# ADD BASE CPP /nologo /G6 /MT /W3 /O2 /I "../include" /I "../regex" /I "../zlib" /D "DBUG_OFF" /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "_MBCS" /D "USE_SYMDIR" /D "HAVE_DLOPEN" /D "NDEBUG" /FD /c
# SUBTRACT BASE CPP /YX
-# ADD CPP /nologo /G6 /MT /W3 /O2 /I "../include" /I "../regex" /I "../zlib" /D "__NT__" /D "DBUG_OFF" /D "NDEBUG" /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "_MBCS" /D "HAVE_DLOPEN" /D LICENSE=Commercial /D MYSQL_SERVER_SUFFIX=-nt /FD /c
+# ADD CPP /nologo /G6 /MT /W3 /O2 /I "../include" /I "../regex" /I "../zlib" /D "__NT__" /D "DBUG_OFF" /D "NDEBUG" /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "_MBCS" /D "HAVE_DLOPEN" /FD /D LICENSE=Commercial /D MYSQL_SERVER_SUFFIX=-classic-nt /c
# SUBTRACT CPP /YX
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
@@ -920,6 +920,89 @@ SOURCE=.\log_event.cpp
# End Source File
# Begin Source File
+SOURCE=.\message.mc
+
+!IF "$(CFG)" == "mysqld - Win32 Release"
+
+!ELSEIF "$(CFG)" == "mysqld - Win32 Debug"
+
+!ELSEIF "$(CFG)" == "mysqld - Win32 nt"
+
+# Begin Custom Build - Compiling messages
+InputDir=.
+InputPath=.\message.mc
+InputName=message
+
+BuildCmds= \
+ mc.exe "$(InputDir)\$(InputName).mc"
+
+"$(InputDir)\$(InputName).rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+
+"$(InputDir)\$(InputName).h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "mysqld - Win32 Max nt"
+# Begin Custom Build - Compiling messages
+InputDir=.
+InputPath=.\message.mc
+InputName=message
+
+BuildCmds= \
+ mc.exe "$(InputDir)\$(InputName).mc"
+
+"$(InputDir)\$(InputName).rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+
+"$(InputDir)\$(InputName).h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+# End Custom Build
+!ELSEIF "$(CFG)" == "mysqld - Win32 Max"
+
+!ELSEIF "$(CFG)" == "mysqld - Win32 classic"
+
+!ELSEIF "$(CFG)" == "mysqld - Win32 pro"
+
+!ELSEIF "$(CFG)" == "mysqld - Win32 classic nt"
+# Begin Custom Build - Compiling messages
+InputDir=.
+InputPath=.\message.mc
+InputName=message
+
+BuildCmds= \
+ mc.exe "$(InputDir)\$(InputName).mc"
+
+"$(InputDir)\$(InputName).rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+
+"$(InputDir)\$(InputName).h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+# End Custom Build
+!ELSEIF "$(CFG)" == "mysqld - Win32 pro nt"
+# Begin Custom Build - Compiling messages
+InputDir=.
+InputPath=.\message.mc
+InputName=message
+
+BuildCmds= \
+ mc.exe "$(InputDir)\$(InputName).mc"
+
+"$(InputDir)\$(InputName).rc" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+
+"$(InputDir)\$(InputName).h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ $(BuildCmds)
+# End Custom Build
+!ENDIF
+
+# End Source File
+# Begin Source File
+
+SOURCE=.\message.rc
+# End Source File
+# Begin Source File
+
SOURCE=.\mf_iocache.cpp
!IF "$(CFG)" == "mysqld - Win32 Release"
diff --git a/VC++Files/winmysqladmin/mysql_com.h b/VC++Files/winmysqladmin/mysql_com.h
index 0870f340451..2a7eb57d745 100644
--- a/VC++Files/winmysqladmin/mysql_com.h
+++ b/VC++Files/winmysqladmin/mysql_com.h
@@ -155,25 +155,32 @@ enum enum_field_types { FIELD_TYPE_DECIMAL, FIELD_TYPE_TINY,
#define FIELD_TYPE_CHAR FIELD_TYPE_TINY /* For compability */
#define FIELD_TYPE_INTERVAL FIELD_TYPE_ENUM /* For compability */
-enum enum_shutdown_level {
- /*
- We want levels to be in growing order of hardness. So we leave room
- for future intermediate levels. For now, escalating one level is += 10;
- later if we insert new levels in between we will need a function
- next_shutdown_level(level). Note that DEFAULT does not respect the
- growing property.
- */
- SHUTDOWN_DEFAULT= 0, /* mapped to WAIT_ALL_BUFFERS for now */
+
+/* Shutdown/kill enums and constants */
+
+/* Bits for THD::killable. */
+#define MYSQL_SHUTDOWN_KILLABLE_CONNECT (unsigned char)(1 << 0)
+#define MYSQL_SHUTDOWN_KILLABLE_TRANS (unsigned char)(1 << 1)
+#define MYSQL_SHUTDOWN_KILLABLE_LOCK_TABLE (unsigned char)(1 << 2)
+#define MYSQL_SHUTDOWN_KILLABLE_UPDATE (unsigned char)(1 << 3)
+
+enum mysql_enum_shutdown_level {
/*
- Here is the list in growing order (the next does the previous plus
- something). WAIT_ALL_BUFFERS is what we have now. Others are "this MySQL
- server does not support this shutdown level yet".
+ We want levels to be in growing order of hardness (because we use number
+ comparisons). Note that DEFAULT does not respect the growing property, but
+ it's ok.
*/
- SHUTDOWN_WAIT_CONNECTIONS= 10, /* wait for existing connections to finish */
- SHUTDOWN_WAIT_TRANSACTIONS= 20, /* wait for existing trans to finish */
- SHUTDOWN_WAIT_STATEMENTS= 30, /* wait for existing updating stmts to finish */
- SHUTDOWN_WAIT_ALL_BUFFERS= 40, /* flush InnoDB buffers */
- SHUTDOWN_WAIT_CRITICAL_BUFFERS= 50, /* flush MyISAM buffs (no corruption) */
+ DEFAULT= 0,
+ /* wait for existing connections to finish */
+ WAIT_CONNECTIONS= MYSQL_SHUTDOWN_KILLABLE_CONNECT,
+ /* wait for existing trans to finish */
+ WAIT_TRANSACTIONS= MYSQL_SHUTDOWN_KILLABLE_TRANS,
+ /* wait for existing updates to finish (=> no partial MyISAM update) */
+ WAIT_UPDATES= MYSQL_SHUTDOWN_KILLABLE_UPDATE,
+ /* flush InnoDB buffers and other storage engines' buffers*/
+ WAIT_ALL_BUFFERS= (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1),
+ /* don't flush InnoDB buffers, flush other storage engines' buffers*/
+ WAIT_CRITICAL_BUFFERS= (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1) + 1,
/* Now the 2 levels of the KILL command */
#if MYSQL_VERSION_ID >= 50000
KILL_QUERY= 254,
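
The hunk above replaces the old 0/10/20/... shutdown levels with values built from the MYSQL_SHUTDOWN_KILLABLE_* bit masks, so a level's numeric value both keeps the "growing hardness" ordering and encodes which kinds of sessions it may kill. A small standalone restatement of the new constants, with values copied from the hunk; the printout only demonstrates the ordering and is not part of the patch.

    #include <stdio.h>

    /* Bit masks for THD::killable, as introduced above */
    #define MYSQL_SHUTDOWN_KILLABLE_CONNECT    (unsigned char)(1 << 0)
    #define MYSQL_SHUTDOWN_KILLABLE_TRANS      (unsigned char)(1 << 1)
    #define MYSQL_SHUTDOWN_KILLABLE_LOCK_TABLE (unsigned char)(1 << 2)
    #define MYSQL_SHUTDOWN_KILLABLE_UPDATE     (unsigned char)(1 << 3)

    enum mysql_enum_shutdown_level {
      DEFAULT= 0,                                                     /* special */
      WAIT_CONNECTIONS=      MYSQL_SHUTDOWN_KILLABLE_CONNECT,         /*  1 */
      WAIT_TRANSACTIONS=     MYSQL_SHUTDOWN_KILLABLE_TRANS,           /*  2 */
      WAIT_UPDATES=          MYSQL_SHUTDOWN_KILLABLE_UPDATE,          /*  8 */
      WAIT_ALL_BUFFERS=      (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1),   /* 16 */
      WAIT_CRITICAL_BUFFERS= (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1) + 1 /* 17 */
    };

    int main(void)
    {
      /* Levels compare numerically: a harder shutdown has a larger value. */
      printf("%d < %d < %d < %d < %d\n",
             WAIT_CONNECTIONS, WAIT_TRANSACTIONS, WAIT_UPDATES,
             WAIT_ALL_BUFFERS, WAIT_CRITICAL_BUFFERS);
      return 0;
    }
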
diff --git a/acconfig.h b/acconfig.h
deleted file mode 100644
index f9cff3010ca..00000000000
--- a/acconfig.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/* acconfig.h
- This file is in the public domain.
-
- Descriptive text for the C preprocessor macros that
- the distributed Autoconf macros can define.
- No software package will use all of them; autoheader copies the ones
- your configure.in uses into your configuration header file templates.
-
- The entries are in sort -df order: alphabetical, case insensitive,
- ignoring punctuation (such as underscores). Although this order
- can split up related entries, it makes it easier to check whether
- a given entry is in the file.
-
- Leave the following blank line there!! Autoheader needs it. */
-
-
-#undef C_ALLOCA
-
-#undef CRAY_STACKSEG_END
-
-/* Define the default charset name */
-#undef MYSQL_DEFAULT_CHARSET_NAME
-
-/* Define the default charset name */
-#undef MYSQL_DEFAULT_COLLATION_NAME
-
-/* Version of .frm files */
-#undef DOT_FRM_VERSION
-
-/* If LOAD DATA LOCAL INFILE should be enabled by default */
-#undef ENABLED_LOCAL_INFILE
-
-/* READLINE: */
-#undef FIONREAD_IN_SYS_IOCTL
-
-/* READLINE: Define if your system defines TIOCGWINSZ in sys/ioctl.h. */
-#undef GWINSZ_IN_SYS_IOCTL
-
-/* Handing of large files on Solaris 2.6 */
-#undef _FILE_OFFSET_BITS
-
-/* Do we have FIONREAD */
-#undef FIONREAD_IN_SYS_IOCTL
-
-/* Do we need to define _GNU_SOURCE */
-#undef _GNU_SOURCE
-
-/* atomic_add() from <asm/atomic.h> (Linux only) */
-#undef HAVE_ATOMIC_ADD
-
-/* atomic_sub() from <asm/atomic.h> (Linux only) */
-#undef HAVE_ATOMIC_SUB
-
-/* If we have a working alloca() implementation */
-#undef HAVE_ALLOCA
-
-/* bool is not defined by all C++ compilators */
-#undef HAVE_BOOL
-
-/* Have berkeley db installed */
-#undef HAVE_BERKELEY_DB
-
-/* DSB style signals ? */
-#undef HAVE_BSD_SIGNALS
-
-/* Can netinet be included */
-#undef HAVE_BROKEN_NETINET_INCLUDES
-
-/* READLINE: */
-#undef HAVE_BSD_SIGNALS
-
-/* Define charsets you want */
-#undef HAVE_CHARSET_armscii8
-#undef HAVE_CHARSET_ascii
-#undef HAVE_CHARSET_big5
-#undef HAVE_CHARSET_cp1250
-#undef HAVE_CHARSET_cp1251
-#undef HAVE_CHARSET_cp1256
-#undef HAVE_CHARSET_cp1257
-#undef HAVE_CHARSET_cp850
-#undef HAVE_CHARSET_cp852
-#undef HAVE_CHARSET_cp866
-#undef HAVE_CHARSET_dec8
-#undef HAVE_CHARSET_euckr
-#undef HAVE_CHARSET_gb2312
-#undef HAVE_CHARSET_gbk
-#undef HAVE_CHARSET_geostd8
-#undef HAVE_CHARSET_greek
-#undef HAVE_CHARSET_hebrew
-#undef HAVE_CHARSET_hp8
-#undef HAVE_CHARSET_keybcs2
-#undef HAVE_CHARSET_koi8r
-#undef HAVE_CHARSET_koi8u
-#undef HAVE_CHARSET_latin1
-#undef HAVE_CHARSET_latin2
-#undef HAVE_CHARSET_latin5
-#undef HAVE_CHARSET_latin7
-#undef HAVE_CHARSET_macce
-#undef HAVE_CHARSET_macroman
-#undef HAVE_CHARSET_sjis
-#undef HAVE_CHARSET_swe7
-#undef HAVE_CHARSET_tis620
-#undef HAVE_CHARSET_ucs2
-#undef HAVE_CHARSET_ujis
-#undef HAVE_CHARSET_utf8
-
-/* ZLIB and compress: */
-#undef HAVE_COMPRESS
-
-/* Define if we are using OSF1 DEC threads */
-#undef HAVE_DEC_THREADS
-
-/* Define if we are using OSF1 DEC threads on 3.2 */
-#undef HAVE_DEC_3_2_THREADS
-
-/* Builds Example DB */
-#undef HAVE_EXAMPLE_DB
-
-/* Builds Archive Storage Engine */
-#undef HAVE_ARCHIVE_DB
-
-/* fp_except from ieeefp.h */
-#undef HAVE_FP_EXCEPT
-
-/* READLINE: */
-#undef HAVE_GETPW_DECLS
-
-/* Solaris define gethostbyname_r with 5 arguments. glibc2 defines
- this with 6 arguments */
-#undef HAVE_GETHOSTBYNAME_R_GLIBC2_STYLE
-
-/* In OSF 4.0f the 3'd argument to gethostname_r is hostent_data * */
-#undef HAVE_GETHOSTBYNAME_R_RETURN_INT
-
-/* Define if int8, int16 and int32 types exist */
-#undef HAVE_INT_8_16_32
-
-/* Using Innobase DB */
-#undef HAVE_INNOBASE_DB
-
-/* Using old ISAM tables */
-#undef HAVE_ISAM
-
-/* Define if we have GNU readline */
-#undef HAVE_LIBREADLINE
-
-/* Define if have -lwrap */
-#undef HAVE_LIBWRAP
-
-/* Define if we are using Xavier Leroy's LinuxThreads */
-#undef HAVE_LINUXTHREADS
-
-/* Do we have lstat */
-#undef HAVE_LSTAT
-
-/* Do we use user level threads */
-#undef HAVE_mit_thread
-
-/* Using Ndb Cluster DB */
-#undef HAVE_NDBCLUSTER_DB
-
-/* Including Ndb Cluster DB shared memory transporter */
-#undef NDB_SHM_TRANSPORTER
-
-/* Including Ndb Cluster DB sci transporter */
-#undef NDB_SCI_TRANSPORTER
-
-/* For some non posix threads */
-#undef HAVE_NONPOSIX_PTHREAD_GETSPECIFIC
-
-/* For some non posix threads */
-#undef HAVE_NONPOSIX_PTHREAD_MUTEX_INIT
-
-/* READLINE: */
-#undef HAVE_POSIX_SIGNALS
-
-/* Well.. */
-#undef HAVE_POSIX_SIGSETJMP
-
-/* sigwait with one argument */
-#undef HAVE_NONPOSIX_SIGWAIT
-
-/* ORBIT */
-#undef HAVE_ORBIT
-
-/* pthread_attr_setscope */
-#undef HAVE_PTHREAD_ATTR_SETSCOPE
-
-/* pthread_yield that doesn't take any arguments */
-#undef HAVE_PTHREAD_YIELD_ZERO_ARG
-
-/* pthread_yield function with one argument */
-#undef HAVE_PTHREAD_YIELD_ONE_ARG
-
-/* POSIX readdir_r */
-#undef HAVE_READDIR_R
-
-/* Have Gemini db installed */
-#undef HAVE_GEMINI_DB
-
-/* POSIX sigwait */
-#undef HAVE_SIGWAIT
-
-/* crypt */
-#undef HAVE_CRYPT
-
-/* If we want to have query cache */
-#undef HAVE_QUERY_CACHE
-
-/* Spatial extentions */
-#undef HAVE_SPATIAL
-
-/* RTree keys */
-#undef HAVE_RTREE_KEYS
-
-/* Access checks in embedded library */
-#undef HAVE_EMBEDDED_PRIVILEGE_CONTROL
-
-/* Solaris define gethostbyaddr_r with 7 arguments. glibc2 defines
- this with 8 arguments */
-#undef HAVE_SOLARIS_STYLE_GETHOST
-
-/* MIT pthreads does not support connecting with unix sockets */
-#undef HAVE_THREADS_WITHOUT_SOCKETS
-
-/* Timespec has a ts_sec instead of tv_sev */
-#undef HAVE_TIMESPEC_TS_SEC
-
-/* Have the tzname variable */
-#undef HAVE_TZNAME
-
-/* Define if the system files define uchar */
-#undef HAVE_UCHAR
-
-/* Define if the system files define uint */
-#undef HAVE_UINT
-
-/* Define if the system files define ulong */
-#undef HAVE_ULONG
-
-/* Define if the system files define in_addr_t */
-#undef HAVE_IN_ADDR_T
-
-/* UNIXWARE7 threads are not posix */
-#undef HAVE_UNIXWARE7_THREADS
-
-/* new UNIXWARE7 threads that are not yet posix */
-#undef HAVE_UNIXWARE7_POSIX
-
-/* OpenSSL */
-#undef HAVE_OPENSSL
-
-/* READLINE: */
-#undef HAVE_USG_SIGHOLD
-
-/* Virtual IO */
-#undef HAVE_VIO
-
-/* Handling of large files on Solaris 2.6 */
-#undef _LARGEFILE_SOURCE
-
-/* Handling of large files on Solaris 2.6 */
-#undef _LARGEFILE64_SOURCE
-
-/* Define if want -lwrap */
-#undef LIBWRAP
-
-/* Define to machine type name eg sun10 */
-#undef MACHINE_TYPE
-
-#undef MUST_REINSTALL_SIGHANDLERS
-
-/* Defined to used character set */
-#undef MY_CHARSET_CURRENT
-
-/* READLINE: no sys file*/
-#undef NO_SYS_FILE
-
-/* Program name */
-#undef PACKAGE
-
-/* mysql client protocoll version */
-#undef PROTOCOL_VERSION
-
-/* ndb version */
-#undef NDB_VERSION_MAJOR
-#undef NDB_VERSION_MINOR
-#undef NDB_VERSION_BUILD
-#undef NDB_VERSION_STATUS
-
-/* Define if qsort returns void */
-#undef QSORT_TYPE_IS_VOID
-
-/* Define as the return type of qsort (int or void). */
-#undef RETQSORTTYPE
-
-/* Size of off_t */
-#undef SIZEOF_OFF_T
-
-/* Define as the base type of the last arg to accept */
-#undef SOCKET_SIZE_TYPE
-
-/* Last argument to get/setsockopt */
-#undef SOCKOPT_OPTLEN_TYPE
-
-#undef SPEED_T_IN_SYS_TYPES
-#undef SPRINTF_RETURNS_PTR
-#undef SPRINTF_RETURNS_INT
-#undef SPRINTF_RETURNS_GARBAGE
-
-/* Needed to get large file support on HPUX 10.20 */
-#undef __STDC_EXT__
-
-#undef STACK_DIRECTION
-
-#undef STRCOLL_BROKEN
-
-#undef STRUCT_DIRENT_HAS_D_FILENO
-#undef STRUCT_DIRENT_HAS_D_INO
-
-#undef STRUCT_WINSIZE_IN_SYS_IOCTL
-#undef STRUCT_WINSIZE_IN_TERMIOS
-
-/* Define to name of system eg solaris*/
-#undef SYSTEM_TYPE
-
-/* Define if you want to have threaded code. This may be undef on client code */
-#undef THREAD
-
-/* Should be client be thread safe */
-#undef THREAD_SAFE_CLIENT
-
-/* READLINE: */
-#undef TIOCSTAT_IN_SYS_IOCTL
-
-/* Use multi-byte character routines */
-#undef USE_MB
-#undef USE_MB_IDENT
-
-/* the pstack backtrace library */
-#undef USE_PSTACK
-
-/* Use MySQL RAID */
-#undef USE_RAID
-
-/* Program version */
-#undef VERSION
-
-/* READLINE: */
-#undef VOID_SIGHANDLER
-
-/* used libedit interface (can we dereference result of rl_completion_entry_function?) */
-#undef USE_LIBEDIT_INTERFACE
-
-/* used new readline interface (does rl_completion_func_t and rl_compentry_func_t defined?) */
-#undef USE_NEW_READLINE_INTERFACE
-
-/* macro for libedit */
-#undef HAVE_VIS_H
-#undef HAVE_FGETLN
-#undef HAVE_ISSETUGID
-#undef HAVE_STRLCPY
-#undef HAVE_GETLINE
-#undef HAVE_FLOCKFILE
-#undef HAVE_SYS_TYPES_H
-#undef HAVE_SYS_CDEFS_H
-
-
-/* Leave that blank line there!! Autoheader needs it.
- If you're adding to this file, keep in mind:
- The entries are in sort -df order: alphabetical, case insensitive,
- ignoring punctuation (such as underscores). */
diff --git a/acinclude.m4 b/acinclude.m4
index 92a9d9e00b3..f634784ea34 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -1,5 +1,26 @@
# Local macros for automake & autoconf
+
+AC_DEFUN(MYSQL_CHECK_READLINE_DECLARES_HIST_ENTRY,[
+ AC_CACHE_CHECK([HIST_ENTRY is declared in readline/readline.h], mysql_cv_hist_entry_declared,
+ AC_TRY_COMPILE(
+ [
+ #include "stdio.h"
+ #include "readline/readline.h"
+ ],
+ [
+ HIST_ENTRY entry;
+ ],
+ [
+ mysql_cv_hist_entry_declared=yes
+ AC_DEFINE_UNQUOTED(HAVE_HIST_ENTRY, [1],
+ [HIST_ENTRY is defined in the outer libeditreadline])
+ ],
+ [mysql_cv_libedit_interface=no]
+ )
+ )
+])
+
AC_DEFUN(MYSQL_CHECK_LIBEDIT_INTERFACE,[
AC_CACHE_CHECK([libedit variant of rl_completion_entry_function], mysql_cv_libedit_interface,
AC_TRY_COMPILE(
@@ -13,7 +34,8 @@ AC_DEFUN(MYSQL_CHECK_LIBEDIT_INTERFACE,[
],
[
mysql_cv_libedit_interface=yes
- AC_DEFINE_UNQUOTED(USE_LIBEDIT_INTERFACE)
+ AC_DEFINE_UNQUOTED([USE_LIBEDIT_INTERFACE], [1],
+ [used libedit interface (can we dereference result of rl_completion_entry_function)])
],
[mysql_cv_libedit_interface=no]
)
@@ -33,7 +55,8 @@ AC_DEFUN(MYSQL_CHECK_NEW_RL_INTERFACE,[
],
[
mysql_cv_new_rl_interface=yes
- AC_DEFINE_UNQUOTED(USE_NEW_READLINE_INTERFACE)
+ AC_DEFINE_UNQUOTED([USE_NEW_READLINE_INTERFACE], [1],
+ [used new readline interface (are rl_completion_func_t and rl_compentry_func_t defined)])
],
[mysql_cv_new_rl_interface=no]
)
@@ -65,7 +88,7 @@ main()
exit(0);
}], AC_CV_NAME=`cat conftestval`, AC_CV_NAME=0, ifelse([$2], , , AC_CV_NAME=$2))])dnl
AC_MSG_RESULT($AC_CV_NAME)
-AC_DEFINE_UNQUOTED(AC_TYPE_NAME, $AC_CV_NAME)
+AC_DEFINE_UNQUOTED(AC_TYPE_NAME, $AC_CV_NAME, [ ])
undefine([AC_TYPE_NAME])dnl
undefine([AC_CV_NAME])dnl
])
@@ -105,7 +128,8 @@ if test "$mysql_cv_btype_last_arg_accept" = "none"; then
mysql_cv_btype_last_arg_accept=int
fi)
AC_LANG_RESTORE
-AC_DEFINE_UNQUOTED(SOCKET_SIZE_TYPE, $mysql_cv_btype_last_arg_accept)
+AC_DEFINE_UNQUOTED([SOCKET_SIZE_TYPE], [$mysql_cv_btype_last_arg_accept],
+ [The base type of the last arg to accept])
CXXFLAGS="$ac_save_CXXFLAGS"
])
#---END:
@@ -121,10 +145,11 @@ void qsort(void *base, size_t nel, size_t width,
int (*compar) (const void *, const void *));
],
[int i;], mysql_cv_type_qsort=void, mysql_cv_type_qsort=int)])
-AC_DEFINE_UNQUOTED(RETQSORTTYPE, $mysql_cv_type_qsort)
+AC_DEFINE_UNQUOTED([RETQSORTTYPE], [$mysql_cv_type_qsort],
+ [The return type of qsort (int or void).])
if test "$mysql_cv_type_qsort" = "void"
then
- AC_DEFINE_UNQUOTED(QSORT_TYPE_IS_VOID, 1)
+ AC_DEFINE_UNQUOTED([QSORT_TYPE_IS_VOID], [1], [qsort returns void])
fi
])
@@ -142,7 +167,8 @@ abstime.ts_nsec = 0;
], mysql_cv_timespec_ts=yes, mysql_cv_timespec_ts=no)])
if test "$mysql_cv_timespec_ts" = "yes"
then
- AC_DEFINE(HAVE_TIMESPEC_TS_SEC)
+ AC_DEFINE([HAVE_TIMESPEC_TS_SEC], [1],
+ [Timespec has a ts_sec instead of tv_sev])
fi
])
@@ -158,36 +184,120 @@ extern "C"
], mysql_cv_tzname=yes, mysql_cv_tzname=no)])
if test "$mysql_cv_tzname" = "yes"
then
- AC_DEFINE(HAVE_TZNAME)
+ AC_DEFINE([HAVE_TZNAME], [1], [Have the tzname variable])
fi
])
-AC_DEFUN(MYSQL_CHECK_ZLIB_WITH_COMPRESS, [
+
+dnl Define zlib paths to point at bundled zlib
+
+AC_DEFUN([MYSQL_USE_BUNDLED_ZLIB], [
+ZLIB_INCLUDES="-I\$(top_srcdir)/zlib"
+ZLIB_LIBS="\$(top_builddir)/zlib/libz.la"
+zlib_dir="zlib"
+AC_SUBST([zlib_dir])
+mysql_cv_compress="yes"
+])
+
+dnl Auxiliary macro to check for zlib at given path
+
+AC_DEFUN([MYSQL_CHECK_ZLIB_DIR], [
+save_INCLUDES="$INCLUDES"
save_LIBS="$LIBS"
-LIBS="-l$1 $LIBS"
-AC_CACHE_CHECK([if libz with compress], mysql_cv_compress,
-[AC_TRY_RUN([#include <zlib.h>
-#ifdef __cplusplus
-extern "C"
-#endif
-int main(int argv, char **argc)
-{
- return 0;
-}
+INCLUDES="$INCLUDES $ZLIB_INCLUDES"
+LIBS="$LIBS $ZLIB_LIBS"
+AC_CACHE_VAL([mysql_cv_compress],
+ [AC_TRY_LINK([#include <zlib.h>],
+ [int link_test() { return compress(0, (unsigned long*) 0, "", 0); }],
+ [mysql_cv_compress="yes"
+ AC_MSG_RESULT([ok])],
+ [mysql_cv_compress="no"])
+ ])
+INCLUDES="$save_INCLUDES"
+LIBS="$save_LIBS"
+])
-int link_test()
-{
- return compress(0, (unsigned long*) 0, "", 0);
-}
-], mysql_cv_compress=yes, mysql_cv_compress=no)])
-if test "$mysql_cv_compress" = "yes"
-then
- AC_DEFINE(HAVE_COMPRESS)
-else
- LIBS="$save_LIBS"
-fi
+dnl MYSQL_CHECK_ZLIB_WITH_COMPRESS
+dnl ------------------------------------------------------------------------
+dnl @synopsis MYSQL_CHECK_ZLIB_WITH_COMPRESS
+dnl
+dnl Provides the following configure options:
+dnl --with-zlib-dir=DIR
+dnl Possible DIR values are:
+dnl - "no" - the macro will disable use of compression functions
+dnl - "bundled" - means use zlib bundled along with MySQL sources
+dnl - empty, or not specified - the macro will try default system
+dnl library (if present), and in case of error will fall back to
+dnl bundled zlib
+dnl - zlib location prefix - given location prefix, the macro expects
+dnl to find the library headers in $prefix/include, and binaries in
+dnl $prefix/lib. If zlib headers or binaries weren't found at $prefix, the
+dnl macro bails out with error.
+dnl
+dnl If the library was found, this function #defines HAVE_COMPRESS
+dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include) and
+dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz).
+
+AC_DEFUN([MYSQL_CHECK_ZLIB_WITH_COMPRESS], [
+AC_MSG_CHECKING([for zlib compression library])
+case $SYSTEM_TYPE in
+dnl This is a quick fix for Netware if AC_TRY_LINK for some reason
+dnl won't work there. Uncomment in case of failure and on Netware
+dnl we'll always assume that zlib is present
+dnl *netware* | *modesto*)
+dnl AC_MSG_RESULT(ok)
+dnl AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support])
+dnl ;;
+ *)
+ AC_ARG_WITH([zlib-dir],
+ AC_HELP_STRING([--with-zlib-dir=DIR],
+ [Provide MySQL with a custom location of
+ compression library. Given DIR, zlib binary is
+ assumed to be in $DIR/lib and header files
+ in $DIR/include.]),
+ [mysql_zlib_dir=${withval}],
+ [mysql_zlib_dir=""])
+ case "$mysql_zlib_dir" in
+ "no")
+ mysql_cv_compress="no"
+ AC_MSG_RESULT([disabled])
+ ;;
+ "bundled")
+ MYSQL_USE_BUNDLED_ZLIB
+ AC_MSG_RESULT([using bundled zlib])
+ ;;
+ "")
+ ZLIB_INCLUDES=""
+ ZLIB_LIBS="-lz"
+ MYSQL_CHECK_ZLIB_DIR
+ if test "$mysql_cv_compress" = "no"; then
+ MYSQL_USE_BUNDLED_ZLIB
+ AC_MSG_RESULT([system-wide zlib not found, using one bundled with MySQL])
+ fi
+ ;;
+ *)
+ if test -f "$mysql_zlib_dir/lib/libz.a" -a \
+ -f "$mysql_zlib_dir/include/zlib.h"; then
+ ZLIB_INCLUDES="-I$mysql_zlib_dir/include"
+ ZLIB_LIBS="-L$mysql_zlib_dir/lib -lz"
+ MYSQL_CHECK_ZLIB_DIR
+ fi
+ if test "x$mysql_cv_compress" != "xyes"; then
+ AC_MSG_ERROR([headers or binaries were not found in $mysql_zlib_dir/{include,lib}])
+ fi
+ ;;
+ esac
+ if test "$mysql_cv_compress" = "yes"; then
+ AC_SUBST([ZLIB_LIBS])
+ AC_SUBST([ZLIB_INCLUDES])
+ AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support])
+ fi
+ ;;
+esac
])
+dnl ------------------------------------------------------------------------
+
#---START: Used in for client configure
AC_DEFUN(MYSQL_CHECK_ULONG,
[AC_MSG_CHECKING(for type ulong)
@@ -203,7 +313,7 @@ main()
AC_MSG_RESULT($ac_cv_ulong)
if test "$ac_cv_ulong" = "yes"
then
- AC_DEFINE(HAVE_ULONG)
+ AC_DEFINE([HAVE_ULONG], [1], [system headers define ulong])
fi
])
@@ -221,7 +331,7 @@ main()
AC_MSG_RESULT($ac_cv_uchar)
if test "$ac_cv_uchar" = "yes"
then
- AC_DEFINE(HAVE_UCHAR)
+ AC_DEFINE([HAVE_UCHAR], [1], [system headers define uchar])
fi
])
@@ -239,7 +349,7 @@ main()
AC_MSG_RESULT($ac_cv_uint)
if test "$ac_cv_uint" = "yes"
then
- AC_DEFINE(HAVE_UINT)
+ AC_DEFINE([HAVE_UINT], [1], [system headers define uint])
fi
])
@@ -261,7 +371,7 @@ int main(int argc, char **argv)
AC_MSG_RESULT($ac_cv_in_addr_t)
if test "$ac_cv_in_addr_t" = "yes"
then
- AC_DEFINE(HAVE_IN_ADDR_T)
+ AC_DEFINE([HAVE_IN_ADDR_T], [1], [system headers define in_addr_t])
fi
])
@@ -279,7 +389,8 @@ extern "C"
], ac_cv_pthread_yield_zero_arg=yes, ac_cv_pthread_yield_zero_arg=yeso)])
if test "$ac_cv_pthread_yield_zero_arg" = "yes"
then
- AC_DEFINE(HAVE_PTHREAD_YIELD_ZERO_ARG)
+ AC_DEFINE([HAVE_PTHREAD_YIELD_ZERO_ARG], [1],
+ [pthread_yield that doesn't take any arguments])
fi
]
[AC_CACHE_CHECK([if pthread_yield takes 1 argument], ac_cv_pthread_yield_one_arg,
@@ -294,7 +405,8 @@ extern "C"
], ac_cv_pthread_yield_one_arg=yes, ac_cv_pthread_yield_one_arg=no)])
if test "$ac_cv_pthread_yield_one_arg" = "yes"
then
- AC_DEFINE(HAVE_PTHREAD_YIELD_ONE_ARG)
+ AC_DEFINE([HAVE_PTHREAD_YIELD_ONE_ARG], [1],
+ [pthread_yield function with one argument])
fi
]
)
@@ -318,7 +430,7 @@ main()
AC_MSG_RESULT($ac_cv_fp_except)
if test "$ac_cv_fp_except" = "yes"
then
- AC_DEFINE(HAVE_FP_EXCEPT)
+ AC_DEFINE([HAVE_FP_EXCEPT], [1], [fp_except from ieeefp.h])
fi
])
@@ -459,11 +571,12 @@ AC_CACHE_VAL(mysql_cv_signal_vintage,
])
AC_MSG_RESULT($mysql_cv_signal_vintage)
if test "$mysql_cv_signal_vintage" = posix; then
-AC_DEFINE(HAVE_POSIX_SIGNALS)
+AC_DEFINE(HAVE_POSIX_SIGNALS, [1],
+ [Signal handling is POSIX (sigset/sighold, etc)])
elif test "$mysql_cv_signal_vintage" = "4.2bsd"; then
-AC_DEFINE(HAVE_BSD_SIGNALS)
+AC_DEFINE([HAVE_BSD_SIGNALS], [1], [BSD style signals])
elif test "$mysql_cv_signal_vintage" = svr3; then
-AC_DEFINE(HAVE_USG_SIGHOLD)
+AC_DEFINE(HAVE_USG_SIGHOLD, [1], [sighold() is present and usable])
fi
])
@@ -476,7 +589,7 @@ extern struct passwd *getpwent();], [struct passwd *z; z = getpwent();],
mysql_cv_can_redecl_getpw=yes,mysql_cv_can_redecl_getpw=no)])
AC_MSG_RESULT($mysql_cv_can_redecl_getpw)
if test "$mysql_cv_can_redecl_getpw" = "no"; then
-AC_DEFINE(HAVE_GETPW_DECLS)
+AC_DEFINE(HAVE_GETPW_DECLS, [1], [getpwent() declaration present])
fi
])
@@ -488,7 +601,8 @@ AC_CACHE_VAL(mysql_cv_tiocgwinsz_in_ioctl,
mysql_cv_tiocgwinsz_in_ioctl=yes,mysql_cv_tiocgwinsz_in_ioctl=no)])
AC_MSG_RESULT($mysql_cv_tiocgwinsz_in_ioctl)
if test "$mysql_cv_tiocgwinsz_in_ioctl" = "yes"; then
-AC_DEFINE(GWINSZ_IN_SYS_IOCTL)
+AC_DEFINE([GWINSZ_IN_SYS_IOCTL], [1],
+ [READLINE: your system defines TIOCGWINSZ in sys/ioctl.h.])
fi
])
@@ -500,7 +614,7 @@ AC_CACHE_VAL(mysql_cv_fionread_in_ioctl,
mysql_cv_fionread_in_ioctl=yes,mysql_cv_fionread_in_ioctl=no)])
AC_MSG_RESULT($mysql_cv_fionread_in_ioctl)
if test "$mysql_cv_fionread_in_ioctl" = "yes"; then
-AC_DEFINE(FIONREAD_IN_SYS_IOCTL)
+AC_DEFINE([FIONREAD_IN_SYS_IOCTL], [1], [Do we have FIONREAD])
fi
])
@@ -512,7 +626,8 @@ AC_CACHE_VAL(mysql_cv_tiocstat_in_ioctl,
mysql_cv_tiocstat_in_ioctl=yes,mysql_cv_tiocstat_in_ioctl=no)])
AC_MSG_RESULT($mysql_cv_tiocstat_in_ioctl)
if test "$mysql_cv_tiocstat_in_ioctl" = "yes"; then
-AC_DEFINE(TIOCSTAT_IN_SYS_IOCTL)
+AC_DEFINE(TIOCSTAT_IN_SYS_IOCTL, [1],
+ [declaration of TIOCSTAT in sys/ioctl.h])
fi
])
@@ -545,10 +660,46 @@ struct dirent d; int z; z = d.d_ino;
], mysql_cv_dirent_has_dino=yes, mysql_cv_dirent_has_dino=no)])
AC_MSG_RESULT($mysql_cv_dirent_has_dino)
if test "$mysql_cv_dirent_has_dino" = "yes"; then
-AC_DEFINE(STRUCT_DIRENT_HAS_D_INO)
+AC_DEFINE(STRUCT_DIRENT_HAS_D_INO, [1],
+ [d_ino member present in struct dirent])
fi
])
+AC_DEFUN(MYSQL_STRUCT_DIRENT_D_NAMLEN,
+[AC_REQUIRE([AC_HEADER_DIRENT])
+AC_MSG_CHECKING(if struct dirent has a d_namlen member)
+AC_CACHE_VAL(mysql_cv_dirent_has_dnamlen,
+[AC_TRY_COMPILE([
+#include <stdio.h>
+#include <sys/types.h>
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif /* HAVE_UNISTD_H */
+#if defined(HAVE_DIRENT_H)
+# include <dirent.h>
+#else
+# define dirent direct
+# ifdef HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif /* SYSNDIR */
+# ifdef HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif /* SYSDIR */
+# ifdef HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif /* HAVE_DIRENT_H */
+],[
+struct dirent d; int z; z = (int)d.d_namlen;
+], mysql_cv_dirent_has_dnamlen=yes, mysql_cv_dirent_has_dnamlen=no)])
+AC_MSG_RESULT($mysql_cv_dirent_has_dnamlen)
+if test "$mysql_cv_dirent_has_dnamlen" = "yes"; then
+AC_DEFINE(STRUCT_DIRENT_HAS_D_NAMLEN, [1],
+ [d_namlen member present in struct dirent])
+fi
+])
+
+
AC_DEFUN(MYSQL_TYPE_SIGHANDLER,
[AC_MSG_CHECKING([whether signal handlers are of type void])
AC_CACHE_VAL(mysql_cv_void_sighandler,
@@ -564,7 +715,7 @@ void (*signal ()) ();],
[int i;], mysql_cv_void_sighandler=yes, mysql_cv_void_sighandler=no)])dnl
AC_MSG_RESULT($mysql_cv_void_sighandler)
if test "$mysql_cv_void_sighandler" = "yes"; then
-AC_DEFINE(VOID_SIGHANDLER)
+AC_DEFINE(VOID_SIGHANDLER, [1], [sighandler type is void (*signal ()) ();])
fi
])
@@ -583,7 +734,7 @@ AC_LANG_RESTORE
])
AC_MSG_RESULT($mysql_cv_have_bool)
if test "$mysql_cv_have_bool" = yes; then
-AC_DEFINE(HAVE_BOOL)
+AC_DEFINE([HAVE_BOOL], [1], [bool is not defined by all C++ compilators])
fi
])dnl
@@ -624,7 +775,7 @@ then
ac_cv_header_alloca_h=yes, ac_cv_header_alloca_h=no)])
if test "$ac_cv_header_alloca_h" = "yes"
then
- AC_DEFINE(HAVE_ALLOCA)
+ AC_DEFINE(HAVE_ALLOCA, 1)
fi
AC_CACHE_CHECK([for alloca], ac_cv_func_alloca_works,
@@ -647,7 +798,7 @@ then
], [char *p = (char *) alloca(1);],
ac_cv_func_alloca_works=yes, ac_cv_func_alloca_works=no)])
if test "$ac_cv_func_alloca_works" = "yes"; then
- AC_DEFINE(HAVE_ALLOCA)
+ AC_DEFINE([HAVE_ALLOCA], [1], [If we have a working alloca() implementation])
fi
if test "$ac_cv_func_alloca_works" = "no"; then
@@ -656,7 +807,7 @@ then
# contain a buggy version. If you still want to use their alloca,
# use ar to extract alloca.o from them instead of compiling alloca.c.
ALLOCA=alloca.o
- AC_DEFINE(C_ALLOCA)
+ AC_DEFINE(C_ALLOCA, 1)
AC_CACHE_CHECK(whether alloca needs Cray hooks, ac_cv_os_cray,
[AC_EGREP_CPP(webecray,
@@ -761,7 +912,7 @@ AC_DEFUN(MYSQL_CHECK_VIO, [
then
vio_dir="vio"
vio_libs="../vio/libvio.la"
- AC_DEFINE(HAVE_VIO)
+ AC_DEFINE(HAVE_VIO, 1)
else
vio_dir=""
vio_libs=""
@@ -852,7 +1003,7 @@ AC_MSG_CHECKING(for OpenSSL)
#force VIO use
vio_dir="vio"
vio_libs="../vio/libvio.la"
- AC_DEFINE(HAVE_VIO)
+ AC_DEFINE([HAVE_VIO], [1], [Virtual IO])
AC_MSG_RESULT(yes)
openssl_libs="-L$OPENSSL_LIB -lssl -lcrypto"
# Don't set openssl_includes to /usr/include as this gives us a lot of
@@ -866,7 +1017,7 @@ AC_MSG_CHECKING(for OpenSSL)
then
openssl_includes="$openssl_includes -I$OPENSSL_KERBEROS_INCLUDE"
fi
- AC_DEFINE(HAVE_OPENSSL)
+ AC_DEFINE([HAVE_OPENSSL], [1], [OpenSSL])
# openssl-devel-0.9.6 requires dlopen() and we can't link staticly
# on many platforms (We should actually test this here, but it's quite
@@ -927,7 +1078,7 @@ then
orbit_libs=`orbit-config --libs server`
orbit_idl="$orbit_exec_prefix/bin/orbit-idl"
AC_MSG_RESULT(found!)
- AC_DEFINE(HAVE_ORBIT)
+ AC_DEFINE([HAVE_ORBIT], [1], [ORBIT])
else
orbit_exec_prefix=
orbit_includes=
@@ -949,7 +1100,7 @@ AC_DEFUN([MYSQL_CHECK_ISAM], [
isam_libs=
if test X"$with_isam" = X"yes"
then
- AC_DEFINE(HAVE_ISAM)
+ AC_DEFINE([HAVE_ISAM], [1], [Using old ISAM tables])
isam_libs="\$(top_builddir)/isam/libnisam.a\
\$(top_builddir)/merge/libmerge.a"
fi
@@ -1245,7 +1396,7 @@ AC_DEFUN([MYSQL_CHECK_INNODB], [
case "$innodb" in
yes )
AC_MSG_RESULT([Using Innodb])
- AC_DEFINE(HAVE_INNOBASE_DB)
+ AC_DEFINE([HAVE_INNOBASE_DB], [1], [Using Innobase DB])
have_innodb="yes"
innodb_includes="-I../innobase/include"
innodb_system_libs=""
@@ -1318,7 +1469,7 @@ AC_DEFUN([MYSQL_CHECK_EXAMPLEDB], [
case "$exampledb" in
yes )
- AC_DEFINE(HAVE_EXAMPLE_DB)
+ AC_DEFINE([HAVE_EXAMPLE_DB], [1], [Builds Example DB])
AC_MSG_RESULT([yes])
[exampledb=yes]
;;
@@ -1348,7 +1499,7 @@ AC_DEFUN([MYSQL_CHECK_ARCHIVEDB], [
case "$archivedb" in
yes )
- AC_DEFINE(HAVE_ARCHIVE_DB)
+ AC_DEFINE([HAVE_ARCHIVE_DB], [1], [Builds Archive Storage Engine])
AC_MSG_RESULT([yes])
[archivedb=yes]
;;
@@ -1364,6 +1515,37 @@ dnl END OF MYSQL_CHECK_ARCHIVE SECTION
dnl ---------------------------------------------------------------------------
dnl ---------------------------------------------------------------------------
+dnl Macro: MYSQL_CHECK_CSVDB
+dnl Sets HAVE_CSV_DB if --with-csv-storage-engine is used
+dnl ---------------------------------------------------------------------------
+AC_DEFUN([MYSQL_CHECK_CSVDB], [
+ AC_ARG_WITH([csv-storage-engine],
+ [
+ --with-csv-storage-engine
+ Enable the CSV Storage Engine],
+ [csvdb="$withval"],
+ [csvdb=no])
+ AC_MSG_CHECKING([for csv storage engine])
+
+ case "$csvdb" in
+ yes )
+ AC_DEFINE([HAVE_CSV_DB], [1], [Builds the CSV Storage Engine])
+ AC_MSG_RESULT([yes])
+ [csvdb=yes]
+ ;;
+ * )
+ AC_MSG_RESULT([no])
+ [csvdb=no]
+ ;;
+ esac
+
+])
+dnl ---------------------------------------------------------------------------
+dnl END OF MYSQL_CHECK_CSV SECTION
+dnl ---------------------------------------------------------------------------
+
+
+dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_CHECK_NDBCLUSTER
dnl Sets HAVE_NDBCLUSTER_DB if --with-ndbcluster is used
dnl ---------------------------------------------------------------------------
@@ -1397,7 +1579,8 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
case "$ndb_shm" in
yes )
AC_MSG_RESULT([-- including shared memory transporter])
- AC_DEFINE(NDB_SHM_TRANSPORTER)
+ AC_DEFINE([NDB_SHM_TRANSPORTER], [1],
+ [Including Ndb Cluster DB shared memory transporter])
have_ndb_shm="yes"
;;
* )
@@ -1409,7 +1592,8 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
case "$ndb_sci" in
yes )
AC_MSG_RESULT([-- including sci transporter])
- AC_DEFINE(NDB_SCI_TRANSPORTER)
+ AC_DEFINE([NDB_SCI_TRANSPORTER], [1],
+ [Including Ndb Cluster DB sci transporter])
have_ndb_sci="yes"
;;
* )
@@ -1457,7 +1641,7 @@ AC_DEFUN([MYSQL_CHECK_NDBCLUSTER], [
case "$ndbcluster" in
yes )
AC_MSG_RESULT([Using NDB Cluster])
- AC_DEFINE(HAVE_NDBCLUSTER_DB)
+ AC_DEFINE([HAVE_NDBCLUSTER_DB], [1], [Using Ndb Cluster DB])
have_ndbcluster="yes"
ndbcluster_includes="-I../ndb/include -I../ndb/include/ndbapi"
ndbcluster_libs="\$(top_builddir)/ndb/src/.libs/libndbclient.a"
@@ -1602,7 +1786,7 @@ AC_DEFUN(MYSQL_SYS_LARGEFILE,
esac])
AC_SYS_LARGEFILE_MACRO_VALUE(_LARGEFILE_SOURCE,
ac_cv_sys_largefile_source,
- [Define to make fseeko etc. visible, on some hosts.],
+ [makes fseeko etc. visible, on some hosts.],
[case "$host_os" in
# HP-UX 10.20 and later
hpux10.[2-9][0-9]* | hpux1[1-9]* | hpux[2-9][0-9]*)
@@ -1610,7 +1794,7 @@ AC_DEFUN(MYSQL_SYS_LARGEFILE,
esac])
AC_SYS_LARGEFILE_MACRO_VALUE(_LARGE_FILES,
ac_cv_sys_large_files,
- [Define for large files, on AIX-style hosts.],
+ [Large files support on AIX-style hosts.],
[case "$host_os" in
# AIX 4.2 and later
aix4.[2-9]* | aix4.1[0-9]* | aix[5-9].* | aix[1-9][0-9]*)
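
The rewritten zlib macros give configure a --with-zlib-dir option (no / bundled / empty / prefix, as documented in the dnl block above) and replace the old run-test with a pure link-test. Roughly, the AC_TRY_LINK probe in MYSQL_CHECK_ZLIB_DIR amounts to compiling and linking something like the following against $ZLIB_LIBS; if the compress() symbol resolves, HAVE_COMPRESS gets defined. This is a sketch, with casts added so it compiles cleanly on its own.

    /* build hint: cc probe.c -lz   (or with -I/-L taken from --with-zlib-dir=<prefix>) */
    #include <zlib.h>

    /* Never meant to run: it only has to make the linker resolve compress(). */
    int link_test(void)
    {
      return compress((Bytef*) 0, (uLongf*) 0, (const Bytef*) "", 0);
    }

    int main(void)
    {
      return 0;
    }
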
diff --git a/bdb/dist/configure.ac b/bdb/dist/configure.ac
index a61b8595322..98cf0f63b39 100644
--- a/bdb/dist/configure.ac
+++ b/bdb/dist/configure.ac
@@ -555,7 +555,7 @@ fi
LIB@&t@OBJS=`echo "$LIB@&t@OBJS" |
sed 's,\.[[^.]]* ,$U&,g;s,\.[[^.]]*$,$U&,'`
LTLIBOBJS=`echo "$LIB@&t@OBJS" |
- sed 's,\.[[^.]]* ,.lo ,g;s,\.[[^.]]*$,.lo,'`
+ sed "s,\.[[^.]]* ,$o ,g;s,\.[[^.]]*$,$o,"`
AC_SUBST(LTLIBOBJS)
# Initial output file list.
diff --git a/bdb/dist/gen_inc.awk b/bdb/dist/gen_inc.awk
index 4d245623bee..2f5b491cda1 100644
--- a/bdb/dist/gen_inc.awk
+++ b/bdb/dist/gen_inc.awk
@@ -18,20 +18,20 @@
# i_pfile include file that contains internal (PUBLIC) prototypes
/PUBLIC:/ {
sub("^.*PUBLIC:[ ][ ]*", "")
- if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") {
+ if ($0 ~ /^#(if|ifdef|ifndef|else|endif)/) {
print $0 >> i_pfile
print $0 >> i_dfile
next
}
pline = sprintf("%s %s", pline, $0)
- if (pline ~ "));") {
+ if (pline ~ /\)\);/) {
sub("^[ ]*", "", pline)
print pline >> i_pfile
if (pline !~ db_version_unique_name) {
- def = gensub("[ ][ ]*__P.*", "", 1, pline)
- sub("^.*[ ][*]*", "", def)
+ sub("[ ][ ]*__P.*", "", pline)
+ sub("^.*[ ][*]*", "", pline)
printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
- def, def) >> i_dfile
+ pline, pline) >> i_dfile
}
pline = ""
}
@@ -53,20 +53,20 @@
# functions in libraries built with that configuration option.
/EXTERN:/ {
sub("^.*EXTERN:[ ][ ]*", "")
- if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") {
+ if ($0 ~ /^#(if|ifdef|ifndef|else|endif)/) {
print $0 >> e_pfile
print $0 >> e_dfile
next
}
eline = sprintf("%s %s", eline, $0)
- if (eline ~ "));") {
+ if (eline ~ /\)\);/) {
sub("^[ ]*", "", eline)
print eline >> e_pfile
- if (eline !~ db_version_unique_name && eline !~ "^int txn_") {
- def = gensub("[ ][ ]*__P.*", "", 1, eline)
- sub("^.*[ ][*]*", "", def)
+ if (eline !~ db_version_unique_name && eline !~ /^int txn_/) {
+ sub("[ ][ ]*__P.*", "", eline)
+ sub("^.*[ ][*]*", "", eline)
printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
- def, def) >> e_dfile
+ eline, eline) >> e_dfile
}
eline = ""
}
diff --git a/client/client_priv.h b/client/client_priv.h
index 854d205e585..ad08484b706 100644
--- a/client/client_priv.h
+++ b/client/client_priv.h
@@ -43,5 +43,6 @@ enum options_client
OPT_PROMPT, OPT_IGN_LINES,OPT_TRANSACTION,OPT_MYSQL_PROTOCOL,
OPT_SHARED_MEMORY_BASE_NAME, OPT_FRM, OPT_SKIP_OPTIMIZATION,
OPT_COMPATIBLE, OPT_RECONNECT, OPT_DELIMITER, OPT_SECURE_AUTH,
- OPT_OPEN_FILES_LIMIT, OPT_SET_CHARSET, OPT_CREATE_OPTIONS
+ OPT_OPEN_FILES_LIMIT, OPT_SET_CHARSET, OPT_CREATE_OPTIONS,
+ OPT_START_POSITION, OPT_STOP_POSITION, OPT_START_DATETIME, OPT_STOP_DATETIME
};
diff --git a/client/mysql.cc b/client/mysql.cc
index c9ee6819a13..0b43f9b80ec 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -44,7 +44,7 @@
#include <locale.h>
#endif
-const char *VER= "14.5";
+const char *VER= "14.6";
/* Don't try to make a nice table if the data is too big */
#define MAX_COLUMN_LENGTH 1024
@@ -294,7 +294,7 @@ static const char *server_default_groups[]=
HIST_ENTRY is defined for libedit, but not for the real readline
Need to redefine it for real readline to find it
*/
-#if !defined(USE_LIBEDIT_INTERFACE)
+#if !defined(HAVE_HIST_ENTRY)
typedef struct _hist_entry {
const char *line;
const char *data;
@@ -607,7 +607,7 @@ static struct my_option my_long_options[] =
{"silent", 's', "Be more silent. Print results with a tab as separator, each row on new line.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0,
0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name", OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -753,8 +753,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
opt_nopager= 1;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) ==
- ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
@@ -793,6 +792,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
while (*argument) *argument++= 'x'; // Destroy argument
if (*start)
start[1]=0 ;
+ tty_password= 0;
}
else
tty_password= 1;
@@ -1670,15 +1670,15 @@ static int com_server_help(String *buffer __attribute__((unused)),
if (num_fields == 2)
{
put_info("Many help items for your request exist", INFO_INFO);
- put_info("For more specific request please type 'help <item>' where item is one of next", INFO_INFO);
+ put_info("To make a more specific request, please type 'help <item>',\nwhere item is one of next", INFO_INFO);
num_name= 0;
num_cat= 1;
last_char= '_';
}
else if ((cur= mysql_fetch_row(result)))
{
- tee_fprintf(PAGER, "You asked help about help category: \"%s\"\n", cur[0]);
- put_info("For a more information type 'help <item>' where item is one of the following", INFO_INFO);
+ tee_fprintf(PAGER, "You asked for help about help category: \"%s\"\n", cur[0]);
+ put_info("For more information, type 'help <item>', where item is one of the following", INFO_INFO);
num_name= 1;
num_cat= 2;
print_help_item(&cur,1,2,&last_char);
@@ -1692,7 +1692,7 @@ static int com_server_help(String *buffer __attribute__((unused)),
else
{
put_info("\nNothing found", INFO_INFO);
- put_info("Please try to run 'help contents' for list of all accessible topics\n", INFO_INFO);
+ put_info("Please try to run 'help contents' for a list of all accessible topics\n", INFO_INFO);
}
}
@@ -1711,9 +1711,9 @@ com_help(String *buffer __attribute__((unused)),
if (help_arg)
return com_server_help(buffer,line,help_arg+1);
- put_info("\nFor the complete MySQL Manual online visit:\n http://www.mysql.com/documentation\n", INFO_INFO);
- put_info("For info on technical support from MySQL developers visit:\n http://www.mysql.com/support\n", INFO_INFO);
- put_info("For info on MySQL books, utilities, consultants, etc. visit:\n http://www.mysql.com/portal\n", INFO_INFO);
+ put_info("\nFor the complete MySQL Manual online, visit:\n http://www.mysql.com/documentation\n", INFO_INFO);
+ put_info("For info on technical support from MySQL developers, visit:\n http://www.mysql.com/support\n", INFO_INFO);
+ put_info("For info on MySQL books, utilities, consultants, etc., visit:\n http://www.mysql.com/portal\n", INFO_INFO);
put_info("List of all MySQL commands:", INFO_INFO);
if (!named_cmds)
put_info("Note that all text commands must be first on line and end with ';'",INFO_INFO);
@@ -2020,21 +2020,27 @@ print_table_data(MYSQL_RES *result)
while ((cur= mysql_fetch_row(result)))
{
+ ulong *lengths= mysql_fetch_lengths(result);
(void) tee_fputs("|", PAGER);
mysql_field_seek(result, 0);
for (uint off= 0; off < mysql_num_fields(result); off++)
{
const char *str= cur[off] ? cur[off] : "NULL";
field= mysql_fetch_field(result);
- uint length= field->max_length;
- if (length > MAX_COLUMN_LENGTH)
+ uint maxlength= field->max_length;
+ if (maxlength > MAX_COLUMN_LENGTH)
{
tee_fputs(str, PAGER);
tee_fputs(" |", PAGER);
}
else
- tee_fprintf(PAGER, num_flag[off] ? "%*s |" : " %-*s|",
- length, str);
+ {
+ uint currlength= (uint) lengths[off];
+ uint numcells= charset_info->cset->numcells(charset_info,
+ str, str + currlength);
+ tee_fprintf(PAGER, num_flag[off] ? "%*s |" : " %-*s|",
+ maxlength + currlength - numcells, str);
+ }
}
(void) tee_fputs("\n", PAGER);
}
@@ -2687,8 +2693,9 @@ char *get_arg(char *line, my_bool get_next_arg)
ptr++;
if (*ptr == '\\') // short command was used
ptr+= 2;
- while (*ptr &&!my_isspace(charset_info, *ptr)) // skip command
- ptr++;
+ else
+ while (*ptr &&!my_isspace(charset_info, *ptr)) // skip command
+ ptr++;
}
if (!*ptr)
return NullS;
@@ -2836,13 +2843,16 @@ com_status(String *buffer __attribute__((unused)),
MYSQL_RES *result;
LINT_INIT(result);
tee_fprintf(stdout, "\nConnection id:\t\t%lu\n",mysql_thread_id(&mysql));
- if (!mysql_query(&mysql,"select DATABASE(),USER()") &&
+ if (!mysql_query(&mysql,"select DATABASE(), USER() limit 1") &&
(result=mysql_use_result(&mysql)))
{
MYSQL_ROW cur=mysql_fetch_row(result);
- tee_fprintf(stdout, "Current database:\t%s\n", cur[0] ? cur[0] : "");
- tee_fprintf(stdout, "Current user:\t\t%s\n",cur[1]);
- (void) mysql_fetch_row(result); // Read eof
+ if (cur)
+ {
+ tee_fprintf(stdout, "Current database:\t%s\n", cur[0] ? cur[0] : "");
+ tee_fprintf(stdout, "Current user:\t\t%s\n", cur[1]);
+ }
+ mysql_free_result(result);
}
#ifdef HAVE_OPENSSL
if (mysql.net.vio && mysql.net.vio->ssl_arg &&
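
One of the mysql.cc hunks above changes print_table_data() to pad each cell by maxlength + currlength - numcells instead of the plain column width: printf's %-*s counts bytes, while the column width is measured in display cells, so multi-byte values need the field inflated by the difference. A self-contained illustration of that arithmetic; the real client obtains the cell count from charset_info->cset->numcells(), which is approximated here by counting non-continuation UTF-8 bytes.

    #include <stdio.h>
    #include <string.h>

    /* Approximate display cells of a UTF-8 string (no wide characters here). */
    static unsigned cells_utf8(const char *s, size_t len)
    {
      unsigned cells= 0;
      for (size_t i= 0; i < len; i++)
        if (((unsigned char) s[i] & 0xC0) != 0x80)   /* not a continuation byte */
          cells++;
      return cells;
    }

    int main(void)
    {
      const char *val= "na\xc3\xafve";               /* "naive" with i-diaeresis: 6 bytes, 5 cells */
      unsigned maxlength= 10;                        /* column display width */
      unsigned currlength= (unsigned) strlen(val);
      unsigned numcells= cells_utf8(val, currlength);

      /* Field width in bytes = cells wanted + (bytes - cells) of this value. */
      printf("| %-*s|\n", (int) (maxlength + currlength - numcells), val);
      printf("| %-*s|\n", (int) maxlength, "ascii"); /* lines up with the row above */
      return 0;
    }
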
diff --git a/client/mysqladmin.c b/client/mysqladmin.c
index aaed101a83e..df3e8dfed62 100644
--- a/client/mysqladmin.c
+++ b/client/mysqladmin.c
@@ -151,7 +151,7 @@ static struct my_option my_long_options[] =
"Change the value of a variable. Please note that this option is deprecated; you can set variables directly with --variable-name=value.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name", OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -249,7 +249,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 268cd4ddd80..538ec4982c1 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -31,7 +31,7 @@
#define MYSQL_CLIENT
#undef MYSQL_SERVER
#include "client_priv.h"
-#include <time.h>
+#include <my_time.h>
#include "log_event.h"
/* That one is necessary for defines of OPTION_NO_FOREIGN_KEY_CHECKS etc */
#include "mysql_priv.h"
@@ -69,10 +69,18 @@ static int port = MYSQL_PORT;
static const char* sock= 0;
static const char* user = 0;
static char* pass = 0;
-static ulonglong position = 0;
+
+static ulonglong start_position, stop_position;
+#define start_position_mot ((my_off_t)start_position)
+#define stop_position_mot ((my_off_t)stop_position)
+
+static char *start_datetime_str, *stop_datetime_str;
+static my_time_t start_datetime= 0, stop_datetime= MY_TIME_T_MAX;
+static ulonglong rec_count= 0;
static short binlog_flags = 0;
static MYSQL* mysql = NULL;
static const char* dirname_for_local_load= 0;
+static bool stop_passed= 0;
/*
check_header() will set the pointer below.
@@ -327,14 +335,45 @@ Create_file event for file_id: %u\n",ae->file_id);
Load_log_processor load_processor;
-int process_event(ulonglong *rec_count, LAST_EVENT_INFO *last_event_info,
- Log_event *ev, my_off_t pos)
+/*
+ Process an event
+
+ SYNOPSIS
+ process_event()
+
+ RETURN
+ 0 ok and continue
+ 1 error and terminate
+ -1 ok and terminate
+
+ TODO
+ This function returns 0 even in some error cases. This should be changed.
+*/
+
+
+
+int process_event(LAST_EVENT_INFO *last_event_info, Log_event *ev,
+ my_off_t pos)
{
char ll_buff[21];
DBUG_ENTER("process_event");
- if ((*rec_count) >= offset)
+ if ((rec_count >= offset) &&
+ ((my_time_t)(ev->when) >= start_datetime))
{
+ /*
+ We have found an event after start_datetime, from now on print
+ everything (in case the binlog has timestamps increasing and decreasing,
+ we do this to avoid cutting the middle).
+ */
+ start_datetime= 0;
+ offset= 0; // print everything and protect against cycling rec_count
+ if (((my_time_t)(ev->when) >= stop_datetime)
+ || (pos >= stop_position_mot))
+ {
+ stop_passed= 1; // skip all next binlogs
+ DBUG_RETURN(-1);
+ }
if (!short_form)
fprintf(result_file, "# at %s\n",llstr(pos,ll_buff));
@@ -424,7 +463,7 @@ Create_file event for file_id: %u\n",exv->file_id);
}
end:
- (*rec_count)++;
+ rec_count++;
if (ev)
delete ev;
DBUG_RETURN(0);
@@ -454,13 +493,14 @@ static struct my_option my_long_options[] =
{"port", 'P', "Use port to connect to the remote server.",
(gptr*) &port, (gptr*) &port, 0, GET_INT, REQUIRED_ARG, MYSQL_PORT, 0, 0,
0, 0, 0},
- {"position", 'j', "Start reading the binlog at position N.",
- (gptr*) &position, (gptr*) &position, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0,
- 0, 0},
+ {"position", 'j', "Deprecated. Use --start-position instead.",
+ (gptr*) &start_position, (gptr*) &start_position, 0, GET_ULL,
+ REQUIRED_ARG, BIN_LOG_HEADER_SIZE, BIN_LOG_HEADER_SIZE,
+ /* COM_BINLOG_DUMP accepts only 4 bytes for the position */
+ (ulonglong)(~(uint32)0), 0, 0, 0},
{"protocol", OPT_MYSQL_PROTOCOL,
"The protocol of connection (tcp,socket,pipe,memory).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-
{"result-file", 'r', "Direct output to a given file.", 0, 0, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"read-from-remote-server", 'R', "Read binary logs from a MySQL server",
@@ -476,6 +516,35 @@ static struct my_option my_long_options[] =
{"socket", 'S', "Socket file to use for connection.",
(gptr*) &sock, (gptr*) &sock, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0,
0, 0},
+ {"start-datetime", OPT_START_DATETIME,
+ "Start reading the binlog at first event having a datetime equal or "
+ "posterior to the argument; the argument must be a date and time "
+ "in the local time zone, in any format accepted by the MySQL server "
+ "for DATETIME and TIMESTAMP types, for example: 2004-12-25 11:25:56 "
+ "(you should probably use quotes for your shell to set it properly).",
+ (gptr*) &start_datetime_str, (gptr*) &start_datetime_str,
+ 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"stop-datetime", OPT_STOP_DATETIME,
+ "Stop reading the binlog at first event having a datetime equal or "
+ "posterior to the argument; the argument must be a date and time "
+ "in the local time zone, in any format accepted by the MySQL server "
+ "for DATETIME and TIMESTAMP types, for example: 2004-12-25 11:25:56 "
+ "(you should probably use quotes for your shell to set it properly).",
+ (gptr*) &stop_datetime_str, (gptr*) &stop_datetime_str,
+ 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"start-position", OPT_START_POSITION,
+ "Start reading the binlog at position N. Applies to the first binlog "
+ "passed on the command line.",
+ (gptr*) &start_position, (gptr*) &start_position, 0, GET_ULL,
+ REQUIRED_ARG, BIN_LOG_HEADER_SIZE, BIN_LOG_HEADER_SIZE,
+ /* COM_BINLOG_DUMP accepts only 4 bytes for the position */
+ (ulonglong)(~(uint32)0), 0, 0, 0},
+ {"stop-position", OPT_STOP_POSITION,
+ "Stop reading the binlog at position N. Applies to the last binlog "
+ "passed on the command line.",
+ (gptr*) &stop_position, (gptr*) &stop_position, 0, GET_ULL,
+ REQUIRED_ARG, (ulonglong)(~(my_off_t)0), BIN_LOG_HEADER_SIZE,
+ (ulonglong)(~(my_off_t)0), 0, 0, 0},
{"to-last-log", 't', "Requires -R. Will not stop at the end of the \
requested binlog but rather continue printing until the end of the last \
binlog of the MySQL server. If you send the output to the same MySQL server, \
@@ -550,6 +619,29 @@ the mysql command line client\n\n");
my_print_variables(my_long_options);
}
+
+static my_time_t convert_str_to_timestamp(const char* str)
+{
+ int was_cut;
+ MYSQL_TIME l_time;
+ long dummy_my_timezone;
+ bool dummy_in_dst_time_gap;
+ /* We require a total specification (date AND time) */
+ if (str_to_datetime(str, strlen(str), &l_time, 0, &was_cut) !=
+ MYSQL_TIMESTAMP_DATETIME || was_cut)
+ {
+ fprintf(stderr, "Incorrect date and time argument: %s\n", str);
+ exit(1);
+ }
+ /*
+ Note that Feb 30th, Apr 31st cause no error messages and are mapped to
+ the next existing day, like in mysqld. Maybe this could be changed when
+ mysqld is changed too (with its "strict" mode?).
+ */
+ return
+ my_system_gmt_sec(&l_time, &dummy_my_timezone, &dummy_in_dst_time_gap);
+}
+
#include <help_end.h>
extern "C" my_bool
@@ -588,15 +680,19 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) ==
- ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
}
break;
}
- break;
+ case OPT_START_DATETIME:
+ start_datetime= convert_str_to_timestamp(start_datetime_str);
+ break;
+ case OPT_STOP_DATETIME:
+ stop_datetime= convert_str_to_timestamp(stop_datetime_str);
+ break;
case 'V':
print_version();
exit(0);
@@ -638,13 +734,14 @@ static MYSQL* safe_connect()
return local_mysql;
}
+
static int dump_log_entries(const char* logname)
{
- if (remote_opt)
- return dump_remote_log_entries(logname);
- return dump_local_log_entries(logname);
+ return (remote_opt ? dump_remote_log_entries(logname) :
+ dump_local_log_entries(logname));
}
+
/*
This is not as smart as check_header() (used for local log); it will not work
for a binlog which mixes format. TODO: fix this.
@@ -711,9 +808,20 @@ static int dump_remote_log_entries(const char* logname)
char buf[128];
LAST_EVENT_INFO last_event_info;
uint len, logname_len;
- NET* net = &mysql->net;
+ NET* net;
+ int error= 0;
+ my_off_t old_off= start_position_mot;
+ char fname[FN_REFLEN+1];
DBUG_ENTER("dump_remote_log_entries");
+ /*
+ Even if we already read one binlog (case of >=2 binlogs on command line),
+ we cannot re-use the same connection as before, because it is now dead
+ (COM_BINLOG_DUMP kills the thread when it finishes).
+ */
+ mysql= safe_connect();
+ net= &mysql->net;
+
if (check_master_version(mysql, &description_event))
{
fprintf(stderr, "Could not find server version");
@@ -726,14 +834,11 @@ could be out of memory");
DBUG_RETURN(1);
}
- if (!position)
- position = BIN_LOG_HEADER_SIZE;
- if (position < BIN_LOG_HEADER_SIZE)
- {
- position = BIN_LOG_HEADER_SIZE;
- sql_print_error("Warning: The position in the binary log can't be less than %d.\nStarting from position %d\n", BIN_LOG_HEADER_SIZE, BIN_LOG_HEADER_SIZE);
- }
- int4store(buf, position);
+ /*
+ COM_BINLOG_DUMP accepts only 4 bytes for the position, so we are forced to
+ cast to uint32.
+ */
+ int4store(buf, (uint32)start_position);
int2store(buf + BIN_LOG_HEADER_SIZE, binlog_flags);
logname_len = (uint) strlen(logname);
int4store(buf + 6, 0);
@@ -741,22 +846,20 @@ could be out of memory");
if (simple_command(mysql, COM_BINLOG_DUMP, buf, logname_len + 10, 1))
{
fprintf(stderr,"Got fatal error sending the log dump command\n");
- DBUG_RETURN(1);
+ error= 1;
+ goto err;
}
- my_off_t old_off= position;
- ulonglong rec_count= 0;
- char fname[FN_REFLEN+1];
-
for (;;)
{
- const char *error;
+ const char *error_msg;
len = net_safe_read(mysql);
if (len == packet_error)
{
fprintf(stderr, "Got error reading packet from server: %s\n",
mysql_error(mysql));
- DBUG_RETURN(1);
+ error= 1;
+ goto err;
}
if (len < 8 && net->read_pos[0] == 254)
break; // end of data
@@ -768,11 +871,13 @@ could be out of memory");
if (!ev)
{
fprintf(stderr, "Could not construct log event object\n");
- DBUG_RETURN(1);
+ error= 1;
+ goto err;
}
Log_event_type type= ev->get_type_code();
- if (description_event->binlog_version >=3 || (type != LOAD_EVENT && type != CREATE_FILE_EVENT))
+ if (description_event->binlog_version >= 3 ||
+ (type != LOAD_EVENT && type != CREATE_FILE_EVENT))
{
/*
If this is a Rotate event, maybe it's the end of the requested binlog;
@@ -793,20 +898,32 @@ could be out of memory");
part of our log) and then we will stop when we receive the fake one
soon.
*/
- if ((rev->when == 0) && !to_last_remote_log)
+ if (rev->when == 0)
{
- if ((rev->ident_len != logname_len) ||
- memcmp(rev->new_log_ident, logname, logname_len))
- DBUG_RETURN(0);
- /*
- Otherwise, this is a fake Rotate for our log, at the very beginning
- for sure. Skip it.
- */
- continue;
+ if (!to_last_remote_log)
+ {
+ if ((rev->ident_len != logname_len) ||
+ memcmp(rev->new_log_ident, logname, logname_len))
+ {
+ error= 0;
+ goto err;
+ }
+ /*
+ Otherwise, this is a fake Rotate for our log, at the very
+ beginning for sure. Skip it, because it was not in the original
+ log. If we are running with to_last_remote_log, we print it,
+ because it serves as a useful marker between binlogs then.
+ */
+ continue;
+ }
+ len= 1; // fake Rotate, so don't increment old_off
}
}
- if (process_event(&rec_count,&last_event_info,ev,old_off))
- DBUG_RETURN(1);
+ if ((error= process_event(&last_event_info,ev,old_off)))
+ {
+ error= ((error < 0) ? 0 : 1);
+ goto err;
+ }
}
else
{
@@ -816,28 +933,35 @@ could be out of memory");
File file;
if ((file= load_processor.prepare_new_file_for_old_format(le,fname)) < 0)
- DBUG_RETURN(1);
-
- if (process_event(&rec_count,&last_event_info,ev,old_off))
{
- my_close(file,MYF(MY_WME));
- DBUG_RETURN(1);
+ error= 1;
+ goto err;
}
- if (load_processor.load_old_format_file(net,old_fname,old_len,file))
+
+ if ((error= process_event(&last_event_info,ev,old_off)))
{
my_close(file,MYF(MY_WME));
- DBUG_RETURN(1);
+ error= ((error < 0) ? 0 : 1);
+ goto err;
}
+ error= load_processor.load_old_format_file(net,old_fname,old_len,file);
my_close(file,MYF(MY_WME));
+ if (error)
+ {
+ error= 1;
+ goto err;
+ }
}
/*
Let's adjust offset for remote log as for local log to produce
- similar text. As we don't print the fake Rotate event, all events are
- real so we can simply add the length.
+ similar text.
*/
old_off+= len-1;
}
- DBUG_RETURN(0);
+
+err:
+ mysql_close(mysql);
+ DBUG_RETURN(error);
}
@@ -940,7 +1064,6 @@ static int dump_local_log_entries(const char* logname)
{
File fd = -1;
IO_CACHE cache,*file= &cache;
- ulonglong rec_count = 0;
LAST_EVENT_INFO last_event_info;
byte tmp_buff[BIN_LOG_HEADER_SIZE];
int error= 0;
@@ -949,7 +1072,7 @@ static int dump_local_log_entries(const char* logname)
{
if ((fd = my_open(logname, O_RDONLY | O_BINARY, MYF(MY_WME))) < 0)
return 1;
- if (init_io_cache(file, fd, 0, READ_CACHE, (my_off_t) position, 0,
+ if (init_io_cache(file, fd, 0, READ_CACHE, start_position_mot, 0,
MYF(MY_WME | MY_NABP)))
{
my_close(fd, MYF(MY_WME));
@@ -963,12 +1086,12 @@ static int dump_local_log_entries(const char* logname)
0, MYF(MY_WME | MY_NABP | MY_DONT_CHECK_FILESIZE)))
return 1;
check_header(file, &description_event);
- if (position)
+ if (start_position)
{
- /* skip 'position' characters from stdout */
+ /* skip 'start_position' characters from stdout */
byte buff[IO_SIZE];
my_off_t length,tmp;
- for (length= (my_off_t) position ; length > 0 ; length-=tmp)
+ for (length= start_position_mot ; length > 0 ; length-=tmp)
{
tmp=min(length,sizeof(buff));
if (my_b_read(file, buff, (uint) tmp))
@@ -978,14 +1101,14 @@ static int dump_local_log_entries(const char* logname)
}
}
}
- file->pos_in_file=position;
+ file->pos_in_file= start_position_mot;
file->seek_not_done=0;
}
if (!description_event || !description_event->is_valid())
die("Invalid Format_description log event; could be out of memory");
- if (!position && my_b_read(file, tmp_buff, BIN_LOG_HEADER_SIZE))
+ if (!start_position && my_b_read(file, tmp_buff, BIN_LOG_HEADER_SIZE))
{
error= 1;
goto end;
@@ -1009,11 +1132,12 @@ static int dump_local_log_entries(const char* logname)
// file->error == 0 means EOF, that's OK, we break in this case
break;
}
- if (process_event(&rec_count,&last_event_info,ev,old_off))
- {
- error= 1;
- break;
- }
+ if ((error= process_event(&last_event_info,ev,old_off)))
+ {
+ if (error < 0)
+ error= 0;
+ break;
+ }
}
end:
@@ -1028,11 +1152,14 @@ end:
int main(int argc, char** argv)
{
static char **defaults_argv;
- int exit_value;
+ int exit_value= 0;
+ ulonglong save_stop_position;
MY_INIT(argv[0]);
DBUG_ENTER("main");
DBUG_PROCESS(argv[0]);
+ init_time(); // for time functions
+
parse_args(&argc, (char***)&argv);
defaults_argv=argv;
@@ -1044,8 +1171,6 @@ int main(int argc, char** argv)
}
my_set_max_open_files(open_files_limit);
- if (remote_opt)
- mysql = safe_connect();
MY_TMPDIR tmpdir;
tmpdir.list= 0;
@@ -1063,24 +1188,26 @@ int main(int argc, char** argv)
else
load_processor.init_by_cur_dir();
- exit_value= 0;
fprintf(result_file,
"/*!40019 SET @@session.max_insert_delayed_threads=0*/;\n");
- while (--argc >= 0)
+ for (save_stop_position= stop_position, stop_position= ~(my_off_t)0 ;
+ (--argc >= 0) && !stop_passed ; )
{
+ if (argc == 0) // last log, --stop-position applies
+ stop_position= save_stop_position;
if (dump_log_entries(*(argv++)))
{
exit_value=1;
break;
}
+ // For next log, --start-position does not apply
+ start_position= BIN_LOG_HEADER_SIZE;
}
if (tmpdir.list)
free_tmpdir(&tmpdir);
if (result_file != stdout)
my_fclose(result_file, MYF(0));
- if (remote_opt)
- mysql_close(mysql);
cleanup();
free_defaults(defaults_argv);
my_free_open_file_info();
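
The mysqlbinlog changes above replace the old single-log flow with a loop over all log arguments: --start-position applies only to the first log, --stop-position only to the last, the dump position is truncated to 32 bits because COM_BINLOG_DUMP only carries 4 bytes, and process_event() now uses a negative return to mean "stop without error". The following stand-alone sketch shows the same save/restore pattern for the two positions; dump_one_log() is a hypothetical stand-in for dump_log_entries(), not the commit's code.

#include <stdio.h>

#define BIN_LOG_HEADER_SIZE 4ULL

static unsigned long long start_position = 107;     /* --start-position */
static unsigned long long stop_position  = 500;     /* --stop-position  */

/* hypothetical stand-in for dump_log_entries(); returns 0 on success */
static int dump_one_log(const char *name,
                        unsigned long long start,
                        unsigned long long stop)
{
    printf("%s: dump from %llu to %llu\n", name, start, stop);
    return 0;
}

int main(int argc, char **argv)
{
    unsigned long long save_stop = stop_position;
    unsigned long long cur_stop  = ~0ULL;   /* "no stop" for all but the last log */

    for (int i = 1; i < argc; i++) {
        if (i == argc - 1)                  /* last log: --stop-position applies */
            cur_stop = save_stop;
        if (dump_one_log(argv[i], start_position, cur_stop))
            return 1;
        /* for every following log, --start-position no longer applies */
        start_position = BIN_LOG_HEADER_SIZE;
    }
    return 0;
}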
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index 78e82e670f8..b072d1c86fe 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -125,7 +125,7 @@ static struct my_option my_long_options[] =
"Can fix almost anything except unique keys that aren't unique.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name", OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -271,7 +271,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case 'V': print_version(); exit(0);
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqldump.c b/client/mysqldump.c
index dfac9ea0e7c..c8392e2d502 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -276,7 +276,7 @@ static struct my_option my_long_options[] =
"Direct output to a given file. This option should be used in MSDOS, because it prevents new line '\\n' from being converted to '\\r\\n' (carriage return + line feed).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name", OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -582,8 +582,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
case (int) OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol= find_type(argument, &sql_protocol_typelib, 0))
- == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index ccf7fd9880d..c68d2d9f724 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -123,7 +123,7 @@ static struct my_option my_long_options[] =
{"replace", 'r', "If duplicate unique key was found, replace old row.",
(gptr*) &replace, (gptr*) &replace, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name", OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -203,7 +203,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
#endif
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index d9e2a1fa92a..9c9fdf6e443 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -188,7 +188,7 @@ static struct my_option my_long_options[] =
{"protocol", OPT_MYSQL_PROTOCOL, "The protocol of connection (tcp,socket,pipe,memory).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name", OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -268,7 +268,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
{
- if ((opt_protocol = find_type(argument, &sql_protocol_typelib,0)) == ~(ulong) 0)
+ if ((opt_protocol= find_type(argument, &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", argument);
exit(1);
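
The same one-line change to the --protocol handling appears in mysqlcheck, mysqldump, mysqlimport and mysqlshow above: the result of find_type() is now rejected whenever it is not a positive index, instead of being compared against ~(ulong)0. A minimal sketch of that calling convention, using a hypothetical lookup_type() rather than the real find_type()/TYPELIB API:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in: returns a 1-based index on a unique match,
   0 when the name is unknown, a negative value when it is ambiguous */
static int lookup_type(const char *name, const char *const *list, int count)
{
    int i, found = 0, idx = 0;
    size_t len = strlen(name);

    for (i = 0; i < count; i++)
        if (strncmp(list[i], name, len) == 0) {
            found++;
            idx = i + 1;
        }
    if (found > 1)
        return -1;              /* ambiguous prefix */
    return found ? idx : 0;     /* 0: not found */
}

int main(void)
{
    static const char *const protocols[] = { "tcp", "socket", "pipe", "memory" };
    const char *arg = "sock";
    int opt_protocol;

    /* a single "<= 0" test catches both "unknown" and "ambiguous" */
    if ((opt_protocol = lookup_type(arg, protocols, 4)) <= 0) {
        fprintf(stderr, "Unknown option to protocol: %s\n", arg);
        return 1;
    }
    printf("protocol #%d selected\n", opt_protocol);
    return 0;
}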
diff --git a/client/mysqltest.c b/client/mysqltest.c
index 8307fe44bd9..3287c9738d3 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -641,7 +641,7 @@ VAR* var_get(const char* var_name, const char** var_name_end, my_bool raw,
if (*var_name != '$')
goto err;
digit = *++var_name - '0';
- if (!(digit < 10 && digit >= 0))
+ if (digit < 0 || digit >= 10)
{
const char* save_var_name = var_name, *end;
uint length;
@@ -660,7 +660,7 @@ VAR* var_get(const char* var_name, const char** var_name_end, my_bool raw,
length < MAX_VAR_NAME)
{
char buff[MAX_VAR_NAME+1];
- strmake(buff, save_var_name, length);
+ strmake(buff, save_var_name, length);
v= var_from_env(buff, "");
}
var_name--; /* Point at last character */
diff --git a/cmd-line-utils/Makefile.am b/cmd-line-utils/Makefile.am
index 7214d1231f9..88aaedde06d 100644
--- a/cmd-line-utils/Makefile.am
+++ b/cmd-line-utils/Makefile.am
@@ -1,3 +1,20 @@
+# Copyright (C) 2004 MySQL AB
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Library General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Library General Public License for more details.
+#
+# You should have received a copy of the GNU Library General Public
+# License along with this library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA
+
## Process this file with automake to create Makefile.in
SUBDIRS= @readline_basedir@
diff --git a/cmd-line-utils/libedit/Makefile.am b/cmd-line-utils/libedit/Makefile.am
index eb6b930c0b2..a3d73a7082a 100644
--- a/cmd-line-utils/libedit/Makefile.am
+++ b/cmd-line-utils/libedit/Makefile.am
@@ -7,28 +7,30 @@ AHDR=vi.h emacs.h common.h
INCLUDES = -I$(top_srcdir)/include -I$(srcdir)/../.. -I..
-noinst_LIBRARIES = liblibedit.a
+noinst_LIBRARIES = libedit.a
-liblibedit_a_SOURCES = chared.c el.c fgetln.c history.c map.c \
- prompt.c readline.c search.c \
- strlcpy.c tokenizer.c vi.c common.c \
- emacs.c hist.c key.c parse.c read.c \
- refresh.c sig.c term.c tty.c help.c \
- fcns.c
+libedit_a_SOURCES = chared.c el.c history.c map.c prompt.c readline.c \
+ search.c tokenizer.c vi.c common.c emacs.c \
+ hist.c key.c parse.c read.c refresh.c sig.c term.c \
+ tty.c help.c fcns.c
+
+EXTRA_libedit_a_SOURCES = np/unvis.c np/strlcpy.c np/vis.c np/strlcat.c \
+ np/fgetln.c
+
+libedit_a_LIBADD = @LIBEDIT_LOBJECTS@
+libedit_a_DEPENDENCIES = @LIBEDIT_LOBJECTS@
pkginclude_HEADERS = readline/readline.h
-noinst_HEADERS = chared.h el.h histedit.h key.h \
- parse.h refresh.h sig.h sys.h \
- tokenizer.h compat.h compat_conf.h fgetln.h \
- hist.h map.h prompt.h search.h \
- strlcpy.h libedit_term.h tty.h
+noinst_HEADERS = chared.h el.h histedit.h key.h parse.h refresh.h sig.h \
+ sys.h tokenizer.h config.h hist.h map.h prompt.h read.h \
+ search.h tty.h libedit_term.h
-EXTRA_DIST = makelist.sh
+EXTRA_DIST = makelist.sh np/unvis.c np/strlcpy.c np/vis.c np/vis.h np/strlcat.c np/fgetln.c
-CLEANFILES = makelist
+CLEANFILES = makelist common.h emacs.h vi.h fcns.h help.h fcns.c help.c
-DEFS = -DUNDEF_THREADS_HACK -DHAVE_CONFIG_H -DNO_KILL_INTR
+DEFS = -DUNDEF_THREADS_HACK -DHAVE_CONFIG_H -DNO_KILL_INTR
SUFFIXES = .sh
@@ -73,13 +75,11 @@ fcns.c: ${AHDR} fcns.h makelist
chared.o: vi.h emacs.h common.h help.h fcns.h
el.o: vi.h emacs.h common.h help.h fcns.h
-fgetln.o: vi.h emacs.h common.h help.h fcns.h
history.o: vi.h emacs.h common.h help.h fcns.h
map.o: vi.h emacs.h common.h help.h fcns.h
prompt.o: vi.h emacs.h common.h help.h fcns.h
readline.o: vi.h emacs.h common.h help.h fcns.h
search.o: vi.h emacs.h common.h help.h fcns.h
-strlcpy.o: vi.h emacs.h common.h help.h fcns.h
tokenizer.o: vi.h emacs.h common.h help.h fcns.h
vi.o: vi.h emacs.h common.h help.h fcns.h
common.o: vi.h emacs.h common.h help.h fcns.h
diff --git a/cmd-line-utils/libedit/chared.c b/cmd-line-utils/libedit/chared.c
index 6ac051c3bb0..62a407e66a8 100644
--- a/cmd-line-utils/libedit/chared.c
+++ b/cmd-line-utils/libedit/chared.c
@@ -1,4 +1,4 @@
-/* $NetBSD: chared.c,v 1.14 2001/05/17 01:02:17 christos Exp $ */
+/* $NetBSD: chared.c,v 1.18 2002/11/20 16:50:08 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,13 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)chared.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: chared.c,v 1.18 2002/11/20 16:50:08 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* chared.c: Character editor utilities
*/
-#include "sys.h"
-
#include <stdlib.h>
#include "el.h"
@@ -53,17 +58,36 @@
* Handle state for the vi undo command
*/
protected void
-cv_undo(EditLine *el,int action, size_t size, char *ptr)
+cv_undo(EditLine *el)
{
c_undo_t *vu = &el->el_chared.c_undo;
- vu->action = action;
- vu->ptr = ptr;
- vu->isize = size;
- (void) memcpy(vu->buf, vu->ptr, size);
-#ifdef DEBUG_UNDO
- (void) fprintf(el->el_errfile, "Undo buffer \"%s\" size = +%d -%d\n",
- vu->ptr, vu->isize, vu->dsize);
-#endif
+ c_redo_t *r = &el->el_chared.c_redo;
+ int size;
+
+ /* Save entire line for undo */
+ size = el->el_line.lastchar - el->el_line.buffer;
+ vu->len = size;
+ vu->cursor = el->el_line.cursor - el->el_line.buffer;
+ memcpy(vu->buf, el->el_line.buffer, (size_t)size);
+
+ /* save command info for redo */
+ r->count = el->el_state.doingarg ? el->el_state.argument : 0;
+ r->action = el->el_chared.c_vcmd.action;
+ r->pos = r->buf;
+ r->cmd = el->el_state.thiscmd;
+ r->ch = el->el_state.thisch;
+}
+
+/* cv_yank():
+ * Save yank/delete data for paste
+ */
+protected void
+cv_yank(EditLine *el, const char *ptr, int size)
+{
+ c_kill_t *k = &el->el_chared.c_kill;
+
+ memcpy(k->buf, ptr, size +0u);
+ k->last = k->buf + size;
}
@@ -75,8 +99,10 @@ c_insert(EditLine *el, int num)
{
char *cp;
- if (el->el_line.lastchar + num >= el->el_line.limit)
- return; /* can't go past end of buffer */
+ if (el->el_line.lastchar + num >= el->el_line.limit) {
+ if (!ch_enlargebufs(el, num +0u))
+ return; /* can't go past end of buffer */
+ }
if (el->el_line.cursor < el->el_line.lastchar) {
/* if I must move chars */
@@ -97,12 +123,14 @@ c_delafter(EditLine *el, int num)
if (el->el_line.cursor + num > el->el_line.lastchar)
num = el->el_line.lastchar - el->el_line.cursor;
+ if (el->el_map.current != el->el_map.emacs) {
+ cv_undo(el);
+ cv_yank(el, el->el_line.cursor, num);
+ }
+
if (num > 0) {
char *cp;
- if (el->el_map.current != el->el_map.emacs)
- cv_undo(el, INSERT, (size_t)num, el->el_line.cursor);
-
for (cp = el->el_line.cursor; cp <= el->el_line.lastchar; cp++)
*cp = cp[num];
@@ -121,13 +149,14 @@ c_delbefore(EditLine *el, int num)
if (el->el_line.cursor - num < el->el_line.buffer)
num = el->el_line.cursor - el->el_line.buffer;
+ if (el->el_map.current != el->el_map.emacs) {
+ cv_undo(el);
+ cv_yank(el, el->el_line.cursor - num, num);
+ }
+
if (num > 0) {
char *cp;
- if (el->el_map.current != el->el_map.emacs)
- cv_undo(el, INSERT, (size_t)num,
- el->el_line.cursor - num);
-
for (cp = el->el_line.cursor - num;
cp <= el->el_line.lastchar;
cp++)
@@ -144,7 +173,7 @@ c_delbefore(EditLine *el, int num)
protected int
ce__isword(int p)
{
- return (isalpha(p) || isdigit(p) || strchr("*?_-.[]~=", p) != NULL);
+ return (isalnum(p) || strchr("*?_-.[]~=", p) != NULL);
}
@@ -154,6 +183,20 @@ ce__isword(int p)
protected int
cv__isword(int p)
{
+ if (isalnum(p) || p == '_')
+ return 1;
+ if (isgraph(p))
+ return 2;
+ return 0;
+}
+
+
+/* cv__isWord():
+ * Return if p is part of a big word according to vi
+ */
+protected int
+cv__isWord(int p)
+{
return (!isspace(p));
}
@@ -216,7 +259,7 @@ cv_next_word(EditLine *el, char *p, char *high, int n, int (*wtest)(int))
* vi historically deletes with cw only the word preserving the
* trailing whitespace! This is not what 'w' does..
*/
- if (el->el_chared.c_vcmd.action != (DELETE|INSERT))
+ if (n || el->el_chared.c_vcmd.action != (DELETE|INSERT))
while ((p < high) && isspace((unsigned char) *p))
p++;
}
@@ -233,26 +276,19 @@ cv_next_word(EditLine *el, char *p, char *high, int n, int (*wtest)(int))
* Find the previous word vi style
*/
protected char *
-cv_prev_word(EditLine *el, char *p, char *low, int n, int (*wtest)(int))
+cv_prev_word(char *p, char *low, int n, int (*wtest)(int))
{
int test;
+ p--;
while (n--) {
- p--;
- /*
- * vi historically deletes with cb only the word preserving the
- * leading whitespace! This is not what 'b' does..
- */
- if (el->el_chared.c_vcmd.action != (DELETE|INSERT))
- while ((p > low) && isspace((unsigned char) *p))
- p--;
+ while ((p > low) && isspace((unsigned char) *p))
+ p--;
test = (*wtest)((unsigned char) *p);
while ((p >= low) && (*wtest)((unsigned char) *p) == test)
p--;
- p++;
- while (isspace((unsigned char) *p))
- p++;
}
+ p++;
/* p now points where we want it */
if (p < low)
@@ -303,47 +339,34 @@ protected void
cv_delfini(EditLine *el)
{
int size;
- int oaction;
+ int action = el->el_chared.c_vcmd.action;
- if (el->el_chared.c_vcmd.action & INSERT)
+ if (action & INSERT)
el->el_map.current = el->el_map.key;
- oaction = el->el_chared.c_vcmd.action;
- el->el_chared.c_vcmd.action = NOP;
-
if (el->el_chared.c_vcmd.pos == 0)
+ /* sanity */
return;
-
- if (el->el_line.cursor > el->el_chared.c_vcmd.pos) {
- size = (int) (el->el_line.cursor - el->el_chared.c_vcmd.pos);
- c_delbefore(el, size);
- el->el_line.cursor = el->el_chared.c_vcmd.pos;
- re_refresh_cursor(el);
- } else if (el->el_line.cursor < el->el_chared.c_vcmd.pos) {
- size = (int)(el->el_chared.c_vcmd.pos - el->el_line.cursor);
- c_delafter(el, size);
- } else {
+ size = el->el_line.cursor - el->el_chared.c_vcmd.pos;
+ if (size == 0)
size = 1;
- c_delafter(el, size);
- }
- switch (oaction) {
- case DELETE|INSERT:
- el->el_chared.c_undo.action = DELETE|INSERT;
- break;
- case DELETE:
- el->el_chared.c_undo.action = INSERT;
- break;
- case NOP:
- case INSERT:
- default:
- EL_ABORT((el->el_errfile, "Bad oaction %d\n", oaction));
- break;
+ el->el_line.cursor = el->el_chared.c_vcmd.pos;
+ if (action & YANK) {
+ if (size > 0)
+ cv_yank(el, el->el_line.cursor, size);
+ else
+ cv_yank(el, el->el_line.cursor + size, -size);
+ } else {
+ if (size > 0) {
+ c_delafter(el, size);
+ re_refresh_cursor(el);
+ } else {
+ c_delbefore(el, -size);
+ el->el_line.cursor += size;
+ }
}
-
-
- el->el_chared.c_undo.ptr = el->el_line.cursor;
- el->el_chared.c_undo.dsize = size;
+ el->el_chared.c_vcmd.action = NOP;
}
@@ -373,21 +396,19 @@ ce__endword(char *p, char *high, int n)
* Go to the end of this word according to vi
*/
protected char *
-cv__endword(char *p, char *high, int n)
+cv__endword(char *p, char *high, int n, int (*wtest)(int))
{
+ int test;
+
p++;
while (n--) {
while ((p < high) && isspace((unsigned char) *p))
p++;
- if (isalnum((unsigned char) *p))
- while ((p < high) && isalnum((unsigned char) *p))
- p++;
- else
- while ((p < high) && !(isspace((unsigned char) *p) ||
- isalnum((unsigned char) *p)))
- p++;
+ test = (*wtest)((unsigned char) *p);
+ while ((p < high) && (*wtest)((unsigned char) *p) == test)
+ p++;
}
p--;
return (p);
@@ -406,20 +427,23 @@ ch_init(EditLine *el)
(void) memset(el->el_line.buffer, 0, EL_BUFSIZ);
el->el_line.cursor = el->el_line.buffer;
el->el_line.lastchar = el->el_line.buffer;
- el->el_line.limit = &el->el_line.buffer[EL_BUFSIZ - 2];
+ el->el_line.limit = &el->el_line.buffer[EL_BUFSIZ - EL_LEAVE];
el->el_chared.c_undo.buf = (char *) el_malloc(EL_BUFSIZ);
if (el->el_chared.c_undo.buf == NULL)
return (-1);
(void) memset(el->el_chared.c_undo.buf, 0, EL_BUFSIZ);
- el->el_chared.c_undo.action = NOP;
- el->el_chared.c_undo.isize = 0;
- el->el_chared.c_undo.dsize = 0;
- el->el_chared.c_undo.ptr = el->el_line.buffer;
+ el->el_chared.c_undo.len = -1;
+ el->el_chared.c_undo.cursor = 0;
+ el->el_chared.c_redo.buf = (char *) el_malloc(EL_BUFSIZ);
+ if (el->el_chared.c_redo.buf == NULL)
+ return (-1);
+ el->el_chared.c_redo.pos = el->el_chared.c_redo.buf;
+ el->el_chared.c_redo.lim = el->el_chared.c_redo.buf + EL_BUFSIZ;
+ el->el_chared.c_redo.cmd = ED_UNASSIGNED;
el->el_chared.c_vcmd.action = NOP;
el->el_chared.c_vcmd.pos = el->el_line.buffer;
- el->el_chared.c_vcmd.ins = el->el_line.buffer;
el->el_chared.c_kill.buf = (char *) el_malloc(EL_BUFSIZ);
if (el->el_chared.c_kill.buf == NULL)
@@ -454,14 +478,11 @@ ch_reset(EditLine *el)
el->el_line.cursor = el->el_line.buffer;
el->el_line.lastchar = el->el_line.buffer;
- el->el_chared.c_undo.action = NOP;
- el->el_chared.c_undo.isize = 0;
- el->el_chared.c_undo.dsize = 0;
- el->el_chared.c_undo.ptr = el->el_line.buffer;
+ el->el_chared.c_undo.len = -1;
+ el->el_chared.c_undo.cursor = 0;
el->el_chared.c_vcmd.action = NOP;
el->el_chared.c_vcmd.pos = el->el_line.buffer;
- el->el_chared.c_vcmd.ins = el->el_line.buffer;
el->el_chared.c_kill.mark = el->el_line.buffer;
@@ -516,7 +537,8 @@ ch_enlargebufs(el, addlen)
el->el_line.buffer = newbuffer;
el->el_line.cursor = newbuffer + (el->el_line.cursor - oldbuf);
el->el_line.lastchar = newbuffer + (el->el_line.lastchar - oldbuf);
- el->el_line.limit = &newbuffer[newsz - EL_LEAVE];
+ /* don't set new size until all buffers are enlarged */
+ el->el_line.limit = &newbuffer[sz - EL_LEAVE];
/*
* Reallocate kill buffer.
@@ -545,14 +567,22 @@ ch_enlargebufs(el, addlen)
/* zero the newly added memory, leave old data in */
(void) memset(&newbuffer[sz], 0, newsz - sz);
-
- el->el_chared.c_undo.ptr = el->el_line.buffer +
- (el->el_chared.c_undo.ptr - oldbuf);
el->el_chared.c_undo.buf = newbuffer;
+
+ newbuffer = el_realloc(el->el_chared.c_redo.buf, newsz);
+ if (!newbuffer)
+ return 0;
+ el->el_chared.c_redo.pos = newbuffer +
+ (el->el_chared.c_redo.pos - el->el_chared.c_redo.buf);
+ el->el_chared.c_redo.lim = newbuffer +
+ (el->el_chared.c_redo.lim - el->el_chared.c_redo.buf);
+ el->el_chared.c_redo.buf = newbuffer;
if (!hist_enlargebuf(el, sz, newsz))
return 0;
+ /* Safe to set enlarged buffer size */
+ el->el_line.limit = &newbuffer[newsz - EL_LEAVE];
return 1;
}
@@ -567,6 +597,11 @@ ch_end(EditLine *el)
el->el_line.limit = NULL;
el_free((ptr_t) el->el_chared.c_undo.buf);
el->el_chared.c_undo.buf = NULL;
+ el_free((ptr_t) el->el_chared.c_redo.buf);
+ el->el_chared.c_redo.buf = NULL;
+ el->el_chared.c_redo.pos = NULL;
+ el->el_chared.c_redo.lim = NULL;
+ el->el_chared.c_redo.cmd = ED_UNASSIGNED;
el_free((ptr_t) el->el_chared.c_kill.buf);
el->el_chared.c_kill.buf = NULL;
el_free((ptr_t) el->el_chared.c_macro.macro);
@@ -619,51 +654,64 @@ el_deletestr(EditLine *el, int n)
* Get a string
*/
protected int
-c_gets(EditLine *el, char *buf)
+c_gets(EditLine *el, char *buf, const char *prompt)
{
char ch;
- int len = 0;
+ int len;
+ char *cp = el->el_line.buffer;
+
+ if (prompt) {
+ len = strlen(prompt);
+ memcpy(cp, prompt, len + 0u);
+ cp += len;
+ }
+ len = 0;
+
+ for (;;) {
+ el->el_line.cursor = cp;
+ *cp = ' ';
+ el->el_line.lastchar = cp + 1;
+ re_refresh(el);
+
+ if (el_getc(el, &ch) != 1) {
+ ed_end_of_file(el, 0);
+ len = -1;
+ break;
+ }
- for (ch = 0; ch == 0;) {
- if (el_getc(el, &ch) != 1)
- return (ed_end_of_file(el, 0));
switch (ch) {
+
case 0010: /* Delete and backspace */
case 0177:
- if (len > 1) {
- *el->el_line.cursor-- = '\0';
- el->el_line.lastchar = el->el_line.cursor;
- buf[len--] = '\0';
- } else {
- el->el_line.buffer[0] = '\0';
- el->el_line.lastchar = el->el_line.buffer;
- el->el_line.cursor = el->el_line.buffer;
- return (CC_REFRESH);
+ if (len <= 0) {
+ len = -1;
+ break;
}
- re_refresh(el);
- ch = 0;
- break;
+ cp--;
+ continue;
case 0033: /* ESC */
case '\r': /* Newline */
case '\n':
+ buf[len] = ch;
break;
default:
- if (len >= EL_BUFSIZ)
+ if (len >= EL_BUFSIZ - 16)
term_beep(el);
else {
buf[len++] = ch;
- *el->el_line.cursor++ = ch;
- el->el_line.lastchar = el->el_line.cursor;
+ *cp++ = ch;
}
- re_refresh(el);
- ch = 0;
- break;
+ continue;
}
+ break;
}
- buf[len] = ch;
- return (len);
+
+ el->el_line.buffer[0] = '\0';
+ el->el_line.lastchar = el->el_line.buffer;
+ el->el_line.cursor = el->el_line.buffer;
+ return len;
}
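
The chared.c rewrite above drops the per-action undo bookkeeping (action/isize/dsize/ptr) in favour of saving the whole line plus cursor position before each destructive vi command, with a separate redo record for replaying the command itself. A small free-standing sketch of that whole-line scheme, with assumed buffer sizes and plain structs rather than libedit's:

#include <string.h>
#include <stdio.h>

#define BUFSZ 1024

struct line { char buf[BUFSZ]; int len; int cursor; };
struct undo { char buf[BUFSZ]; int len; int cursor; };   /* len < 0: nothing saved */

static void save_undo(struct undo *u, const struct line *l)
{
    memcpy(u->buf, l->buf, (size_t)l->len);
    u->len = l->len;
    u->cursor = l->cursor;
}

static void do_undo(struct undo *u, struct line *l)
{
    struct undo tmp;
    if (u->len < 0)
        return;                          /* nothing to undo */
    save_undo(&tmp, l);                  /* swap, so undoing twice restores */
    memcpy(l->buf, u->buf, (size_t)u->len);
    l->len = u->len;
    l->cursor = u->cursor;
    *u = tmp;
}

int main(void)
{
    struct line l = { "hello world", 11, 5 };
    struct undo u = { .len = -1 };

    save_undo(&u, &l);                              /* before a destructive command */
    memcpy(l.buf, "hello", 5); l.len = 5; l.cursor = 5;   /* e.g. vi 'dw' */
    do_undo(&u, &l);                                /* back to "hello world", cursor 5 */
    printf("%.*s (cursor %d)\n", l.len, l.buf, l.cursor);
    return 0;
}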
diff --git a/cmd-line-utils/libedit/chared.h b/cmd-line-utils/libedit/chared.h
index 2eb9ad32886..d2e6f742413 100644
--- a/cmd-line-utils/libedit/chared.h
+++ b/cmd-line-utils/libedit/chared.h
@@ -1,4 +1,4 @@
-/* $NetBSD: chared.h,v 1.6 2001/01/10 07:45:41 jdolecek Exp $ */
+/* $NetBSD: chared.h,v 1.11 2002/11/20 16:50:08 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -71,23 +71,31 @@ typedef struct c_macro_t {
} c_macro_t;
/*
- * Undo information for both vi and emacs
+ * Undo information for vi - no undo in emacs (yet)
*/
typedef struct c_undo_t {
- int action;
- size_t isize;
- size_t dsize;
- char *ptr;
- char *buf;
+ int len; /* length of saved line */
+ int cursor; /* position of saved cursor */
+ char *buf; /* full saved text */
} c_undo_t;
+/* redo for vi */
+typedef struct c_redo_t {
+ char *buf; /* redo insert key sequence */
+ char *pos;
+ char *lim;
+ el_action_t cmd; /* command to redo */
+ char ch; /* char that invoked it */
+ int count;
+ int action; /* from cv_action() */
+} c_redo_t;
+
/*
* Current action information for vi
*/
typedef struct c_vcmd_t {
int action;
char *pos;
- char *ins;
} c_vcmd_t;
/*
@@ -106,6 +114,7 @@ typedef struct c_kill_t {
typedef struct el_chared_t {
c_undo_t c_undo;
c_kill_t c_kill;
+ c_redo_t c_redo;
c_vcmd_t c_vcmd;
c_macro_t c_macro;
} el_chared_t;
@@ -120,10 +129,10 @@ typedef struct el_chared_t {
#define NOP 0x00
#define DELETE 0x01
#define INSERT 0x02
-#define CHANGE 0x04
+#define YANK 0x04
-#define CHAR_FWD 0
-#define CHAR_BACK 1
+#define CHAR_FWD (+1)
+#define CHAR_BACK (-1)
#define MODE_INSERT 0
#define MODE_REPLACE 1
@@ -137,23 +146,25 @@ typedef struct el_chared_t {
protected int cv__isword(int);
+protected int cv__isWord(int);
protected void cv_delfini(EditLine *);
-protected char *cv__endword(char *, char *, int);
+protected char *cv__endword(char *, char *, int, int (*)(int));
protected int ce__isword(int);
-protected void cv_undo(EditLine *, int, size_t, char *);
+protected void cv_undo(EditLine *);
+protected void cv_yank(EditLine *, const char *, int);
protected char *cv_next_word(EditLine*, char *, char *, int, int (*)(int));
-protected char *cv_prev_word(EditLine*, char *, char *, int, int (*)(int));
+protected char *cv_prev_word(char *, char *, int, int (*)(int));
protected char *c__next_word(char *, char *, int, int (*)(int));
protected char *c__prev_word(char *, char *, int, int (*)(int));
protected void c_insert(EditLine *, int);
protected void c_delbefore(EditLine *, int);
protected void c_delafter(EditLine *, int);
-protected int c_gets(EditLine *, char *);
+protected int c_gets(EditLine *, char *, const char *);
protected int c_hpos(EditLine *);
protected int ch_init(EditLine *);
protected void ch_reset(EditLine *);
-protected int ch_enlargebufs __P((EditLine *, size_t));
+protected int ch_enlargebufs(EditLine *, size_t);
protected void ch_end(EditLine *);
#endif /* _h_el_chared */
diff --git a/cmd-line-utils/libedit/common.c b/cmd-line-utils/libedit/common.c
index 9ac6af9ac1b..f290057568a 100644
--- a/cmd-line-utils/libedit/common.c
+++ b/cmd-line-utils/libedit/common.c
@@ -1,4 +1,4 @@
-/* $NetBSD: common.c,v 1.10 2001/01/10 07:45:41 jdolecek Exp $ */
+/* $NetBSD: common.c,v 1.14 2002/11/20 16:50:08 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)common.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: common.c,v 1.14 2002/11/20 16:50:08 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* common.c: Common Editor functions
*/
-#include "sys.h"
#include "el.h"
/* ed_end_of_file():
@@ -66,7 +72,7 @@ ed_end_of_file(EditLine *el, int c __attribute__((unused)))
protected el_action_t
ed_insert(EditLine *el, int c)
{
- int i;
+ int count = el->el_state.argument;
if (c == '\0')
return (CC_ERROR);
@@ -74,42 +80,28 @@ ed_insert(EditLine *el, int c)
if (el->el_line.lastchar + el->el_state.argument >=
el->el_line.limit) {
/* end of buffer space, try to allocate more */
- if (!ch_enlargebufs(el, (size_t) el->el_state.argument))
+ if (!ch_enlargebufs(el, (size_t) count))
return CC_ERROR; /* error allocating more */
}
- if (el->el_state.argument == 1) {
- if (el->el_state.inputmode != MODE_INSERT) {
- el->el_chared.c_undo.buf[el->el_chared.c_undo.isize++] =
- *el->el_line.cursor;
- el->el_chared.c_undo.buf[el->el_chared.c_undo.isize] =
- '\0';
- c_delafter(el, 1);
- }
- c_insert(el, 1);
+ if (count == 1) {
+ if (el->el_state.inputmode == MODE_INSERT
+ || el->el_line.cursor >= el->el_line.lastchar)
+ c_insert(el, 1);
*el->el_line.cursor++ = c;
- el->el_state.doingarg = 0; /* just in case */
re_fastaddc(el); /* fast refresh for one char. */
} else {
- if (el->el_state.inputmode != MODE_INSERT) {
- for (i = 0; i < el->el_state.argument; i++)
- el->el_chared.c_undo.buf[el->el_chared.c_undo.isize++] =
- el->el_line.cursor[i];
-
- el->el_chared.c_undo.buf[el->el_chared.c_undo.isize] =
- '\0';
- c_delafter(el, el->el_state.argument);
- }
- c_insert(el, el->el_state.argument);
+ if (el->el_state.inputmode != MODE_REPLACE_1)
+ c_insert(el, el->el_state.argument);
- while (el->el_state.argument--)
+ while (count-- && el->el_line.cursor < el->el_line.lastchar)
*el->el_line.cursor++ = c;
re_refresh(el);
}
if (el->el_state.inputmode == MODE_REPLACE_1)
- (void) vi_command_mode(el, 0);
+ return vi_command_mode(el, 0);
return (CC_NORM);
}
@@ -229,7 +221,7 @@ ed_move_to_end(EditLine *el, int c __attribute__((unused)))
#ifdef VI_MOVE
el->el_line.cursor--;
#endif
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -253,7 +245,7 @@ ed_move_to_beg(EditLine *el, int c __attribute__((unused)))
/* We want FIRST non space character */
while (isspace((unsigned char) *el->el_line.cursor))
el->el_line.cursor++;
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -295,16 +287,20 @@ protected el_action_t
/*ARGSUSED*/
ed_next_char(EditLine *el, int c __attribute__((unused)))
{
+ char *lim = el->el_line.lastchar;
- if (el->el_line.cursor >= el->el_line.lastchar)
+ if (el->el_line.cursor >= lim ||
+ (el->el_line.cursor == lim - 1 &&
+ el->el_map.type == MAP_VI &&
+ el->el_chared.c_vcmd.action == NOP))
return (CC_ERROR);
el->el_line.cursor += el->el_state.argument;
- if (el->el_line.cursor > el->el_line.lastchar)
- el->el_line.cursor = el->el_line.lastchar;
+ if (el->el_line.cursor > lim)
+ el->el_line.cursor = lim;
if (el->el_map.type == MAP_VI)
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -330,7 +326,7 @@ ed_prev_word(EditLine *el, int c __attribute__((unused)))
ce__isword);
if (el->el_map.type == MAP_VI)
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -353,7 +349,7 @@ ed_prev_char(EditLine *el, int c __attribute__((unused)))
el->el_line.cursor = el->el_line.buffer;
if (el->el_map.type == MAP_VI)
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -405,25 +401,9 @@ ed_digit(EditLine *el, int c)
(el->el_state.argument * 10) + (c - '0');
}
return (CC_ARGHACK);
- } else {
- if (el->el_line.lastchar + 1 >= el->el_line.limit) {
- if (!ch_enlargebufs(el, 1))
- return (CC_ERROR);
- }
-
- if (el->el_state.inputmode != MODE_INSERT) {
- el->el_chared.c_undo.buf[el->el_chared.c_undo.isize++] =
- *el->el_line.cursor;
- el->el_chared.c_undo.buf[el->el_chared.c_undo.isize] =
- '\0';
- c_delafter(el, 1);
- }
- c_insert(el, 1);
- *el->el_line.cursor++ = c;
- el->el_state.doingarg = 0;
- re_fastaddc(el);
}
- return (CC_NORM);
+
+ return ed_insert(el, c);
}
@@ -457,12 +437,11 @@ ed_argument_digit(EditLine *el, int c)
*/
protected el_action_t
/*ARGSUSED*/
-ed_unassigned(EditLine *el, int c __attribute__((unused)))
+ed_unassigned(EditLine *el __attribute__((unused)),
+ int c __attribute__((unused)))
{
- term_beep(el);
- term__flush();
- return (CC_NORM);
+ return (CC_ERROR);
}
@@ -490,7 +469,7 @@ ed_tty_sigint(EditLine *el __attribute__((unused)),
*/
protected el_action_t
/*ARGSUSED*/
-ed_tty_dsusp(EditLine *el __attribute__((unused)),
+ed_tty_dsusp(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -504,7 +483,7 @@ ed_tty_dsusp(EditLine *el __attribute__((unused)),
*/
protected el_action_t
/*ARGSUSED*/
-ed_tty_flush_output(EditLine *el __attribute__((unused)),
+ed_tty_flush_output(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -518,7 +497,7 @@ ed_tty_flush_output(EditLine *el __attribute__((unused)),
*/
protected el_action_t
/*ARGSUSED*/
-ed_tty_sigquit(EditLine *el __attribute__((unused)),
+ed_tty_sigquit(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -532,7 +511,7 @@ ed_tty_sigquit(EditLine *el __attribute__((unused)),
*/
protected el_action_t
/*ARGSUSED*/
-ed_tty_sigtstp(EditLine *el __attribute__((unused)),
+ed_tty_sigtstp(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -546,7 +525,7 @@ ed_tty_sigtstp(EditLine *el __attribute__((unused)),
*/
protected el_action_t
/*ARGSUSED*/
-ed_tty_stop_output(EditLine *el __attribute__((unused)),
+ed_tty_stop_output(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -560,7 +539,7 @@ ed_tty_stop_output(EditLine *el __attribute__((unused)),
*/
protected el_action_t
/*ARGSUSED*/
-ed_tty_start_output(EditLine *el __attribute__((unused)),
+ed_tty_start_output(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -580,8 +559,6 @@ ed_newline(EditLine *el, int c __attribute__((unused)))
re_goto_bottom(el);
*el->el_line.lastchar++ = '\n';
*el->el_line.lastchar = '\0';
- if (el->el_map.type == MAP_VI)
- el->el_chared.c_vcmd.ins = el->el_line.buffer;
return (CC_NEWLINE);
}
@@ -627,7 +604,7 @@ ed_clear_screen(EditLine *el, int c __attribute__((unused)))
*/
protected el_action_t
/*ARGSUSED*/
-ed_redisplay(EditLine *el __attribute__((unused)),
+ed_redisplay(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -655,7 +632,7 @@ ed_start_over(EditLine *el, int c __attribute__((unused)))
*/
protected el_action_t
/*ARGSUSED*/
-ed_sequence_lead_in(EditLine *el __attribute__((unused)),
+ed_sequence_lead_in(EditLine *el __attribute__((unused)),
int c __attribute__((unused)))
{
@@ -672,8 +649,9 @@ protected el_action_t
ed_prev_history(EditLine *el, int c __attribute__((unused)))
{
char beep = 0;
+ int sv_event = el->el_history.eventno;
- el->el_chared.c_undo.action = NOP;
+ el->el_chared.c_undo.len = -1;
*el->el_line.lastchar = '\0'; /* just in case */
if (el->el_history.eventno == 0) { /* save the current buffer
@@ -686,15 +664,17 @@ ed_prev_history(EditLine *el, int c __attribute__((unused)))
el->el_history.eventno += el->el_state.argument;
if (hist_get(el) == CC_ERROR) {
+ if (el->el_map.type == MAP_VI) {
+ el->el_history.eventno = sv_event;
+ return CC_ERROR;
+ }
beep = 1;
/* el->el_history.eventno was fixed by first call */
(void) hist_get(el);
}
- re_refresh(el);
if (beep)
- return (CC_ERROR);
- else
- return (CC_NORM); /* was CC_UP_HIST */
+ return CC_REFRESH_BEEP;
+ return CC_REFRESH;
}
@@ -706,17 +686,22 @@ protected el_action_t
/*ARGSUSED*/
ed_next_history(EditLine *el, int c __attribute__((unused)))
{
+ el_action_t beep = CC_REFRESH, rval;
- el->el_chared.c_undo.action = NOP;
+ el->el_chared.c_undo.len = -1;
*el->el_line.lastchar = '\0'; /* just in case */
el->el_history.eventno -= el->el_state.argument;
if (el->el_history.eventno < 0) {
el->el_history.eventno = 0;
- return (CC_ERROR);/* make it beep */
+ beep = CC_REFRESH_BEEP;
}
- return (hist_get(el));
+ rval = hist_get(el);
+ if (rval == CC_REFRESH)
+ return beep;
+ return rval;
+
}
@@ -733,7 +718,7 @@ ed_search_prev_history(EditLine *el, int c __attribute__((unused)))
bool_t found = 0;
el->el_chared.c_vcmd.action = NOP;
- el->el_chared.c_undo.action = NOP;
+ el->el_chared.c_undo.len = -1;
*el->el_line.lastchar = '\0'; /* just in case */
if (el->el_history.eventno < 0) {
#ifdef DEBUG_EDIT
@@ -801,7 +786,7 @@ ed_search_next_history(EditLine *el, int c __attribute__((unused)))
bool_t found = 0;
el->el_chared.c_vcmd.action = NOP;
- el->el_chared.c_undo.action = NOP;
+ el->el_chared.c_undo.len = -1;
*el->el_line.lastchar = '\0'; /* just in case */
if (el->el_history.eventno == 0)
@@ -930,25 +915,13 @@ ed_command(EditLine *el, int c __attribute__((unused)))
char tmpbuf[EL_BUFSIZ];
int tmplen;
- el->el_line.buffer[0] = '\0';
- el->el_line.lastchar = el->el_line.buffer;
- el->el_line.cursor = el->el_line.buffer;
-
- c_insert(el, 3); /* prompt + ": " */
- *el->el_line.cursor++ = '\n';
- *el->el_line.cursor++ = ':';
- *el->el_line.cursor++ = ' ';
- re_refresh(el);
+ tmplen = c_gets(el, tmpbuf, "\n: ");
+ term__putc('\n');
- tmplen = c_gets(el, tmpbuf);
- tmpbuf[tmplen] = '\0';
-
- el->el_line.buffer[0] = '\0';
- el->el_line.lastchar = el->el_line.buffer;
- el->el_line.cursor = el->el_line.buffer;
+ if (tmplen < 0 || (tmpbuf[tmplen] = 0, parse_line(el, tmpbuf)) == -1)
+ term_beep(el);
- if (parse_line(el, tmpbuf) == -1)
- return (CC_ERROR);
- else
- return (CC_REFRESH);
+ el->el_map.current = el->el_map.key;
+ re_clear_display(el);
+ return CC_REFRESH;
}
diff --git a/cmd-line-utils/libedit/config.h b/cmd-line-utils/libedit/config.h
new file mode 100644
index 00000000000..b6f002d5b9e
--- /dev/null
+++ b/cmd-line-utils/libedit/config.h
@@ -0,0 +1,14 @@
+
+#include "my_config.h"
+#include "sys.h"
+
+#define __RCSID(x)
+#define __COPYRIGHT(x)
+#define __RENAME(x)
+#define _DIAGASSERT(x)
+
+#if !defined(__attribute__) && (defined(__cplusplus) || !defined(__GNUC__) || __GNUC__ == 2 && __GNUC_MINOR__ < 8)
+#define __attribute__(A)
+#endif
+
+
diff --git a/cmd-line-utils/libedit/el.c b/cmd-line-utils/libedit/el.c
index 76b17aba0cf..1b445d40f1c 100644
--- a/cmd-line-utils/libedit/el.c
+++ b/cmd-line-utils/libedit/el.c
@@ -1,4 +1,4 @@
-/* $NetBSD: el.c,v 1.21 2001/01/05 22:45:30 christos Exp $ */
+/* $NetBSD: el.c,v 1.30 2002/11/12 00:00:23 thorpej Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,13 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)el.c 8.2 (Berkeley) 1/3/94";
+#else
+__RCSID("$NetBSD: el.c,v 1.30 2002/11/12 00:00:23 thorpej Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* el.c: EditLine interface functions
*/
-#include "sys.h"
-
#include <sys/types.h>
#include <sys/param.h>
#include <string.h>
@@ -58,9 +63,6 @@ el_init(const char *prog, FILE *fin, FILE *fout, FILE *ferr)
{
EditLine *el = (EditLine *) el_malloc(sizeof(EditLine));
-#ifdef DEBUG
- char *tty;
-#endif
if (el == NULL)
return (NULL);
@@ -77,8 +79,12 @@ el_init(const char *prog, FILE *fin, FILE *fout, FILE *ferr)
*/
el->el_flags = 0;
- (void) term_init(el);
- (void) key_init(el);
+ if (term_init(el) == -1) {
+ free(el->el_prog);
+ el_free(el);
+ return NULL;
+ }
+ (void) el_key_init(el);
(void) map_init(el);
if (tty_init(el) == -1)
el->el_flags |= NO_TTY;
@@ -87,6 +93,7 @@ el_init(const char *prog, FILE *fin, FILE *fout, FILE *ferr)
(void) hist_init(el);
(void) prompt_init(el);
(void) sig_init(el);
+ (void) read_init(el);
return (el);
}
@@ -105,7 +112,7 @@ el_end(EditLine *el)
el_reset(el);
term_end(el);
- key_end(el);
+ el_key_end(el);
map_end(el);
tty_end(el);
ch_end(el);
@@ -138,11 +145,12 @@ public int
el_set(EditLine *el, int op, ...)
{
va_list va;
- int rv;
- va_start(va, op);
+ int rv = 0;
if (el == NULL)
return (-1);
+ va_start(va, op);
+
switch (op) {
case EL_PROMPT:
case EL_RPROMPT:
@@ -162,7 +170,6 @@ el_set(EditLine *el, int op, ...)
el->el_flags |= HANDLE_SIGNALS;
else
el->el_flags &= ~HANDLE_SIGNALS;
- rv = 0;
break;
case EL_BIND:
@@ -239,8 +246,20 @@ el_set(EditLine *el, int op, ...)
rv = 0;
break;
+ case EL_GETCFN:
+ {
+ el_rfunc_t rc = va_arg(va, el_rfunc_t);
+ rv = el_read_setfn(el, rc);
+ break;
+ }
+
+ case EL_CLIENTDATA:
+ el->el_data = va_arg(va, void *);
+ break;
+
default:
rv = -1;
+ break;
}
va_end(va);
@@ -261,11 +280,11 @@ el_get(EditLine *el, int op, void *ret)
switch (op) {
case EL_PROMPT:
case EL_RPROMPT:
- rv = prompt_get(el, (el_pfunc_t *) & ret, op);
+ rv = prompt_get(el, (void *) &ret, op);
break;
case EL_EDITOR:
- rv = map_get_editor(el, (const char **) &ret);
+ rv = map_get_editor(el, (void *) &ret);
break;
case EL_SIGNAL:
@@ -349,6 +368,16 @@ el_get(EditLine *el, int op, void *ret)
break;
#endif /* XXX */
+ case EL_GETCFN:
+ *((el_rfunc_t *)ret) = el_read_getfn(el);
+ rv = 0;
+ break;
+
+ case EL_CLIENTDATA:
+ *((void **)ret) = el->el_data;
+ rv = 0;
+ break;
+
default:
rv = -1;
}
@@ -367,15 +396,6 @@ el_line(EditLine *el)
return (const LineInfo *) (void *) &el->el_line;
}
-static const char elpath[] = "/.editrc";
-
-#if defined(MAXPATHLEN)
-#define LIBEDIT_MAXPATHLEN MAXPATHLEN
-#elif defined(PATH_MAX)
-#define LIBEDIT_MAXPATHLEN PATH_MAX
-#else
-#define LIBEDIT_MAXPATHLEN 1024
-#endif
/* el_source():
* Source a file
@@ -385,10 +405,14 @@ el_source(EditLine *el, const char *fname)
{
FILE *fp;
size_t len;
- char *ptr, path[LIBEDIT_MAXPATHLEN];
+ char *ptr;
fp = NULL;
if (fname == NULL) {
+#ifdef HAVE_ISSETUGID
+ static const char elpath[] = "/.editrc";
+ char path[MAXPATHLEN];
+
if (issetugid())
return (-1);
if ((ptr = getenv("HOME")) == NULL)
@@ -398,6 +422,14 @@ el_source(EditLine *el, const char *fname)
if (strlcat(path, elpath, sizeof(path)) >= sizeof(path))
return (-1);
fname = path;
+#else
+ /*
+ * If issetugid() is missing, always return an error, in order
+ * to keep from inadvertently opening up the user to a security
+ * hole.
+ */
+ return (-1);
+#endif
}
if (fp == NULL)
fp = fopen(fname, "r");
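
el_source() above now builds the default $HOME/.editrc path only when issetugid() is available, and otherwise returns an error so that a set-user-ID program never sources a user-controlled file. The pattern, reduced to a stand-alone sketch (HAVE_ISSETUGID is assumed to come from configure; this is not the commit's exact code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_ISSETUGID
#include <unistd.h>
#endif

static int default_rc_path(char *path, size_t sz)
{
#ifdef HAVE_ISSETUGID
    const char *home;

    if (issetugid())                 /* running setuid/setgid: do not trust $HOME */
        return -1;
    if ((home = getenv("HOME")) == NULL)
        return -1;
    if (strlen(home) + strlen("/.editrc") + 1 > sz)
        return -1;
    strcpy(path, home);
    strcat(path, "/.editrc");
    return 0;
#else
    (void)path; (void)sz;
    return -1;                       /* no issetugid(): err on the safe side */
#endif
}

int main(void)
{
    char path[1024];

    if (default_rc_path(path, sizeof(path)) == 0)
        printf("would source %s\n", path);
    else
        printf("no default rc file\n");
    return 0;
}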
diff --git a/cmd-line-utils/libedit/el.h b/cmd-line-utils/libedit/el.h
index 7cf17e8f069..49bd462ad3b 100644
--- a/cmd-line-utils/libedit/el.h
+++ b/cmd-line-utils/libedit/el.h
@@ -1,4 +1,4 @@
-/* $NetBSD: el.h,v 1.8 2001/01/06 14:44:50 jdolecek Exp $ */
+/* $NetBSD: el.h,v 1.13 2002/11/15 14:32:33 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -72,7 +72,7 @@ typedef struct el_line_t {
char *buffer; /* Input line */
char *cursor; /* Cursor position */
char *lastchar; /* Last character */
- const char *limit; /* Max position */
+ const char *limit; /* Max position */
} el_line_t;
/*
@@ -84,6 +84,8 @@ typedef struct el_state_t {
int argument; /* Numeric argument */
int metanext; /* Is the next char a meta char */
el_action_t lastcmd; /* Previous command */
+ el_action_t thiscmd; /* this command */
+ char thisch; /* char that generated it */
} el_state_t;
/*
@@ -106,6 +108,7 @@ typedef struct el_state_t {
#include "parse.h"
#include "sig.h"
#include "help.h"
+#include "read.h"
struct editline {
char *el_prog; /* the program name */
@@ -116,6 +119,7 @@ struct editline {
coord_t el_cursor; /* Cursor location */
char **el_display; /* Real screen image = what is there */
char **el_vdisplay; /* Virtual screen image = what we see */
+ void *el_data; /* Client data */
el_line_t el_line; /* The current line information */
el_state_t el_state; /* Current editor state */
el_term_t el_term; /* Terminal dependent stuff */
@@ -129,13 +133,18 @@ struct editline {
el_history_t el_history; /* History stuff */
el_search_t el_search; /* Search stuff */
el_signal_t el_signal; /* Signal handling stuff */
+ el_read_t el_read; /* Character reading stuff */
};
protected int el_editmode(EditLine *, int, const char **);
#ifdef DEBUG
-#define EL_ABORT(a) (void) (fprintf(el->el_errfile, "%s, %d: ", \
- __FILE__, __LINE__), fprintf a, abort())
+#define EL_ABORT(a) do { \
+ fprintf(el->el_errfile, "%s, %d: ", \
+ __FILE__, __LINE__); \
+ fprintf a; \
+ abort(); \
+ } while( /*CONSTCOND*/0);
#else
#define EL_ABORT(a) abort()
#endif
diff --git a/cmd-line-utils/libedit/emacs.c b/cmd-line-utils/libedit/emacs.c
index bb5ffb2a9f6..d58d1620693 100644
--- a/cmd-line-utils/libedit/emacs.c
+++ b/cmd-line-utils/libedit/emacs.c
@@ -1,4 +1,4 @@
-/* $NetBSD: emacs.c,v 1.9 2001/01/10 07:45:41 jdolecek Exp $ */
+/* $NetBSD: emacs.c,v 1.12 2002/11/15 14:32:33 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)emacs.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: emacs.c,v 1.12 2002/11/15 14:32:33 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* emacs.c: Emacs functions
*/
-#include "sys.h"
#include "el.h"
/* em_delete_or_list():
@@ -209,7 +215,7 @@ em_copy_region(EditLine *el, int c __attribute__((unused)))
{
char *kp, *cp;
- if (el->el_chared.c_kill.mark)
+ if (!el->el_chared.c_kill.mark)
return (CC_ERROR);
if (el->el_chared.c_kill.mark > el->el_line.cursor) {
@@ -265,7 +271,7 @@ em_next_word(EditLine *el, int c __attribute__((unused)))
ce__isword);
if (el->el_map.type == MAP_VI)
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
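
The emacs.c hunk above also fixes an inverted test in em_copy_region(): the region commands must fail when no mark has been set, so the error path triggers on a missing mark rather than on a present one. A plain-C illustration of the corrected guard, not the libedit code itself:

#include <stdio.h>
#include <string.h>

static int copy_region(const char *mark, const char *cursor,
                       char *kill, size_t killsz)
{
    const char *lo, *hi;

    if (!mark)                      /* fixed test: no mark means no region */
        return -1;
    lo = (mark < cursor) ? mark : cursor;
    hi = (mark < cursor) ? cursor : mark;
    if ((size_t)(hi - lo) >= killsz)
        return -1;
    memcpy(kill, lo, (size_t)(hi - lo));
    kill[hi - lo] = '\0';
    return 0;
}

int main(void)
{
    const char *line = "copy this region";
    char kill[64];

    if (copy_region(line + 5, line + 9, kill, sizeof(kill)) == 0)
        printf("killed: \"%s\"\n", kill);       /* prints "this" */
    return 0;
}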
diff --git a/cmd-line-utils/libedit/hist.c b/cmd-line-utils/libedit/hist.c
index 2b20c7d14dc..59c2f39dd34 100644
--- a/cmd-line-utils/libedit/hist.c
+++ b/cmd-line-utils/libedit/hist.c
@@ -1,4 +1,4 @@
-/* $NetBSD: hist.c,v 1.9 2001/05/17 01:02:17 christos Exp $ */
+/* $NetBSD: hist.c,v 1.12 2003/01/21 18:40:23 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)hist.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: hist.c,v 1.12 2003/01/21 18:40:23 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* hist.c: History access functions
*/
-#include "sys.h"
#include <stdlib.h>
#include "el.h"
@@ -126,18 +132,16 @@ hist_get(EditLine *el)
el->el_history.eventno = h;
return (CC_ERROR);
}
- (void) strncpy(el->el_line.buffer, hp,
+ (void) strlcpy(el->el_line.buffer, hp,
(size_t)(el->el_line.limit - el->el_line.buffer));
el->el_line.lastchar = el->el_line.buffer + strlen(el->el_line.buffer);
- if (el->el_line.lastchar > el->el_line.buffer) {
- if (el->el_line.lastchar[-1] == '\n')
- el->el_line.lastchar--;
- if (el->el_line.lastchar[-1] == ' ')
- el->el_line.lastchar--;
- if (el->el_line.lastchar < el->el_line.buffer)
- el->el_line.lastchar = el->el_line.buffer;
- }
+ if (el->el_line.lastchar > el->el_line.buffer
+ && el->el_line.lastchar[-1] == '\n')
+ el->el_line.lastchar--;
+ if (el->el_line.lastchar > el->el_line.buffer
+ && el->el_line.lastchar[-1] == ' ')
+ el->el_line.lastchar--;
#ifdef KSHVI
if (el->el_map.type == MAP_VI)
el->el_line.cursor = el->el_line.buffer;
@@ -149,22 +153,41 @@ hist_get(EditLine *el)
}
-/* hist_list()
- * List history entries
+/* hist_command()
+ * process a history command
*/
protected int
/*ARGSUSED*/
-hist_list(EditLine *el, int argc __attribute__((unused)),
- const char **argv __attribute__((unused)))
+hist_command(EditLine *el, int argc, const char **argv)
{
const char *str;
+ int num;
+ HistEvent ev;
if (el->el_history.ref == NULL)
return (-1);
- for (str = HIST_LAST(el); str != NULL; str = HIST_PREV(el))
- (void) fprintf(el->el_outfile, "%d %s",
- el->el_history.ev.num, str);
- return (0);
+
+ if (argc == 0 || strcmp(argv[0], "list") == 1) {
+ /* List history entries */
+
+ for (str = HIST_LAST(el); str != NULL; str = HIST_PREV(el))
+ (void) fprintf(el->el_outfile, "%d %s",
+ el->el_history.ev.num, str);
+ return (0);
+ }
+
+ if (argc != 2)
+ return (-1);
+
+ num = (int)strtol(argv[1], NULL, 0);
+
+ if (strcmp(argv[0], "size") == 0)
+ return history(el->el_history.ref, &ev, H_SETSIZE, num);
+
+ if (strcmp(argv[0], "unique") == 0)
+ return history(el->el_history.ref, &ev, H_SETUNIQUE, num);
+
+ return -1;
}
/* hist_enlargebuf()
diff --git a/cmd-line-utils/libedit/hist.h b/cmd-line-utils/libedit/hist.h
index e650b6a55a9..b713281b382 100644
--- a/cmd-line-utils/libedit/hist.h
+++ b/cmd-line-utils/libedit/hist.h
@@ -1,4 +1,4 @@
-/* $NetBSD: hist.h,v 1.6 2001/01/10 07:45:41 jdolecek Exp $ */
+/* $NetBSD: hist.h,v 1.9 2003/01/21 18:40:23 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -66,7 +66,7 @@ typedef struct el_history_t {
#define HIST_FIRST(el) HIST_FUN(el, H_FIRST, NULL)
#define HIST_LAST(el) HIST_FUN(el, H_LAST, NULL)
#define HIST_PREV(el) HIST_FUN(el, H_PREV, NULL)
-#define HIST_EVENT(el, num) HIST_FUN(el, H_EVENT, num)
+#define HIST_SET(el, num) HIST_FUN(el, H_SET, num)
#define HIST_LOAD(el, fname) HIST_FUN(el, H_LOAD fname)
#define HIST_SAVE(el, fname) HIST_FUN(el, H_SAVE fname)
@@ -74,7 +74,7 @@ protected int hist_init(EditLine *);
protected void hist_end(EditLine *);
protected el_action_t hist_get(EditLine *);
protected int hist_set(EditLine *, hist_fun_t, ptr_t);
-protected int hist_list(EditLine *, int, const char **);
+protected int hist_command(EditLine *, int, const char **);
protected int hist_enlargebuf(EditLine *, size_t, size_t);
#endif /* _h_el_hist */
diff --git a/cmd-line-utils/libedit/histedit.h b/cmd-line-utils/libedit/histedit.h
index 0b8d175cef1..3137bd680a7 100644
--- a/cmd-line-utils/libedit/histedit.h
+++ b/cmd-line-utils/libedit/histedit.h
@@ -1,4 +1,4 @@
-/* $NetBSD: histedit.h,v 1.16 2000/09/04 22:06:30 lukem Exp $ */
+/* $NetBSD: histedit.h,v 1.21 2003/01/21 18:40:24 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -44,6 +44,9 @@
#ifndef _HISTEDIT_H_
#define _HISTEDIT_H_
+#define LIBEDIT_MAJOR 2
+#define LIBEDIT_MINOR 6
+
#include <sys/types.h>
#include <stdio.h>
@@ -90,7 +93,7 @@ void el_end(EditLine *);
*/
const char *el_gets(EditLine *, int *);
int el_getc(EditLine *, char *);
-void el_push(EditLine *, const char *);
+void el_push(EditLine *, char *);
/*
* Beep!
@@ -126,6 +129,10 @@ int el_get(EditLine *, int, void *);
#define EL_HIST 10 /* , hist_fun_t, const char *); */
#define EL_EDITMODE 11 /* , int); */
#define EL_RPROMPT 12 /* , el_pfunc_t); */
+#define EL_GETCFN 13 /* , el_rfunc_t); */
+#define EL_CLIENTDATA 14 /* , void *); */
+
+#define EL_BUILTIN_GETCFN (NULL)
/*
* Source named file or $PWD/.editrc or $HOME/.editrc
@@ -174,7 +181,7 @@ int history(History *, HistEvent *, int, ...);
#define H_PREV 5 /* , void); */
#define H_NEXT 6 /* , void); */
#define H_CURR 8 /* , const int); */
-#define H_SET 7 /* , void); */
+#define H_SET 7 /* , int); */
#define H_ADD 9 /* , const char *); */
#define H_ENTER 10 /* , const char *); */
#define H_APPEND 11 /* , const char *); */
@@ -186,5 +193,7 @@ int history(History *, HistEvent *, int, ...);
#define H_LOAD 17 /* , const char *); */
#define H_SAVE 18 /* , const char *); */
#define H_CLEAR 19 /* , void); */
+#define H_SETUNIQUE 20 /* , int); */
+#define H_GETUNIQUE 21 /* , void); */
#endif /* _HISTEDIT_H_ */
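
histedit.h above adds the H_SETUNIQUE/H_GETUNIQUE operations; the history.c hunks that follow implement them by refusing to enter a line that is identical to the newest entry while the unique flag is set. A toy illustration of that adjacent-duplicate check, using a single-entry "history" instead of the real doubly linked list:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hist {
    char *last;        /* most recent entry */
    int   unique;      /* H_SETUNIQUE flag */
    int   count;
};

static int hist_enter(struct hist *h, const char *line)
{
    if (h->unique && h->last && strcmp(h->last, line) == 0)
        return 0;                       /* adjacent duplicate: skip */
    free(h->last);
    if ((h->last = strdup(line)) == NULL)
        return -1;
    h->count++;
    return 1;                           /* entered */
}

int main(void)
{
    struct hist h = { NULL, 1, 0 };

    hist_enter(&h, "select 1;");
    hist_enter(&h, "select 1;");        /* skipped: same as the previous entry */
    hist_enter(&h, "select 2;");
    printf("%d entries, newest: %s\n", h.count, h.last);
    free(h.last);
    return 0;
}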
diff --git a/cmd-line-utils/libedit/history.c b/cmd-line-utils/libedit/history.c
index 90d94e7fc18..53648203bf0 100644
--- a/cmd-line-utils/libedit/history.c
+++ b/cmd-line-utils/libedit/history.c
@@ -1,4 +1,4 @@
-/* $NetBSD: history.c,v 1.17 2001/03/20 00:08:31 christos Exp $ */
+/* $NetBSD: history.c,v 1.22 2003/01/21 18:40:24 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,18 +36,25 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)history.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: history.c,v 1.22 2003/01/21 18:40:24 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* hist.c: History access functions
*/
-#include "sys.h"
-
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#ifdef HAVE_VIS_H
#include <vis.h>
+#else
+#include "np/vis.h"
#endif
#include <sys/stat.h>
@@ -73,6 +80,7 @@ struct history {
history_efun_t h_enter; /* Add an element */
history_efun_t h_add; /* Append to an element */
};
+
#define HNEXT(h, ev) (*(h)->h_next)((h)->h_ref, ev)
#define HFIRST(h, ev) (*(h)->h_first)((h)->h_ref, ev)
#define HPREV(h, ev) (*(h)->h_prev)((h)->h_ref, ev)
@@ -87,9 +95,17 @@ struct history {
#define h_realloc(a, b) realloc((a), (b))
#define h_free(a) free(a)
+typedef struct {
+ int num;
+ char *str;
+} HistEventPrivate;
+
+
private int history_setsize(History *, HistEvent *, int);
private int history_getsize(History *, HistEvent *);
+private int history_setunique(History *, HistEvent *, int);
+private int history_getunique(History *, HistEvent *);
private int history_set_fun(History *, History *);
private int history_load(History *, const char *);
private int history_save(History *, const char *);
@@ -108,15 +124,17 @@ typedef struct hentry_t {
HistEvent ev; /* What we return */
struct hentry_t *next; /* Next entry */
struct hentry_t *prev; /* Previous entry */
-} hentry_t;
+} hentry_t;
typedef struct history_t {
- hentry_t list; /* Fake list header element */
- hentry_t *cursor; /* Current element in the list */
- int max; /* Maximum number of events */
- int cur; /* Current number of events */
+ hentry_t list; /* Fake list header element */
+ hentry_t *cursor; /* Current element in the list */
+ int max; /* Maximum number of events */
+ int cur; /* Current number of events */
int eventid; /* For generation of unique event id */
-} history_t;
+ int flags; /* History flags */
+#define H_UNIQUE 1 /* Store only unique elements */
+} history_t;
private int history_def_first(ptr_t, HistEvent *);
private int history_def_last(ptr_t, HistEvent *);
@@ -126,13 +144,19 @@ private int history_def_curr(ptr_t, HistEvent *);
private int history_def_set(ptr_t, HistEvent *, const int n);
private int history_def_enter(ptr_t, HistEvent *, const char *);
private int history_def_add(ptr_t, HistEvent *, const char *);
-private void history_def_init(ptr_t *, HistEvent *, int);
+private int history_def_init(ptr_t *, HistEvent *, int);
private void history_def_clear(ptr_t, HistEvent *);
private int history_def_insert(history_t *, HistEvent *, const char *);
private void history_def_delete(history_t *, HistEvent *, hentry_t *);
-#define history_def_setsize(p, num)(void) (((history_t *) p)->max = (num))
-#define history_def_getsize(p) (((history_t *) p)->cur)
+#define history_def_setsize(p, num)(void) (((history_t *)p)->max = (num))
+#define history_def_getsize(p) (((history_t *)p)->cur)
+#define history_def_getunique(p) (((((history_t *)p)->flags) & H_UNIQUE) != 0)
+#define history_def_setunique(p, uni) \
+ if (uni) \
+ (((history_t *)p)->flags) |= H_UNIQUE; \
+ else \
+ (((history_t *)p)->flags) &= ~H_UNIQUE
#define he_strerror(code) he_errlist[code]
#define he_seterrev(evp, code) {\
@@ -326,20 +350,20 @@ history_def_add(ptr_t p, HistEvent *ev, const char *str)
history_t *h = (history_t *) p;
size_t len;
char *s;
+ HistEventPrivate *evp = (void *)&h->cursor->ev;
if (h->cursor == &h->list)
return (history_def_enter(p, ev, str));
- len = strlen(h->cursor->ev.str) + strlen(str) + 1;
+ len = strlen(evp->str) + strlen(str) + 1;
s = (char *) h_malloc(len);
- if (!s) {
+ if (s == NULL) {
he_seterrev(ev, _HE_MALLOC_FAILED);
return (-1);
}
(void) strlcpy(s, h->cursor->ev.str, len);
(void) strlcat(s, str, len);
- /* LINTED const cast */
- h_free((ptr_t) h->cursor->ev.str);
- h->cursor->ev.str = s;
+ h_free((ptr_t)evp->str);
+ evp->str = s;
*ev = h->cursor->ev;
return (0);
}
@@ -350,16 +374,14 @@ history_def_add(ptr_t p, HistEvent *ev, const char *str)
*/
/* ARGSUSED */
private void
-history_def_delete(history_t *h,
- HistEvent *ev __attribute__((unused)), hentry_t *hp)
+history_def_delete(history_t *h, HistEvent *ev __attribute__((unused)), hentry_t *hp)
{
-
+ HistEventPrivate *evp = (void *)&hp->ev;
if (hp == &h->list)
abort();
hp->prev->next = hp->next;
hp->next->prev = hp->prev;
- /* LINTED const cast */
- h_free((ptr_t) hp->ev.str);
+ h_free((ptr_t) evp->str);
h_free(hp);
h->cur--;
}
@@ -373,11 +395,11 @@ history_def_insert(history_t *h, HistEvent *ev, const char *str)
{
h->cursor = (hentry_t *) h_malloc(sizeof(hentry_t));
- if (h->cursor)
- h->cursor->ev.str = strdup(str);
- if (!h->cursor || !h->cursor->ev.str) {
- he_seterrev(ev, _HE_MALLOC_FAILED);
- return (-1);
+ if (h->cursor == NULL)
+ goto oomem;
+ if ((h->cursor->ev.str = strdup(str)) == NULL) {
+ h_free((ptr_t)h->cursor);
+ goto oomem;
}
h->cursor->ev.num = ++h->eventid;
h->cursor->next = h->list.next;
@@ -388,6 +410,9 @@ history_def_insert(history_t *h, HistEvent *ev, const char *str)
*ev = h->cursor->ev;
return (0);
+oomem:
+ he_seterrev(ev, _HE_MALLOC_FAILED);
+ return (-1);
}
@@ -399,6 +424,10 @@ history_def_enter(ptr_t p, HistEvent *ev, const char *str)
{
history_t *h = (history_t *) p;
+ if ((h->flags & H_UNIQUE) != 0 && h->list.next != &h->list &&
+ strcmp(h->list.next->ev.str, str) == 0)
+ return (0);
+
if (history_def_insert(h, ev, str) == -1)
return (-1); /* error, keep error message */
@@ -406,10 +435,10 @@ history_def_enter(ptr_t p, HistEvent *ev, const char *str)
* Always keep at least one entry.
* This way we don't have to check for the empty list.
*/
- while (h->cur - 1 > h->max)
+ while (h->cur > h->max && h->cur > 0)
history_def_delete(h, ev, h->list.prev);
- return (0);
+ return (1);
}
@@ -417,10 +446,12 @@ history_def_enter(ptr_t p, HistEvent *ev, const char *str)
* Default history initialization function
*/
/* ARGSUSED */
-private void
+private int
history_def_init(ptr_t *p, HistEvent *ev __attribute__((unused)), int n)
{
history_t *h = (history_t *) h_malloc(sizeof(history_t));
+ if (h == NULL)
+ return -1;
if (n <= 0)
n = 0;
@@ -431,7 +462,9 @@ history_def_init(ptr_t *p, HistEvent *ev __attribute__((unused)), int n)
h->list.ev.str = NULL;
h->list.ev.num = 0;
h->cursor = &h->list;
+ h->flags = 0;
*p = (ptr_t) h;
+ return 0;
}
@@ -460,10 +493,15 @@ history_def_clear(ptr_t p, HistEvent *ev)
public History *
history_init(void)
{
- History *h = (History *) h_malloc(sizeof(History));
HistEvent ev;
+ History *h = (History *) h_malloc(sizeof(History));
+ if (h == NULL)
+ return NULL;
- history_def_init(&h->h_ref, &ev, 0);
+ if (history_def_init(&h->h_ref, &ev, 0) == -1) {
+ h_free((ptr_t)h);
+ return NULL;
+ }
h->h_ent = -1;
h->h_next = history_def_next;
h->h_first = history_def_first;
@@ -519,18 +557,46 @@ history_setsize(History *h, HistEvent *ev, int num)
private int
history_getsize(History *h, HistEvent *ev)
{
- int retval = 0;
-
if (h->h_next != history_def_next) {
he_seterrev(ev, _HE_NOT_ALLOWED);
return (-1);
}
- retval = history_def_getsize(h->h_ref);
- if (retval < -1) {
+ ev->num = history_def_getsize(h->h_ref);
+ if (ev->num < -1) {
he_seterrev(ev, _HE_SIZE_NEGATIVE);
return (-1);
}
- ev->num = retval;
+ return (0);
+}
+
+
+/* history_setunique():
+ * Set if adjacent equal events should not be entered in history.
+ */
+private int
+history_setunique(History *h, HistEvent *ev, int uni)
+{
+
+ if (h->h_next != history_def_next) {
+ he_seterrev(ev, _HE_NOT_ALLOWED);
+ return (-1);
+ }
+ history_def_setunique(h->h_ref, uni);
+ return (0);
+}
+
+
+/* history_getunique():
+ * Get if adjacent equal events should not be entered in history.
+ */
+private int
+history_getunique(History *h, HistEvent *ev)
+{
+ if (h->h_next != history_def_next) {
+ he_seterrev(ev, _HE_NOT_ALLOWED);
+ return (-1);
+ }
+ ev->num = history_def_getunique(h->h_ref);
return (0);
}
@@ -595,13 +661,9 @@ history_load(History *h, const char *fname)
if ((fp = fopen(fname, "r")) == NULL)
return (i);
- if ((line = fgetln(fp, &sz)) == NULL)
- goto done;
-
- if (strncmp(line, hist_cookie, sz) != 0)
- goto done;
-
ptr = h_malloc(max_size = 1024);
+ if (ptr == NULL)
+ goto done;
for (i = 0; (line = fgetln(fp, &sz)) != NULL; i++) {
char c = line[sz];
@@ -611,15 +673,24 @@ history_load(History *h, const char *fname)
line[sz] = '\0';
if (max_size < sz) {
+ char *nptr;
max_size = (sz + 1023) & ~1023;
- ptr = h_realloc(ptr, max_size);
+ nptr = h_realloc(ptr, max_size);
+ if (nptr == NULL) {
+ i = -1;
+ goto oomem;
+ }
+ ptr = nptr;
}
(void) strunvis(ptr, line);
line[sz] = c;
- HENTER(h, &ev, ptr);
+ if (HENTER(h, &ev, ptr) == -1) {
+ h_free((ptr_t)ptr);
+ return -1;
+ }
}
- h_free(ptr);
-
+oomem:
+ h_free((ptr_t)ptr);
done:
(void) fclose(fp);
return (i);
@@ -634,28 +705,38 @@ history_save(History *h, const char *fname)
{
FILE *fp;
HistEvent ev;
- int i = 0, retval;
+ int i = -1, retval;
size_t len, max_size;
char *ptr;
if ((fp = fopen(fname, "w")) == NULL)
return (-1);
- (void) fchmod(fileno(fp), S_IRUSR|S_IWUSR);
- (void) fputs(hist_cookie, fp);
+ if (fchmod(fileno(fp), S_IRUSR|S_IWUSR) == -1)
+ goto done;
ptr = h_malloc(max_size = 1024);
- for (retval = HLAST(h, &ev);
+ if (ptr == NULL)
+ goto done;
+ for (i = 0, retval = HLAST(h, &ev);
retval != -1;
retval = HPREV(h, &ev), i++) {
- len = strlen(ev.str) * 4;
+ len = strlen(ev.str) * 4 + 1;
if (len >= max_size) {
- max_size = (len + 1023) & 1023;
- ptr = h_realloc(ptr, max_size);
+ char *nptr;
+ max_size = (len + 1023) & ~1023;
+ nptr = h_realloc(ptr, max_size);
+ if (nptr == NULL) {
+ i = -1;
+ goto oomem;
+ }
+ ptr = nptr;
}
(void) strvis(ptr, ev.str, VIS_WHITE);
(void) fprintf(fp, "%s\n", ev.str);
}
- h_free(ptr);
+oomem:
+ h_free((ptr_t)ptr);
+done:
(void) fclose(fp);
return (i);
}
@@ -754,6 +835,14 @@ history(History *h, HistEvent *ev, int fun, ...)
retval = history_setsize(h, ev, va_arg(va, int));
break;
+ case H_GETUNIQUE:
+ retval = history_getunique(h, ev);
+ break;
+
+ case H_SETUNIQUE:
+ retval = history_setunique(h, ev, va_arg(va, int));
+ break;
+
case H_ADD:
str = va_arg(va, const char *);
retval = HADD(h, ev, str);
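The new H_GETUNIQUE/H_SETUNIQUE cases make the duplicate-suppression flag reachable through the public history() dispatcher. A minimal sketch of that usage, assuming the public histedit.h interface and the standard H_SETSIZE/H_ENTER/H_SAVE operations (the history file path is only illustrative, not part of this patch):

	#include <stdio.h>
	#include <histedit.h>	/* History, HistEvent, history(), H_* operations */

	int
	main(void)
	{
		History *hist = history_init();
		HistEvent ev;

		if (hist == NULL)	/* history_init() can now fail on malloc error */
			return 1;
		(void) history(hist, &ev, H_SETSIZE, 100);	/* keep at most 100 events */
		(void) history(hist, &ev, H_SETUNIQUE, 1);	/* drop adjacent duplicates */

		(void) history(hist, &ev, H_ENTER, "select 1;");
		(void) history(hist, &ev, H_ENTER, "select 1;");	/* equal to previous: skipped */
		(void) history(hist, &ev, H_ENTER, "select 2;");

		(void) history(hist, &ev, H_GETUNIQUE);
		printf("unique flag: %d\n", ev.num);		/* prints 1 */

		(void) history(hist, &ev, H_SAVE, "/tmp/histfile");	/* illustrative path */
		history_end(hist);
		return 0;
	}

With the change to history_def_enter() above, a real insertion now returns 1 and a suppressed duplicate returns 0, while -1 is still reserved for errors, so the HENTER() error check in history_load() keeps working.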
diff --git a/cmd-line-utils/libedit/key.c b/cmd-line-utils/libedit/key.c
index 629c6aeeb9c..e75db00ce1b 100644
--- a/cmd-line-utils/libedit/key.c
+++ b/cmd-line-utils/libedit/key.c
@@ -1,4 +1,4 @@
-/* $NetBSD: key.c,v 1.12 2001/05/17 01:02:17 christos Exp $ */
+/* $NetBSD: key.c,v 1.13 2002/03/18 16:00:55 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,7 +36,14 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)key.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: key.c,v 1.13 2002/03/18 16:00:55 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* key.c: This module contains the procedures for maintaining
@@ -59,7 +66,6 @@
* 1) It is not possible to have one key that is a
* substr of another.
*/
-#include "sys.h"
#include <string.h>
#include <stdlib.h>
@@ -85,8 +91,8 @@ private int node__try(EditLine *, key_node_t *, const char *,
private key_node_t *node__get(int);
private void node__put(EditLine *, key_node_t *);
private int node__delete(EditLine *, key_node_t **, const char *);
-private int node_lookup(EditLine *, const char *,
- key_node_t *, int);
+private int node_lookup(EditLine *, const char *, key_node_t *,
+ int);
private int node_enum(EditLine *, key_node_t *, int);
private int key__decode_char(char *, int, int);
@@ -97,14 +103,14 @@ private int key__decode_char(char *, int, int);
* Initialize the key maps
*/
protected int
-key_init(EditLine *el)
+el_key_init(EditLine *el)
{
el->el_key.buf = (char *) el_malloc(KEY_BUFSIZ);
if (el->el_key.buf == NULL)
return (-1);
el->el_key.map = NULL;
- key_reset(el);
+ el_key_reset(el);
return (0);
}
@@ -113,7 +119,7 @@ key_init(EditLine *el)
* Free the key maps
*/
protected void
-key_end(EditLine *el)
+el_key_end(EditLine *el)
{
el_free((ptr_t) el->el_key.buf);
@@ -127,7 +133,7 @@ key_end(EditLine *el)
* Associate cmd with a key value
*/
protected key_value_t *
-key_map_cmd(EditLine *el, int cmd)
+el_key_map_cmd(EditLine *el, int cmd)
{
el->el_key.val.cmd = (el_action_t) cmd;
@@ -139,7 +145,7 @@ key_map_cmd(EditLine *el, int cmd)
* Associate str with a key value
*/
protected key_value_t *
-key_map_str(EditLine *el, char *str)
+el_key_map_str(EditLine *el, char *str)
{
el->el_key.val.str = str;
@@ -153,7 +159,7 @@ key_map_str(EditLine *el, char *str)
* [Always bind the ansi arrow keys?]
*/
protected void
-key_reset(EditLine *el)
+el_key_reset(EditLine *el)
{
node__put(el, el->el_key.map);
@@ -171,7 +177,7 @@ key_reset(EditLine *el)
* The last character read is returned in *ch.
*/
protected int
-key_get(EditLine *el, char *ch, key_value_t *val)
+el_key_get(EditLine *el, char *ch, key_value_t *val)
{
return (node_trav(el, el->el_key.map, ch, val));
@@ -185,7 +191,7 @@ key_get(EditLine *el, char *ch, key_value_t *val)
* out str or a unix command.
*/
protected void
-key_add(EditLine *el, const char *key, key_value_t *val, int ntype)
+el_key_add(EditLine *el, const char *key, key_value_t *val, int ntype)
{
if (key[0] == '\0') {
@@ -213,7 +219,7 @@ key_add(EditLine *el, const char *key, key_value_t *val, int ntype)
*
*/
protected void
-key_clear(EditLine *el, el_action_t *map, const char *in)
+el_key_clear(EditLine *el, el_action_t *map, const char *in)
{
if ((map[(unsigned char)*in] == ED_SEQUENCE_LEAD_IN) &&
@@ -221,7 +227,7 @@ key_clear(EditLine *el, el_action_t *map, const char *in)
el->el_map.alt[(unsigned char)*in] != ED_SEQUENCE_LEAD_IN) ||
(map == el->el_map.alt &&
el->el_map.key[(unsigned char)*in] != ED_SEQUENCE_LEAD_IN)))
- (void) key_delete(el, in);
+ (void) el_key_delete(el, in);
}
@@ -230,7 +236,7 @@ key_clear(EditLine *el, el_action_t *map, const char *in)
 * they exist.
*/
protected int
-key_delete(EditLine *el, const char *key)
+el_key_delete(EditLine *el, const char *key)
{
if (key[0] == '\0') {
@@ -251,7 +257,7 @@ key_delete(EditLine *el, const char *key)
* Print entire el->el_key.map if null
*/
protected void
-key_print(EditLine *el, const char *key)
+el_key_print(EditLine *el, const char *key)
{
/* do nothing if el->el_key.map is empty and null key specified */
@@ -498,7 +504,7 @@ node_lookup(EditLine *el, const char *str, key_node_t *ptr, int cnt)
if (str[1] == 0) {
el->el_key.buf[ncnt + 1] = '"';
el->el_key.buf[ncnt + 2] = '\0';
- key_kprint(el, el->el_key.buf,
+ el_key_kprint(el, el->el_key.buf,
&ptr->val, ptr->type);
return (0);
} else
@@ -546,7 +552,7 @@ node_enum(EditLine *el, key_node_t *ptr, int cnt)
/* print this key and function */
el->el_key.buf[ncnt + 1] = '"';
el->el_key.buf[ncnt + 2] = '\0';
- key_kprint(el, el->el_key.buf, &ptr->val, ptr->type);
+ el_key_kprint(el, el->el_key.buf, &ptr->val, ptr->type);
} else
(void) node_enum(el, ptr->next, ncnt + 1);
@@ -562,7 +568,7 @@ node_enum(EditLine *el, key_node_t *ptr, int cnt)
* function specified by val
*/
protected void
-key_kprint(EditLine *el, const char *key, key_value_t *val, int ntype)
+el_key_kprint(EditLine *el, const char *key, key_value_t *val, int ntype)
{
el_bindings_t *fp;
char unparsbuf[EL_BUFSIZ];
@@ -573,7 +579,7 @@ key_kprint(EditLine *el, const char *key, key_value_t *val, int ntype)
case XK_STR:
case XK_EXE:
(void) fprintf(el->el_outfile, fmt, key,
- key__decode_str(val->str, unparsbuf,
+ el_key__decode_str(val->str, unparsbuf,
ntype == XK_STR ? "\"\"" : "[]"));
break;
case XK_CMD:
@@ -638,9 +644,9 @@ key__decode_char(char *buf, int cnt, int ch)
 * Make a printable version of the key
*/
protected char *
-key__decode_str(const char *str, char *buf, const char *sep)
+el_key__decode_str(const char *str, char *buf, const char *sep)
{
- char *b;
+ char *b;
const char *p;
b = buf;
diff --git a/cmd-line-utils/libedit/key.h b/cmd-line-utils/libedit/key.h
index e95731d9df5..9d83d7c2521 100644
--- a/cmd-line-utils/libedit/key.h
+++ b/cmd-line-utils/libedit/key.h
@@ -1,4 +1,4 @@
-/* $NetBSD: key.h,v 1.5 2001/01/23 15:55:30 jdolecek Exp $ */
+/* $NetBSD: key.h,v 1.6 2002/03/18 16:00:55 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -62,22 +62,19 @@ typedef struct el_key_t {
#define XK_NOD 2
#define XK_EXE 3
-#undef key_end
-#undef key_clear
-#undef key_print
-
-protected int key_init(EditLine *);
-protected void key_end(EditLine *);
-protected key_value_t *key_map_cmd(EditLine *, int);
-protected key_value_t *key_map_str(EditLine *, char *);
-protected void key_reset(EditLine *);
-protected int key_get(EditLine *, char *, key_value_t *);
-protected void key_add(EditLine *, const char *, key_value_t *, int);
-protected void key_clear(EditLine *, el_action_t *, const char *);
-protected int key_delete(EditLine *, const char *);
-protected void key_print(EditLine *, const char *);
-protected void key_kprint(EditLine *, const char *,
- key_value_t *, int);
-protected char *key__decode_str(const char *, char *, const char *);
+protected int el_key_init(EditLine *);
+protected void el_key_end(EditLine *);
+protected key_value_t *el_key_map_cmd(EditLine *, int);
+protected key_value_t *el_key_map_str(EditLine *, char *);
+protected void el_key_reset(EditLine *);
+protected int el_key_get(EditLine *, char *, key_value_t *);
+protected void el_key_add(EditLine *,
+ const char *, key_value_t *, int);
+protected void el_key_clear(EditLine *, el_action_t *, const char *);
+protected int el_key_delete(EditLine *, const char *);
+protected void el_key_print(EditLine *, const char *);
+protected void el_key_kprint(EditLine *, const char *, key_value_t *,
+ int);
+protected char *el_key__decode_str(const char *, char *, const char *);
#endif /* _h_el_key */
diff --git a/cmd-line-utils/libedit/makelist.sh b/cmd-line-utils/libedit/makelist.sh
index 13d37512591..fbce06fcc50 100644
--- a/cmd-line-utils/libedit/makelist.sh
+++ b/cmd-line-utils/libedit/makelist.sh
@@ -1,5 +1,5 @@
#!/bin/sh -
-# $NetBSD: makelist,v 1.7 2001/01/09 19:22:31 jdolecek Exp $
+# $NetBSD: makelist,v 1.8 2003/03/10 21:21:10 christos Exp $
#
# Copyright (c) 1992, 1993
# The Regents of the University of California. All rights reserved.
@@ -87,7 +87,6 @@ case $FLAG in
cat $FILES | $AWK '
BEGIN {
printf("/* Automatically generated file, do not edit */\n");
- printf("#include \"compat.h\"\n");
printf("#include \"sys.h\"\n#include \"el.h\"\n");
printf("private const struct el_bindings_t el_func_help[] = {\n");
low = "abcdefghijklmnopqrstuvwxyz_";
@@ -170,7 +169,6 @@ case $FLAG in
cat $FILES | $AWK '/el_action_t/ { print $3 }' | sort | $AWK '
BEGIN {
printf("/* Automatically generated file, do not edit */\n");
- printf("#include \"compat.h\"\n");
printf("#include \"sys.h\"\n#include \"el.h\"\n");
printf("private const el_func_t el_func[] = {");
maxlen = 80;
diff --git a/cmd-line-utils/libedit/map.c b/cmd-line-utils/libedit/map.c
index 144ccf1ebe0..a16625311ae 100644
--- a/cmd-line-utils/libedit/map.c
+++ b/cmd-line-utils/libedit/map.c
@@ -1,4 +1,4 @@
-/* $NetBSD: map.c,v 1.14 2001/01/09 17:22:09 jdolecek Exp $ */
+/* $NetBSD: map.c,v 1.18 2002/11/15 14:32:33 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)map.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: map.c,v 1.18 2002/11/15 14:32:33 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* map.c: Editor function definitions
*/
-#include "sys.h"
#include <stdlib.h>
#include "el.h"
@@ -373,7 +379,7 @@ private const el_action_t el_map_vi_insert[] = {
/* 5 */ ED_MOVE_TO_END, /* ^E */
/* 6 */ ED_NEXT_CHAR, /* ^F */
/* 7 */ ED_UNASSIGNED, /* ^G */
- /* 8 */ ED_DELETE_PREV_CHAR, /* ^H */ /* BackSpace key */
+ /* 8 */ VI_DELETE_PREV_CHAR, /* ^H */ /* BackSpace key */
/* 9 */ ED_UNASSIGNED, /* ^I */ /* Tab Key */
/* 10 */ ED_NEWLINE, /* ^J */
/* 11 */ ED_KILL_LINE, /* ^K */
@@ -493,135 +499,135 @@ private const el_action_t el_map_vi_insert[] = {
/* 124 */ ED_INSERT, /* | */
/* 125 */ ED_INSERT, /* } */
/* 126 */ ED_INSERT, /* ~ */
- /* 127 */ ED_DELETE_PREV_CHAR, /* ^? */
- /* 128 */ ED_UNASSIGNED, /* M-^@ */
- /* 129 */ ED_UNASSIGNED, /* M-^A */
- /* 130 */ ED_UNASSIGNED, /* M-^B */
- /* 131 */ ED_UNASSIGNED, /* M-^C */
- /* 132 */ ED_UNASSIGNED, /* M-^D */
- /* 133 */ ED_UNASSIGNED, /* M-^E */
- /* 134 */ ED_UNASSIGNED, /* M-^F */
- /* 135 */ ED_UNASSIGNED, /* M-^G */
- /* 136 */ ED_UNASSIGNED, /* M-^H */
- /* 137 */ ED_UNASSIGNED, /* M-^I */
- /* 138 */ ED_UNASSIGNED, /* M-^J */
- /* 139 */ ED_UNASSIGNED, /* M-^K */
- /* 140 */ ED_UNASSIGNED, /* M-^L */
- /* 141 */ ED_UNASSIGNED, /* M-^M */
- /* 142 */ ED_UNASSIGNED, /* M-^N */
- /* 143 */ ED_UNASSIGNED, /* M-^O */
- /* 144 */ ED_UNASSIGNED, /* M-^P */
- /* 145 */ ED_UNASSIGNED, /* M-^Q */
- /* 146 */ ED_UNASSIGNED, /* M-^R */
- /* 147 */ ED_UNASSIGNED, /* M-^S */
- /* 148 */ ED_UNASSIGNED, /* M-^T */
- /* 149 */ ED_UNASSIGNED, /* M-^U */
- /* 150 */ ED_UNASSIGNED, /* M-^V */
- /* 151 */ ED_UNASSIGNED, /* M-^W */
- /* 152 */ ED_UNASSIGNED, /* M-^X */
- /* 153 */ ED_UNASSIGNED, /* M-^Y */
- /* 154 */ ED_UNASSIGNED, /* M-^Z */
- /* 155 */ ED_UNASSIGNED, /* M-^[ */
- /* 156 */ ED_UNASSIGNED, /* M-^\ */
- /* 157 */ ED_UNASSIGNED, /* M-^] */
- /* 158 */ ED_UNASSIGNED, /* M-^^ */
- /* 159 */ ED_UNASSIGNED, /* M-^_ */
- /* 160 */ ED_UNASSIGNED, /* M-SPACE */
- /* 161 */ ED_UNASSIGNED, /* M-! */
- /* 162 */ ED_UNASSIGNED, /* M-" */
- /* 163 */ ED_UNASSIGNED, /* M-# */
- /* 164 */ ED_UNASSIGNED, /* M-$ */
- /* 165 */ ED_UNASSIGNED, /* M-% */
- /* 166 */ ED_UNASSIGNED, /* M-& */
- /* 167 */ ED_UNASSIGNED, /* M-' */
- /* 168 */ ED_UNASSIGNED, /* M-( */
- /* 169 */ ED_UNASSIGNED, /* M-) */
- /* 170 */ ED_UNASSIGNED, /* M-* */
- /* 171 */ ED_UNASSIGNED, /* M-+ */
- /* 172 */ ED_UNASSIGNED, /* M-, */
- /* 173 */ ED_UNASSIGNED, /* M-- */
- /* 174 */ ED_UNASSIGNED, /* M-. */
- /* 175 */ ED_UNASSIGNED, /* M-/ */
- /* 176 */ ED_UNASSIGNED, /* M-0 */
- /* 177 */ ED_UNASSIGNED, /* M-1 */
- /* 178 */ ED_UNASSIGNED, /* M-2 */
- /* 179 */ ED_UNASSIGNED, /* M-3 */
- /* 180 */ ED_UNASSIGNED, /* M-4 */
- /* 181 */ ED_UNASSIGNED, /* M-5 */
- /* 182 */ ED_UNASSIGNED, /* M-6 */
- /* 183 */ ED_UNASSIGNED, /* M-7 */
- /* 184 */ ED_UNASSIGNED, /* M-8 */
- /* 185 */ ED_UNASSIGNED, /* M-9 */
- /* 186 */ ED_UNASSIGNED, /* M-: */
- /* 187 */ ED_UNASSIGNED, /* M-; */
- /* 188 */ ED_UNASSIGNED, /* M-< */
- /* 189 */ ED_UNASSIGNED, /* M-= */
- /* 190 */ ED_UNASSIGNED, /* M-> */
- /* 191 */ ED_UNASSIGNED, /* M-? */
- /* 192 */ ED_UNASSIGNED, /* M-@ */
- /* 193 */ ED_UNASSIGNED, /* M-A */
- /* 194 */ ED_UNASSIGNED, /* M-B */
- /* 195 */ ED_UNASSIGNED, /* M-C */
- /* 196 */ ED_UNASSIGNED, /* M-D */
- /* 197 */ ED_UNASSIGNED, /* M-E */
- /* 198 */ ED_UNASSIGNED, /* M-F */
- /* 199 */ ED_UNASSIGNED, /* M-G */
- /* 200 */ ED_UNASSIGNED, /* M-H */
- /* 201 */ ED_UNASSIGNED, /* M-I */
- /* 202 */ ED_UNASSIGNED, /* M-J */
- /* 203 */ ED_UNASSIGNED, /* M-K */
- /* 204 */ ED_UNASSIGNED, /* M-L */
- /* 205 */ ED_UNASSIGNED, /* M-M */
- /* 206 */ ED_UNASSIGNED, /* M-N */
- /* 207 */ ED_UNASSIGNED, /* M-O */
- /* 208 */ ED_UNASSIGNED, /* M-P */
- /* 209 */ ED_UNASSIGNED, /* M-Q */
- /* 210 */ ED_UNASSIGNED, /* M-R */
- /* 211 */ ED_UNASSIGNED, /* M-S */
- /* 212 */ ED_UNASSIGNED, /* M-T */
- /* 213 */ ED_UNASSIGNED, /* M-U */
- /* 214 */ ED_UNASSIGNED, /* M-V */
- /* 215 */ ED_UNASSIGNED, /* M-W */
- /* 216 */ ED_UNASSIGNED, /* M-X */
- /* 217 */ ED_UNASSIGNED, /* M-Y */
- /* 218 */ ED_UNASSIGNED, /* M-Z */
- /* 219 */ ED_UNASSIGNED, /* M-[ */
- /* 220 */ ED_UNASSIGNED, /* M-\ */
- /* 221 */ ED_UNASSIGNED, /* M-] */
- /* 222 */ ED_UNASSIGNED, /* M-^ */
- /* 223 */ ED_UNASSIGNED, /* M-_ */
- /* 224 */ ED_UNASSIGNED, /* M-` */
- /* 225 */ ED_UNASSIGNED, /* M-a */
- /* 226 */ ED_UNASSIGNED, /* M-b */
- /* 227 */ ED_UNASSIGNED, /* M-c */
- /* 228 */ ED_UNASSIGNED, /* M-d */
- /* 229 */ ED_UNASSIGNED, /* M-e */
- /* 230 */ ED_UNASSIGNED, /* M-f */
- /* 231 */ ED_UNASSIGNED, /* M-g */
- /* 232 */ ED_UNASSIGNED, /* M-h */
- /* 233 */ ED_UNASSIGNED, /* M-i */
- /* 234 */ ED_UNASSIGNED, /* M-j */
- /* 235 */ ED_UNASSIGNED, /* M-k */
- /* 236 */ ED_UNASSIGNED, /* M-l */
- /* 237 */ ED_UNASSIGNED, /* M-m */
- /* 238 */ ED_UNASSIGNED, /* M-n */
- /* 239 */ ED_UNASSIGNED, /* M-o */
- /* 240 */ ED_UNASSIGNED, /* M-p */
- /* 241 */ ED_UNASSIGNED, /* M-q */
- /* 242 */ ED_UNASSIGNED, /* M-r */
- /* 243 */ ED_UNASSIGNED, /* M-s */
- /* 244 */ ED_UNASSIGNED, /* M-t */
- /* 245 */ ED_UNASSIGNED, /* M-u */
- /* 246 */ ED_UNASSIGNED, /* M-v */
- /* 247 */ ED_UNASSIGNED, /* M-w */
- /* 248 */ ED_UNASSIGNED, /* M-x */
- /* 249 */ ED_UNASSIGNED, /* M-y */
- /* 250 */ ED_UNASSIGNED, /* M-z */
- /* 251 */ ED_UNASSIGNED, /* M-{ */
- /* 252 */ ED_UNASSIGNED, /* M-| */
- /* 253 */ ED_UNASSIGNED, /* M-} */
- /* 254 */ ED_UNASSIGNED, /* M-~ */
- /* 255 */ ED_UNASSIGNED /* M-^? */
+ /* 127 */ VI_DELETE_PREV_CHAR, /* ^? */
+ /* 128 */ ED_INSERT, /* M-^@ */
+ /* 129 */ ED_INSERT, /* M-^A */
+ /* 130 */ ED_INSERT, /* M-^B */
+ /* 131 */ ED_INSERT, /* M-^C */
+ /* 132 */ ED_INSERT, /* M-^D */
+ /* 133 */ ED_INSERT, /* M-^E */
+ /* 134 */ ED_INSERT, /* M-^F */
+ /* 135 */ ED_INSERT, /* M-^G */
+ /* 136 */ ED_INSERT, /* M-^H */
+ /* 137 */ ED_INSERT, /* M-^I */
+ /* 138 */ ED_INSERT, /* M-^J */
+ /* 139 */ ED_INSERT, /* M-^K */
+ /* 140 */ ED_INSERT, /* M-^L */
+ /* 141 */ ED_INSERT, /* M-^M */
+ /* 142 */ ED_INSERT, /* M-^N */
+ /* 143 */ ED_INSERT, /* M-^O */
+ /* 144 */ ED_INSERT, /* M-^P */
+ /* 145 */ ED_INSERT, /* M-^Q */
+ /* 146 */ ED_INSERT, /* M-^R */
+ /* 147 */ ED_INSERT, /* M-^S */
+ /* 148 */ ED_INSERT, /* M-^T */
+ /* 149 */ ED_INSERT, /* M-^U */
+ /* 150 */ ED_INSERT, /* M-^V */
+ /* 151 */ ED_INSERT, /* M-^W */
+ /* 152 */ ED_INSERT, /* M-^X */
+ /* 153 */ ED_INSERT, /* M-^Y */
+ /* 154 */ ED_INSERT, /* M-^Z */
+ /* 155 */ ED_INSERT, /* M-^[ */
+ /* 156 */ ED_INSERT, /* M-^\ */
+ /* 157 */ ED_INSERT, /* M-^] */
+ /* 158 */ ED_INSERT, /* M-^^ */
+ /* 159 */ ED_INSERT, /* M-^_ */
+ /* 160 */ ED_INSERT, /* M-SPACE */
+ /* 161 */ ED_INSERT, /* M-! */
+ /* 162 */ ED_INSERT, /* M-" */
+ /* 163 */ ED_INSERT, /* M-# */
+ /* 164 */ ED_INSERT, /* M-$ */
+ /* 165 */ ED_INSERT, /* M-% */
+ /* 166 */ ED_INSERT, /* M-& */
+ /* 167 */ ED_INSERT, /* M-' */
+ /* 168 */ ED_INSERT, /* M-( */
+ /* 169 */ ED_INSERT, /* M-) */
+ /* 170 */ ED_INSERT, /* M-* */
+ /* 171 */ ED_INSERT, /* M-+ */
+ /* 172 */ ED_INSERT, /* M-, */
+ /* 173 */ ED_INSERT, /* M-- */
+ /* 174 */ ED_INSERT, /* M-. */
+ /* 175 */ ED_INSERT, /* M-/ */
+ /* 176 */ ED_INSERT, /* M-0 */
+ /* 177 */ ED_INSERT, /* M-1 */
+ /* 178 */ ED_INSERT, /* M-2 */
+ /* 179 */ ED_INSERT, /* M-3 */
+ /* 180 */ ED_INSERT, /* M-4 */
+ /* 181 */ ED_INSERT, /* M-5 */
+ /* 182 */ ED_INSERT, /* M-6 */
+ /* 183 */ ED_INSERT, /* M-7 */
+ /* 184 */ ED_INSERT, /* M-8 */
+ /* 185 */ ED_INSERT, /* M-9 */
+ /* 186 */ ED_INSERT, /* M-: */
+ /* 187 */ ED_INSERT, /* M-; */
+ /* 188 */ ED_INSERT, /* M-< */
+ /* 189 */ ED_INSERT, /* M-= */
+ /* 190 */ ED_INSERT, /* M-> */
+ /* 191 */ ED_INSERT, /* M-? */
+ /* 192 */ ED_INSERT, /* M-@ */
+ /* 193 */ ED_INSERT, /* M-A */
+ /* 194 */ ED_INSERT, /* M-B */
+ /* 195 */ ED_INSERT, /* M-C */
+ /* 196 */ ED_INSERT, /* M-D */
+ /* 197 */ ED_INSERT, /* M-E */
+ /* 198 */ ED_INSERT, /* M-F */
+ /* 199 */ ED_INSERT, /* M-G */
+ /* 200 */ ED_INSERT, /* M-H */
+ /* 201 */ ED_INSERT, /* M-I */
+ /* 202 */ ED_INSERT, /* M-J */
+ /* 203 */ ED_INSERT, /* M-K */
+ /* 204 */ ED_INSERT, /* M-L */
+ /* 205 */ ED_INSERT, /* M-M */
+ /* 206 */ ED_INSERT, /* M-N */
+ /* 207 */ ED_INSERT, /* M-O */
+ /* 208 */ ED_INSERT, /* M-P */
+ /* 209 */ ED_INSERT, /* M-Q */
+ /* 210 */ ED_INSERT, /* M-R */
+ /* 211 */ ED_INSERT, /* M-S */
+ /* 212 */ ED_INSERT, /* M-T */
+ /* 213 */ ED_INSERT, /* M-U */
+ /* 214 */ ED_INSERT, /* M-V */
+ /* 215 */ ED_INSERT, /* M-W */
+ /* 216 */ ED_INSERT, /* M-X */
+ /* 217 */ ED_INSERT, /* M-Y */
+ /* 218 */ ED_INSERT, /* M-Z */
+ /* 219 */ ED_INSERT, /* M-[ */
+ /* 220 */ ED_INSERT, /* M-\ */
+ /* 221 */ ED_INSERT, /* M-] */
+ /* 222 */ ED_INSERT, /* M-^ */
+ /* 223 */ ED_INSERT, /* M-_ */
+ /* 224 */ ED_INSERT, /* M-` */
+ /* 225 */ ED_INSERT, /* M-a */
+ /* 226 */ ED_INSERT, /* M-b */
+ /* 227 */ ED_INSERT, /* M-c */
+ /* 228 */ ED_INSERT, /* M-d */
+ /* 229 */ ED_INSERT, /* M-e */
+ /* 230 */ ED_INSERT, /* M-f */
+ /* 231 */ ED_INSERT, /* M-g */
+ /* 232 */ ED_INSERT, /* M-h */
+ /* 233 */ ED_INSERT, /* M-i */
+ /* 234 */ ED_INSERT, /* M-j */
+ /* 235 */ ED_INSERT, /* M-k */
+ /* 236 */ ED_INSERT, /* M-l */
+ /* 237 */ ED_INSERT, /* M-m */
+ /* 238 */ ED_INSERT, /* M-n */
+ /* 239 */ ED_INSERT, /* M-o */
+ /* 240 */ ED_INSERT, /* M-p */
+ /* 241 */ ED_INSERT, /* M-q */
+ /* 242 */ ED_INSERT, /* M-r */
+ /* 243 */ ED_INSERT, /* M-s */
+ /* 244 */ ED_INSERT, /* M-t */
+ /* 245 */ ED_INSERT, /* M-u */
+ /* 246 */ ED_INSERT, /* M-v */
+ /* 247 */ ED_INSERT, /* M-w */
+ /* 248 */ ED_INSERT, /* M-x */
+ /* 249 */ ED_INSERT, /* M-y */
+ /* 250 */ ED_INSERT, /* M-z */
+ /* 251 */ ED_INSERT, /* M-{ */
+ /* 252 */ ED_INSERT, /* M-| */
+ /* 253 */ ED_INSERT, /* M-} */
+ /* 254 */ ED_INSERT, /* M-~ */
+ /* 255 */ ED_INSERT /* M-^? */
};
private const el_action_t el_map_vi_command[] = {
@@ -633,7 +639,7 @@ private const el_action_t el_map_vi_command[] = {
/* 5 */ ED_MOVE_TO_END, /* ^E */
/* 6 */ ED_UNASSIGNED, /* ^F */
/* 7 */ ED_UNASSIGNED, /* ^G */
- /* 8 */ ED_PREV_CHAR, /* ^H */
+ /* 8 */ ED_DELETE_PREV_CHAR, /* ^H */
/* 9 */ ED_UNASSIGNED, /* ^I */
/* 10 */ ED_NEWLINE, /* ^J */
/* 11 */ ED_KILL_LINE, /* ^K */
@@ -660,9 +666,9 @@ private const el_action_t el_map_vi_command[] = {
/* 32 */ ED_NEXT_CHAR, /* SPACE */
/* 33 */ ED_UNASSIGNED, /* ! */
/* 34 */ ED_UNASSIGNED, /* " */
- /* 35 */ ED_UNASSIGNED, /* # */
+ /* 35 */ VI_COMMENT_OUT, /* # */
/* 36 */ ED_MOVE_TO_END, /* $ */
- /* 37 */ ED_UNASSIGNED, /* % */
+ /* 37 */ VI_MATCH, /* % */
/* 38 */ ED_UNASSIGNED, /* & */
/* 39 */ ED_UNASSIGNED, /* ' */
/* 40 */ ED_UNASSIGNED, /* ( */
@@ -671,7 +677,7 @@ private const el_action_t el_map_vi_command[] = {
/* 43 */ ED_NEXT_HISTORY, /* + */
/* 44 */ VI_REPEAT_PREV_CHAR, /* , */
/* 45 */ ED_PREV_HISTORY, /* - */
- /* 46 */ ED_UNASSIGNED, /* . */
+ /* 46 */ VI_REDO, /* . */
/* 47 */ VI_SEARCH_PREV, /* / */
/* 48 */ VI_ZERO, /* 0 */
/* 49 */ ED_ARGUMENT_DIGIT, /* 1 */
@@ -689,14 +695,14 @@ private const el_action_t el_map_vi_command[] = {
/* 61 */ ED_UNASSIGNED, /* = */
/* 62 */ ED_UNASSIGNED, /* > */
/* 63 */ VI_SEARCH_NEXT, /* ? */
- /* 64 */ ED_UNASSIGNED, /* @ */
+ /* 64 */ VI_ALIAS, /* @ */
/* 65 */ VI_ADD_AT_EOL, /* A */
- /* 66 */ VI_PREV_SPACE_WORD, /* B */
+ /* 66 */ VI_PREV_BIG_WORD, /* B */
/* 67 */ VI_CHANGE_TO_EOL, /* C */
/* 68 */ ED_KILL_LINE, /* D */
- /* 69 */ VI_TO_END_WORD, /* E */
+ /* 69 */ VI_END_BIG_WORD, /* E */
/* 70 */ VI_PREV_CHAR, /* F */
- /* 71 */ ED_UNASSIGNED, /* G */
+ /* 71 */ VI_TO_HISTORY_LINE, /* G */
/* 72 */ ED_UNASSIGNED, /* H */
/* 73 */ VI_INSERT_AT_BOL, /* I */
/* 74 */ ED_SEARCH_NEXT_HISTORY, /* J */
@@ -710,17 +716,17 @@ private const el_action_t el_map_vi_command[] = {
/* 82 */ VI_REPLACE_MODE, /* R */
/* 83 */ VI_SUBSTITUTE_LINE, /* S */
/* 84 */ VI_TO_PREV_CHAR, /* T */
- /* 85 */ ED_UNASSIGNED, /* U */
+ /* 85 */ VI_UNDO_LINE, /* U */
/* 86 */ ED_UNASSIGNED, /* V */
- /* 87 */ VI_NEXT_SPACE_WORD, /* W */
+ /* 87 */ VI_NEXT_BIG_WORD, /* W */
/* 88 */ ED_DELETE_PREV_CHAR, /* X */
- /* 89 */ ED_UNASSIGNED, /* Y */
+ /* 89 */ VI_YANK_END, /* Y */
/* 90 */ ED_UNASSIGNED, /* Z */
/* 91 */ ED_SEQUENCE_LEAD_IN, /* [ */
/* 92 */ ED_UNASSIGNED, /* \ */
/* 93 */ ED_UNASSIGNED, /* ] */
/* 94 */ ED_MOVE_TO_BEG, /* ^ */
- /* 95 */ ED_UNASSIGNED, /* _ */
+ /* 95 */ VI_HISTORY_WORD, /* _ */
/* 96 */ ED_UNASSIGNED, /* ` */
/* 97 */ VI_ADD, /* a */
/* 98 */ VI_PREV_WORD, /* b */
@@ -743,13 +749,13 @@ private const el_action_t el_map_vi_command[] = {
/* 115 */ VI_SUBSTITUTE_CHAR, /* s */
/* 116 */ VI_TO_NEXT_CHAR, /* t */
/* 117 */ VI_UNDO, /* u */
- /* 118 */ ED_UNASSIGNED, /* v */
+ /* 118 */ VI_HISTEDIT, /* v */
/* 119 */ VI_NEXT_WORD, /* w */
/* 120 */ ED_DELETE_NEXT_CHAR, /* x */
- /* 121 */ ED_UNASSIGNED, /* y */
+ /* 121 */ VI_YANK, /* y */
/* 122 */ ED_UNASSIGNED, /* z */
/* 123 */ ED_UNASSIGNED, /* { */
- /* 124 */ ED_UNASSIGNED, /* | */
+ /* 124 */ VI_TO_COLUMN, /* | */
/* 125 */ ED_UNASSIGNED, /* } */
/* 126 */ VI_CHANGE_CASE, /* ~ */
/* 127 */ ED_DELETE_PREV_CHAR, /* ^? */
@@ -1005,7 +1011,8 @@ map_init_meta(EditLine *el)
break;
default:
buf[1] = i & 0177;
- key_add(el, buf, key_map_cmd(el, (int) map[i]), XK_CMD);
+ el_key_add(el, buf,
+ el_key_map_cmd(el, (int) map[i]), XK_CMD);
break;
}
map[(int) buf[0]] = ED_SEQUENCE_LEAD_IN;
@@ -1027,7 +1034,7 @@ map_init_vi(EditLine *el)
el->el_map.type = MAP_VI;
el->el_map.current = el->el_map.key;
- key_reset(el);
+ el_key_reset(el);
for (i = 0; i < N_KEYS; i++) {
key[i] = vii[i];
@@ -1056,7 +1063,7 @@ map_init_emacs(EditLine *el)
el->el_map.type = MAP_EMACS;
el->el_map.current = el->el_map.key;
- key_reset(el);
+ el_key_reset(el);
for (i = 0; i < N_KEYS; i++) {
key[i] = emacs[i];
@@ -1069,7 +1076,7 @@ map_init_emacs(EditLine *el)
buf[0] = CONTROL('X');
buf[1] = CONTROL('X');
buf[2] = 0;
- key_add(el, buf, key_map_cmd(el, EM_EXCHANGE_MARK), XK_CMD);
+ el_key_add(el, buf, el_key_map_cmd(el, EM_EXCHANGE_MARK), XK_CMD);
tty_bind_char(el, 1);
term_bind_arrow(el);
@@ -1126,7 +1133,7 @@ map_print_key(EditLine *el, el_action_t *map, const char *in)
el_bindings_t *bp;
if (in[0] == '\0' || in[1] == '\0') {
- (void) key__decode_str(in, outbuf, "");
+ (void) el_key__decode_str(in, outbuf, "");
for (bp = el->el_map.help; bp->name != NULL; bp++)
if (bp->func == map[(unsigned char) *in]) {
(void) fprintf(el->el_outfile,
@@ -1134,7 +1141,7 @@ map_print_key(EditLine *el, el_action_t *map, const char *in)
return;
}
} else
- key_print(el, in);
+ el_key_print(el, in);
}
@@ -1156,20 +1163,20 @@ map_print_some_keys(EditLine *el, el_action_t *map, int first, int last)
if (first == last)
(void) fprintf(el->el_outfile,
"%-15s-> is undefined\n",
- key__decode_str(firstbuf, unparsbuf, STRQQ));
+ el_key__decode_str(firstbuf, unparsbuf, STRQQ));
return;
}
for (bp = el->el_map.help; bp->name != NULL; bp++) {
if (bp->func == map[first]) {
if (first == last) {
(void) fprintf(el->el_outfile, "%-15s-> %s\n",
- key__decode_str(firstbuf, unparsbuf, STRQQ),
+ el_key__decode_str(firstbuf, unparsbuf, STRQQ),
bp->name);
} else {
(void) fprintf(el->el_outfile,
"%-4s to %-7s-> %s\n",
- key__decode_str(firstbuf, unparsbuf, STRQQ),
- key__decode_str(lastbuf, extrabuf, STRQQ),
+ el_key__decode_str(firstbuf, unparsbuf, STRQQ),
+ el_key__decode_str(lastbuf, extrabuf, STRQQ),
bp->name);
}
return;
@@ -1223,7 +1230,7 @@ map_print_all_keys(EditLine *el)
map_print_some_keys(el, el->el_map.alt, prev, i - 1);
(void) fprintf(el->el_outfile, "Multi-character bindings\n");
- key_print(el, "");
+ el_key_print(el, "");
(void) fprintf(el->el_outfile, "Arrow key bindings\n");
term_print_arrow(el, "");
}
@@ -1316,9 +1323,9 @@ map_bind(EditLine *el, int argc, const char **argv)
return (-1);
}
if (in[1])
- (void) key_delete(el, in);
+ (void) el_key_delete(el, in);
else if (map[(unsigned char) *in] == ED_SEQUENCE_LEAD_IN)
- (void) key_delete(el, in);
+ (void) el_key_delete(el, in);
else
map[(unsigned char) *in] = ED_UNASSIGNED;
return (0);
@@ -1346,9 +1353,9 @@ map_bind(EditLine *el, int argc, const char **argv)
return (-1);
}
if (key)
- term_set_arrow(el, in, key_map_str(el, out), ntype);
+ term_set_arrow(el, in, el_key_map_str(el, out), ntype);
else
- key_add(el, in, key_map_str(el, out), ntype);
+ el_key_add(el, in, el_key_map_str(el, out), ntype);
map[(unsigned char) *in] = ED_SEQUENCE_LEAD_IN;
break;
@@ -1359,13 +1366,13 @@ map_bind(EditLine *el, int argc, const char **argv)
return (-1);
}
if (key)
- term_set_arrow(el, in, key_map_str(el, out), ntype);
+ term_set_arrow(el, in, el_key_map_str(el, out), ntype);
else {
if (in[1]) {
- key_add(el, in, key_map_cmd(el, cmd), ntype);
+ el_key_add(el, in, el_key_map_cmd(el, cmd), ntype);
map[(unsigned char) *in] = ED_SEQUENCE_LEAD_IN;
} else {
- key_clear(el, map, in);
+ el_key_clear(el, map, in);
map[(unsigned char) *in] = cmd;
}
}
diff --git a/cmd-line-utils/libedit/map.h b/cmd-line-utils/libedit/map.h
index 6033eaf1a87..3c9948ccf88 100644
--- a/cmd-line-utils/libedit/map.h
+++ b/cmd-line-utils/libedit/map.h
@@ -1,4 +1,4 @@
-/* $NetBSD: map.h,v 1.6 2001/01/09 17:22:09 jdolecek Exp $ */
+/* $NetBSD: map.h,v 1.7 2002/03/18 16:00:56 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
diff --git a/cmd-line-utils/libedit/np/fgetln.c b/cmd-line-utils/libedit/np/fgetln.c
new file mode 100644
index 00000000000..93da9914dc8
--- /dev/null
+++ b/cmd-line-utils/libedit/np/fgetln.c
@@ -0,0 +1,88 @@
+/* $NetBSD: fgetln.c,v 1.1.1.1 1999/04/12 07:43:21 crooksa Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+char *
+fgetln(fp, len)
+ FILE *fp;
+ size_t *len;
+{
+ static char *buf = NULL;
+ static size_t bufsiz = 0;
+ char *ptr;
+
+
+ if (buf == NULL) {
+ bufsiz = BUFSIZ;
+ if ((buf = malloc(bufsiz)) == NULL)
+ return NULL;
+ }
+
+ if (fgets(buf, bufsiz, fp) == NULL)
+ return NULL;
+ *len = 0;
+
+ while ((ptr = strchr(&buf[*len], '\n')) == NULL) {
+ size_t nbufsiz = bufsiz + BUFSIZ;
+ char *nbuf = realloc(buf, nbufsiz);
+
+ if (nbuf == NULL) {
+ int oerrno = errno;
+ free(buf);
+ errno = oerrno;
+ buf = NULL;
+ return NULL;
+ } else
+ buf = nbuf;
+
+ *len = bufsiz;
+ if (fgets(&buf[bufsiz], BUFSIZ, fp) == NULL)
+ return buf;
+
+ bufsiz = nbufsiz;
+ }
+
+ *len = (ptr - buf) + 1;
+ return buf;
+}
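history_load() above reads its input with fgetln(), a BSD extension, so this np/fgetln.c replacement is built on systems that lack it. A minimal sketch of the calling convention, assuming any readable text file: the returned pointer refers to a buffer owned by fgetln() that is reused on the next call, and *len includes the trailing newline when one was read.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	char *fgetln(FILE *, size_t *);	/* provided by np/fgetln.c when the system lacks it */

	int
	main(void)
	{
		FILE *fp = fopen("/etc/hosts", "r");	/* any text file will do */
		char *line;
		size_t len;

		if (fp == NULL)
			return 1;
		while ((line = fgetln(fp, &len)) != NULL) {
			/* The line is not guaranteed to be NUL-terminated by the
			   portable interface, so copy it before treating it as a
			   C string. */
			char *copy = malloc(len + 1);
			if (copy == NULL)
				break;
			memcpy(copy, line, len);
			copy[len] = '\0';
			fputs(copy, stdout);
			free(copy);
		}
		fclose(fp);
		return 0;
	}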
diff --git a/cmd-line-utils/libedit/np/strlcat.c b/cmd-line-utils/libedit/np/strlcat.c
new file mode 100644
index 00000000000..6c9f1e92d79
--- /dev/null
+++ b/cmd-line-utils/libedit/np/strlcat.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *rcsid = "$OpenBSD: strlcat.c,v 1.2 1999/06/17 16:28:58 millert Exp $";
+#endif /* LIBC_SCCS and not lint */
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD: src/lib/libc/string/strlcat.c,v 1.2.4.2 2001/07/09 23:30:06 obrien Exp $";
+#endif
+
+#include <sys/types.h>
+#include <string.h>
+
+/*
+ * Appends src to string dst of size siz (unlike strncat, siz is the
+ * full size of dst, not space left). At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
+ * Returns strlen(initial dst) + strlen(src); if retval >= siz,
+ * truncation occurred.
+ */
+size_t strlcat(dst, src, siz)
+ char *dst;
+ const char *src;
+ size_t siz;
+{
+ register char *d = dst;
+ register const char *s = src;
+ register size_t n = siz;
+ size_t dlen;
+
+ /* Find the end of dst and adjust bytes left but don't go past end */
+ while (n-- != 0 && *d != '\0')
+ d++;
+ dlen = d - dst;
+ n = siz - dlen;
+
+ if (n == 0)
+ return(dlen + strlen(s));
+ while (*s != '\0') {
+ if (n != 1) {
+ *d++ = *s;
+ n--;
+ }
+ s++;
+ }
+ *d = '\0';
+
+ return(dlen + (s - src)); /* count does not include NUL */
+}
diff --git a/cmd-line-utils/libedit/np/strlcpy.c b/cmd-line-utils/libedit/np/strlcpy.c
new file mode 100644
index 00000000000..1f154bcf2ea
--- /dev/null
+++ b/cmd-line-utils/libedit/np/strlcpy.c
@@ -0,0 +1,75 @@
+/* $OpenBSD: strlcpy.c,v 1.4 1999/05/01 18:56:41 millert Exp $ */
+
+/*
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char *rcsid = "$OpenBSD: strlcpy.c,v 1.4 1999/05/01 18:56:41 millert Exp $";
+#endif
+#endif /* LIBC_SCCS and not lint */
+#ifndef lint
+static const char rcsid[] =
+ "$FreeBSD: src/lib/libc/string/strlcpy.c,v 1.2.4.1 2001/07/09 23:30:06 obrien Exp $";
+#endif
+
+#include <sys/types.h>
+#include <string.h>
+
+/*
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
+ * Returns strlen(src); if retval >= siz, truncation occurred.
+ */
+size_t strlcpy(dst, src, siz)
+ char *dst;
+ const char *src;
+ size_t siz;
+{
+ register char *d = dst;
+ register const char *s = src;
+ register size_t n = siz;
+
+ /* Copy as many bytes as will fit */
+ if (n != 0 && --n != 0) {
+ do {
+ if ((*d++ = *s++) == 0)
+ break;
+ } while (--n != 0);
+ }
+
+ /* Not enough room in dst, add NUL and traverse rest of src */
+ if (n == 0) {
+ if (siz != 0)
+ *d = '\0'; /* NUL-terminate dst */
+ while (*s++)
+ ;
+ }
+
+ return(s - src - 1); /* count does not include NUL */
+}
diff --git a/cmd-line-utils/libedit/np/unvis.c b/cmd-line-utils/libedit/np/unvis.c
new file mode 100644
index 00000000000..895ff2059ac
--- /dev/null
+++ b/cmd-line-utils/libedit/np/unvis.c
@@ -0,0 +1,322 @@
+/* $NetBSD: unvis.c,v 1.22 2002/03/23 17:38:27 christos Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "config.h"
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)unvis.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: unvis.c,v 1.22 2002/03/23 17:38:27 christos Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+#define __LIBC12_SOURCE__
+
+#include <sys/types.h>
+
+#include <assert.h>
+#include <ctype.h>
+#include <stdio.h>
+#include "np/vis.h"
+
+#ifdef __weak_alias
+__weak_alias(strunvis,_strunvis)
+__weak_alias(unvis,_unvis)
+#endif
+
+#ifdef __warn_references
+__warn_references(unvis,
+ "warning: reference to compatibility unvis(); include <vis.h> for correct reference")
+#endif
+
+#if !HAVE_VIS_H
+/*
+ * decode driven by state machine
+ */
+#define S_GROUND 0 /* haven't seen escape char */
+#define S_START 1 /* start decoding special sequence */
+#define S_META 2 /* metachar started (M) */
+#define S_META1 3 /* metachar more, regular char (-) */
+#define S_CTRL 4 /* control char started (^) */
+#define S_OCTAL2 5 /* octal digit 2 */
+#define S_OCTAL3 6 /* octal digit 3 */
+#define S_HEX1 7 /* hex digit */
+#define S_HEX2 8 /* hex digit 2 */
+
+#define isoctal(c) (((u_char)(c)) >= '0' && ((u_char)(c)) <= '7')
+#define xtod(c) (isdigit(c) ? (c - '0') : ((tolower(c) - 'a') + 10))
+
+int
+unvis(cp, c, astate, flag)
+ char *cp;
+ int c;
+ int *astate, flag;
+{
+ return __unvis13(cp, (int)c, astate, flag);
+}
+
+/*
+ * unvis - decode characters previously encoded by vis
+ */
+int
+__unvis13(cp, c, astate, flag)
+ char *cp;
+ int c;
+ int *astate, flag;
+{
+
+ _DIAGASSERT(cp != NULL);
+ _DIAGASSERT(astate != NULL);
+
+ if (flag & UNVIS_END) {
+ if (*astate == S_OCTAL2 || *astate == S_OCTAL3
+ || *astate == S_HEX2) {
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ }
+ return (*astate == S_GROUND ? UNVIS_NOCHAR : UNVIS_SYNBAD);
+ }
+
+ switch (*astate) {
+
+ case S_GROUND:
+ *cp = 0;
+ if (c == '\\') {
+ *astate = S_START;
+ return (0);
+ }
+ if ((flag & VIS_HTTPSTYLE) && c == '%') {
+ *astate = S_HEX1;
+ return (0);
+ }
+ *cp = c;
+ return (UNVIS_VALID);
+
+ case S_START:
+ switch(c) {
+ case '\\':
+ *cp = c;
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7':
+ *cp = (c - '0');
+ *astate = S_OCTAL2;
+ return (0);
+ case 'M':
+ *cp = (char)0200;
+ *astate = S_META;
+ return (0);
+ case '^':
+ *astate = S_CTRL;
+ return (0);
+ case 'n':
+ *cp = '\n';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 'r':
+ *cp = '\r';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 'b':
+ *cp = '\b';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 'a':
+ *cp = '\007';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 'v':
+ *cp = '\v';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 't':
+ *cp = '\t';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 'f':
+ *cp = '\f';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 's':
+ *cp = ' ';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case 'E':
+ *cp = '\033';
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+ case '\n':
+ /*
+ * hidden newline
+ */
+ *astate = S_GROUND;
+ return (UNVIS_NOCHAR);
+ case '$':
+ /*
+ * hidden marker
+ */
+ *astate = S_GROUND;
+ return (UNVIS_NOCHAR);
+ }
+ *astate = S_GROUND;
+ return (UNVIS_SYNBAD);
+
+ case S_META:
+ if (c == '-')
+ *astate = S_META1;
+ else if (c == '^')
+ *astate = S_CTRL;
+ else {
+ *astate = S_GROUND;
+ return (UNVIS_SYNBAD);
+ }
+ return (0);
+
+ case S_META1:
+ *astate = S_GROUND;
+ *cp |= c;
+ return (UNVIS_VALID);
+
+ case S_CTRL:
+ if (c == '?')
+ *cp |= 0177;
+ else
+ *cp |= c & 037;
+ *astate = S_GROUND;
+ return (UNVIS_VALID);
+
+ case S_OCTAL2: /* second possible octal digit */
+ if (isoctal(c)) {
+ /*
+ * yes - and maybe a third
+ */
+ *cp = (*cp << 3) + (c - '0');
+ *astate = S_OCTAL3;
+ return (0);
+ }
+ /*
+ * no - done with current sequence, push back passed char
+ */
+ *astate = S_GROUND;
+ return (UNVIS_VALIDPUSH);
+
+ case S_OCTAL3: /* third possible octal digit */
+ *astate = S_GROUND;
+ if (isoctal(c)) {
+ *cp = (*cp << 3) + (c - '0');
+ return (UNVIS_VALID);
+ }
+ /*
+ * we were done, push back passed char
+ */
+ return (UNVIS_VALIDPUSH);
+ case S_HEX1:
+ if (isxdigit(c)) {
+ *cp = xtod(c);
+ *astate = S_HEX2;
+ return (0);
+ }
+ /*
+ * no - done with current sequence, push back passed char
+ */
+ *astate = S_GROUND;
+ return (UNVIS_VALIDPUSH);
+ case S_HEX2:
+ *astate = S_GROUND;
+ if (isxdigit(c)) {
+ *cp = xtod(c) | (*cp << 4);
+ return (UNVIS_VALID);
+ }
+ return (UNVIS_VALIDPUSH);
+ default:
+ /*
+ * decoder in unknown state - (probably uninitialized)
+ */
+ *astate = S_GROUND;
+ return (UNVIS_SYNBAD);
+ }
+}
+
+/*
+ * strunvis - decode src into dst
+ *
+ * Number of chars decoded into dst is returned, -1 on error.
+ * Dst is null terminated.
+ */
+
+int
+strunvisx(dst, src, flag)
+ char *dst;
+ const char *src;
+ int flag;
+{
+ char c;
+ char *start = dst;
+ int state = 0;
+
+ _DIAGASSERT(src != NULL);
+ _DIAGASSERT(dst != NULL);
+
+ while ((c = *src++) != '\0') {
+ again:
+ switch (__unvis13(dst, c, &state, flag)) {
+ case UNVIS_VALID:
+ dst++;
+ break;
+ case UNVIS_VALIDPUSH:
+ dst++;
+ goto again;
+ case 0:
+ case UNVIS_NOCHAR:
+ break;
+ default:
+ return (-1);
+ }
+ }
+ if (__unvis13(dst, c, &state, UNVIS_END) == UNVIS_VALID)
+ dst++;
+ *dst = '\0';
+ return (dst - start);
+}
+
+int
+strunvis(dst, src)
+ char *dst;
+ const char *src;
+{
+ return strunvisx(dst, src, 0);
+}
+#endif
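history_save() and history_load() above pair strvis() with strunvis() so that embedded whitespace survives the history file. A minimal round-trip sketch, assuming the np/vis.h declarations used elsewhere in this tree; decoding never expands, so the strunvis() destination only needs to be as large as the encoded string plus its terminating NUL:

	#include <stdio.h>
	#include <string.h>
	#include "np/vis.h"	/* strvis(), strunvis(), VIS_WHITE */

	int
	main(void)
	{
		const char *orig = "line one\tline\ttwo\n";
		char encoded[4 * 32 + 1];	/* strvis() may expand each byte to four */
		char decoded[sizeof(encoded)];

		(void) strvis(encoded, orig, VIS_WHITE);	/* whitespace becomes octal escapes such as \040 */
		printf("encoded: %s\n", encoded);

		if (strunvis(decoded, encoded) == -1)
			return 1;
		printf("round trip ok: %d\n", strcmp(orig, decoded) == 0);	/* prints 1 */
		return 0;
	}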
diff --git a/cmd-line-utils/libedit/np/vis.c b/cmd-line-utils/libedit/np/vis.c
new file mode 100644
index 00000000000..db42443800b
--- /dev/null
+++ b/cmd-line-utils/libedit/np/vis.c
@@ -0,0 +1,347 @@
+/* $NetBSD: vis.c,v 1.22 2002/03/23 17:38:27 christos Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if defined(LIBC_SCCS) && !defined(lint)
+__RCSID("$NetBSD: vis.c,v 1.22 2002/03/23 17:38:27 christos Exp $");
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/types.h>
+#include <assert.h>
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+#include <stdlib.h>
+
+#include "np/vis.h"
+
+#ifdef __weak_alias
+__weak_alias(strsvis,_strsvis)
+__weak_alias(strsvisx,_strsvisx)
+__weak_alias(strvis,_strvis)
+__weak_alias(strvisx,_strvisx)
+__weak_alias(svis,_svis)
+__weak_alias(vis,_vis)
+#endif
+
+#if !HAVE_VIS_H
+#include <ctype.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+
+#undef BELL
+#if defined(__STDC__)
+#define BELL '\a'
+#else
+#define BELL '\007'
+#endif
+
+#define isoctal(c) (((unsigned char)(c)) >= '0' && ((unsigned char)(c)) <= '7')
+#define iswhite(c) (c == ' ' || c == '\t' || c == '\n')
+#define issafe(c) (c == '\b' || c == BELL || c == '\r')
+#define xtoa(c) "0123456789abcdef"[c]
+
+#define MAXEXTRAS 5
+
+
+#define MAKEEXTRALIST(flag, extra, orig) \
+do { \
+ const char *o = orig; \
+ char *e; \
+ while (*o++) \
+ continue; \
+ extra = alloca((size_t)((o - orig) + MAXEXTRAS)); \
+ for (o = orig, e = extra; (*e++ = *o++) != '\0';) \
+ continue; \
+ e--; \
+ if (flag & VIS_SP) *e++ = ' '; \
+ if (flag & VIS_TAB) *e++ = '\t'; \
+ if (flag & VIS_NL) *e++ = '\n'; \
+ if ((flag & VIS_NOSLASH) == 0) *e++ = '\\'; \
+ *e = '\0'; \
+} while (/*CONSTCOND*/0)
+
+
+/*
+ * This is HVIS, the macro of vis used to HTTP style (RFC 1808)
+ */
+#define HVIS(dst, c, flag, nextc, extra) \
+do \
+ if (!isascii(c) || !isalnum(c) || strchr("$-_.+!*'(),", c) != NULL) { \
+ *dst++ = '%'; \
+ *dst++ = xtoa(((unsigned int)c >> 4) & 0xf); \
+ *dst++ = xtoa((unsigned int)c & 0xf); \
+ } else { \
+ SVIS(dst, c, flag, nextc, extra); \
+ } \
+while (/*CONSTCOND*/0)
+
+/*
+ * This is SVIS, the central macro of vis.
+ * dst: Pointer to the destination buffer
+ * c: Character to encode
+ * flag: Flag word
+ * nextc: The character following 'c'
+ * extra: Pointer to the list of extra characters to be
+ * backslash-protected.
+ */
+#define SVIS(dst, c, flag, nextc, extra) \
+do { \
+ int isextra, isc; \
+ isextra = strchr(extra, c) != NULL; \
+ if (!isextra && isascii(c) && (isgraph(c) || iswhite(c) || \
+ ((flag & VIS_SAFE) && issafe(c)))) { \
+ *dst++ = c; \
+ break; \
+ } \
+ isc = 0; \
+ if (flag & VIS_CSTYLE) { \
+ switch (c) { \
+ case '\n': \
+ isc = 1; *dst++ = '\\'; *dst++ = 'n'; \
+ break; \
+ case '\r': \
+ isc = 1; *dst++ = '\\'; *dst++ = 'r'; \
+ break; \
+ case '\b': \
+ isc = 1; *dst++ = '\\'; *dst++ = 'b'; \
+ break; \
+ case BELL: \
+ isc = 1; *dst++ = '\\'; *dst++ = 'a'; \
+ break; \
+ case '\v': \
+ isc = 1; *dst++ = '\\'; *dst++ = 'v'; \
+ break; \
+ case '\t': \
+ isc = 1; *dst++ = '\\'; *dst++ = 't'; \
+ break; \
+ case '\f': \
+ isc = 1; *dst++ = '\\'; *dst++ = 'f'; \
+ break; \
+ case ' ': \
+ isc = 1; *dst++ = '\\'; *dst++ = 's'; \
+ break; \
+ case '\0': \
+ isc = 1; *dst++ = '\\'; *dst++ = '0'; \
+ if (isoctal(nextc)) { \
+ *dst++ = '0'; \
+ *dst++ = '0'; \
+ } \
+ } \
+ } \
+ if (isc) break; \
+ if (isextra || ((c & 0177) == ' ') || (flag & VIS_OCTAL)) { \
+ *dst++ = '\\'; \
+ *dst++ = (unsigned char)(((unsigned int)(unsigned char)c >> 6) & 03) + '0'; \
+ *dst++ = (unsigned char)(((unsigned int)(unsigned char)c >> 3) & 07) + '0'; \
+ *dst++ = (c & 07) + '0'; \
+ } else { \
+ if ((flag & VIS_NOSLASH) == 0) *dst++ = '\\'; \
+ if (c & 0200) { \
+ c &= 0177; *dst++ = 'M'; \
+ } \
+ if (iscntrl(c)) { \
+ *dst++ = '^'; \
+ if (c == 0177) \
+ *dst++ = '?'; \
+ else \
+ *dst++ = c + '@'; \
+ } else { \
+ *dst++ = '-'; *dst++ = c; \
+ } \
+ } \
+} while (/*CONSTCOND*/0)
+
+
+/*
+ * svis - visually encode characters, also encoding the characters
+ * pointed to by `extra'
+ */
+char *
+svis(dst, c, flag, nextc, extra)
+ char *dst;
+ int c, flag, nextc;
+ const char *extra;
+{
+ char *nextra;
+ _DIAGASSERT(dst != NULL);
+ _DIAGASSERT(extra != NULL);
+ MAKEEXTRALIST(flag, nextra, extra);
+ if (flag & VIS_HTTPSTYLE)
+ HVIS(dst, c, flag, nextc, nextra);
+ else
+ SVIS(dst, c, flag, nextc, nextra);
+ *dst = '\0';
+ return(dst);
+}
+
+
+/*
+ * strsvis, strsvisx - visually encode characters from src into dst
+ *
+ * Extra is a pointer to a \0-terminated list of characters to
+ * be encoded, too. These functions are useful e. g. to
+ * encode strings in such a way so that they are not interpreted
+ * by a shell.
+ *
+ * Dst must be 4 times the size of src to account for possible
+ * expansion. The length of dst, not including the trailing NULL,
+ * is returned.
+ *
+ * Strsvisx encodes exactly len bytes from src into dst.
+ * This is useful for encoding a block of data.
+ */
+int
+strsvis(dst, src, flag, extra)
+ char *dst;
+ const char *src;
+ int flag;
+ const char *extra;
+{
+ char c;
+ char *start;
+ char *nextra;
+
+ _DIAGASSERT(dst != NULL);
+ _DIAGASSERT(src != NULL);
+ _DIAGASSERT(extra != NULL);
+ MAKEEXTRALIST(flag, nextra, extra);
+ if (flag & VIS_HTTPSTYLE) {
+ for (start = dst; (c = *src++) != '\0'; /* empty */)
+ HVIS(dst, c, flag, *src, nextra);
+ } else {
+ for (start = dst; (c = *src++) != '\0'; /* empty */)
+ SVIS(dst, c, flag, *src, nextra);
+ }
+ *dst = '\0';
+ return (dst - start);
+}
+
+
+int
+strsvisx(dst, src, len, flag, extra)
+ char *dst;
+ const char *src;
+ size_t len;
+ int flag;
+ const char *extra;
+{
+ char c;
+ char *start;
+ char *nextra;
+
+ _DIAGASSERT(dst != NULL);
+ _DIAGASSERT(src != NULL);
+ _DIAGASSERT(extra != NULL);
+ MAKEEXTRALIST(flag, nextra, extra);
+
+ if (flag & VIS_HTTPSTYLE) {
+ for (start = dst; len > 0; len--) {
+ c = *src++;
+ HVIS(dst, c, flag, len ? *src : '\0', nextra);
+ }
+ } else {
+ for (start = dst; len > 0; len--) {
+ c = *src++;
+ SVIS(dst, c, flag, len ? *src : '\0', nextra);
+ }
+ }
+ *dst = '\0';
+ return (dst - start);
+}
+
+
+/*
+ * vis - visually encode characters
+ */
+char *
+vis(dst, c, flag, nextc)
+ char *dst;
+ int c, flag, nextc;
+
+{
+ char *extra;
+
+ _DIAGASSERT(dst != NULL);
+
+ MAKEEXTRALIST(flag, extra, "");
+ if (flag & VIS_HTTPSTYLE)
+ HVIS(dst, c, flag, nextc, extra);
+ else
+ SVIS(dst, c, flag, nextc, extra);
+ *dst = '\0';
+ return (dst);
+}
+
+
+/*
+ * strvis, strvisx - visually encode characters from src into dst
+ *
+ * Dst must be 4 times the size of src to account for possible
+ * expansion. The length of dst, not including the trailing NULL,
+ * is returned.
+ *
+ * Strvisx encodes exactly len bytes from src into dst.
+ * This is useful for encoding a block of data.
+ */
+int
+strvis(dst, src, flag)
+ char *dst;
+ const char *src;
+ int flag;
+{
+ char *extra;
+
+ MAKEEXTRALIST(flag, extra, "");
+ return (strsvis(dst, src, flag, extra));
+}
+
+
+int
+strvisx(dst, src, len, flag)
+ char *dst;
+ const char *src;
+ size_t len;
+ int flag;
+{
+ char *extra;
+
+ MAKEEXTRALIST(flag, extra, "");
+ return (strsvisx(dst, src, len, flag, extra));
+}
+#endif
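The strsvis() variants described above take an extra NUL-terminated list of characters to encode in addition to the default set, which is how a caller can protect, for example, shell metacharacters. A minimal sketch, again assuming the np/vis.h declarations; the destination follows the documented worst case of four output bytes per input byte:

	#include <stdio.h>
	#include "np/vis.h"	/* strsvis() and the VIS_* flags */

	int
	main(void)
	{
		const char *cmd = "echo $HOME `date`";
		char out[4 * 32 + 1];	/* worst case: every input byte expands to four */
		int n;

		/* Encode '$' and '`' in addition to the default non-graphic set. */
		n = strsvis(out, cmd, 0, "$`");
		printf("%d bytes: %s\n", n, out);	/* '$' and '`' appear as \044 and \140 */
		return 0;
	}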
diff --git a/cmd-line-utils/libedit/np/vis.h b/cmd-line-utils/libedit/np/vis.h
new file mode 100644
index 00000000000..1a49c9e3ed2
--- /dev/null
+++ b/cmd-line-utils/libedit/np/vis.h
@@ -0,0 +1,96 @@
+/* $NetBSD: vis.h,v 1.12 2002/03/23 17:39:05 christos Exp $ */
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vis.h 8.1 (Berkeley) 6/2/93
+ */
+
+#ifndef _VIS_H_
+#define _VIS_H_
+
+#ifdef HAVE_SYS_CDEFS_H
+#include <sys/cdefs.h>
+#endif
+
+/*
+ * to select alternate encoding format
+ */
+#define VIS_OCTAL 0x01 /* use octal \ddd format */
+#define VIS_CSTYLE 0x02 /* use \[nrft0..] where appropriate */
+
+/*
+ * to alter set of characters encoded (default is to encode all
+ * non-graphic except space, tab, and newline).
+ */
+#define VIS_SP 0x04 /* also encode space */
+#define VIS_TAB 0x08 /* also encode tab */
+#define VIS_NL 0x10 /* also encode newline */
+#define VIS_WHITE (VIS_SP | VIS_TAB | VIS_NL)
+#define VIS_SAFE 0x20 /* only encode "unsafe" characters */
+
+/*
+ * other
+ */
+#define VIS_NOSLASH 0x40 /* inhibit printing '\' */
+#define VIS_HTTPSTYLE 0x80 /* http-style escape % HEX HEX */
+
+/*
+ * unvis return codes
+ */
+#define UNVIS_VALID 1 /* character valid */
+#define UNVIS_VALIDPUSH 2 /* character valid, push back passed char */
+#define UNVIS_NOCHAR 3 /* valid sequence, no character produced */
+#define UNVIS_SYNBAD -1 /* unrecognized escape sequence */
+#define UNVIS_ERROR -2 /* decoder in unknown state (unrecoverable) */
+
+/*
+ * unvis flags
+ */
+#define UNVIS_END 1 /* no more characters */
+
+char *vis(char *, int, int, int);
+char *svis(char *, int, int, int, const char *);
+int strvis(char *, const char *, int);
+int strsvis(char *, const char *, int, const char *);
+int strvisx(char *, const char *, size_t, int);
+int strsvisx(char *, const char *, size_t, int, const char *);
+int strunvis(char *, const char *);
+int strunvisx(char *, const char *, int);
+#ifdef __LIBC12_SOURCE__
+int unvis(char *, int, int *, int);
+int __unvis13(char *, int, int *, int);
+#else
+int unvis(char *, int, int *, int) __RENAME(__unvis13);
+#endif
+
+#endif /* !_VIS_H_ */
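
For orientation, a small hedged example of the flags declared above, exercised through strvis() and strunvis() exactly as prototyped here; the encoded bytes are only indicative, since the exact output depends on the chosen flag combination.

#include <stdio.h>
#include <string.h>
#include "vis.h"

int main(void)
{
    const char *raw = "a\tb\177";        /* tab and DEL */
    char enc[4 * 8 + 1];                 /* 4 times the source, plus NUL */
    char dec[8 + 1];

    /* C-style escapes; VIS_WHITE also encodes space/tab/newline so the
     * result stays a single unambiguous token */
    (void) strvis(enc, raw, VIS_CSTYLE | VIS_WHITE);
    printf("encoded: %s\n", enc);

    /* strunvis() undoes the encoding */
    if (strunvis(dec, enc) != -1 && strcmp(dec, raw) == 0)
        printf("round trip ok\n");
    return 0;
}
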
diff --git a/cmd-line-utils/libedit/parse.c b/cmd-line-utils/libedit/parse.c
index b6d077793af..b113353d464 100644
--- a/cmd-line-utils/libedit/parse.c
+++ b/cmd-line-utils/libedit/parse.c
@@ -1,4 +1,4 @@
-/* $NetBSD: parse.c,v 1.14 2001/01/23 15:55:30 jdolecek Exp $ */
+/* $NetBSD: parse.c,v 1.16 2003/01/21 18:40:24 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,7 +36,14 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)parse.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: parse.c,v 1.16 2003/01/21 18:40:24 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* parse.c: parse an editline extended command
@@ -51,7 +58,6 @@
* settc
* setty
*/
-#include "sys.h"
#include "el.h"
#include "tokenizer.h"
#include <stdlib.h>
@@ -63,7 +69,7 @@ private const struct {
{ "bind", map_bind },
{ "echotc", term_echotc },
{ "edit", el_editmode },
- { "history", hist_list },
+ { "history", hist_command },
{ "telltc", term_telltc },
{ "settc", term_settc },
{ "setty", tty_stty },
diff --git a/cmd-line-utils/libedit/prompt.c b/cmd-line-utils/libedit/prompt.c
index fb7d9d35936..03d8309a991 100644
--- a/cmd-line-utils/libedit/prompt.c
+++ b/cmd-line-utils/libedit/prompt.c
@@ -1,4 +1,4 @@
-/* $NetBSD: prompt.c,v 1.8 2001/01/10 07:45:41 jdolecek Exp $ */
+/* $NetBSD: prompt.c,v 1.9 2002/03/18 16:00:56 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)prompt.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: prompt.c,v 1.9 2002/03/18 16:00:56 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* prompt.c: Prompt printing functions
*/
-#include "sys.h"
#include <stdio.h>
#include "el.h"
diff --git a/cmd-line-utils/libedit/read.c b/cmd-line-utils/libedit/read.c
index ffec4671271..5eaa83bf482 100644
--- a/cmd-line-utils/libedit/read.c
+++ b/cmd-line-utils/libedit/read.c
@@ -1,4 +1,4 @@
-/* $NetBSD: read.c,v 1.19 2001/01/10 07:45:41 jdolecek Exp $ */
+/* $NetBSD: read.c,v 1.24 2002/11/20 16:50:08 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,13 +36,19 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)read.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: read.c,v 1.24 2002/11/20 16:50:08 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* read.c: Clean this junk up! This is horrible code.
* Terminal read functions
*/
-#include "sys.h"
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
@@ -52,12 +58,44 @@
private int read__fixio(int, int);
private int read_preread(EditLine *);
-private int read_getcmd(EditLine *, el_action_t *, char *);
private int read_char(EditLine *, char *);
+private int read_getcmd(EditLine *, el_action_t *, char *);
+
+/* read_init():
+ * Initialize the read stuff
+ */
+protected int
+read_init(EditLine *el)
+{
+ /* builtin read_char */
+ el->el_read.read_char = read_char;
+ return 0;
+}
+
+
+/* el_read_setfn():
+ * Set the read char function to the one provided.
+ * If it is set to EL_BUILTIN_GETCFN, then reset to the builtin one.
+ */
+protected int
+el_read_setfn(EditLine *el, el_rfunc_t rc)
+{
+ el->el_read.read_char = (rc == EL_BUILTIN_GETCFN) ? read_char : rc;
+ return 0;
+}
+
+
+/* el_read_getfn():
+ * return the current read char function, or EL_BUILTIN_GETCFN
+ * if it is the default one
+ */
+protected el_rfunc_t
+el_read_getfn(EditLine *el)
+{
+ return (el->el_read.read_char == read_char) ?
+ EL_BUILTIN_GETCFN : el->el_read.read_char;
+}
-#ifndef MIN
-#define MIN(A,B) ((A) < (B) ? (A) : (B))
-#endif
#ifdef DEBUG_EDIT
private void
@@ -83,7 +121,11 @@ read_debug(EditLine *el)
*/
/* ARGSUSED */
private int
-read__fixio(int fd __attribute__((unused)), int e)
+read__fixio(int fd
+#if !(defined(TRY_AGAIN) && (defined(FIONBIO) || (defined(F_SETFL) && defined(O_NDELAY))))
+ __attribute__((unused))
+#endif /* !(defined(TRY_AGAIN) && (defined(FIONBIO) || (defined(F_SETFL) && defined(O_NDELAY)))) */
+, int e)
{
switch (e) {
@@ -156,6 +198,10 @@ read_preread(EditLine *el)
return (0);
#ifdef FIONREAD
+
+#ifndef MIN /* definition of MIN is lacking on HP-UX */
+#define MIN(x,y) (((x)<(y))?(x):(y))
+#endif
(void) ioctl(el->el_infd, FIONREAD, (ioctl_t) & chrs);
if (chrs > 0) {
char buf[EL_BUFSIZ];
@@ -178,14 +224,13 @@ read_preread(EditLine *el)
* Push a macro
*/
public void
-el_push(EditLine *el, const char *str)
+el_push(EditLine *el, char *str)
{
c_macro_t *ma = &el->el_chared.c_macro;
if (str != NULL && ma->level + 1 < EL_MAXMACRO) {
ma->level++;
- /* LINTED const cast */
- ma->macro[ma->level] = (char *) str;
+ ma->macro[ma->level] = str;
} else {
term_beep(el);
term__flush();
@@ -199,10 +244,10 @@ el_push(EditLine *el, const char *str)
private int
read_getcmd(EditLine *el, el_action_t *cmdnum, char *ch)
{
- el_action_t cmd = ED_UNASSIGNED;
+ el_action_t cmd;
int num;
- while (cmd == ED_UNASSIGNED || cmd == ED_SEQUENCE_LEAD_IN) {
+ do {
if ((num = el_getc(el, ch)) != 1) /* if EOF or error */
return (num);
@@ -221,7 +266,7 @@ read_getcmd(EditLine *el, el_action_t *cmdnum, char *ch)
cmd = el->el_map.current[(unsigned char) *ch];
if (cmd == ED_SEQUENCE_LEAD_IN) {
key_value_t val;
- switch (key_get(el, ch, &val)) {
+ switch (el_key_get(el, ch, &val)) {
case XK_CMD:
cmd = val.cmd;
break;
@@ -241,7 +286,7 @@ read_getcmd(EditLine *el, el_action_t *cmdnum, char *ch)
}
if (el->el_map.alt == NULL)
el->el_map.current = el->el_map.key;
- }
+ } while (cmd == ED_SEQUENCE_LEAD_IN);
*cmdnum = cmd;
return (OKCMD);
}
@@ -307,7 +352,7 @@ el_getc(EditLine *el, char *cp)
#ifdef DEBUG_READ
(void) fprintf(el->el_errfile, "Reading a character\n");
#endif /* DEBUG_READ */
- num_read = read_char(el, cp);
+ num_read = (*el->el_read.read_char)(el, cp);
#ifdef DEBUG_READ
(void) fprintf(el->el_errfile, "Got it %c\n", *cp);
#endif /* DEBUG_READ */
@@ -333,7 +378,7 @@ el_gets(EditLine *el, int *nread)
char *cp = el->el_line.buffer;
size_t idx;
- while (read_char(el, cp) == 1) {
+ while ((*el->el_read.read_char)(el, cp) == 1) {
/* make sure there is space for next character */
if (cp + 1 >= el->el_line.limit) {
idx = (cp - el->el_line.buffer);
@@ -352,6 +397,11 @@ el_gets(EditLine *el, int *nread)
*nread = el->el_line.cursor - el->el_line.buffer;
return (el->el_line.buffer);
}
+
+ /* This is relatively cheap, and things go terribly wrong if
+ we have the wrong size. */
+ el_resize(el);
+
re_clear_display(el); /* reset the display stuff */
ch_reset(el);
@@ -378,7 +428,7 @@ el_gets(EditLine *el, int *nread)
term__flush();
- while (read_char(el, cp) == 1) {
+ while ((*el->el_read.read_char)(el, cp) == 1) {
/* make sure there is space for next character */
if (cp + 1 >= el->el_line.limit) {
idx = (cp - el->el_line.buffer);
@@ -386,6 +436,8 @@ el_gets(EditLine *el, int *nread)
break;
cp = &el->el_line.buffer[idx];
}
+ if (*cp == 4) /* ought to be stty eof */
+ break;
cp++;
if (cp[-1] == '\r' || cp[-1] == '\n')
break;
@@ -397,6 +449,7 @@ el_gets(EditLine *el, int *nread)
*nread = el->el_line.cursor - el->el_line.buffer;
return (el->el_line.buffer);
}
+
for (num = OKCMD; num == OKCMD;) { /* while still editing this
* line */
#ifdef DEBUG_EDIT
@@ -410,7 +463,8 @@ el_gets(EditLine *el, int *nread)
#endif /* DEBUG_READ */
break;
}
- if ((int) cmdnum >= el->el_map.nfunc) { /* BUG CHECK command */
+ if ((unsigned int)cmdnum >= (unsigned int)(el->el_map.nfunc))
+ { /* BUG CHECK command */
#ifdef DEBUG_EDIT
(void) fprintf(el->el_errfile,
"ERROR: illegal command from key 0%o\r\n", ch);
@@ -432,7 +486,24 @@ el_gets(EditLine *el, int *nread)
"Error command = %d\n", cmdnum);
}
#endif /* DEBUG_READ */
+ /* vi redo needs these way down the levels... */
+ el->el_state.thiscmd = cmdnum;
+ el->el_state.thisch = ch;
+ if (el->el_map.type == MAP_VI &&
+ el->el_map.current == el->el_map.key &&
+ el->el_chared.c_redo.pos < el->el_chared.c_redo.lim) {
+ if (cmdnum == VI_DELETE_PREV_CHAR &&
+ el->el_chared.c_redo.pos != el->el_chared.c_redo.buf
+ && isprint(el->el_chared.c_redo.pos[-1]))
+ el->el_chared.c_redo.pos--;
+ else
+ *el->el_chared.c_redo.pos++ = ch;
+ }
retval = (*el->el_map.func[cmdnum]) (el, ch);
+#ifdef DEBUG_READ
+ (void) fprintf(el->el_errfile,
+ "Returned state %d\n", retval );
+#endif /* DEBUG_READ */
/* save the last command here */
el->el_state.lastcmd = cmdnum;
@@ -440,8 +511,6 @@ el_gets(EditLine *el, int *nread)
/* use any return value */
switch (retval) {
case CC_CURSOR:
- el->el_state.argument = 1;
- el->el_state.doingarg = 0;
re_refresh_cursor(el);
break;
@@ -451,26 +520,20 @@ el_gets(EditLine *el, int *nread)
/* FALLTHROUGH */
case CC_REFRESH:
- el->el_state.argument = 1;
- el->el_state.doingarg = 0;
re_refresh(el);
break;
case CC_REFRESH_BEEP:
- el->el_state.argument = 1;
- el->el_state.doingarg = 0;
re_refresh(el);
term_beep(el);
break;
case CC_NORM: /* normal char */
- el->el_state.argument = 1;
- el->el_state.doingarg = 0;
break;
case CC_ARGHACK: /* Suggested by Rich Salz */
/* <rsalz@pineapple.bbn.com> */
- break; /* keep going... */
+ continue; /* keep going... */
case CC_EOF: /* end of file typed */
num = 0;
@@ -489,8 +552,6 @@ el_gets(EditLine *el, int *nread)
re_clear_display(el); /* reset the display stuff */
ch_reset(el); /* reset the input pointers */
re_refresh(el); /* print the prompt again */
- el->el_state.argument = 1;
- el->el_state.doingarg = 0;
break;
case CC_ERROR:
@@ -499,17 +560,18 @@ el_gets(EditLine *el, int *nread)
(void) fprintf(el->el_errfile,
"*** editor ERROR ***\r\n\n");
#endif /* DEBUG_READ */
- el->el_state.argument = 1;
- el->el_state.doingarg = 0;
term_beep(el);
term__flush();
break;
}
+ el->el_state.argument = 1;
+ el->el_state.doingarg = 0;
+ el->el_chared.c_vcmd.action = NOP;
}
- /* make sure the tty is set up correctly */
- (void) tty_cookedmode(el);
term__flush(); /* flush any buffered output */
+ /* make sure the tty is set up correctly */
+ (void) tty_cookedmode(el);
if (el->el_flags & HANDLE_SIGNALS)
sig_clr(el);
if (nread)
diff --git a/cmd-line-utils/libedit/read.h b/cmd-line-utils/libedit/read.h
new file mode 100644
index 00000000000..b01e77db239
--- /dev/null
+++ b/cmd-line-utils/libedit/read.h
@@ -0,0 +1,55 @@
+/* $NetBSD: read.h,v 1.1 2001/09/27 19:29:50 christos Exp $ */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Anthony Mallet.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * el.read.h: Character reading functions
+ */
+#ifndef _h_el_read
+#define _h_el_read
+
+typedef int (*el_rfunc_t)(EditLine *, char *);
+
+typedef struct el_read_t {
+ el_rfunc_t read_char; /* Function to read a character */
+} el_read_t;
+
+protected int read_init(EditLine *);
+protected int el_read_setfn(EditLine *, el_rfunc_t);
+protected el_rfunc_t el_read_getfn(EditLine *);
+
+#endif /* _h_el_read */
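
A short sketch of the read hook shape added here: el_rfunc_t is the per-EditLine character reader. Note that el_read_setfn()/el_read_getfn() are declared protected, i.e. library-internal, so the registration shown in the trailing comment only illustrates how the hook fits together; the descriptor use and error handling are assumptions.

#include <unistd.h>
#include "el.h"   /* EditLine and the el_rfunc_t typedef above */

/* Hypothetical reader matching el_rfunc_t: fetch one byte from the
 * EditLine input descriptor; retry logic is omitted for brevity. */
static int my_read_char(EditLine *el, char *cp)
{
    ssize_t n = read(el->el_infd, cp, 1);

    return (n == 1) ? 1 : (int) n;   /* 1 on success, 0 on EOF, -1 on error */
}

/* Inside the library this could then be installed and queried with
 *     el_read_setfn(el, my_read_char);
 *     el_read_getfn(el);   -- reports EL_BUILTIN_GETCFN for the default
 * as declared above. */
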
diff --git a/cmd-line-utils/libedit/readline.c b/cmd-line-utils/libedit/readline.c
index 9069b46d1f8..5b40ade582c 100644
--- a/cmd-line-utils/libedit/readline.c
+++ b/cmd-line-utils/libedit/readline.c
@@ -1,4 +1,4 @@
-/* $NetBSD: readline.c,v 1.19 2001/01/10 08:10:45 jdolecek Exp $ */
+/* $NetBSD: readline.c,v 1.28 2003/03/10 01:14:54 christos Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -36,7 +36,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+__RCSID("$NetBSD: readline.c,v 1.28 2003/03/10 01:14:54 christos Exp $");
+#endif /* not lint && not SCCSID */
+
#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>
@@ -47,14 +51,13 @@
#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
#include "histedit.h"
#include "readline/readline.h"
-#include "sys.h"
#include "el.h"
#include "fcns.h" /* for EL_NUM_FCNS */
-#ifdef HAVE_ALLOCA_H
-#include <alloca.h>
-#endif
/* for rl_complete() */
#define TAB '\r'
@@ -65,7 +68,11 @@
/* readline compatibility stuff - look at readline sources/documentation */
/* to see what these variables mean */
const char *rl_library_version = "EditLine wrapper";
-const char *rl_readline_name = "";
+static char empty[] = { '\0' };
+static char expand_chars[] = { ' ', '\t', '\n', '=', '(', '\0' };
+static char break_chars[] = { ' ', '\t', '\n', '"', '\\', '\'', '`', '@', '$',
+ '>', '<', '=', ';', '|', '&', '{', '(', '\0' };
+char *rl_readline_name = empty;
FILE *rl_instream = NULL;
FILE *rl_outstream = NULL;
int rl_point = 0;
@@ -77,12 +84,12 @@ int history_length = 0;
int max_input_history = 0;
char history_expansion_char = '!';
char history_subst_char = '^';
-const char *history_no_expand_chars = " \t\n=(";
+char *history_no_expand_chars = expand_chars;
Function *history_inhibit_expansion_function = NULL;
int rl_inhibit_completion = 0;
int rl_attempted_completion_over = 0;
-const char *rl_basic_word_break_characters = " \t\n\"\\'`@$><=;|&{(";
+char *rl_basic_word_break_characters = break_chars;
char *rl_completer_word_break_characters = NULL;
char *rl_completer_quote_characters = NULL;
CPFunction *rl_completion_entry_function = NULL;
@@ -215,6 +222,11 @@ rl_initialize(void)
/* for proper prompt printing in readline() */
el_rl_prompt = strdup("");
+ if (el_rl_prompt == NULL) {
+ history_end(h);
+ el_end(e);
+ return -1;
+ }
el_set(e, EL_PROMPT, _get_prompt);
el_set(e, EL_SIGNAL, 1);
@@ -250,8 +262,8 @@ rl_initialize(void)
* and rl_line_buffer directly.
*/
li = el_line(e);
- /* LINTED const cast */
- rl_line_buffer = (char *) li->buffer;
+ /* a cheesy way to get rid of const cast. */
+ rl_line_buffer = memchr(li->buffer, *li->buffer, 1);
rl_point = rl_end = 0;
return (0);
@@ -268,6 +280,7 @@ readline(const char *prompt)
HistEvent ev;
int count;
const char *ret;
+ char *buf;
if (e == NULL || h == NULL)
rl_initialize();
@@ -278,28 +291,28 @@ readline(const char *prompt)
if (strcmp(el_rl_prompt, prompt) != 0) {
free(el_rl_prompt);
el_rl_prompt = strdup(prompt);
+ if (el_rl_prompt == NULL)
+ return NULL;
}
/* get one line from input stream */
ret = el_gets(e, &count);
if (ret && count > 0) {
- char *foo;
int lastidx;
- foo = strdup(ret);
+ buf = strdup(ret);
+ if (buf == NULL)
+ return NULL;
lastidx = count - 1;
- if (foo[lastidx] == '\n')
- foo[lastidx] = '\0';
-
- ret = foo;
+ if (buf[lastidx] == '\n')
+ buf[lastidx] = '\0';
} else
- ret = NULL;
+ buf = NULL;
history(h, &ev, H_GETSIZE);
history_length = ev.num;
- /* LINTED const cast */
- return (char *) ret;
+ return buf;
}
/*
@@ -333,6 +346,8 @@ _rl_compat_sub(const char *str, const char *what, const char *with,
size_t size, i;
result = malloc((size = 16));
+ if (result == NULL)
+ return NULL;
temp = str;
with_len = strlen(with);
what_len = strlen(what);
@@ -343,8 +358,14 @@ _rl_compat_sub(const char *str, const char *what, const char *with,
i = new - temp;
add = i + with_len;
if (i + add + 1 >= size) {
+ char *nresult;
size += add + 1;
- result = realloc(result, size);
+ nresult = realloc(result, size);
+ if (nresult == NULL) {
+ free(result);
+ return NULL;
+ }
+ result = nresult;
}
(void) strncpy(&result[len], temp, i);
len += i;
@@ -354,8 +375,14 @@ _rl_compat_sub(const char *str, const char *what, const char *with,
} else {
add = strlen(temp);
if (len + add + 1 >= size) {
+ char *nresult;
size += add + 1;
- result = realloc(result, size);
+ nresult = realloc(result, size);
+ if (nresult == NULL) {
+ free(result);
+ return NULL;
+ }
+ result = nresult;
}
(void) strcpy(&result[len], temp); /* safe */
len += add;
@@ -392,7 +419,7 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
*result = NULL;
- cmd = (char*) alloca(cmdlen + 1);
+ cmd = alloca(cmdlen + 1);
(void) strncpy(cmd, command, cmdlen);
cmd[cmdlen] = 0;
@@ -425,7 +452,7 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
return (-1);
prefix = 0;
}
- search = (char*) alloca(len + 1);
+ search = alloca(len + 1);
(void) strncpy(search, &cmd[idx], len);
search[len] = '\0';
@@ -498,6 +525,8 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
cmd++;
line = strdup(event_data);
+ if (line == NULL)
+ return 0;
for (; *cmd; cmd++) {
if (*cmd == ':')
continue;
@@ -515,7 +544,7 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
g_on = 2;
else if (*cmd == 's' || *cmd == '&') {
char *what, *with, delim;
- size_t len, from_len;
+ unsigned int len, from_len;
size_t size;
if (*cmd == '&' && (from == NULL || to == NULL))
@@ -524,23 +553,36 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
delim = *(++cmd), cmd++;
size = 16;
what = realloc(from, size);
+ if (what == NULL) {
+ free(from);
+ return 0;
+ }
len = 0;
for (; *cmd && *cmd != delim; cmd++) {
if (*cmd == '\\'
&& *(cmd + 1) == delim)
cmd++;
- if (len >= size)
- what = realloc(what,
+ if (len >= size) {
+ char *nwhat;
+ nwhat = realloc(what,
(size <<= 1));
+ if (nwhat == NULL) {
+ free(what);
+ return 0;
+ }
+ what = nwhat;
+ }
what[len++] = *cmd;
}
what[len] = '\0';
from = what;
if (*what == '\0') {
free(what);
- if (search)
+ if (search) {
from = strdup(search);
- else {
+ if (from == NULL)
+ return 0;
+ } else {
from = NULL;
return (-1);
}
@@ -551,12 +593,22 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
size = 16;
with = realloc(to, size);
+ if (with == NULL) {
+ free(to);
+ return -1;
+ }
len = 0;
from_len = strlen(from);
for (; *cmd && *cmd != delim; cmd++) {
if (len + from_len + 1 >= size) {
+ char *nwith;
size += from_len + 1;
- with = realloc(with, size);
+ nwith = realloc(with, size);
+ if (nwith == NULL) {
+ free(with);
+ return -1;
+ }
+ with = nwith;
}
if (*cmd == '&') {
/* safe */
@@ -575,8 +627,10 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
tempcmd = _rl_compat_sub(line, from, to,
(g_on) ? 1 : 0);
- free(line);
- line = tempcmd;
+ if (tempcmd) {
+ free(line);
+ line = tempcmd;
+ }
g_on = 0;
}
}
@@ -622,14 +676,21 @@ _history_expand_command(const char *command, size_t cmdlen, char **result)
}
cmdsize = 1, cmdlen = 0;
- tempcmd = malloc(cmdsize);
+ if ((tempcmd = malloc(cmdsize)) == NULL)
+ return 0;
for (i = start; start <= i && i <= end; i++) {
int arr_len;
arr_len = strlen(arr[i]);
if (cmdlen + arr_len + 1 >= cmdsize) {
+ char *ntempcmd;
cmdsize += arr_len + 1;
- tempcmd = realloc(tempcmd, cmdsize);
+ ntempcmd = realloc(tempcmd, cmdsize);
+ if (ntempcmd == NULL) {
+ free(tempcmd);
+ return 0;
+ }
+ tempcmd = ntempcmd;
}
(void) strcpy(&tempcmd[cmdlen], arr[i]); /* safe */
cmdlen += arr_len;
@@ -662,10 +723,12 @@ history_expand(char *str, char **output)
rl_initialize();
*output = strdup(str); /* do it early */
+ if (*output == NULL)
+ return 0;
if (str[0] == history_subst_char) {
/* ^foo^foo2^ is equivalent to !!:s^foo^foo2^ */
- temp = (char*) alloca(4 + strlen(str) + 1);
+ temp = alloca(4 + strlen(str) + 1);
temp[0] = temp[1] = history_expansion_char;
temp[2] = ':';
temp[3] = 's';
@@ -674,8 +737,14 @@ history_expand(char *str, char **output)
}
#define ADD_STRING(what, len) \
{ \
- if (idx + len + 1 > size) \
- result = realloc(result, (size += len + 1)); \
+ if (idx + len + 1 > size) { \
+ char *nresult = realloc(result, (size += len + 1));\
+ if (nresult == NULL) { \
+ free(*output); \
+ return 0; \
+ } \
+ result = nresult; \
+ } \
(void)strncpy(&result[idx], what, len); \
idx += len; \
result[idx] = '\0'; \
@@ -789,11 +858,21 @@ history_tokenize(const char *str)
}
if (result_idx + 2 >= size) {
+ char **nresult;
size <<= 1;
- result = realloc(result, size * sizeof(char *));
+ nresult = realloc(result, size * sizeof(char *));
+ if (nresult == NULL) {
+ free(result);
+ return NULL;
+ }
+ result = nresult;
}
len = i - start;
temp = malloc(len + 1);
+ if (temp == NULL) {
+ free(result);
+ return NULL;
+ }
(void) strncpy(temp, &str[start], len);
temp[len] = '\0';
result[result_idx++] = temp;
@@ -1158,11 +1237,15 @@ tilde_expand(char *txt)
return (strdup(txt));
temp = strchr(txt + 1, '/');
- if (temp == NULL)
+ if (temp == NULL) {
temp = strdup(txt + 1);
- else {
+ if (temp == NULL)
+ return NULL;
+ } else {
len = temp - txt + 1; /* text until string after slash */
temp = malloc(len);
+ if (temp == NULL)
+ return NULL;
(void) strncpy(temp, txt + 1, len - 2);
temp[len - 2] = '\0';
}
@@ -1176,6 +1259,8 @@ tilde_expand(char *txt)
txt += len;
temp = malloc(strlen(pass->pw_dir) + 1 + strlen(txt) + 1);
+ if (temp == NULL)
+ return NULL;
(void) sprintf(temp, "%s/%s", pass->pw_dir, txt);
return (temp);
@@ -1200,28 +1285,45 @@ filename_completion_function(const char *text, int state)
size_t len;
if (state == 0 || dir == NULL) {
- if (dir != NULL) {
- closedir(dir);
- dir = NULL;
- }
temp = strrchr(text, '/');
if (temp) {
+ char *nptr;
temp++;
- filename = realloc(filename, strlen(temp) + 1);
+ nptr = realloc(filename, strlen(temp) + 1);
+ if (nptr == NULL) {
+ free(filename);
+ return NULL;
+ }
+ filename = nptr;
(void) strcpy(filename, temp);
len = temp - text; /* including last slash */
- dirname = realloc(dirname, len + 1);
+ nptr = realloc(dirname, len + 1);
+ if (nptr == NULL) {
+ free(filename);
+ return NULL;
+ }
+ dirname = nptr;
(void) strncpy(dirname, text, len);
dirname[len] = '\0';
} else {
filename = strdup(text);
+ if (filename == NULL)
+ return NULL;
dirname = NULL;
}
/* support for ``~user'' syntax */
if (dirname && *dirname == '~') {
+ char *nptr;
temp = tilde_expand(dirname);
- dirname = realloc(dirname, strlen(temp) + 1);
+ if (temp == NULL)
+ return NULL;
+ nptr = realloc(dirname, strlen(temp) + 1);
+ if (nptr == NULL) {
+ free(dirname);
+ return NULL;
+ }
+ dirname = nptr;
(void) strcpy(dirname, temp); /* safe */
free(temp); /* no longer needed */
}
@@ -1230,6 +1332,10 @@ filename_completion_function(const char *text, int state)
if (filename_len == 0)
return (NULL); /* no expansion possible */
+ if (dir != NULL) {
+ (void)closedir(dir);
+ dir = NULL;
+ }
dir = opendir(dirname ? dirname : ".");
if (!dir)
return (NULL); /* cannot open the directory */
@@ -1239,7 +1345,7 @@ filename_completion_function(const char *text, int state)
/* otherwise, get first entry where first */
/* filename_len characters are equal */
if (entry->d_name[0] == filename[0]
-#ifdef HAVE_DIRENT_H
+#ifndef STRUCT_DIRENT_HAS_D_NAMLEN
&& strlen(entry->d_name) >= filename_len
#else
&& entry->d_namlen >= filename_len
@@ -1252,21 +1358,26 @@ filename_completion_function(const char *text, int state)
if (entry) { /* match found */
struct stat stbuf;
-#ifdef HAVE_DIRENT_H
+#ifndef STRUCT_DIRENT_HAS_D_NAMLEN
len = strlen(entry->d_name) +
#else
len = entry->d_namlen +
#endif
((dirname) ? strlen(dirname) : 0) + 1 + 1;
temp = malloc(len);
+ if (temp == NULL)
+ return NULL;
(void) sprintf(temp, "%s%s",
dirname ? dirname : "", entry->d_name); /* safe */
/* test, if it's directory */
if (stat(temp, &stbuf) == 0 && S_ISDIR(stbuf.st_mode))
strcat(temp, "/"); /* safe */
- } else
+ } else {
+ (void)closedir(dir);
+ dir = NULL;
temp = NULL;
+ }
return (temp);
}
@@ -1331,16 +1442,24 @@ completion_matches(const char *text, CPFunction *genfunc)
matches = 0;
match_list_len = 1;
while ((retstr = (*genfunc) (text, matches)) != NULL) {
- if (matches + 1 >= match_list_len) {
+ /* allow for list terminator here */
+ if (matches + 2 >= match_list_len) {
+ char **nmatch_list;
match_list_len <<= 1;
- match_list = realloc(match_list,
+ nmatch_list = realloc(match_list,
match_list_len * sizeof(char *));
+ if (nmatch_list == NULL) {
+ free(match_list);
+ return NULL;
+ }
+ match_list = nmatch_list;
+
}
match_list[++matches] = retstr;
}
if (!match_list)
- return (char **) NULL; /* nothing found */
+ return NULL; /* nothing found */
/* find least denominator and insert it to match_list[0] */
which = 2;
@@ -1354,14 +1473,15 @@ completion_matches(const char *text, CPFunction *genfunc)
}
retstr = malloc(max_equal + 1);
+ if (retstr == NULL) {
+ free(match_list);
+ return NULL;
+ }
(void) strncpy(retstr, match_list[1], max_equal);
retstr[max_equal] = '\0';
match_list[0] = retstr;
/* add NULL as last pointer to the array */
- if (matches + 1 >= match_list_len)
- match_list = realloc(match_list,
- (match_list_len + 1) * sizeof(char *));
match_list[matches + 1] = (char *) NULL;
return (match_list);
@@ -1374,10 +1494,8 @@ static int
_rl_qsort_string_compare(i1, i2)
const void *i1, *i2;
{
- /*LINTED const castaway*/
- const char *s1 = ((const char **)i1)[0];
- /*LINTED const castaway*/
- const char *s2 = ((const char **)i2)[0];
+ const char *s1 = ((const char * const *)i1)[0];
+ const char *s2 = ((const char * const *)i2)[0];
return strcasecmp(s1, s2);
}
@@ -1459,7 +1577,7 @@ rl_complete_internal(int what_to_do)
ctemp--;
len = li->cursor - ctemp;
- temp = (char*) alloca(len + 1);
+ temp = alloca(len + 1);
(void) strncpy(temp, ctemp, len);
temp[len] = '\0';
diff --git a/cmd-line-utils/libedit/readline/readline.h b/cmd-line-utils/libedit/readline/readline.h
index 930c32d6f1c..7485dde4052 100644
--- a/cmd-line-utils/libedit/readline/readline.h
+++ b/cmd-line-utils/libedit/readline/readline.h
@@ -39,18 +39,6 @@
#define _READLINE_H_
#include <sys/types.h>
-#if HAVE_SYS_CDEFS_H
-#include <sys/cdefs.h>
-#endif
-#ifndef __BEGIN_DECLS
-#if defined(__cplusplus)
-#define __BEGIN_DECLS extern "C" {
-#define __END_DECLS }
-#else
-#define __BEGIN_DECLS
-#define __END_DECLS
-#endif
-#endif
/* list of readline stuff supported by editline library's readline wrapper */
@@ -66,16 +54,18 @@ typedef struct _hist_entry {
} HIST_ENTRY;
/* global variables used by readline enabled applications */
-__BEGIN_DECLS
+#ifdef __cplusplus
+extern "C" {
+#endif
extern const char *rl_library_version;
-extern const char *rl_readline_name;
+extern char *rl_readline_name;
extern FILE *rl_instream;
extern FILE *rl_outstream;
extern char *rl_line_buffer;
extern int rl_point, rl_end;
extern int history_base, history_length;
extern int max_input_history;
-extern const char *rl_basic_word_break_characters;
+extern char *rl_basic_word_break_characters;
extern char *rl_completer_word_break_characters;
extern char *rl_completer_quote_characters;
extern CPFunction *rl_completion_entry_function;
@@ -121,6 +111,8 @@ void rl_display_match_list(char **, int, int);
int rl_insert(int, int);
void rl_reset_terminal(const char *);
int rl_bind_key(int, int (*)(int, int));
-__END_DECLS
+#ifdef __cplusplus
+}
+#endif
#endif /* _READLINE_H_ */
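
An editorial sketch of how a client drives this wrapper. Only readline() itself is visible in the hunks above, so the prompt text and the loop are assumptions; what the diff does establish is that the returned line is strdup()ed with the trailing newline stripped, so the caller frees it.

#include <stdio.h>
#include <stdlib.h>
#include "readline/readline.h"

int main(void)
{
    char *line;

    /* Each call returns a heap-allocated copy of the line without the
     * trailing newline, or NULL on EOF/error. */
    while ((line = readline("prompt> ")) != NULL) {
        printf("got: %s\n", line);
        free(line);
    }
    return 0;
}
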
diff --git a/cmd-line-utils/libedit/refresh.c b/cmd-line-utils/libedit/refresh.c
index 534e7e12304..e71bdba2b61 100644
--- a/cmd-line-utils/libedit/refresh.c
+++ b/cmd-line-utils/libedit/refresh.c
@@ -1,4 +1,4 @@
-/* $NetBSD: refresh.c,v 1.17 2001/04/13 00:53:11 lukem Exp $ */
+/* $NetBSD: refresh.c,v 1.24 2003/03/10 21:18:49 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)refresh.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: refresh.c,v 1.24 2003/03/10 21:18:49 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* refresh.c: Lower level screen refreshing functions
*/
-#include "sys.h"
#include <stdio.h>
#include <ctype.h>
#include <unistd.h>
@@ -51,28 +57,28 @@
private void re_addc(EditLine *, int);
private void re_update_line(EditLine *, char *, char *, int);
-private void re_insert (EditLine *, char *, int, int, char *, int);
-private void re_delete(EditLine *, char *, int, int, int);
+private void re_insert (EditLine *el, char *, int, int, char *, int);
+private void re_delete(EditLine *el, char *, int, int, int);
private void re_fastputc(EditLine *, int);
private void re__strncopy(char *, char *, size_t);
private void re__copy_and_pad(char *, const char *, size_t);
#ifdef DEBUG_REFRESH
-private void re_printstr(EditLine *, char *, char *, char *);
+private void re_printstr(EditLine *, const char *, char *, char *);
#define __F el->el_errfile
#define ELRE_ASSERT(a, b, c) do \
- if (a) { \
+ if (/*CONSTCOND*/ a) { \
(void) fprintf b; \
c; \
} \
- while (0)
+ while (/*CONSTCOND*/0)
#define ELRE_DEBUG(a, b) ELRE_ASSERT(a,b,;)
/* re_printstr():
* Print a string on the debugging pty
*/
private void
-re_printstr(EditLine *el, char *str, char *f, char *t)
+re_printstr(EditLine *el, const char *str, char *f, char *t)
{
ELRE_DEBUG(1, (__F, "%s:\"", str));
@@ -203,6 +209,14 @@ re_refresh(EditLine *el)
el->el_refresh.r_cursor.h = 0;
el->el_refresh.r_cursor.v = 0;
+ if (el->el_line.cursor >= el->el_line.lastchar) {
+ if (el->el_map.current == el->el_map.alt
+ && el->el_line.lastchar != el->el_line.buffer)
+ el->el_line.cursor = el->el_line.lastchar - 1;
+ else
+ el->el_line.cursor = el->el_line.lastchar;
+ }
+
cur.h = -1; /* set flag in case I'm not set */
cur.v = 0;
@@ -312,7 +326,6 @@ re_goto_bottom(EditLine *el)
{
term_move_to_line(el, el->el_refresh.r_oldcv);
- term__putc('\r');
term__putc('\n');
re_clear_display(el);
term__flush();
@@ -325,7 +338,7 @@ re_goto_bottom(EditLine *el)
*/
private void
/*ARGSUSED*/
-re_insert(EditLine *el __attribute__((unused)),
+re_insert(EditLine *el __attribute__((unused)),
char *d, int dat, int dlen, char *s, int num)
{
char *a, *b;
@@ -369,7 +382,7 @@ re_insert(EditLine *el __attribute__((unused)),
*/
private void
/*ARGSUSED*/
-re_delete(EditLine *el __attribute__((unused)),
+re_delete(EditLine *el __attribute__((unused)),
char *d, int dat, int dlen, int num)
{
char *a, *b;
@@ -905,7 +918,7 @@ re_update_line(EditLine *el, char *old, char *new, int i)
private void
re__copy_and_pad(char *dst, const char *src, size_t width)
{
- unsigned int i;
+ size_t i;
for (i = 0; i < width; i++) {
if (*src == '\0')
@@ -929,6 +942,14 @@ re_refresh_cursor(EditLine *el)
char *cp, c;
int h, v, th;
+ if (el->el_line.cursor >= el->el_line.lastchar) {
+ if (el->el_map.current == el->el_map.alt
+ && el->el_line.lastchar != el->el_line.buffer)
+ el->el_line.cursor = el->el_line.lastchar - 1;
+ else
+ el->el_line.cursor = el->el_line.lastchar;
+ }
+
/* first we must find where the cursor is... */
h = el->el_prompt.p_pos.h;
v = el->el_prompt.p_pos.v;
@@ -1051,8 +1072,8 @@ re_fastaddc(EditLine *el)
re_fastputc(el, c);
} else {
re_fastputc(el, '\\');
- re_fastputc(el, (int) ((((unsigned int) c >> 6) & 7) + '0'));
- re_fastputc(el, (int) ((((unsigned int) c >> 3) & 7) + '0'));
+ re_fastputc(el, (int)(((((unsigned int)c) >> 6) & 3) + '0'));
+ re_fastputc(el, (int)(((((unsigned int)c) >> 3) & 7) + '0'));
re_fastputc(el, (c & 7) + '0');
}
term__flush();
diff --git a/cmd-line-utils/libedit/search.c b/cmd-line-utils/libedit/search.c
index bdc3a1e8bb9..0957529485c 100644
--- a/cmd-line-utils/libedit/search.c
+++ b/cmd-line-utils/libedit/search.c
@@ -1,4 +1,4 @@
-/* $NetBSD: search.c,v 1.11 2001/01/23 15:55:31 jdolecek Exp $ */
+/* $NetBSD: search.c,v 1.14 2002/11/20 16:50:08 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,17 +36,21 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)search.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: search.c,v 1.14 2002/11/20 16:50:08 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* search.c: History and character search functions
*/
-#include "sys.h"
#include <stdlib.h>
-#if HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
#if defined(REGEX)
+#include <sys/types.h>
#include <regex.h>
#elif defined(REGEXP)
#include <regexp.h>
@@ -73,7 +77,8 @@ search_init(EditLine *el)
el->el_search.patlen = 0;
el->el_search.patdir = -1;
el->el_search.chacha = '\0';
- el->el_search.chadir = -1;
+ el->el_search.chadir = CHAR_FWD;
+ el->el_search.chatflg = 0;
return (0);
}
@@ -445,29 +450,23 @@ cv_search(EditLine *el, int dir)
char tmpbuf[EL_BUFSIZ];
int tmplen;
- tmplen = 0;
-#ifdef ANCHOR
- tmpbuf[tmplen++] = '.';
- tmpbuf[tmplen++] = '*';
-#endif
-
- el->el_line.buffer[0] = '\0';
- el->el_line.lastchar = el->el_line.buffer;
- el->el_line.cursor = el->el_line.buffer;
- el->el_search.patdir = dir;
-
- c_insert(el, 2); /* prompt + '\n' */
- *el->el_line.cursor++ = '\n';
- *el->el_line.cursor++ = dir == ED_SEARCH_PREV_HISTORY ? '/' : '?';
- re_refresh(el);
-
#ifdef ANCHOR
+ tmpbuf[0] = '.';
+ tmpbuf[1] = '*';
#define LEN 2
#else
#define LEN 0
#endif
+ tmplen = LEN;
+
+ el->el_search.patdir = dir;
+
+ tmplen = c_gets(el, &tmpbuf[LEN],
+ dir == ED_SEARCH_PREV_HISTORY ? "\n/" : "\n?" );
+ if (tmplen == -1)
+ return CC_REFRESH;
- tmplen = c_gets(el, &tmpbuf[LEN]) + LEN;
+ tmplen += LEN;
ch = tmpbuf[tmplen];
tmpbuf[tmplen] = '\0';
@@ -476,9 +475,6 @@ cv_search(EditLine *el, int dir)
* Use the old pattern, but wild-card it.
*/
if (el->el_search.patlen == 0) {
- el->el_line.buffer[0] = '\0';
- el->el_line.lastchar = el->el_line.buffer;
- el->el_line.cursor = el->el_line.buffer;
re_refresh(el);
return (CC_ERROR);
}
@@ -509,19 +505,15 @@ cv_search(EditLine *el, int dir)
el->el_state.lastcmd = (el_action_t) dir; /* avoid c_setpat */
el->el_line.cursor = el->el_line.lastchar = el->el_line.buffer;
if ((dir == ED_SEARCH_PREV_HISTORY ? ed_search_prev_history(el, 0) :
- ed_search_next_history(el, 0)) == CC_ERROR) {
+ ed_search_next_history(el, 0)) == CC_ERROR) {
re_refresh(el);
return (CC_ERROR);
- } else {
- if (ch == 0033) {
- re_refresh(el);
- *el->el_line.lastchar++ = '\n';
- *el->el_line.lastchar = '\0';
- re_goto_bottom(el);
- return (CC_NEWLINE);
- } else
- return (CC_REFRESH);
}
+ if (ch == 0033) {
+ re_refresh(el);
+ return ed_newline(el, 0);
+ }
+ return (CC_REFRESH);
}
@@ -578,69 +570,53 @@ cv_repeat_srch(EditLine *el, int c)
}
-/* cv_csearch_back():
- * Vi character search reverse
+/* cv_csearch():
+ * Vi character search
*/
protected el_action_t
-cv_csearch_back(EditLine *el, int ch, int count, int tflag)
+cv_csearch(EditLine *el, int direction, int ch, int count, int tflag)
{
char *cp;
- cp = el->el_line.cursor;
- while (count--) {
- if (*cp == ch)
- cp--;
- while (cp > el->el_line.buffer && *cp != ch)
- cp--;
- }
-
- if (cp < el->el_line.buffer || (cp == el->el_line.buffer && *cp != ch))
- return (CC_ERROR);
-
- if (*cp == ch && tflag)
- cp++;
+ if (ch == 0)
+ return CC_ERROR;
- el->el_line.cursor = cp;
-
- if (el->el_chared.c_vcmd.action & DELETE) {
- el->el_line.cursor++;
- cv_delfini(el);
- return (CC_REFRESH);
+ if (ch == -1) {
+ char c;
+ if (el_getc(el, &c) != 1)
+ return ed_end_of_file(el, 0);
+ ch = c;
}
- re_refresh_cursor(el);
- return (CC_NORM);
-}
-
-/* cv_csearch_fwd():
- * Vi character search forward
- */
-protected el_action_t
-cv_csearch_fwd(EditLine *el, int ch, int count, int tflag)
-{
- char *cp;
+ /* Save for ';' and ',' commands */
+ el->el_search.chacha = ch;
+ el->el_search.chadir = direction;
+ el->el_search.chatflg = tflag;
cp = el->el_line.cursor;
while (count--) {
if (*cp == ch)
- cp++;
- while (cp < el->el_line.lastchar && *cp != ch)
- cp++;
+ cp += direction;
+ for (;;cp += direction) {
+ if (cp >= el->el_line.lastchar)
+ return CC_ERROR;
+ if (cp < el->el_line.buffer)
+ return CC_ERROR;
+ if (*cp == ch)
+ break;
+ }
}
- if (cp >= el->el_line.lastchar)
- return (CC_ERROR);
-
- if (*cp == ch && tflag)
- cp--;
+ if (tflag)
+ cp -= direction;
el->el_line.cursor = cp;
- if (el->el_chared.c_vcmd.action & DELETE) {
- el->el_line.cursor++;
+ if (el->el_chared.c_vcmd.action != NOP) {
+ if (direction > 0)
+ el->el_line.cursor++;
cv_delfini(el);
- return (CC_REFRESH);
+ return CC_REFRESH;
}
- re_refresh_cursor(el);
- return (CC_NORM);
+ return CC_CURSOR;
}
diff --git a/cmd-line-utils/libedit/search.h b/cmd-line-utils/libedit/search.h
index 676bbe2e35b..a7363072a4c 100644
--- a/cmd-line-utils/libedit/search.h
+++ b/cmd-line-utils/libedit/search.h
@@ -1,4 +1,4 @@
-/* $NetBSD: search.h,v 1.5 2000/09/04 22:06:32 lukem Exp $ */
+/* $NetBSD: search.h,v 1.6 2002/11/15 14:32:34 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -52,6 +52,7 @@ typedef struct el_search_t {
int patdir; /* Direction of the last search */
int chadir; /* Character search direction */
char chacha; /* Character we are looking for */
+ char chatflg; /* 0 if f, 1 if t */
} el_search_t;
@@ -64,7 +65,6 @@ protected el_action_t ce_inc_search(EditLine *, int);
protected el_action_t cv_search(EditLine *, int);
protected el_action_t ce_search_line(EditLine *, char *, int);
protected el_action_t cv_repeat_srch(EditLine *, int);
-protected el_action_t cv_csearch_back(EditLine *, int, int, int);
-protected el_action_t cv_csearch_fwd(EditLine *, int, int, int);
+protected el_action_t cv_csearch(EditLine *, int, int, int, int);
#endif /* _h_el_search */
diff --git a/cmd-line-utils/libedit/sig.c b/cmd-line-utils/libedit/sig.c
index bfb3d5c93f9..3730067ed5f 100644
--- a/cmd-line-utils/libedit/sig.c
+++ b/cmd-line-utils/libedit/sig.c
@@ -1,4 +1,4 @@
-/* $NetBSD: sig.c,v 1.8 2001/01/09 17:31:04 jdolecek Exp $ */
+/* $NetBSD: sig.c,v 1.10 2003/03/10 00:58:05 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,14 +36,20 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)sig.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: sig.c,v 1.10 2003/03/10 00:58:05 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* sig.c: Signal handling stuff.
* our policy is to trap all signals, set a good state
* and pass the ball to our caller.
*/
-#include "sys.h"
#include "el.h"
#include <stdlib.h>
@@ -115,9 +121,9 @@ sig_init(EditLine *el)
#undef _DO
(void) sigprocmask(SIG_BLOCK, &nset, &oset);
-#define SIGSIZE (sizeof(sighdl) / sizeof(sighdl[0]) * sizeof(libedit_sig_t))
+#define SIGSIZE (sizeof(sighdl) / sizeof(sighdl[0]) * sizeof(el_signalhandler_t))
- el->el_signal = (el_signal_t) el_malloc(SIGSIZE);
+ el->el_signal = (el_signalhandler_t *) el_malloc(SIGSIZE);
if (el->el_signal == NULL)
return (-1);
for (i = 0; sighdl[i] != -1; i++)
@@ -157,7 +163,7 @@ sig_set(EditLine *el)
(void) sigprocmask(SIG_BLOCK, &nset, &oset);
for (i = 0; sighdl[i] != -1; i++) {
- libedit_sig_t s;
+ el_signalhandler_t s;
/* This could happen if we get interrupted */
if ((s = signal(sighdl[i], sig_handler)) != sig_handler)
el->el_signal[i] = s;
diff --git a/cmd-line-utils/libedit/sig.h b/cmd-line-utils/libedit/sig.h
index 399e3a69437..8effea8e121 100644
--- a/cmd-line-utils/libedit/sig.h
+++ b/cmd-line-utils/libedit/sig.h
@@ -1,4 +1,4 @@
-/* $NetBSD: sig.h,v 1.3 2000/09/04 22:06:32 lukem Exp $ */
+/* $NetBSD: sig.h,v 1.4 2003/03/10 00:58:05 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -62,9 +62,8 @@
_DO(SIGCONT) \
_DO(SIGWINCH)
-typedef RETSIGTYPE (*libedit_sig_t)();
-typedef libedit_sig_t *el_signal_t;
-
+typedef void (*el_signalhandler_t)(int);
+typedef el_signalhandler_t *el_signal_t;
protected void sig_end(EditLine*);
protected int sig_init(EditLine*);
diff --git a/cmd-line-utils/libedit/sys.h b/cmd-line-utils/libedit/sys.h
index d9007243456..a7477d2c5ba 100644
--- a/cmd-line-utils/libedit/sys.h
+++ b/cmd-line-utils/libedit/sys.h
@@ -1,4 +1,4 @@
-/* $NetBSD: sys.h,v 1.4 2000/09/04 22:06:32 lukem Exp $ */
+/* $NetBSD: sys.h,v 1.6 2003/03/10 00:57:38 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -44,6 +44,10 @@
#ifndef _h_sys
#define _h_sys
+#ifdef HAVE_SYS_CDEFS_H
+#include <sys/cdefs.h>
+#endif
+
#ifndef public
# define public /* Externally visible functions/variables */
#endif
@@ -57,10 +61,6 @@
/* When we want to hide everything */
#endif
-#if HAVE_SYS_CDEFS_H
-#include <sys/cdefs.h>
-#endif
-
#ifndef _PTR_T
# define _PTR_T
typedef void *ptr_t;
@@ -73,22 +73,58 @@ typedef void *ioctl_t;
#include <stdio.h>
+#ifndef HAVE_STRLCAT
+#define strlcat libedit_strlcat
+size_t strlcat(char *dst, const char *src, size_t size);
+#endif
+
+#ifndef HAVE_STRLCPY
+#define strlcpy libedit_strlcpy
+size_t strlcpy(char *dst, const char *src, size_t size);
+#endif
+
+#ifndef HAVE_FGETLN
+#define fgetln libedit_fgetln
+char *fgetln(FILE *fp, size_t *len);
+#endif
+
#define REGEX /* Use POSIX.2 regular expression functions */
#undef REGEXP /* Use UNIX V8 regular expression functions */
-#if defined(__sun__) && defined(__SVR4)
+#ifdef notdef
# undef REGEX
# undef REGEXP
# include <malloc.h>
-typedef void (*sig_t)(int);
-#endif
-
-#ifndef __P
-#ifdef __STDC__
-#define __P(x) x
-#else
-#define __P(x) ()
-#endif
+# ifdef __GNUC__
+/*
+ * Broken hdrs.
+ */
+extern int tgetent(const char *bp, char *name);
+extern int tgetflag(const char *id);
+extern int tgetnum(const char *id);
+extern char *tgetstr(const char *id, char **area);
+extern char *tgoto(const char *cap, int col, int row);
+extern int tputs(const char *str, int affcnt, int (*putc)(int));
+extern char *getenv(const char *);
+extern int fprintf(FILE *, const char *, ...);
+extern int sigsetmask(int);
+extern int sigblock(int);
+extern int fputc(int, FILE *);
+extern int fgetc(FILE *);
+extern int fflush(FILE *);
+extern int tolower(int);
+extern int toupper(int);
+extern int errno, sys_nerr;
+extern char *sys_errlist[];
+extern void perror(const char *);
+# include <string.h>
+# define strerror(e) sys_errlist[e]
+# endif
+# ifdef SABER
+extern ptr_t memcpy(ptr_t, const ptr_t, size_t);
+extern ptr_t memset(ptr_t, int, size_t);
+# endif
+extern char *fgetline(FILE *, int *);
#endif
#endif /* _h_sys */
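
The fallback declarations above only come into play when the platform lacks strlcpy()/strlcat()/fgetln(); a hedged sketch of the truncation-safe idiom the first two enable, using the prototypes exactly as declared (the size argument is the full destination size, and a return value at or above it signals truncation). The path strings are illustrative.

#include <stdio.h>
#include <string.h>
#include "sys.h"   /* maps to libedit_strlcpy/libedit_strlcat when needed */

int main(void)
{
    char path[32];

    /* Both calls take the total size of the destination and always
     * NUL-terminate; compare the return value against that size to
     * detect truncation. */
    if (strlcpy(path, "/usr/local/", sizeof(path)) >= sizeof(path) ||
        strlcat(path, "mysql", sizeof(path)) >= sizeof(path))
        fprintf(stderr, "path truncated\n");
    else
        printf("%s\n", path);
    return 0;
}
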
diff --git a/cmd-line-utils/libedit/term.c b/cmd-line-utils/libedit/term.c
index bcda9ac1216..1f90c783a2b 100644
--- a/cmd-line-utils/libedit/term.c
+++ b/cmd-line-utils/libedit/term.c
@@ -1,4 +1,4 @@
-/* $NetBSD: term.c,v 1.32 2001/01/23 15:55:31 jdolecek Exp $ */
+/* $NetBSD: term.c,v 1.35 2002/03/18 16:00:59 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,31 +36,40 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)term.c 8.2 (Berkeley) 4/30/95";
+#else
+__RCSID("$NetBSD: term.c,v 1.35 2002/03/18 16:00:59 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* term.c: Editor/termcap-curses interface
* We have to declare a static variable here, since the
* termcap putchar routine does not take an argument!
*/
-
-#include "sys.h"
#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
-#if defined(HAVE_TERMCAP_H)
+#ifdef HAVE_TERMCAP_H
#include <termcap.h>
-#elif defined(HAVE_CURSES_H) && defined(HAVE_TERM_H) /* For HPUX11 */
+#endif
+#ifdef HAVE_CURSES_H
#include <curses.h>
-#include <term.h>
#endif
-#include <sys/types.h>
-#include <sys/ioctl.h>
+#ifdef HAVE_NCURSES_H
+#include <ncurses.h>
+#endif
#include "el.h"
+#include <sys/types.h>
+#include <sys/ioctl.h>
+
/*
* IMPORTANT NOTE: these routines are allowed to look at the current screen
* and the current position assuming that it is correct. If this is not
@@ -340,8 +349,7 @@ term_init(EditLine *el)
return (-1);
(void) memset(el->el_term.t_val, 0, T_val * sizeof(int));
term_outfile = el->el_outfile;
- if (term_set(el, NULL) == -1)
- return (-1);
+ (void) term_set(el, NULL);
term_init_arrow(el);
return (0);
}
@@ -637,7 +645,7 @@ mc_again:
* from col 0
*/
if (EL_CAN_TAB ?
- ((unsigned int)-del > (((unsigned int) where >> 3) +
+ (((unsigned int)-del) > (((unsigned int) where >> 3) +
(where & 07)))
: (-del > where)) {
term__putc('\r'); /* do a CR */
@@ -897,7 +905,7 @@ term_set(EditLine *el, const char *term)
memset(el->el_term.t_cap, 0, TC_BUFSIZE);
- i = tgetent(el->el_term.t_cap, (char*) term);
+ i = tgetent(el->el_term.t_cap, term);
if (i <= 0) {
if (i == -1)
@@ -927,7 +935,7 @@ term_set(EditLine *el, const char *term)
Val(T_co) = tgetnum("co");
Val(T_li) = tgetnum("li");
for (t = tstr; t->name != NULL; t++)
- term_alloc(el, t, tgetstr((char*) t->name, &area));
+ term_alloc(el, t, tgetstr(t->name, &area));
}
if (Val(T_co) < 2)
@@ -1067,34 +1075,32 @@ term_reset_arrow(EditLine *el)
static const char stOH[] = {033, 'O', 'H', '\0'};
static const char stOF[] = {033, 'O', 'F', '\0'};
- term_init_arrow(el); /* Init arrow struct */
-
- key_add(el, strA, &arrow[A_K_UP].fun, arrow[A_K_UP].type);
- key_add(el, strB, &arrow[A_K_DN].fun, arrow[A_K_DN].type);
- key_add(el, strC, &arrow[A_K_RT].fun, arrow[A_K_RT].type);
- key_add(el, strD, &arrow[A_K_LT].fun, arrow[A_K_LT].type);
- key_add(el, strH, &arrow[A_K_HO].fun, arrow[A_K_HO].type);
- key_add(el, strF, &arrow[A_K_EN].fun, arrow[A_K_EN].type);
- key_add(el, stOA, &arrow[A_K_UP].fun, arrow[A_K_UP].type);
- key_add(el, stOB, &arrow[A_K_DN].fun, arrow[A_K_DN].type);
- key_add(el, stOC, &arrow[A_K_RT].fun, arrow[A_K_RT].type);
- key_add(el, stOD, &arrow[A_K_LT].fun, arrow[A_K_LT].type);
- key_add(el, stOH, &arrow[A_K_HO].fun, arrow[A_K_HO].type);
- key_add(el, stOF, &arrow[A_K_EN].fun, arrow[A_K_EN].type);
+ el_key_add(el, strA, &arrow[A_K_UP].fun, arrow[A_K_UP].type);
+ el_key_add(el, strB, &arrow[A_K_DN].fun, arrow[A_K_DN].type);
+ el_key_add(el, strC, &arrow[A_K_RT].fun, arrow[A_K_RT].type);
+ el_key_add(el, strD, &arrow[A_K_LT].fun, arrow[A_K_LT].type);
+ el_key_add(el, strH, &arrow[A_K_HO].fun, arrow[A_K_HO].type);
+ el_key_add(el, strF, &arrow[A_K_EN].fun, arrow[A_K_EN].type);
+ el_key_add(el, stOA, &arrow[A_K_UP].fun, arrow[A_K_UP].type);
+ el_key_add(el, stOB, &arrow[A_K_DN].fun, arrow[A_K_DN].type);
+ el_key_add(el, stOC, &arrow[A_K_RT].fun, arrow[A_K_RT].type);
+ el_key_add(el, stOD, &arrow[A_K_LT].fun, arrow[A_K_LT].type);
+ el_key_add(el, stOH, &arrow[A_K_HO].fun, arrow[A_K_HO].type);
+ el_key_add(el, stOF, &arrow[A_K_EN].fun, arrow[A_K_EN].type);
if (el->el_map.type == MAP_VI) {
- key_add(el, &strA[1], &arrow[A_K_UP].fun, arrow[A_K_UP].type);
- key_add(el, &strB[1], &arrow[A_K_DN].fun, arrow[A_K_DN].type);
- key_add(el, &strC[1], &arrow[A_K_RT].fun, arrow[A_K_RT].type);
- key_add(el, &strD[1], &arrow[A_K_LT].fun, arrow[A_K_LT].type);
- key_add(el, &strH[1], &arrow[A_K_HO].fun, arrow[A_K_HO].type);
- key_add(el, &strF[1], &arrow[A_K_EN].fun, arrow[A_K_EN].type);
- key_add(el, &stOA[1], &arrow[A_K_UP].fun, arrow[A_K_UP].type);
- key_add(el, &stOB[1], &arrow[A_K_DN].fun, arrow[A_K_DN].type);
- key_add(el, &stOC[1], &arrow[A_K_RT].fun, arrow[A_K_RT].type);
- key_add(el, &stOD[1], &arrow[A_K_LT].fun, arrow[A_K_LT].type);
- key_add(el, &stOH[1], &arrow[A_K_HO].fun, arrow[A_K_HO].type);
- key_add(el, &stOF[1], &arrow[A_K_EN].fun, arrow[A_K_EN].type);
+ el_key_add(el, &strA[1], &arrow[A_K_UP].fun, arrow[A_K_UP].type);
+ el_key_add(el, &strB[1], &arrow[A_K_DN].fun, arrow[A_K_DN].type);
+ el_key_add(el, &strC[1], &arrow[A_K_RT].fun, arrow[A_K_RT].type);
+ el_key_add(el, &strD[1], &arrow[A_K_LT].fun, arrow[A_K_LT].type);
+ el_key_add(el, &strH[1], &arrow[A_K_HO].fun, arrow[A_K_HO].type);
+ el_key_add(el, &strF[1], &arrow[A_K_EN].fun, arrow[A_K_EN].type);
+ el_key_add(el, &stOA[1], &arrow[A_K_UP].fun, arrow[A_K_UP].type);
+ el_key_add(el, &stOB[1], &arrow[A_K_DN].fun, arrow[A_K_DN].type);
+ el_key_add(el, &stOC[1], &arrow[A_K_RT].fun, arrow[A_K_RT].type);
+ el_key_add(el, &stOD[1], &arrow[A_K_LT].fun, arrow[A_K_LT].type);
+ el_key_add(el, &stOH[1], &arrow[A_K_HO].fun, arrow[A_K_HO].type);
+ el_key_add(el, &stOF[1], &arrow[A_K_EN].fun, arrow[A_K_EN].type);
}
}
@@ -1148,7 +1154,7 @@ term_print_arrow(EditLine *el, const char *name)
for (i = 0; i < A_K_NKEYS; i++)
if (*name == '\0' || strcmp(name, arrow[i].name) == 0)
if (arrow[i].type != XK_NOD)
- key_kprint(el, arrow[i].name, &arrow[i].fun,
+ el_key_kprint(el, arrow[i].name, &arrow[i].fun,
arrow[i].type);
}
@@ -1189,20 +1195,20 @@ term_bind_arrow(EditLine *el)
* unassigned key.
*/
if (arrow[i].type == XK_NOD)
- key_clear(el, map, p);
+ el_key_clear(el, map, p);
else {
if (p[1] && (dmap[j] == map[j] ||
map[j] == ED_SEQUENCE_LEAD_IN)) {
- key_add(el, p, &arrow[i].fun,
+ el_key_add(el, p, &arrow[i].fun,
arrow[i].type);
map[j] = ED_SEQUENCE_LEAD_IN;
} else if (map[j] == ED_UNASSIGNED) {
- key_clear(el, map, p);
+ el_key_clear(el, map, p);
if (arrow[i].type == XK_CMD)
map[j] = arrow[i].fun.cmd;
else
- key_add(el, p, &arrow[i].fun,
- arrow[i].type);
+ el_key_add(el, p, &arrow[i].fun,
+ arrow[i].type);
}
}
}
@@ -1235,10 +1241,11 @@ term__flush(void)
/* term_telltc():
* Print the current termcap characteristics
*/
+char *el_key__decode_str(const char *, char *, const char *);
+
protected int
/*ARGSUSED*/
-term_telltc(EditLine *el, int
- argc __attribute__((unused)),
+term_telltc(EditLine *el, int argc __attribute__((unused)),
const char **argv __attribute__((unused)))
{
const struct termcapstr *t;
@@ -1263,7 +1270,7 @@ term_telltc(EditLine *el, int
(void) fprintf(el->el_outfile, "\t%25s (%s) == %s\n",
t->long_name,
t->name, *ts && **ts ?
- key__decode_str(*ts, upbuf, "") : "(empty)");
+ el_key__decode_str(*ts, upbuf, "") : "(empty)");
(void) fputc('\n', el->el_outfile);
return (0);
}
@@ -1274,7 +1281,8 @@ term_telltc(EditLine *el, int
*/
protected int
/*ARGSUSED*/
-term_settc(EditLine *el, int argc __attribute__((unused)), const char **argv)
+term_settc(EditLine *el, int argc __attribute__((unused)),
+ const char **argv __attribute__((unused)))
{
const struct termcapstr *ts;
const struct termcapval *tv;
@@ -1350,7 +1358,9 @@ term_settc(EditLine *el, int argc __attribute__((unused)), const char **argv)
*/
protected int
/*ARGSUSED*/
-term_echotc(EditLine *el, int argc __attribute__((unused)), const char **argv)
+term_echotc(EditLine *el __attribute__((unused)),
+ int argc __attribute__((unused)),
+ const char **argv __attribute__((unused)))
{
char *cap, *scap, *ep;
int arg_need, arg_cols, arg_rows;
@@ -1429,7 +1439,7 @@ term_echotc(EditLine *el, int argc __attribute__((unused)), const char **argv)
break;
}
if (t->name == NULL)
- scap = tgetstr((char*) *argv, &area);
+ scap = tgetstr(*argv, &area);
if (!scap || scap[0] == '\0') {
if (!silent)
(void) fprintf(el->el_errfile,
diff --git a/cmd-line-utils/libedit/tokenizer.c b/cmd-line-utils/libedit/tokenizer.c
index 7a7e5b5ed75..f6892d9954c 100644
--- a/cmd-line-utils/libedit/tokenizer.c
+++ b/cmd-line-utils/libedit/tokenizer.c
@@ -1,4 +1,4 @@
-/* $NetBSD: tokenizer.c,v 1.7 2001/01/04 15:56:32 christos Exp $ */
+/* $NetBSD: tokenizer.c,v 1.11 2002/10/27 20:24:29 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)tokenizer.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: tokenizer.c,v 1.11 2002/10/27 20:24:29 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* tokenize.c: Bourne shell like tokenizer
*/
-#include "sys.h"
#include <string.h>
#include <stdlib.h>
#include "tokenizer.h"
@@ -66,7 +72,7 @@ typedef enum {
struct tokenizer {
char *ifs; /* In field separator */
int argc, amax; /* Current and maximum number of args */
- const char **argv; /* Argument list */
+ char **argv; /* Argument list */
char *wptr, *wmax; /* Space and limit on the word buffer */
char *wstart; /* Beginning of next word */
char *wspace; /* Space of word buffer */
@@ -103,16 +109,29 @@ tok_init(const char *ifs)
{
Tokenizer *tok = (Tokenizer *) tok_malloc(sizeof(Tokenizer));
+ if (tok == NULL)
+ return NULL;
tok->ifs = strdup(ifs ? ifs : IFS);
+ if (tok->ifs == NULL) {
+ tok_free((ptr_t)tok);
+ return NULL;
+ }
tok->argc = 0;
tok->amax = AINCR;
- tok->argv = (const char **) tok_malloc(sizeof(char *) * tok->amax);
- if (tok->argv == NULL)
- return (NULL);
+ tok->argv = (char **) tok_malloc(sizeof(char *) * tok->amax);
+ if (tok->argv == NULL) {
+ tok_free((ptr_t)tok->ifs);
+ tok_free((ptr_t)tok);
+ return NULL;
+ }
tok->argv[0] = NULL;
tok->wspace = (char *) tok_malloc(WINCR);
- if (tok->wspace == NULL)
- return (NULL);
+ if (tok->wspace == NULL) {
+ tok_free((ptr_t)tok->argv);
+ tok_free((ptr_t)tok->ifs);
+ tok_free((ptr_t)tok);
+ return NULL;
+ }
tok->wmax = tok->wspace + WINCR;
tok->wstart = tok->wspace;
tok->wptr = tok->wspace;
@@ -268,7 +287,7 @@ tok_line(Tokenizer *tok, const char *line, int *argc, const char ***argv)
switch (tok->quote) {
case Q_none:
tok_finish(tok);
- *argv = tok->argv;
+ *argv = (const char **)tok->argv;
*argc = tok->argc;
return (0);
@@ -301,7 +320,7 @@ tok_line(Tokenizer *tok, const char *line, int *argc, const char ***argv)
return (3);
}
tok_finish(tok);
- *argv = tok->argv;
+ *argv = (const char **)tok->argv;
*argc = tok->argc;
return (0);
@@ -363,25 +382,25 @@ tok_line(Tokenizer *tok, const char *line, int *argc, const char ***argv)
if (tok->wptr >= tok->wmax - 4) {
size_t size = tok->wmax - tok->wspace + WINCR;
char *s = (char *) tok_realloc(tok->wspace, size);
- /* SUPPRESS 22 */
- int offs = s - tok->wspace;
if (s == NULL)
return (-1);
- if (offs != 0) {
+ if (s != tok->wspace) {
int i;
- for (i = 0; i < tok->argc; i++)
- tok->argv[i] = tok->argv[i] + offs;
- tok->wptr = tok->wptr + offs;
- tok->wstart = tok->wstart + offs;
- tok->wmax = s + size;
+ for (i = 0; i < tok->argc; i++) {
+ tok->argv[i] =
+ (tok->argv[i] - tok->wspace) + s;
+ }
+ tok->wptr = (tok->wptr - tok->wspace) + s;
+ tok->wstart = (tok->wstart - tok->wspace) + s;
tok->wspace = s;
}
+ tok->wmax = s + size;
}
if (tok->argc >= tok->amax - 4) {
- const char **p;
+ char **p;
tok->amax += AINCR;
- p = (const char **) tok_realloc(tok->argv,
+ p = (char **) tok_realloc(tok->argv,
tok->amax * sizeof(char *));
if (p == NULL)
return (-1);
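
The tok_line() changes above tighten the buffer-growth path: the realloc() result is checked before any pointer arithmetic, saved pointers are rebased as old-offset plus new base when the block moves, and the buffer limit is refreshed even when it does not. A minimal sketch of the same pattern, with invented names:

#include <stdlib.h>

struct wordbuf {
	char *space;   /* start of buffer        */
	char *ptr;     /* next free byte         */
	char *limit;   /* one past end of buffer */
};

static int
wordbuf_grow(struct wordbuf *wb, size_t oldsize, size_t incr)
{
	size_t newsize = oldsize + incr;
	char *s = realloc(wb->space, newsize);

	if (s == NULL)                        /* old block still valid on failure */
		return -1;
	if (wb->space != NULL && s != wb->space)
		wb->ptr = (wb->ptr - wb->space) + s;  /* rebase into the new block */
	wb->space = s;
	wb->limit = s + newsize;              /* refresh the limit unconditionally */
	return 0;
}

int
main(void)
{
	struct wordbuf wb = { NULL, NULL, NULL };

	if (wordbuf_grow(&wb, 0, 64) != 0)
		return 1;
	wb.ptr = wb.space;                    /* start writing at the new buffer */
	free(wb.space);
	return 0;
}
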
diff --git a/cmd-line-utils/libedit/tokenizer.h b/cmd-line-utils/libedit/tokenizer.h
index 14919fd3f84..7cc7a3346e4 100644
--- a/cmd-line-utils/libedit/tokenizer.h
+++ b/cmd-line-utils/libedit/tokenizer.h
@@ -1,4 +1,4 @@
-/* $NetBSD: tokenizer.h,v 1.4 2000/09/04 22:06:33 lukem Exp $ */
+/* $NetBSD: tokenizer.h,v 1.5 2002/03/18 16:01:00 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
diff --git a/cmd-line-utils/libedit/tty.c b/cmd-line-utils/libedit/tty.c
index 2c7b502136d..fe81762fb82 100644
--- a/cmd-line-utils/libedit/tty.c
+++ b/cmd-line-utils/libedit/tty.c
@@ -1,4 +1,4 @@
-/* $NetBSD: tty.c,v 1.15 2001/05/17 01:02:17 christos Exp $ */
+/* $NetBSD: tty.c,v 1.16 2002/03/18 16:01:01 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,18 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)tty.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: tty.c,v 1.16 2002/03/18 16:01:01 christos Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* tty.c: tty interface stuff
*/
-#include "sys.h"
#include "tty.h"
#include "el.h"
@@ -54,7 +60,7 @@ typedef struct ttymodes_t {
typedef struct ttymap_t {
int nch, och; /* Internal and termio rep of chars */
el_action_t bind[3]; /* emacs, vi, and vi-cmd */
-} ttymap_t;
+} ttymap_t;
private const ttyperm_t ttyperm = {
@@ -778,15 +784,15 @@ tty_bind_char(EditLine *el, int force)
if (new[0] == old[0] && !force)
continue;
/* Put the old default binding back, and set the new binding */
- key_clear(el, map, (char *)old);
+ el_key_clear(el, map, (char *)old);
map[old[0]] = dmap[old[0]];
- key_clear(el, map, (char *)new);
+ el_key_clear(el, map, (char *)new);
/* MAP_VI == 1, MAP_EMACS == 0... */
map[new[0]] = tp->bind[el->el_map.type];
if (dalt) {
- key_clear(el, alt, (char *)old);
+ el_key_clear(el, alt, (char *)old);
alt[old[0]] = dalt[old[0]];
- key_clear(el, alt, (char *)new);
+ el_key_clear(el, alt, (char *)new);
alt[new[0]] = tp->bind[el->el_map.type + 1];
}
}
@@ -1039,9 +1045,8 @@ tty_stty(EditLine *el, int argc __attribute__((unused)), const char **argv)
{
const ttymodes_t *m;
char x;
- const char *d;
int aflag = 0;
- const char *s;
+ const char *s, *d;
const char *name;
int z = EX_IO;
diff --git a/cmd-line-utils/libedit/tty.h b/cmd-line-utils/libedit/tty.h
index 5fdcfadf0dc..e9597fceb2b 100644
--- a/cmd-line-utils/libedit/tty.h
+++ b/cmd-line-utils/libedit/tty.h
@@ -1,4 +1,4 @@
-/* $NetBSD: tty.h,v 1.8 2000/09/04 22:06:33 lukem Exp $ */
+/* $NetBSD: tty.h,v 1.9 2002/03/18 16:01:01 christos Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -453,16 +453,16 @@
#define MD_NN 5
typedef struct {
- const char *t_name;
- u_int t_setmask;
- u_int t_clrmask;
+ const char *t_name;
+ u_int t_setmask;
+ u_int t_clrmask;
} ttyperm_t[NN_IO][MD_NN];
typedef unsigned char ttychar_t[NN_IO][C_NCC];
protected int tty_init(EditLine *);
protected void tty_end(EditLine *);
-protected int tty_stty(EditLine *, int, const char**);
+protected int tty_stty(EditLine *, int, const char **);
protected int tty_rawmode(EditLine *);
protected int tty_cookedmode(EditLine *);
protected int tty_quotemode(EditLine *);
diff --git a/cmd-line-utils/libedit/vi.c b/cmd-line-utils/libedit/vi.c
index 296e11eb4d9..5380872cf65 100644
--- a/cmd-line-utils/libedit/vi.c
+++ b/cmd-line-utils/libedit/vi.c
@@ -1,4 +1,4 @@
-/* $NetBSD: vi.c,v 1.8 2000/09/04 22:06:33 lukem Exp $ */
+/* $NetBSD: vi.c,v 1.16 2003/03/10 11:09:25 dsl Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -36,12 +36,22 @@
* SUCH DAMAGE.
*/
-#include "compat.h"
+#include "config.h"
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#if !defined(lint) && !defined(SCCSID)
+#if 0
+static char sccsid[] = "@(#)vi.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: vi.c,v 1.16 2003/03/10 11:09:25 dsl Exp $");
+#endif
+#endif /* not lint && not SCCSID */
/*
* vi.c: Vi mode commands.
*/
-#include "sys.h"
#include "el.h"
private el_action_t cv_action(EditLine *, int);
@@ -53,22 +63,18 @@ private el_action_t cv_paste(EditLine *, int);
private el_action_t
cv_action(EditLine *el, int c)
{
- char *cp, *kp;
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
+ /* 'cc', 'dd' and (possibly) friends */
+ if (c != el->el_chared.c_vcmd.action)
+ return CC_ERROR;
+
+ if (!(c & YANK))
+ cv_undo(el);
+ cv_yank(el, el->el_line.buffer,
+ el->el_line.lastchar - el->el_line.buffer);
el->el_chared.c_vcmd.action = NOP;
el->el_chared.c_vcmd.pos = 0;
-
- el->el_chared.c_undo.isize = 0;
- el->el_chared.c_undo.dsize = 0;
- kp = el->el_chared.c_undo.buf;
- for (cp = el->el_line.buffer; cp < el->el_line.lastchar; cp++) {
- *kp++ = *cp;
- el->el_chared.c_undo.dsize++;
- }
-
- el->el_chared.c_undo.action = INSERT;
- el->el_chared.c_undo.ptr = el->el_line.buffer;
el->el_line.lastchar = el->el_line.buffer;
el->el_line.cursor = el->el_line.buffer;
if (c & INSERT)
@@ -79,25 +85,8 @@ cv_action(EditLine *el, int c)
el->el_chared.c_vcmd.pos = el->el_line.cursor;
el->el_chared.c_vcmd.action = c;
return (CC_ARGHACK);
-
-#ifdef notdef
- /*
- * I don't think that this is needed. But we keep it for now
- */
- else
- if (el_chared.c_vcmd.action == NOP) {
- el->el_chared.c_vcmd.pos = el->el_line.cursor;
- el->el_chared.c_vcmd.action = c;
- return (CC_ARGHACK);
- } else {
- el->el_chared.c_vcmd.action = 0;
- el->el_chared.c_vcmd.pos = 0;
- return (CC_ERROR);
- }
-#endif
}
-
/* cv_paste():
* Paste previous deletion before or after the cursor
*/
@@ -105,23 +94,25 @@ private el_action_t
cv_paste(EditLine *el, int c)
{
char *ptr;
- c_undo_t *un = &el->el_chared.c_undo;
+ c_kill_t *k = &el->el_chared.c_kill;
+ int len = k->last - k->buf;
+ if (k->buf == NULL || len == 0)
+ return (CC_ERROR);
#ifdef DEBUG_PASTE
- (void) fprintf(el->el_errfile, "Paste: %x \"%s\" +%d -%d\n",
- un->action, un->buf, un->isize, un->dsize);
+ (void) fprintf(el->el_errfile, "Paste: \"%.*s\"\n", len, k->buf);
#endif
- if (un->isize == 0)
- return (CC_ERROR);
+
+ cv_undo(el);
if (!c && el->el_line.cursor < el->el_line.lastchar)
el->el_line.cursor++;
ptr = el->el_line.cursor;
- c_insert(el, (int) un->isize);
- if (el->el_line.cursor + un->isize > el->el_line.lastchar)
+ c_insert(el, len);
+ if (el->el_line.cursor + len > el->el_line.lastchar)
return (CC_ERROR);
- (void) memcpy(ptr, un->buf, un->isize);
+ (void) memcpy(ptr, k->buf, len +0u);
return (CC_REFRESH);
}
@@ -152,24 +143,24 @@ vi_paste_prev(EditLine *el, int c __attribute__((unused)))
}
-/* vi_prev_space_word():
+/* vi_prev_big_word():
* Vi move to the previous space delimited word
* [B]
*/
protected el_action_t
/*ARGSUSED*/
-vi_prev_space_word(EditLine *el, int c __attribute__((unused)))
+vi_prev_big_word(EditLine *el, int c __attribute__((unused)))
{
if (el->el_line.cursor == el->el_line.buffer)
return (CC_ERROR);
- el->el_line.cursor = cv_prev_word(el, el->el_line.cursor,
+ el->el_line.cursor = cv_prev_word(el->el_line.cursor,
el->el_line.buffer,
el->el_state.argument,
- cv__isword);
+ cv__isWord);
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -179,7 +170,7 @@ vi_prev_space_word(EditLine *el, int c __attribute__((unused)))
/* vi_prev_word():
* Vi move to the previous word
- * [B]
+ * [b]
*/
protected el_action_t
/*ARGSUSED*/
@@ -189,12 +180,12 @@ vi_prev_word(EditLine *el, int c __attribute__((unused)))
if (el->el_line.cursor == el->el_line.buffer)
return (CC_ERROR);
- el->el_line.cursor = cv_prev_word(el, el->el_line.cursor,
+ el->el_line.cursor = cv_prev_word(el->el_line.cursor,
el->el_line.buffer,
el->el_state.argument,
- ce__isword);
+ cv__isword);
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -202,25 +193,23 @@ vi_prev_word(EditLine *el, int c __attribute__((unused)))
}
-/* vi_next_space_word():
+/* vi_next_big_word():
* Vi move to the next space delimited word
* [W]
*/
protected el_action_t
/*ARGSUSED*/
-vi_next_space_word(EditLine *el, int c __attribute__((unused)))
+vi_next_big_word(EditLine *el, int c __attribute__((unused)))
{
- if (el->el_line.cursor == el->el_line.lastchar)
+ if (el->el_line.cursor >= el->el_line.lastchar - 1)
return (CC_ERROR);
el->el_line.cursor = cv_next_word(el, el->el_line.cursor,
- el->el_line.lastchar,
- el->el_state.argument,
- cv__isword);
+ el->el_line.lastchar, el->el_state.argument, cv__isWord);
if (el->el_map.type == MAP_VI)
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -237,16 +226,14 @@ protected el_action_t
vi_next_word(EditLine *el, int c __attribute__((unused)))
{
- if (el->el_line.cursor == el->el_line.lastchar)
+ if (el->el_line.cursor >= el->el_line.lastchar - 1)
return (CC_ERROR);
el->el_line.cursor = cv_next_word(el, el->el_line.cursor,
- el->el_line.lastchar,
- el->el_state.argument,
- ce__isword);
+ el->el_line.lastchar, el->el_state.argument, cv__isword);
if (el->el_map.type == MAP_VI)
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
cv_delfini(el);
return (CC_REFRESH);
}
@@ -261,19 +248,27 @@ vi_next_word(EditLine *el, int c __attribute__((unused)))
protected el_action_t
vi_change_case(EditLine *el, int c)
{
+ int i;
- if (el->el_line.cursor < el->el_line.lastchar) {
- c = *el->el_line.cursor;
+ if (el->el_line.cursor >= el->el_line.lastchar)
+ return (CC_ERROR);
+ cv_undo(el);
+ for (i = 0; i < el->el_state.argument; i++) {
+
+ c = *(unsigned char *)el->el_line.cursor;
if (isupper(c))
- *el->el_line.cursor++ = tolower(c);
+ *el->el_line.cursor = tolower(c);
else if (islower(c))
- *el->el_line.cursor++ = toupper(c);
- else
- el->el_line.cursor++;
+ *el->el_line.cursor = toupper(c);
+
+ if (++el->el_line.cursor >= el->el_line.lastchar) {
+ el->el_line.cursor--;
+ re_fastaddc(el);
+ break;
+ }
re_fastaddc(el);
- return (CC_NORM);
}
- return (CC_ERROR);
+ return CC_NORM;
}
@@ -304,11 +299,7 @@ vi_insert_at_bol(EditLine *el, int c __attribute__((unused)))
{
el->el_line.cursor = el->el_line.buffer;
- el->el_chared.c_vcmd.ins = el->el_line.cursor;
-
- el->el_chared.c_undo.ptr = el->el_line.cursor;
- el->el_chared.c_undo.action = DELETE;
-
+ cv_undo(el);
el->el_map.current = el->el_map.key;
return (CC_CURSOR);
}
@@ -323,13 +314,13 @@ protected el_action_t
vi_replace_char(EditLine *el, int c __attribute__((unused)))
{
+ if (el->el_line.cursor >= el->el_line.lastchar)
+ return CC_ERROR;
+
el->el_map.current = el->el_map.key;
el->el_state.inputmode = MODE_REPLACE_1;
- el->el_chared.c_undo.action = CHANGE;
- el->el_chared.c_undo.ptr = el->el_line.cursor;
- el->el_chared.c_undo.isize = 0;
- el->el_chared.c_undo.dsize = 0;
- return (CC_NORM);
+ cv_undo(el);
+ return (CC_ARGHACK);
}
@@ -344,17 +335,14 @@ vi_replace_mode(EditLine *el, int c __attribute__((unused)))
el->el_map.current = el->el_map.key;
el->el_state.inputmode = MODE_REPLACE;
- el->el_chared.c_undo.action = CHANGE;
- el->el_chared.c_undo.ptr = el->el_line.cursor;
- el->el_chared.c_undo.isize = 0;
- el->el_chared.c_undo.dsize = 0;
+ cv_undo(el);
return (CC_NORM);
}
/* vi_substitute_char():
* Vi replace character under the cursor and enter insert mode
- * [r]
+ * [s]
*/
protected el_action_t
/*ARGSUSED*/
@@ -376,6 +364,9 @@ protected el_action_t
vi_substitute_line(EditLine *el, int c __attribute__((unused)))
{
+ cv_undo(el);
+ cv_yank(el, el->el_line.buffer,
+ el->el_line.lastchar - el->el_line.buffer);
(void) em_kill_line(el, 0);
el->el_map.current = el->el_map.key;
return (CC_REFRESH);
@@ -391,6 +382,9 @@ protected el_action_t
vi_change_to_eol(EditLine *el, int c __attribute__((unused)))
{
+ cv_undo(el);
+ cv_yank(el, el->el_line.cursor,
+ el->el_line.lastchar - el->el_line.cursor);
(void) ed_kill_line(el, 0);
el->el_map.current = el->el_map.key;
return (CC_REFRESH);
@@ -407,11 +401,7 @@ vi_insert(EditLine *el, int c __attribute__((unused)))
{
el->el_map.current = el->el_map.key;
-
- el->el_chared.c_vcmd.ins = el->el_line.cursor;
- el->el_chared.c_undo.ptr = el->el_line.cursor;
- el->el_chared.c_undo.action = DELETE;
-
+ cv_undo(el);
return (CC_NORM);
}
@@ -435,9 +425,7 @@ vi_add(EditLine *el, int c __attribute__((unused)))
} else
ret = CC_NORM;
- el->el_chared.c_vcmd.ins = el->el_line.cursor;
- el->el_chared.c_undo.ptr = el->el_line.cursor;
- el->el_chared.c_undo.action = DELETE;
+ cv_undo(el);
return (ret);
}
@@ -454,11 +442,7 @@ vi_add_at_eol(EditLine *el, int c __attribute__((unused)))
el->el_map.current = el->el_map.key;
el->el_line.cursor = el->el_line.lastchar;
-
- /* Mark where insertion begins */
- el->el_chared.c_vcmd.ins = el->el_line.lastchar;
- el->el_chared.c_undo.ptr = el->el_line.lastchar;
- el->el_chared.c_undo.action = DELETE;
+ cv_undo(el);
return (CC_CURSOR);
}
@@ -476,22 +460,22 @@ vi_delete_meta(EditLine *el, int c __attribute__((unused)))
}
-/* vi_end_word():
+/* vi_end_big_word():
* Vi move to the end of the current space delimited word
* [E]
*/
protected el_action_t
/*ARGSUSED*/
-vi_end_word(EditLine *el, int c __attribute__((unused)))
+vi_end_big_word(EditLine *el, int c __attribute__((unused)))
{
if (el->el_line.cursor == el->el_line.lastchar)
return (CC_ERROR);
el->el_line.cursor = cv__endword(el->el_line.cursor,
- el->el_line.lastchar, el->el_state.argument);
+ el->el_line.lastchar, el->el_state.argument, cv__isWord);
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
el->el_line.cursor++;
cv_delfini(el);
return (CC_REFRESH);
@@ -500,22 +484,22 @@ vi_end_word(EditLine *el, int c __attribute__((unused)))
}
-/* vi_to_end_word():
+/* vi_end_word():
* Vi move to the end of the current word
* [e]
*/
protected el_action_t
/*ARGSUSED*/
-vi_to_end_word(EditLine *el, int c __attribute__((unused)))
+vi_end_word(EditLine *el, int c __attribute__((unused)))
{
if (el->el_line.cursor == el->el_line.lastchar)
return (CC_ERROR);
el->el_line.cursor = cv__endword(el->el_line.cursor,
- el->el_line.lastchar, el->el_state.argument);
+ el->el_line.lastchar, el->el_state.argument, cv__isword);
- if (el->el_chared.c_vcmd.action & DELETE) {
+ if (el->el_chared.c_vcmd.action != NOP) {
el->el_line.cursor++;
cv_delfini(el);
return (CC_REFRESH);
@@ -532,100 +516,19 @@ protected el_action_t
/*ARGSUSED*/
vi_undo(EditLine *el, int c __attribute__((unused)))
{
- char *cp, *kp;
- char temp;
- int i, size;
- c_undo_t *un = &el->el_chared.c_undo;
-
-#ifdef DEBUG_UNDO
- (void) fprintf(el->el_errfile, "Undo: %x \"%s\" +%d -%d\n",
- un->action, un->buf, un->isize, un->dsize);
-#endif
- switch (un->action) {
- case DELETE:
- if (un->dsize == 0)
- return (CC_NORM);
-
- (void) memcpy(un->buf, un->ptr, un->dsize);
- for (cp = un->ptr; cp <= el->el_line.lastchar; cp++)
- *cp = cp[un->dsize];
-
- el->el_line.lastchar -= un->dsize;
- el->el_line.cursor = un->ptr;
-
- un->action = INSERT;
- un->isize = un->dsize;
- un->dsize = 0;
- break;
+ c_undo_t un = el->el_chared.c_undo;
- case DELETE | INSERT:
- size = un->isize - un->dsize;
- if (size > 0)
- i = un->dsize;
- else
- i = un->isize;
- cp = un->ptr;
- kp = un->buf;
- while (i-- > 0) {
- temp = *kp;
- *kp++ = *cp;
- *cp++ = temp;
- }
- if (size > 0) {
- el->el_line.cursor = cp;
- c_insert(el, size);
- while (size-- > 0 && cp < el->el_line.lastchar) {
- temp = *kp;
- *kp++ = *cp;
- *cp++ = temp;
- }
- } else if (size < 0) {
- size = -size;
- for (; cp <= el->el_line.lastchar; cp++) {
- *kp++ = *cp;
- *cp = cp[size];
- }
- el->el_line.lastchar -= size;
- }
- el->el_line.cursor = un->ptr;
- i = un->dsize;
- un->dsize = un->isize;
- un->isize = i;
- break;
-
- case INSERT:
- if (un->isize == 0)
- return (CC_NORM);
-
- el->el_line.cursor = un->ptr;
- c_insert(el, (int) un->isize);
- (void) memcpy(un->ptr, un->buf, un->isize);
- un->action = DELETE;
- un->dsize = un->isize;
- un->isize = 0;
- break;
+ if (un.len == -1)
+ return CC_ERROR;
- case CHANGE:
- if (un->isize == 0)
- return (CC_NORM);
-
- el->el_line.cursor = un->ptr;
- size = (int) (el->el_line.cursor - el->el_line.lastchar);
- if (size < (int)un->isize)
- size = un->isize;
- cp = un->ptr;
- kp = un->buf;
- for (i = 0; i < size; i++) {
- temp = *kp;
- *kp++ = *cp;
- *cp++ = temp;
- }
- un->dsize = 0;
- break;
-
- default:
- return (CC_ERROR);
- }
+ /* switch line buffer and undo buffer */
+ el->el_chared.c_undo.buf = el->el_line.buffer;
+ el->el_chared.c_undo.len = el->el_line.lastchar - el->el_line.buffer;
+ el->el_chared.c_undo.cursor = el->el_line.cursor - el->el_line.buffer;
+ el->el_line.limit = un.buf + (el->el_line.limit - el->el_line.buffer);
+ el->el_line.buffer = un.buf;
+ el->el_line.cursor = un.buf + un.cursor;
+ el->el_line.lastchar = un.buf + un.len;
return (CC_REFRESH);
}
@@ -639,22 +542,12 @@ protected el_action_t
/*ARGSUSED*/
vi_command_mode(EditLine *el, int c __attribute__((unused)))
{
- int size;
/* [Esc] cancels pending action */
- el->el_chared.c_vcmd.ins = 0;
el->el_chared.c_vcmd.action = NOP;
el->el_chared.c_vcmd.pos = 0;
el->el_state.doingarg = 0;
- size = el->el_chared.c_undo.ptr - el->el_line.cursor;
- if (size < 0)
- size = -size;
- if (el->el_chared.c_undo.action == (INSERT | DELETE) ||
- el->el_chared.c_undo.action == DELETE)
- el->el_chared.c_undo.dsize = size;
- else
- el->el_chared.c_undo.isize = size;
el->el_state.inputmode = MODE_INSERT;
el->el_map.current = el->el_map.alt;
@@ -674,41 +567,37 @@ protected el_action_t
vi_zero(EditLine *el, int c)
{
- if (el->el_state.doingarg) {
- if (el->el_state.argument > 1000000)
- return (CC_ERROR);
- el->el_state.argument =
- (el->el_state.argument * 10) + (c - '0');
- return (CC_ARGHACK);
- } else {
- el->el_line.cursor = el->el_line.buffer;
- if (el->el_chared.c_vcmd.action & DELETE) {
- cv_delfini(el);
- return (CC_REFRESH);
- }
- return (CC_CURSOR);
+ if (el->el_state.doingarg)
+ return ed_argument_digit(el, c);
+
+ el->el_line.cursor = el->el_line.buffer;
+ if (el->el_chared.c_vcmd.action != NOP) {
+ cv_delfini(el);
+ return (CC_REFRESH);
}
+ return (CC_CURSOR);
}
/* vi_delete_prev_char():
* Vi move to previous character (backspace)
- * [^H]
+ * [^H] in insert mode only
*/
protected el_action_t
/*ARGSUSED*/
vi_delete_prev_char(EditLine *el, int c __attribute__((unused)))
{
+ char *cp;
- if (el->el_chared.c_vcmd.ins == 0)
+ cp = el->el_line.cursor;
+ if (cp <= el->el_line.buffer)
return (CC_ERROR);
- if (el->el_chared.c_vcmd.ins >
- el->el_line.cursor - el->el_state.argument)
- return (CC_ERROR);
-
- c_delbefore(el, el->el_state.argument);
- el->el_line.cursor -= el->el_state.argument;
+ /* do the delete here so we don't mess up the undo and paste buffers */
+ el->el_line.cursor = --cp;
+ for (; cp < el->el_line.lastchar; cp++)
+ cp[0] = cp[1];
+ el->el_line.lastchar = cp - 1;
return (CC_REFRESH);
}
@@ -829,16 +718,7 @@ protected el_action_t
/*ARGSUSED*/
vi_next_char(EditLine *el, int c __attribute__((unused)))
{
- char ch;
-
- if (el_getc(el, &ch) != 1)
- return (ed_end_of_file(el, 0));
-
- el->el_search.chadir = CHAR_FWD;
- el->el_search.chacha = ch;
-
- return (cv_csearch_fwd(el, ch, el->el_state.argument, 0));
-
+ return cv_csearch(el, CHAR_FWD, -1, el->el_state.argument, 0);
}
@@ -850,15 +730,7 @@ protected el_action_t
/*ARGSUSED*/
vi_prev_char(EditLine *el, int c __attribute__((unused)))
{
- char ch;
-
- if (el_getc(el, &ch) != 1)
- return (ed_end_of_file(el, 0));
-
- el->el_search.chadir = CHAR_BACK;
- el->el_search.chacha = ch;
-
- return (cv_csearch_back(el, ch, el->el_state.argument, 0));
+ return cv_csearch(el, CHAR_BACK, -1, el->el_state.argument, 0);
}
@@ -870,13 +742,7 @@ protected el_action_t
/*ARGSUSED*/
vi_to_next_char(EditLine *el, int c __attribute__((unused)))
{
- char ch;
-
- if (el_getc(el, &ch) != 1)
- return (ed_end_of_file(el, 0));
-
- return (cv_csearch_fwd(el, ch, el->el_state.argument, 1));
-
+ return cv_csearch(el, CHAR_FWD, -1, el->el_state.argument, 1);
}
@@ -888,12 +754,7 @@ protected el_action_t
/*ARGSUSED*/
vi_to_prev_char(EditLine *el, int c __attribute__((unused)))
{
- char ch;
-
- if (el_getc(el, &ch) != 1)
- return (ed_end_of_file(el, 0));
-
- return (cv_csearch_back(el, ch, el->el_state.argument, 1));
+ return cv_csearch(el, CHAR_BACK, -1, el->el_state.argument, 1);
}
@@ -906,14 +767,8 @@ protected el_action_t
vi_repeat_next_char(EditLine *el, int c __attribute__((unused)))
{
- if (el->el_search.chacha == 0)
- return (CC_ERROR);
-
- return (el->el_search.chadir == CHAR_FWD
- ? cv_csearch_fwd(el, el->el_search.chacha,
- el->el_state.argument, 0)
- : cv_csearch_back(el, el->el_search.chacha,
- el->el_state.argument, 0));
+ return cv_csearch(el, el->el_search.chadir, el->el_search.chacha,
+ el->el_state.argument, el->el_search.chatflg);
}
@@ -925,11 +780,343 @@ protected el_action_t
/*ARGSUSED*/
vi_repeat_prev_char(EditLine *el, int c __attribute__((unused)))
{
+ el_action_t r;
+ int dir = el->el_search.chadir;
- if (el->el_search.chacha == 0)
- return (CC_ERROR);
+ r = cv_csearch(el, -dir, el->el_search.chacha,
+ el->el_state.argument, el->el_search.chatflg);
+ el->el_search.chadir = dir;
+ return r;
+}
+
+
+/* vi_match():
+ * Vi go to matching () {} or []
+ * [%]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_match(EditLine *el, int c __attribute__((unused)))
+{
+ const char match_chars[] = "()[]{}";
+ char *cp;
+ int delta, i, count;
+ char o_ch, c_ch;
+
+ *el->el_line.lastchar = '\0'; /* just in case */
+
+ i = strcspn(el->el_line.cursor, match_chars);
+ o_ch = el->el_line.cursor[i];
+ if (o_ch == 0)
+ return CC_ERROR;
+ delta = strchr(match_chars, o_ch) - match_chars;
+ c_ch = match_chars[delta ^ 1];
+ count = 1;
+ delta = 1 - (delta & 1) * 2;
+
+ for (cp = &el->el_line.cursor[i]; count; ) {
+ cp += delta;
+ if (cp < el->el_line.buffer || cp >= el->el_line.lastchar)
+ return CC_ERROR;
+ if (*cp == o_ch)
+ count++;
+ else if (*cp == c_ch)
+ count--;
+ }
+
+ el->el_line.cursor = cp;
+
+ if (el->el_chared.c_vcmd.action != NOP) {
+ /* NB posix says char under cursor should NOT be deleted
+ for -ve delta - this is different to netbsd vi. */
+ if (delta > 0)
+ el->el_line.cursor++;
+ cv_delfini(el);
+ return (CC_REFRESH);
+ }
+ return (CC_CURSOR);
+}
+
+/* vi_undo_line():
+ * Vi undo all changes to line
+ * [U]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_undo_line(EditLine *el, int c __attribute__((unused)))
+{
+
+ cv_undo(el);
+ return hist_get(el);
+}
+
+/* vi_to_column():
+ * Vi go to specified column
+ * [|]
+ * NB netbsd vi goes to screen column 'n', posix says nth character
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_to_column(EditLine *el, int c __attribute__((unused)))
+{
+
+ el->el_line.cursor = el->el_line.buffer;
+ el->el_state.argument--;
+ return ed_next_char(el, 0);
+}
+
+/* vi_yank_end():
+ * Vi yank to end of line
+ * [Y]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_yank_end(EditLine *el, int c __attribute__((unused)))
+{
+
+ cv_yank(el, el->el_line.cursor,
+ el->el_line.lastchar - el->el_line.cursor);
+ return CC_REFRESH;
+}
+
+/* vi_yank():
+ * Vi yank
+ * [y]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_yank(EditLine *el, int c __attribute__((unused)))
+{
+
+ return cv_action(el, YANK);
+}
+
+/* vi_comment_out():
+ * Vi comment out current command
+ * [c]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_comment_out(EditLine *el, int c __attribute__((unused)))
+{
+
+ el->el_line.cursor = el->el_line.buffer;
+ c_insert(el, 1);
+ *el->el_line.cursor = '#';
+ re_refresh(el);
+ return ed_newline(el, 0);
+}
+
+/* vi_alias():
+ * Vi include shell alias
+ * [@]
+ * NB: posix implies that we should enter insert mode, however
+ * this is against historical precedent...
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_alias(EditLine *el __attribute__((unused)), int c __attribute__((unused)))
+{
+#ifdef __weak_extern
+ char alias_name[3];
+ char *alias_text;
+ extern char *get_alias_text(const char *);
+ __weak_extern(get_alias_text);
+
+ if (get_alias_text == 0) {
+ return CC_ERROR;
+ }
+
+ alias_name[0] = '_';
+ alias_name[2] = 0;
+ if (el_getc(el, &alias_name[1]) != 1)
+ return CC_ERROR;
+
+ alias_text = get_alias_text(alias_name);
+ if (alias_text != NULL)
+ el_push(el, alias_text);
+ return CC_NORM;
+#else
+ return CC_ERROR;
+#endif
+}
+
+/* vi_to_history_line():
+ * Vi go to specified history file line.
+ * [G]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_to_history_line(EditLine *el, int c __attribute__((unused)))
+{
+ int sv_event_no = el->el_history.eventno;
+ el_action_t rval;
+
+
+ if (el->el_history.eventno == 0) {
+ (void) strncpy(el->el_history.buf, el->el_line.buffer,
+ EL_BUFSIZ);
+ el->el_history.last = el->el_history.buf +
+ (el->el_line.lastchar - el->el_line.buffer);
+ }
+
+ /* Lack of a 'count' means oldest, not 1 */
+ if (!el->el_state.doingarg) {
+ el->el_history.eventno = 0x7fffffff;
+ hist_get(el);
+ } else {
+ /* This is brain dead, all the rest of this code counts
+ * upwards going into the past. Here we need count in the
+ * other direction (to match the output of fc -l).
+ * I could change the world, but this seems to suffice.
+ */
+ el->el_history.eventno = 1;
+ if (hist_get(el) == CC_ERROR)
+ return CC_ERROR;
+ el->el_history.eventno = 1 + el->el_history.ev.num
+ - el->el_state.argument;
+ if (el->el_history.eventno < 0) {
+ el->el_history.eventno = sv_event_no;
+ return CC_ERROR;
+ }
+ }
+ rval = hist_get(el);
+ if (rval == CC_ERROR)
+ el->el_history.eventno = sv_event_no;
+ return rval;
+}
+
+/* vi_histedit():
+ * Vi edit history line with vi
+ * [v]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_histedit(EditLine *el, int c __attribute__((unused)))
+{
+ int fd;
+ pid_t pid;
+ int st;
+ char tempfile[] = "/tmp/histedit.XXXXXXXXXX";
+ char *cp;
+
+ if (el->el_state.doingarg) {
+ if (vi_to_history_line(el, 0) == CC_ERROR)
+ return CC_ERROR;
+ }
+
+ fd = mkstemp(tempfile);
+ if (fd < 0)
+ return CC_ERROR;
+ cp = el->el_line.buffer;
+ write(fd, cp, el->el_line.lastchar - cp +0u);
+ write(fd, "\n", 1);
+ pid = fork();
+ switch (pid) {
+ case -1:
+ close(fd);
+ unlink(tempfile);
+ return CC_ERROR;
+ case 0:
+ close(fd);
+ execlp("vi", "vi", tempfile, 0);
+ exit(0);
+ /*NOTREACHED*/
+ default:
+ while (waitpid(pid, &st, 0) != pid)
+ continue;
+ lseek(fd, 0ll, SEEK_SET);
+ st = read(fd, cp, el->el_line.limit - cp +0u);
+ if (st > 0 && cp[st - 1] == '\n')
+ st--;
+ el->el_line.cursor = cp;
+ el->el_line.lastchar = cp + st;
+ break;
+ }
+
+ close(fd);
+ unlink(tempfile);
+ /* return CC_REFRESH; */
+ return ed_newline(el, 0);
+}
+
+/* vi_history_word():
+ * Vi append word from previous input line
+ * [_]
+ * Who knows where this one came from!
+ * '_' in vi means 'entire current line', so 'cc' is a synonym for 'c_'
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_history_word(EditLine *el, int c __attribute__((unused)))
+{
+ const char *wp = HIST_FIRST(el);
+ const char *wep, *wsp;
+ int len;
+ char *cp;
+ const char *lim;
+
+ if (wp == NULL)
+ return CC_ERROR;
+
+ wep = wsp = 0;
+ do {
+ while (isspace((unsigned char)*wp))
+ wp++;
+ if (*wp == 0)
+ break;
+ wsp = wp;
+ while (*wp && !isspace((unsigned char)*wp))
+ wp++;
+ wep = wp;
+ } while ((!el->el_state.doingarg || --el->el_state.argument > 0) && *wp != 0);
+
+ if (wsp == 0 || (el->el_state.doingarg && el->el_state.argument != 0))
+ return CC_ERROR;
+
+ cv_undo(el);
+ len = wep - wsp;
+ if (el->el_line.cursor < el->el_line.lastchar)
+ el->el_line.cursor++;
+ c_insert(el, len + 1);
+ cp = el->el_line.cursor;
+ lim = el->el_line.limit;
+ if (cp < lim)
+ *cp++ = ' ';
+ while (wsp < wep && cp < lim)
+ *cp++ = *wsp++;
+ el->el_line.cursor = cp;
+
+ el->el_map.current = el->el_map.key;
+ return CC_REFRESH;
+}
+
+/* vi_redo():
+ * Vi redo last non-motion command
+ * [.]
+ */
+protected el_action_t
+/*ARGSUSED*/
+vi_redo(EditLine *el, int c __attribute__((unused)))
+{
+ c_redo_t *r = &el->el_chared.c_redo;
+
+ if (!el->el_state.doingarg && r->count) {
+ el->el_state.doingarg = 1;
+ el->el_state.argument = r->count;
+ }
+
+ el->el_chared.c_vcmd.pos = el->el_line.cursor;
+ el->el_chared.c_vcmd.action = r->action;
+ if (r->pos != r->buf) {
+ if (r->pos + 1 > r->lim)
+ /* sanity */
+ r->pos = r->lim - 1;
+ r->pos[0] = 0;
+ el_push(el, r->buf);
+ }
- return el->el_search.chadir == CHAR_BACK ?
- cv_csearch_fwd(el, el->el_search.chacha, el->el_state.argument, 0) :
- cv_csearch_back(el, el->el_search.chacha, el->el_state.argument, 0);
+ el->el_state.thiscmd = r->cmd;
+ el->el_state.thisch = r->ch;
+ return (*el->el_map.func[r->cmd])(el, r->ch);
}
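
Among the new vi commands above, vi_match() walks toward the partner bracket by noting that opening brackets sit at even indexes of "()[]{}" and closing ones at odd indexes, so XOR-ing the index with 1 yields the partner character and the low bit picks the scan direction. A standalone sketch of that walk outside EditLine (function and variable names are illustrative, not from the patch):

#include <stdio.h>
#include <string.h>

/* Returns the index of the bracket matching the first bracket found at or
   after `start`, or -1 if there is none. */
static int
match_bracket(const char *line, int len, int start)
{
	static const char match_chars[] = "()[]{}";
	const char *m;
	char o_ch, c_ch;
	int i, delta, count;

	/* find the first bracket at or after the starting position */
	i = start + (int) strcspn(line + start, match_chars);
	if (i >= len || (o_ch = line[i]) == '\0')
		return -1;

	m = strchr(match_chars, o_ch);
	c_ch = match_chars[(m - match_chars) ^ 1]; /* partner character    */
	delta = ((m - match_chars) & 1) ? -1 : 1;  /* odd index => closer  */

	for (count = 1; count != 0; ) {
		i += delta;
		if (i < 0 || i >= len)
			return -1;
		if (line[i] == o_ch)
			count++;
		else if (line[i] == c_ch)
			count--;
	}
	return i;
}

int
main(void)
{
	const char *s = "if (a[i] == (b + c)) done();";
	int pos = match_bracket(s, (int) strlen(s), 3);

	printf("match for '%c' at 3 is at %d ('%c')\n", s[3], pos, s[pos]);
	return 0;
}
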
diff --git a/configure.in b/configure.in
index b7bb1d95189..6df20440c48 100644
--- a/configure.in
+++ b/configure.in
@@ -62,9 +62,11 @@ AC_SUBST(MYSQL_NO_DASH_VERSION)
AC_SUBST(MYSQL_BASE_VERSION)
AC_SUBST(MYSQL_VERSION_ID)
AC_SUBST(PROTOCOL_VERSION)
-AC_DEFINE_UNQUOTED(PROTOCOL_VERSION, $PROTOCOL_VERSION)
+AC_DEFINE_UNQUOTED([PROTOCOL_VERSION], [$PROTOCOL_VERSION],
+ [mysql client protocol version])
AC_SUBST(DOT_FRM_VERSION)
-AC_DEFINE_UNQUOTED(DOT_FRM_VERSION, $DOT_FRM_VERSION)
+AC_DEFINE_UNQUOTED([DOT_FRM_VERSION], [$DOT_FRM_VERSION],
+ [Version of .frm files])
AC_SUBST(SHARED_LIB_VERSION)
AC_SUBST(AVAILABLE_LANGUAGES)
AC_SUBST(AVAILABLE_LANGUAGES_ERRORS)
@@ -74,19 +76,25 @@ AC_SUBST([NDB_VERSION_MAJOR])
AC_SUBST([NDB_VERSION_MINOR])
AC_SUBST([NDB_VERSION_BUILD])
AC_SUBST([NDB_VERSION_STATUS])
-AC_DEFINE_UNQUOTED([NDB_VERSION_MAJOR], [$NDB_VERSION_MAJOR])
-AC_DEFINE_UNQUOTED([NDB_VERSION_MINOR], [$NDB_VERSION_MINOR])
-AC_DEFINE_UNQUOTED([NDB_VERSION_BUILD], [$NDB_VERSION_BUILD])
-AC_DEFINE_UNQUOTED([NDB_VERSION_STATUS], ["$NDB_VERSION_STATUS"])
+AC_DEFINE_UNQUOTED([NDB_VERSION_MAJOR], [$NDB_VERSION_MAJOR],
+ [NDB major version])
+AC_DEFINE_UNQUOTED([NDB_VERSION_MINOR], [$NDB_VERSION_MINOR],
+ [NDB minor version])
+AC_DEFINE_UNQUOTED([NDB_VERSION_BUILD], [$NDB_VERSION_BUILD],
+ [NDB build version])
+AC_DEFINE_UNQUOTED([NDB_VERSION_STATUS], ["$NDB_VERSION_STATUS"],
+ [NDB status version])
# Canonicalize the configuration name.
SYSTEM_TYPE="$host_vendor-$host_os"
MACHINE_TYPE="$host_cpu"
AC_SUBST(SYSTEM_TYPE)
-AC_DEFINE_UNQUOTED(SYSTEM_TYPE, "$SYSTEM_TYPE")
+AC_DEFINE_UNQUOTED([SYSTEM_TYPE], ["$SYSTEM_TYPE"],
+ [Name of system, eg solaris])
AC_SUBST(MACHINE_TYPE)
-AC_DEFINE_UNQUOTED(MACHINE_TYPE, "$MACHINE_TYPE")
+AC_DEFINE_UNQUOTED([MACHINE_TYPE], ["$MACHINE_TYPE"],
+ [Machine type name, eg sun10])
# Detect intel x86 like processor
BASE_MACHINE_TYPE=$MACHINE_TYPE
@@ -230,7 +238,7 @@ AC_MSG_CHECKING("return type of sprintf")
#check the return type of sprintf
case $SYSTEM_TYPE in
*netware*)
- AC_DEFINE(SPRINTF_RETURNS_INT) AC_MSG_RESULT("int")
+ AC_DEFINE(SPRINTF_RETURNS_INT, [1]) AC_MSG_RESULT("int")
;;
*)
AC_TRY_RUN([
@@ -244,8 +252,9 @@ AC_TRY_RUN([
return -1;
}
],
-AC_DEFINE(SPRINTF_RETURNS_INT) AC_MSG_RESULT("int"),
- AC_TRY_RUN([
+ [AC_DEFINE(SPRINTF_RETURNS_INT, [1], [POSIX sprintf])
+ AC_MSG_RESULT("int")],
+ [AC_TRY_RUN([
int main()
{
char* s = "hello";
@@ -253,9 +262,12 @@ AC_DEFINE(SPRINTF_RETURNS_INT) AC_MSG_RESULT("int"),
if((char*)sprintf(buf,s) == buf + strlen(s))
return 0;
return -1;
- }
-], AC_DEFINE(SPRINTF_RETURNS_PTR) AC_MSG_RESULT("ptr"),
- AC_DEFINE(SPRINTF_RETURNS_GARBAGE) AC_MSG_RESULT("garbage")))
+ } ],
+ [AC_DEFINE(SPRINTF_RETURNS_PTR, [1], [Broken sprintf])
+ AC_MSG_RESULT("ptr")],
+ [AC_DEFINE(SPRINTF_RETURNS_GARBAGE, [1], [Broken sprintf])
+ AC_MSG_RESULT("garbage")])
+ ])
;;
esac
@@ -384,7 +396,7 @@ then
# we will gets some problems when linking static programs.
# The following code is used to fix this problem.
- if test "$CXX" = "gcc" -o "$CXX" = "ccache gcc"
+ if echo $CXX | grep gcc > /dev/null 2>&1
then
if $CXX -v 2>&1 | grep 'version 3' > /dev/null 2>&1
then
@@ -652,15 +664,6 @@ AC_ARG_WITH(named-curses-libs,
[ with_named_curses=no ]
)
-# Force use of a zlib (compress)
-AC_ARG_WITH(named-z-libs,
- [ --with-named-z-libs=ARG
- Use specified zlib libraries instead of
- those automatically found by configure.],
- [ with_named_zlib=$withval ],
- [ with_named_zlib=z ]
- )
-
# Make thread safe client
AC_ARG_ENABLE(thread-safe-client,
[ --enable-thread-safe-client
@@ -701,7 +704,7 @@ AC_ARG_WITH(raid,
if test "$USE_RAID" = "yes"
then
AC_MSG_RESULT([yes])
- AC_DEFINE([USE_RAID])
+ AC_DEFINE([USE_RAID], [1], [Use MySQL RAID])
else
AC_MSG_RESULT([no])
fi
@@ -745,7 +748,8 @@ AC_ARG_ENABLE(local-infile,
if test "$ENABLED_LOCAL_INFILE" = "yes"
then
AC_MSG_RESULT([yes])
- AC_DEFINE([ENABLED_LOCAL_INFILE])
+ AC_DEFINE([ENABLED_LOCAL_INFILE], [1],
+ [If LOAD DATA LOCAL INFILE should be enabled by default])
else
AC_MSG_RESULT([no])
fi
@@ -789,20 +793,11 @@ AC_CHECK_FUNC(p2open, , AC_CHECK_LIB(gen, p2open))
AC_CHECK_FUNC(bind, , AC_CHECK_LIB(bind, bind))
# For crypt() on Linux
AC_CHECK_LIB(crypt, crypt)
-AC_CHECK_FUNC(crypt, AC_DEFINE(HAVE_CRYPT))
+AC_CHECK_FUNC(crypt, AC_DEFINE([HAVE_CRYPT], [1], [crypt]))
# For sem_xxx functions on Solaris 2.6
AC_CHECK_FUNC(sem_init, , AC_CHECK_LIB(posix4, sem_init))
-
-# For compress in zlib
-case $SYSTEM_TYPE in
- *netware* | *modesto*)
- AC_DEFINE(HAVE_COMPRESS)
- ;;
- *)
- MYSQL_CHECK_ZLIB_WITH_COMPRESS($with_named_zlib)
- ;;
-esac
+MYSQL_CHECK_ZLIB_WITH_COMPRESS
#--------------------------------------------------------------------
# Check for TCP wrapper support
@@ -832,8 +827,8 @@ int deny_severity = 0;
struct request_info *req;
],[hosts_access (req)],
AC_MSG_RESULT(yes)
- AC_DEFINE(LIBWRAP)
- AC_DEFINE(HAVE_LIBWRAP)
+ AC_DEFINE([LIBWRAP], [1], [Define if you have -lwrap])
+ AC_DEFINE([HAVE_LIBWRAP], [1], [Define if have -lwrap])
if test "$with_libwrap" != "yes"; then
WRAPLIBS="-L${with_libwrap}/lib"
fi
@@ -861,7 +856,10 @@ int main()
atomic_add(5, &v);
return atomic_read(&v) == 28 ? 0 : -1;
}
- ], AC_DEFINE(HAVE_ATOMIC_ADD) atom_ops="${atom_ops}atomic_add ",
+ ],
+ [AC_DEFINE([HAVE_ATOMIC_ADD], [1],
+ [atomic_add() from <asm/atomic.h> (Linux only)])
+ atom_ops="${atom_ops}atomic_add "],
)
AC_TRY_RUN([
#include <asm/atomic.h>
@@ -873,7 +871,10 @@ int main()
atomic_sub(5, &v);
return atomic_read(&v) == 18 ? 0 : -1;
}
- ], AC_DEFINE(HAVE_ATOMIC_SUB) atom_ops="${atom_ops}atomic_sub ",
+ ],
+ [AC_DEFINE([HAVE_ATOMIC_SUB], [1],
+ [atomic_sub() from <asm/atomic.h> (Linux only)])
+ atom_ops="${atom_ops}atomic_sub "],
)
if test -z "$atom_ops"; then atom_ops="no"; fi
@@ -903,7 +904,7 @@ dnl I have no idea if this is a good test - can not find docs for libiberty
with_mysqld_ldflags="-all-static"
AC_SUBST([pstack_dirs])
AC_SUBST([pstack_libs])
- AC_DEFINE([USE_PSTACK])
+ AC_DEFINE([USE_PSTACK], [1], [the pstack backtrace library])
dnl This check isn't needed, but might be nice to give some feedback....
dnl AC_CHECK_HEADER(libiberty.h,
dnl have_libiberty_h=yes,
@@ -926,7 +927,7 @@ then
fi
# We make a special variable for client library's to avoid including
# thread libs in the client.
-NON_THREADED_CLIENT_LIBS="$LIBS"
+NON_THREADED_CLIENT_LIBS="$LIBS $ZLIB_LIBS"
AC_MSG_CHECKING([for int8])
case $SYSTEM_TYPE in
@@ -952,7 +953,11 @@ int main()
int8 i;
return 0;
}
-], AC_DEFINE(HAVE_INT_8_16_32) AC_MSG_RESULT([yes]), AC_MSG_RESULT([no])
+],
+[AC_DEFINE([HAVE_INT_8_16_32], [1],
+ [whether int8, int16 and int32 types exist])
+AC_MSG_RESULT([yes])],
+[AC_MSG_RESULT([no])]
)
;;
esac
@@ -964,6 +969,16 @@ esac
MAX_C_OPTIMIZE="-O3"
MAX_CXX_OPTIMIZE="-O3"
+# workaround for Sun Forte/x86 see BUG#4681
+case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc in
+ *solaris*-i?86-no)
+ CFLAGS="$CFLAGS -DBIG_TABLES"
+ CXXFLAGS="$CXXFLAGS -DBIG_TABLES"
+ ;;
+ *) ;;
+esac
+
+
case $SYSTEM_TYPE in
*solaris2.7*)
# Solaris 2.7 has a broken /usr/include/widec.h
@@ -1029,7 +1044,8 @@ case $SYSTEM_TYPE in
if test "$ac_cv_prog_gcc" = "no"
then
CFLAGS="$CFLAGS -DHAVE_BROKEN_INLINE"
- CXXFLAGS="$CXXFLAGS +O2"
+# set working flags first in line, so later flags can override it (e.g. for debug):
+ CXXFLAGS="+O2 $CXXFLAGS"
MAX_C_OPTIMIZE=""
MAX_CXX_OPTIMIZE=""
ndb_cxxflags_fix="$ndb_cxxflags_fix -Aa"
@@ -1066,10 +1082,11 @@ case $SYSTEM_TYPE in
MAX_C_OPTIMIZE="-O"
fi
;;
- *darwin7*)
+ *darwin[[7-8]]*)
+ # don't forget to escape [] like above
if test "$ac_cv_prog_gcc" = "yes"
then
- FLAGS="-DHAVE_DARWIN_THREADS -D_P1003_1B_VISIBLE -DSIGNAL_WITH_VIO_CLOSE -DSIGNALS_DONT_BREAK_READ"
+ FLAGS="-DHAVE_DARWIN_THREADS -D_P1003_1B_VISIBLE -DSIGNAL_WITH_VIO_CLOSE -DSIGNALS_DONT_BREAK_READ -DIGNORE_SIGHUP_SIGQUIT"
CFLAGS="$CFLAGS $FLAGS"
CXXFLAGS="$CXXFLAGS $FLAGS"
MAX_C_OPTIMIZE="-O"
@@ -1097,7 +1114,8 @@ case $SYSTEM_TYPE in
*bsdi*)
echo "Adding fix for BSDI"
CFLAGS="$CFLAGS -D__BSD__ -DHAVE_BROKEN_REALPATH"
- AC_DEFINE_UNQUOTED(SOCKOPT_OPTLEN_TYPE, size_t)
+ AC_DEFINE_UNQUOTED([SOCKOPT_OPTLEN_TYPE], [size_t],
+ [Last argument to get/setsockopt])
;;
*sgi-irix6*)
if test "$with_named_thread" = "no"
@@ -1247,7 +1265,8 @@ then
if test "$res" -gt 0
then
AC_MSG_RESULT("Found")
- AC_DEFINE(HAVE_LINUXTHREADS)
+ AC_DEFINE([HAVE_LINUXTHREADS], [1],
+ [Whether we are using Xavier Leroy's LinuxThreads])
# Linux 2.0 sanity check
AC_TRY_COMPILE([#include <sched.h>], [int a = sched_get_priority_min(1);], ,
AC_MSG_ERROR([Syntax error in sched.h. Change _P to __P in the /usr/include/sched.h file. See the Installation chapter in the Reference Manual]))
@@ -1270,7 +1289,8 @@ Reference Manual for more information.])
with_named_thread="-lpthread -lmach -lexc"
CFLAGS="$CFLAGS -D_REENTRANT"
CXXFLAGS="$CXXFLAGS -D_REENTRANT"
- AC_DEFINE(HAVE_DEC_THREADS)
+ AC_DEFINE(HAVE_DEC_THREADS, [1],
+ [Whether we are using DEC threads])
AC_MSG_RESULT("yes")
else
AC_MSG_RESULT("no")
@@ -1278,8 +1298,9 @@ Reference Manual for more information.])
if test -f /usr/shlib/libpthreads.so -a -f /usr/lib/libmach.a -a -f /usr/ccs/lib/cmplrs/cc/libexc.a
then
with_named_thread="-lpthreads -lmach -lc_r"
- AC_DEFINE(HAVE_DEC_THREADS)
- AC_DEFINE(HAVE_DEC_3_2_THREADS)
+ AC_DEFINE(HAVE_DEC_THREADS, [1])
+ AC_DEFINE([HAVE_DEC_3_2_THREADS], [1],
+ [Whether we are using OSF1 DEC threads on 3.2])
with_osf32_threads="yes"
MYSQLD_DEFAULT_SWITCHES="--skip-thread-priority"
AC_MSG_RESULT("yes")
@@ -1309,6 +1330,7 @@ then
with_named_thread="-lgthreads -lsocket -lgthreads"
# sched.h conflicts with fsu-threads
touch ./include/sched.h
+ touch ./include/semaphore.h
# We must have gcc
if expr "$CC" : ".*gcc.*"
@@ -1353,9 +1375,9 @@ then
fi
if expr "$SYSTEM_TYPE" : ".*unixware7.0.0" > /dev/null
then
- AC_DEFINE(HAVE_UNIXWARE7_THREADS)
+ AC_DEFINE(HAVE_UNIXWARE7_THREADS, [1])
else
- AC_DEFINE(HAVE_UNIXWARE7_POSIX)
+ AC_DEFINE(HAVE_UNIXWARE7_POSIX, [1])
fi
AC_MSG_RESULT("yes")
# We must have cc
@@ -1399,9 +1421,9 @@ then
fi
if expr "$SYSTEM_TYPE" : ".*unixware7.0.0" > /dev/null
then
- AC_DEFINE(HAVE_UNIXWARE7_THREADS)
+ AC_DEFINE(HAVE_UNIXWARE7_THREADS, [1])
else
- AC_DEFINE(HAVE_UNIXWARE7_POSIX)
+ AC_DEFINE(HAVE_UNIXWARE7_POSIX, [1])
fi
# We must have cc
AC_MSG_CHECKING("for gcc")
@@ -1440,9 +1462,11 @@ then
fi
if expr "$SYSTEM_TYPE" : ".*unixware7.0.0" > /dev/null
then
- AC_DEFINE(HAVE_UNIXWARE7_THREADS)
+ AC_DEFINE([HAVE_UNIXWARE7_THREADS], [1],
+ [UNIXWARE7 threads are not posix])
else
- AC_DEFINE(HAVE_UNIXWARE7_POSIX)
+ AC_DEFINE([HAVE_UNIXWARE7_POSIX], [1],
+ [new UNIXWARE7 threads that are not yet posix])
fi
# We must have cc
AC_MSG_CHECKING("for gcc")
@@ -1843,6 +1867,7 @@ MYSQL_HAVE_TIOCGWINSZ
MYSQL_HAVE_FIONREAD
MYSQL_HAVE_TIOCSTAT
MYSQL_STRUCT_DIRENT_D_INO
+MYSQL_STRUCT_DIRENT_D_NAMLEN
MYSQL_TYPE_SIGHANDLER
if test "$with_named_curses" = "no"
then
@@ -1852,6 +1877,15 @@ else
fi
AC_SUBST(TERMCAP_LIB)
+LIBEDIT_LOBJECTS=""
+AC_CHECK_FUNC(strunvis, ,[LIBEDIT_LOBJECTS="$LIBEDIT_LOBJECTS unvis.o"])
+AC_CHECK_FUNC(strvis, ,[LIBEDIT_LOBJECTS="$LIBEDIT_LOBJECTS vis.o"])
+AC_CHECK_FUNC(strlcpy, ,[LIBEDIT_LOBJECTS="$LIBEDIT_LOBJECTS strlcpy.o"])
+AC_CHECK_FUNC(strlcat, ,[LIBEDIT_LOBJECTS="$LIBEDIT_LOBJECTS strlcat.o"])
+AC_CHECK_FUNC(fgetln, ,[LIBEDIT_LOBJECTS="$LIBEDIT_LOBJECTS fgetln.o"])
+AC_SUBST(LIBEDIT_LOBJECTS)
+enable_readline="yes"
+
# End of readline/libedit stuff
#########################################################################
@@ -1891,7 +1925,7 @@ AC_CHECK_FUNCS(alarm bcmp bfill bmove bzero chsize cuserid fchmod fcntl \
AC_MSG_CHECKING(for isinf with <math.h>)
AC_TRY_LINK([#include <math.h>], [float f = 0.0; isinf(f)],
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_ISINF,,[isinf() macro or function]),
+ AC_DEFINE(HAVE_ISINF, [1], [isinf() macro or function]),
AC_MSG_RESULT(no))
CFLAGS="$ORG_CFLAGS"
@@ -1945,7 +1979,8 @@ AC_LANG_RESTORE
CXXFLAGS="$ac_save_CXXFLAGS"
if test "$mysql_cv_gethost_style" = "solaris"
then
- AC_DEFINE(HAVE_SOLARIS_STYLE_GETHOST)
+ AC_DEFINE([HAVE_SOLARIS_STYLE_GETHOST], [1],
+ [Solaris defines gethostbyaddr_r with 7 arguments. glibc2 defines this with 8 arguments])
fi
#---START: Used in for client configure
@@ -1979,7 +2014,8 @@ AC_LANG_RESTORE
CXXFLAGS="$ac_save_CXXFLAGS"
if test "$mysql_cv_gethostname_style" = "glibc2"
then
- AC_DEFINE(HAVE_GETHOSTBYNAME_R_GLIBC2_STYLE)
+ AC_DEFINE([HAVE_GETHOSTBYNAME_R_GLIBC2_STYLE], [1],
+ [Solaris defines gethostbyname_r with 5 arguments. glibc2 defines this with 6 arguments])
fi
# Check 3rd argument of getthostbyname_r
@@ -2010,7 +2046,8 @@ AC_LANG_RESTORE
CXXFLAGS="$ac_save_CXXFLAGS"
if test "$mysql_cv_gethostname_arg" = "hostent_data"
then
- AC_DEFINE(HAVE_GETHOSTBYNAME_R_RETURN_INT)
+ AC_DEFINE([HAVE_GETHOSTBYNAME_R_RETURN_INT], [1],
+ [In OSF 4.0f the 3rd argument to gethostname_r is hostent_data *])
fi
@@ -2029,7 +2066,8 @@ pthread_getspecific((pthread_key_t) NULL); ],
mysql_cv_getspecific_args=POSIX, mysql_cv_getspecific_args=other))
if test "$mysql_cv_getspecific_args" = "other"
then
- AC_DEFINE(HAVE_NONPOSIX_PTHREAD_GETSPECIFIC)
+ AC_DEFINE([HAVE_NONPOSIX_PTHREAD_GETSPECIFIC], [1],
+ [For some non posix threads])
fi
# Check definition of pthread_mutex_init
@@ -2047,7 +2085,8 @@ mysql_cv_getspecific_args=POSIX, mysql_cv_getspecific_args=other))
mysql_cv_mutex_init_args=POSIX, mysql_cv_mutex_init_args=other))
if test "$mysql_cv_mutex_init_args" = "other"
then
- AC_DEFINE(HAVE_NONPOSIX_PTHREAD_MUTEX_INIT)
+ AC_DEFINE([HAVE_NONPOSIX_PTHREAD_MUTEX_INIT], [1],
+ [For some non posix threads])
fi
fi
#---END:
@@ -2067,7 +2106,7 @@ readdir_r((DIR *) NULL, (struct dirent *) NULL, (struct dirent **) NULL); ],
mysql_cv_readdir_r=POSIX, mysql_cv_readdir_r=other))
if test "$mysql_cv_readdir_r" = "POSIX"
then
- AC_DEFINE(HAVE_READDIR_R)
+ AC_DEFINE([HAVE_READDIR_R], [1], [POSIX readdir_r])
fi
# Check definition of posix sigwait()
@@ -2087,7 +2126,7 @@ sigwait(&set,&sig);
mysql_cv_sigwait=POSIX, mysql_cv_sigwait=other))
if test "$mysql_cv_sigwait" = "POSIX"
then
- AC_DEFINE(HAVE_SIGWAIT)
+ AC_DEFINE([HAVE_SIGWAIT], [1], [POSIX sigwait])
fi
if test "$mysql_cv_sigwait" != "POSIX"
@@ -2108,7 +2147,7 @@ sigwait(&set);],
mysql_cv_sigwait=NONPOSIX, mysql_cv_sigwait=other))
if test "$mysql_cv_sigwait" = "NONPOSIX"
then
- AC_DEFINE(HAVE_NONPOSIX_SIGWAIT)
+ AC_DEFINE([HAVE_NONPOSIX_SIGWAIT], [1], [sigwait with one argument])
fi
fi
#---END:
@@ -2126,7 +2165,7 @@ pthread_attr_setscope(&thr_attr,0);],
mysql_cv_pthread_attr_setscope=yes, mysql_cv_pthread_attr_setscope=no))
if test "$mysql_cv_pthread_attr_setscope" = "yes"
then
- AC_DEFINE(HAVE_PTHREAD_ATTR_SETSCOPE)
+ AC_DEFINE([HAVE_PTHREAD_ATTR_SETSCOPE], [1], [pthread_attr_setscope])
fi
# Check for bad includes
@@ -2142,7 +2181,7 @@ AC_TRY_COMPILE(
netinet_inc=yes, netinet_inc=no)
if test "$netinet_inc" = "no"
then
- AC_DEFINE(HAVE_BROKEN_NETINET_INCLUDES)
+ AC_DEFINE([HAVE_BROKEN_NETINET_INCLUDES], [1], [Can netinet be included])
fi
AC_MSG_RESULT("$netinet_inc")
@@ -2167,7 +2206,7 @@ AC_ARG_WITH(query_cache,
if test "$with_query_cache" = "yes"
then
- AC_DEFINE(HAVE_QUERY_CACHE)
+ AC_DEFINE([HAVE_QUERY_CACHE], [1], [If we want to have query cache])
fi
AC_ARG_WITH(geometry,
@@ -2178,8 +2217,8 @@ AC_ARG_WITH(geometry,
if test "$with_geometry" = "yes"
then
- AC_DEFINE(HAVE_SPATIAL)
- AC_DEFINE(HAVE_RTREE_KEYS)
+ AC_DEFINE([HAVE_SPATIAL], [1], [Spatial extensions])
+ AC_DEFINE([HAVE_RTREE_KEYS], [1], [RTree keys])
fi
AC_ARG_WITH(embedded_privilege_control,
@@ -2192,7 +2231,8 @@ AC_ARG_WITH(embedded_privilege_control,
if test "$with_embedded_privilege_control" = "yes"
then
- AC_DEFINE(HAVE_EMBEDDED_PRIVILEGE_CONTROL)
+ AC_DEFINE([HAVE_EMBEDDED_PRIVILEGE_CONTROL], [1],
+ [Access checks in embedded library])
fi
AC_ARG_WITH(extra-tools,
@@ -2275,6 +2315,20 @@ AC_ARG_WITH(libedit,
[ with_libedit=undefined ]
)
+#
+# We support the following variants of compilation:
+# --with-readline
+# | yes | no | undefined
+# --with-libedit | | |
+# ---------------+----------------+------+----------------------------------
+# yes | ERROR! | use libedit from mysql sources
+# ---------------+----------------+------+----------------------------------
+# no | use readline | use system readline or external libedit
+# | from mysql | according to results of m4 tests
+# ---------------+ sources (if it + +----------------------------------
+# undefined | is presented) | | use libedit from mysql sources
+
+
compile_readline="no"
compile_libedit="no"
@@ -2299,10 +2353,11 @@ then
readline_topdir="cmd-line-utils"
readline_basedir="libedit"
readline_dir="$readline_topdir/$readline_basedir"
- readline_link="\$(top_builddir)/cmd-line-utils/libedit/liblibedit.a"
+ readline_link="\$(top_builddir)/cmd-line-utils/libedit/libedit.a"
readline_h_ln_cmd="\$(LN) -s \$(top_builddir)/cmd-line-utils/libedit/readline readline"
compile_libedit=yes
- AC_DEFINE_UNQUOTED(USE_LIBEDIT_INTERFACE)
+ AC_DEFINE_UNQUOTED(HAVE_HIST_ENTRY, 1)
+ AC_DEFINE_UNQUOTED(USE_LIBEDIT_INTERFACE, 1)
elif test "$with_readline" = "yes"
then
readline_topdir="cmd-line-utils"
@@ -2311,10 +2366,14 @@ then
readline_link="\$(top_builddir)/cmd-line-utils/readline/libreadline.a"
readline_h_ln_cmd="\$(LN) -s \$(top_builddir)/cmd-line-utils/readline readline"
compile_readline=yes
- AC_DEFINE_UNQUOTED(USE_NEW_READLINE_INTERFACE)
+ AC_DEFINE_UNQUOTED(USE_NEW_READLINE_INTERFACE, 1)
else
+ AC_LANG_SAVE
+ AC_LANG_CPLUSPLUS
MYSQL_CHECK_LIBEDIT_INTERFACE
MYSQL_CHECK_NEW_RL_INTERFACE
+ MYSQL_CHECK_READLINE_DECLARES_HIST_ENTRY
+ AC_LANG_RESTORE
if [test "$mysql_cv_new_rl_interface" = "yes"] || [test "$mysql_cv_libedit_interface" = "no"]
then
readline_link="-lreadline"
@@ -2339,7 +2398,7 @@ dnl you must also create strings/ctype-$charset_name.c
AC_DIVERT_PUSH(0)
define(CHARSETS_AVAILABLE0,binary)
-define(CHARSETS_AVAILABLE1,ascii armscii8 ascii big5 cp1250 cp1251 cp1256 cp1257)
+define(CHARSETS_AVAILABLE1,armscii8 ascii big5 cp1250 cp1251 cp1256 cp1257)
define(CHARSETS_AVAILABLE2,cp850 cp852 cp866 dec8 euckr gb2312 gbk geostd8)
define(CHARSETS_AVAILABLE3,greek hebrew hp8 keybcs2 koi8r koi8u)
define(CHARSETS_AVAILABLE4,latin1 latin2 latin5 latin7 macce macroman)
@@ -2388,8 +2447,10 @@ elif test "$extra_charsets" = none; then
CHARSETS="$CHARSETS"
elif test "$extra_charsets" = complex; then
CHARSETS="$CHARSETS $CHARSETS_COMPLEX"
+ AC_DEFINE([DEFINE_ALL_CHARACTER_SETS],1,[all charsets are available])
elif test "$extra_charsets" = all; then
CHARSETS="$CHARSETS $CHARSETS_AVAILABLE"
+ AC_DEFINE([DEFINE_ALL_CHARACTER_SETS],1,[all charsets are available])
else
EXTRA_CHARSETS=`echo $extra_charsets | sed -e 's/,/ /g'`
CHARSETS="$CHARSETS $EXTRA_CHARSETS"
@@ -2399,121 +2460,124 @@ for cs in $CHARSETS
do
case $cs in
armscii8)
- AC_DEFINE(HAVE_CHARSET_armscii8)
+ AC_DEFINE(HAVE_CHARSET_armscii8, 1,
+ [Define to enable charset armscii8])
;;
ascii)
- AC_DEFINE(HAVE_CHARSET_ascii)
+ AC_DEFINE(HAVE_CHARSET_ascii, 1,
+ [Define to enable ascii character set])
;;
big5)
- AC_DEFINE(HAVE_CHARSET_big5)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_big5, 1, [Define to enable charset big5])
+ AC_DEFINE([USE_MB], [1], [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, [1], [ ])
;;
binary)
;;
cp1250)
- AC_DEFINE(HAVE_CHARSET_cp1250)
+ AC_DEFINE(HAVE_CHARSET_cp1250, 1, [Define to enable cp1250])
;;
cp1251)
- AC_DEFINE(HAVE_CHARSET_cp1251)
+ AC_DEFINE(HAVE_CHARSET_cp1251, 1, [Define to enable charset cp1251])
;;
cp1256)
- AC_DEFINE(HAVE_CHARSET_cp1256)
+ AC_DEFINE(HAVE_CHARSET_cp1256, 1, [Define to enable charset cp1256])
;;
cp1257)
- AC_DEFINE(HAVE_CHARSET_cp1257)
+ AC_DEFINE(HAVE_CHARSET_cp1257, 1, [Define to enable charset cp1257])
;;
cp850)
- AC_DEFINE(HAVE_CHARSET_cp850)
+ AC_DEFINE(HAVE_CHARSET_cp850, 1, [Define to enable charset cp850])
;;
cp852)
- AC_DEFINE(HAVE_CHARSET_cp852)
+ AC_DEFINE(HAVE_CHARSET_cp852, 1, [Define to enable charset cp852])
;;
cp866)
- AC_DEFINE(HAVE_CHARSET_cp866)
+ AC_DEFINE(HAVE_CHARSET_cp866, 1, [Define to enable charset cp866])
;;
dec8)
- AC_DEFINE(HAVE_CHARSET_dec8)
+ AC_DEFINE(HAVE_CHARSET_dec8, 1, [Define to enable charset dec8])
;;
euckr)
- AC_DEFINE(HAVE_CHARSET_euckr)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_euckr, 1, [Define to enable charset euckr])
+ AC_DEFINE([USE_MB], [1], [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
gb2312)
- AC_DEFINE(HAVE_CHARSET_gb2312)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_gb2312, 1, [Define to enable charset gb2312])
+ AC_DEFINE([USE_MB], 1, [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
gbk)
- AC_DEFINE(HAVE_CHARSET_gbk)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_gbk, 1, [Define to enable charset gbk])
+ AC_DEFINE([USE_MB], [1], [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
geostd8)
- AC_DEFINE(HAVE_CHARSET_geostd8)
+ AC_DEFINE(HAVE_CHARSET_geostd8, 1, [Define to enable charset geostd8])
;;
greek)
- AC_DEFINE(HAVE_CHARSET_greek)
+ AC_DEFINE(HAVE_CHARSET_greek, 1, [Define to enable charset greek])
;;
hebrew)
- AC_DEFINE(HAVE_CHARSET_hebrew)
+ AC_DEFINE(HAVE_CHARSET_hebrew, 1, [Define to enable charset hebrew])
;;
hp8)
- AC_DEFINE(HAVE_CHARSET_hp8)
+ AC_DEFINE(HAVE_CHARSET_hp8, 1, [Define to enable charset hp8])
;;
keybcs2)
- AC_DEFINE(HAVE_CHARSET_keybcs2)
+ AC_DEFINE(HAVE_CHARSET_keybcs2, 1, [Define to enable charset keybcs2])
;;
koi8r)
- AC_DEFINE(HAVE_CHARSET_koi8r)
+ AC_DEFINE(HAVE_CHARSET_koi8r, 1, [Define to enable charset koi8r])
;;
koi8u)
- AC_DEFINE(HAVE_CHARSET_koi8u)
+ AC_DEFINE(HAVE_CHARSET_koi8u, 1, [Define to enable charset koi8u])
;;
latin1)
- AC_DEFINE(HAVE_CHARSET_latin1)
+ AC_DEFINE(HAVE_CHARSET_latin1, 1, [Define to enable charset latin1])
;;
latin2)
- AC_DEFINE(HAVE_CHARSET_latin2)
+ AC_DEFINE(HAVE_CHARSET_latin2, 1, [Define to enable charset latin2])
;;
latin5)
- AC_DEFINE(HAVE_CHARSET_latin5)
+ AC_DEFINE(HAVE_CHARSET_latin5, 1, [Define to enable charset latin5])
;;
latin7)
- AC_DEFINE(HAVE_CHARSET_latin7)
+ AC_DEFINE(HAVE_CHARSET_latin7, 1, [Define to enable charset latin7])
;;
macce)
- AC_DEFINE(HAVE_CHARSET_macce)
+ AC_DEFINE(HAVE_CHARSET_macce, 1, [Define to enable charset macce])
;;
macroman)
- AC_DEFINE(HAVE_CHARSET_macroman)
+ AC_DEFINE(HAVE_CHARSET_macroman, 1,
+ [Define to enable charset macroman])
;;
sjis)
- AC_DEFINE(HAVE_CHARSET_sjis)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_sjis, 1, [Define to enable charset sjis])
+ AC_DEFINE([USE_MB], 1, [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
swe7)
- AC_DEFINE(HAVE_CHARSET_swe7)
+ AC_DEFINE(HAVE_CHARSET_swe7, 1, [Define to enable charset swe7])
;;
tis620)
- AC_DEFINE(HAVE_CHARSET_tis620)
+ AC_DEFINE(HAVE_CHARSET_tis620, 1, [Define to enable charset tis620])
;;
ucs2)
- AC_DEFINE(HAVE_CHARSET_ucs2)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_ucs2, 1, [Define to enable charset ucs2])
+ AC_DEFINE([USE_MB], [1], [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
ujis)
- AC_DEFINE(HAVE_CHARSET_ujis)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_ujis, 1, [Define to enable charset ujis])
+ AC_DEFINE([USE_MB], [1], [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
utf8)
- AC_DEFINE(HAVE_CHARSET_utf8)
- AC_DEFINE(USE_MB)
- AC_DEFINE(USE_MB_IDENT)
+ AC_DEFINE(HAVE_CHARSET_utf8, 1, [Define to enable charset utf8])
+ AC_DEFINE([USE_MB], 1, [Use multi-byte character routines])
+ AC_DEFINE(USE_MB_IDENT, 1)
;;
*)
AC_MSG_ERROR([Charset '$cs' not available. (Available are: $CHARSETS_AVAILABLE).
@@ -2543,7 +2607,7 @@ case $default_charset in
;;
cp1250)
default_charset_default_collation="cp1250_general_ci"
- default_charset_collations="cp1250_general_ci cp1250_czech_ci cp1250_bin"
+ default_charset_collations="cp1250_general_ci cp1250_czech_cs cp1250_bin"
;;
cp1251)
default_charset_default_collation="cp1251_general_ci"
@@ -2619,7 +2683,7 @@ case $default_charset in
;;
latin2)
default_charset_default_collation="latin2_general_ci"
- default_charset_collations="latin2_general_ci latin2_bin latin2_czech_ci latin2_hungarian_ci latin2_croatian_ci"
+ default_charset_collations="latin2_general_ci latin2_bin latin2_czech_cs latin2_hungarian_ci latin2_croatian_ci"
;;
latin5)
default_charset_default_collation="latin5_turkish_ci"
@@ -2711,14 +2775,17 @@ else
]);
fi
-AC_DEFINE_UNQUOTED(MYSQL_DEFAULT_CHARSET_NAME,"$default_charset")
-AC_DEFINE_UNQUOTED(MYSQL_DEFAULT_COLLATION_NAME,"$default_collation")
+AC_DEFINE_UNQUOTED([MYSQL_DEFAULT_CHARSET_NAME], ["$default_charset"],
+ [Define the default charset name])
+AC_DEFINE_UNQUOTED([MYSQL_DEFAULT_COLLATION_NAME], ["$default_collation"],
+ [Define the default collation name])
MYSQL_CHECK_ISAM
MYSQL_CHECK_BDB
MYSQL_CHECK_INNODB
MYSQL_CHECK_EXAMPLEDB
MYSQL_CHECK_ARCHIVEDB
+MYSQL_CHECK_CSVDB
MYSQL_CHECK_NDBCLUSTER
# If we have threads generate some library functions and test programs
@@ -2735,7 +2802,7 @@ if test "$THREAD_SAFE_CLIENT" != "no"
then
sql_client_dirs="libmysql_r $sql_client_dirs"
linked_client_targets="$linked_client_targets linked_libmysql_r_sources"
- AC_DEFINE(THREAD_SAFE_CLIENT)
+ AC_DEFINE([THREAD_SAFE_CLIENT], [1], [Should the client be thread safe])
fi
CLIENT_LIBS="$CLIENT_LIBS $STATIC_NSS_FLAGS"
@@ -2761,7 +2828,8 @@ ac_configure_args="$ac_configure_args CFLAGS='$CFLAGS' CXXFLAGS='$CXXFLAGS'"
if test "$with_server" = "yes" -o "$THREAD_SAFE_CLIENT" != "no"
then
- AC_DEFINE(THREAD)
+ AC_DEFINE([THREAD], [1],
+ [Define if you want to have threaded code. This may be left undefined for client code])
# Avoid _PROGRAMS names
THREAD_LPROGRAMS="test_thr_alarm\$(EXEEXT) test_thr_lock\$(EXEEXT)"
AC_SUBST(THREAD_LPROGRAMS)
@@ -2788,7 +2856,7 @@ then
AC_CONFIG_FILES(bdb/Makefile)
echo "CONFIGURING FOR BERKELEY DB"
- bdb_conf_flags=
+ bdb_conf_flags="--disable-shared"
if test $with_debug = "yes"
then
bdb_conf_flags="$bdb_conf_flags --enable-debug --enable-diagnostic"
@@ -2821,7 +2889,7 @@ dnl echo "bdb = '$bdb'; inc = '$bdb_includes', lib = '$bdb_libs'"
echo "END OF BERKELEY DB CONFIGURATION"
fi
- AC_DEFINE(HAVE_BERKELEY_DB)
+ AC_DEFINE([HAVE_BERKELEY_DB], [1], [Have berkeley db installed])
else
if test -d bdb; then :
else
@@ -2878,7 +2946,7 @@ EOF
then
# MIT user level threads
thread_dirs="mit-pthreads"
- AC_DEFINE(HAVE_mit_thread)
+ AC_DEFINE([HAVE_mit_thread], [1], [Do we use user level threads])
MT_INCLUDES="-I\$(top_srcdir)/mit-pthreads/include"
AC_SUBST(MT_INCLUDES)
if test -n "$OVERRIDE_MT_LD_ADD"
@@ -2912,7 +2980,7 @@ AC_SUBST(server_scripts)
#if test "$with_posix_threads" = "no" -o "$with_mit_threads" = "yes"
#then
# MIT pthreads does now support connecting with unix sockets
- # AC_DEFINE(HAVE_THREADS_WITHOUT_SOCKETS)
+ # AC_DEFINE([HAVE_THREADS_WITHOUT_SOCKETS], [], [MIT pthreads does not support connecting with unix sockets])
#fi
# Some usefull subst
@@ -2939,10 +3007,10 @@ then
if test "$with_debug" = "yes"
then
# Medium debug.
- NDB_DEFS="-DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
+ NDB_DEFS="-DNDB_DEBUG -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
elif test "$with_debug" = "full"
then
- NDB_DEFS="-DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
+ NDB_DEFS="-DNDB_DEBUG_FULL -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD"
else
NDB_DEFS="-DNDEBUG"
fi
@@ -3042,6 +3110,7 @@ AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
include/mysql_version.h dnl
cmd-line-utils/Makefile dnl
cmd-line-utils/libedit/Makefile dnl
+ zlib/Makefile dnl
cmd-line-utils/readline/Makefile)
AC_CONFIG_COMMANDS([default], , test -z "$CONFIG_HEADERS" || echo timestamp > stamp-h)
AC_OUTPUT
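
The configure.in hunks above move every AC_DEFINE to the three-argument autoconf form, so autoheader can emit each symbol's description straight into the generated config.h template. A hedged sketch of what the generated header and a C consumer could look like; the values shown are examples, not taken from this build:

    /* Hypothetical excerpt of a generated config.h (values are examples only): */
    /* Define the default charset name */
    #define MYSQL_DEFAULT_CHARSET_NAME "latin1"
    /* Define the default collation name */
    #define MYSQL_DEFAULT_COLLATION_NAME "latin1_swedish_ci"

    #include <stdio.h>

    int main(void)
    {
      /* C code consumes the symbols like any other preprocessor define. */
      printf("charset=%s collation=%s\n",
             MYSQL_DEFAULT_CHARSET_NAME, MYSQL_DEFAULT_COLLATION_NAME);
      return 0;
    }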
diff --git a/extra/mysql_waitpid.c b/extra/mysql_waitpid.c
index bff1752ec21..c228cc52c8b 100644
--- a/extra/mysql_waitpid.c
+++ b/extra/mysql_waitpid.c
@@ -20,6 +20,7 @@
#include <my_global.h>
#include <m_string.h>
+#include <my_sys.h>
#include <my_getopt.h>
#include <signal.h>
#include <errno.h>
diff --git a/extra/perror.c b/extra/perror.c
index f1b1a4c2005..a28626fd873 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -42,7 +42,7 @@ static struct my_option my_long_options[] =
NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_NDBCLUSTER_DB
{"ndb", 0, "Ndbcluster storage engine specific error codes.", (gptr*) &ndb_code,
- (gptr*) &ndb_code, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+ (gptr*) &ndb_code, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
#ifdef HAVE_SYS_ERRLIST
{"all", 'a', "Print all the error messages and the number.",
@@ -222,7 +222,7 @@ int main(int argc,char *argv[])
#ifdef HAVE_NDBCLUSTER_DB
if (ndb_code)
{
- if (ndb_error_string(code, ndb_string, 1024) < 0)
+ if (ndb_error_string(code, ndb_string, sizeof(ndb_string)) < 0)
msg= 0;
else
msg= ndb_string;
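
Both perror.c changes above are small hardening fixes: --ndb now defaults to off, and ndb_error_string() receives sizeof(ndb_string) instead of the literal 1024, so the length argument tracks the buffer declaration. The sizeof idiom in isolation, as a generic sketch rather than code from this tree:

    #include <stdio.h>

    static char ndb_string[1024];

    /* Passing sizeof(buffer) keeps the size argument correct even if the
       declaration above is later resized. */
    static void fill_message(const char *msg)
    {
      snprintf(ndb_string, sizeof(ndb_string), "%s", msg);
    }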
diff --git a/heap/hp_hash.c b/heap/hp_hash.c
index 2014b2b0adc..71eecc8bdf2 100644
--- a/heap/hp_hash.c
+++ b/heap/hp_hash.c
@@ -245,7 +245,15 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
}
if (seg->type == HA_KEYTYPE_TEXT)
{
- seg->charset->coll->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,&nr2);
+ CHARSET_INFO *cs= seg->charset;
+ uint char_length= (uint) ((uchar*) key - pos);
+ if (cs->mbmaxlen > 1)
+ {
+ uint length= char_length;
+ char_length= my_charpos(cs, pos, pos + length, length/cs->mbmaxlen);
+ set_if_smaller(char_length, length); /* QQ: ok to remove? */
+ }
+ cs->coll->hash_sort(cs, pos, char_length, &nr, &nr2);
}
else
{
@@ -280,7 +288,15 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
}
if (seg->type == HA_KEYTYPE_TEXT)
{
- seg->charset->coll->hash_sort(seg->charset,pos,end-pos,&nr,&nr2);
+ CHARSET_INFO *cs= seg->charset;
+ uint char_length= seg->length;
+ if (cs->mbmaxlen > 1)
+ {
+ char_length= my_charpos(cs, pos, pos + char_length,
+ char_length / cs->mbmaxlen);
+ set_if_smaller(char_length, seg->length); /* QQ: ok to remove? */
+ }
+ cs->coll->hash_sort(cs, pos, char_length, &nr, &nr2);
}
else
{
@@ -401,9 +417,26 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2)
}
if (seg->type == HA_KEYTYPE_TEXT)
{
+ CHARSET_INFO *cs= seg->charset;
+ uint char_length1;
+ uint char_length2;
+ uchar *pos1= (uchar*)rec1 + seg->start;
+ uchar *pos2= (uchar*)rec2 + seg->start;
+ if (cs->mbmaxlen > 1)
+ {
+ uint char_length= seg->length / cs->mbmaxlen;
+ char_length1= my_charpos(cs, pos1, pos1 + seg->length, char_length);
+ set_if_smaller(char_length1, seg->length); /* QQ: ok to remove? */
+ char_length2= my_charpos(cs, pos2, pos2 + seg->length, char_length);
+ set_if_smaller(char_length2, seg->length); /* QQ: ok to remove? */
+ }
+ else
+ {
+ char_length1= char_length2= seg->length;
+ }
if (seg->charset->coll->strnncollsp(seg->charset,
- (uchar*) rec1+seg->start,seg->length,
- (uchar*) rec2+seg->start,seg->length))
+ pos1,char_length1,
+ pos2,char_length2))
return 1;
}
else
@@ -435,9 +468,27 @@ int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key)
}
if (seg->type == HA_KEYTYPE_TEXT)
{
+ CHARSET_INFO *cs= seg->charset;
+ uint char_length_key;
+ uint char_length_rec;
+ uchar *pos= (uchar*) rec + seg->start;
+ if (cs->mbmaxlen > 1)
+ {
+ uint char_length= seg->length / cs->mbmaxlen;
+ char_length_key= my_charpos(cs, key, key + seg->length, char_length);
+ set_if_smaller(char_length_key, seg->length);
+ char_length_rec= my_charpos(cs, pos, pos + seg->length, char_length);
+ set_if_smaller(char_length_rec, seg->length);
+ }
+ else
+ {
+ char_length_key= seg->length;
+ char_length_rec= seg->length;
+ }
+
if (seg->charset->coll->strnncollsp(seg->charset,
- (uchar*) rec+seg->start, seg->length,
- (uchar*) key, seg->length))
+ (uchar*) pos, char_length_rec,
+ (uchar*) key, char_length_key))
return 1;
}
else
@@ -458,13 +509,23 @@ void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec)
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
{
+ CHARSET_INFO *cs= seg->charset;
+ uint char_length= seg->length;
+ uchar *pos= (uchar*) rec + seg->start;
if (seg->null_bit)
*key++= test(rec[seg->null_pos] & seg->null_bit);
- memcpy(key,rec+seg->start,(size_t) seg->length);
- key+=seg->length;
+ if (cs->mbmaxlen > 1)
+ {
+ char_length= my_charpos(cs, pos, pos + seg->length,
+ char_length / cs->mbmaxlen);
+ set_if_smaller(char_length, seg->length); /* QQ: ok to remove? */
+ }
+ memcpy(key,rec+seg->start,(size_t) char_length);
+ key+= char_length;
}
}
+
uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
const byte *rec, byte *recpos)
{
@@ -473,6 +534,7 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++)
{
+ uint char_length;
if (seg->null_bit)
{
if (!(*key++= 1 - test(rec[seg->null_pos] & seg->null_bit)))
@@ -515,14 +577,27 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
}
continue;
}
- memcpy(key, rec + seg->start, (size_t) seg->length);
+ char_length= seg->length;
+ if (seg->charset->mbmaxlen > 1)
+ {
+ char_length= my_charpos(seg->charset,
+ rec + seg->start, rec + seg->start + char_length,
+ char_length / seg->charset->mbmaxlen);
+ set_if_smaller(char_length, seg->length); /* QQ: ok to remove? */
+ if (char_length < seg->length)
+ seg->charset->cset->fill(seg->charset, key + char_length,
+ seg->length - char_length, ' ');
+ }
+ memcpy(key, rec + seg->start, (size_t) char_length);
key+= seg->length;
}
memcpy(key, &recpos, sizeof(byte*));
return key - start_key;
}
-uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, uint k_len)
+
+uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old,
+ uint k_len)
{
HA_KEYSEG *seg, *endseg;
uchar *start_key= key;
@@ -530,6 +605,7 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, uint k_len)
for (seg= keydef->seg, endseg= seg + keydef->keysegs;
seg < endseg && (int) k_len > 0; old+= seg->length, seg++)
{
+ uint char_length;
if (seg->null_bit)
{
k_len--;
@@ -551,19 +627,31 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, uint k_len)
}
continue;
}
- memcpy((byte*) key, old, seg->length);
+ char_length= seg->length;
+ if (seg->charset->mbmaxlen > 1)
+ {
+ char_length= my_charpos(seg->charset, old, old+char_length,
+ char_length / seg->charset->mbmaxlen);
+ set_if_smaller(char_length, seg->length); /* QQ: ok to remove? */
+ if (char_length < seg->length)
+ seg->charset->cset->fill(seg->charset, key + char_length,
+ seg->length - char_length, ' ');
+ }
+ memcpy(key, old, (size_t) char_length);
key+= seg->length;
k_len-= seg->length;
}
return key - start_key;
}
+
uint hp_rb_key_length(HP_KEYDEF *keydef,
const byte *key __attribute__((unused)))
{
return keydef->length;
}
+
uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key)
{
const byte *start_key= key;
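
The hp_hash.c changes above truncate key prefixes to whole characters before hashing, comparing, and copying, so multi-byte charsets never operate on a partial character. A rough standalone sketch of the same idea, using a hypothetical charpos() helper in place of the my_charpos macro added to m_ctype.h below:

    #include <string.h>

    /* Hypothetical stand-in for my_charpos(): byte offset of the n-th
       character boundary, never past 'end'. The length rule is crude
       (ASCII vs. a fixed multi-byte width) and only for illustration. */
    static unsigned charpos(const unsigned char *b, const unsigned char *end,
                            unsigned nchars, unsigned mbmaxlen)
    {
      const unsigned char *p= b;
      while (nchars-- && p < end)
        p+= (*p < 0x80) ? 1 : mbmaxlen;
      return (unsigned) ((p < end ? p : end) - b);
    }

    /* Mirror of the pattern used in hp_make_key(): copy at most seg_length
       bytes, but never split a multi-byte character. */
    static unsigned make_key_part(unsigned char *key, const unsigned char *rec,
                                  unsigned seg_length, unsigned mbmaxlen)
    {
      unsigned char_length= seg_length;
      if (mbmaxlen > 1)
      {
        char_length= charpos(rec, rec + seg_length,
                             seg_length / mbmaxlen, mbmaxlen);
        if (char_length > seg_length)
          char_length= seg_length;        /* same effect as set_if_smaller() */
      }
      memcpy(key, rec, char_length);
      return char_length;
    }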
diff --git a/include/config-win.h b/include/config-win.h
index 91697c985d1..0ba8dd2cf43 100644
--- a/include/config-win.h
+++ b/include/config-win.h
@@ -150,6 +150,9 @@ typedef uint rf_SetTimer;
#define HAVE_NAMED_PIPE /* We can only create pipes on NT */
#endif
+/* ERROR is defined in wingdi.h */
+#undef ERROR
+
/* We need to close files to break connections on shutdown */
#ifndef SIGNAL_WITH_VIO_CLOSE
#define SIGNAL_WITH_VIO_CLOSE
diff --git a/include/m_ctype.h b/include/m_ctype.h
index 785fa431385..65b11f4c06a 100644
--- a/include/m_ctype.h
+++ b/include/m_ctype.h
@@ -149,6 +149,7 @@ typedef struct my_charset_handler_st
uint (*well_formed_len)(struct charset_info_st *,
const char *b,const char *e, uint nchars);
uint (*lengthsp)(struct charset_info_st *, const char *ptr, uint length);
+ uint (*numcells)(struct charset_info_st *, const char *b, const char *e);
/* Unicode conversion */
int (*mb_wc)(struct charset_info_st *cs,my_wc_t *wc,
@@ -325,6 +326,7 @@ int my_wildcmp_8bit(CHARSET_INFO *,
int escape, int w_one, int w_many);
uint my_numchars_8bit(CHARSET_INFO *, const char *b, const char *e);
+uint my_numcells_8bit(CHARSET_INFO *, const char *b, const char *e);
uint my_charpos_8bit(CHARSET_INFO *, const char *b, const char *e, uint pos);
uint my_well_formed_len_8bit(CHARSET_INFO *, const char *b, const char *e, uint pos);
int my_mbcharlen_8bit(CHARSET_INFO *, uint c);
@@ -342,6 +344,7 @@ int my_wildcmp_mb(CHARSET_INFO *,
const char *wildstr,const char *wildend,
int escape, int w_one, int w_many);
uint my_numchars_mb(CHARSET_INFO *, const char *b, const char *e);
+uint my_numcells_mb(CHARSET_INFO *, const char *b, const char *e);
uint my_charpos_mb(CHARSET_INFO *, const char *b, const char *e, uint pos);
uint my_well_formed_len_mb(CHARSET_INFO *, const char *b, const char *e, uint pos);
uint my_instr_mb(struct charset_info_st *,
@@ -391,8 +394,10 @@ extern my_bool my_parse_charset_xml(const char *bug, uint len,
#define my_strnncoll(s, a, b, c, d) ((s)->coll->strnncoll((s), (a), (b), (c), (d), 0))
#define my_like_range(s, a, b, c, d, e, f, g, h, i, j) \
((s)->coll->like_range((s), (a), (b), (c), (d), (e), (f), (g), (h), (i), (j)))
-#define my_wildcmp(cs,s,se,w,we,e,o,m) ((cs)->coll->wildcmp((cs),(s),(se),(w),(we),(e),(o),(m)))
+#define my_wildcmp(cs,s,se,w,we,e,o,m) ((cs)->coll->wildcmp((cs),(s),(se),(w),(we),(e),(o),(m)))
#define my_strcasecmp(s, a, b) ((s)->coll->strcasecmp((s), (a), (b)))
+#define my_charpos(cs, b, e, num) (cs)->cset->charpos((cs), (const char*) (b), (const char *)(e), (num))
+
#define use_mb(s) ((s)->cset->ismbchar != NULL)
#define my_ismbchar(s, a, b) ((s)->cset->ismbchar((s), (a), (b)))
diff --git a/include/m_string.h b/include/m_string.h
index 0709dbaffb4..97d34421537 100644
--- a/include/m_string.h
+++ b/include/m_string.h
@@ -238,6 +238,9 @@ longlong my_strtoll10(const char *nptr, char **endptr, int *error);
#ifndef HAVE_STRTOULL
#define HAVE_STRTOULL
#endif
+#ifndef HAVE_STRTOLL
+#define HAVE_STRTOLL
+#endif
#else
#ifdef HAVE_LONG_LONG
extern char *longlong2str(longlong val,char *dst,int radix);
diff --git a/include/my_getopt.h b/include/my_getopt.h
index bf119892a31..e6ca1130f85 100644
--- a/include/my_getopt.h
+++ b/include/my_getopt.h
@@ -14,6 +14,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifndef _my_getopt_h
+#define _my_getopt_h
+
C_MODE_START
#define GET_NO_ARG 1
@@ -51,14 +54,15 @@ struct my_option
int app_type; /* To be used by an application */
};
+typedef my_bool (* my_get_one_option) (int, const struct my_option *, char * );
+typedef void (* my_error_reporter) (enum loglevel level, const char *format, ... );
+
extern char *disabled_my_option;
extern my_bool my_getopt_print_errors;
+extern my_error_reporter my_getopt_error_reporter;
extern int handle_options (int *argc, char ***argv,
- const struct my_option *longopts,
- my_bool (*get_one_option)(int,
- const struct my_option *,
- char *));
+ const struct my_option *longopts, my_get_one_option);
extern void my_print_help(const struct my_option *options);
extern void my_print_variables(const struct my_option *options);
extern void my_getopt_register_get_addr(gptr* (*func_addr)(const char *, uint,
@@ -66,4 +70,8 @@ extern void my_getopt_register_get_addr(gptr* (*func_addr)(const char *, uint,
ulonglong getopt_ull_limit_value(ulonglong num, const struct my_option *optp);
my_bool getopt_compare_strings(const char *s, const char *t, uint length);
+
C_MODE_END
+
+#endif /* _my_getopt_h */
+
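
The my_getopt.h hunk introduces named callback typedefs (my_get_one_option, my_error_reporter) so handle_options() can take them by name. A hedged sketch of a client-side callback matching the my_get_one_option typedef; the option letter, global flag, and option table name below are illustrative, not from this patch:

    #include <my_global.h>   /* real headers from this tree; include order matters */
    #include <my_getopt.h>

    static my_bool opt_verbose;

    /* Signature matches the my_get_one_option typedef added above. */
    static my_bool
    get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
                   char *argument __attribute__((unused)))
    {
      if (optid == 'v')
        opt_verbose= 1;
      return 0;                      /* non-zero would abort option parsing */
    }

    /* Typical call site (the my_long_options table is assumed to exist):
       handle_options(&argc, &argv, my_long_options, get_one_option); */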
diff --git a/include/my_global.h b/include/my_global.h
index a6a777e8eaf..684617695a7 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -644,23 +644,17 @@ typedef SOCKET_SIZE_TYPE size_socket;
#endif
#endif /* defined (HAVE_LONG_LONG) && !defined(ULONGLONG_MAX)*/
-#if SIZEOF_LONG == 4
-#define INT_MIN32 (long) 0x80000000L
-#define INT_MAX32 (long) 0x7FFFFFFFL
-#define INT_MIN24 ((long) 0xff800000L)
-#define INT_MAX24 0x007fffffL
-#define INT_MIN16 ((short int) 0x8000)
-#define INT_MAX16 0x7FFF
-#define INT_MIN8 ((char) 0x80)
-#define INT_MAX8 ((char) 0x7F)
-#else /* Probably Alpha */
-#define INT_MIN32 ((long) (int) 0x80000000)
-#define INT_MAX32 ((long) (int) 0x7FFFFFFF)
-#define INT_MIN24 ((long) (int) 0xff800000)
-#define INT_MAX24 ((long) (int) 0x007fffff)
-#define INT_MIN16 ((short int) 0xffff8000)
-#define INT_MAX16 ((short int) 0x00007FFF)
-#endif
+#define INT_MIN32 (~0x7FFFFFFFL)
+#define INT_MAX32 0x7FFFFFFFL
+#define UINT_MAX32 0xFFFFFFFFL
+#define INT_MIN24 (~0x007FFFFF)
+#define INT_MAX24 0x007FFFFF
+#define UINT_MAX24 0x00FFFFFF
+#define INT_MIN16 (~0x7FFF)
+#define INT_MAX16 0x7FFF
+#define UINT_MAX16 0xFFFF
+#define INT_MIN8 (~0x7F)
+#define INT_MAX8 0x7F
/* From limits.h instead */
#ifndef DBL_MIN
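
The my_global.h hunk replaces the SIZEOF_LONG-dependent limits with bit-complement forms: ~0x7FFFFFFFL is -2147483648 in two's complement whether long is 32 or 64 bits wide, so one set of definitions works everywhere. A tiny check of that identity:

    #include <assert.h>

    int main(void)
    {
      /* On two's complement machines ~X == -X - 1, independent of the
         width of long, so the new INT_MIN* forms are portable. */
      assert(~0x7FFFFFFFL == -2147483647L - 1);
      assert(~0x7FFFL == -32768L);
      return 0;
    }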
diff --git a/include/my_sys.h b/include/my_sys.h
index 0cfad5750c1..54feb73d3e6 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -251,6 +251,12 @@ typedef struct wild_file_pack /* Struct to hold info when selecting files */
my_string *wild; /* Pointer to wildcards */
} WF_PACK;
+enum loglevel {
+ ERROR_LEVEL,
+ WARNING_LEVEL,
+ INFORMATION_LEVEL
+};
+
enum cache_type
{
READ_CACHE,WRITE_CACHE,
diff --git a/include/my_time.h b/include/my_time.h
index e42f7e9e402..d4dbe459c3b 100644
--- a/include/my_time.h
+++ b/include/my_time.h
@@ -27,6 +27,16 @@
C_MODE_START
extern ulonglong log_10_int[20];
+extern uchar days_in_month[];
+
+/*
+ Portable time_t replacement.
+ Should be signed and hold seconds for 1902-2038 range.
+*/
+typedef long my_time_t;
+
+#define MY_TIME_T_MAX LONG_MAX
+#define MY_TIME_T_MIN LONG_MIN
#define YY_PART_YEAR 70
@@ -41,6 +51,15 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
bool str_to_time(const char *str,uint length, MYSQL_TIME *l_time,
int *was_cut);
+long calc_daynr(uint year,uint month,uint day);
+
+void init_time(void);
+
+my_time_t
+my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap);
+
+void set_zero_time(MYSQL_TIME *tm);
+
C_MODE_END
#endif /* _my_time_h_ */
diff --git a/include/myisampack.h b/include/myisampack.h
index 06c94fea75f..c92429e4c01 100644
--- a/include/myisampack.h
+++ b/include/myisampack.h
@@ -22,215 +22,218 @@
*/
/* these two are for uniformity */
-#define mi_sint1korr(A) (int8)(*A)
-#define mi_uint1korr(A) (uint8)(*A)
-
-#define mi_sint2korr(A) (int16) (((int16) ((uchar) (A)[1])) +\
- ((int16) ((int16) (A)[0]) << 8))
-#define mi_sint3korr(A) ((int32) ((((uchar) (A)[0]) & 128) ? \
- (((uint32) 255L << 24) | \
- (((uint32) (uchar) (A)[0]) << 16) |\
- (((uint32) (uchar) (A)[1]) << 8) | \
- ((uint32) (uchar) (A)[2])) : \
- (((uint32) (uchar) (A)[0]) << 16) |\
- (((uint32) (uchar) (A)[1]) << 8) | \
- ((uint32) (uchar) (A)[2])))
-#define mi_sint4korr(A) (int32) (((int32) ((uchar) (A)[3])) +\
- (((int32) ((uchar) (A)[2]) << 8)) +\
- (((int32) ((uchar) (A)[1]) << 16)) +\
- (((int32) ((int16) (A)[0]) << 24)))
-#define mi_sint8korr(A) (longlong) mi_uint8korr(A)
-#define mi_uint2korr(A) (uint16) (((uint16) ((uchar) (A)[1])) +\
- ((uint16) ((uchar) (A)[0]) << 8))
-#define mi_uint3korr(A) (uint32) (((uint32) ((uchar) (A)[2])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[0])) << 16))
-#define mi_uint4korr(A) (uint32) (((uint32) ((uchar) (A)[3])) +\
- (((uint32) ((uchar) (A)[2])) << 8) +\
- (((uint32) ((uchar) (A)[1])) << 16) +\
- (((uint32) ((uchar) (A)[0])) << 24))
-#define mi_uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[4])) +\
- (((uint32) ((uchar) (A)[3])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16) +\
- (((uint32) ((uchar) (A)[1])) << 24)) +\
- (((ulonglong) ((uchar) (A)[0])) << 32))
-#define mi_uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[5])) +\
- (((uint32) ((uchar) (A)[4])) << 8) +\
- (((uint32) ((uchar) (A)[3])) << 16) +\
- (((uint32) ((uchar) (A)[2])) << 24)) +\
- (((ulonglong) (((uint32) ((uchar) (A)[1])) +\
- (((uint32) ((uchar) (A)[0]) << 8)))) <<\
- 32))
-#define mi_uint7korr(A) ((ulonglong)(((uint32) ((uchar) (A)[6])) +\
- (((uint32) ((uchar) (A)[5])) << 8) +\
- (((uint32) ((uchar) (A)[4])) << 16) +\
- (((uint32) ((uchar) (A)[3])) << 24)) +\
- (((ulonglong) (((uint32) ((uchar) (A)[2])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[0])) << 16))) <<\
- 32))
-#define mi_uint8korr(A) ((ulonglong)(((uint32) ((uchar) (A)[7])) +\
- (((uint32) ((uchar) (A)[6])) << 8) +\
- (((uint32) ((uchar) (A)[5])) << 16) +\
- (((uint32) ((uchar) (A)[4])) << 24)) +\
- (((ulonglong) (((uint32) ((uchar) (A)[3])) +\
- (((uint32) ((uchar) (A)[2])) << 8) +\
- (((uint32) ((uchar) (A)[1])) << 16) +\
- (((uint32) ((uchar) (A)[0])) << 24))) <<\
- 32))
+#define mi_sint1korr(A) ((int8)(*A))
+#define mi_uint1korr(A) ((uint8)(*A))
+
+#define mi_sint2korr(A) ((int16) (((int16) (((uchar*) (A))[1])) +\
+ ((int16) ((int16) ((char*) (A))[0]) << 8)))
+#define mi_sint3korr(A) ((int32) (((((uchar*) (A))[0]) & 128) ? \
+ (((uint32) 255L << 24) | \
+ (((uint32) ((uchar*) (A))[0]) << 16) |\
+ (((uint32) ((uchar*) (A))[1]) << 8) | \
+ ((uint32) ((uchar*) (A))[2])) : \
+ (((uint32) ((uchar*) (A))[0]) << 16) |\
+ (((uint32) ((uchar*) (A))[1]) << 8) | \
+ ((uint32) ((uchar*) (A))[2])))
+#define mi_sint4korr(A) ((int32) (((int32) (((uchar*) (A))[3])) +\
+ ((int32) (((uchar*) (A))[2]) << 8) +\
+ ((int32) (((uchar*) (A))[1]) << 16) +\
+ ((int32) ((int16) ((char*) (A))[0]) << 24)))
+#define mi_sint8korr(A) ((longlong) mi_uint8korr(A))
+#define mi_uint2korr(A) ((uint16) (((uint16) (((uchar*) (A))[1])) +\
+ ((uint16) (((uchar*) (A))[0]) << 8)))
+#define mi_uint3korr(A) ((uint32) (((uint32) (((uchar*) (A))[2])) +\
+ (((uint32) (((uchar*) (A))[1])) << 8) +\
+ (((uint32) (((uchar*) (A))[0])) << 16)))
+#define mi_uint4korr(A) ((uint32) (((uint32) (((uchar*) (A))[3])) +\
+ (((uint32) (((uchar*) (A))[2])) << 8) +\
+ (((uint32) (((uchar*) (A))[1])) << 16) +\
+ (((uint32) (((uchar*) (A))[0])) << 24)))
+#define mi_uint5korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[4])) +\
+ (((uint32) (((uchar*) (A))[3])) << 8) +\
+ (((uint32) (((uchar*) (A))[2])) << 16) +\
+ (((uint32) (((uchar*) (A))[1])) << 24)) +\
+ (((ulonglong) (((uchar*) (A))[0])) << 32))
+#define mi_uint6korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[5])) +\
+ (((uint32) (((uchar*) (A))[4])) << 8) +\
+ (((uint32) (((uchar*) (A))[3])) << 16) +\
+ (((uint32) (((uchar*) (A))[2])) << 24)) +\
+ (((ulonglong) (((uint32) (((uchar*) (A))[1])) +\
+ (((uint32) (((uchar*) (A))[0]) << 8)))) <<\
+ 32))
+#define mi_uint7korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[6])) +\
+ (((uint32) (((uchar*) (A))[5])) << 8) +\
+ (((uint32) (((uchar*) (A))[4])) << 16) +\
+ (((uint32) (((uchar*) (A))[3])) << 24)) +\
+ (((ulonglong) (((uint32) (((uchar*) (A))[2])) +\
+ (((uint32) (((uchar*) (A))[1])) << 8) +\
+ (((uint32) (((uchar*) (A))[0])) << 16))) <<\
+ 32))
+#define mi_uint8korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[7])) +\
+ (((uint32) (((uchar*) (A))[6])) << 8) +\
+ (((uint32) (((uchar*) (A))[5])) << 16) +\
+ (((uint32) (((uchar*) (A))[4])) << 24)) +\
+ (((ulonglong) (((uint32) (((uchar*) (A))[3])) +\
+ (((uint32) (((uchar*) (A))[2])) << 8) +\
+ (((uint32) (((uchar*) (A))[1])) << 16) +\
+ (((uint32) (((uchar*) (A))[0])) << 24))) <<\
+ 32))
/* This one is for uniformity */
#define mi_int1store(T,A) *((uchar*)(T))= (uchar) (A)
-#define mi_int2store(T,A) { uint def_temp= (uint) (A) ;\
- *((uchar*) ((T)+1))= (uchar)(def_temp); \
- *((uchar*) ((T)+0))= (uchar)(def_temp >> 8); }
-#define mi_int3store(T,A) { /*lint -save -e734 */\
- ulong def_temp= (ulong) (A);\
- *(((T)+2))=(char) (def_temp);\
- *((T)+1)= (char) (def_temp >> 8);\
- *((T)+0)= (char) (def_temp >> 16);\
- /*lint -restore */}
-#define mi_int4store(T,A) { ulong def_temp= (ulong) (A);\
- *((T)+3)=(char) (def_temp);\
- *((T)+2)=(char) (def_temp >> 8);\
- *((T)+1)=(char) (def_temp >> 16);\
- *((T)+0)=(char) (def_temp >> 24); }
-#define mi_int5store(T,A) { ulong def_temp= (ulong) (A),\
- def_temp2= (ulong) ((A) >> 32);\
- *((T)+4)=(char) (def_temp);\
- *((T)+3)=(char) (def_temp >> 8);\
- *((T)+2)=(char) (def_temp >> 16);\
- *((T)+1)=(char) (def_temp >> 24);\
- *((T)+0)=(char) (def_temp2); }
-#define mi_int6store(T,A) { ulong def_temp= (ulong) (A),\
- def_temp2= (ulong) ((A) >> 32);\
- *((T)+5)=(char) (def_temp);\
- *((T)+4)=(char) (def_temp >> 8);\
- *((T)+3)=(char) (def_temp >> 16);\
- *((T)+2)=(char) (def_temp >> 24);\
- *((T)+1)=(char) (def_temp2);\
- *((T)+0)=(char) (def_temp2 >> 8); }
-#define mi_int7store(T,A) { ulong def_temp= (ulong) (A),\
- def_temp2= (ulong) ((A) >> 32);\
- *((T)+6)=(char) (def_temp);\
- *((T)+5)=(char) (def_temp >> 8);\
- *((T)+4)=(char) (def_temp >> 16);\
- *((T)+3)=(char) (def_temp >> 24);\
- *((T)+2)=(char) (def_temp2);\
- *((T)+1)=(char) (def_temp2 >> 8);\
- *((T)+0)=(char) (def_temp2 >> 16); }
-#define mi_int8store(T,A) { ulong def_temp3= (ulong) (A), \
- def_temp4= (ulong) ((A) >> 32); \
- mi_int4store((T),def_temp4); \
- mi_int4store((T+4),def_temp3); \
- }
+#define mi_int2store(T,A) { uint def_temp= (uint) (A) ;\
+ ((uchar*) (T))[1]= (uchar) (def_temp);\
+ ((uchar*) (T))[0]= (uchar) (def_temp >> 8); }
+#define mi_int3store(T,A) { /*lint -save -e734 */\
+ ulong def_temp= (ulong) (A);\
+ ((uchar*) (T))[2]= (uchar) (def_temp);\
+ ((uchar*) (T))[1]= (uchar) (def_temp >> 8);\
+ ((uchar*) (T))[0]= (uchar) (def_temp >> 16);\
+ /*lint -restore */}
+#define mi_int4store(T,A) { ulong def_temp= (ulong) (A);\
+ ((uchar*) (T))[3]= (uchar) (def_temp);\
+ ((uchar*) (T))[2]= (uchar) (def_temp >> 8);\
+ ((uchar*) (T))[1]= (uchar) (def_temp >> 16);\
+ ((uchar*) (T))[0]= (uchar) (def_temp >> 24); }
+#define mi_int5store(T,A) { ulong def_temp= (ulong) (A),\
+ def_temp2= (ulong) ((A) >> 32);\
+ ((uchar*) (T))[4]= (uchar) (def_temp);\
+ ((uchar*) (T))[3]= (uchar) (def_temp >> 8);\
+ ((uchar*) (T))[2]= (uchar) (def_temp >> 16);\
+ ((uchar*) (T))[1]= (uchar) (def_temp >> 24);\
+ ((uchar*) (T))[0]= (uchar) (def_temp2); }
+#define mi_int6store(T,A) { ulong def_temp= (ulong) (A),\
+ def_temp2= (ulong) ((A) >> 32);\
+ ((uchar*) (T))[5]= (uchar) (def_temp);\
+ ((uchar*) (T))[4]= (uchar) (def_temp >> 8);\
+ ((uchar*) (T))[3]= (uchar) (def_temp >> 16);\
+ ((uchar*) (T))[2]= (uchar) (def_temp >> 24);\
+ ((uchar*) (T))[1]= (uchar) (def_temp2);\
+ ((uchar*) (T))[0]= (uchar) (def_temp2 >> 8); }
+#define mi_int7store(T,A) { ulong def_temp= (ulong) (A),\
+ def_temp2= (ulong) ((A) >> 32);\
+ ((uchar*) (T))[6]= (uchar) (def_temp);\
+ ((uchar*) (T))[5]= (uchar) (def_temp >> 8);\
+ ((uchar*) (T))[4]= (uchar) (def_temp >> 16);\
+ ((uchar*) (T))[3]= (uchar) (def_temp >> 24);\
+ ((uchar*) (T))[2]= (uchar) (def_temp2);\
+ ((uchar*) (T))[1]= (uchar) (def_temp2 >> 8);\
+ ((uchar*) (T))[0]= (uchar) (def_temp2 >> 16); }
+#define mi_int8store(T,A) { ulong def_temp3= (ulong) (A),\
+ def_temp4= (ulong) ((A) >> 32);\
+ mi_int4store((uchar*) (T) + 0, def_temp4);\
+ mi_int4store((uchar*) (T) + 4, def_temp3); }
#ifdef WORDS_BIGENDIAN
-#define mi_float4store(T,A) { *(T)= ((byte *) &A)[0];\
- *((T)+1)=(char) ((byte *) &A)[1];\
- *((T)+2)=(char) ((byte *) &A)[2];\
- *((T)+3)=(char) ((byte *) &A)[3]; }
+#define mi_float4store(T,A) { ((uchar*) (T))[0]= ((uchar*) &A)[0];\
+ ((uchar*) (T))[1]= ((uchar*) &A)[1];\
+ ((uchar*) (T))[2]= ((uchar*) &A)[2];\
+ ((uchar*) (T))[3]= ((uchar*) &A)[3]; }
#define mi_float4get(V,M) { float def_temp;\
- ((byte*) &def_temp)[0]=(M)[0];\
- ((byte*) &def_temp)[1]=(M)[1];\
- ((byte*) &def_temp)[2]=(M)[2];\
- ((byte*) &def_temp)[3]=(M)[3];\
- (V)=def_temp; }
-
-#define mi_float8store(T,V) { *(T)= ((byte *) &V)[0];\
- *((T)+1)=(char) ((byte *) &V)[1];\
- *((T)+2)=(char) ((byte *) &V)[2];\
- *((T)+3)=(char) ((byte *) &V)[3];\
- *((T)+4)=(char) ((byte *) &V)[4];\
- *((T)+5)=(char) ((byte *) &V)[5];\
- *((T)+6)=(char) ((byte *) &V)[6];\
- *((T)+7)=(char) ((byte *) &V)[7]; }
+ ((uchar*) &def_temp)[0]= ((uchar*) (M))[0];\
+ ((uchar*) &def_temp)[1]= ((uchar*) (M))[1];\
+ ((uchar*) &def_temp)[2]= ((uchar*) (M))[2];\
+ ((uchar*) &def_temp)[3]= ((uchar*) (M))[3];\
+ (V)= def_temp; }
+
+#define mi_float8store(T,V) { ((uchar*) (T))[0]= ((uchar*) &V)[0];\
+ ((uchar*) (T))[1]= ((uchar*) &V)[1];\
+ ((uchar*) (T))[2]= ((uchar*) &V)[2];\
+ ((uchar*) (T))[3]= ((uchar*) &V)[3];\
+ ((uchar*) (T))[4]= ((uchar*) &V)[4];\
+ ((uchar*) (T))[5]= ((uchar*) &V)[5];\
+ ((uchar*) (T))[6]= ((uchar*) &V)[6];\
+ ((uchar*) (T))[7]= ((uchar*) &V)[7]; }
#define mi_float8get(V,M) { double def_temp;\
- ((byte*) &def_temp)[0]=(M)[0];\
- ((byte*) &def_temp)[1]=(M)[1];\
- ((byte*) &def_temp)[2]=(M)[2];\
- ((byte*) &def_temp)[3]=(M)[3];\
- ((byte*) &def_temp)[4]=(M)[4];\
- ((byte*) &def_temp)[5]=(M)[5];\
- ((byte*) &def_temp)[6]=(M)[6];\
- ((byte*) &def_temp)[7]=(M)[7]; \
- (V)=def_temp; }
+ ((uchar*) &def_temp)[0]= ((uchar*) (M))[0];\
+ ((uchar*) &def_temp)[1]= ((uchar*) (M))[1];\
+ ((uchar*) &def_temp)[2]= ((uchar*) (M))[2];\
+ ((uchar*) &def_temp)[3]= ((uchar*) (M))[3];\
+ ((uchar*) &def_temp)[4]= ((uchar*) (M))[4];\
+ ((uchar*) &def_temp)[5]= ((uchar*) (M))[5];\
+ ((uchar*) &def_temp)[6]= ((uchar*) (M))[6];\
+ ((uchar*) &def_temp)[7]= ((uchar*) (M))[7]; \
+ (V)= def_temp; }
#else
-#define mi_float4store(T,A) { *(T)= ((byte *) &A)[3];\
- *((T)+1)=(char) ((byte *) &A)[2];\
- *((T)+2)=(char) ((byte *) &A)[1];\
- *((T)+3)=(char) ((byte *) &A)[0]; }
+#define mi_float4store(T,A) { ((uchar*) (T))[0]= ((uchar*) &A)[3];\
+ ((uchar*) (T))[1]= ((uchar*) &A)[2];\
+ ((uchar*) (T))[2]= ((uchar*) &A)[1];\
+ ((uchar*) (T))[3]= ((uchar*) &A)[0]; }
#define mi_float4get(V,M) { float def_temp;\
- ((byte*) &def_temp)[0]=(M)[3];\
- ((byte*) &def_temp)[1]=(M)[2];\
- ((byte*) &def_temp)[2]=(M)[1];\
- ((byte*) &def_temp)[3]=(M)[0];\
- (V)=def_temp; }
+ ((uchar*) &def_temp)[0]= ((uchar*) (M))[3];\
+ ((uchar*) &def_temp)[1]= ((uchar*) (M))[2];\
+ ((uchar*) &def_temp)[2]= ((uchar*) (M))[1];\
+ ((uchar*) &def_temp)[3]= ((uchar*) (M))[0];\
+ (V)= def_temp; }
#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN)
-#define mi_float8store(T,V) { *(T)= ((byte *) &V)[3];\
- *((T)+1)=(char) ((byte *) &V)[2];\
- *((T)+2)=(char) ((byte *) &V)[1];\
- *((T)+3)=(char) ((byte *) &V)[0];\
- *((T)+4)=(char) ((byte *) &V)[7];\
- *((T)+5)=(char) ((byte *) &V)[6];\
- *((T)+6)=(char) ((byte *) &V)[5];\
- *((T)+7)=(char) ((byte *) &V)[4];}
+#define mi_float8store(T,V) { ((uchar*) (T))[0]= ((uchar*) &V)[3];\
+ ((uchar*) (T))[1]= ((uchar*) &V)[2];\
+ ((uchar*) (T))[2]= ((uchar*) &V)[1];\
+ ((uchar*) (T))[3]= ((uchar*) &V)[0];\
+ ((uchar*) (T))[4]= ((uchar*) &V)[7];\
+ ((uchar*) (T))[5]= ((uchar*) &V)[6];\
+ ((uchar*) (T))[6]= ((uchar*) &V)[5];\
+ ((uchar*) (T))[7]= ((uchar*) &V)[4];}
#define mi_float8get(V,M) { double def_temp;\
- ((byte*) &def_temp)[0]=(M)[3];\
- ((byte*) &def_temp)[1]=(M)[2];\
- ((byte*) &def_temp)[2]=(M)[1];\
- ((byte*) &def_temp)[3]=(M)[0];\
- ((byte*) &def_temp)[4]=(M)[7];\
- ((byte*) &def_temp)[5]=(M)[6];\
- ((byte*) &def_temp)[6]=(M)[5];\
- ((byte*) &def_temp)[7]=(M)[4];\
- (V)=def_temp; }
+ ((uchar*) &def_temp)[0]= ((uchar*) (M))[3];\
+ ((uchar*) &def_temp)[1]= ((uchar*) (M))[2];\
+ ((uchar*) &def_temp)[2]= ((uchar*) (M))[1];\
+ ((uchar*) &def_temp)[3]= ((uchar*) (M))[0];\
+ ((uchar*) &def_temp)[4]= ((uchar*) (M))[7];\
+ ((uchar*) &def_temp)[5]= ((uchar*) (M))[6];\
+ ((uchar*) &def_temp)[6]= ((uchar*) (M))[5];\
+ ((uchar*) &def_temp)[7]= ((uchar*) (M))[4];\
+ (V)= def_temp; }
#else
-#define mi_float8store(T,V) { *(T)= ((byte *) &V)[7];\
- *((T)+1)=(char) ((byte *) &V)[6];\
- *((T)+2)=(char) ((byte *) &V)[5];\
- *((T)+3)=(char) ((byte *) &V)[4];\
- *((T)+4)=(char) ((byte *) &V)[3];\
- *((T)+5)=(char) ((byte *) &V)[2];\
- *((T)+6)=(char) ((byte *) &V)[1];\
- *((T)+7)=(char) ((byte *) &V)[0];}
+#define mi_float8store(T,V) { ((uchar*) (T))[0]= ((uchar*) &V)[7];\
+ ((uchar*) (T))[1]= ((uchar*) &V)[6];\
+ ((uchar*) (T))[2]= ((uchar*) &V)[5];\
+ ((uchar*) (T))[3]= ((uchar*) &V)[4];\
+ ((uchar*) (T))[4]= ((uchar*) &V)[3];\
+ ((uchar*) (T))[5]= ((uchar*) &V)[2];\
+ ((uchar*) (T))[6]= ((uchar*) &V)[1];\
+ ((uchar*) (T))[7]= ((uchar*) &V)[0];}
#define mi_float8get(V,M) { double def_temp;\
- ((byte*) &def_temp)[0]=(M)[7];\
- ((byte*) &def_temp)[1]=(M)[6];\
- ((byte*) &def_temp)[2]=(M)[5];\
- ((byte*) &def_temp)[3]=(M)[4];\
- ((byte*) &def_temp)[4]=(M)[3];\
- ((byte*) &def_temp)[5]=(M)[2];\
- ((byte*) &def_temp)[6]=(M)[1];\
- ((byte*) &def_temp)[7]=(M)[0];\
- (V)=def_temp; }
+ ((uchar*) &def_temp)[0]= ((uchar*) (M))[7];\
+ ((uchar*) &def_temp)[1]= ((uchar*) (M))[6];\
+ ((uchar*) &def_temp)[2]= ((uchar*) (M))[5];\
+ ((uchar*) &def_temp)[3]= ((uchar*) (M))[4];\
+ ((uchar*) &def_temp)[4]= ((uchar*) (M))[3];\
+ ((uchar*) &def_temp)[5]= ((uchar*) (M))[2];\
+ ((uchar*) &def_temp)[6]= ((uchar*) (M))[1];\
+ ((uchar*) &def_temp)[7]= ((uchar*) (M))[0];\
+ (V)= def_temp; }
#endif /* __FLOAT_WORD_ORDER */
#endif /* WORDS_BIGENDIAN */
/* Fix to avoid warnings when sizeof(ha_rows) == sizeof(long) */
#ifdef BIG_TABLES
-#define mi_rowstore(T,A) mi_int8store(T,A)
-#define mi_rowkorr(T) mi_uint8korr(T)
+#define mi_rowstore(T,A) mi_int8store(T, A)
+#define mi_rowkorr(T) mi_uint8korr(T)
#else
-#define mi_rowstore(T,A) { mi_int4store(T,0); mi_int4store(((T)+4),A); }
-#define mi_rowkorr(T) mi_uint4korr((T)+4)
+#define mi_rowstore(T,A) { mi_int4store(T, 0);\
+ mi_int4store(((uchar*) (T) + 4), A); }
+#define mi_rowkorr(T) mi_uint4korr((uchar*) (T) + 4)
#endif
#if SIZEOF_OFF_T > 4
-#define mi_sizestore(T,A) mi_int8store(T,A)
-#define mi_sizekorr(T) mi_uint8korr(T)
+#define mi_sizestore(T,A) mi_int8store(T, A)
+#define mi_sizekorr(T) mi_uint8korr(T)
#else
-#define mi_sizestore(T,A) { if ((A) == HA_OFFSET_ERROR) bfill((char*) (T),8,255); else { mi_int4store((T),0); mi_int4store(((T)+4),A); }}
-#define mi_sizekorr(T) mi_uint4korr((T)+4)
+#define mi_sizestore(T,A) { if ((A) == HA_OFFSET_ERROR)\
+ bfill((char*) (T), 8, 255);\
+ else { mi_int4store((T), 0);\
+ mi_int4store(((T) + 4), A); }}
+#define mi_sizekorr(T) mi_uint4korr((uchar*) (T) + 4)
#endif
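
The rewritten myisampack.h macros cast their argument through uchar* explicitly, so they behave the same whether the caller passes char* or uchar*, and negative bytes no longer sign-extend by accident. A minimal round-trip check of the 2-byte pair; the macro bodies are repeated here so the snippet stands alone, mirroring the definitions above:

    #include <assert.h>

    typedef unsigned char  uchar;
    typedef unsigned short uint16;
    typedef unsigned int   uint;

    /* Same shape as the mi_int2store / mi_uint2korr pair above:
       big-endian (high byte first) regardless of host byte order. */
    #define mi_int2store(T,A)  { uint def_temp= (uint) (A); \
                                 ((uchar*) (T))[1]= (uchar) (def_temp); \
                                 ((uchar*) (T))[0]= (uchar) (def_temp >> 8); }
    #define mi_uint2korr(A)    ((uint16) (((uint16) (((uchar*) (A))[1])) + \
                                ((uint16) (((uchar*) (A))[0]) << 8)))

    int main(void)
    {
      char buf[2];                 /* plain char* works thanks to the casts */
      mi_int2store(buf, 0xBEEF);
      assert((uchar) buf[0] == 0xBE && (uchar) buf[1] == 0xEF);
      assert(mi_uint2korr(buf) == 0xBEEF);
      return 0;
    }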
diff --git a/include/mysql.h b/include/mysql.h
index 2af1c657aeb..9d08bf5aa57 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -99,7 +99,7 @@ typedef struct st_mysql_field {
unsigned int flags; /* Div flags */
unsigned int decimals; /* Number of decimals in field */
unsigned int charsetnr; /* Character set */
- enum enum_field_types type; /* Type of field. Se mysql_com.h for types */
+ enum enum_field_types type; /* Type of field. See mysql_com.h for types */
} MYSQL_FIELD;
typedef char **MYSQL_ROW; /* return data as array of strings */
@@ -175,7 +175,7 @@ struct st_mysql_options {
*/
my_bool rpl_parse;
/*
- If set, never read from a master,only from slave, when doing
+ If set, never read from a master, only from slave, when doing
a read that is replication-aware
*/
my_bool no_master_reads;
@@ -454,7 +454,7 @@ int STDCALL mysql_add_slave(MYSQL* mysql, const char* host,
const char* passwd);
int STDCALL mysql_shutdown(MYSQL *mysql,
- enum enum_shutdown_level
+ enum mysql_enum_shutdown_level
shutdown_level);
int STDCALL mysql_dump_debug_info(MYSQL *mysql);
int STDCALL mysql_refresh(MYSQL *mysql,
@@ -538,7 +538,7 @@ enum enum_mysql_stmt_state
typedef struct st_mysql_bind
{
unsigned long *length; /* output length pointer */
- my_bool *is_null; /* Pointer to null indicators */
+ my_bool *is_null; /* Pointer to null indicator */
void *buffer; /* buffer to get/put data */
enum enum_field_types buffer_type; /* buffer type */
unsigned long buffer_length; /* buffer length, must be set for str/binary */
@@ -587,7 +587,7 @@ typedef struct st_mysql_stmt
*/
unsigned int server_status;
unsigned int last_errno; /* error code */
- unsigned int param_count; /* inpute parameters count */
+ unsigned int param_count; /* input parameter count */
unsigned int field_count; /* number of columns in result set */
enum enum_mysql_stmt_state state; /* statement state */
char last_error[MYSQL_ERRMSG_SIZE]; /* error message */
@@ -638,6 +638,7 @@ typedef struct st_mysql_methods
MYSQL_RES * (*use_result)(MYSQL *mysql);
void (*fetch_lengths)(unsigned long *to,
MYSQL_ROW column, unsigned int field_count);
+ void (*flush_use_result)(MYSQL *mysql);
#if !defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY)
MYSQL_FIELD * (*list_fields)(MYSQL *mysql);
my_bool (*read_prepare_result)(MYSQL *mysql, MYSQL_STMT *stmt);
diff --git a/include/mysql_com.h b/include/mysql_com.h
index fa73895000c..3cf1a011e3c 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -242,25 +242,32 @@ enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY,
#define FIELD_TYPE_INTERVAL MYSQL_TYPE_ENUM
#define FIELD_TYPE_GEOMETRY MYSQL_TYPE_GEOMETRY
-enum enum_shutdown_level {
- /*
- We want levels to be in growing order of hardness. So we leave room
- for future intermediate levels. For now, escalating one level is += 10;
- later if we insert new levels in between we will need a function
- next_shutdown_level(level). Note that DEFAULT does not respect the
- growing property.
- */
- SHUTDOWN_DEFAULT= 0, /* mapped to WAIT_ALL_BUFFERS for now */
+
+/* Shutdown/kill enums and constants */
+
+/* Bits for THD::killable. */
+#define MYSQL_SHUTDOWN_KILLABLE_CONNECT (unsigned char)(1 << 0)
+#define MYSQL_SHUTDOWN_KILLABLE_TRANS (unsigned char)(1 << 1)
+#define MYSQL_SHUTDOWN_KILLABLE_LOCK_TABLE (unsigned char)(1 << 2)
+#define MYSQL_SHUTDOWN_KILLABLE_UPDATE (unsigned char)(1 << 3)
+
+enum mysql_enum_shutdown_level {
/*
- Here is the list in growing order (the next does the previous plus
- something). WAIT_ALL_BUFFERS is what we have now. Others are "this MySQL
- server does not support this shutdown level yet".
+ We want levels to be in growing order of hardness (because we use number
+ comparisons). Note that DEFAULT does not respect the growing property, but
+ it's ok.
*/
- SHUTDOWN_WAIT_CONNECTIONS= 10, /* wait for existing connections to finish */
- SHUTDOWN_WAIT_TRANSACTIONS= 20, /* wait for existing trans to finish */
- SHUTDOWN_WAIT_STATEMENTS= 30, /* wait for existing updating stmts to finish */
- SHUTDOWN_WAIT_ALL_BUFFERS= 40, /* flush InnoDB buffers */
- SHUTDOWN_WAIT_CRITICAL_BUFFERS= 50, /* flush MyISAM buffs (no corruption) */
+ SHUTDOWN_DEFAULT = 0,
+ /* wait for existing connections to finish */
+ SHUTDOWN_WAIT_CONNECTIONS= MYSQL_SHUTDOWN_KILLABLE_CONNECT,
+ /* wait for existing trans to finish */
+ SHUTDOWN_WAIT_TRANSACTIONS= MYSQL_SHUTDOWN_KILLABLE_TRANS,
+ /* wait for existing updates to finish (=> no partial MyISAM update) */
+ SHUTDOWN_WAIT_UPDATES= MYSQL_SHUTDOWN_KILLABLE_UPDATE,
+ /* flush InnoDB buffers and other storage engines' buffers*/
+ SHUTDOWN_WAIT_ALL_BUFFERS= (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1),
+ /* don't flush InnoDB buffers, flush other storage engines' buffers*/
+ SHUTDOWN_WAIT_CRITICAL_BUFFERS= (MYSQL_SHUTDOWN_KILLABLE_UPDATE << 1) + 1,
/* Now the 2 levels of the KILL command */
#if MYSQL_VERSION_ID >= 50000
KILL_QUERY= 254,
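
The new mysql_enum_shutdown_level values reuse the THD::killable bits and, as the comment above notes, grow in order of hardness so the server can use plain numeric comparisons. A hedged sketch of that comparison; the helper is illustrative and not a function from this patch:

    #include <mysql_com.h>   /* assumed to be on the include path */

    /* Levels are ordered by hardness (SHUTDOWN_DEFAULT == 0 is the one
       exception), so "does level 'have' imply at least level 'want'?"
       reduces to a numeric comparison. */
    static int shutdown_at_least(enum mysql_enum_shutdown_level have,
                                 enum mysql_enum_shutdown_level want)
    {
      return (unsigned) have >= (unsigned) want;
    }

    /* e.g. shutdown_at_least(SHUTDOWN_WAIT_ALL_BUFFERS,
                              SHUTDOWN_WAIT_UPDATES) evaluates to 1 */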
diff --git a/include/mysql_time.h b/include/mysql_time.h
index 943d018fc14..ec67d60dea5 100644
--- a/include/mysql_time.h
+++ b/include/mysql_time.h
@@ -17,7 +17,14 @@
#ifndef _mysql_time_h_
#define _mysql_time_h_
-/* Time declarations shared between server and client library */
+/*
+ Time declarations shared between the server and client API:
+ you should not add anything to this header unless it's used
+ (and hence should be visible) in mysql.h.
+ If you're looking for a place to add a new time-related declaration,
+ it's most likely my_time.h. See also the "C API Handling of Date
+ and Time Values" chapter in the documentation.
+*/
enum enum_mysql_timestamp_type
{
diff --git a/include/mysqld_error.h b/include/mysqld_error.h
index 3ee12fa9580..e1a0b352861 100644
--- a/include/mysqld_error.h
+++ b/include/mysqld_error.h
@@ -318,60 +318,61 @@
#define ER_WARN_INVALID_TIMESTAMP 1299
#define ER_INVALID_CHARACTER_STRING 1300
#define ER_WARN_ALLOWED_PACKET_OVERFLOWED 1301
-#define ER_SP_NO_RECURSIVE_CREATE 1302
-#define ER_SP_ALREADY_EXISTS 1303
-#define ER_SP_DOES_NOT_EXIST 1304
-#define ER_SP_DROP_FAILED 1305
-#define ER_SP_STORE_FAILED 1306
-#define ER_SP_LILABEL_MISMATCH 1307
-#define ER_SP_LABEL_REDEFINE 1308
-#define ER_SP_LABEL_MISMATCH 1309
-#define ER_SP_UNINIT_VAR 1310
-#define ER_SP_BADSELECT 1311
-#define ER_SP_BADRETURN 1312
-#define ER_SP_BADSTATEMENT 1313
-#define ER_UPDATE_LOG_DEPRECATED_IGNORED 1314
-#define ER_UPDATE_LOG_DEPRECATED_TRANSLATED 1315
-#define ER_QUERY_INTERRUPTED 1316
-#define ER_SP_WRONG_NO_OF_ARGS 1317
-#define ER_SP_COND_MISMATCH 1318
-#define ER_SP_NORETURN 1319
-#define ER_SP_NORETURNEND 1320
-#define ER_SP_BAD_CURSOR_QUERY 1321
-#define ER_SP_BAD_CURSOR_SELECT 1322
-#define ER_SP_CURSOR_MISMATCH 1323
-#define ER_SP_CURSOR_ALREADY_OPEN 1324
-#define ER_SP_CURSOR_NOT_OPEN 1325
-#define ER_SP_UNDECLARED_VAR 1326
-#define ER_SP_WRONG_NO_OF_FETCH_ARGS 1327
-#define ER_SP_FETCH_NO_DATA 1328
-#define ER_SP_DUP_PARAM 1329
-#define ER_SP_DUP_VAR 1330
-#define ER_SP_DUP_COND 1331
-#define ER_SP_DUP_CURS 1332
-#define ER_SP_CANT_ALTER 1333
-#define ER_SP_SUBSELECT_NYI 1334
-#define ER_SP_NO_USE 1335
-#define ER_SP_VARCOND_AFTER_CURSHNDLR 1336
-#define ER_SP_CURSOR_AFTER_HANDLER 1337
-#define ER_SP_CASE_NOT_FOUND 1338
-#define ER_FPARSER_TOO_BIG_FILE 1339
-#define ER_FPARSER_BAD_HEADER 1340
-#define ER_FPARSER_EOF_IN_COMMENT 1341
-#define ER_FPARSER_ERROR_IN_PARAMETER 1342
-#define ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER 1343
-#define ER_VIEW_NO_EXPLAIN 1344
-#define ER_FRM_UNKNOWN_TYPE 1345
-#define ER_WRONG_OBJECT 1346
-#define ER_NONUPDATEABLE_COLUMN 1347
-#define ER_VIEW_SELECT_DERIVED 1348
-#define ER_VIEW_SELECT_PROCEDURE 1349
-#define ER_VIEW_SELECT_VARIABLE 1350
-#define ER_VIEW_SELECT_TMPTABLE 1351
-#define ER_VIEW_WRONG_LIST 1352
-#define ER_WARN_VIEW_MERGE 1353
-#define ER_WARN_VIEW_WITHOUT_KEY 1354
-#define ER_VIEW_INVALID 1355
-#define ER_SP_NO_DROP_SP 1356
-#define ER_SP_GOTO_IN_HNDLR 1357
-#define ER_ERROR_MESSAGES 357
+#define ER_CONFLICTING_DECLARATIONS 1302
+#define ER_SP_NO_RECURSIVE_CREATE 1303
+#define ER_SP_ALREADY_EXISTS 1304
+#define ER_SP_DOES_NOT_EXIST 1305
+#define ER_SP_DROP_FAILED 1306
+#define ER_SP_STORE_FAILED 1307
+#define ER_SP_LILABEL_MISMATCH 1308
+#define ER_SP_LABEL_REDEFINE 1309
+#define ER_SP_LABEL_MISMATCH 1310
+#define ER_SP_UNINIT_VAR 1311
+#define ER_SP_BADSELECT 1312
+#define ER_SP_BADRETURN 1313
+#define ER_SP_BADSTATEMENT 1314
+#define ER_UPDATE_LOG_DEPRECATED_IGNORED 1315
+#define ER_UPDATE_LOG_DEPRECATED_TRANSLATED 1316
+#define ER_QUERY_INTERRUPTED 1317
+#define ER_SP_WRONG_NO_OF_ARGS 1318
+#define ER_SP_COND_MISMATCH 1319
+#define ER_SP_NORETURN 1320
+#define ER_SP_NORETURNEND 1321
+#define ER_SP_BAD_CURSOR_QUERY 1322
+#define ER_SP_BAD_CURSOR_SELECT 1323
+#define ER_SP_CURSOR_MISMATCH 1324
+#define ER_SP_CURSOR_ALREADY_OPEN 1325
+#define ER_SP_CURSOR_NOT_OPEN 1326
+#define ER_SP_UNDECLARED_VAR 1327
+#define ER_SP_WRONG_NO_OF_FETCH_ARGS 1328
+#define ER_SP_FETCH_NO_DATA 1329
+#define ER_SP_DUP_PARAM 1330
+#define ER_SP_DUP_VAR 1331
+#define ER_SP_DUP_COND 1332
+#define ER_SP_DUP_CURS 1333
+#define ER_SP_CANT_ALTER 1334
+#define ER_SP_SUBSELECT_NYI 1335
+#define ER_SP_NO_USE 1336
+#define ER_SP_VARCOND_AFTER_CURSHNDLR 1337
+#define ER_SP_CURSOR_AFTER_HANDLER 1338
+#define ER_SP_CASE_NOT_FOUND 1339
+#define ER_FPARSER_TOO_BIG_FILE 1340
+#define ER_FPARSER_BAD_HEADER 1341
+#define ER_FPARSER_EOF_IN_COMMENT 1342
+#define ER_FPARSER_ERROR_IN_PARAMETER 1343
+#define ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER 1344
+#define ER_VIEW_NO_EXPLAIN 1345
+#define ER_FRM_UNKNOWN_TYPE 1346
+#define ER_WRONG_OBJECT 1347
+#define ER_NONUPDATEABLE_COLUMN 1348
+#define ER_VIEW_SELECT_DERIVED 1349
+#define ER_VIEW_SELECT_PROCEDURE 1350
+#define ER_VIEW_SELECT_VARIABLE 1351
+#define ER_VIEW_SELECT_TMPTABLE 1352
+#define ER_VIEW_WRONG_LIST 1353
+#define ER_WARN_VIEW_MERGE 1354
+#define ER_WARN_VIEW_WITHOUT_KEY 1355
+#define ER_VIEW_INVALID 1356
+#define ER_SP_NO_DROP_SP 1357
+#define ER_SP_GOTO_IN_HNDLR 1358
+#define ER_ERROR_MESSAGES 359
diff --git a/include/sql_common.h b/include/sql_common.h
index 3f50008a922..cde53786f83 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -25,7 +25,6 @@ extern "C" {
MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
my_bool default_value, uint server_capabilities);
void free_rows(MYSQL_DATA *cur);
-void flush_use_result(MYSQL *mysql);
my_bool mysql_autenticate(MYSQL *mysql, const char *passwd);
void free_old_query(MYSQL *mysql);
void end_server(MYSQL *mysql);
diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c
index 6cefdb60956..964c396dd08 100644
--- a/innobase/buf/buf0flu.c
+++ b/innobase/buf/buf0flu.c
@@ -217,7 +217,9 @@ buf_flush_buffered_writes(void)
/*===========================*/
{
buf_block_t* block;
+ byte* write_buf;
ulint len;
+ ulint len2;
ulint i;
if (trx_doublewrite == NULL) {
@@ -244,6 +246,16 @@ buf_flush_buffered_writes(void)
block = trx_doublewrite->buf_block_arr[i];
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
+ if (mach_read_from_4(block->frame + FIL_PAGE_LSN + 4)
+ != mach_read_from_4(block->frame + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+" InnoDB: ERROR: The page to be written seems corrupt!\n"
+"InnoDB: The lsn fields do not match! Noticed in the buffer pool\n"
+"InnoDB: before posting to the doublewrite buffer.\n");
+ }
+
if (block->check_index_page_at_flush
&& !page_simple_validate(block->frame)) {
@@ -272,6 +284,19 @@ buf_flush_buffered_writes(void)
trx_doublewrite->block1, 0, len,
(void*)trx_doublewrite->write_buf, NULL);
+ write_buf = trx_doublewrite->write_buf;
+
+ for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len; len2 += UNIV_PAGE_SIZE) {
+ if (mach_read_from_4(write_buf + len2 + FIL_PAGE_LSN + 4)
+ != mach_read_from_4(write_buf + len2 + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+" InnoDB: ERROR: The page to be written seems corrupt!\n"
+"InnoDB: The lsn fields do not match! Noticed in the doublewrite block1.\n");
+ }
+ }
+
if (trx_doublewrite->first_free > TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
len = (trx_doublewrite->first_free
- TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) * UNIV_PAGE_SIZE;
@@ -282,6 +307,22 @@ buf_flush_buffered_writes(void)
(void*)(trx_doublewrite->write_buf
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE),
NULL);
+
+ write_buf = trx_doublewrite->write_buf
+ + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE;
+ for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len;
+ len2 += UNIV_PAGE_SIZE) {
+ if (mach_read_from_4(write_buf + len2
+ + FIL_PAGE_LSN + 4)
+ != mach_read_from_4(write_buf + len2
+ + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+" InnoDB: ERROR: The page to be written seems corrupt!\n"
+"InnoDB: The lsn fields do not match! Noticed in the doublewrite block2.\n");
+ }
+ }
}
/* Now flush the doublewrite buffer data to disk */
@@ -295,6 +336,18 @@ buf_flush_buffered_writes(void)
for (i = 0; i < trx_doublewrite->first_free; i++) {
block = trx_doublewrite->buf_block_arr[i];
+ if (mach_read_from_4(block->frame + FIL_PAGE_LSN + 4)
+ != mach_read_from_4(block->frame + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+" InnoDB: ERROR: The page to be written seems corrupt!\n"
+"InnoDB: The lsn fields do not match! Noticed in the buffer pool\n"
+"InnoDB: after posting and flushing the doublewrite buffer.\n"
+"InnoDB: Page buf fix count %lu, io fix %lu, state %lu\n",
+ (ulong)block->buf_fix_count, (ulong)block->io_fix,
+ (ulong)block->state);
+ }
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
@@ -412,6 +465,9 @@ buf_flush_write_block_low(
/*======================*/
buf_block_t* block) /* in: buffer block to write */
{
+#ifdef UNIV_LOG_DEBUG
+ static ibool univ_log_debug_warned;
+#endif /* UNIV_LOG_DEBUG */
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
#ifdef UNIV_IBUF_DEBUG
@@ -420,8 +476,13 @@ buf_flush_write_block_low(
ut_ad(!ut_dulint_is_zero(block->newest_modification));
#ifdef UNIV_LOG_DEBUG
- fputs("Warning: cannot force log to disk in the log debug version!\n",
- stderr);
+ if (!univ_log_debug_warned) {
+ univ_log_debug_warned = TRUE;
+ fputs(
+ "Warning: cannot force log to disk if UNIV_LOG_DEBUG is defined!\n"
+ "Crash recovery will not work!\n",
+ stderr);
+ }
#else
/* Force the log to the disk before writing the modified block */
log_write_up_to(block->newest_modification, LOG_WAIT_ALL_GROUPS, TRUE);
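
The buf0flu.c hunks add the same sanity check in three places: a page is flagged as suspect when the LSN in its header (FIL_PAGE_LSN) disagrees with the copy kept in the page trailer (FIL_PAGE_END_LSN_OLD_CHKSUM). A standalone sketch of that comparison; the offsets and page size below are the usual InnoDB values but are stated here as assumptions, not taken from this diff:

    /* Assumed constants: 16 KB pages, header LSN at byte 16, trailer field
       8 bytes before the end of the page; only the low 4 bytes of each
       LSN are compared, as in the diff above. */
    #define UNIV_PAGE_SIZE                16384
    #define FIL_PAGE_LSN                  16
    #define FIL_PAGE_END_LSN_OLD_CHKSUM   8

    /* Big-endian 4-byte read, like mach_read_from_4(). */
    static unsigned long read4(const unsigned char *b)
    {
      return ((unsigned long) b[0] << 24) | ((unsigned long) b[1] << 16) |
             ((unsigned long) b[2] << 8)  |  (unsigned long) b[3];
    }

    static int page_lsn_fields_match(const unsigned char *frame)
    {
      return read4(frame + FIL_PAGE_LSN + 4) ==
             read4(frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM + 4);
    }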
diff --git a/innobase/buf/buf0rea.c b/innobase/buf/buf0rea.c
index 71e885ff439..11107d777c8 100644
--- a/innobase/buf/buf0rea.c
+++ b/innobase/buf/buf0rea.c
@@ -629,6 +629,8 @@ buf_read_ibuf_merge_pages(
}
}
+ os_aio_simulated_wake_handler_threads();
+
/* Flush pages from the end of the LRU list if necessary */
buf_flush_free_margin();
diff --git a/innobase/dict/dict0crea.c b/innobase/dict/dict0crea.c
index fd8e02585ae..1e4d906b7b5 100644
--- a/innobase/dict/dict0crea.c
+++ b/innobase/dict/dict0crea.c
@@ -32,6 +32,7 @@ static
dtuple_t*
dict_create_sys_tables_tuple(
/*=========================*/
+ /* out: the tuple which should be inserted */
dict_table_t* table, /* in: table */
mem_heap_t* heap) /* in: memory heap from which the memory for
the built tuple is allocated */
diff --git a/innobase/dict/dict0dict.c b/innobase/dict/dict0dict.c
index 71cf908db4e..eeefd7bf1ae 100644
--- a/innobase/dict/dict0dict.c
+++ b/innobase/dict/dict0dict.c
@@ -682,6 +682,7 @@ dict_init(void)
rw_lock_set_level(&dict_operation_lock, SYNC_DICT_OPERATION);
dict_foreign_err_file = os_file_create_tmpfile();
+ ut_a(dict_foreign_err_file);
mutex_create(&dict_foreign_err_mutex);
mutex_set_level(&dict_foreign_err_mutex, SYNC_ANY_LATCH);
}
diff --git a/innobase/fil/fil0fil.c b/innobase/fil/fil0fil.c
index 59fbd6f785d..885738deae2 100644
--- a/innobase/fil/fil0fil.c
+++ b/innobase/fil/fil0fil.c
@@ -1513,6 +1513,8 @@ fil_decr_pending_ibuf_merges(
mutex_exit(&(system->mutex));
}
+/************************************************************
+Creates the database directory for a table if it does not exist yet. */
static
void
fil_create_directory_for_tablename(
diff --git a/innobase/include/dyn0dyn.h b/innobase/include/dyn0dyn.h
index 501fde05e90..abee62300e3 100644
--- a/innobase/include/dyn0dyn.h
+++ b/innobase/include/dyn0dyn.h
@@ -47,7 +47,8 @@ dyn_array_open(
/*===========*/
/* out: pointer to the buffer */
dyn_array_t* arr, /* in: dynamic array */
- ulint size); /* in: size in bytes of the buffer */
+ ulint size); /* in: size in bytes of the buffer; MUST be
+ smaller than DYN_ARRAY_DATA_SIZE! */
/*************************************************************************
Closes the buffer returned by dyn_array_open. */
UNIV_INLINE
diff --git a/innobase/include/mtr0log.h b/innobase/include/mtr0log.h
index 41be168a371..9c9c6f696e8 100644
--- a/innobase/include/mtr0log.h
+++ b/innobase/include/mtr0log.h
@@ -111,7 +111,8 @@ mlog_open(
/*======*/
/* out: buffer, NULL if log mode MTR_LOG_NONE */
mtr_t* mtr, /* in: mtr */
- ulint size); /* in: buffer size in bytes */
+ ulint size); /* in: buffer size in bytes; MUST be
+ smaller than DYN_ARRAY_DATA_SIZE! */
/************************************************************
Closes a buffer opened to mlog. */
UNIV_INLINE
diff --git a/innobase/include/mtr0log.ic b/innobase/include/mtr0log.ic
index aa3f945c202..08d9a6448eb 100644
--- a/innobase/include/mtr0log.ic
+++ b/innobase/include/mtr0log.ic
@@ -18,7 +18,8 @@ mlog_open(
/*======*/
/* out: buffer, NULL if log mode MTR_LOG_NONE */
mtr_t* mtr, /* in: mtr */
- ulint size) /* in: buffer size in bytes */
+ ulint size) /* in: buffer size in bytes; MUST be
+ smaller than DYN_ARRAY_DATA_SIZE! */
{
dyn_array_t* mlog;
diff --git a/innobase/include/os0file.h b/innobase/include/os0file.h
index 6549a3748df..f1647c47bce 100644
--- a/innobase/include/os0file.h
+++ b/innobase/include/os0file.h
@@ -169,12 +169,12 @@ void
os_io_init_simple(void);
/*===================*/
/***************************************************************************
-Creates a temporary file. In case of error, causes abnormal termination. */
+Creates a temporary file. */
FILE*
os_file_create_tmpfile(void);
/*========================*/
- /* out: temporary file handle (never NULL) */
+ /* out: temporary file handle (never NULL) */
/***************************************************************************
The os_file_opendir() function opens a directory stream corresponding to the
directory named by the dirname argument. The directory stream is positioned
diff --git a/innobase/include/page0page.ic b/innobase/include/page0page.ic
index e7c0f8ee07c..3d2bf3b090e 100644
--- a/innobase/include/page0page.ic
+++ b/innobase/include/page0page.ic
@@ -479,7 +479,20 @@ page_rec_get_next(
offs = rec_get_next_offs(rec);
- ut_a(offs < UNIV_PAGE_SIZE);
+ if (offs >= UNIV_PAGE_SIZE) {
+ fprintf(stderr,
+"InnoDB: Next record offset is nonsensical %lu in record at offset %lu\n",
+ (ulong)offs, (ulong)(rec - page));
+ fprintf(stderr,
+"\nInnoDB: rec address %lx, first buffer frame %lx\n"
+"InnoDB: buffer pool high end %lx, buf fix count %lu\n",
+ (ulong)rec, (ulong)buf_pool->frame_zero,
+ (ulong)buf_pool->high_end,
+ (ulong)buf_block_align(rec)->buf_fix_count);
+ buf_page_print(page);
+
+ ut_a(0);
+ }
if (offs == 0) {
diff --git a/innobase/include/srv0srv.h b/innobase/include/srv0srv.h
index c7ba39aaaf1..2e42c2f5036 100644
--- a/innobase/include/srv0srv.h
+++ b/innobase/include/srv0srv.h
@@ -42,6 +42,7 @@ extern char* srv_arch_dir;
#endif /* UNIV_LOG_ARCHIVE */
extern ibool srv_file_per_table;
+extern ibool srv_locks_unsafe_for_binlog;
extern ulint srv_n_data_files;
extern char** srv_data_file_names;
@@ -98,6 +99,8 @@ extern lint srv_conc_n_threads;
extern ibool srv_fast_shutdown;
+extern ibool srv_innodb_status;
+
extern ibool srv_use_doublewrite_buf;
extern ibool srv_set_thread_priorities;
diff --git a/innobase/include/srv0start.h b/innobase/include/srv0start.h
index 0074de537c3..75af1a212b4 100644
--- a/innobase/include/srv0start.h
+++ b/innobase/include/srv0start.h
@@ -64,15 +64,17 @@ innobase_start_or_create_for_mysql(void);
/* out: DB_SUCCESS or error code */
/********************************************************************
Shuts down the Innobase database. */
-
int
innobase_shutdown_for_mysql(void);
/*=============================*/
/* out: DB_SUCCESS or error code */
-
extern dulint srv_shutdown_lsn;
extern dulint srv_start_lsn;
+#ifdef __NETWARE__
+void set_panic_flag_for_netware(void);
+#endif
+
extern ulint srv_sizeof_trx_t_in_ha_innodb_cc;
extern ibool srv_is_being_started;
diff --git a/innobase/include/ut0dbg.h b/innobase/include/ut0dbg.h
index a155f68bd12..5f30a894874 100644
--- a/innobase/include/ut0dbg.h
+++ b/innobase/include/ut0dbg.h
@@ -22,7 +22,38 @@ extern ulint* ut_dbg_null_ptr;
extern const char* ut_dbg_msg_assert_fail;
extern const char* ut_dbg_msg_trap;
extern const char* ut_dbg_msg_stop;
-
+/* Have a graceful exit on NetWare rather than a segfault to avoid abends */
+#ifdef __NETWARE__
+extern ibool panic_shutdown;
+#define ut_a(EXPR) do {\
+ if (!((ulint)(EXPR) + ut_dbg_zero)) {\
+ ut_print_timestamp(stderr);\
+ fprintf(stderr, ut_dbg_msg_assert_fail,\
+ os_thread_pf(os_thread_get_curr_id()), __FILE__,\
+ (ulint)__LINE__);\
+ fputs("InnoDB: Failing assertion: " #EXPR "\n", stderr);\
+ fputs(ut_dbg_msg_trap, stderr);\
+ ut_dbg_stop_threads = TRUE;\
+ if (ut_dbg_stop_threads) {\
+ fprintf(stderr, ut_dbg_msg_stop,\
+ os_thread_pf(os_thread_get_curr_id()), __FILE__, (ulint)__LINE__);\
+ }\
+ if(!panic_shutdown){\
+ panic_shutdown = TRUE;\
+ innobase_shutdown_for_mysql();}\
+ exit(1);\
+ }\
+} while (0)
+#define ut_error do {\
+ ut_print_timestamp(stderr);\
+ fprintf(stderr, ut_dbg_msg_assert_fail,\
+ os_thread_pf(os_thread_get_curr_id()), __FILE__, (ulint)__LINE__);\
+ fprintf(stderr, ut_dbg_msg_trap);\
+ ut_dbg_stop_threads = TRUE;\
+ if(!panic_shutdown){panic_shutdown = TRUE;\
+ innobase_shutdown_for_mysql();}\
+} while (0)
+#else
#define ut_a(EXPR) do {\
if (!((ulint)(EXPR) + ut_dbg_zero)) {\
ut_print_timestamp(stderr);\
@@ -49,6 +80,7 @@ extern const char* ut_dbg_msg_stop;
ut_dbg_stop_threads = TRUE;\
if (*(ut_dbg_null_ptr)) ut_dbg_null_ptr = NULL;\
} while (0)
+#endif
#ifdef UNIV_DEBUG
#define ut_ad(EXPR) ut_a(EXPR)
diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c
index 92e8f224dea..c9c0cd109a9 100644
--- a/innobase/lock/lock0lock.c
+++ b/innobase/lock/lock0lock.c
@@ -509,6 +509,7 @@ lock_sys_create(
/* hash_create_mutexes(lock_sys->rec_hash, 2, SYNC_REC_LOCK); */
lock_latest_err_file = os_file_create_tmpfile();
+ ut_a(lock_latest_err_file);
}
/*************************************************************************
diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c
index 7e57efcf9e1..e5b0300239a 100644
--- a/innobase/log/log0recv.c
+++ b/innobase/log/log0recv.c
@@ -628,6 +628,9 @@ log_block_checksum_is_ok_or_old_format(
format of InnoDB version < 3.23.52 */
byte* block) /* in: pointer to a log block */
{
+#ifdef UNIV_LOG_DEBUG
+ return(TRUE);
+#endif /* UNIV_LOG_DEBUG */
if (log_block_calc_checksum(block) == log_block_get_checksum(block)) {
return(TRUE);
diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c
index d5ca8f927c6..392580eb570 100644
--- a/innobase/os/os0file.c
+++ b/innobase/os/os0file.c
@@ -478,22 +478,72 @@ os_io_init_simple(void)
}
}
+#ifndef UNIV_HOTBACKUP
+/*************************************************************************
+Creates a temporary file. This function is defined in ha_innodb.cc. */
+
+int
+innobase_mysql_tmpfile(void);
+/*========================*/
+ /* out: temporary file descriptor, or < 0 on error */
+#endif /* !UNIV_HOTBACKUP */
+
/***************************************************************************
-Creates a temporary file. In case of error, causes abnormal termination. */
+Creates a temporary file. */
FILE*
os_file_create_tmpfile(void)
/*========================*/
- /* out: temporary file handle (never NULL) */
+ /* out: temporary file handle, or NULL on error */
{
- FILE* file = tmpfile();
- if (file == NULL) {
+ FILE* file = NULL;
+ int fd = -1;
+#ifdef UNIV_HOTBACKUP
+ int tries;
+ for (tries = 10; tries--; ) {
+ char* name = tempnam(fil_path_to_mysql_datadir, "ib");
+ if (!name) {
+ break;
+ }
+
+ fd = open(name,
+# ifdef __WIN__
+ O_SEQUENTIAL | O_SHORT_LIVED | O_TEMPORARY |
+# endif /* __WIN__ */
+ O_CREAT | O_EXCL | O_RDWR,
+ S_IREAD | S_IWRITE);
+ if (fd >= 0) {
+# ifndef __WIN__
+ unlink(name);
+# endif /* !__WIN__ */
+ free(name);
+ break;
+ }
+
ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: unable to create temporary file\n",
- stderr);
- os_file_handle_error(NULL, "tmpfile");
- ut_error;
+ fprintf(stderr, " InnoDB: Warning: "
+ "unable to create temporary file %s, retrying\n",
+ name);
+ free(name);
+ }
+#else /* UNIV_HOTBACKUP */
+ fd = innobase_mysql_tmpfile();
+#endif /* UNIV_HOTBACKUP */
+
+ if (fd >= 0) {
+ file = fdopen(fd, "w+b");
+ }
+
+ if (!file) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Error: unable to create temporary file;"
+ " errno: %d\n", errno);
+ if (fd >= 0) {
+ close(fd);
+ }
}
+
return(file);
}
@@ -3623,6 +3673,9 @@ consecutive_loop:
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written seems corrupt!\n");
+ fprintf(stderr,
+"InnoDB: Writing a block of %lu bytes, currently writing at offset %lu\n",
+ (ulong)total_len, (ulong)len2);
buf_page_print(combined_buf + len2);
fprintf(stderr,
"InnoDB: ERROR: The page to be written seems corrupt!\n");
diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c
index 556c80c948d..2e8c3adf94f 100644
--- a/innobase/row/row0mysql.c
+++ b/innobase/row/row0mysql.c
@@ -2284,7 +2284,6 @@ row_drop_table_for_mysql(
"COMMIT WORK;\n"
"END;\n";
- ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
ut_a(name != NULL);
if (srv_created_new_raw) {
@@ -2338,21 +2337,6 @@ row_drop_table_for_mysql(
srv_print_innodb_table_monitor = FALSE;
}
- ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
- ut_a(name != NULL);
-
- if (srv_created_new_raw) {
- fputs(
- "InnoDB: A new raw disk partition was initialized or\n"
- "InnoDB: innodb_force_recovery is on: we do not allow\n"
- "InnoDB: database modifications by the user. Shut down\n"
- "InnoDB: mysqld and edit my.cnf so that newraw is replaced\n"
- "InnoDB: with raw, and innodb_force_... is removed.\n",
- stderr);
-
- return(DB_ERROR);
- }
-
quoted_name = mem_strdupq(name, '\'');
namelen = strlen(quoted_name);
sql = mem_alloc((sizeof str1) + (sizeof str2) - 2 + 1 + namelen);
@@ -3011,6 +2995,30 @@ row_rename_table_for_mysql(
NULL);
trx->error_state = DB_SUCCESS;
}
+ } else {
+ err = dict_load_foreigns(new_name);
+
+ if (err != DB_SUCCESS) {
+
+ ut_print_timestamp(stderr);
+
+ fputs(
+ " InnoDB: Error: in RENAME TABLE table ",
+ stderr);
+ ut_print_name(stderr, new_name);
+ fputs("\n"
+ "InnoDB: is referenced in foreign key constraints\n"
+ "InnoDB: which are not compatible with the new table definition.\n",
+ stderr);
+
+ ut_a(dict_table_rename_in_cache(table,
+ old_name, FALSE));
+
+ trx->error_state = DB_SUCCESS;
+ trx_general_rollback_for_mysql(trx, FALSE,
+ NULL);
+ trx->error_state = DB_SUCCESS;
+ }
}
}
funct_exit:
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index 9e8e9b82f2d..dc6694fc18c 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -631,10 +631,24 @@ row_sel_get_clust_rec(
if (!node->read_view) {
/* Try to place a lock on the index record */
-
+
+	/* If the innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+ if ( srv_locks_unsafe_for_binlog )
+ {
+ err = lock_clust_rec_read_check_and_lock(0, clust_rec,
+ index,node->row_lock_mode, LOCK_REC_NOT_GAP, thr);
+ }
+ else
+ {
err = lock_clust_rec_read_check_and_lock(0, clust_rec, index,
node->row_lock_mode, LOCK_ORDINARY, thr);
- if (err != DB_SUCCESS) {
+
+ }
+
+ if (err != DB_SUCCESS) {
return(err);
}
@@ -1184,9 +1198,23 @@ rec_loop:
search result set, resulting in the phantom problem. */
if (!consistent_read) {
+
+	/* If the innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+
+ if ( srv_locks_unsafe_for_binlog )
+ {
+ err = sel_set_rec_lock(page_rec_get_next(rec), index,
+ node->row_lock_mode, LOCK_REC_NOT_GAP, thr);
+ }
+ else
+ {
err = sel_set_rec_lock(page_rec_get_next(rec), index,
node->row_lock_mode, LOCK_ORDINARY, thr);
- if (err != DB_SUCCESS) {
+ }
+ if (err != DB_SUCCESS) {
/* Note that in this case we will store in pcur
the PREDECESSOR of the record we are waiting
the lock for */
@@ -1211,8 +1239,22 @@ rec_loop:
if (!consistent_read) {
/* Try to place a lock on the index record */
- err = sel_set_rec_lock(rec, index, node->row_lock_mode,
+	/* If the innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+
+ if ( srv_locks_unsafe_for_binlog )
+ {
+ err = sel_set_rec_lock(rec, index, node->row_lock_mode,
+ LOCK_REC_NOT_GAP, thr);
+ }
+ else
+ {
+ err = sel_set_rec_lock(rec, index, node->row_lock_mode,
LOCK_ORDINARY, thr);
+ }
+
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
@@ -3169,10 +3211,24 @@ rec_loop:
/* Try to place a lock on the index record */
- err = sel_set_rec_lock(rec, index,
+	/* If the innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+ if ( srv_locks_unsafe_for_binlog )
+ {
+ err = sel_set_rec_lock(rec, index,
+ prebuilt->select_lock_type,
+ LOCK_REC_NOT_GAP, thr);
+ }
+ else
+ {
+ err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_ORDINARY, thr);
- if (err != DB_SUCCESS) {
+ }
+
+ if (err != DB_SUCCESS) {
goto lock_wait_or_error;
}
@@ -3193,8 +3249,15 @@ rec_loop:
if (srv_force_recovery == 0 || moves_up == FALSE) {
ut_print_timestamp(stderr);
+ buf_page_print(buf_frame_align(rec));
+ fprintf(stderr,
+"\nInnoDB: rec address %lx, first buffer frame %lx\n"
+"InnoDB: buffer pool high end %lx, buf block fix count %lu\n",
+ (ulong)rec, (ulong)buf_pool->frame_zero,
+ (ulong)buf_pool->high_end,
+ (ulong)buf_block_align(rec)->buf_fix_count);
fprintf(stderr,
-" InnoDB: Index corruption: rec offs %lu next offs %lu, page no %lu,\n"
+"InnoDB: Index corruption: rec offs %lu next offs %lu, page no %lu,\n"
"InnoDB: ",
(ulong) (rec - buf_frame_align(rec)),
(ulong) next_offs,
@@ -3325,9 +3388,22 @@ rec_loop:
prebuilt->select_lock_type,
LOCK_REC_NOT_GAP, thr);
} else {
- err = sel_set_rec_lock(rec, index,
+		/* If the innodb_locks_unsafe_for_binlog option is used,
+ we lock only the record, i.e. next-key locking is
+ not used.
+ */
+ if ( srv_locks_unsafe_for_binlog )
+ {
+ err = sel_set_rec_lock(rec, index,
+ prebuilt->select_lock_type,
+ LOCK_REC_NOT_GAP, thr);
+ }
+ else
+ {
+ err = sel_set_rec_lock(rec, index,
prebuilt->select_lock_type,
LOCK_ORDINARY, thr);
+ }
}
if (err != DB_SUCCESS) {
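
All of the row0sel.c hunks above repeat the same decision: when
srv_locks_unsafe_for_binlog is set, take a plain record lock
(LOCK_REC_NOT_GAP) instead of a next-key lock (LOCK_ORDINARY), which covers
the record and the gap before it. A sketch of how that choice could be
factored into one place; this helper is illustrative only and not part of the
patch:

	/* Return the gap mode to use for read locks taken during a search.
	Hypothetical helper, not in the patch. */
	static ulint
	row_sel_lock_gap_mode(void)
	{
		if (srv_locks_unsafe_for_binlog) {
			/* Lock only the index record itself */
			return(LOCK_REC_NOT_GAP);
		}

		/* Next-key lock: the record plus the gap before it */
		return(LOCK_ORDINARY);
	}

	/* A call site would then read:
		err = sel_set_rec_lock(rec, index, node->row_lock_mode,
					row_sel_lock_gap_mode(), thr);
	*/
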
diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c
index 4bc5b0dc795..389cd5b779d 100644
--- a/innobase/srv/srv0srv.c
+++ b/innobase/srv/srv0srv.c
@@ -77,6 +77,10 @@ ibool srv_file_per_table = FALSE; /* store to its own file each table
created by an user; data dictionary
tables are in the system tablespace
0 */
+ibool	srv_locks_unsafe_for_binlog = FALSE; /* Place locks on records only,
+					     i.e. do not use next-key locking
+					     except for duplicate key checking
+					     and foreign key checking */
ulint srv_n_data_files = 0;
char** srv_data_file_names = NULL;
ulint* srv_data_file_sizes = NULL; /* size in database pages */
@@ -238,6 +242,9 @@ merge to completion before shutdown */
ibool srv_fast_shutdown = FALSE;
+/* Generate an innodb_status.<pid> file */
+ibool srv_innodb_status = FALSE;
+
ibool srv_use_doublewrite_buf = TRUE;
ibool srv_set_thread_priorities = TRUE;
diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c
index 74dd23e4252..4a0335086f0 100644
--- a/innobase/srv/srv0start.c
+++ b/innobase/srv/srv0start.c
@@ -1112,16 +1112,24 @@ NetWare. */
mutex_create(&srv_monitor_file_mutex);
mutex_set_level(&srv_monitor_file_mutex, SYNC_NO_ORDER_CHECK);
- srv_monitor_file_name = mem_alloc(
- strlen(fil_path_to_mysql_datadir) +
- 20 + sizeof "/innodb_status.");
- sprintf(srv_monitor_file_name, "%s/innodb_status.%lu",
- fil_path_to_mysql_datadir, os_proc_get_number());
- srv_monitor_file = fopen(srv_monitor_file_name, "w+");
- if (!srv_monitor_file) {
- fprintf(stderr, "InnoDB: unable to create %s: %s\n",
- srv_monitor_file_name, strerror(errno));
- return(DB_ERROR);
+ if (srv_innodb_status) {
+ srv_monitor_file_name = mem_alloc(
+ strlen(fil_path_to_mysql_datadir) +
+ 20 + sizeof "/innodb_status.");
+ sprintf(srv_monitor_file_name, "%s/innodb_status.%lu",
+ fil_path_to_mysql_datadir, os_proc_get_number());
+ srv_monitor_file = fopen(srv_monitor_file_name, "w+");
+ if (!srv_monitor_file) {
+ fprintf(stderr, "InnoDB: unable to create %s: %s\n",
+ srv_monitor_file_name, strerror(errno));
+ return(DB_ERROR);
+ }
+ } else {
+ srv_monitor_file_name = NULL;
+ srv_monitor_file = os_file_create_tmpfile();
+ if (!srv_monitor_file) {
+ return(DB_ERROR);
+ }
}
/* Restrict the maximum number of file i/o threads */
@@ -1177,6 +1185,7 @@ NetWare. */
for (i = 0; i < srv_n_file_io_threads; i++) {
n[i] = i;
+
os_thread_create(io_handler_thread, n + i, thread_ids + i);
}
@@ -1578,9 +1587,10 @@ NetWare. */
fprintf(stderr,
"InnoDB: You have now successfully upgraded to the multiple tablespaces\n"
-"InnoDB: format. You should NOT DOWNGRADE again to an earlier version of\n"
-"InnoDB: InnoDB! But if you absolutely need to downgrade, see section 4.6 of\n"
-"InnoDB: http://www.innodb.com/ibman.php for instructions.\n");
+"InnoDB: format. You should NOT DOWNGRADE to an earlier version of\n"
+"InnoDB: InnoDB! But if you absolutely need to downgrade, see\n"
+"InnoDB: http://dev.mysql.com/doc/mysql/en/Multiple_tablespaces.html\n"
+"InnoDB: for instructions.\n");
}
if (srv_force_recovery == 0) {
@@ -1606,7 +1616,9 @@ innobase_shutdown_for_mysql(void)
/* out: DB_SUCCESS or error code */
{
ulint i;
-
+#ifdef __NETWARE__
+ extern ibool panic_shutdown;
+#endif
if (!srv_was_started) {
if (srv_is_being_started) {
ut_print_timestamp(stderr);
@@ -1623,8 +1635,11 @@ innobase_shutdown_for_mysql(void)
The step 1 is the real InnoDB shutdown. The remaining steps 2 - ...
just free data structures after the shutdown. */
+#ifdef __NETWARE__
+ if(!panic_shutdown)
+#endif
logs_empty_and_mark_files_at_shutdown();
-
+
if (srv_conc_n_threads != 0) {
fprintf(stderr,
"InnoDB: Warning: query counter shows %ld queries still\n"
@@ -1687,15 +1702,16 @@ innobase_shutdown_for_mysql(void)
if (srv_monitor_file) {
fclose(srv_monitor_file);
srv_monitor_file = 0;
- unlink(srv_monitor_file_name);
- mem_free(srv_monitor_file_name);
+ if (srv_monitor_file_name) {
+ unlink(srv_monitor_file_name);
+ mem_free(srv_monitor_file_name);
+ }
}
-
+
mutex_free(&srv_monitor_file_mutex);
/* 3. Free all InnoDB's own mutexes and the os_fast_mutexes inside
them */
-
sync_close();
/* 4. Free the os_conc_mutex and all os_events and os_mutexes */
@@ -1706,7 +1722,7 @@ innobase_shutdown_for_mysql(void)
/* 5. Free all allocated memory and the os_fast_mutex created in
ut0mem.c */
- ut_free_all_mem();
+ ut_free_all_mem();
if (os_thread_count != 0
|| os_event_count != 0
@@ -1736,3 +1752,11 @@ innobase_shutdown_for_mysql(void)
return((int) DB_SUCCESS);
}
+
+#ifdef __NETWARE__
+void set_panic_flag_for_netware()
+{
+ extern ibool panic_shutdown;
+ panic_shutdown = TRUE;
+}
+#endif
diff --git a/innobase/ut/ut0dbg.c b/innobase/ut/ut0dbg.c
index 65703ec1c86..2a0cfe1f13a 100644
--- a/innobase/ut/ut0dbg.c
+++ b/innobase/ut/ut0dbg.c
@@ -14,7 +14,12 @@ ulint ut_dbg_zero = 0;
/* If this is set to TRUE all threads will stop into the next assertion
and assert */
ibool ut_dbg_stop_threads = FALSE;
-
+#ifdef __NETWARE__
+ibool	panic_shutdown = FALSE;	/* Set to TRUE when an InnoDB assertion
+				failure or some other fatal error
+				condition on NetWare requires an
+				immediate shutdown. */
+#endif
/* Null pointer used to generate memory trap */
ulint* ut_dbg_null_ptr = NULL;
diff --git a/innobase/ut/ut0mem.c b/innobase/ut/ut0mem.c
index 9a591df9f77..09410e348c2 100644
--- a/innobase/ut/ut0mem.c
+++ b/innobase/ut/ut0mem.c
@@ -107,7 +107,13 @@ ut_malloc_low(
/* Make an intentional seg fault so that we get a stack
trace */
+ /* Intentional segfault on NetWare causes an abend. Avoid this
+ by graceful exit handling in ut_a(). */
+#if (!defined __NETWARE__)
if (*ut_mem_null_ptr) ut_mem_null_ptr = 0;
+#else
+ ut_a(0);
+#endif
}
if (set_to_zero) {
diff --git a/install-sh b/install-sh
index e9de23842dc..c1666c37407 100755
--- a/install-sh
+++ b/install-sh
@@ -43,7 +43,7 @@ mkdirprog="${MKDIRPROG-mkdir}"
transformbasename=""
transform_arg=""
-instcmd="$mvprog"
+instcmd="$cpprog"
chmodcmd="$chmodprog 0755"
chowncmd=""
chgrpcmd=""
diff --git a/libmysql/Makefile.am b/libmysql/Makefile.am
index 3e026fe589a..5c2dc9c7ba6 100644
--- a/libmysql/Makefile.am
+++ b/libmysql/Makefile.am
@@ -1,9 +1,12 @@
-# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+# Copyright (C) 2000-2004 MySQL AB
#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Library General Public
-# License as published by the Free Software Foundation; either
-# version 2 of the License, or (at your option) any later version.
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 GNU General Public License as
+# published by the Free Software Foundation.
+#
+# There are special exceptions to the terms and conditions of the GPL as it
+# is applied to this software. View the full text of the exception in file
+# EXCEPTIONS-CLIENT in the directory of this software distribution.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -14,13 +17,13 @@
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA
-
+#
# This file is public domain and comes with NO WARRANTY of any kind
target = libmysqlclient.la
target_defs = -DUNDEF_THREADS_HACK -DDONT_USE_RAID @LIB_EXTRA_CCFLAGS@
LIBS = @CLIENT_LIBS@
-INCLUDES = -I$(top_srcdir)/include $(openssl_includes)
+INCLUDES = -I$(top_srcdir)/include $(openssl_includes) @ZLIB_INCLUDES@
include $(srcdir)/Makefile.shared
diff --git a/libmysql/Makefile.shared b/libmysql/Makefile.shared
index b073155f02b..389e8e9ff34 100644
--- a/libmysql/Makefile.shared
+++ b/libmysql/Makefile.shared
@@ -1,21 +1,24 @@
-## Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-##
-## This library is free software; you can redistribute it and/or
-## modify it under the terms of the GNU Library General Public
-## License as published by the Free Software Foundation; either
-## version 2 of the License, or (at your option) any later version.
-##
-## This library is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-## Library General Public License for more details.
-##
-## You should have received a copy of the GNU Library General Public
-## License along with this library; if not, write to the Free
-## Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-## MA 02111-1307, USA
-##
-## This file is public domain and comes with NO WARRANTY of any kind
+# Copyright (C) 2000-2004 MySQL AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 GNU General Public License as
+# published by the Free Software Foundation.
+#
+# There are special exceptions to the terms and conditions of the GPL as it
+# is applied to this software. View the full text of the exception in file
+# EXCEPTIONS-CLIENT in the directory of this software distribution.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Library General Public License for more details.
+#
+# You should have received a copy of the GNU Library General Public
+# License along with this library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA
+#
+# This file is public domain and comes with NO WARRANTY of any kind
MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
diff --git a/libmysql/client_settings.h b/libmysql/client_settings.h
index 1c1ff9bac10..a29f52ce366 100644
--- a/libmysql/client_settings.h
+++ b/libmysql/client_settings.h
@@ -18,7 +18,7 @@ extern uint mysql_port;
extern my_string mysql_unix_port;
#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | \
- CLIENT_LOCAL_FILES | CLIENT_TRANSACTIONS | \
+ CLIENT_TRANSACTIONS | \
CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION)
sig_handler pipe_sig_handler(int sig __attribute__((unused)));
diff --git a/libmysql/conf_to_src.c b/libmysql/conf_to_src.c
index 8d931309abb..785e3cad4c1 100644
--- a/libmysql/conf_to_src.c
+++ b/libmysql/conf_to_src.c
@@ -1,9 +1,12 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ the Free Software Foundation.
+
+ There are special exceptions to the terms and conditions of the GPL as it
+ is applied to this software. View the full text of the exception in file
+ EXCEPTIONS-CLIENT in the directory of this software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/libmysql/dll.c b/libmysql/dll.c
index e9334d68a0c..b0e4b9cab3b 100644
--- a/libmysql/dll.c
+++ b/libmysql/dll.c
@@ -1,9 +1,12 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ the Free Software Foundation.
+
+ There are special exceptions to the terms and conditions of the GPL as it
+ is applied to this software. View the full text of the exception in file
+ EXCEPTIONS-CLIENT in the directory of this software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c
index 2b941470fc3..82040100ded 100644
--- a/libmysql/errmsg.c
+++ b/libmysql/errmsg.c
@@ -1,9 +1,12 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ the Free Software Foundation.
+
+ There are special exceptions to the terms and conditions of the GPL as it
+ is applied to this software. View the full text of the exception in file
+ EXCEPTIONS-CLIENT in the directory of this software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/libmysql/get_password.c b/libmysql/get_password.c
index 0e3b2dcb0ae..e55e77320f0 100644
--- a/libmysql/get_password.c
+++ b/libmysql/get_password.c
@@ -1,9 +1,12 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ the Free Software Foundation.
+
+ There are special exceptions to the terms and conditions of the GPL as it
+ is applied to this software. View the full text of the exception in file
+ EXCEPTIONS-CLIENT in the directory of this software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index a93a89f797f..fcea9288283 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -1,9 +1,12 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ the Free Software Foundation.
+
+ There are special exceptions to the terms and conditions of the GPL as it
+ is applied to this software. View the full text of the exception in file
+ EXCEPTIONS-CLIENT in the directory of this software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -1288,7 +1291,7 @@ mysql_drop_db(MYSQL *mysql, const char *db)
int STDCALL
-mysql_shutdown(MYSQL *mysql, enum enum_shutdown_level shutdown_level)
+mysql_shutdown(MYSQL *mysql, enum mysql_enum_shutdown_level shutdown_level)
{
uchar level[1];
DBUG_ENTER("mysql_shutdown");
@@ -1995,7 +1998,7 @@ mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, ulong length)
}
/*
- alloc_root will return valid address even in case param_count
+ alloc_root will return valid address even in case when param_count
and field_count are zero. Thus we should never rely on stmt->bind
or stmt->params when checking for existence of placeholders or
result set.
@@ -2092,12 +2095,6 @@ static void update_stmt_fields(MYSQL_STMT *stmt)
mysql_stmt_result_metadata()
stmt statement handle
- RETURN
- NULL statement contains no result set or out of memory.
- In the latter case you can retreive error message
- with mysql_stmt_error.
- MYSQL_RES a result set with no rows
-
DESCRIPTION
This function should be used after mysql_stmt_execute().
You can safely check that prepared statement has a result set by calling
@@ -2111,6 +2108,12 @@ static void update_stmt_fields(MYSQL_STMT *stmt)
mysql_fetch_field_direct, mysql_fetch_fields, mysql_field_seek.
- free returned MYSQL_RES structure with mysql_free_result.
- proceed to binding of output parameters.
+
+ RETURN
+ NULL statement contains no result set or out of memory.
+          In the latter case you can retrieve the error message
+ with mysql_stmt_error.
+ MYSQL_RES a result set with no rows
*/
MYSQL_RES * STDCALL
@@ -2195,11 +2198,11 @@ static void store_param_type(char **pos, MYSQL_BIND *param)
param MySQL bind param
DESCRIPTION
- These funtions are invoked from mysql_stmt_execute by
- MYSQL_BIND::store_param_func pointer. This pointer is set once per many
- executions in mysql_stmt_bind_param. The caller must ensure that network
- buffer have enough capacity to store parameter (MYSQL_BIND::buffer_length
- contains needed number of bytes).
+    These functions are invoked from mysql_stmt_execute() via the
+    MYSQL_BIND::store_param_func pointer. This pointer is set once per
+    many executions in mysql_stmt_bind_param(). The caller must ensure
+    that the network buffer has enough capacity to store the parameter
+    (MYSQL_BIND::buffer_length contains the needed number of bytes).
*/
static void store_param_tinyint(NET *net, MYSQL_BIND *param)
@@ -2762,7 +2765,7 @@ int STDCALL mysql_stmt_execute(MYSQL_STMT *stmt)
example a table used in the query was altered.
Note, that now (4.1.3) we always send metadata in reply to
COM_EXECUTE (even if it is not necessary), so either this or
- previous always branch works.
+ previous branch always works.
TODO: send metadata only when it's really necessary and add a warning
'Metadata changed' when it's sent twice.
*/
@@ -2845,19 +2848,171 @@ static my_bool int_is_null_false= 0;
/*
- Setup the input parameter data buffers from application
+ Set up input data buffers for a statement.
SYNOPSIS
mysql_stmt_bind_param()
stmt statement handle
The statement must be prepared with mysql_stmt_prepare().
bind Array of mysql_stmt_param_count() bind parameters.
+                This function doesn't check that the size of this argument
+                is >= mysql_stmt_param_count(): it's the user's responsibility.
+
+ DESCRIPTION
+ Use this call after mysql_stmt_prepare() to bind user variables to
+ placeholders.
+    Each element of the bind array stands for a placeholder. Placeholders
+    are counted from 0.  For example, the statement
+    'INSERT INTO t (a, b) VALUES (?, ?)'
+    contains two placeholders, and for such a statement you should supply
+    a bind array of two elements (MYSQL_BIND bind[2]).
+
+    By properly initializing the bind array you can bind virtually any
+    C language type to the statement's placeholders:
+    First, it's strongly recommended to always zero-initialize the entire
+    bind structure before setting its members. This will both shorten
+    your application code and make it robust to future extensions of
+    the MYSQL_BIND structure.
+    Then you need to assign the typecode of your application buffer to
+    MYSQL_BIND::buffer_type. The following typecodes with their
+    correspondence to C language types are supported:
+ MYSQL_TYPE_TINY for 8-bit integer variables. Normally it's
+ 'signed char' and 'unsigned char';
+ MYSQL_TYPE_SHORT for 16-bit signed and unsigned variables. This
+ is usually 'short' and 'unsigned short';
+ MYSQL_TYPE_LONG for 32-bit signed and unsigned variables. It
+ corresponds to 'int' and 'unsigned int' on
+                        the vast majority of platforms. On IA-32 and some
+ other 32-bit systems you can also use 'long'
+ here;
+ MYSQL_TYPE_LONGLONG 64-bit signed or unsigned integer. Stands for
+ '[unsigned] long long' on most platforms;
+ MYSQL_TYPE_FLOAT 32-bit floating point type, 'float' on most
+ systems;
+ MYSQL_TYPE_DOUBLE 64-bit floating point type, 'double' on most
+ systems;
+ MYSQL_TYPE_TIME broken-down time stored in MYSQL_TIME
+ structure
+ MYSQL_TYPE_DATE date stored in MYSQL_TIME structure
+    MYSQL_TYPE_DATETIME datetime stored in MYSQL_TIME structure. See
+                        more on how to use these types for sending
+                        dates and times below;
+ MYSQL_TYPE_STRING character string, assumed to be in
+                        character-set-client. If the character set of the
+                        client is not equal to the character set of the
+                        column, the value for this placeholder will be
+                        converted to the destination character set before
+                        the insert.
+ MYSQL_TYPE_BLOB sequence of bytes. This sequence is assumed to
+ be in binary character set (which is the same
+ as no particular character set), and is never
+ converted to any other character set. See also
+ notes about supplying string/blob length
+ below.
+ MYSQL_TYPE_NULL special typecode for binding nulls.
+ These C/C++ types are not supported yet by the API: long double,
+ bool.
+
+    As you can see from the list above, it's the responsibility of the
+    application programmer to ensure that the chosen typecode properly
+    corresponds to the host language type. For example, on all platforms
+    where we build MySQL packages (as of MySQL 4.1.4) int is a 32-bit
+    type. So for int you can always assume that the proper typecode is
+    MYSQL_TYPE_LONG (however odd it sounds, the name is a legacy of the
+    old MySQL API). By contrast, sizeof(long) can be 4 or 8 bytes,
+    depending on the platform.
+
+ TODO: provide client typedefs for each integer and floating point
+    typecode, i.e. int8, uint8, float32, etc.
+
+    Once the typecode is set, it's necessary to assign MYSQL_BIND::buffer
+    to point to a buffer of the given type. Finally, additional actions
+    may be taken for some types or use cases:
+
+ Binding integer types.
+ For integer types you might also need to set MYSQL_BIND::is_unsigned
+ member. Set it to TRUE when binding unsigned char, unsigned short,
+ unsigned int, unsigned long, unsigned long long.
+
+ Binding floating point types.
+ For floating point types you just need to set
+ MYSQL_BIND::buffer_type and MYSQL_BIND::buffer. The rest of the
+ members should be zero-initialized.
+
+ Binding NULLs.
+ You might have a column always NULL, never NULL, or sometimes NULL.
+ For an always NULL column set MYSQL_BIND::buffer_type to
+ MYSQL_TYPE_NULL. The rest of the members just need to be
+ zero-initialized. For never NULL columns set MYSQL_BIND::is_null to
+ 0, or this has already been done if you zero-initialized the entire
+    structure.  If you set MYSQL_BIND::is_null to point to an
+ application buffer of type 'my_bool', then this buffer will be
+ checked on each execution: this way you can set the buffer to TRUE,
+ or any non-0 value for NULLs, and to FALSE or 0 for not NULL data.
+
+ Binding text strings and sequences of bytes.
+ For strings, in addition to MYSQL_BIND::buffer_type and
+ MYSQL_BIND::buffer you need to set MYSQL_BIND::length or
+ MYSQL_BIND::buffer_length.
+    If 'length' is set, 'buffer_length' is ignored. The 'buffer_length'
+    member should be used when the size of the string doesn't change
+    between executions. If you want to vary the length for each value,
+    set 'length' to point to an application buffer of type 'unsigned long'
+    and set this long to the length of the string before each
+    mysql_stmt_execute().
+
+ Binding dates and times.
+    For binding dates and times the prepared statements API provides
+    clients with the MYSQL_TIME structure. A pointer to an instance of
+    this structure should be assigned to MYSQL_BIND::buffer whenever the
+    MYSQL_TYPE_TIME, MYSQL_TYPE_DATE or MYSQL_TYPE_DATETIME typecodes
+    are used.  When the typecode is MYSQL_TYPE_TIME, only the members
+    'hour', 'minute', 'second' and 'neg' (is the time offset negative)
+    are used; only these members will be sent to the server.
+    MYSQL_TYPE_DATE implies use of 'year', 'month', 'day', 'neg'.
+    MYSQL_TYPE_DATETIME utilizes both parts of the MYSQL_TIME structure.
+    You don't have to set the MYSQL_TIME::time_type member: it's not
+    used when sending data to the server; the typecode information is
+    enough.  The 'second_part' member can hold the microsecond part of
+    a time value, but for now it's only supported at the protocol level:
+    you can't store microseconds in a column, or use them in temporal
+    calculations. However, if you send a time value with a microsecond
+    part for a 'SELECT ?' statement, you'll get it back unchanged.
+
+ Data conversion.
+    If conversion from the host language type to the data representation
+    corresponding to the SQL type is required, it's done on the server.
+    Data truncation is possible when the conversion is lossy. For example,
+    if you supply a MYSQL_TYPE_DATETIME value outside the valid range of
+    the SQL type TIMESTAMP, the same conversion is applied as if the
+    value had been sent as a string in the old protocol.
+ TODO: document how the server will behave in case of truncation/data
+ loss.
+
+    After the variables are bound, you can repeatedly set/change their
+ values and mysql_stmt_execute() the statement.
+
+ See also: mysql_stmt_send_long_data() for sending long text/blob
+ data in pieces, examples in tests/client_test.c.
+    Next steps you might want to take:
+    - execute the statement with mysql_stmt_execute(),
+    - reset the statement using mysql_stmt_reset() or reprepare it with
+    another query using mysql_stmt_prepare(),
+    - close the statement with mysql_stmt_close().
+
+ IMPLEMENTATION
+    The function copies the given bind array to internal storage of the
+    statement, and sets up typecode-specific handlers to perform
+    serialization of the bound data. This means that although you don't
+    need to call this routine after each assignment to the bind buffers,
+    you do need to call it each time you change parameter typecodes or
+    other members of the MYSQL_BIND array.
+    This is a purely local call. Data types of the client buffers are sent
+    along with the buffers' data at the first execution of the statement.
RETURN
0 success
1 error, can be retrieved with mysql_stmt_error.
- Note, that this function doesn't check that size of MYSQL_BIND
- array is >= mysql_stmt_field_count(),
*/
my_bool STDCALL mysql_stmt_bind_param(MYSQL_STMT *stmt, MYSQL_BIND *bind)
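
A compact usage sketch of the call sequence described in the comment above;
the table, column names and the omission of error handling are illustrative
only and not taken from the patch:

  #include <string.h>
  #include <mysql.h>

  void insert_example(MYSQL *mysql)
  {
    MYSQL_STMT    *stmt= mysql_stmt_init(mysql);
    MYSQL_BIND    bind[2];
    int           id= 10;
    char          name[]= "abc";
    unsigned long name_length= strlen(name);
    const char    *query= "INSERT INTO t (a, b) VALUES (?, ?)";

    mysql_stmt_prepare(stmt, query, strlen(query));

    /* Zero-initialize the whole array, as recommended above */
    memset(bind, 0, sizeof(bind));

    bind[0].buffer_type= MYSQL_TYPE_LONG;     /* 32-bit int placeholder */
    bind[0].buffer= (char*) &id;

    bind[1].buffer_type= MYSQL_TYPE_STRING;   /* character string placeholder */
    bind[1].buffer= name;
    bind[1].length= &name_length;             /* length may vary per execution */

    mysql_stmt_bind_param(stmt, bind);
    mysql_stmt_execute(stmt);                 /* can be repeated with new values */
    mysql_stmt_close(stmt);
  }
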
@@ -3040,10 +3195,7 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number,
if (param->buffer_type < MYSQL_TYPE_TINY_BLOB ||
param->buffer_type > MYSQL_TYPE_STRING)
{
- /*
- Long data handling should be used only for string/binary
- types only
- */
+ /* Long data handling should be used only for string/binary types */
strmov(stmt->sqlstate, unknown_sqlstate);
sprintf(stmt->last_error, ER(stmt->last_errno= CR_INVALID_BUFFER_USE),
param->param_number);
@@ -3084,12 +3236,6 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number,
Fetch and conversion of result set rows (binary protocol).
*********************************************************************/
-static void set_zero_time(MYSQL_TIME *tm)
-{
- bzero((void *)tm, sizeof(*tm));
-}
-
-
/*
Read date, (time, datetime) value from network buffer and store it
in MYSQL_TIME structure.
@@ -3110,349 +3256,412 @@ static void set_zero_time(MYSQL_TIME *tm)
static uint read_binary_time(MYSQL_TIME *tm, uchar **pos)
{
- uchar *to;
uint length;
/* net_field_length will set pos to the first byte of data */
if (!(length= net_field_length(pos)))
- {
set_zero_time(tm);
- return 0;
- }
-
- to= *pos;
- tm->neg= (bool) to[0];
+ else
+ {
+ uchar *to= *pos;
+ tm->neg= (bool) to[0];
- tm->day= (ulong) sint4korr(to+1);
- tm->hour= (uint) to[5];
- tm->minute= (uint) to[6];
- tm->second= (uint) to[7];
- tm->second_part= (length > 8) ? (ulong) sint4korr(to+8) : 0;
+ tm->day= (ulong) sint4korr(to+1);
+ tm->hour= (uint) to[5];
+ tm->minute= (uint) to[6];
+ tm->second= (uint) to[7];
+ tm->second_part= (length > 8) ? (ulong) sint4korr(to+8) : 0;
- tm->year= tm->month= 0;
+ tm->year= tm->month= 0;
+ tm->time_type= MYSQL_TIMESTAMP_TIME;
+ }
return length;
}
static uint read_binary_datetime(MYSQL_TIME *tm, uchar **pos)
{
- uchar *to;
uint length;
if (!(length= net_field_length(pos)))
- {
set_zero_time(tm);
- return 0;
- }
-
- to= *pos;
+ else
+ {
+ uchar *to= *pos;
- tm->neg= 0;
- tm->year= (uint) sint2korr(to);
- tm->month= (uint) to[2];
- tm->day= (uint) to[3];
+ tm->neg= 0;
+ tm->year= (uint) sint2korr(to);
+ tm->month= (uint) to[2];
+ tm->day= (uint) to[3];
- if (length > 4)
- {
- tm->hour= (uint) to[4];
- tm->minute= (uint) to[5];
- tm->second= (uint) to[6];
+ if (length > 4)
+ {
+ tm->hour= (uint) to[4];
+ tm->minute= (uint) to[5];
+ tm->second= (uint) to[6];
+ }
+ else
+ tm->hour= tm->minute= tm->second= 0;
+ tm->second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0;
+ tm->time_type= MYSQL_TIMESTAMP_DATETIME;
}
- else
- tm->hour= tm->minute= tm->second= 0;
- tm->second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0;
return length;
}
static uint read_binary_date(MYSQL_TIME *tm, uchar **pos)
{
- uchar *to;
uint length;
if (!(length= net_field_length(pos)))
- {
set_zero_time(tm);
- return 0;
- }
-
- to= *pos;
- tm->year = (uint) sint2korr(to);
- tm->month= (uint) to[2];
- tm->day= (uint) to[3];
+ else
+ {
+ uchar *to= *pos;
+ tm->year = (uint) sint2korr(to);
+ tm->month= (uint) to[2];
+ tm->day= (uint) to[3];
- tm->hour= tm->minute= tm->second= 0;
- tm->second_part= 0;
- tm->neg= 0;
+ tm->hour= tm->minute= tm->second= 0;
+ tm->second_part= 0;
+ tm->neg= 0;
+ tm->time_type= MYSQL_TIMESTAMP_DATE;
+ }
return length;
}
-/* Convert integer value to client buffer type. */
+/*
+ Convert string to supplied buffer of any type.
+
+ SYNOPSIS
+ fetch_string_with_conversion()
+ param output buffer descriptor
+ value column data
+ length data length
+*/
-static void send_data_long(MYSQL_BIND *param, MYSQL_FIELD *field,
- longlong value)
+static void fetch_string_with_conversion(MYSQL_BIND *param, char *value,
+ uint length)
{
char *buffer= (char *)param->buffer;
- uint field_is_unsigned= (field->flags & UNSIGNED_FLAG);
+ int err= 0;
- switch (param->buffer_type) {
+ /*
+ This function should support all target buffer types: the rest
+ of conversion functions can delegate conversion to it.
+ */
+ switch(param->buffer_type) {
case MYSQL_TYPE_NULL: /* do nothing */
break;
case MYSQL_TYPE_TINY:
- *(uchar *)param->buffer= (uchar) value;
+ {
+ uchar data= (uchar) my_strntol(&my_charset_latin1, value, length, 10,
+ NULL, &err);
+ *buffer= data;
break;
+ }
case MYSQL_TYPE_SHORT:
- shortstore(buffer, value);
+ {
+ short data= (short) my_strntol(&my_charset_latin1, value, length, 10,
+ NULL, &err);
+ shortstore(buffer, data);
break;
+ }
case MYSQL_TYPE_LONG:
- longstore(buffer, value);
+ {
+ int32 data= (int32)my_strntol(&my_charset_latin1, value, length, 10,
+ NULL, &err);
+ longstore(buffer, data);
break;
+ }
case MYSQL_TYPE_LONGLONG:
- longlongstore(buffer, value);
+ {
+ longlong data= my_strntoll(&my_charset_latin1, value, length, 10,
+ NULL, &err);
+ longlongstore(buffer, data);
break;
+ }
case MYSQL_TYPE_FLOAT:
{
- float data= (field_is_unsigned ? (float) ulonglong2double(value) :
- (float) value);
+ float data = (float) my_strntod(&my_charset_latin1, value, length,
+ NULL, &err);
floatstore(buffer, data);
break;
}
case MYSQL_TYPE_DOUBLE:
{
- double data= (field_is_unsigned ? ulonglong2double(value) :
- (double) value);
+ double data= my_strntod(&my_charset_latin1, value, length, NULL, &err);
doublestore(buffer, data);
break;
}
+ case MYSQL_TYPE_TIME:
+ {
+ MYSQL_TIME *tm= (MYSQL_TIME *)buffer;
+ str_to_time(value, length, tm, &err);
+ break;
+ }
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_TIMESTAMP:
+ {
+ MYSQL_TIME *tm= (MYSQL_TIME *)buffer;
+ str_to_datetime(value, length, tm, 0, &err);
+ break;
+ }
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_BLOB:
default:
{
- char tmp[22]; /* Enough for longlong */
- uint length= (uint)(longlong10_to_str(value,(char *)tmp,
- field_is_unsigned ? 10: -10) -
- tmp);
- ulong copy_length= min((ulong)length-param->offset, param->buffer_length);
- if ((long) copy_length < 0)
- copy_length=0;
+ /*
+ Copy column data to the buffer taking into account offset,
+ data length and buffer length.
+ */
+ char *start= value + param->offset;
+ char *end= value + length;
+ ulong copy_length;
+ if (start < end)
+ {
+ copy_length= end - start;
+ /* We've got some data beyond offset: copy up to buffer_length bytes */
+ if (param->buffer_length)
+ memcpy(buffer, start, min(copy_length, param->buffer_length));
+ }
else
- memcpy(buffer, (char *)tmp+param->offset, copy_length);
+ copy_length= 0;
+ if (copy_length < param->buffer_length)
+ buffer[copy_length]= '\0';
+ /*
+ param->length will always contain length of entire column;
+        param->length will always contain the length of the entire column;
+        the number of copied bytes may be quite different:
*param->length= length;
-
- if (copy_length != param->buffer_length)
- *(buffer+copy_length)= '\0';
+ break;
}
}
}
-/* Convert Double to buffer types */
+/*
+ Convert integer value to client buffer of any type.
-static void send_data_double(MYSQL_BIND *param, double value)
+ SYNOPSIS
+ fetch_long_with_conversion()
+ param output buffer descriptor
+ field column metadata
+ value column data
+*/
+
+static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field,
+ longlong value)
{
char *buffer= (char *)param->buffer;
+ uint field_is_unsigned= field->flags & UNSIGNED_FLAG;
- switch(param->buffer_type) {
+ switch (param->buffer_type) {
case MYSQL_TYPE_NULL: /* do nothing */
break;
case MYSQL_TYPE_TINY:
- *buffer= (uchar)value;
+ *(uchar *)param->buffer= (uchar) value;
break;
case MYSQL_TYPE_SHORT:
- shortstore(buffer, (short)value);
+ shortstore(buffer, value);
break;
case MYSQL_TYPE_LONG:
- longstore(buffer, (long)value);
+ longstore(buffer, value);
break;
case MYSQL_TYPE_LONGLONG:
- {
- longlong val= (longlong) value;
- longlongstore(buffer, val);
+ longlongstore(buffer, value);
break;
- }
case MYSQL_TYPE_FLOAT:
{
- float data= (float) value;
+ float data= field_is_unsigned ? (float) ulonglong2double(value) :
+ (float) value;
floatstore(buffer, data);
break;
}
case MYSQL_TYPE_DOUBLE:
{
- doublestore(buffer, value);
+ double data= field_is_unsigned ? ulonglong2double(value) :
+ (double) value;
+ doublestore(buffer, data);
break;
}
default:
{
- char tmp[128];
- uint length= my_sprintf(tmp,(tmp,"%g",value));
- ulong copy_length= min((ulong)length-param->offset, param->buffer_length);
- if ((long) copy_length < 0)
- copy_length=0;
- else
- memcpy(buffer, (char *)tmp+param->offset, copy_length);
- *param->length= length;
-
- if (copy_length != param->buffer_length)
- *(buffer+copy_length)= '\0';
+ char buff[22]; /* Enough for longlong */
+ char *end= longlong10_to_str(value, buff, field_is_unsigned ? 10: -10);
+ /* Resort to string conversion which supports all typecodes */
+ fetch_string_with_conversion(param, buff, (uint) (end - buff));
+ break;
}
}
}
-/* Convert string to buffer types */
+/*
+ Convert double/float column to supplied buffer of any type.
+
+ SYNOPSIS
+ fetch_float_with_conversion()
+ param output buffer descriptor
+ field column metadata
+ value column data
+ width default number of significant digits used when converting
+ float/double to string
+*/
-static void send_data_str(MYSQL_BIND *param, char *value, uint length)
+static void fetch_float_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field,
+ double value, int width)
{
char *buffer= (char *)param->buffer;
- int err=0;
- switch(param->buffer_type) {
+ switch (param->buffer_type) {
case MYSQL_TYPE_NULL: /* do nothing */
break;
case MYSQL_TYPE_TINY:
- {
- uchar data= (uchar)my_strntol(&my_charset_latin1,value,length,10,NULL,
- &err);
- *buffer= data;
+ *buffer= (uchar)value;
break;
- }
case MYSQL_TYPE_SHORT:
- {
- short data= (short)my_strntol(&my_charset_latin1,value,length,10,NULL,
- &err);
- shortstore(buffer, data);
+ shortstore(buffer, (short)value);
break;
- }
case MYSQL_TYPE_LONG:
- {
- int32 data= (int32)my_strntol(&my_charset_latin1,value,length,10,NULL,
- &err);
- longstore(buffer, data);
+ longstore(buffer, (long)value);
break;
- }
case MYSQL_TYPE_LONGLONG:
{
- longlong data= my_strntoll(&my_charset_latin1,value,length,10,NULL,&err);
- longlongstore(buffer, data);
+ longlong val= (longlong) value;
+ longlongstore(buffer, val);
break;
}
case MYSQL_TYPE_FLOAT:
{
- float data = (float)my_strntod(&my_charset_latin1,value,length,NULL,&err);
+ float data= (float) value;
floatstore(buffer, data);
break;
}
case MYSQL_TYPE_DOUBLE:
{
- double data= my_strntod(&my_charset_latin1,value,length,NULL,&err);
- doublestore(buffer, data);
- break;
- }
- case MYSQL_TYPE_TIME:
- {
- int dummy;
- MYSQL_TIME *tm= (MYSQL_TIME *)buffer;
- str_to_time(value, length, tm, &dummy);
+ doublestore(buffer, value);
break;
}
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_DATETIME:
+ default:
{
- int dummy;
- MYSQL_TIME *tm= (MYSQL_TIME *)buffer;
- str_to_datetime(value, length, tm, 0, &dummy);
+ /*
+ Resort to fetch_string_with_conversion: this should handle
+ floating point -> string conversion nicely, honor all typecodes
+ and param->offset possibly set in mysql_stmt_fetch_column
+ */
+ char buff[331];
+ char *end;
+ /* TODO: move this to a header shared between client and server. */
+#define NOT_FIXED_DEC 31
+      if (field->decimals >= NOT_FIXED_DEC)
+#undef NOT_FIXED_DEC
+ {
+ sprintf(buff, "%-*.*g", (int) param->buffer_length, width, value);
+ end= strcend(buff, ' ');
+ *end= 0;
+ }
+ else
+ {
+ sprintf(buff, "%.*f", (int) field->decimals, value);
+ end= strend(buff);
+ }
+ fetch_string_with_conversion(param, buff, (uint) (end - buff));
break;
}
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- *param->length= length;
- length= min(length-param->offset, param->buffer_length);
- if ((long) length > 0)
- memcpy(buffer, value+param->offset, length);
- break;
- default:
- *param->length= length;
- length= min(length-param->offset, param->buffer_length);
- if ((long) length < 0)
- length= 0;
- else
- memcpy(buffer, value+param->offset, length);
- if (length != param->buffer_length)
- buffer[length]= '\0';
}
}
-static void send_data_time(MYSQL_BIND *param, MYSQL_TIME ltime,
- uint length)
+/*
+ Fetch time/date/datetime to supplied buffer of any type
+
+  SYNOPSIS
+    fetch_datetime_with_conversion()
+      param   output buffer descriptor
+      time    column data
+*/
+
+static void fetch_datetime_with_conversion(MYSQL_BIND *param,
+ MYSQL_TIME *time)
{
switch (param->buffer_type) {
case MYSQL_TYPE_NULL: /* do nothing */
break;
-
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_TIME:
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_TIMESTAMP:
- {
- MYSQL_TIME *tm= (MYSQL_TIME *)param->buffer;
-
- tm->year= ltime.year;
- tm->month= ltime.month;
- tm->day= ltime.day;
-
- tm->hour= ltime.hour;
- tm->minute= ltime.minute;
- tm->second= ltime.second;
-
- tm->second_part= ltime.second_part;
- tm->neg= ltime.neg;
+ /* XXX: should we copy only relevant members here? */
+ *(MYSQL_TIME *)(param->buffer)= *time;
break;
- }
default:
{
+ /*
+ Convert time value to string and delegate the rest to
+ fetch_string_with_conversion:
+ */
char buff[25];
+ uint length;
- if (!length)
- ltime.time_type= MYSQL_TIMESTAMP_NONE;
- switch (ltime.time_type) {
+ switch (time->time_type) {
case MYSQL_TIMESTAMP_DATE:
- length= my_sprintf(buff,(buff, "%04d-%02d-%02d", ltime.year,
- ltime.month,ltime.day));
+ length= my_sprintf(buff,(buff, "%04d-%02d-%02d",
+ time->year, time->month, time->day));
break;
case MYSQL_TIMESTAMP_DATETIME:
length= my_sprintf(buff,(buff, "%04d-%02d-%02d %02d:%02d:%02d",
- ltime.year,ltime.month,ltime.day,
- ltime.hour,ltime.minute,ltime.second));
+ time->year, time->month, time->day,
+ time->hour, time->minute, time->second));
break;
case MYSQL_TIMESTAMP_TIME:
length= my_sprintf(buff, (buff, "%02d:%02d:%02d",
- ltime.hour,ltime.minute,ltime.second));
+ time->hour, time->minute, time->second));
break;
default:
length= 0;
buff[0]='\0';
+ break;
}
- send_data_str(param, (char *)buff, length);
+ /* Resort to string conversion */
+ fetch_string_with_conversion(param, (char *)buff, length);
+ break;
}
}
}
-/* Fetch data to client buffers with conversion. */
+/*
+ Fetch and convert result set column to output buffer.
+
+ SYNOPSIS
+ fetch_result_with_conversion()
+ param output buffer descriptor
+ field column metadata
+ row points to a column of result set tuple in binary format
+
+ DESCRIPTION
+ This is a fallback implementation of column fetch used
+ if column and output buffer types do not match.
+ Increases tuple pointer to point at the next column within the
+ tuple.
+*/
-static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
+static void fetch_result_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field,
+ uchar **row)
{
ulong length;
enum enum_field_types field_type= field->type;
+ uint field_is_unsigned= field->flags & UNSIGNED_FLAG;
switch (field_type) {
case MYSQL_TYPE_TINY:
{
char value= (char) **row;
- uint field_is_unsigned= (field->flags & UNSIGNED_FLAG);
- longlong data= ((field_is_unsigned) ? (longlong) (unsigned char) value:
- (longlong) value);
- send_data_long(param, field, data);
+ longlong data= field_is_unsigned ? (longlong) (unsigned char) value :
+ (longlong) value;
+ fetch_long_with_conversion(param, field, data);
length= 1;
break;
}
@@ -3460,27 +3669,26 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
case MYSQL_TYPE_YEAR:
{
short value= sint2korr(*row);
- uint field_is_unsigned= (field->flags & UNSIGNED_FLAG);
- longlong data= ((field_is_unsigned) ? (longlong) (unsigned short) value:
- (longlong) value);
- send_data_long(param, field, data);
+ longlong data= field_is_unsigned ? (longlong) (unsigned short) value :
+ (longlong) value;
+ fetch_long_with_conversion(param, field, data);
length= 2;
break;
}
+  case MYSQL_TYPE_INT24: /* mediumint is sent as a 4-byte int */
case MYSQL_TYPE_LONG:
{
long value= sint4korr(*row);
- uint field_is_unsigned= (field->flags & UNSIGNED_FLAG);
- longlong data= ((field_is_unsigned) ? (longlong) (unsigned long) value:
- (longlong) value);
- send_data_long(param, field, data);
+ longlong data= field_is_unsigned ? (longlong) (unsigned long) value :
+ (longlong) value;
+ fetch_long_with_conversion(param, field, data);
length= 4;
break;
}
case MYSQL_TYPE_LONGLONG:
{
longlong value= (longlong)sint8korr(*row);
- send_data_long(param, field, value);
+ fetch_long_with_conversion(param, field, value);
length= 8;
break;
}
@@ -3488,7 +3696,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
{
float value;
float4get(value,*row);
- send_data_double(param,value);
+ fetch_float_with_conversion(param, field, value, FLT_DIG);
length= 4;
break;
}
@@ -3496,7 +3704,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
{
double value;
float8get(value,*row);
- send_data_double(param,value);
+ fetch_float_with_conversion(param, field, value, DBL_DIG);
length= 8;
break;
}
@@ -3505,8 +3713,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
MYSQL_TIME tm;
length= read_binary_date(&tm, row);
- tm.time_type= MYSQL_TIMESTAMP_DATE;
- send_data_time(param, tm, length);
+ fetch_datetime_with_conversion(param, &tm);
break;
}
case MYSQL_TYPE_TIME:
@@ -3514,8 +3721,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
MYSQL_TIME tm;
length= read_binary_time(&tm, row);
- tm.time_type= MYSQL_TIMESTAMP_TIME;
- send_data_time(param, tm, length);
+ fetch_datetime_with_conversion(param, &tm);
break;
}
case MYSQL_TYPE_DATETIME:
@@ -3524,13 +3730,12 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row)
MYSQL_TIME tm;
length= read_binary_datetime(&tm, row);
- tm.time_type= MYSQL_TIMESTAMP_DATETIME;
- send_data_time(param, tm, length);
+ fetch_datetime_with_conversion(param, &tm);
break;
}
default:
length= net_field_length(row);
- send_data_str(param,(char*) *row,length);
+ fetch_string_with_conversion(param, (char*) *row, length);
break;
}
*row+= length;
@@ -3675,7 +3880,6 @@ static void skip_result_string(MYSQL_BIND *param __attribute__((unused)),
}
-
/*
Setup the bind buffers for resultset processing
*/
@@ -3894,7 +4098,7 @@ static int stmt_fetch_row(MYSQL_STMT *stmt, uchar *row)
if (field->type == bind->buffer_type)
(*bind->fetch_result)(bind, &row);
else
- fetch_results(bind, field, &row);
+ fetch_result_with_conversion(bind, field, &row);
}
if (!((bit<<=1) & 255))
{
@@ -3986,7 +4190,7 @@ int STDCALL mysql_stmt_fetch_column(MYSQL_STMT *stmt, MYSQL_BIND *bind,
*bind->length= *param->length;
else
bind->length= &param->internal_length; /* Needed for fetch_result() */
- fetch_results(bind, field, &row);
+ fetch_result_with_conversion(bind, field, &row);
}
else
{
@@ -4250,7 +4454,7 @@ my_bool STDCALL mysql_stmt_free_result(MYSQL_STMT *stmt)
if (mysql->status != MYSQL_STATUS_READY)
{
/* There is a result set and it belongs to this statement */
- flush_use_result(mysql);
+ (*mysql->methods->flush_use_result)(mysql);
mysql->status= MYSQL_STATUS_READY;
}
}
@@ -4300,7 +4504,7 @@ my_bool STDCALL mysql_stmt_close(MYSQL_STMT *stmt)
Flush result set of the connection. If it does not belong
to this statement, set a warning.
*/
- flush_use_result(mysql);
+ (*mysql->methods->flush_use_result)(mysql);
if (mysql->unbuffered_fetch_owner)
*mysql->unbuffered_fetch_owner= TRUE;
mysql->status= MYSQL_STATUS_READY;
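
The fetch_*_with_conversion() family introduced above is the fallback used by
mysql_stmt_fetch() whenever the type of a bound output buffer differs from the
column type. A client-side sketch that exercises this path; the table, column
and query text are made up for illustration:

  #include <string.h>
  #include <mysql.h>

  void fetch_example(MYSQL *mysql)
  {
    MYSQL_STMT    *stmt= mysql_stmt_init(mysql);
    MYSQL_BIND    bind[1];
    int           value;                       /* numeric output buffer ... */
    const char    *query= "SELECT c FROM t";   /* ... for a VARCHAR column  */

    mysql_stmt_prepare(stmt, query, strlen(query));
    mysql_stmt_execute(stmt);

    memset(bind, 0, sizeof(bind));
    bind[0].buffer_type= MYSQL_TYPE_LONG;      /* differs from the column type */
    bind[0].buffer= (char*) &value;

    mysql_stmt_bind_result(stmt, bind);

    /* Each fetch converts the string column to an int via
       fetch_result_with_conversion()/fetch_string_with_conversion(). */
    while (mysql_stmt_fetch(stmt) == 0)
    {
      /* use value */
    }
    mysql_stmt_close(stmt);
  }
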
diff --git a/libmysql/manager.c b/libmysql/manager.c
index f1c8d045e6c..f030eb17889 100644
--- a/libmysql/manager.c
+++ b/libmysql/manager.c
@@ -1,9 +1,12 @@
-/* Copyright (C) 2000 MySQL AB
+/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ the Free Software Foundation.
+
+ There are special exceptions to the terms and conditions of the GPL as it
+ is applied to this software. View the full text of the exception in file
+ EXCEPTIONS-CLIENT in the directory of this software distribution.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/libmysql_r/Makefile.am b/libmysql_r/Makefile.am
index b75f65b6f78..939cb4c73dd 100644
--- a/libmysql_r/Makefile.am
+++ b/libmysql_r/Makefile.am
@@ -1,9 +1,12 @@
-# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+# Copyright (C) 2000-2004 MySQL AB
#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Library General Public
-# License as published by the Free Software Foundation; either
-# version 2 of the License, or (at your option) any later version.
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 GNU General Public License as
+# published by the Free Software Foundation.
+#
+# There are special exceptions to the terms and conditions of the GPL as it
+# is applied to this software. View the full text of the exception in file
+# EXCEPTIONS-CLIENT in the directory of this software distribution.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -14,14 +17,15 @@
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA
-
-
+#
+# This file is public domain and comes with NO WARRANTY of any kind
target = libmysqlclient_r.la
target_defs = -DDONT_USE_RAID -DMYSQL_CLIENT @LIB_EXTRA_CCFLAGS@
LIBS = @LIBS@ @openssl_libs@
-INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes)
+INCLUDES = @MT_INCLUDES@ \
+ -I$(top_srcdir)/include $(openssl_includes) @ZLIB_INCLUDES@
## automake barfs if you don't use $(srcdir) or $(top_srcdir) in include
include $(top_srcdir)/libmysql/Makefile.shared
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index 28f0d5111e6..f0cda9ae524 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -27,7 +27,7 @@ DEFS = -DEMBEDDED_LIBRARY -DMYSQL_SERVER \
-DSHAREDIR="\"$(MYSQLSHAREdir)\""
INCLUDES= @MT_INCLUDES@ @bdb_includes@ -I$(top_srcdir)/include \
-I$(top_srcdir)/sql -I$(top_srcdir)/regex \
- $(openssl_includes)
+ $(openssl_includes) @ZLIB_INCLUDES@
noinst_LIBRARIES = libmysqld_int.a
pkglib_LIBRARIES = libmysqld.a
diff --git a/libmysqld/examples/Makefile.am b/libmysqld/examples/Makefile.am
index b3db54d305a..2712e0dff48 100644
--- a/libmysqld/examples/Makefile.am
+++ b/libmysqld/examples/Makefile.am
@@ -16,7 +16,7 @@ DEFS = -DEMBEDDED_LIBRARY
INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include -I$(srcdir) \
-I$(top_srcdir) -I$(top_srcdir)/client $(openssl_includes)
LIBS = @LIBS@ @WRAPLIBS@ @CLIENT_LIBS@
-LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @bdb_libs_with_path@ @LIBDL@ $(CXXLDFLAGS)
+LDADD = @CLIENT_EXTRA_LDFLAGS@ ../libmysqld.a @innodb_system_libs@ @LIBDL@ $(CXXLDFLAGS)
mysqltest_LINK = $(CXXLINK)
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index b1c0ef57fac..fdc1acea09b 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -72,6 +72,11 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
THD *thd=(THD *) mysql->thd;
NET *net= &mysql->net;
+ if (thd->data)
+ {
+ free_rows(thd->data);
+ thd->data= 0;
+ }
/* Check that we are calling the client functions in right order */
if (mysql->status != MYSQL_STATUS_READY)
{
@@ -84,6 +89,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
thd->clear_error();
mysql->affected_rows= ~(my_ulonglong) 0;
mysql->field_count= 0;
+ net->last_errno= 0;
thd->store_globals(); // Fix if more than one connect
/*
@@ -107,17 +113,38 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
if (!skip_check)
result= thd->net.last_errno ? -1 : 0;
- embedded_get_error(mysql);
+ /*
+    If mysql->field_count is set, it means the parsing of the query was OK
+    and metadata was returned (see Protocol::send_fields).
+    In this case we postpone the error to be returned in mysql_stmt_store_result
+    (see emb_read_rows) to behave just like the standalone server.
+ */
+ if (!mysql->field_count)
+ embedded_get_error(mysql);
mysql->server_status= thd->server_status;
mysql->warning_count= ((THD*)mysql->thd)->total_warn_count;
return result;
}
+static void emb_flush_use_result(MYSQL *mysql)
+{
+ MYSQL_DATA *data= ((THD*)(mysql->thd))->data;
+
+ if (data)
+ {
+ free_rows(data);
+ ((THD*)(mysql->thd))->data= NULL;
+ }
+}
+
static MYSQL_DATA *
emb_read_rows(MYSQL *mysql, MYSQL_FIELD *mysql_fields __attribute__((unused)),
unsigned int fields __attribute__((unused)))
{
MYSQL_DATA *result= ((THD*)mysql->thd)->data;
+ embedded_get_error(mysql);
+ if (mysql->net.last_errno)
+ return NULL;
if (!result)
{
if (!(result=(MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA),
@@ -195,11 +222,6 @@ static int emb_stmt_execute(MYSQL_STMT *stmt)
THD *thd= (THD*)stmt->mysql->thd;
thd->client_param_count= stmt->param_count;
thd->client_params= stmt->params;
- if (thd->data)
- {
- free_rows(thd->data);
- thd->data= 0;
- }
if (emb_advanced_command(stmt->mysql, COM_EXECUTE,0,0,
(const char*)&stmt->stmt_id,sizeof(stmt->stmt_id),
1) ||
@@ -227,6 +249,9 @@ int emb_read_binary_rows(MYSQL_STMT *stmt)
int emb_unbuffered_fetch(MYSQL *mysql, char **row)
{
MYSQL_DATA *data= ((THD*)mysql->thd)->data;
+ embedded_get_error(mysql);
+ if (mysql->net.last_errno)
+ return mysql->net.last_errno;
if (!data || !data->data)
{
*row= NULL;
@@ -293,6 +318,7 @@ MYSQL_METHODS embedded_methods=
emb_read_rows,
emb_mysql_store_result,
emb_fetch_lengths,
+ emb_flush_use_result,
emb_list_fields,
emb_read_prepare_result,
emb_stmt_execute,
@@ -442,14 +468,6 @@ int init_embedded_server(int argc, char **argv, char **groups)
}
}
- /*
- Update mysqld variables from client variables if set
- The client variables are set also by get_one_option() in mysqld.cc
- */
- if (max_allowed_packet)
- global_system_variables.max_allowed_packet= max_allowed_packet;
- if (net_buffer_length)
- global_system_variables.net_buffer_length= net_buffer_length;
return 0;
}
@@ -478,18 +496,20 @@ void *create_embedded_thd(int client_flag, char *db)
if (thd->store_globals())
{
fprintf(stderr,"store_globals failed.\n");
- return NULL;
+ goto err;
}
thd->mysys_var= my_thread_var;
thd->dbug_thread_id= my_thread_id();
thd->thread_stack= (char*) &thd;
+/* TODO - add init_connect command execution */
+
thd->proc_info=0; // Remove 'login'
thd->command=COM_SLEEP;
thd->version=refresh_version;
thd->set_time();
- init_sql_alloc(&thd->mem_root,8192,8192);
+ thd->init_for_queries();
thd->client_capabilities= client_flag;
thd->db= db;
@@ -504,6 +524,9 @@ void *create_embedded_thd(int client_flag, char *db)
thread_count++;
return thd;
+err:
+ delete(thd);
+ return NULL;
}
#ifdef NO_EMBEDDED_ACCESS_CHECKS
@@ -609,9 +632,9 @@ bool Protocol::send_fields(List<Item> *list, int flags)
client_field->org_table_length= strlen(client_field->org_table);
client_field->charsetnr= server_field.charsetnr;
- client_field->catalog= strdup_root(field_alloc, "std");
+ client_field->catalog= strdup_root(field_alloc, "def");
client_field->catalog_length= 3;
-
+
if (INTERNAL_NUM_FIELD(client_field))
client_field->flags|= NUM_FLAG;
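
The lib_sql.cc changes above make the embedded server defer errors that occur
after column metadata has been sent: emb_advanced_command() no longer reports
them, and emb_read_rows()/emb_unbuffered_fetch() pick them up instead, so the
embedded library behaves like the standalone server. A sketch of where such an
error becomes visible to an embedded client; the query text is illustrative:

  #include <stdio.h>
  #include <mysql.h>

  void embedded_error_example(MYSQL *mysql)
  {
    MYSQL_RES *res;

    /* A query that fails while producing rows may still return success
       here once the column metadata has been sent. */
    if (mysql_query(mysql, "SELECT a FROM t"))
    {
      fprintf(stderr, "query failed: %s\n", mysql_error(mysql));
      return;
    }

    /* With field_count set, a deferred execution error is reported here. */
    if (!(res= mysql_store_result(mysql)))
    {
      fprintf(stderr, "store_result failed: %s\n", mysql_error(mysql));
      return;
    }
    mysql_free_result(res);
  }
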
diff --git a/libmysqld/libmysqld.rc b/libmysqld/libmysqld.rc
new file mode 100755
index 00000000000..5b6142faddf
--- /dev/null
+++ b/libmysqld/libmysqld.rc
@@ -0,0 +1,125 @@
+//Microsoft Developer Studio generated resource script.
+//
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include "afxres.h"
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+// English (U.S.) resources
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
+#ifdef _WIN32
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+#pragma code_page(1252)
+#endif //_WIN32
+
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+
+1 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "#include ""afxres.h""\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+
+#ifndef _MAC
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION 4,0,20,0
+ PRODUCTVERSION 4,0,20,0
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x9L
+#else
+ FILEFLAGS 0x8L
+#endif
+ FILEOS 0x40004L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+#ifdef _DEBUG
+ VALUE "Comments", "Embedded Server\0"
+ VALUE "CompanyName", "MySQL AB\0"
+ VALUE "FileDescription", "Embedded Server\0"
+ VALUE "FileVersion", "4.0.20\0"
+ VALUE "InternalName", "Embedded Server\0"
+ VALUE "LegalCopyright", "Copyright 2004\0"
+ VALUE "LegalTrademarks", "MySQL and MySQL AB\0"
+ VALUE "OriginalFilename", "libmysqld.dll debug\0"
+ VALUE "PrivateBuild", "libmysqld.dll debug \0"
+ VALUE "ProductName", "libmysqld.dll debug\0"
+ VALUE "ProductVersion", "4.0.20\0"
+ VALUE "SpecialBuild", "\0"
+#else
+ VALUE "Comments", "Embedded Server\0"
+ VALUE "CompanyName", "MySQL AB\0"
+ VALUE "FileDescription", "Embedded Server\0"
+ VALUE "FileVersion", "4.0.20\0"
+ VALUE "InternalName", "Embedded Server\0"
+ VALUE "LegalCopyright", "Copyright 2004\0"
+ VALUE "LegalTrademarks", "MySQL and MySQL AB\0"
+ VALUE "OriginalFilename", "libmysqld.dll release\0"
+ VALUE "PrivateBuild", "libmysqld.dll release \0"
+ VALUE "ProductName", "libmysqld.dll release\0"
+ VALUE "ProductVersion", "4.0.20\0"
+ VALUE "SpecialBuild", "\0"
+#endif
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
+
+#endif // !_MAC
+
+#endif // English (U.S.) resources
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////
+#endif // not APSTUDIO_INVOKED
+
diff --git a/libmysqld/resource.h b/libmysqld/resource.h
new file mode 100755
index 00000000000..f770fe490a6
--- /dev/null
+++ b/libmysqld/resource.h
@@ -0,0 +1,15 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Developer Studio generated include file.
+// Used by libmysqld.rc
+//
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE 101
+#define _APS_NEXT_COMMAND_VALUE 40001
+#define _APS_NEXT_CONTROL_VALUE 1000
+#define _APS_NEXT_SYMED_VALUE 101
+#endif
+#endif
diff --git a/man/Makefile.am b/man/Makefile.am
index 37eb8a13f4e..539c43dfed6 100644
--- a/man/Makefile.am
+++ b/man/Makefile.am
@@ -23,7 +23,7 @@ man_MANS = mysql.1 isamchk.1 isamlog.1 mysql_zap.1 mysqlaccess.1 \
EXTRA_DIST = mysql.1.in isamchk.1.in isamlog.1.in mysql_zap.1.in \
mysqlaccess.1.in mysqladmin.1.in mysqld.1.in mysqld_multi.1.in \
- mysqldump.1.in mysqlshow.1.in perror.1.in replace.1.in \
+ mysqldump.1.in mysqlshow.1.in perror.1.in replace.1.in mysqlman.1.in \
mysqld_safe.1.in mysql_fix_privilege_tables.1.in
CLEANFILES = $(man_MANS)
diff --git a/man/mysqlman.1.in b/man/mysqlman.1.in
new file mode 100644
index 00000000000..610a64da198
--- /dev/null
+++ b/man/mysqlman.1.in
@@ -0,0 +1,15 @@
+.TH mysqlman 1 "20 July 2004" "MySQL @MYSQL_BASE_VERSION@" "MySQL database"
+.SH NAME
+mysqlman \- default man page for mysql
+.SH "DESCRIPTION"
+Certain executables distributed with the MySQL database management system do
+not have specific man pages.
+.SH "SEE ALSO"
+In most cases, you can run the executable from the command line with a "--help"
+argument to display a brief summary of the executable's arguments and function.
+For more information about MySQL, please refer to the MySQL reference manual,
+which may already be installed locally and which is also available online at
+http://dev.mysql.com/doc/
+.SH BUGS
+Please refer to http://bugs.mysql.com/ to report bugs.
+.\" end of man page
diff --git a/myisam/Makefile.am b/myisam/Makefile.am
index 5aa0740261e..378e8107814 100644
--- a/myisam/Makefile.am
+++ b/myisam/Makefile.am
@@ -18,8 +18,10 @@ EXTRA_DIST = mi_test_all.sh mi_test_all.res
pkgdata_DATA = mi_test_all mi_test_all.res
INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include
-LDADD = @CLIENT_EXTRA_LDFLAGS@ libmyisam.a ../mysys/libmysys.a \
- ../dbug/libdbug.a ../strings/libmystrings.a
+LDADD = @CLIENT_EXTRA_LDFLAGS@ libmyisam.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@
pkglib_LIBRARIES = libmyisam.a
bin_PROGRAMS = myisamchk myisamlog myisampack myisam_ftdump
myisamchk_DEPENDENCIES= $(LIBRARIES)
diff --git a/myisam/ftdefs.h b/myisam/ftdefs.h
index e23bc3b75ac..e7a0829e140 100644
--- a/myisam/ftdefs.h
+++ b/myisam/ftdefs.h
@@ -27,7 +27,7 @@
#define misc_word_char(X) ((X)=='\'')
#define word_char(s,X) (true_word_char(s,X) || misc_word_char(X))
-#define FT_MAX_WORD_LEN_FOR_SORT 20
+#define FT_MAX_WORD_LEN_FOR_SORT 31
#define COMPILE_STOPWORDS_IN
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index 052fa55a559..1f6089d0a3c 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -1585,7 +1585,7 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name)
int old_lock;
MYISAM_SHARE *share=info->s;
MI_STATE_INFO old_state;
- DBUG_ENTER("sort_index");
+ DBUG_ENTER("mi_sort_index");
if (!(param->testflag & T_SILENT))
printf("- Sorting index for MyISAM-table '%s'\n",name);
@@ -1664,7 +1664,7 @@ err:
err2:
VOID(my_delete(param->temp_filename,MYF(MY_WME)));
DBUG_RETURN(-1);
-} /* sort_index */
+} /* mi_sort_index */
/* Sort records recursive using one index */
@@ -1672,7 +1672,7 @@ err2:
static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
my_off_t pagepos, File new_file)
{
- uint length,nod_flag,used_length;
+ uint length,nod_flag,used_length, key_length;
uchar *buff,*keypos,*endpos;
uchar key[MI_MAX_POSSIBLE_KEY_BUFF];
my_off_t new_page_pos,next_page;
@@ -1693,7 +1693,7 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
llstr(pagepos,llbuff));
goto err;
}
- if ((nod_flag=mi_test_if_nod(buff)))
+ if ((nod_flag=mi_test_if_nod(buff)) || keyinfo->flag & HA_FULLTEXT)
{
used_length=mi_getint(buff);
keypos=buff+2+nod_flag;
@@ -1704,7 +1704,7 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
{
next_page=_mi_kpos(nod_flag,keypos);
_mi_kpointer(info,keypos-nod_flag,param->new_file_pos); /* Save new pos */
- if (sort_one_index(param,info,keyinfo,next_page, new_file))
+ if (sort_one_index(param,info,keyinfo,next_page,new_file))
{
DBUG_PRINT("error",("From page: %ld, keyoffset: %d used_length: %d",
(ulong) pagepos, (int) (keypos - buff),
@@ -1714,11 +1714,25 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
}
}
if (keypos >= endpos ||
- ((*keyinfo->get_key)(keyinfo,nod_flag,&keypos,key)) == 0)
+ (key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&keypos,key)) == 0)
break;
-#ifdef EXTRA_DEBUG
- assert(keypos <= endpos);
-#endif
+ DBUG_ASSERT(keypos <= endpos);
+ if (keyinfo->flag & HA_FULLTEXT)
+ {
+ uint off;
+ int subkeys;
+ get_key_full_length_rdonly(off, key);
+ subkeys=ft_sintXkorr(key+off);
+ if (subkeys < 0)
+ {
+ next_page= _mi_dpos(info,0,key+key_length);
+ _mi_dpointer(info,keypos-nod_flag-info->s->rec_reflength,
+ param->new_file_pos); /* Save new pos */
+ if (sort_one_index(param,info,&info->s->ft2_keyinfo,
+ next_page,new_file))
+ goto err;
+ }
+ }
}
}
@@ -2020,12 +2034,14 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
if (sort_param.keyinfo->flag & HA_FULLTEXT)
{
+ uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
+ sort_param.keyinfo->seg->charset->mbmaxlen;
sort_info.max_records=
- (ha_rows) (sort_info.filelength/FT_MAX_WORD_LEN_FOR_SORT+1);
+ (ha_rows) (sort_info.filelength/ft_max_word_len_for_sort+1);
sort_param.key_read=sort_ft_key_read;
sort_param.key_write=sort_ft_key_write;
- sort_param.key_length+=FT_MAX_WORD_LEN_FOR_SORT-HA_FT_MAXBYTELEN;
+ sort_param.key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
}
else
{
@@ -2425,7 +2441,11 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
total_key_length+=sort_param[i].key_length;
if (sort_param[i].keyinfo->flag & HA_FULLTEXT)
- sort_param[i].key_length+=FT_MAX_WORD_LEN_FOR_SORT-HA_FT_MAXBYTELEN;
+ {
+ uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
+ sort_param[i].keyinfo->seg->charset->mbmaxlen;
+ sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
+ }
}
sort_info.total_keys=i;
sort_param[0].master= 1;
@@ -2634,7 +2654,6 @@ static int sort_key_read(MI_SORT_PARAM *sort_param, void *key)
DBUG_RETURN(sort_write_record(sort_param));
} /* sort_key_read */
-
static int sort_ft_key_read(MI_SORT_PARAM *sort_param, void *key)
{
int error;
@@ -3950,25 +3969,28 @@ static ha_checksum mi_byte_checksum(const byte *buf, uint length)
return crc;
}
-/*
- Deactive all not unique index that can be recreated fast
- These include packed keys on which sorting will use more temporary
- space than the max allowed file length or for which the unpacked keys
- will take much more space than packed keys.
- Note that 'rows' may be zero for the case when we don't know how many
- rows we will put into the file.
- */
-
static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
{
uint key_maxlength=key->maxlength;
if (key->flag & HA_FULLTEXT)
- key_maxlength+=FT_MAX_WORD_LEN_FOR_SORT-HA_FT_MAXBYTELEN;
+ {
+ uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
+ key->seg->charset->mbmaxlen;
+ key_maxlength+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
+ }
return (key->flag & (HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY | HA_FULLTEXT) &&
((ulonglong) rows * key_maxlength >
(ulonglong) myisam_max_temp_length));
}
+/*
+ Deactivate all not unique index that can be recreated fast
+ These include packed keys on which sorting will use more temporary
+ space than the max allowed file length or for which the unpacked keys
+ will take much more space than packed keys.
+ Note that 'rows' may be zero for the case when we don't know how many
+ rows we will put into the file.
+ */
void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
{
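
The mi_check.c hunks above scale FT_MAX_WORD_LEN_FOR_SORT by the fulltext key segment's charset mbmaxlen before sizing the repair-by-sort buffers, and reuse the same scaled value in mi_too_big_key_for_sort(). A minimal stand-alone sketch of that sizing arithmetic follows; the function name, the simplified parameters and the sample numbers are illustrative only (the real code works on MI_SORT_PARAM/MI_KEYDEF and uses sort_info.filelength):

#include <stdio.h>

#define FT_MAX_WORD_LEN_FOR_SORT 31   /* value set by this patch in ftdefs.h */

/* Sketch only: the repair code now multiplies the per-word byte budget by the
   charset's mbmaxlen before estimating how many words the sort must hold. */
static unsigned long ft_sort_max_records(unsigned long long data_file_length,
                                         unsigned int mbmaxlen)
{
  unsigned long long word_len=
    (unsigned long long) FT_MAX_WORD_LEN_FOR_SORT * mbmaxlen;
  return (unsigned long) (data_file_length / word_len + 1);
}

int main(void)
{
  /* e.g. a 10 MB data file indexed with a utf8 (mbmaxlen = 3) fulltext key */
  printf("%lu\n", ft_sort_max_records(10ULL * 1024 * 1024, 3));
  return 0;
}
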
diff --git a/myisam/mi_key.c b/myisam/mi_key.c
index d19a3130a86..3545756779f 100644
--- a/myisam/mi_key.c
+++ b/myisam/mi_key.c
@@ -23,7 +23,14 @@
#include <ieeefp.h>
#endif
-#define CHECK_KEYS
+#define CHECK_KEYS /* Enable safety checks */
+
+#define FIX_LENGTH(cs, pos, length, char_length) \
+ do { \
+ if (length > char_length) \
+ char_length= my_charpos(cs, pos, pos+length, char_length); \
+ set_if_smaller(char_length,length); \
+ } while(0)
static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record);
@@ -38,11 +45,12 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
byte *pos,*end;
uchar *start;
reg1 HA_KEYSEG *keyseg;
+ my_bool is_ft= info->s->keyinfo[keynr].flag & HA_FULLTEXT;
DBUG_ENTER("_mi_make_key");
- if(info->s->keyinfo[keynr].flag & HA_SPATIAL)
+ if (info->s->keyinfo[keynr].flag & HA_SPATIAL)
{
- /*
+ /*
TODO: nulls processing
*/
#ifdef HAVE_SPATIAL
@@ -57,6 +65,8 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
{
enum ha_base_keytype type=(enum ha_base_keytype) keyseg->type;
uint length=keyseg->length;
+ uint char_length;
+ CHARSET_INFO *cs=keyseg->charset;
if (keyseg->null_bit)
{
@@ -68,6 +78,9 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
*key++=1; /* Not NULL */
}
+ char_length= ((!is_ft && cs && cs->mbmaxlen > 1) ? length/cs->mbmaxlen :
+ length);
+
pos= (byte*) record+keyseg->start;
if (keyseg->flag & HA_SPACE_PACK)
{
@@ -83,9 +96,10 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
pos++;
}
length=(uint) (end-pos);
- store_key_length_inc(key,length);
- memcpy((byte*) key,(byte*) pos,(size_t) length);
- key+=length;
+ FIX_LENGTH(cs, pos, length, char_length);
+ store_key_length_inc(key,char_length);
+ memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ key+=char_length;
continue;
}
if (keyseg->flag & HA_VAR_LENGTH)
@@ -93,14 +107,22 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
uint tmp_length=uint2korr(pos);
pos+=2; /* Skip VARCHAR length */
set_if_smaller(length,tmp_length);
- store_key_length_inc(key,length);
+ FIX_LENGTH(cs, pos, length, char_length);
+ store_key_length_inc(key,char_length);
+ memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ key+= char_length;
+ continue;
}
else if (keyseg->flag & HA_BLOB_PART)
{
uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos);
memcpy_fixed((byte*) &pos,pos+keyseg->bit_start,sizeof(char*));
set_if_smaller(length,tmp_length);
- store_key_length_inc(key,length);
+ FIX_LENGTH(cs, pos, length, char_length);
+ store_key_length_inc(key,char_length);
+ memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ key+= char_length;
+ continue;
}
else if (keyseg->flag & HA_SWAP_KEY)
{ /* Numerical column */
@@ -112,7 +134,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
if (isnan(nr))
{
/* Replace NAN with zero */
- bzero(key,length);
+ bzero(key,length);
key+=length;
continue;
}
@@ -123,7 +145,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
float8get(nr,pos);
if (isnan(nr))
{
- bzero(key,length);
+ bzero(key,length);
key+=length;
continue;
}
@@ -136,7 +158,10 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
}
continue;
}
- memcpy((byte*) key, pos, length);
+ FIX_LENGTH(cs, pos, length, char_length);
+ memcpy((byte*) key, pos, char_length);
+ if (length > char_length)
+ cs->cset->fill(cs, key+char_length, length-char_length, ' ');
key+= length;
}
_mi_dpointer(info,key,filepos);
@@ -164,38 +189,43 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
RETURN
length of packed key
- last_use_keyseg Store pointer to the keyseg after the last used one
+ last_use_keyseg Store pointer to the keyseg after the last used one
*/
uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
uint k_length, HA_KEYSEG **last_used_keyseg)
{
- uint length;
- uchar *pos,*end,*start_key=key;
- reg1 HA_KEYSEG *keyseg;
- enum ha_base_keytype type;
+ uchar *start_key=key;
+ HA_KEYSEG *keyseg;
+ my_bool is_ft= info->s->keyinfo[keynr].flag & HA_FULLTEXT;
DBUG_ENTER("_mi_pack_key");
- start_key=key;
for (keyseg=info->s->keyinfo[keynr].seg ;
keyseg->type && (int) k_length > 0;
old+=keyseg->length, keyseg++)
{
- length=min((uint) keyseg->length,(uint) k_length);
- type=(enum ha_base_keytype) keyseg->type;
+ enum ha_base_keytype type=(enum ha_base_keytype) keyseg->type;
+ uint length=min((uint) keyseg->length,(uint) k_length);
+ uint char_length;
+ uchar *pos;
+ CHARSET_INFO *cs=keyseg->charset;
+
if (keyseg->null_bit)
{
k_length--;
if (!(*key++= (char) 1-*old++)) /* Copy null marker */
{
k_length-=length;
+ if (keyseg->flag & (HA_VAR_LENGTH | HA_BLOB_PART))
+ k_length-=2; /* Skip length */
continue; /* Found NULL */
}
}
+ char_length= (!is_ft && cs && cs->mbmaxlen > 1) ? length/cs->mbmaxlen : length;
pos=old;
if (keyseg->flag & HA_SPACE_PACK)
{
- end=pos+length;
+ uchar *end=pos+length;
if (type != HA_KEYTYPE_NUM)
{
while (end > pos && end[-1] == ' ')
@@ -208,9 +238,10 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
}
k_length-=length;
length=(uint) (end-pos);
- store_key_length_inc(key,length);
- memcpy((byte*) key,pos,(size_t) length);
- key+= length;
+ FIX_LENGTH(cs, pos, length, char_length);
+ store_key_length_inc(key,char_length);
+ memcpy((byte*) key,pos,(size_t) char_length);
+ key+= char_length;
continue;
}
else if (keyseg->flag & (HA_VAR_LENGTH | HA_BLOB_PART))
@@ -218,11 +249,13 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
/* Length of key-part used with mi_rkey() always 2 */
uint tmp_length=uint2korr(pos);
k_length-= 2+length;
+ pos+=2;
set_if_smaller(length,tmp_length); /* Safety */
- store_key_length_inc(key,length);
+ FIX_LENGTH(cs, pos, length, char_length);
+ store_key_length_inc(key,char_length);
old+=2; /* Skip length */
- memcpy((byte*) key, pos+2,(size_t) length);
- key+= length;
+ memcpy((byte*) key, pos,(size_t) char_length);
+ key+= char_length;
continue;
}
else if (keyseg->flag & HA_SWAP_KEY)
@@ -235,7 +268,10 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
}
continue;
}
- memcpy((byte*) key,pos,(size_t) length);
+ FIX_LENGTH(cs, pos, length, char_length);
+ memcpy((byte*) key, pos, char_length);
+ if (length > char_length)
+ cs->cset->fill(cs,key+char_length, length-char_length, ' ');
key+= length;
k_length-=length;
}
diff --git a/myisam/mi_search.c b/myisam/mi_search.c
index 1b03acddbc1..bc8be9c2732 100644
--- a/myisam/mi_search.c
+++ b/myisam/mi_search.c
@@ -63,8 +63,8 @@ int _mi_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
uchar *keypos,*maxpos;
uchar lastkey[MI_MAX_KEY_BUFF],*buff;
DBUG_ENTER("_mi_search");
- DBUG_PRINT("enter",("pos: %ld nextflag: %d lastpos: %ld",
- pos,nextflag,info->lastpos));
+ DBUG_PRINT("enter",("pos: %lu nextflag: %u lastpos: %lu",
+ (ulong) pos, nextflag, (ulong) info->lastpos));
DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,key_len););
if (pos == HA_OFFSET_ERROR)
@@ -235,15 +235,15 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
if (length == 0 || page > end)
{
my_errno=HA_ERR_CRASHED;
- DBUG_PRINT("error",("Found wrong key: length: %d page: %lx end: %lx",
- length,page,end));
+ DBUG_PRINT("error",("Found wrong key: length: %u page: %p end: %p",
+ length, page, end));
DBUG_RETURN(MI_FOUND_WRONG_KEY);
}
if ((flag=ha_key_cmp(keyinfo->seg,t_buff,key,key_len,comp_flag,
&not_used)) >= 0)
break;
#ifdef EXTRA_DEBUG
- DBUG_PRINT("loop",("page: %lx key: '%s' flag: %d",page,t_buff,flag));
+ DBUG_PRINT("loop",("page: %p key: '%s' flag: %d", page, t_buff, flag));
#endif
memcpy(buff,t_buff,length);
*ret_pos=page;
@@ -251,7 +251,7 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
if (flag == 0)
memcpy(buff,t_buff,length); /* Result is first key */
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: %lx",flag,*ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos));
DBUG_RETURN(flag);
} /* _mi_seq_search */
@@ -381,8 +381,8 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
if (page > end)
{
my_errno=HA_ERR_CRASHED;
- DBUG_PRINT("error",("Found wrong key: length: %d page: %lx end: %lx",
- length,page,end));
+ DBUG_PRINT("error",("Found wrong key: length: %u page: %p end: %p",
+ length, page, end));
DBUG_RETURN(MI_FOUND_WRONG_KEY);
}
@@ -396,9 +396,18 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
matched=prefix_len+left;
- for (my_flag=0;left;left--)
- if ((my_flag= (int) sort_order[*vseg++] - (int) sort_order[*k++]))
- break;
+ if (sort_order)
+ {
+ for (my_flag=0;left;left--)
+ if ((my_flag= (int) sort_order[*vseg++] - (int) sort_order[*k++]))
+ break;
+ }
+ else
+ {
+ for (my_flag=0;left;left--)
+ if ((my_flag= (int) *vseg++ - (int) *k++))
+ break;
+ }
if (my_flag>0) /* mismatch */
break;
@@ -442,9 +451,8 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
/* We have to compare k and vseg as if they were space extended */
for (end=vseg + (len-cmplen) ;
vseg < end && *vseg == (uchar) ' ';
- vseg++) ;
- if (vseg == end)
- goto cmp_rest; /* should never happen */
+ vseg++, matched++) ;
+ DBUG_ASSERT(vseg < end);
if (*vseg > (uchar) ' ')
{
@@ -502,7 +510,7 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: %lx",flag,*ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos));
DBUG_RETURN(flag);
} /* _mi_prefix_search */
@@ -579,7 +587,7 @@ my_off_t _mi_dpos(MI_INFO *info, uint nod_flag, uchar *after_key)
after_key-=(nod_flag + info->s->rec_reflength);
switch (info->s->rec_reflength) {
#if SIZEOF_OFF_T > 4
- case 8: pos= (my_off_t) mi_uint5korr(after_key); break;
+ case 8: pos= (my_off_t) mi_uint8korr(after_key); break;
case 7: pos= (my_off_t) mi_uint7korr(after_key); break;
case 6: pos= (my_off_t) mi_uint6korr(after_key); break;
case 5: pos= (my_off_t) mi_uint5korr(after_key); break;
@@ -750,8 +758,9 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
key+= length; /* Same diff_key as prev */
if (length > keyseg->length)
{
- DBUG_PRINT("error",("Found too long null packed key: %d of %d at %lx",
- length, keyseg->length, *page_pos));
+ DBUG_PRINT("error",
+ ("Found too long null packed key: %u of %u at %p",
+ length, keyseg->length, *page_pos));
DBUG_DUMP("key",(char*) *page_pos,16);
my_errno=HA_ERR_CRASHED;
return 0;
@@ -806,7 +815,7 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
}
if (length > (uint) keyseg->length)
{
- DBUG_PRINT("error",("Found too long packed key: %d of %d at %lx",
+ DBUG_PRINT("error",("Found too long packed key: %u of %u at %p",
length, keyseg->length, *page_pos));
DBUG_DUMP("key",(char*) *page_pos,16);
my_errno=HA_ERR_CRASHED;
@@ -861,7 +870,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
{
if (length > keyinfo->maxlength)
{
- DBUG_PRINT("error",("Found too long binary packed key: %d of %d at %lx",
+ DBUG_PRINT("error",("Found too long binary packed key: %u of %u at %p",
length, keyinfo->maxlength, *page_pos));
DBUG_DUMP("key",(char*) *page_pos,16);
my_errno=HA_ERR_CRASHED;
@@ -908,7 +917,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
length-=tmp;
from=page; from_end=page_end;
}
- DBUG_PRINT("info",("key: %lx from: %lx length: %u",
+ DBUG_PRINT("info",("key: %p from: %p length: %u",
key, from, length));
memcpy_overlap((byte*) key, (byte*) from, (size_t) length);
key+=length;
@@ -964,7 +973,7 @@ uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
}
}
}
- DBUG_PRINT("exit",("page: %lx length: %d",page,*return_key_length));
+ DBUG_PRINT("exit",("page: %p length: %u", page, *return_key_length));
DBUG_RETURN(page);
} /* _mi_get_key */
@@ -1015,7 +1024,7 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
uint nod_flag;
uchar *lastpos;
DBUG_ENTER("_mi_get_last_key");
- DBUG_PRINT("enter",("page: %lx endpos: %lx",page,endpos));
+ DBUG_PRINT("enter",("page: %p endpos: %p", page, endpos));
nod_flag=mi_test_if_nod(page);
if (! (keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY)))
@@ -1035,13 +1044,13 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
*return_key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&page,lastkey);
if (*return_key_length == 0)
{
- DBUG_PRINT("error",("Couldn't find last key: page: %lx",page));
+ DBUG_PRINT("error",("Couldn't find last key: page: %p", page));
my_errno=HA_ERR_CRASHED;
DBUG_RETURN(0);
}
}
}
- DBUG_PRINT("exit",("lastpos: %lx length: %d",lastpos,*return_key_length));
+ DBUG_PRINT("exit",("lastpos: %p length: %u", lastpos, *return_key_length));
DBUG_RETURN(lastpos);
} /* _mi_get_last_key */
@@ -1126,8 +1135,9 @@ int _mi_search_next(register MI_INFO *info, register MI_KEYDEF *keyinfo,
uint nod_flag;
uchar lastkey[MI_MAX_KEY_BUFF];
DBUG_ENTER("_mi_search_next");
- DBUG_PRINT("enter",("nextflag: %d lastpos: %ld int_keypos: %lx",
- nextflag,(long) info->lastpos,info->int_keypos));
+ DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos: %lu",
+ nextflag, (ulong) info->lastpos,
+ (ulong) info->int_keypos));
DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,key_length););
/* Force full read if we are at last key or if we are not on a leaf
@@ -1234,7 +1244,7 @@ int _mi_search_first(register MI_INFO *info, register MI_KEYDEF *keyinfo,
info->page_changed=info->buff_used=0;
info->lastpos=_mi_dpos(info,0,info->lastkey+info->lastkey_length);
- DBUG_PRINT("exit",("found key at %ld",(ulong) info->lastpos));
+ DBUG_PRINT("exit",("found key at %lu", (ulong) info->lastpos));
DBUG_RETURN(0);
} /* _mi_search_first */
@@ -1468,8 +1478,8 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key,
}
s_temp->totlength=(uint) length;
s_temp->prev_length=0;
- DBUG_PRINT("test",("tot_length: %d length: %d uniq_key_length: %d",
- key_length,length,s_temp->key_length));
+ DBUG_PRINT("test",("tot_length: %u length: %d uniq_key_length: %u",
+ key_length, length, s_temp->key_length));
/* If something after that hasn't length=0, test if we can combine */
if ((s_temp->next_key_pos=next_key))
@@ -1575,7 +1585,7 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key,
ref_length=0;
next_length_pack=0;
}
- DBUG_PRINT("test",("length: %d next_key: %lx",length,next_key));
+ DBUG_PRINT("test",("length: %d next_key: %p", length, next_key));
{
uint tmp_length;
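
Among the mi_search.c hunks above, the _mi_dpos() fix replaces mi_uint5korr with mi_uint8korr for 8-byte record references; those macros decode fixed-width integers stored high byte first after the key. A small sketch of that decoding, assuming the high-byte-first layout (decode_pos is an illustrative stand-in, not a server function):

#include <stdint.h>

/* Sketch: decode an N-byte, high-byte-first record reference, the job the
   mi_uint2korr()..mi_uint8korr() macros do in _mi_dpos().  The hunk above
   fixes the 8-byte case, which previously reused the 5-byte decoder. */
static uint64_t decode_pos(const unsigned char *p, unsigned int rec_reflength)
{
  uint64_t pos= 0;
  for (unsigned int i= 0; i < rec_reflength; i++)
    pos= (pos << 8) | p[i];            /* most significant byte first */
  return pos;
}
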
diff --git a/myisam/myisam_ftdump.c b/myisam/myisam_ftdump.c
index eeee96d0ff2..8219c19848a 100644
--- a/myisam/myisam_ftdump.c
+++ b/myisam/myisam_ftdump.c
@@ -68,7 +68,7 @@ int main(int argc,char *argv[])
struct { MI_INFO *info; } aio0, *aio=&aio0; /* for GWS_IN_USE */
MY_INIT(argv[0]);
- if ((error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+ if (error=handle_options(&argc, &argv, my_long_options, get_one_option))
exit(error);
if (count || dump)
verbose=0;
diff --git a/myisam/myisamchk.c b/myisam/myisamchk.c
index 1be1e72c435..bee248c0c65 100644
--- a/myisam/myisamchk.c
+++ b/myisam/myisamchk.c
@@ -154,7 +154,7 @@ enum options_mc {
OPT_KEY_CACHE_BLOCK_SIZE, OPT_MYISAM_BLOCK_SIZE,
OPT_READ_BUFFER_SIZE, OPT_WRITE_BUFFER_SIZE, OPT_SORT_BUFFER_SIZE,
OPT_SORT_KEY_BLOCKS, OPT_DECODE_BITS, OPT_FT_MIN_WORD_LEN,
- OPT_FT_MAX_WORD_LEN, OPT_FT_MAX_WORD_LEN_FOR_SORT, OPT_FT_STOPWORD_FILE,
+ OPT_FT_MAX_WORD_LEN, OPT_FT_STOPWORD_FILE,
OPT_MAX_RECORD_LENGTH
};
@@ -328,11 +328,11 @@ static struct my_option my_long_options[] =
{ "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "", (gptr*) &ft_max_word_len,
(gptr*) &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10,
HA_FT_MAXCHARLEN, 0, 1, 0},
- { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{ "ft_stopword_file", OPT_FT_STOPWORD_FILE,
"Use stopwords from this file instead of built-in list.",
(gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0}
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
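
The myisamchk.c hunk above moves the all-zero terminator entry to the end of my_long_options, after ft_stopword_file. Option tables of this kind are walked until a null-name sentinel is reached, so a sentinel in the middle hides every entry after it. A simplified sketch of that convention (struct opt and list_options are stand-ins for struct my_option and the real option handling):

#include <stdio.h>

struct opt { const char *name; const char *comment; };

static void list_options(const struct opt *opts)
{
  for (; opts->name; opts++)             /* stop at the null-name sentinel */
    printf("--%s\t%s\n", opts->name, opts->comment ? opts->comment : "");
}

int main(void)
{
  static const struct opt my_long_options[]=
  {
    { "ft_max_word_len",  "" },
    { "ft_stopword_file",
      "Use stopwords from this file instead of built-in list." },
    { 0, 0 }                             /* terminator, must come last */
  };
  list_options(my_long_options);
  return 0;
}
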
diff --git a/myisam/sort.c b/myisam/sort.c
index 8b0ee51ae4e..5537ba55c7d 100644
--- a/myisam/sort.c
+++ b/myisam/sort.c
@@ -204,7 +204,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
goto err; /* purecov: inspected */
if (!no_messages)
- puts(" - Last merge and dumping keys\n"); /* purecov: tested */
+ printf(" - Last merge and dumping keys\n"); /* purecov: tested */
if (merge_index(info,keys,sort_keys,dynamic_element(&buffpek,0,BUFFPEK *),
maxbuffer,&tempfile))
goto err; /* purecov: inspected */
@@ -219,6 +219,8 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
uint keyno=info->key;
uint key_length, ref_length=index->s->rec_reflength;
+ if (!no_messages)
+ printf(" - Adding exceptions\n"); /* purecov: tested */
if (flush_io_cache(&tempfile_for_exceptions) ||
reinit_io_cache(&tempfile_for_exceptions,READ_CACHE,0L,0,0))
goto err;
diff --git a/myisammrg/myrg_open.c b/myisammrg/myrg_open.c
index 5188669cad1..a59ccb7d966 100644
--- a/myisammrg/myrg_open.c
+++ b/myisammrg/myrg_open.c
@@ -32,30 +32,38 @@
MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
{
- int save_errno,i,errpos;
- uint files,dir_length,length,key_parts;
- ulonglong file_offset;
+ int save_errno,errpos=0;
+ uint files=0,i,dir_length,length,key_parts;
+ ulonglong file_offset=0;
char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end;
- MYRG_INFO info,*m_info;
+ MYRG_INFO *m_info=0;
File fd;
IO_CACHE file;
- MI_INFO *isam,*last_isam;
+ MI_INFO *isam=0;
+ uint found_merge_insert_method= 0;
DBUG_ENTER("myrg_open");
- LINT_INIT(last_isam);
- LINT_INIT(m_info);
- isam=0;
- errpos=files=0;
- bzero((gptr) &info,sizeof(info));
+ LINT_INIT(key_parts);
+
bzero((char*) &file,sizeof(file));
if ((fd=my_open(fn_format(name_buff,name,"",MYRG_NAME_EXT,4),
- O_RDONLY | O_SHARE,MYF(0))) < 0 ||
- init_io_cache(&file, fd, IO_SIZE, READ_CACHE, 0, 0,
+ O_RDONLY | O_SHARE,MYF(0))) < 0)
+ goto err;
+ errpos=1;
+ if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0,
MYF(MY_WME | MY_NABP)))
- goto err;
- errpos=1;
+ goto err;
+ errpos=2;
dir_length=dirname_part(name_buff,name);
- info.reclength=0;
+ while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
+ {
+ if ((end=buff+length)[-1] == '\n')
+ end[-1]='\0';
+ if (buff[0] && buff[0] != '#')
+ files++;
+ }
+
+ my_b_seek(&file, 0);
while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
{
if ((end=buff+length)[-1] == '\n')
@@ -64,10 +72,10 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
continue; /* Skip empty lines */
if (buff[0] == '#')
{
- if( !strncmp(buff+1,"INSERT_METHOD=",14))
+ if (!strncmp(buff+1,"INSERT_METHOD=",14))
{ /* Lookup insert method */
int tmp=find_type(buff+15,&merge_insert_method,2);
- info.merge_insert_method = (uint) (tmp >= 0 ? tmp : 0);
+ found_merge_insert_method = (uint) (tmp >= 0 ? tmp : 0);
}
continue; /* Skip comments */
}
@@ -79,66 +87,56 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
VOID(cleanup_dirname(buff,name_buff));
}
if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0))))
- goto err;
+ goto err;
+ if (!m_info) /* First file */
+ {
+ key_parts=isam->s->base.key_parts;
+ if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO) +
+ files*sizeof(MYRG_TABLE) +
+ key_parts*sizeof(long),
+ MYF(MY_WME|MY_ZEROFILL))))
+ goto err;
+ if (files)
+ {
+ m_info->open_tables=(MYRG_TABLE *) (m_info+1);
+ m_info->rec_per_key_part=(ulong *) (m_info->open_tables+files);
+ m_info->tables= files;
+ files= 0;
+ }
+ m_info->reclength=isam->s->base.reclength;
+ errpos=3;
+ }
+ m_info->open_tables[files].table= isam;
+ m_info->open_tables[files].file_offset=(my_off_t) file_offset;
+ file_offset+=isam->state->data_file_length;
files++;
- last_isam=isam;
- if (info.reclength && info.reclength != isam->s->base.reclength)
+ if (m_info->reclength != isam->s->base.reclength)
{
my_errno=HA_ERR_WRONG_MRG_TABLE_DEF;
goto err;
}
- info.reclength=isam->s->base.reclength;
- }
- key_parts=(isam ? isam->s->base.key_parts : 0);
- if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO)+
- files*sizeof(MYRG_TABLE)+
- sizeof(long)*key_parts,
- MYF(MY_WME))))
- goto err;
- *m_info=info;
- m_info->tables=files;
- if (files)
- {
- m_info->open_tables=(MYRG_TABLE *) (m_info+1);
- m_info->rec_per_key_part=(ulong *) (m_info->open_tables+files);
- bzero((char*) m_info->rec_per_key_part,sizeof(long)*key_parts);
+ m_info->options|= isam->s->options;
+ m_info->records+= isam->state->records;
+ m_info->del+= isam->state->del;
+ m_info->data_file_length+= isam->state->data_file_length;
+ for (i=0; i < key_parts; i++)
+ m_info->rec_per_key_part[i]+= (isam->s->state.rec_per_key_part[i] /
+ m_info->tables);
}
- else
- {
- m_info->open_tables=0;
- m_info->rec_per_key_part=0;
- }
- errpos=2;
- for (i=files ; i-- > 0 ; )
- {
- uint j;
- m_info->open_tables[i].table=isam;
- m_info->options|=isam->s->options;
- m_info->records+=isam->state->records;
- m_info->del+=isam->state->del;
- m_info->data_file_length+=isam->state->data_file_length;
- for (j=0; j < key_parts; j++)
- m_info->rec_per_key_part[j]+=isam->s->state.rec_per_key_part[j] / files;
- if (i)
- isam=(MI_INFO*) (isam->open_list.next->data);
- }
+ if (!m_info && !(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO),
+ MYF(MY_WME | MY_ZEROFILL))))
+ goto err;
/* Don't mark table readonly, for ALTER TABLE ... UNION=(...) to work */
m_info->options&= ~(HA_OPTION_COMPRESS_RECORD | HA_OPTION_READ_ONLY_DATA);
+ m_info->merge_insert_method= found_merge_insert_method;
- /* Fix fileinfo for easyer debugging (actually set by rrnd) */
- file_offset=0;
- for (i=0 ; (uint) i < files ; i++)
- {
- m_info->open_tables[i].file_offset=(my_off_t) file_offset;
- file_offset+=m_info->open_tables[i].table->state->data_file_length;
- }
if (sizeof(my_off_t) == 4 && file_offset > (ulonglong) (ulong) ~0L)
{
my_errno=HA_ERR_RECORD_FILE_FULL;
goto err;
}
- m_info->keys=(files) ? m_info->open_tables->table->s->base.keys : 0;
+ m_info->keys= files ? isam->s->base.keys : 0;
bzero((char*) &m_info->by_key,sizeof(m_info->by_key));
/* this works ok if the table list is empty */
@@ -156,19 +154,16 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
err:
save_errno=my_errno;
switch (errpos) {
- case 2:
+ case 3:
+ while (files)
+ mi_close(m_info->open_tables[--files].table);
my_free((char*) m_info,MYF(0));
/* Fall through */
+ case 2:
+ end_io_cache(&file);
+ /* Fall through */
case 1:
VOID(my_close(fd,MYF(0)));
- end_io_cache(&file);
- for (i=files ; i-- > 0 ; )
- {
- isam=last_isam;
- if (i)
- last_isam=(MI_INFO*) (isam->open_list.next->data);
- mi_close(isam);
- }
}
my_errno=save_errno;
DBUG_RETURN (NULL);
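
The rewritten myrg_open() above reads the .MRG definition file in two passes: the first pass counts the table lines so MYRG_INFO, the open_tables array and rec_per_key_part can be allocated as one zero-filled block, and the second pass opens each table and accumulates its statistics, with errpos-based unwinding on failure. A stand-alone sketch of that count-then-allocate pattern using stdio instead of the IO_CACHE/mi_open machinery (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: count non-comment lines, allocate one block holding the header plus
   a per-table array, then re-read the file and fill the array in. */
struct merge_info { unsigned int tables; char (*names)[256]; };

static struct merge_info *open_merge_list(const char *path)
{
  char buff[256];
  unsigned int files= 0, i= 0;
  struct merge_info *m;
  FILE *f= fopen(path, "r");

  if (!f)
    return NULL;
  while (fgets(buff, sizeof(buff), f))          /* pass 1: count table lines */
    if (buff[0] && buff[0] != '#' && buff[0] != '\n')
      files++;
  /* one allocation for the header and the per-table array */
  m= calloc(1, sizeof(*m) + files * sizeof(*m->names));
  if (!m)
  {
    fclose(f);
    return NULL;
  }
  m->tables= files;
  m->names= (char (*)[256]) (m + 1);
  rewind(f);
  while (i < files && fgets(buff, sizeof(buff), f))   /* pass 2: fill in */
  {
    if (!buff[0] || buff[0] == '#' || buff[0] == '\n')
      continue;
    buff[strcspn(buff, "\n")]= '\0';
    strncpy(m->names[i++], buff, sizeof(m->names[0]) - 1);
  }
  fclose(f);
  return m;
}
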
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index e10c0739cb4..c3f9eea875d 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -62,6 +62,7 @@ install-data-local:
$(INSTALL_DATA) $(srcdir)/r/*.require $(DESTDIR)$(testdir)/r
$(INSTALL_DATA) $(srcdir)/include/*.inc $(DESTDIR)$(testdir)/include
$(INSTALL_DATA) $(srcdir)/std_data/*.dat $(DESTDIR)$(testdir)/std_data
+ $(INSTALL_DATA) $(srcdir)/std_data/*.*001 $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/des_key_file $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.pem $(DESTDIR)$(testdir)/std_data
diff --git a/mysql-test/include/have_csv.inc b/mysql-test/include/have_csv.inc
new file mode 100644
index 00000000000..d28199831b8
--- /dev/null
+++ b/mysql-test/include/have_csv.inc
@@ -0,0 +1,4 @@
+-- require r/have_csv.require
+disable_query_log;
+show variables like "have_csv";
+enable_query_log;
diff --git a/mysql-test/include/have_exampledb.inc b/mysql-test/include/have_exampledb.inc
new file mode 100644
index 00000000000..7ddd15c48b3
--- /dev/null
+++ b/mysql-test/include/have_exampledb.inc
@@ -0,0 +1,4 @@
+-- require r/have_exampledb.require
+disable_query_log;
+show variables like "have_example_engine";
+enable_query_log;
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 6684ac6b9ed..955bc12a43a 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -367,10 +367,12 @@ while test $# -gt 0; do
VALGRIND=`which valgrind` # this will print an error if not found
# Give good warning to the user and stop
if [ -z "$VALGRIND" ] ; then
- $ECHO "You need to have the 'valgrind' program in your PATH to run mysql-test-run with option --valgrind. Valgrind's home page is http://developer.kde.org/~sewardj ."
+ $ECHO "You need to have the 'valgrind' program in your PATH to run mysql-test-run with option --valgrind. Valgrind's home page is http://valgrind.kde.org ."
exit 1
fi
- VALGRIND="$VALGRIND --tool=memcheck --alignment=8 --leak-check=yes --num-callers=16"
+ # >=2.1.2 requires the --tool option, some versions write to stdout, some to stderr
+ valgrind --help 2>&1 | grep "\-\-tool" > /dev/null && VALGRIND="$VALGRIND --tool=memcheck"
+ VALGRIND="$VALGRIND --alignment=8 --leak-check=yes --num-callers=16"
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc --skip-bdb"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc --skip-bdb"
SLEEP_TIME_AFTER_RESTART=10
@@ -436,7 +438,7 @@ SLAVE_MYERR="$MYSQL_TEST_DIR/var/log/slave.err"
CURRENT_TEST="$MYSQL_TEST_DIR/var/log/current_test"
SMALL_SERVER="--key_buffer_size=1M --sort_buffer=256K --max_heap_table_size=1M"
-export MASTER_MYPORT SLAVE_MYPORT MYSQL_TCP_PORT
+export MASTER_MYPORT SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK
if [ x$SOURCE_DIST = x1 ] ; then
MY_BASEDIR=$MYSQL_TEST_DIR
@@ -523,11 +525,6 @@ else
fi
fi
-MYSQL_DUMP="$MYSQL_DUMP --no-defaults -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT"
-MYSQL_BINLOG="$MYSQL_BINLOG --no-defaults --local-load=$MYSQL_TMP_DIR $EXTRA_MYSQLBINLOG_OPT"
-MYSQL_FIX_SYSTEM_TABLES="$MYSQL_FIX_SYSTEM_TABLES --no-defaults --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD --basedir=$BASEDIR --bindir=$CLIENT_BINDIR --verbose"
-MYSQL="$MYSQL --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD"
-
if [ -z "$MASTER_MYSQLD" ]
then
MASTER_MYSQLD=$MYSQLD
@@ -560,6 +557,12 @@ then
fi
+MYSQL_DUMP="$MYSQL_DUMP --no-defaults -uroot --socket=$MASTER_MYSOCK --password=$DBPASSWD $EXTRA_MYSQLDUMP_OPT"
+MYSQL_BINLOG="$MYSQL_BINLOG --no-defaults --local-load=$MYSQL_TMP_DIR $EXTRA_MYSQLBINLOG_OPT"
+MYSQL_FIX_SYSTEM_TABLES="$MYSQL_FIX_SYSTEM_TABLES --no-defaults --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD --basedir=$BASEDIR --bindir=$CLIENT_BINDIR --verbose"
+MYSQL="$MYSQL --host=localhost --port=$MASTER_MYPORT --socket=$MASTER_MYSOCK --user=root --password=$DBPASSWD"
+export MYSQL MYSQL_DUMP MYSQL_BINLOG MYSQL_FIX_SYSTEM_TABLES CLIENT_BINDIR
+
MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB \
--user=$DBUSER --password=$DBPASSWD --silent -v --skip-safemalloc \
--tmpdir=$MYSQL_TMP_DIR --port=$MASTER_MYPORT $MYSQL_TEST_SSL_OPTS"
@@ -612,6 +615,7 @@ show_failed_diff ()
echo "Please follow the instructions outlined at"
echo "http://www.mysql.com/doc/en/Reporting_mysqltest_bugs.html"
echo "to find the reason to this problem and how to report this."
+ echo ""
fi
}
@@ -1428,7 +1432,7 @@ then
if [ -z "$USE_RUNNING_NDBCLUSTER" ]
then
# Kill any running ndbcluster stuff
- ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --stop
+ ./ndb/ndbcluster --data-dir=$MYSQL_TEST_DIR/var --port-base=$NDBCLUSTER_PORT --stop
fi
fi
@@ -1449,11 +1453,11 @@ then
if [ -z "$USE_RUNNING_NDBCLUSTER" ]
then
echo "Starting ndbcluster"
- ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --discless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
- export NDB_CONNECTSTRING=`cat Ndb.cfg`
+ ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
+ USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"host=localhost:$NDBCLUSTER_PORT\""
else
- export NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
- echo "Using ndbcluster at $NDB_CONNECTSTRING"
+ USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"$USE_RUNNING_NDBCLUSTER\""
+ echo "Using ndbcluster at $USE_NDBCLUSTER"
fi
fi
@@ -1549,7 +1553,7 @@ then
if [ -z "$USE_RUNNING_NDBCLUSTER" ]
then
# Kill any running ndbcluster stuff
- ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --stop
+ ./ndb/ndbcluster --data-dir=$MYSQL_TEST_DIR/var --port-base=$NDBCLUSTER_PORT --stop
fi
fi
diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini
index 82c65c98866..bf5c67cd1d6 100644
--- a/mysql-test/ndb/ndb_config_2_node.ini
+++ b/mysql-test/ndb/ndb_config_2_node.ini
@@ -3,73 +3,34 @@ NoOfReplicas: 2
MaxNoOfConcurrentOperations: CHOOSE_MaxNoOfConcurrentOperations
DataMemory: CHOOSE_DataMemory
IndexMemory: CHOOSE_IndexMemory
-Discless: CHOOSE_Discless
+Diskless: CHOOSE_Diskless
+TimeBetweenWatchDogCheck: 30000
+FileSystemPath: CHOOSE_FILESYSTEM
[COMPUTER]
Id: 1
-ByteOrder: Little
HostName: CHOOSE_HOSTNAME_1
[COMPUTER]
Id: 2
-ByteOrder: Little
HostName: CHOOSE_HOSTNAME_2
-[COMPUTER]
-Id: 3
-ByteOrder: Little
-HostName: CHOOSE_HOSTNAME_3
-
-[COMPUTER]
-Id: 4
-ByteOrder: Little
-HostName: CHOOSE_HOSTNAME_4
-
-[COMPUTER]
-Id: 5
-ByteOrder: Little
-HostName: CHOOSE_HOSTNAME_5
-
-[COMPUTER]
-Id: 6
-ByteOrder: Little
-HostName: CHOOSE_HOSTNAME_6
-
-[COMPUTER]
-Id: 7
-ByteOrder: Little
-HostName: CHOOSE_HOSTNAME_7
-
-[MGM]
-Id: 1
+[DB]
ExecuteOnComputer: 1
-PortNumber: CHOOSE_PORT_MGM
[DB]
-Id: 2
ExecuteOnComputer: 2
-FileSystemPath: CHOOSE_FILESYSTEM_NODE_2
-[DB]
-Id: 3
-ExecuteOnComputer: 3
-FileSystemPath: CHOOSE_FILESYSTEM_NODE_3
+[MGM]
+PortNumber: CHOOSE_PORT_MGM
-[API]
-Id: 11
-ExecuteOnComputer: 4
+[MYSQLD]
-[API]
-Id: 12
-ExecuteOnComputer: 5
+[MYSQLD]
-[API]
-Id: 13
-ExecuteOnComputer: 6
+[MYSQLD]
-[API]
-Id: 14
-ExecuteOnComputer: 7
+[MYSQLD]
[TCP DEFAULT]
PortNumber: CHOOSE_PORT_TRANSPORTER
diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh
index 4afbe5cf3d7..bbd3fa3257a 100644
--- a/mysql-test/ndb/ndbcluster.sh
+++ b/mysql-test/ndb/ndbcluster.sh
@@ -42,7 +42,7 @@ cfgfile=Ndb.cfg
stop_ndb=
initial_ndb=
status_ndb=
-ndb_discless=0
+ndb_diskless=0
ndb_con_op=100000
ndb_dmem=80M
@@ -54,7 +54,7 @@ while test $# -gt 0; do
stop_ndb=1
;;
--initial)
- flags_ndb=$flags_ndb" -i"
+ flags_ndb="$flags_ndb -i"
initial_ndb=1
;;
--status)
@@ -65,8 +65,8 @@ while test $# -gt 0; do
ndb_dmem=40M
ndb_imem=12M
;;
- --discless)
- ndb_discless=1
+ --diskless)
+ ndb_diskless=1
;;
--data-dir=*)
fsdir=`echo "$1" | sed -e "s;--data-dir=;;"`
@@ -81,123 +81,101 @@ while test $# -gt 0; do
shift
done
-fs_ndb=$fsdir/ndbcluster
-fs_mgm_1=$fs_ndb/1.ndb_mgm
-fs_ndb_2=$fs_ndb/2.ndb_db
-fs_ndb_3=$fs_ndb/3.ndb_db
-fs_name_2=$fs_ndb/node-2-fs-$port_base
-fs_name_3=$fs_ndb/node-3-fs-$port_base
+fs_ndb="$fsdir/ndbcluster-$port_base"
NDB_HOME=
-export NDB_CONNECTSTRING
-if [ ! -x $fsdir ]; then
+if [ ! -x "$fsdir" ]; then
echo "$fsdir missing"
exit 1
fi
-if [ ! -x $exec_ndb ]; then
+if [ ! -x "$exec_ndb" ]; then
echo "$exec_ndb missing"
exit 1
fi
-if [ ! -x $exec_mgmtsrvr ]; then
+if [ ! -x "$exec_mgmtsrvr" ]; then
echo "$exec_mgmtsrvr missing"
exit 1
fi
+ndb_host="localhost"
+ndb_mgmd_port=$port_base
+NDB_CONNECTSTRING="host=$ndb_host:$ndb_mgmd_port"
+export NDB_CONNECTSTRING
+
start_default_ndbcluster() {
# do some checks
-NDB_CONNECTSTRING=
-
-if [ $initial_ndb ] ; then
- [ -d $fs_ndb ] || mkdir $fs_ndb
- [ -d $fs_mgm_1 ] || mkdir $fs_mgm_1
- [ -d $fs_ndb_2 ] || mkdir $fs_ndb_2
- [ -d $fs_ndb_3 ] || mkdir $fs_ndb_3
- [ -d $fs_name_2 ] || mkdir $fs_name_2
- [ -d $fs_name_3 ] || mkdir $fs_name_3
+if [ "$initial_ndb" ] ; then
+ [ -d "$fs_ndb" ] || mkdir "$fs_ndb"
fi
-if [ -d "$fs_ndb" -a -d "$fs_mgm_1" -a -d "$fs_ndb_2" -a -d "$fs_ndb_3" -a -d "$fs_name_2" -a -d "$fs_name_3" ]; then :; else
+if [ -d "$fs_ndb" ]; then :; else
echo "$fs_ndb filesystem directory does not exist"
exit 1
fi
# set som help variables
-ndb_host="localhost"
-ndb_mgmd_port=$port_base
port_transporter=`expr $ndb_mgmd_port + 2`
-NDB_CONNECTSTRING_BASE="host=$ndb_host:$ndb_mgmd_port;nodeid="
-
# Start management server as deamon
-NDB_ID="1"
-NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID
-
# Edit file system path and ports in config file
if [ $initial_ndb ] ; then
sed \
- -e s,"CHOOSE_MaxNoOfConcurrentOperations",$ndb_con_op,g \
- -e s,"CHOOSE_DataMemory",$ndb_dmem,g \
- -e s,"CHOOSE_IndexMemory",$ndb_imem,g \
- -e s,"CHOOSE_Discless",$ndb_discless,g \
+ -e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \
+ -e s,"CHOOSE_DataMemory","$ndb_dmem",g \
+ -e s,"CHOOSE_IndexMemory","$ndb_imem",g \
+ -e s,"CHOOSE_Diskless","$ndb_diskless",g \
-e s,"CHOOSE_HOSTNAME_".*,"$ndb_host",g \
- -e s,"CHOOSE_FILESYSTEM_NODE_2","$fs_name_2",g \
- -e s,"CHOOSE_FILESYSTEM_NODE_3","$fs_name_3",g \
- -e s,"CHOOSE_PORT_MGM",$ndb_mgmd_port,g \
- -e s,"CHOOSE_PORT_TRANSPORTER",$port_transporter,g \
+ -e s,"CHOOSE_FILESYSTEM","$fs_ndb",g \
+ -e s,"CHOOSE_PORT_MGM","$ndb_mgmd_port",g \
+ -e s,"CHOOSE_PORT_TRANSPORTER","$port_transporter",g \
< ndb/ndb_config_2_node.ini \
- > "$fs_mgm_1/config.ini"
+ > "$fs_ndb/config.ini"
fi
-if ( cd $fs_mgm_1 ; echo $NDB_CONNECTSTRING > $cfgfile ; $exec_mgmtsrvr -d -c config.ini ) ; then :; else
+rm -f "$cfgfile" 2>&1 | cat > /dev/null
+rm -f "$fs_ndb/$cfgfile" 2>&1 | cat > /dev/null
+
+if ( cd "$fs_ndb" ; $exec_mgmtsrvr -d -c config.ini ) ; then :; else
echo "Unable to start $exec_mgmtsrvr from `pwd`"
exit 1
fi
-cat `find $fs_ndb -name 'node*.pid'` > $pidfile
+cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
# Start database node
-NDB_ID="2"
-NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID
-echo "Starting ndbd connectstring=\""$NDB_CONNECTSTRING\"
-( cd $fs_ndb_2 ; echo $NDB_CONNECTSTRING > $cfgfile ; $exec_ndb -d $flags_ndb & )
+echo "Starting ndbd"
+( cd "$fs_ndb" ; $exec_ndb -d $flags_ndb & )
-cat `find $fs_ndb -name 'node*.pid'` > $pidfile
+cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
# Start database node
-NDB_ID="3"
-NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID
-echo "Starting ndbd connectstring=\""$NDB_CONNECTSTRING\"
-( cd $fs_ndb_3 ; echo $NDB_CONNECTSTRING > $cfgfile ; $exec_ndb -d $flags_ndb & )
+echo "Starting ndbd"
+( cd "$fs_ndb" ; $exec_ndb -d $flags_ndb & )
-cat `find $fs_ndb -name 'node*.pid'` > $pidfile
+cat `find "$fs_ndb" -name 'ndb_*.pid'` > "$fs_ndb/$pidfile"
# test if Ndb Cluster starts properly
echo "Waiting for started..."
-NDB_ID="11"
-NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID
if ( $exec_waiter ) | grep "NDBT_ProgramExit: 0 - OK"; then :; else
echo "Ndbcluster startup failed"
exit 1
fi
-echo $NDB_CONNECTSTRING > $cfgfile
-
-cat `find $fs_ndb -name 'node*.pid'` > $pidfile
+cat `find "$fs_ndb" -name 'ndb_*.pid'` > $fs_ndb/$pidfile
status_ndbcluster
}
status_ndbcluster() {
-# Start management client
-
-echo "show" | $exec_mgmtclient $ndb_host $ndb_mgmd_port
+ # Start management client
+ echo "show" | $exec_mgmtclient
}
stop_default_ndbcluster() {
@@ -206,26 +184,21 @@ stop_default_ndbcluster() {
# exit 0
#fi
-if [ ! -f $cfgfile ] ; then
- echo "$cfgfile missing"
- exit 1
-fi
-
-ndb_host=`cat $cfgfile | sed -e "s,.*host=\(.*\)\:.*,\1,1"`
-ndb_mgmd_port=`cat $cfgfile | sed -e "s,.*host=$ndb_host\:\([0-9]*\).*,\1,1"`
+#if [ ! -f $cfgfile ] ; then
+# echo "$cfgfile missing"
+# exit 1
+#fi
# Start management client
-exec_mgmtclient="$exec_mgmtclient --try-reconnect=1 $ndb_host $ndb_mgmd_port"
-
-echo "$exec_mgmtclient"
-echo "all stop" | $exec_mgmtclient
+exec_mgmtclient="$exec_mgmtclient --try-reconnect=1"
-sleep 5
+echo "all stop" | $exec_mgmtclient 2>&1 | cat > /dev/null
+echo "3 stop" | $exec_mgmtclient 2>&1 | cat > /dev/null
-if [ -f $pidfile ] ; then
- kill `cat $pidfile` 2> /dev/null
- rm $pidfile
+if [ -f "$fs_ndb/$pidfile" ] ; then
+ kill -9 `cat "$fs_ndb/$pidfile"` 2> /dev/null
+ rm "$fs_ndb/$pidfile"
fi
}
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index 5d50a3da666..e85ad303564 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -476,3 +476,9 @@ alter table t1 drop key no_such_key;
ERROR 42000: Can't DROP 'no_such_key'; check that column/key exists
alter table t1 drop key a;
drop table t1;
+create table t1 (a int);
+alter table t1 rename to `t1\\`;
+ERROR 42000: Incorrect table name 't1\\'
+rename table t1 to `t1\\`;
+ERROR 42000: Incorrect table name 't1\\'
+drop table t1;
diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result
index 6bc59d4771f..f5ec5f1f852 100644
--- a/mysql-test/r/auto_increment.result
+++ b/mysql-test/r/auto_increment.result
@@ -201,6 +201,24 @@ a b
202 5
203 6
204 7
+alter table t1 modify b mediumint;
+select * from t1 order by b;
+a b
+1 1
+200 2
+0 3
+201 4
+202 5
+203 6
+204 7
+create table t2 (a int);
+insert t2 values (1),(2);
+alter table t2 add b int auto_increment primary key;
+select * from t2;
+a b
+1 1
+2 2
+drop table t2;
delete from t1 where a=0;
update t1 set a=0 where b=5;
select * from t1 order by b;
diff --git a/mysql-test/r/binary.result b/mysql-test/r/binary.result
index f6ad190b05a..405de1158d6 100644
--- a/mysql-test/r/binary.result
+++ b/mysql-test/r/binary.result
@@ -59,9 +59,15 @@ concat("-",a,"-",b,"-")
-hello-hello-
select concat("-",a,"-",b,"-") from t1 where b="hello ";
concat("-",a,"-",b,"-")
+-hello-hello-
select concat("-",a,"-",b,"-") from t1 ignore index (b) where b="hello ";
concat("-",a,"-",b,"-")
+-hello-hello-
alter table t1 modify b tinytext not null, drop key b, add key (b(100));
+select concat("-",a,"-",b,"-") from t1;
+concat("-",a,"-",b,"-")
+-hello-hello-
+-hello2-hello2-
select concat("-",a,"-",b,"-") from t1 where b="hello ";
concat("-",a,"-",b,"-")
-hello-hello-
@@ -105,8 +111,26 @@ a b
aaa bbb
select charset(a), charset(b), charset(binary 'ccc') from t1 limit 1;
charset(a) charset(b) charset(binary 'ccc')
-latin1 binary latin1
+latin1 binary binary
select collation(a), collation(b), collation(binary 'ccc') from t1 limit 1;
collation(a) collation(b) collation(binary 'ccc')
-latin1_bin binary latin1_bin
+latin1_bin binary binary
+drop table t1;
+create table t1( firstname char(20), lastname char(20));
+insert into t1 values ("john","doe"),("John","Doe");
+select * from t1 where firstname='john' and firstname like binary 'john';
+firstname lastname
+john doe
+select * from t1 where firstname='john' and binary 'john' = firstname;
+firstname lastname
+john doe
+select * from t1 where firstname='john' and firstname = binary 'john';
+firstname lastname
+john doe
+select * from t1 where firstname='John' and firstname like binary 'john';
+firstname lastname
+john doe
+select * from t1 where firstname='john' and firstname like binary 'John';
+firstname lastname
+John Doe
drop table t1;
diff --git a/mysql-test/r/connect.result b/mysql-test/r/connect.result
index b5d0e4d9ae2..0e4583fbc96 100644
--- a/mysql-test/r/connect.result
+++ b/mysql-test/r/connect.result
@@ -42,6 +42,9 @@ show tables;
Tables_in_test table_type
update mysql.user set password=old_password("gambling2") where user=_binary"test";
flush privileges;
+set password="";
+set password='gambling3';
+ERROR HY000: Password hash should be a 41-digit hexadecimal number
set password=old_password('gambling3');
show tables;
Tables_in_mysql table_type
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index 699485ff3f7..92c825f547d 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -1,5 +1,5 @@
drop table if exists t1,t2,t3;
-drop database if exists test_$1;
+drop database if exists mysqltest;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -58,18 +58,18 @@ select 1ea10.1a20,1e+ 1e+10 from 1ea10;
drop table 1ea10;
create table t1 (t1.index int);
drop table t1;
-drop database if exists test_$1;
+drop database if exists mysqltest;
Warnings:
-Note 1008 Can't drop database 'test_$1'; database doesn't exist
-create database test_$1;
-create table test_$1.$test1 (a$1 int, $b int, c$ int);
-insert into test_$1.$test1 values (1,2,3);
-select a$1, $b, c$ from test_$1.$test1;
+Note 1008 Can't drop database 'mysqltest'; database doesn't exist
+create database mysqltest;
+create table mysqltest.$test1 (a$1 int, $b int, c$ int);
+insert into mysqltest.$test1 values (1,2,3);
+select a$1, $b, c$ from mysqltest.$test1;
a$1 $b c$
1 2 3
-create table test_$1.test2$ (a int);
-drop table test_$1.test2$;
-drop database test_$1;
+create table mysqltest.test2$ (a int);
+drop table mysqltest.test2$;
+drop database mysqltest;
create table `` (a int);
ERROR 42000: Incorrect table name ''
drop table if exists ``;
@@ -320,9 +320,9 @@ t3 CREATE TABLE `t3` (
select * from t3;
id name
drop table t2, t3;
-create database test_$1;
-create table test_$1.t3 like t1;
-create temporary table t3 like test_$1.t3;
+create database mysqltest;
+create table mysqltest.t3 like t1;
+create temporary table t3 like mysqltest.t3;
show create table t3;
Table Create Table
t3 CREATE TEMPORARY TABLE `t3` (
@@ -339,7 +339,7 @@ t2 CREATE TABLE `t2` (
select * from t2;
id name
create table t3 like t1;
-create table t3 like test_$1.t3;
+create table t3 like mysqltest.t3;
ERROR 42S01: Table 't3' already exists
create table non_existing_database.t1 like t1;
Got one of the listed errors
@@ -351,7 +351,7 @@ create table t3 like `a/a`;
ERROR 42000: Incorrect table name 'a/a'
drop table t1, t2, t3;
drop table t3;
-drop database test_$1;
+drop database mysqltest;
SET SESSION storage_engine="heap";
SELECT @@storage_engine;
@@storage_engine
@@ -488,12 +488,12 @@ Note 1291 Column 'cset' has duplicated value 'b' in SET
Note 1291 Column 'cset' has duplicated value 'B' in SET
Note 1291 Column 'cset' has duplicated value 'd' in SET
drop table t1, t2, t3;
-create database test_$1;
-use test_$1;
+create database mysqltest;
+use mysqltest;
select database();
database()
-test_$1
-drop database test_$1;
+mysqltest
+drop database mysqltest;
select database();
database()
NULL
diff --git a/mysql-test/r/create_select_tmp.result b/mysql-test/r/create_select_tmp.result
index 09ffc9013c7..b99bf3e3591 100644
--- a/mysql-test/r/create_select_tmp.result
+++ b/mysql-test/r/create_select_tmp.result
@@ -1,19 +1,19 @@
drop table if exists t1, t2;
CREATE TABLE t1 ( a int );
INSERT INTO t1 VALUES (1),(2),(1);
-CREATE TABLE t2 ( PRIMARY KEY (a) ) TYPE=INNODB SELECT a FROM t1;
+CREATE TABLE t2 ( PRIMARY KEY (a) ) ENGINE=INNODB SELECT a FROM t1;
ERROR 23000: Duplicate entry '1' for key 1
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
-CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) TYPE=INNODB SELECT a FROM t1;
+CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) ENGINE=INNODB SELECT a FROM t1;
ERROR 23000: Duplicate entry '1' for key 1
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
-CREATE TABLE t2 ( PRIMARY KEY (a) ) TYPE=MYISAM SELECT a FROM t1;
+CREATE TABLE t2 ( PRIMARY KEY (a) ) ENGINE=MYISAM SELECT a FROM t1;
ERROR 23000: Duplicate entry '1' for key 1
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
-CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) TYPE=MYISAM SELECT a FROM t1;
+CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) ENGINE=MYISAM SELECT a FROM t1;
ERROR 23000: Duplicate entry '1' for key 1
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
diff --git a/mysql-test/r/csv.result b/mysql-test/r/csv.result
new file mode 100644
index 00000000000..ea0d34271b5
--- /dev/null
+++ b/mysql-test/r/csv.result
@@ -0,0 +1,4931 @@
+drop table if exists t1,t2,t3,t4;
+CREATE TABLE t1 (
+Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
+) ENGINE = CSV;
+INSERT INTO t1 VALUES (9410,9412);
+select period from t1;
+period
+9410
+select * from t1;
+Period Varor_period
+9410 9412
+select t1.* from t1;
+Period Varor_period
+9410 9412
+CREATE TABLE t2 (
+auto int not null,
+fld1 int(6) unsigned zerofill DEFAULT '000000' NOT NULL,
+companynr tinyint(2) unsigned zerofill DEFAULT '00' NOT NULL,
+fld3 char(30) DEFAULT '' NOT NULL,
+fld4 char(35) DEFAULT '' NOT NULL,
+fld5 char(35) DEFAULT '' NOT NULL,
+fld6 char(4) DEFAULT '' NOT NULL
+) ENGINE = CSV;
+select t2.fld3 from t2 where companynr = 58 and fld3 like "%imaginable%";
+fld3
+imaginable
+select fld3 from t2 where fld3 like "%cultivation" ;
+fld3
+cultivation
+select t2.fld3,companynr from t2 where companynr = 57+1 order by fld3;
+fld3 companynr
+concoct 58
+druggists 58
+engrossing 58
+Eurydice 58
+exclaimers 58
+ferociousness 58
+hopelessness 58
+Huey 58
+imaginable 58
+judges 58
+merging 58
+ostrich 58
+peering 58
+Phelps 58
+presumes 58
+Ruth 58
+sentences 58
+Shylock 58
+straggled 58
+synergy 58
+thanking 58
+tying 58
+unlocks 58
+select fld3,companynr from t2 where companynr = 58 order by fld3;
+fld3 companynr
+concoct 58
+druggists 58
+engrossing 58
+Eurydice 58
+exclaimers 58
+ferociousness 58
+hopelessness 58
+Huey 58
+imaginable 58
+judges 58
+merging 58
+ostrich 58
+peering 58
+Phelps 58
+presumes 58
+Ruth 58
+sentences 58
+Shylock 58
+straggled 58
+synergy 58
+thanking 58
+tying 58
+unlocks 58
+select fld3 from t2 order by fld3 desc limit 10;
+fld3
+youthfulness
+yelped
+Wotan
+workers
+Witt
+witchcraft
+Winsett
+Willy
+willed
+wildcats
+select fld3 from t2 order by fld3 desc limit 5;
+fld3
+youthfulness
+yelped
+Wotan
+workers
+Witt
+select fld3 from t2 order by fld3 desc limit 5,5;
+fld3
+witchcraft
+Winsett
+Willy
+willed
+wildcats
+UPDATE t2 SET fld3="foo" WHERE fld3="b%";
+select fld3 from t2;
+fld3
+Omaha
+breaking
+Romans
+intercepted
+bewilderingly
+astound
+admonishing
+sumac
+flanking
+combed
+subjective
+scatterbrain
+Eulerian
+dubbed
+Kane
+overlay
+perturb
+goblins
+annihilates
+Wotan
+snatching
+concludes
+laterally
+yelped
+grazing
+Baird
+celery
+misunderstander
+handgun
+foldout
+mystic
+succumbed
+Nabisco
+fingerings
+aging
+afield
+ammonium
+boat
+intelligibility
+Augustine
+teethe
+dreaded
+scholastics
+audiology
+wallet
+parters
+eschew
+quitter
+neat
+Steinberg
+jarring
+tinily
+balled
+persist
+attainments
+fanatic
+measures
+rightfulness
+capably
+impulsive
+starlet
+terminators
+untying
+announces
+featherweight
+pessimist
+daughter
+decliner
+lawgiver
+stated
+readable
+attrition
+cascade
+motors
+interrogate
+pests
+stairway
+dopers
+testicle
+Parsifal
+leavings
+postulation
+squeaking
+contrasted
+leftover
+whiteners
+erases
+Punjab
+Merritt
+Quixotism
+sweetish
+dogging
+scornfully
+bellow
+bills
+cupboard
+sureties
+puddings
+tapestry
+fetters
+bivalves
+incurring
+Adolph
+pithed
+emergency
+Miles
+trimmings
+tragedies
+skulking
+flint
+flopping
+relaxing
+offload
+suites
+lists
+animized
+multilayer
+standardizes
+Judas
+vacuuming
+dentally
+humanness
+inch
+Weissmuller
+irresponsibly
+luckily
+culled
+medical
+bloodbath
+subschema
+animals
+Micronesia
+repetitions
+Antares
+ventilate
+pityingly
+interdependent
+Graves
+neonatal
+scribbled
+chafe
+honoring
+realtor
+elite
+funereal
+abrogating
+sorters
+Conley
+lectured
+Abraham
+Hawaii
+cage
+hushes
+Simla
+reporters
+Dutchman
+descendants
+groupings
+dissociate
+coexist
+Beebe
+Taoism
+Connally
+fetched
+checkpoints
+rusting
+galling
+obliterates
+traitor
+resumes
+analyzable
+terminator
+gritty
+firearm
+minima
+Selfridge
+disable
+witchcraft
+betroth
+Manhattanize
+imprint
+peeked
+swelling
+interrelationships
+riser
+Gandhian
+peacock
+bee
+kanji
+dental
+scarf
+chasm
+insolence
+syndicate
+alike
+imperial
+convulsion
+railway
+validate
+normalizes
+comprehensive
+chewing
+denizen
+schemer
+chronicle
+Kline
+Anatole
+partridges
+brunch
+recruited
+dimensions
+Chicana
+announced
+praised
+employing
+linear
+quagmire
+western
+relishing
+serving
+scheduling
+lore
+eventful
+arteriole
+disentangle
+cured
+Fenton
+avoidable
+drains
+detectably
+husky
+impelling
+undoes
+evened
+squeezes
+destroyer
+rudeness
+beaner
+boorish
+Everhart
+encompass
+mushrooms
+Alison
+externally
+pellagra
+cult
+creek
+Huffman
+Majorca
+governing
+gadfly
+reassigned
+intentness
+craziness
+psychic
+squabbled
+burlesque
+capped
+extracted
+DiMaggio
+exclamation
+subdirectory
+fangs
+buyer
+pithing
+transistorizing
+nonbiodegradable
+dislocate
+monochromatic
+batting
+postcondition
+catalog
+Remus
+devices
+bike
+qualify
+detained
+commended
+civilize
+Elmhurst
+anesthetizing
+deaf
+Brigham
+title
+coarse
+combinations
+grayness
+innumerable
+Caroline
+fatty
+eastbound
+inexperienced
+hoarder
+scotch
+passport
+strategic
+gated
+flog
+Pipestone
+Dar
+Corcoran
+flyers
+competitions
+suppliers
+skips
+institutes
+troop
+connective
+denies
+polka
+observations
+askers
+homeless
+Anna
+subdirectories
+decaying
+outwitting
+Harpy
+crazed
+suffocate
+provers
+technically
+Franklinizations
+considered
+tinnily
+uninterruptedly
+whistled
+automate
+gutting
+surreptitious
+Choctaw
+cooks
+millivolt
+counterpoise
+Gothicism
+feminine
+metaphysically
+sanding
+contributorily
+receivers
+adjourn
+straggled
+druggists
+thanking
+ostrich
+hopelessness
+Eurydice
+excitation
+presumes
+imaginable
+concoct
+peering
+Phelps
+ferociousness
+sentences
+unlocks
+engrossing
+Ruth
+tying
+exclaimers
+synergy
+Huey
+merging
+judges
+Shylock
+Miltonism
+hen
+honeybee
+towers
+dilutes
+numerals
+democracy
+Ibero-
+invalids
+behavior
+accruing
+relics
+rackets
+Fischbein
+phony
+cross
+cleanup
+conspirator
+label
+university
+cleansed
+ballgown
+starlet
+aqueous
+portrayal
+despising
+distort
+palmed
+faced
+silverware
+assessor
+spiders
+artificially
+reminiscence
+Mexican
+obnoxious
+fragile
+apprehensible
+births
+garages
+panty
+anteater
+displacement
+drovers
+patenting
+far
+shrieks
+aligning
+pragmatism
+fevers
+reexamines
+occupancies
+sweats
+modulators
+demand
+Madeira
+Viennese
+chillier
+wildcats
+gentle
+Angles
+accuracies
+toggle
+Mendelssohn
+behaviorally
+Rochford
+mirror
+Modula
+clobbering
+chronography
+Eskimoizeds
+British
+pitfalls
+verify
+scatter
+Aztecan
+acuity
+sinking
+beasts
+Witt
+physicists
+folksong
+strokes
+crowder
+merry
+cadenced
+alimony
+principled
+golfing
+undiscovered
+irritates
+patriots
+rooms
+towering
+displease
+photosensitive
+inking
+gainers
+leaning
+hydrant
+preserve
+blinded
+interactions
+Barry
+whiteness
+pastimes
+Edenization
+Muscat
+assassinated
+labeled
+glacial
+implied
+bibliographies
+Buchanan
+forgivably
+innuendo
+den
+submarines
+mouthful
+expiring
+unfulfilled
+precession
+nullified
+affects
+Cynthia
+Chablis
+betterments
+advertising
+rubies
+southwest
+superstitious
+tabernacle
+silk
+handsomest
+Persian
+analog
+complex
+Taoist
+suspend
+relegated
+awesome
+Bruxelles
+imprecisely
+televise
+braking
+true
+disappointing
+navally
+circus
+beetles
+trumps
+fourscore
+Blackfoots
+Grady
+quiets
+floundered
+profundity
+Garrisonian
+Strauss
+cemented
+contrition
+mutations
+exhibits
+tits
+mate
+arches
+Moll
+ropers
+bombast
+difficultly
+adsorption
+definiteness
+cultivation
+heals
+Heusen
+target
+cited
+congresswoman
+Katherine
+titter
+aspire
+Mardis
+Nadia
+estimating
+stuck
+fifteenth
+Colombo
+survey
+staffing
+obtain
+loaded
+slaughtered
+lights
+circumference
+dull
+weekly
+wetness
+visualized
+Tannenbaum
+moribund
+demultiplex
+lockings
+thugs
+unnerves
+abut
+Chippewa
+stratifications
+signaled
+Italianizes
+algorithmic
+paranoid
+camping
+signifying
+Patrice
+search
+Angeles
+semblance
+taxed
+Beatrice
+retrace
+lockout
+grammatic
+helmsman
+uniform
+hamming
+disobedience
+captivated
+transferals
+cartographer
+aims
+Pakistani
+burglarized
+saucepans
+lacerating
+corny
+megabytes
+chancellor
+bulk
+commits
+meson
+deputies
+northeaster
+dipole
+machining
+therefore
+Telefunken
+salvaging
+Corinthianizes
+restlessly
+bromides
+generalized
+mishaps
+quelling
+spiritual
+beguiles
+Trobriand
+fleeing
+Armour
+chin
+provers
+aeronautic
+voltage
+sash
+anaerobic
+simultaneous
+accumulating
+Medusan
+shouted
+freakish
+index
+commercially
+mistiness
+endpoint
+straight
+flurried
+denotative
+coming
+commencements
+gentleman
+gifted
+Shanghais
+sportswriting
+sloping
+navies
+leaflet
+shooter
+Joplin
+babies
+subdivision
+burstiness
+belted
+assails
+admiring
+swaying
+Goldstine
+fitting
+Norwalk
+weakening
+analogy
+deludes
+cokes
+Clayton
+exhausts
+causality
+sating
+icon
+throttles
+communicants
+dehydrate
+priceless
+publicly
+incidentals
+commonplace
+mumbles
+furthermore
+cautioned
+parametrized
+registration
+sadly
+positioning
+babysitting
+eternal
+hoarder
+congregates
+rains
+workers
+sags
+unplug
+garage
+boulder
+hollowly
+specifics
+Teresa
+Winsett
+convenient
+buckboards
+amenities
+resplendent
+priding
+configurations
+untidiness
+Brice
+sews
+participated
+Simon
+certificates
+Fitzpatrick
+Evanston
+misted
+textures
+save
+count
+rightful
+chaperone
+Lizzy
+clenched
+effortlessly
+accessed
+beaters
+Hornblower
+vests
+indulgences
+infallibly
+unwilling
+excrete
+spools
+crunches
+overestimating
+ineffective
+humiliation
+sophomore
+star
+rifles
+dialysis
+arriving
+indulge
+clockers
+languages
+Antarctica
+percentage
+ceiling
+specification
+regimented
+ciphers
+pictures
+serpents
+allot
+realized
+mayoral
+opaquely
+hostess
+fiftieth
+incorrectly
+decomposition
+stranglings
+mixture
+electroencephalography
+similarities
+charges
+freest
+Greenberg
+tinting
+expelled
+warm
+smoothed
+deductions
+Romano
+bitterroot
+corset
+securing
+environing
+cute
+Crays
+heiress
+inform
+avenge
+universals
+Kinsey
+ravines
+bestseller
+equilibrium
+extents
+relatively
+pressure
+critiques
+befouled
+rightfully
+mechanizing
+Latinizes
+timesharing
+Aden
+embassies
+males
+shapelessly
+genres
+mastering
+Newtonian
+finishers
+abates
+teem
+kiting
+stodgy
+scalps
+feed
+guitars
+airships
+store
+denounces
+Pyle
+Saxony
+serializations
+Peruvian
+taxonomically
+kingdom
+stint
+Sault
+faithful
+Ganymede
+tidiness
+gainful
+contrary
+Tipperary
+tropics
+theorizers
+renew
+already
+terminal
+Hegelian
+hypothesizer
+warningly
+journalizing
+nested
+Lars
+saplings
+foothill
+labeled
+imperiously
+reporters
+furnishings
+precipitable
+discounts
+excises
+Stalin
+despot
+ripeness
+Arabia
+unruly
+mournfulness
+boom
+slaughter
+Sabine
+handy
+rural
+organizer
+shipyard
+civics
+inaccuracy
+rules
+juveniles
+comprised
+investigations
+stabilizes
+seminaries
+Hunter
+sporty
+test
+weasels
+CERN
+tempering
+afore
+Galatean
+techniques
+error
+veranda
+severely
+Cassites
+forthcoming
+guides
+vanish
+lied
+sawtooth
+fated
+gradually
+widens
+preclude
+Jobrel
+hooker
+rainstorm
+disconnects
+cruelty
+exponentials
+affective
+arteries
+Crosby
+acquaint
+evenhandedly
+percentage
+disobedience
+humility
+gleaning
+petted
+bloater
+minion
+marginal
+apiary
+measures
+precaution
+repelled
+primary
+coverings
+Artemia
+navigate
+spatial
+Gurkha
+meanwhile
+Melinda
+Butterfield
+Aldrich
+previewing
+glut
+unaffected
+inmate
+mineral
+impending
+meditation
+ideas
+miniaturizes
+lewdly
+title
+youthfulness
+creak
+Chippewa
+clamored
+freezes
+forgivably
+reduce
+McGovern
+Nazis
+epistle
+socializes
+conceptions
+Kevin
+uncovering
+chews
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+raining
+infest
+compartment
+minting
+ducks
+roped
+waltz
+Lillian
+repressions
+chillingly
+noncritical
+lithograph
+spongers
+parenthood
+posed
+instruments
+filial
+fixedly
+relives
+Pandora
+watering
+ungrateful
+secures
+chastisers
+icon
+reuniting
+imagining
+abiding
+omnisciently
+Britannic
+scholastics
+mechanics
+humidly
+masterpiece
+however
+Mendelian
+jarred
+scolds
+infatuate
+willed
+joyfully
+Microsoft
+fibrosities
+Baltimorean
+equestrian
+Goodrich
+apish
+Adlerian
+Tropez
+nouns
+distracting
+mutton
+bridgeable
+stickers
+transcontinental
+amateurish
+Gandhian
+stratified
+chamberlains
+creditably
+philosophic
+ores
+Carleton
+tape
+afloat
+goodness
+welcoming
+Pinsky
+halting
+bibliography
+decoding
+variance
+allowed
+dire
+dub
+poisoning
+Iraqis
+heaving
+population
+bomb
+Majorca
+Gershwins
+explorers
+libretto
+occurred
+Lagos
+rats
+bankruptcies
+crying
+unexpected
+accessed
+colorful
+versatility
+cosy
+Darius
+mastering
+Asiaticizations
+offerers
+uncles
+sleepwalk
+Ernestine
+checksumming
+stopped
+sicker
+Italianization
+alphabetic
+pharmaceutic
+creator
+chess
+charcoal
+Epiphany
+bulldozes
+Pygmalion
+caressing
+Palestine
+regimented
+scars
+realest
+diffusing
+clubroom
+Blythe
+ahead
+reviver
+retransmitting
+landslide
+Eiffel
+absentee
+aye
+forked
+Peruvianizes
+clerked
+tutor
+boulevard
+shuttered
+quotes
+Caltech
+Mossberg
+kept
+roundly
+features
+imaginable
+controller
+racial
+uprisings
+narrowed
+cannot
+vest
+famine
+sugars
+exterminated
+belays
+Hodges
+translatable
+duality
+recording
+rouses
+poison
+attitude
+dusted
+encompasses
+presentation
+Kantian
+imprecision
+saving
+maternal
+hewed
+kerosene
+Cubans
+photographers
+nymph
+bedlam
+north
+Schoenberg
+botany
+curs
+solidification
+inheritresses
+stiller
+t1
+suite
+ransomer
+Willy
+Rena
+Seattle
+relaxes
+exclaim
+exclaim
+implicated
+distinguish
+assayed
+homeowner
+and
+stealth
+coinciding
+founder
+environing
+jewelry
+lemons
+brokenness
+bedpost
+assurers
+annoyers
+affixed
+warbling
+seriously
+boasted
+Chantilly
+Iranizes
+violinist
+extramarital
+spates
+cloakroom
+gazer
+hand
+tucked
+gems
+clinker
+refiner
+callus
+leopards
+comfortingly
+generically
+getters
+sexually
+spear
+serums
+Italianization
+attendants
+spies
+Anthony
+planar
+cupped
+cleanser
+commuters
+honeysuckle
+orphanage
+skies
+crushers
+Puritan
+squeezer
+bruises
+bonfire
+Colombo
+nondecreasing
+UPDATE t2 SET fld3="bar" WHERE fld3="s%";
+select fld3 from t2;
+fld3
+Omaha
+breaking
+Romans
+intercepted
+bewilderingly
+astound
+admonishing
+sumac
+flanking
+combed
+subjective
+scatterbrain
+Eulerian
+dubbed
+Kane
+overlay
+perturb
+goblins
+annihilates
+Wotan
+snatching
+concludes
+laterally
+yelped
+grazing
+Baird
+celery
+misunderstander
+handgun
+foldout
+mystic
+succumbed
+Nabisco
+fingerings
+aging
+afield
+ammonium
+boat
+intelligibility
+Augustine
+teethe
+dreaded
+scholastics
+audiology
+wallet
+parters
+eschew
+quitter
+neat
+Steinberg
+jarring
+tinily
+balled
+persist
+attainments
+fanatic
+measures
+rightfulness
+capably
+impulsive
+starlet
+terminators
+untying
+announces
+featherweight
+pessimist
+daughter
+decliner
+lawgiver
+stated
+readable
+attrition
+cascade
+motors
+interrogate
+pests
+stairway
+dopers
+testicle
+Parsifal
+leavings
+postulation
+squeaking
+contrasted
+leftover
+whiteners
+erases
+Punjab
+Merritt
+Quixotism
+sweetish
+dogging
+scornfully
+bellow
+bills
+cupboard
+sureties
+puddings
+tapestry
+fetters
+bivalves
+incurring
+Adolph
+pithed
+emergency
+Miles
+trimmings
+tragedies
+skulking
+flint
+flopping
+relaxing
+offload
+suites
+lists
+animized
+multilayer
+standardizes
+Judas
+vacuuming
+dentally
+humanness
+inch
+Weissmuller
+irresponsibly
+luckily
+culled
+medical
+bloodbath
+subschema
+animals
+Micronesia
+repetitions
+Antares
+ventilate
+pityingly
+interdependent
+Graves
+neonatal
+scribbled
+chafe
+honoring
+realtor
+elite
+funereal
+abrogating
+sorters
+Conley
+lectured
+Abraham
+Hawaii
+cage
+hushes
+Simla
+reporters
+Dutchman
+descendants
+groupings
+dissociate
+coexist
+Beebe
+Taoism
+Connally
+fetched
+checkpoints
+rusting
+galling
+obliterates
+traitor
+resumes
+analyzable
+terminator
+gritty
+firearm
+minima
+Selfridge
+disable
+witchcraft
+betroth
+Manhattanize
+imprint
+peeked
+swelling
+interrelationships
+riser
+Gandhian
+peacock
+bee
+kanji
+dental
+scarf
+chasm
+insolence
+syndicate
+alike
+imperial
+convulsion
+railway
+validate
+normalizes
+comprehensive
+chewing
+denizen
+schemer
+chronicle
+Kline
+Anatole
+partridges
+brunch
+recruited
+dimensions
+Chicana
+announced
+praised
+employing
+linear
+quagmire
+western
+relishing
+serving
+scheduling
+lore
+eventful
+arteriole
+disentangle
+cured
+Fenton
+avoidable
+drains
+detectably
+husky
+impelling
+undoes
+evened
+squeezes
+destroyer
+rudeness
+beaner
+boorish
+Everhart
+encompass
+mushrooms
+Alison
+externally
+pellagra
+cult
+creek
+Huffman
+Majorca
+governing
+gadfly
+reassigned
+intentness
+craziness
+psychic
+squabbled
+burlesque
+capped
+extracted
+DiMaggio
+exclamation
+subdirectory
+fangs
+buyer
+pithing
+transistorizing
+nonbiodegradable
+dislocate
+monochromatic
+batting
+postcondition
+catalog
+Remus
+devices
+bike
+qualify
+detained
+commended
+civilize
+Elmhurst
+anesthetizing
+deaf
+Brigham
+title
+coarse
+combinations
+grayness
+innumerable
+Caroline
+fatty
+eastbound
+inexperienced
+hoarder
+scotch
+passport
+strategic
+gated
+flog
+Pipestone
+Dar
+Corcoran
+flyers
+competitions
+suppliers
+skips
+institutes
+troop
+connective
+denies
+polka
+observations
+askers
+homeless
+Anna
+subdirectories
+decaying
+outwitting
+Harpy
+crazed
+suffocate
+provers
+technically
+Franklinizations
+considered
+tinnily
+uninterruptedly
+whistled
+automate
+gutting
+surreptitious
+Choctaw
+cooks
+millivolt
+counterpoise
+Gothicism
+feminine
+metaphysically
+sanding
+contributorily
+receivers
+adjourn
+straggled
+druggists
+thanking
+ostrich
+hopelessness
+Eurydice
+excitation
+presumes
+imaginable
+concoct
+peering
+Phelps
+ferociousness
+sentences
+unlocks
+engrossing
+Ruth
+tying
+exclaimers
+synergy
+Huey
+merging
+judges
+Shylock
+Miltonism
+hen
+honeybee
+towers
+dilutes
+numerals
+democracy
+Ibero-
+invalids
+behavior
+accruing
+relics
+rackets
+Fischbein
+phony
+cross
+cleanup
+conspirator
+label
+university
+cleansed
+ballgown
+starlet
+aqueous
+portrayal
+despising
+distort
+palmed
+faced
+silverware
+assessor
+spiders
+artificially
+reminiscence
+Mexican
+obnoxious
+fragile
+apprehensible
+births
+garages
+panty
+anteater
+displacement
+drovers
+patenting
+far
+shrieks
+aligning
+pragmatism
+fevers
+reexamines
+occupancies
+sweats
+modulators
+demand
+Madeira
+Viennese
+chillier
+wildcats
+gentle
+Angles
+accuracies
+toggle
+Mendelssohn
+behaviorally
+Rochford
+mirror
+Modula
+clobbering
+chronography
+Eskimoizeds
+British
+pitfalls
+verify
+scatter
+Aztecan
+acuity
+sinking
+beasts
+Witt
+physicists
+folksong
+strokes
+crowder
+merry
+cadenced
+alimony
+principled
+golfing
+undiscovered
+irritates
+patriots
+rooms
+towering
+displease
+photosensitive
+inking
+gainers
+leaning
+hydrant
+preserve
+blinded
+interactions
+Barry
+whiteness
+pastimes
+Edenization
+Muscat
+assassinated
+labeled
+glacial
+implied
+bibliographies
+Buchanan
+forgivably
+innuendo
+den
+submarines
+mouthful
+expiring
+unfulfilled
+precession
+nullified
+affects
+Cynthia
+Chablis
+betterments
+advertising
+rubies
+southwest
+superstitious
+tabernacle
+silk
+handsomest
+Persian
+analog
+complex
+Taoist
+suspend
+relegated
+awesome
+Bruxelles
+imprecisely
+televise
+braking
+true
+disappointing
+navally
+circus
+beetles
+trumps
+fourscore
+Blackfoots
+Grady
+quiets
+floundered
+profundity
+Garrisonian
+Strauss
+cemented
+contrition
+mutations
+exhibits
+tits
+mate
+arches
+Moll
+ropers
+bombast
+difficultly
+adsorption
+definiteness
+cultivation
+heals
+Heusen
+target
+cited
+congresswoman
+Katherine
+titter
+aspire
+Mardis
+Nadia
+estimating
+stuck
+fifteenth
+Colombo
+survey
+staffing
+obtain
+loaded
+slaughtered
+lights
+circumference
+dull
+weekly
+wetness
+visualized
+Tannenbaum
+moribund
+demultiplex
+lockings
+thugs
+unnerves
+abut
+Chippewa
+stratifications
+signaled
+Italianizes
+algorithmic
+paranoid
+camping
+signifying
+Patrice
+search
+Angeles
+semblance
+taxed
+Beatrice
+retrace
+lockout
+grammatic
+helmsman
+uniform
+hamming
+disobedience
+captivated
+transferals
+cartographer
+aims
+Pakistani
+burglarized
+saucepans
+lacerating
+corny
+megabytes
+chancellor
+bulk
+commits
+meson
+deputies
+northeaster
+dipole
+machining
+therefore
+Telefunken
+salvaging
+Corinthianizes
+restlessly
+bromides
+generalized
+mishaps
+quelling
+spiritual
+beguiles
+Trobriand
+fleeing
+Armour
+chin
+provers
+aeronautic
+voltage
+sash
+anaerobic
+simultaneous
+accumulating
+Medusan
+shouted
+freakish
+index
+commercially
+mistiness
+endpoint
+straight
+flurried
+denotative
+coming
+commencements
+gentleman
+gifted
+Shanghais
+sportswriting
+sloping
+navies
+leaflet
+shooter
+Joplin
+babies
+subdivision
+burstiness
+belted
+assails
+admiring
+swaying
+Goldstine
+fitting
+Norwalk
+weakening
+analogy
+deludes
+cokes
+Clayton
+exhausts
+causality
+sating
+icon
+throttles
+communicants
+dehydrate
+priceless
+publicly
+incidentals
+commonplace
+mumbles
+furthermore
+cautioned
+parametrized
+registration
+sadly
+positioning
+babysitting
+eternal
+hoarder
+congregates
+rains
+workers
+sags
+unplug
+garage
+boulder
+hollowly
+specifics
+Teresa
+Winsett
+convenient
+buckboards
+amenities
+resplendent
+priding
+configurations
+untidiness
+Brice
+sews
+participated
+Simon
+certificates
+Fitzpatrick
+Evanston
+misted
+textures
+save
+count
+rightful
+chaperone
+Lizzy
+clenched
+effortlessly
+accessed
+beaters
+Hornblower
+vests
+indulgences
+infallibly
+unwilling
+excrete
+spools
+crunches
+overestimating
+ineffective
+humiliation
+sophomore
+star
+rifles
+dialysis
+arriving
+indulge
+clockers
+languages
+Antarctica
+percentage
+ceiling
+specification
+regimented
+ciphers
+pictures
+serpents
+allot
+realized
+mayoral
+opaquely
+hostess
+fiftieth
+incorrectly
+decomposition
+stranglings
+mixture
+electroencephalography
+similarities
+charges
+freest
+Greenberg
+tinting
+expelled
+warm
+smoothed
+deductions
+Romano
+bitterroot
+corset
+securing
+environing
+cute
+Crays
+heiress
+inform
+avenge
+universals
+Kinsey
+ravines
+bestseller
+equilibrium
+extents
+relatively
+pressure
+critiques
+befouled
+rightfully
+mechanizing
+Latinizes
+timesharing
+Aden
+embassies
+males
+shapelessly
+genres
+mastering
+Newtonian
+finishers
+abates
+teem
+kiting
+stodgy
+scalps
+feed
+guitars
+airships
+store
+denounces
+Pyle
+Saxony
+serializations
+Peruvian
+taxonomically
+kingdom
+stint
+Sault
+faithful
+Ganymede
+tidiness
+gainful
+contrary
+Tipperary
+tropics
+theorizers
+renew
+already
+terminal
+Hegelian
+hypothesizer
+warningly
+journalizing
+nested
+Lars
+saplings
+foothill
+labeled
+imperiously
+reporters
+furnishings
+precipitable
+discounts
+excises
+Stalin
+despot
+ripeness
+Arabia
+unruly
+mournfulness
+boom
+slaughter
+Sabine
+handy
+rural
+organizer
+shipyard
+civics
+inaccuracy
+rules
+juveniles
+comprised
+investigations
+stabilizes
+seminaries
+Hunter
+sporty
+test
+weasels
+CERN
+tempering
+afore
+Galatean
+techniques
+error
+veranda
+severely
+Cassites
+forthcoming
+guides
+vanish
+lied
+sawtooth
+fated
+gradually
+widens
+preclude
+Jobrel
+hooker
+rainstorm
+disconnects
+cruelty
+exponentials
+affective
+arteries
+Crosby
+acquaint
+evenhandedly
+percentage
+disobedience
+humility
+gleaning
+petted
+bloater
+minion
+marginal
+apiary
+measures
+precaution
+repelled
+primary
+coverings
+Artemia
+navigate
+spatial
+Gurkha
+meanwhile
+Melinda
+Butterfield
+Aldrich
+previewing
+glut
+unaffected
+inmate
+mineral
+impending
+meditation
+ideas
+miniaturizes
+lewdly
+title
+youthfulness
+creak
+Chippewa
+clamored
+freezes
+forgivably
+reduce
+McGovern
+Nazis
+epistle
+socializes
+conceptions
+Kevin
+uncovering
+chews
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+raining
+infest
+compartment
+minting
+ducks
+roped
+waltz
+Lillian
+repressions
+chillingly
+noncritical
+lithograph
+spongers
+parenthood
+posed
+instruments
+filial
+fixedly
+relives
+Pandora
+watering
+ungrateful
+secures
+chastisers
+icon
+reuniting
+imagining
+abiding
+omnisciently
+Britannic
+scholastics
+mechanics
+humidly
+masterpiece
+however
+Mendelian
+jarred
+scolds
+infatuate
+willed
+joyfully
+Microsoft
+fibrosities
+Baltimorean
+equestrian
+Goodrich
+apish
+Adlerian
+Tropez
+nouns
+distracting
+mutton
+bridgeable
+stickers
+transcontinental
+amateurish
+Gandhian
+stratified
+chamberlains
+creditably
+philosophic
+ores
+Carleton
+tape
+afloat
+goodness
+welcoming
+Pinsky
+halting
+bibliography
+decoding
+variance
+allowed
+dire
+dub
+poisoning
+Iraqis
+heaving
+population
+bomb
+Majorca
+Gershwins
+explorers
+libretto
+occurred
+Lagos
+rats
+bankruptcies
+crying
+unexpected
+accessed
+colorful
+versatility
+cosy
+Darius
+mastering
+Asiaticizations
+offerers
+uncles
+sleepwalk
+Ernestine
+checksumming
+stopped
+sicker
+Italianization
+alphabetic
+pharmaceutic
+creator
+chess
+charcoal
+Epiphany
+bulldozes
+Pygmalion
+caressing
+Palestine
+regimented
+scars
+realest
+diffusing
+clubroom
+Blythe
+ahead
+reviver
+retransmitting
+landslide
+Eiffel
+absentee
+aye
+forked
+Peruvianizes
+clerked
+tutor
+boulevard
+shuttered
+quotes
+Caltech
+Mossberg
+kept
+roundly
+features
+imaginable
+controller
+racial
+uprisings
+narrowed
+cannot
+vest
+famine
+sugars
+exterminated
+belays
+Hodges
+translatable
+duality
+recording
+rouses
+poison
+attitude
+dusted
+encompasses
+presentation
+Kantian
+imprecision
+saving
+maternal
+hewed
+kerosene
+Cubans
+photographers
+nymph
+bedlam
+north
+Schoenberg
+botany
+curs
+solidification
+inheritresses
+stiller
+t1
+suite
+ransomer
+Willy
+Rena
+Seattle
+relaxes
+exclaim
+exclaim
+implicated
+distinguish
+assayed
+homeowner
+and
+stealth
+coinciding
+founder
+environing
+jewelry
+lemons
+brokenness
+bedpost
+assurers
+annoyers
+affixed
+warbling
+seriously
+boasted
+Chantilly
+Iranizes
+violinist
+extramarital
+spates
+cloakroom
+gazer
+hand
+tucked
+gems
+clinker
+refiner
+callus
+leopards
+comfortingly
+generically
+getters
+sexually
+spear
+serums
+Italianization
+attendants
+spies
+Anthony
+planar
+cupped
+cleanser
+commuters
+honeysuckle
+orphanage
+skies
+crushers
+Puritan
+squeezer
+bruises
+bonfire
+Colombo
+nondecreasing
+DELETE FROM t2 WHERE fld3="r%";
+SELECT fld3 FROM t2;
+fld3
+Omaha
+breaking
+Romans
+intercepted
+bewilderingly
+astound
+admonishing
+sumac
+flanking
+combed
+subjective
+scatterbrain
+Eulerian
+dubbed
+Kane
+overlay
+perturb
+goblins
+annihilates
+Wotan
+snatching
+concludes
+laterally
+yelped
+grazing
+Baird
+celery
+misunderstander
+handgun
+foldout
+mystic
+succumbed
+Nabisco
+fingerings
+aging
+afield
+ammonium
+boat
+intelligibility
+Augustine
+teethe
+dreaded
+scholastics
+audiology
+wallet
+parters
+eschew
+quitter
+neat
+Steinberg
+jarring
+tinily
+balled
+persist
+attainments
+fanatic
+measures
+rightfulness
+capably
+impulsive
+starlet
+terminators
+untying
+announces
+featherweight
+pessimist
+daughter
+decliner
+lawgiver
+stated
+readable
+attrition
+cascade
+motors
+interrogate
+pests
+stairway
+dopers
+testicle
+Parsifal
+leavings
+postulation
+squeaking
+contrasted
+leftover
+whiteners
+erases
+Punjab
+Merritt
+Quixotism
+sweetish
+dogging
+scornfully
+bellow
+bills
+cupboard
+sureties
+puddings
+tapestry
+fetters
+bivalves
+incurring
+Adolph
+pithed
+emergency
+Miles
+trimmings
+tragedies
+skulking
+flint
+flopping
+relaxing
+offload
+suites
+lists
+animized
+multilayer
+standardizes
+Judas
+vacuuming
+dentally
+humanness
+inch
+Weissmuller
+irresponsibly
+luckily
+culled
+medical
+bloodbath
+subschema
+animals
+Micronesia
+repetitions
+Antares
+ventilate
+pityingly
+interdependent
+Graves
+neonatal
+scribbled
+chafe
+honoring
+realtor
+elite
+funereal
+abrogating
+sorters
+Conley
+lectured
+Abraham
+Hawaii
+cage
+hushes
+Simla
+reporters
+Dutchman
+descendants
+groupings
+dissociate
+coexist
+Beebe
+Taoism
+Connally
+fetched
+checkpoints
+rusting
+galling
+obliterates
+traitor
+resumes
+analyzable
+terminator
+gritty
+firearm
+minima
+Selfridge
+disable
+witchcraft
+betroth
+Manhattanize
+imprint
+peeked
+swelling
+interrelationships
+riser
+Gandhian
+peacock
+bee
+kanji
+dental
+scarf
+chasm
+insolence
+syndicate
+alike
+imperial
+convulsion
+railway
+validate
+normalizes
+comprehensive
+chewing
+denizen
+schemer
+chronicle
+Kline
+Anatole
+partridges
+brunch
+recruited
+dimensions
+Chicana
+announced
+praised
+employing
+linear
+quagmire
+western
+relishing
+serving
+scheduling
+lore
+eventful
+arteriole
+disentangle
+cured
+Fenton
+avoidable
+drains
+detectably
+husky
+impelling
+undoes
+evened
+squeezes
+destroyer
+rudeness
+beaner
+boorish
+Everhart
+encompass
+mushrooms
+Alison
+externally
+pellagra
+cult
+creek
+Huffman
+Majorca
+governing
+gadfly
+reassigned
+intentness
+craziness
+psychic
+squabbled
+burlesque
+capped
+extracted
+DiMaggio
+exclamation
+subdirectory
+fangs
+buyer
+pithing
+transistorizing
+nonbiodegradable
+dislocate
+monochromatic
+batting
+postcondition
+catalog
+Remus
+devices
+bike
+qualify
+detained
+commended
+civilize
+Elmhurst
+anesthetizing
+deaf
+Brigham
+title
+coarse
+combinations
+grayness
+innumerable
+Caroline
+fatty
+eastbound
+inexperienced
+hoarder
+scotch
+passport
+strategic
+gated
+flog
+Pipestone
+Dar
+Corcoran
+flyers
+competitions
+suppliers
+skips
+institutes
+troop
+connective
+denies
+polka
+observations
+askers
+homeless
+Anna
+subdirectories
+decaying
+outwitting
+Harpy
+crazed
+suffocate
+provers
+technically
+Franklinizations
+considered
+tinnily
+uninterruptedly
+whistled
+automate
+gutting
+surreptitious
+Choctaw
+cooks
+millivolt
+counterpoise
+Gothicism
+feminine
+metaphysically
+sanding
+contributorily
+receivers
+adjourn
+straggled
+druggists
+thanking
+ostrich
+hopelessness
+Eurydice
+excitation
+presumes
+imaginable
+concoct
+peering
+Phelps
+ferociousness
+sentences
+unlocks
+engrossing
+Ruth
+tying
+exclaimers
+synergy
+Huey
+merging
+judges
+Shylock
+Miltonism
+hen
+honeybee
+towers
+dilutes
+numerals
+democracy
+Ibero-
+invalids
+behavior
+accruing
+relics
+rackets
+Fischbein
+phony
+cross
+cleanup
+conspirator
+label
+university
+cleansed
+ballgown
+starlet
+aqueous
+portrayal
+despising
+distort
+palmed
+faced
+silverware
+assessor
+spiders
+artificially
+reminiscence
+Mexican
+obnoxious
+fragile
+apprehensible
+births
+garages
+panty
+anteater
+displacement
+drovers
+patenting
+far
+shrieks
+aligning
+pragmatism
+fevers
+reexamines
+occupancies
+sweats
+modulators
+demand
+Madeira
+Viennese
+chillier
+wildcats
+gentle
+Angles
+accuracies
+toggle
+Mendelssohn
+behaviorally
+Rochford
+mirror
+Modula
+clobbering
+chronography
+Eskimoizeds
+British
+pitfalls
+verify
+scatter
+Aztecan
+acuity
+sinking
+beasts
+Witt
+physicists
+folksong
+strokes
+crowder
+merry
+cadenced
+alimony
+principled
+golfing
+undiscovered
+irritates
+patriots
+rooms
+towering
+displease
+photosensitive
+inking
+gainers
+leaning
+hydrant
+preserve
+blinded
+interactions
+Barry
+whiteness
+pastimes
+Edenization
+Muscat
+assassinated
+labeled
+glacial
+implied
+bibliographies
+Buchanan
+forgivably
+innuendo
+den
+submarines
+mouthful
+expiring
+unfulfilled
+precession
+nullified
+affects
+Cynthia
+Chablis
+betterments
+advertising
+rubies
+southwest
+superstitious
+tabernacle
+silk
+handsomest
+Persian
+analog
+complex
+Taoist
+suspend
+relegated
+awesome
+Bruxelles
+imprecisely
+televise
+braking
+true
+disappointing
+navally
+circus
+beetles
+trumps
+fourscore
+Blackfoots
+Grady
+quiets
+floundered
+profundity
+Garrisonian
+Strauss
+cemented
+contrition
+mutations
+exhibits
+tits
+mate
+arches
+Moll
+ropers
+bombast
+difficultly
+adsorption
+definiteness
+cultivation
+heals
+Heusen
+target
+cited
+congresswoman
+Katherine
+titter
+aspire
+Mardis
+Nadia
+estimating
+stuck
+fifteenth
+Colombo
+survey
+staffing
+obtain
+loaded
+slaughtered
+lights
+circumference
+dull
+weekly
+wetness
+visualized
+Tannenbaum
+moribund
+demultiplex
+lockings
+thugs
+unnerves
+abut
+Chippewa
+stratifications
+signaled
+Italianizes
+algorithmic
+paranoid
+camping
+signifying
+Patrice
+search
+Angeles
+semblance
+taxed
+Beatrice
+retrace
+lockout
+grammatic
+helmsman
+uniform
+hamming
+disobedience
+captivated
+transferals
+cartographer
+aims
+Pakistani
+burglarized
+saucepans
+lacerating
+corny
+megabytes
+chancellor
+bulk
+commits
+meson
+deputies
+northeaster
+dipole
+machining
+therefore
+Telefunken
+salvaging
+Corinthianizes
+restlessly
+bromides
+generalized
+mishaps
+quelling
+spiritual
+beguiles
+Trobriand
+fleeing
+Armour
+chin
+provers
+aeronautic
+voltage
+sash
+anaerobic
+simultaneous
+accumulating
+Medusan
+shouted
+freakish
+index
+commercially
+mistiness
+endpoint
+straight
+flurried
+denotative
+coming
+commencements
+gentleman
+gifted
+Shanghais
+sportswriting
+sloping
+navies
+leaflet
+shooter
+Joplin
+babies
+subdivision
+burstiness
+belted
+assails
+admiring
+swaying
+Goldstine
+fitting
+Norwalk
+weakening
+analogy
+deludes
+cokes
+Clayton
+exhausts
+causality
+sating
+icon
+throttles
+communicants
+dehydrate
+priceless
+publicly
+incidentals
+commonplace
+mumbles
+furthermore
+cautioned
+parametrized
+registration
+sadly
+positioning
+babysitting
+eternal
+hoarder
+congregates
+rains
+workers
+sags
+unplug
+garage
+boulder
+hollowly
+specifics
+Teresa
+Winsett
+convenient
+buckboards
+amenities
+resplendent
+priding
+configurations
+untidiness
+Brice
+sews
+participated
+Simon
+certificates
+Fitzpatrick
+Evanston
+misted
+textures
+save
+count
+rightful
+chaperone
+Lizzy
+clenched
+effortlessly
+accessed
+beaters
+Hornblower
+vests
+indulgences
+infallibly
+unwilling
+excrete
+spools
+crunches
+overestimating
+ineffective
+humiliation
+sophomore
+star
+rifles
+dialysis
+arriving
+indulge
+clockers
+languages
+Antarctica
+percentage
+ceiling
+specification
+regimented
+ciphers
+pictures
+serpents
+allot
+realized
+mayoral
+opaquely
+hostess
+fiftieth
+incorrectly
+decomposition
+stranglings
+mixture
+electroencephalography
+similarities
+charges
+freest
+Greenberg
+tinting
+expelled
+warm
+smoothed
+deductions
+Romano
+bitterroot
+corset
+securing
+environing
+cute
+Crays
+heiress
+inform
+avenge
+universals
+Kinsey
+ravines
+bestseller
+equilibrium
+extents
+relatively
+pressure
+critiques
+befouled
+rightfully
+mechanizing
+Latinizes
+timesharing
+Aden
+embassies
+males
+shapelessly
+genres
+mastering
+Newtonian
+finishers
+abates
+teem
+kiting
+stodgy
+scalps
+feed
+guitars
+airships
+store
+denounces
+Pyle
+Saxony
+serializations
+Peruvian
+taxonomically
+kingdom
+stint
+Sault
+faithful
+Ganymede
+tidiness
+gainful
+contrary
+Tipperary
+tropics
+theorizers
+renew
+already
+terminal
+Hegelian
+hypothesizer
+warningly
+journalizing
+nested
+Lars
+saplings
+foothill
+labeled
+imperiously
+reporters
+furnishings
+precipitable
+discounts
+excises
+Stalin
+despot
+ripeness
+Arabia
+unruly
+mournfulness
+boom
+slaughter
+Sabine
+handy
+rural
+organizer
+shipyard
+civics
+inaccuracy
+rules
+juveniles
+comprised
+investigations
+stabilizes
+seminaries
+Hunter
+sporty
+test
+weasels
+CERN
+tempering
+afore
+Galatean
+techniques
+error
+veranda
+severely
+Cassites
+forthcoming
+guides
+vanish
+lied
+sawtooth
+fated
+gradually
+widens
+preclude
+Jobrel
+hooker
+rainstorm
+disconnects
+cruelty
+exponentials
+affective
+arteries
+Crosby
+acquaint
+evenhandedly
+percentage
+disobedience
+humility
+gleaning
+petted
+bloater
+minion
+marginal
+apiary
+measures
+precaution
+repelled
+primary
+coverings
+Artemia
+navigate
+spatial
+Gurkha
+meanwhile
+Melinda
+Butterfield
+Aldrich
+previewing
+glut
+unaffected
+inmate
+mineral
+impending
+meditation
+ideas
+miniaturizes
+lewdly
+title
+youthfulness
+creak
+Chippewa
+clamored
+freezes
+forgivably
+reduce
+McGovern
+Nazis
+epistle
+socializes
+conceptions
+Kevin
+uncovering
+chews
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+raining
+infest
+compartment
+minting
+ducks
+roped
+waltz
+Lillian
+repressions
+chillingly
+noncritical
+lithograph
+spongers
+parenthood
+posed
+instruments
+filial
+fixedly
+relives
+Pandora
+watering
+ungrateful
+secures
+chastisers
+icon
+reuniting
+imagining
+abiding
+omnisciently
+Britannic
+scholastics
+mechanics
+humidly
+masterpiece
+however
+Mendelian
+jarred
+scolds
+infatuate
+willed
+joyfully
+Microsoft
+fibrosities
+Baltimorean
+equestrian
+Goodrich
+apish
+Adlerian
+Tropez
+nouns
+distracting
+mutton
+bridgeable
+stickers
+transcontinental
+amateurish
+Gandhian
+stratified
+chamberlains
+creditably
+philosophic
+ores
+Carleton
+tape
+afloat
+goodness
+welcoming
+Pinsky
+halting
+bibliography
+decoding
+variance
+allowed
+dire
+dub
+poisoning
+Iraqis
+heaving
+population
+bomb
+Majorca
+Gershwins
+explorers
+libretto
+occurred
+Lagos
+rats
+bankruptcies
+crying
+unexpected
+accessed
+colorful
+versatility
+cosy
+Darius
+mastering
+Asiaticizations
+offerers
+uncles
+sleepwalk
+Ernestine
+checksumming
+stopped
+sicker
+Italianization
+alphabetic
+pharmaceutic
+creator
+chess
+charcoal
+Epiphany
+bulldozes
+Pygmalion
+caressing
+Palestine
+regimented
+scars
+realest
+diffusing
+clubroom
+Blythe
+ahead
+reviver
+retransmitting
+landslide
+Eiffel
+absentee
+aye
+forked
+Peruvianizes
+clerked
+tutor
+boulevard
+shuttered
+quotes
+Caltech
+Mossberg
+kept
+roundly
+features
+imaginable
+controller
+racial
+uprisings
+narrowed
+cannot
+vest
+famine
+sugars
+exterminated
+belays
+Hodges
+translatable
+duality
+recording
+rouses
+poison
+attitude
+dusted
+encompasses
+presentation
+Kantian
+imprecision
+saving
+maternal
+hewed
+kerosene
+Cubans
+photographers
+nymph
+bedlam
+north
+Schoenberg
+botany
+curs
+solidification
+inheritresses
+stiller
+t1
+suite
+ransomer
+Willy
+Rena
+Seattle
+relaxes
+exclaim
+exclaim
+implicated
+distinguish
+assayed
+homeowner
+and
+stealth
+coinciding
+founder
+environing
+jewelry
+lemons
+brokenness
+bedpost
+assurers
+annoyers
+affixed
+warbling
+seriously
+boasted
+Chantilly
+Iranizes
+violinist
+extramarital
+spates
+cloakroom
+gazer
+hand
+tucked
+gems
+clinker
+refiner
+callus
+leopards
+comfortingly
+generically
+getters
+sexually
+spear
+serums
+Italianization
+attendants
+spies
+Anthony
+planar
+cupped
+cleanser
+commuters
+honeysuckle
+orphanage
+skies
+crushers
+Puritan
+squeezer
+bruises
+bonfire
+Colombo
+nondecreasing
+DELETE FROM t2 WHERE fld3="d%" ORDER BY RAND();
+SELECT fld3 FROM t2;
+fld3
+Omaha
+breaking
+Romans
+intercepted
+bewilderingly
+astound
+admonishing
+sumac
+flanking
+combed
+subjective
+scatterbrain
+Eulerian
+dubbed
+Kane
+overlay
+perturb
+goblins
+annihilates
+Wotan
+snatching
+concludes
+laterally
+yelped
+grazing
+Baird
+celery
+misunderstander
+handgun
+foldout
+mystic
+succumbed
+Nabisco
+fingerings
+aging
+afield
+ammonium
+boat
+intelligibility
+Augustine
+teethe
+dreaded
+scholastics
+audiology
+wallet
+parters
+eschew
+quitter
+neat
+Steinberg
+jarring
+tinily
+balled
+persist
+attainments
+fanatic
+measures
+rightfulness
+capably
+impulsive
+starlet
+terminators
+untying
+announces
+featherweight
+pessimist
+daughter
+decliner
+lawgiver
+stated
+readable
+attrition
+cascade
+motors
+interrogate
+pests
+stairway
+dopers
+testicle
+Parsifal
+leavings
+postulation
+squeaking
+contrasted
+leftover
+whiteners
+erases
+Punjab
+Merritt
+Quixotism
+sweetish
+dogging
+scornfully
+bellow
+bills
+cupboard
+sureties
+puddings
+tapestry
+fetters
+bivalves
+incurring
+Adolph
+pithed
+emergency
+Miles
+trimmings
+tragedies
+skulking
+flint
+flopping
+relaxing
+offload
+suites
+lists
+animized
+multilayer
+standardizes
+Judas
+vacuuming
+dentally
+humanness
+inch
+Weissmuller
+irresponsibly
+luckily
+culled
+medical
+bloodbath
+subschema
+animals
+Micronesia
+repetitions
+Antares
+ventilate
+pityingly
+interdependent
+Graves
+neonatal
+scribbled
+chafe
+honoring
+realtor
+elite
+funereal
+abrogating
+sorters
+Conley
+lectured
+Abraham
+Hawaii
+cage
+hushes
+Simla
+reporters
+Dutchman
+descendants
+groupings
+dissociate
+coexist
+Beebe
+Taoism
+Connally
+fetched
+checkpoints
+rusting
+galling
+obliterates
+traitor
+resumes
+analyzable
+terminator
+gritty
+firearm
+minima
+Selfridge
+disable
+witchcraft
+betroth
+Manhattanize
+imprint
+peeked
+swelling
+interrelationships
+riser
+Gandhian
+peacock
+bee
+kanji
+dental
+scarf
+chasm
+insolence
+syndicate
+alike
+imperial
+convulsion
+railway
+validate
+normalizes
+comprehensive
+chewing
+denizen
+schemer
+chronicle
+Kline
+Anatole
+partridges
+brunch
+recruited
+dimensions
+Chicana
+announced
+praised
+employing
+linear
+quagmire
+western
+relishing
+serving
+scheduling
+lore
+eventful
+arteriole
+disentangle
+cured
+Fenton
+avoidable
+drains
+detectably
+husky
+impelling
+undoes
+evened
+squeezes
+destroyer
+rudeness
+beaner
+boorish
+Everhart
+encompass
+mushrooms
+Alison
+externally
+pellagra
+cult
+creek
+Huffman
+Majorca
+governing
+gadfly
+reassigned
+intentness
+craziness
+psychic
+squabbled
+burlesque
+capped
+extracted
+DiMaggio
+exclamation
+subdirectory
+fangs
+buyer
+pithing
+transistorizing
+nonbiodegradable
+dislocate
+monochromatic
+batting
+postcondition
+catalog
+Remus
+devices
+bike
+qualify
+detained
+commended
+civilize
+Elmhurst
+anesthetizing
+deaf
+Brigham
+title
+coarse
+combinations
+grayness
+innumerable
+Caroline
+fatty
+eastbound
+inexperienced
+hoarder
+scotch
+passport
+strategic
+gated
+flog
+Pipestone
+Dar
+Corcoran
+flyers
+competitions
+suppliers
+skips
+institutes
+troop
+connective
+denies
+polka
+observations
+askers
+homeless
+Anna
+subdirectories
+decaying
+outwitting
+Harpy
+crazed
+suffocate
+provers
+technically
+Franklinizations
+considered
+tinnily
+uninterruptedly
+whistled
+automate
+gutting
+surreptitious
+Choctaw
+cooks
+millivolt
+counterpoise
+Gothicism
+feminine
+metaphysically
+sanding
+contributorily
+receivers
+adjourn
+straggled
+druggists
+thanking
+ostrich
+hopelessness
+Eurydice
+excitation
+presumes
+imaginable
+concoct
+peering
+Phelps
+ferociousness
+sentences
+unlocks
+engrossing
+Ruth
+tying
+exclaimers
+synergy
+Huey
+merging
+judges
+Shylock
+Miltonism
+hen
+honeybee
+towers
+dilutes
+numerals
+democracy
+Ibero-
+invalids
+behavior
+accruing
+relics
+rackets
+Fischbein
+phony
+cross
+cleanup
+conspirator
+label
+university
+cleansed
+ballgown
+starlet
+aqueous
+portrayal
+despising
+distort
+palmed
+faced
+silverware
+assessor
+spiders
+artificially
+reminiscence
+Mexican
+obnoxious
+fragile
+apprehensible
+births
+garages
+panty
+anteater
+displacement
+drovers
+patenting
+far
+shrieks
+aligning
+pragmatism
+fevers
+reexamines
+occupancies
+sweats
+modulators
+demand
+Madeira
+Viennese
+chillier
+wildcats
+gentle
+Angles
+accuracies
+toggle
+Mendelssohn
+behaviorally
+Rochford
+mirror
+Modula
+clobbering
+chronography
+Eskimoizeds
+British
+pitfalls
+verify
+scatter
+Aztecan
+acuity
+sinking
+beasts
+Witt
+physicists
+folksong
+strokes
+crowder
+merry
+cadenced
+alimony
+principled
+golfing
+undiscovered
+irritates
+patriots
+rooms
+towering
+displease
+photosensitive
+inking
+gainers
+leaning
+hydrant
+preserve
+blinded
+interactions
+Barry
+whiteness
+pastimes
+Edenization
+Muscat
+assassinated
+labeled
+glacial
+implied
+bibliographies
+Buchanan
+forgivably
+innuendo
+den
+submarines
+mouthful
+expiring
+unfulfilled
+precession
+nullified
+affects
+Cynthia
+Chablis
+betterments
+advertising
+rubies
+southwest
+superstitious
+tabernacle
+silk
+handsomest
+Persian
+analog
+complex
+Taoist
+suspend
+relegated
+awesome
+Bruxelles
+imprecisely
+televise
+braking
+true
+disappointing
+navally
+circus
+beetles
+trumps
+fourscore
+Blackfoots
+Grady
+quiets
+floundered
+profundity
+Garrisonian
+Strauss
+cemented
+contrition
+mutations
+exhibits
+tits
+mate
+arches
+Moll
+ropers
+bombast
+difficultly
+adsorption
+definiteness
+cultivation
+heals
+Heusen
+target
+cited
+congresswoman
+Katherine
+titter
+aspire
+Mardis
+Nadia
+estimating
+stuck
+fifteenth
+Colombo
+survey
+staffing
+obtain
+loaded
+slaughtered
+lights
+circumference
+dull
+weekly
+wetness
+visualized
+Tannenbaum
+moribund
+demultiplex
+lockings
+thugs
+unnerves
+abut
+Chippewa
+stratifications
+signaled
+Italianizes
+algorithmic
+paranoid
+camping
+signifying
+Patrice
+search
+Angeles
+semblance
+taxed
+Beatrice
+retrace
+lockout
+grammatic
+helmsman
+uniform
+hamming
+disobedience
+captivated
+transferals
+cartographer
+aims
+Pakistani
+burglarized
+saucepans
+lacerating
+corny
+megabytes
+chancellor
+bulk
+commits
+meson
+deputies
+northeaster
+dipole
+machining
+therefore
+Telefunken
+salvaging
+Corinthianizes
+restlessly
+bromides
+generalized
+mishaps
+quelling
+spiritual
+beguiles
+Trobriand
+fleeing
+Armour
+chin
+provers
+aeronautic
+voltage
+sash
+anaerobic
+simultaneous
+accumulating
+Medusan
+shouted
+freakish
+index
+commercially
+mistiness
+endpoint
+straight
+flurried
+denotative
+coming
+commencements
+gentleman
+gifted
+Shanghais
+sportswriting
+sloping
+navies
+leaflet
+shooter
+Joplin
+babies
+subdivision
+burstiness
+belted
+assails
+admiring
+swaying
+Goldstine
+fitting
+Norwalk
+weakening
+analogy
+deludes
+cokes
+Clayton
+exhausts
+causality
+sating
+icon
+throttles
+communicants
+dehydrate
+priceless
+publicly
+incidentals
+commonplace
+mumbles
+furthermore
+cautioned
+parametrized
+registration
+sadly
+positioning
+babysitting
+eternal
+hoarder
+congregates
+rains
+workers
+sags
+unplug
+garage
+boulder
+hollowly
+specifics
+Teresa
+Winsett
+convenient
+buckboards
+amenities
+resplendent
+priding
+configurations
+untidiness
+Brice
+sews
+participated
+Simon
+certificates
+Fitzpatrick
+Evanston
+misted
+textures
+save
+count
+rightful
+chaperone
+Lizzy
+clenched
+effortlessly
+accessed
+beaters
+Hornblower
+vests
+indulgences
+infallibly
+unwilling
+excrete
+spools
+crunches
+overestimating
+ineffective
+humiliation
+sophomore
+star
+rifles
+dialysis
+arriving
+indulge
+clockers
+languages
+Antarctica
+percentage
+ceiling
+specification
+regimented
+ciphers
+pictures
+serpents
+allot
+realized
+mayoral
+opaquely
+hostess
+fiftieth
+incorrectly
+decomposition
+stranglings
+mixture
+electroencephalography
+similarities
+charges
+freest
+Greenberg
+tinting
+expelled
+warm
+smoothed
+deductions
+Romano
+bitterroot
+corset
+securing
+environing
+cute
+Crays
+heiress
+inform
+avenge
+universals
+Kinsey
+ravines
+bestseller
+equilibrium
+extents
+relatively
+pressure
+critiques
+befouled
+rightfully
+mechanizing
+Latinizes
+timesharing
+Aden
+embassies
+males
+shapelessly
+genres
+mastering
+Newtonian
+finishers
+abates
+teem
+kiting
+stodgy
+scalps
+feed
+guitars
+airships
+store
+denounces
+Pyle
+Saxony
+serializations
+Peruvian
+taxonomically
+kingdom
+stint
+Sault
+faithful
+Ganymede
+tidiness
+gainful
+contrary
+Tipperary
+tropics
+theorizers
+renew
+already
+terminal
+Hegelian
+hypothesizer
+warningly
+journalizing
+nested
+Lars
+saplings
+foothill
+labeled
+imperiously
+reporters
+furnishings
+precipitable
+discounts
+excises
+Stalin
+despot
+ripeness
+Arabia
+unruly
+mournfulness
+boom
+slaughter
+Sabine
+handy
+rural
+organizer
+shipyard
+civics
+inaccuracy
+rules
+juveniles
+comprised
+investigations
+stabilizes
+seminaries
+Hunter
+sporty
+test
+weasels
+CERN
+tempering
+afore
+Galatean
+techniques
+error
+veranda
+severely
+Cassites
+forthcoming
+guides
+vanish
+lied
+sawtooth
+fated
+gradually
+widens
+preclude
+Jobrel
+hooker
+rainstorm
+disconnects
+cruelty
+exponentials
+affective
+arteries
+Crosby
+acquaint
+evenhandedly
+percentage
+disobedience
+humility
+gleaning
+petted
+bloater
+minion
+marginal
+apiary
+measures
+precaution
+repelled
+primary
+coverings
+Artemia
+navigate
+spatial
+Gurkha
+meanwhile
+Melinda
+Butterfield
+Aldrich
+previewing
+glut
+unaffected
+inmate
+mineral
+impending
+meditation
+ideas
+miniaturizes
+lewdly
+title
+youthfulness
+creak
+Chippewa
+clamored
+freezes
+forgivably
+reduce
+McGovern
+Nazis
+epistle
+socializes
+conceptions
+Kevin
+uncovering
+chews
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+appendixes
+raining
+infest
+compartment
+minting
+ducks
+roped
+waltz
+Lillian
+repressions
+chillingly
+noncritical
+lithograph
+spongers
+parenthood
+posed
+instruments
+filial
+fixedly
+relives
+Pandora
+watering
+ungrateful
+secures
+chastisers
+icon
+reuniting
+imagining
+abiding
+omnisciently
+Britannic
+scholastics
+mechanics
+humidly
+masterpiece
+however
+Mendelian
+jarred
+scolds
+infatuate
+willed
+joyfully
+Microsoft
+fibrosities
+Baltimorean
+equestrian
+Goodrich
+apish
+Adlerian
+Tropez
+nouns
+distracting
+mutton
+bridgeable
+stickers
+transcontinental
+amateurish
+Gandhian
+stratified
+chamberlains
+creditably
+philosophic
+ores
+Carleton
+tape
+afloat
+goodness
+welcoming
+Pinsky
+halting
+bibliography
+decoding
+variance
+allowed
+dire
+dub
+poisoning
+Iraqis
+heaving
+population
+bomb
+Majorca
+Gershwins
+explorers
+libretto
+occurred
+Lagos
+rats
+bankruptcies
+crying
+unexpected
+accessed
+colorful
+versatility
+cosy
+Darius
+mastering
+Asiaticizations
+offerers
+uncles
+sleepwalk
+Ernestine
+checksumming
+stopped
+sicker
+Italianization
+alphabetic
+pharmaceutic
+creator
+chess
+charcoal
+Epiphany
+bulldozes
+Pygmalion
+caressing
+Palestine
+regimented
+scars
+realest
+diffusing
+clubroom
+Blythe
+ahead
+reviver
+retransmitting
+landslide
+Eiffel
+absentee
+aye
+forked
+Peruvianizes
+clerked
+tutor
+boulevard
+shuttered
+quotes
+Caltech
+Mossberg
+kept
+roundly
+features
+imaginable
+controller
+racial
+uprisings
+narrowed
+cannot
+vest
+famine
+sugars
+exterminated
+belays
+Hodges
+translatable
+duality
+recording
+rouses
+poison
+attitude
+dusted
+encompasses
+presentation
+Kantian
+imprecision
+saving
+maternal
+hewed
+kerosene
+Cubans
+photographers
+nymph
+bedlam
+north
+Schoenberg
+botany
+curs
+solidification
+inheritresses
+stiller
+t1
+suite
+ransomer
+Willy
+Rena
+Seattle
+relaxes
+exclaim
+exclaim
+implicated
+distinguish
+assayed
+homeowner
+and
+stealth
+coinciding
+founder
+environing
+jewelry
+lemons
+brokenness
+bedpost
+assurers
+annoyers
+affixed
+warbling
+seriously
+boasted
+Chantilly
+Iranizes
+violinist
+extramarital
+spates
+cloakroom
+gazer
+hand
+tucked
+gems
+clinker
+refiner
+callus
+leopards
+comfortingly
+generically
+getters
+sexually
+spear
+serums
+Italianization
+attendants
+spies
+Anthony
+planar
+cupped
+cleanser
+commuters
+honeysuckle
+orphanage
+skies
+crushers
+Puritan
+squeezer
+bruises
+bonfire
+Colombo
+nondecreasing
+DROP TABLE t1;
+ALTER TABLE t2 RENAME t1
+#;
+DROP TABLE t1;
+CREATE TABLE t1 (
+Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
+) ENGINE = CSV;
+INSERT INTO t1 VALUES (9410,9412);
+select period from t1;
+period
+9410
+drop table if exists t1,t2,t3,t4;
+Warnings:
+Note 1051 Unknown table 't2'
+Note 1051 Unknown table 't3'
+Note 1051 Unknown table 't4'
diff --git a/mysql-test/r/ctype_cp1251.result b/mysql-test/r/ctype_cp1251.result
index 2a59f976156..3793e962d40 100644
--- a/mysql-test/r/ctype_cp1251.result
+++ b/mysql-test/r/ctype_cp1251.result
@@ -49,8 +49,8 @@ a b
aaa bbb
select charset(a), charset(b), charset(binary 'ccc') from t1 limit 1;
charset(a) charset(b) charset(binary 'ccc')
-cp1251 binary cp1251
+cp1251 binary binary
select collation(a), collation(b), collation(binary 'ccc') from t1 limit 1;
collation(a) collation(b) collation(binary 'ccc')
-cp1251_bin binary cp1251_bin
+cp1251_bin binary binary
drop table t1;
diff --git a/mysql-test/r/ctype_create.result b/mysql-test/r/ctype_create.result
index 0da76c556e2..b35131f62a4 100644
--- a/mysql-test/r/ctype_create.result
+++ b/mysql-test/r/ctype_create.result
@@ -54,4 +54,12 @@ t1 CREATE TABLE `t1` (
`a` char(10) collate latin1_german1_ci default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci
DROP TABLE t1;
+create table t1 (a char) character set latin1 character set latin2;
+ERROR HY000: Conflicting declarations: 'CHARACTER SET latin1' and 'CHARACTER SET latin2'
+create table t1 (a char) character set latin1 collate latin2_bin;
+ERROR 42000: COLLATION 'latin2_bin' is not valid for CHARACTER SET 'latin1'
+create database d1 default character set latin1 character set latin2;
+ERROR HY000: Conflicting declarations: 'CHARACTER SET latin1' and 'CHARACTER SET latin2'
+create database d1 default character set latin1 collate latin2_bin;
+ERROR 42000: COLLATION 'latin2_bin' is not valid for CHARACTER SET 'latin1'
DROP DATABASE mysqltest1;
diff --git a/mysql-test/r/ctype_recoding.result b/mysql-test/r/ctype_recoding.result
index 69cf1d722ff..1ef185388a8 100644
--- a/mysql-test/r/ctype_recoding.result
+++ b/mysql-test/r/ctype_recoding.result
@@ -136,6 +136,30 @@ SET character_set_connection=binary;
SELECT 'тест' as s;
s
тест
+SET NAMES latin1;
+CREATE TABLE t1 (`ä` CHAR(128) DEFAULT 'ä', `ä1` ENUM('ä1','ä2') DEFAULT 'ä2');
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `ä` char(128) default 'ä',
+ `ä1` enum('ä1','ä2') default 'ä2'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SHOW COLUMNS FROM t1;
+Field Type Null Key Default Extra
+ä char(128) YES ä
+ä1 enum('ä1','ä2') YES ä2
+SET NAMES binary;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `ä` char(128) default 'ä',
+ `ä1` enum('ä1','ä2') default 'ä2'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SHOW COLUMNS FROM t1;
+Field Type Null Key Default Extra
+ä char(128) YES ä
+ä1 enum('ä1','ä2') YES ä2
+DROP TABLE t1;
SET NAMES binary;
CREATE TABLE `good` (a int);
ERROR HY000: Invalid utf8 character string: ''
diff --git a/mysql-test/r/ctype_uca.result b/mysql-test/r/ctype_uca.result
index 2fd654da434..94fe15fed26 100644
--- a/mysql-test/r/ctype_uca.result
+++ b/mysql-test/r/ctype_uca.result
@@ -1548,6 +1548,116 @@ Z,z,Ź,ź,Ż,ż
ǁ
ǂ
ǃ
+select group_concat(c1 order by c1) from t1 group by c1 collate utf8_slovak_ci;
+group_concat(c1 order by c1)
+A,a,À,Á,Â,Ã,Å,à,á,â,ã,å,Ā,ā,Ă,ă,Ą,ą,Ǎ,ǎ,Ǟ,ǟ,Ǡ,ǡ,Ǻ,ǻ
+AA,Aa,aA,aa
+Ä,ä
+Æ,æ,Ǣ,ǣ,Ǽ,ǽ
+B,b
+Ƃ,ƃ
+C,c,Ç,ç,Ć,ć,Ĉ,ĉ,Ċ,ċ
+cH
+Č,č
+Ƈ,ƈ
+D,d,Ď,ď
+DZ,Dz,dZ,dz,DŽ,Dž,dž,DZ,Dz,dz
+Đ,đ
+Ƌ,ƌ
+Ð,ð
+E,e,È,É,Ê,Ë,è,é,ê,ë,Ē,ē,Ĕ,ĕ,Ė,ė,Ę,ę,Ě,ě
+Ǝ,ǝ
+F,f
+Ƒ,ƒ
+G,g,Ĝ,ĝ,Ğ,ğ,Ġ,ġ,Ģ,ģ,Ǧ,ǧ,Ǵ,ǵ
+Ǥ,ǥ
+Ƣ,ƣ
+H,h,Ĥ,ĥ
+CH,Ch,ch
+ƕ,Ƕ
+Ħ,ħ
+I,i,Ì,Í,Î,Ï,ì,í,î,ï,Ĩ,ĩ,Ī,ī,Ĭ,ĭ,Į,į,İ,Ǐ,ǐ
+IJ,Ij,iJ,ij,IJ,ij
+J,j,Ĵ,ĵ,ǰ
+K,k,Ķ,ķ,Ǩ,ǩ
+Ƙ,ƙ
+L,l,Ĺ,ĺ,Ļ,ļ,Ľ,ľ
+Ŀ,ŀ
+LJ,Lj,lJ,lj,LJ,Lj,lj
+LL,Ll,lL,ll
+Ł,ł
+M,m
+N,n,Ñ,ñ,Ń,ń,Ņ,ņ,Ň,ň,Ǹ,ǹ
+NJ,Nj,nJ,nj,NJ,Nj,nj
+Ŋ,ŋ
+O,o,Ò,Ó,Õ,Ö,ò,ó,õ,ö,Ō,ō,Ŏ,ŏ,Ő,ő,Ơ,ơ,Ǒ,ǒ,Ǫ,ǫ,Ǭ,ǭ
+OE,Oe,oE,oe,Œ,œ
+Ô,ô
+Ø,ø,Ǿ,ǿ
+P,p
+Ƥ,ƥ
+Q,q
+R,r,Ŕ,ŕ,Ŗ,ŗ,Ř,ř
+RR,Rr,rR,rr
+S,s,Ś,ś,Ŝ,ŝ,Ş,ş,ſ
+SS,Ss,sS,ss,ß
+Š,š
+T,t,Ţ,ţ,Ť,ť
+Ŧ,ŧ
+Ƭ,ƭ
+U,u,Ù,Ú,Û,Ü,ù,ú,û,ü,Ũ,ũ,Ū,ū,Ŭ,ŭ,Ů,ů,Ű,ű,Ų,ų,Ư,ư,Ǔ,ǔ,Ǖ,ǖ,Ǘ,ǘ,Ǚ,ǚ,Ǜ,ǜ
+V,v
+W,w,Ŵ,ŵ
+X,x
+Y,y,Ý,ý,ÿ,Ŷ,ŷ,Ÿ
+Ƴ,ƴ
+Z,z,Ź,ź,Ż,ż
+Ž,ž
+Ƶ,ƶ
+Ʒ,Ǯ,ǯ
+Ƹ,ƹ
+Þ,þ
+ƿ,Ƿ
+Ƨ,ƨ
+Ƽ,ƽ
+Ƅ,ƅ
select group_concat(c1 order by c1) from t1 group by c1 collate utf8_spanish2_ci;
group_concat(c1 order by c1)
÷
diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result
index 4ab8cc83d0c..a0ac29b7989 100644
--- a/mysql-test/r/ctype_ucs.result
+++ b/mysql-test/r/ctype_ucs.result
@@ -464,3 +464,19 @@ HEX(a)
AAAA
000AAAAA
DROP TABLE t1;
+create table t1 (s1 char character set `ucs2` collate `ucs2_czech_ci`);
+insert into t1 values ('0'),('1'),('2'),('a'),('b'),('c');
+select s1 from t1 where s1 > 'a' order by s1;
+s1
+b
+c
+drop table t1;
+create table t1(a char(1)) default charset = ucs2;
+insert into t1 values ('a'),('b'),('c');
+alter table t1 modify a char(5);
+select a, hex(a) from t1;
+a hex(a)
+a 0061
+b 0062
+c 0063
+drop table t1;
diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result
index b8ca99fe8f1..f3be539251a 100644
--- a/mysql-test/r/ctype_utf8.result
+++ b/mysql-test/r/ctype_utf8.result
@@ -1,4 +1,4 @@
-drop table if exists t1;
+drop table if exists t1,t2;
set names utf8;
select left(_utf8 0xD0B0D0B1D0B2,1);
left(_utf8 0xD0B0D0B1D0B2,1)
@@ -78,6 +78,21 @@ SELECT 'a\t' < 'a';
SELECT 'a\t' < 'a ';
'a\t' < 'a '
1
+SELECT 'a' = 'a ' collate utf8_bin;
+'a' = 'a ' collate utf8_bin
+1
+SELECT 'a\0' < 'a' collate utf8_bin;
+'a\0' < 'a' collate utf8_bin
+1
+SELECT 'a\0' < 'a ' collate utf8_bin;
+'a\0' < 'a ' collate utf8_bin
+1
+SELECT 'a\t' < 'a' collate utf8_bin;
+'a\t' < 'a' collate utf8_bin
+1
+SELECT 'a\t' < 'a ' collate utf8_bin;
+'a\t' < 'a ' collate utf8_bin
+1
CREATE TABLE t1 (a char(10) character set utf8 not null);
INSERT INTO t1 VALUES ('a'),('a\0'),('a\t'),('a ');
SELECT hex(a),STRCMP(a,'a'), STRCMP(a,'a ') FROM t1;
@@ -93,6 +108,9 @@ this is a test
select insert("aa",100,1,"b"),insert("aa",1,3,"b");
insert("aa",100,1,"b") insert("aa",1,3,"b")
aa b
+select char_length(left(@a:='тест',5)), length(@a), @a;
+char_length(left(@a:='тест',5)) length(@a) @a
+4 8 тест
create table t1 select date_format("2004-01-19 10:10:10", "%Y-%m-%d");
show create table t1;
Table Create Table
@@ -243,3 +261,381 @@ select 'zвасяz' rlike '[[:<:]]вася[[:>:]]';
CREATE TABLE t1 (a enum ('Y', 'N') DEFAULT 'N' COLLATE utf8_unicode_ci);
ALTER TABLE t1 ADD COLUMN b CHAR(20);
DROP TABLE t1;
+set names utf8;
+create table t1 (a enum('aaaa','проба') character set utf8);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` enum('aaaa','проба') character set utf8 default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values ('проба');
+select * from t1;
+a
+проба
+create table t2 select ifnull(a,a) from t1;
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `ifnull(a,a)` char(5) character set utf8 default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from t2;
+ifnull(a,a)
+проба
+drop table t1;
+drop table t2;
+create table t1 (c varchar(30) character set utf8, unique(c(10)));
+insert into t1 values ('1'),('2'),('3'),('x'),('y'),('z');
+insert into t1 values ('aaaaaaaaaa');
+insert into t1 values ('aaaaaaaaaaa');
+ERROR 23000: Duplicate entry 'aaaaaaaaaaa' for key 1
+insert into t1 values ('aaaaaaaaaaaa');
+ERROR 23000: Duplicate entry 'aaaaaaaaaaaa' for key 1
+insert into t1 values (repeat('b',20));
+select c c1 from t1 where c='1';
+c1
+1
+select c c2 from t1 where c='2';
+c2
+2
+select c c3 from t1 where c='3';
+c3
+3
+select c cx from t1 where c='x';
+cx
+x
+select c cy from t1 where c='y';
+cy
+y
+select c cz from t1 where c='z';
+cz
+z
+select c ca10 from t1 where c='aaaaaaaaaa';
+ca10
+aaaaaaaaaa
+select c cb20 from t1 where c=repeat('b',20);
+cb20
+bbbbbbbbbbbbbbbbbbbb
+drop table t1;
+create table t1 (c char(3) character set utf8, unique (c(2)));
+insert into t1 values ('1'),('2'),('3'),('4'),('x'),('y'),('z');
+insert into t1 values ('a');
+insert into t1 values ('aa');
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('b');
+insert into t1 values ('bb');
+insert into t1 values ('bbb');
+ERROR 23000: Duplicate entry 'bbb' for key 1
+insert into t1 values ('а');
+insert into t1 values ('аа');
+insert into t1 values ('ааа');
+ERROR 23000: Duplicate entry 'ааа' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'ббб' for key 1
+insert into t1 values ('ꪪ');
+insert into t1 values ('ꪪꪪ');
+insert into t1 values ('ꪪꪪꪪ');
+ERROR 23000: Duplicate entry 'ꪪꪪ' for key 1
+drop table t1;
+create table t1 (
+c char(10) character set utf8,
+unique key a using hash (c(1))
+) engine=heap;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) character set utf8 default NULL,
+ UNIQUE KEY `a` (`c`(1))
+) ENGINE=HEAP DEFAULT CHARSET=latin1
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+insert into t1 values ('aa');
+ERROR 23000: Duplicate entry 'aa' for key 1
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'б' for key 1
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'б' for key 1
+select c as c_all from t1 order by c;
+c_all
+a
+b
+c
+d
+e
+f
+select c as c_a from t1 where c='a';
+c_a
+a
+select c as c_a from t1 where c='б';
+c_a
+drop table t1;
+create table t1 (
+c char(10) character set utf8,
+unique key a using btree (c(1))
+) engine=heap;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) character set utf8 default NULL,
+ UNIQUE KEY `a` TYPE BTREE (`c`(1))
+) ENGINE=HEAP DEFAULT CHARSET=latin1
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+insert into t1 values ('aa');
+ERROR 23000: Duplicate entry 'aa' for key 1
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'б' for key 1
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'б' for key 1
+select c as c_all from t1 order by c;
+c_all
+a
+b
+c
+d
+e
+f
+select c as c_a from t1 where c='a';
+c_a
+a
+select c as c_a from t1 where c='б';
+c_a
+drop table t1;
+create table t1 (
+c char(10) character set utf8,
+unique key a (c(1))
+) engine=bdb;
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+insert into t1 values ('aa');
+ERROR 23000: Duplicate entry 'aa' for key 1
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'б' for key 1
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'б' for key 1
+select c as c_all from t1 order by c;
+c_all
+a
+b
+c
+d
+e
+f
+select c as c_a from t1 where c='a';
+c_a
+a
+select c as c_a from t1 where c='б';
+c_a
+drop table t1;
+create table t1 (c varchar(30) character set utf8 collate utf8_bin, unique(c(10)));
+insert into t1 values ('1'),('2'),('3'),('x'),('y'),('z');
+insert into t1 values ('aaaaaaaaaa');
+insert into t1 values ('aaaaaaaaaaa');
+ERROR 23000: Duplicate entry 'aaaaaaaaaaa' for key 1
+insert into t1 values ('aaaaaaaaaaaa');
+ERROR 23000: Duplicate entry 'aaaaaaaaaaaa' for key 1
+insert into t1 values (repeat('b',20));
+select c c1 from t1 where c='1';
+c1
+1
+select c c2 from t1 where c='2';
+c2
+2
+select c c3 from t1 where c='3';
+c3
+3
+select c cx from t1 where c='x';
+cx
+x
+select c cy from t1 where c='y';
+cy
+y
+select c cz from t1 where c='z';
+cz
+z
+select c ca10 from t1 where c='aaaaaaaaaa';
+ca10
+aaaaaaaaaa
+select c cb20 from t1 where c=repeat('b',20);
+cb20
+bbbbbbbbbbbbbbbbbbbb
+drop table t1;
+create table t1 (c char(3) character set utf8 collate utf8_bin, unique (c(2)));
+insert into t1 values ('1'),('2'),('3'),('4'),('x'),('y'),('z');
+insert into t1 values ('a');
+insert into t1 values ('aa');
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('b');
+insert into t1 values ('bb');
+insert into t1 values ('bbb');
+ERROR 23000: Duplicate entry 'bbb' for key 1
+insert into t1 values ('а');
+insert into t1 values ('аа');
+insert into t1 values ('ааа');
+ERROR 23000: Duplicate entry 'ааа' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'ббб' for key 1
+insert into t1 values ('ꪪ');
+insert into t1 values ('ꪪꪪ');
+insert into t1 values ('ꪪꪪꪪ');
+ERROR 23000: Duplicate entry 'ꪪꪪ' for key 1
+drop table t1;
+create table t1 (
+c char(10) character set utf8 collate utf8_bin,
+unique key a using hash (c(1))
+) engine=heap;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) character set utf8 collate utf8_bin default NULL,
+ UNIQUE KEY `a` (`c`(1))
+) ENGINE=HEAP DEFAULT CHARSET=latin1
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+insert into t1 values ('aa');
+ERROR 23000: Duplicate entry 'aa' for key 1
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'б' for key 1
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'б' for key 1
+select c as c_all from t1 order by c;
+c_all
+a
+b
+c
+d
+e
+f
+select c as c_a from t1 where c='a';
+c_a
+a
+select c as c_a from t1 where c='б';
+c_a
+drop table t1;
+create table t1 (
+c char(10) character set utf8 collate utf8_bin,
+unique key a using btree (c(1))
+) engine=heap;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) character set utf8 collate utf8_bin default NULL,
+ UNIQUE KEY `a` TYPE BTREE (`c`(1))
+) ENGINE=HEAP DEFAULT CHARSET=latin1
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+insert into t1 values ('aa');
+ERROR 23000: Duplicate entry 'aa' for key 1
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'б' for key 1
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'б' for key 1
+select c as c_all from t1 order by c;
+c_all
+a
+b
+c
+d
+e
+f
+select c as c_a from t1 where c='a';
+c_a
+a
+select c as c_a from t1 where c='б';
+c_a
+drop table t1;
+create table t1 (
+c char(10) character set utf8 collate utf8_bin,
+unique key a (c(1))
+) engine=bdb;
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+insert into t1 values ('aa');
+ERROR 23000: Duplicate entry 'aa' for key 1
+insert into t1 values ('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'б' for key 1
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'б' for key 1
+select c as c_all from t1 order by c;
+c_all
+a
+b
+c
+d
+e
+f
+select c as c_a from t1 where c='a';
+c_a
+a
+select c as c_a from t1 where c='б';
+c_a
+drop table t1;
+create table t1 (
+str varchar(255) character set utf8 not null,
+key str (str(2))
+) engine=myisam;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+str
+str
+drop table t1;
+create table t1 (
+str varchar(255) character set utf8 not null,
+key str using btree (str(2))
+) engine=heap;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+str
+str
+drop table t1;
+create table t1 (
+str varchar(255) character set utf8 not null,
+key str using hash (str(2))
+) engine=heap;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+str
+str
+drop table t1;
+create table t1 (
+str varchar(255) character set utf8 not null,
+key str (str(2))
+) engine=bdb;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+str
+str
+drop table t1;
diff --git a/mysql-test/r/date_formats.result b/mysql-test/r/date_formats.result
index 6a4935ef3f8..758a83defed 100644
--- a/mysql-test/r/date_formats.result
+++ b/mysql-test/r/date_formats.result
@@ -90,16 +90,23 @@ insert into t1 values
('2003-01-02 11:11:12Pm', '%Y-%m-%d %h:%i:%S%p'),
('10:20:10', '%H:%i:%s'),
('10:20:10', '%h:%i:%s.%f'),
+('10:20:10', '%T'),
('10:20:10AM', '%h:%i:%s%p'),
+('10:20:10AM', '%r'),
('10:20:10.44AM', '%h:%i:%s.%f%p'),
('15-01-2001 12:59:58', '%d-%m-%Y %H:%i:%S'),
('15 September 2001', '%d %M %Y'),
('15 SEPTEMB 2001', '%d %M %Y'),
('15 MAY 2001', '%d %b %Y'),
+('15th May 2001', '%D %b %Y'),
('Sunday 15 MAY 2001', '%W %d %b %Y'),
('Sund 15 MAY 2001', '%W %d %b %Y'),
('Tuesday 00 2002', '%W %U %Y'),
('Thursday 53 1998', '%W %u %Y'),
+('Sunday 01 2001', '%W %v %x'),
+('Tuesday 52 2001', '%W %V %X'),
+('060 2004', '%j %Y'),
+('4 53 1998', '%w %u %Y'),
('15-01-2001', '%d-%m-%Y %H:%i:%S'),
('15-01-20', '%d-%m-%y'),
('15-2001-1', '%d-%Y-%c');
@@ -114,16 +121,23 @@ date format str_to_date
2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 23:11:12
10:20:10 %H:%i:%s 0000-00-00 10:20:10
10:20:10 %h:%i:%s.%f 0000-00-00 10:20:10
+10:20:10 %T 0000-00-00 10:20:10
10:20:10AM %h:%i:%s%p 0000-00-00 10:20:10
+10:20:10AM %r 0000-00-00 10:20:10
10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 10:20:10.440000
15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 12:59:58
15 September 2001 %d %M %Y 2001-09-15 00:00:00
15 SEPTEMB 2001 %d %M %Y 2001-09-15 00:00:00
15 MAY 2001 %d %b %Y 2001-05-15 00:00:00
+15th May 2001 %D %b %Y 2001-05-15 00:00:00
Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00
Sund 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00
Tuesday 00 2002 %W %U %Y 2002-01-01 00:00:00
Thursday 53 1998 %W %u %Y 1998-12-31 00:00:00
+Sunday 01 2001 %W %v %x 2001-01-07 00:00:00
+Tuesday 52 2001 %W %V %X 2002-01-01 00:00:00
+060 2004 %j %Y 2004-02-29 00:00:00
+4 53 1998 %w %u %Y 1998-12-31 00:00:00
15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 00:00:00
15-01-20 %d-%m-%y 2020-01-15 00:00:00
15-2001-1 %d-%Y-%c 2001-01-15 00:00:00
@@ -138,16 +152,23 @@ date format con
2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 23:11:12
10:20:10 %H:%i:%s 0000-00-00 10:20:10
10:20:10 %h:%i:%s.%f 0000-00-00 10:20:10
+10:20:10 %T 0000-00-00 10:20:10
10:20:10AM %h:%i:%s%p 0000-00-00 10:20:10
+10:20:10AM %r 0000-00-00 10:20:10
10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 10:20:10.440000
15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 12:59:58
15 September 2001 %d %M %Y 2001-09-15 00:00:00
15 SEPTEMB 2001 %d %M %Y 2001-09-15 00:00:00
15 MAY 2001 %d %b %Y 2001-05-15 00:00:00
+15th May 2001 %D %b %Y 2001-05-15 00:00:00
Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00
Sund 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00
Tuesday 00 2002 %W %U %Y 2002-01-01 00:00:00
Thursday 53 1998 %W %u %Y 1998-12-31 00:00:00
+Sunday 01 2001 %W %v %x 2001-01-07 00:00:00
+Tuesday 52 2001 %W %V %X 2002-01-01 00:00:00
+060 2004 %j %Y 2004-02-29 00:00:00
+4 53 1998 %w %u %Y 1998-12-31 00:00:00
15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 00:00:00
15-01-20 %d-%m-%y 2020-01-15 00:00:00
15-2001-1 %d-%Y-%c 2001-01-15 00:00:00
@@ -162,16 +183,23 @@ date format datetime
2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 23:11:12
10:20:10 %H:%i:%s 0000-00-00 10:20:10
10:20:10 %h:%i:%s.%f 0000-00-00 10:20:10
+10:20:10 %T 0000-00-00 10:20:10
10:20:10AM %h:%i:%s%p 0000-00-00 10:20:10
+10:20:10AM %r 0000-00-00 10:20:10
10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 10:20:10.440000
15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 12:59:58
15 September 2001 %d %M %Y 2001-09-15 00:00:00
15 SEPTEMB 2001 %d %M %Y 2001-09-15 00:00:00
15 MAY 2001 %d %b %Y 2001-05-15 00:00:00
+15th May 2001 %D %b %Y 2001-05-15 00:00:00
Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00
Sund 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00
Tuesday 00 2002 %W %U %Y 2002-01-01 00:00:00
Thursday 53 1998 %W %u %Y 1998-12-31 00:00:00
+Sunday 01 2001 %W %v %x 2001-01-07 00:00:00
+Tuesday 52 2001 %W %V %X 2002-01-01 00:00:00
+060 2004 %j %Y 2004-02-29 00:00:00
+4 53 1998 %w %u %Y 1998-12-31 00:00:00
15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 00:00:00
15-01-20 %d-%m-%y 2020-01-15 00:00:00
15-2001-1 %d-%Y-%c 2001-01-15 00:00:00
@@ -186,16 +214,23 @@ date format date2
2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02
10:20:10 %H:%i:%s 0000-00-00
10:20:10 %h:%i:%s.%f 0000-00-00
+10:20:10 %T 0000-00-00
10:20:10AM %h:%i:%s%p 0000-00-00
+10:20:10AM %r 0000-00-00
10:20:10.44AM %h:%i:%s.%f%p 0000-00-00
15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15
15 September 2001 %d %M %Y 2001-09-15
15 SEPTEMB 2001 %d %M %Y 2001-09-15
15 MAY 2001 %d %b %Y 2001-05-15
+15th May 2001 %D %b %Y 2001-05-15
Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15
Sund 15 MAY 2001 %W %d %b %Y 2001-05-15
Tuesday 00 2002 %W %U %Y 2002-01-01
Thursday 53 1998 %W %u %Y 1998-12-31
+Sunday 01 2001 %W %v %x 2001-01-07
+Tuesday 52 2001 %W %V %X 2002-01-01
+060 2004 %j %Y 2004-02-29
+4 53 1998 %w %u %Y 1998-12-31
15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15
15-01-20 %d-%m-%y 2020-01-15
15-2001-1 %d-%Y-%c 2001-01-15
@@ -210,16 +245,23 @@ date format time
2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 23:11:12
10:20:10 %H:%i:%s 10:20:10
10:20:10 %h:%i:%s.%f 10:20:10
+10:20:10 %T 10:20:10
10:20:10AM %h:%i:%s%p 10:20:10
+10:20:10AM %r 10:20:10
10:20:10.44AM %h:%i:%s.%f%p 10:20:10.440000
15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 12:59:58
15 September 2001 %d %M %Y 00:00:00
15 SEPTEMB 2001 %d %M %Y 00:00:00
15 MAY 2001 %d %b %Y 00:00:00
+15th May 2001 %D %b %Y 00:00:00
Sunday 15 MAY 2001 %W %d %b %Y 00:00:00
Sund 15 MAY 2001 %W %d %b %Y 00:00:00
Tuesday 00 2002 %W %U %Y 00:00:00
Thursday 53 1998 %W %u %Y 00:00:00
+Sunday 01 2001 %W %v %x 00:00:00
+Tuesday 52 2001 %W %V %X 00:00:00
+060 2004 %j %Y 00:00:00
+4 53 1998 %w %u %Y 00:00:00
15-01-2001 %d-%m-%Y %H:%i:%S 00:00:00
15-01-20 %d-%m-%y 00:00:00
15-2001-1 %d-%Y-%c 00:00:00
@@ -234,16 +276,23 @@ date format time2
2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 23:11:12
10:20:10 %H:%i:%s 10:20:10
10:20:10 %h:%i:%s.%f 10:20:10
+10:20:10 %T 10:20:10
10:20:10AM %h:%i:%s%p 10:20:10
+10:20:10AM %r 10:20:10
10:20:10.44AM %h:%i:%s.%f%p 10:20:10.440000
15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 12:59:58
15 September 2001 %d %M %Y 00:00:00
15 SEPTEMB 2001 %d %M %Y 00:00:00
15 MAY 2001 %d %b %Y 00:00:00
+15th May 2001 %D %b %Y 00:00:00
Sunday 15 MAY 2001 %W %d %b %Y 00:00:00
Sund 15 MAY 2001 %W %d %b %Y 00:00:00
Tuesday 00 2002 %W %U %Y 00:00:00
Thursday 53 1998 %W %u %Y 00:00:00
+Sunday 01 2001 %W %v %x 00:00:00
+Tuesday 52 2001 %W %V %X 00:00:00
+060 2004 %j %Y 00:00:00
+4 53 1998 %w %u %Y 00:00:00
15-01-2001 %d-%m-%Y %H:%i:%S 00:00:00
15-01-20 %d-%m-%y 00:00:00
15-2001-1 %d-%Y-%c 00:00:00
@@ -258,10 +307,13 @@ insert into t1 values
('15 Septembei 2001', '%d %M %Y'),
('15 Ju 2001', '%d %M %Y'),
('Sund 15 MA', '%W %d %b %Y'),
-('Sunday 01 2001', '%W %V %X'),
('Thursdai 12 1998', '%W %u %Y'),
-(NULL, get_format(DATE,'USA')),
-('Tuesday 52 2001', '%W %V %X');
+('Sunday 01 2001', '%W %v %X'),
+('Tuesday 52 2001', '%W %V %x'),
+('Tuesday 52 2001', '%W %V %Y'),
+('Tuesday 52 2001', '%W %u %x'),
+('7 53 1998', '%w %u %Y'),
+(NULL, get_format(DATE,'USA'));
select date,format,str_to_date(date, format) as str_to_date from t1;
date format str_to_date
2003-01-02 10:11:12 PM %Y-%m-%d %H:%i:%S %p NULL
@@ -273,10 +325,13 @@ date format str_to_date
15 Septembei 2001 %d %M %Y NULL
15 Ju 2001 %d %M %Y NULL
Sund 15 MA %W %d %b %Y NULL
-Sunday 01 2001 %W %V %X NULL
Thursdai 12 1998 %W %u %Y NULL
+Sunday 01 2001 %W %v %X NULL
+Tuesday 52 2001 %W %V %x NULL
+Tuesday 52 2001 %W %V %Y NULL
+Tuesday 52 2001 %W %u %x NULL
+7 53 1998 %w %u %Y NULL
NULL %m.%d.%Y NULL
-Tuesday 52 2001 %W %V %X NULL
select date,format,concat(str_to_date(date, format),'') as con from t1;
date format con
2003-01-02 10:11:12 PM %Y-%m-%d %H:%i:%S %p NULL
@@ -288,10 +343,13 @@ date format con
15 Septembei 2001 %d %M %Y NULL
15 Ju 2001 %d %M %Y NULL
Sund 15 MA %W %d %b %Y NULL
-Sunday 01 2001 %W %V %X NULL
Thursdai 12 1998 %W %u %Y NULL
+Sunday 01 2001 %W %v %X NULL
+Tuesday 52 2001 %W %V %x NULL
+Tuesday 52 2001 %W %V %Y NULL
+Tuesday 52 2001 %W %u %x NULL
+7 53 1998 %w %u %Y NULL
NULL %m.%d.%Y NULL
-Tuesday 52 2001 %W %V %X NULL
truncate table t1;
insert into t1 values
('10:20:10AM', '%h:%i:%s'),
@@ -321,6 +379,9 @@ a
select get_format(DATETIME, 'eur') as a;
a
%Y-%m-%d %H.%i.%s
+select get_format(TIMESTAMP, 'eur') as a;
+a
+%Y-%m-%d %H.%i.%s
select get_format(DATE, 'TEST') as a;
a
NULL
diff --git a/mysql-test/r/endspace.result b/mysql-test/r/endspace.result
index 4800bbf4ecb..96210a0e16d 100644
--- a/mysql-test/r/endspace.result
+++ b/mysql-test/r/endspace.result
@@ -52,13 +52,13 @@ select * from t1 ignore key (key1) where text1='teststring' or text1 like 'tests
text1
teststring
teststring
-select * from t1 where text1='teststring' or text1 like 'teststring_%';
-text1
-teststring
-teststring
-select * from t1 where text1='teststring' or text1 > 'teststring\t';
-text1
-teststring
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 like 'teststring_%';
+concat('|', text1, '|')
+|teststring |
+|teststring|
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring\t';
+concat('|', text1, '|')
+|teststring|
select text1, length(text1) from t1 order by text1;
text1 length(text1)
nothing 7
@@ -77,7 +77,28 @@ concat('|', text1, '|')
|teststring|
|teststring |
|teststring |
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring\t';
+concat('|', text1, '|')
+|teststring|
+|teststring |
+select concat('|', text1, '|') from t1 where text1='teststring';
+concat('|', text1, '|')
+|teststring|
+select concat('|', text1, '|') from t1 where text1='teststring ';
+concat('|', text1, '|')
+|teststring |
alter table t1 modify text1 text not null, pack_keys=1;
+select concat('|', text1, '|') from t1 where text1='teststring';
+concat('|', text1, '|')
+|teststring|
+|teststring |
+select concat('|', text1, '|') from t1 where text1='teststring ';
+concat('|', text1, '|')
+|teststring|
+|teststring |
+explain select concat('|', text1, '|') from t1 where text1='teststring ';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range key1 key1 22 NULL 2 Using where
select * from t1 where text1 like 'teststring_%';
text1
teststring
@@ -87,10 +108,10 @@ text1
teststring
teststring
teststring
-select * from t1 where text1='teststring' or text1 > 'teststring\t';
-text1
-teststring
-teststring
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring\t';
+concat('|', text1, '|')
+|teststring|
+|teststring |
select concat('|', text1, '|') from t1 order by text1;
concat('|', text1, '|')
|nothing|
diff --git a/mysql-test/r/exampledb.result b/mysql-test/r/exampledb.result
new file mode 100644
index 00000000000..9bfb77c1c0b
--- /dev/null
+++ b/mysql-test/r/exampledb.result
@@ -0,0 +1,6 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
+) ENGINE=example;
+drop table t1;
diff --git a/mysql-test/r/flush_block_commit.result b/mysql-test/r/flush_block_commit.result
new file mode 100644
index 00000000000..17991f15382
--- /dev/null
+++ b/mysql-test/r/flush_block_commit.result
@@ -0,0 +1,23 @@
+drop table if exists t1;
+create table t1 (a int) engine=innodb;
+begin;
+insert into t1 values(1);
+flush tables with read lock;
+select * from t1;
+a
+ commit;
+select * from t1;
+a
+unlock tables;
+begin;
+select * from t1 for update;
+a
+1
+begin;
+ select * from t1 for update;
+ flush tables with read lock;
+commit;
+a
+1
+unlock tables;
+drop table t1;
diff --git a/mysql-test/r/fulltext2.result b/mysql-test/r/fulltext2.result
index 0fdb7d4dbd1..0b1d8eb9a15 100644
--- a/mysql-test/r/fulltext2.result
+++ b/mysql-test/r/fulltext2.result
@@ -7,6 +7,15 @@ FULLTEXT KEY (a)
repair table t1 quick;
Table Op Msg_type Msg_text
test.t1 repair status OK
+check table t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+check table t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
select count(*) from t1 where match a against ('aaaxxx');
count(*)
260
diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result
index b64f0ca57dd..665f9262dea 100644
--- a/mysql-test/r/func_gconcat.result
+++ b/mysql-test/r/func_gconcat.result
@@ -18,7 +18,7 @@ explain extended select grp,group_concat(c) from t1 group by grp;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9 Using filesort
Warnings:
-Note 1003 select `test`.`t1`.`grp` AS `grp`,group_concat(`test`.`t1`.`c` seperator ',') AS `group_concat(c)` from `test`.`t1` group by `test`.`t1`.`grp`
+Note 1003 select `test`.`t1`.`grp` AS `grp`,group_concat(`test`.`t1`.`c` separator ',') AS `group_concat(c)` from `test`.`t1` group by `test`.`t1`.`grp`
select grp,group_concat(a,c) from t1 group by grp;
grp group_concat(a,c)
1 1a
@@ -93,7 +93,7 @@ explain extended select grp,group_concat(distinct c order by c desc) from t1 gro
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9 Using filesort
Warnings:
-Note 1003 select `test`.`t1`.`grp` AS `grp`,group_concat(distinct `test`.`t1`.`c` order by `test`.`t1`.`c` seperator ',') AS `group_concat(distinct c order by c desc)` from `test`.`t1` group by `test`.`t1`.`grp`
+Note 1003 select `test`.`t1`.`grp` AS `grp`,group_concat(distinct `test`.`t1`.`c` order by `test`.`t1`.`c` separator ',') AS `group_concat(distinct c order by c desc)` from `test`.`t1` group by `test`.`t1`.`grp`
select grp,group_concat(c order by c separator ",") from t1 group by grp;
grp group_concat(c order by c separator ",")
1 a
@@ -113,7 +113,7 @@ explain extended select grp,group_concat(distinct c order by c separator ",") fr
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9 Using filesort
Warnings:
-Note 1003 select `test`.`t1`.`grp` AS `grp`,group_concat(distinct `test`.`t1`.`c` order by `test`.`t1`.`c` seperator ',') AS `group_concat(distinct c order by c separator ",")` from `test`.`t1` group by `test`.`t1`.`grp`
+Note 1003 select `test`.`t1`.`grp` AS `grp`,group_concat(distinct `test`.`t1`.`c` order by `test`.`t1`.`c` separator ',') AS `group_concat(distinct c order by c separator ",")` from `test`.`t1` group by `test`.`t1`.`grp`
select grp,group_concat(distinct c order by c desc separator ",") from t1 group by grp;
grp group_concat(distinct c order by c desc separator ",")
1 a
@@ -294,6 +294,21 @@ grp
select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1;
grp
2,4,3,5
+select t1.a, group_concat(c order by (select c from t2 where t2.a=t1.a limit 1)) as grp from t1 group by 1;
+a grp
+1 2
+2 4,3
+3 5
+select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1 group by 1;
+a grp
+1 2
+2 4,3
+3 5
+select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1;
+a grp
+1 2
+2 4,3
+3 5
select a,c,(select group_concat(c order by a) from t2 where a=t1.a) as grp from t1 order by grp;
a c grp
3 5 3,3
@@ -321,3 +336,33 @@ HAVING LEFT(names, 1) ='J';
names
John###Anna###Bill
DROP TABLE t1;
+CREATE TABLE t1 ( a int, b TEXT );
+INSERT INTO t1 VALUES (1,'First Row'), (2,'Second Row');
+SELECT GROUP_CONCAT(b ORDER BY b) FROM t1 GROUP BY a;
+GROUP_CONCAT(b ORDER BY b)
+First Row
+Second Row
+DROP TABLE t1;
+CREATE TABLE t1 (a_id tinyint(4) NOT NULL default '0', PRIMARY KEY (a_id)) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES (1),(2),(3);
+CREATE TABLE t2 (b_id tinyint(4) NOT NULL default '0',b_a tinyint(4) NOT NULL default '0', PRIMARY KEY (b_id), KEY (b_a),
+CONSTRAINT fk_b_a FOREIGN KEY (b_a) REFERENCES t1 (a_id) ON DELETE CASCADE ON UPDATE NO ACTION) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO t2 VALUES (1,1),(2,1),(3,1),(4,2),(5,2);
+SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN (t2) on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz;
+a_id b_list
+1 1,2,3
+2 4,5
+3 NULL
+DROP TABLE t2;
+DROP TABLE t1;
+CREATE TABLE t1 (A_ID INT NOT NULL,A_DESC CHAR(3) NOT NULL,PRIMARY KEY (A_ID));
+INSERT INTO t1 VALUES (1,'ABC'), (2,'EFG'), (3,'HIJ');
+CREATE TABLE t2 (A_ID INT NOT NULL,B_DESC CHAR(3) NOT NULL,PRIMARY KEY (A_ID,B_DESC));
+INSERT INTO t2 VALUES (1,'A'),(1,'B'),(3,'F');
+SELECT t1.A_ID, GROUP_CONCAT(t2.B_DESC) AS B_DESC FROM t1 LEFT JOIN t2 ON t1.A_ID=t2.A_ID GROUP BY t1.A_ID ORDER BY t1.A_DESC;
+A_ID B_DESC
+1 A,B
+2 NULL
+3 F
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/r/func_if.result b/mysql-test/r/func_if.result
index 36bd9a36d1c..307d1021cdb 100644
--- a/mysql-test/r/func_if.result
+++ b/mysql-test/r/func_if.result
@@ -43,7 +43,7 @@ explain extended select if(u=1,st,binary st) s from t1 where st like "%a%" order
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where; Using filesort
Warnings:
-Note 1003 select if((`test`.`t1`.`u` = 1),`test`.`t1`.`st`,(`test`.`t1`.`st` collate _latin1'BINARY')) AS `s` from `test`.`t1` where (`test`.`t1`.`st` like _latin1'%a%') order by if((`test`.`t1`.`u` = 1),`test`.`t1`.`st`,(`test`.`t1`.`st` collate _latin1'BINARY'))
+Note 1003 select if((test.t1.u = 1),test.t1.st,cast(test.t1.st as char charset binary)) AS `s` from test.t1 where (test.t1.st like _latin1'%a%') order by if((test.t1.u = 1),test.t1.st,cast(test.t1.st as char charset binary))
select nullif(u=0, 'test') from t1;
nullif(u=0, 'test')
NULL
diff --git a/mysql-test/r/func_in.result b/mysql-test/r/func_in.result
index 025ea02e454..b132edab81d 100644
--- a/mysql-test/r/func_in.result
+++ b/mysql-test/r/func_in.result
@@ -148,6 +148,16 @@ id select_type table type possible_keys key key_len ref rows Extra
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (_latin1'a' in (`test`.`t1`.`a`,`test`.`t1`.`b`,(`test`.`t1`.`c` collate _latin1'latin1_bin')))
drop table t1;
+set names utf8;
+create table t1 (a char(10) character set utf8 not null);
+insert into t1 values ('bbbb'),(_koi8r'цццц'),(_latin1'ÄÄÄÄ');
+select a from t1 where a in ('bbbb',_koi8r'цццц',_latin1'ÄÄÄÄ') order by a;
+a
+ÄÄÄÄ
+bbbb
+цццц
+drop table t1;
+set names latin1;
select '1.0' in (1,2);
'1.0' in (1,2)
1
diff --git a/mysql-test/r/func_like.result b/mysql-test/r/func_like.result
index e32a4dcd85a..e9434b1749d 100644
--- a/mysql-test/r/func_like.result
+++ b/mysql-test/r/func_like.result
@@ -155,3 +155,6 @@ select * from t1 where a like '%ESKA%';
a
PPUH PESKA-I Maria Struniarska
DROP TABLE t1;
+select _cp866'aaaaaaaaa' like _cp866'%aaaa%' collate cp866_bin;
+_cp866'aaaaaaaaa' like _cp866'%aaaa%' collate cp866_bin
+1
diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result
index 12eef4aa881..90aa04515d7 100644
--- a/mysql-test/r/func_math.result
+++ b/mysql-test/r/func_math.result
@@ -43,7 +43,7 @@ Warnings:
Note 1003 select abs(-(10)) AS `abs(-10)`,sign(-(5)) AS `sign(-5)`,sign(5) AS `sign(5)`,sign(0) AS `sign(0)`
select log(exp(10)),exp(log(sqrt(10))*2),log(-1),log(NULL),log(1,1),log(3,9),log(-1,2),log(NULL,2);
log(exp(10)) exp(log(sqrt(10))*2) log(-1) log(NULL) log(1,1) log(3,9) log(-1,2) log(NULL,2)
-10.000000 10.000000 NULL NULL NULL 2.000000 NULL NULL
+10 10 NULL NULL NULL 2 NULL NULL
explain extended select log(exp(10)),exp(log(sqrt(10))*2),log(-1),log(NULL),log(1,1),log(3,9),log(-1,2),log(NULL,2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -51,7 +51,7 @@ Warnings:
Note 1003 select log(exp(10)) AS `log(exp(10))`,exp((log(sqrt(10)) * 2)) AS `exp(log(sqrt(10))*2)`,log(-(1)) AS `log(-1)`,log(NULL) AS `log(NULL)`,log(1,1) AS `log(1,1)`,log(3,9) AS `log(3,9)`,log(-(1),2) AS `log(-1,2)`,log(NULL,2) AS `log(NULL,2)`
select ln(exp(10)),exp(ln(sqrt(10))*2),ln(-1),ln(0),ln(NULL);
ln(exp(10)) exp(ln(sqrt(10))*2) ln(-1) ln(0) ln(NULL)
-10.000000 10.000000 NULL NULL NULL
+10 10 NULL NULL NULL
explain extended select ln(exp(10)),exp(ln(sqrt(10))*2),ln(-1),ln(0),ln(NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -59,7 +59,7 @@ Warnings:
Note 1003 select ln(exp(10)) AS `ln(exp(10))`,exp((ln(sqrt(10)) * 2)) AS `exp(ln(sqrt(10))*2)`,ln(-(1)) AS `ln(-1)`,ln(0) AS `ln(0)`,ln(NULL) AS `ln(NULL)`
select log2(8),log2(15),log2(-2),log2(0),log2(NULL);
log2(8) log2(15) log2(-2) log2(0) log2(NULL)
-3.000000 3.906891 NULL NULL NULL
+3 3.9068905956085 NULL NULL NULL
explain extended select log2(8),log2(15),log2(-2),log2(0),log2(NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -67,7 +67,7 @@ Warnings:
Note 1003 select log2(8) AS `log2(8)`,log2(15) AS `log2(15)`,log2(-(2)) AS `log2(-2)`,log2(0) AS `log2(0)`,log2(NULL) AS `log2(NULL)`
select log10(100),log10(18),log10(-4),log10(0),log10(NULL);
log10(100) log10(18) log10(-4) log10(0) log10(NULL)
-2.000000 1.255273 NULL NULL NULL
+2 1.2552725051033 NULL NULL NULL
explain extended select log10(100),log10(18),log10(-4),log10(0),log10(NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -75,7 +75,7 @@ Warnings:
Note 1003 select log10(100) AS `log10(100)`,log10(18) AS `log10(18)`,log10(-(4)) AS `log10(-4)`,log10(0) AS `log10(0)`,log10(NULL) AS `log10(NULL)`
select pow(10,log10(10)),power(2,4);
pow(10,log10(10)) power(2,4)
-10.000000 16.000000
+10 16
explain extended select pow(10,log10(10)),power(2,4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -90,35 +90,35 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 select sql_no_cache rand(999999) AS `rand(999999)`,rand() AS `rand()`
-select pi(),sin(pi()/2),cos(pi()/2),abs(tan(pi())),cot(1),asin(1),acos(0),atan(1);
-pi() sin(pi()/2) cos(pi()/2) abs(tan(pi())) cot(1) asin(1) acos(0) atan(1)
-3.141593 1.000000 0.000000 0.000000 0.64209262 1.570796 1.570796 0.785398
-explain extended select pi(),sin(pi()/2),cos(pi()/2),abs(tan(pi())),cot(1),asin(1),acos(0),atan(1);
+select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6);
+pi() format(sin(pi()/2),6) format(cos(pi()/2),6) format(abs(tan(pi())),6) format(cot(1),6) format(asin(1),6) format(acos(0),6) format(atan(1),6)
+3.141593 1.000000 0.000000 0.000000 0.642093 1.570796 1.570796 0.785398
+explain extended select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select pi() AS `pi()`,sin((pi() / 2)) AS `sin(pi()/2)`,cos((pi() / 2)) AS `cos(pi()/2)`,abs(tan(pi())) AS `abs(tan(pi()))`,(1 / tan(1)) AS `cot(1)`,asin(1) AS `asin(1)`,acos(0) AS `acos(0)`,atan(1) AS `atan(1)`
+Note 1003 select pi() AS `pi()`,format(sin((pi() / 2)),6) AS `format(sin(pi()/2),6)`,format(cos((pi() / 2)),6) AS `format(cos(pi()/2),6)`,format(abs(tan(pi())),6) AS `format(abs(tan(pi())),6)`,format((1 / tan(1)),6) AS `format(cot(1),6)`,format(asin(1),6) AS `format(asin(1),6)`,format(acos(0),6) AS `format(acos(0),6)`,format(atan(1),6) AS `format(atan(1),6)`
select degrees(pi()),radians(360);
degrees(pi()) radians(360)
180 6.2831853071796
SELECT ACOS(1.0);
ACOS(1.0)
-0.000000
+0
SELECT ASIN(1.0);
ASIN(1.0)
-1.570796
+1.5707963267949
SELECT ACOS(0.2*5.0);
ACOS(0.2*5.0)
-0.000000
+0
SELECT ACOS(0.5*2.0);
ACOS(0.5*2.0)
-0.000000
+0
SELECT ASIN(0.8+0.2);
ASIN(0.8+0.2)
-1.570796
+1.5707963267949
SELECT ASIN(1.2-0.2);
ASIN(1.2-0.2)
-1.570796
+1.5707963267949
explain extended select degrees(pi()),radians(360);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
diff --git a/mysql-test/r/func_set.result b/mysql-test/r/func_set.result
index 4918617f85f..2431406c128 100644
--- a/mysql-test/r/func_set.result
+++ b/mysql-test/r/func_set.result
@@ -41,8 +41,7 @@ interval(null, 1, 10, 100)
-1
drop table if exists t1,t2;
create table t1 (id int(10) not null unique);
-create table t2 (id int(10) not null primary key,
-val int(10) not null);
+create table t2 (id int(10) not null primary key, val int(10) not null);
insert into t1 values (1),(2),(4);
insert into t2 values (1,1),(2,1),(3,1),(4,2);
select one.id, elt(two.val,'one','two') from t1 one, t2 two where two.id=one.id;
@@ -56,3 +55,12 @@ id elt(two.val,'one','two')
2 one
4 two
drop table t1,t2;
+select find_in_set(binary 'a',binary 'A,B,C');
+find_in_set(binary 'a',binary 'A,B,C')
+0
+select find_in_set('a',binary 'A,B,C');
+find_in_set('a',binary 'A,B,C')
+0
+select find_in_set(binary 'a', 'A,B,C');
+find_in_set(binary 'a', 'A,B,C')
+0
diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result
index e07ee4f0add..7b2fc4b21a5 100644
--- a/mysql-test/r/func_str.result
+++ b/mysql-test/r/func_str.result
@@ -638,7 +638,7 @@ explain extended select md5('hello'), sha('abc'), sha1('abc'), soundex(''), 'moo
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select md5(_latin1'hello') AS `md5('hello')`,sha(_latin1'abc') AS `sha('abc')`,sha(_latin1'abc') AS `sha1('abc')`,soundex(_latin1'') AS `soundex('')`,(soundex(_latin1'mood') = soundex(_latin1'mud')) AS `'mood' sounds like 'mud'`,aes_decrypt(aes_encrypt(_latin1'abc',_latin1'1'),_latin1'1') AS `aes_decrypt(aes_encrypt('abc','1'),'1')`,concat(_latin1'*',repeat(_latin1' ',5),_latin1'*') AS `concat('*',space(5),'*')`,reverse(_latin1'abc') AS `reverse('abc')`,rpad(_latin1'a',4,_latin1'1') AS `rpad('a',4,'1')`,lpad(_latin1'a',4,_latin1'1') AS `lpad('a',4,'1')`,concat_ws(_latin1',',_latin1'',NULL,_latin1'a') AS `concat_ws(',','',NULL,'a')`,make_set(255,_latin2'a',_latin2'b',_latin2'c') AS `make_set(255,_latin2'a',_latin2'b',_latin2'c')`,elt(2,1) AS `elt(2,1)`,locate(_latin1'a',_latin1'b',2) AS `locate("a","b",2)`,format(130,10) AS `format(130,10)`,char(0) AS `char(0)`,conv(130,16,10) AS `conv(130,16,10)`,hex(130) AS `hex(130)`,(_latin1'HE' collate _latin1'BINARY') AS `binary 'HE'`,export_set(255,_latin2'y',_latin2'n',_latin2' ') AS `export_set(255,_latin2'y',_latin2'n',_latin2' ')`,field((_latin1'b' collate _latin1'latin1_bin'),_latin1'A',_latin1'B') AS `FIELD('b' COLLATE latin1_bin,'A','B')`,find_in_set(_latin1'B',_latin1'a,b,c,d') AS `FIND_IN_SET(_latin1'B',_latin1'a,b,c,d')`,collation(conv(130,16,10)) AS `collation(conv(130,16,10))`,coercibility(conv(130,16,10)) AS `coercibility(conv(130,16,10))`,length(_latin1'\n \r\0\\_\\%\\') AS `length('\n\t\r\b\0\_\%\\')`,bit_length(_latin1'\n \r\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`,bit_length(_latin1'\n \r\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`,concat(_latin1'monty',_latin1' was here ',_latin1'again') AS `concat('monty',' was here ','again')`,length(_latin1'hello') AS `length('hello')`,char(ascii(_latin1'h')) AS `char(ascii('h'))`,ord(_latin1'h') AS `ord('h')`,quote((1 / 0)) AS `quote(1/0)`,crc32(_latin1'123') AS `crc32("123")`,replace(_latin1'aaaa',_latin1'a',_latin1'b') AS `replace('aaaa','a','b')`,insert(_latin1'txs',2,1,_latin1'hi') AS `insert('txs',2,1,'hi')`,left(_latin2'a',1) AS `left(_latin2'a',1)`,right(_latin2'a',1) AS `right(_latin2'a',1)`,lcase(_latin2'a') AS `lcase(_latin2'a')`,ucase(_latin2'a') AS `ucase(_latin2'a')`,substr(_latin1'abcdefg',3,2) AS `SUBSTR('abcdefg',3,2)`,substr_index(_latin1'1abcd;2abcd;3abcd;4abcd',_latin1';',2) AS `substring_index("1abcd;2abcd;3abcd;4abcd", ';', 2)`,trim(_latin2' a ') AS `trim(_latin2' a ')`,ltrim(_latin2' a ') AS `ltrim(_latin2' a ')`,rtrim(_latin2' a ') AS `rtrim(_latin2' a ')`,decode(encode(repeat(_latin1'a',100000))) AS `decode(encode(repeat("a",100000),"monty"),"monty")`
+Note 1003 select md5(_latin1'hello') AS `md5('hello')`,sha(_latin1'abc') AS `sha('abc')`,sha(_latin1'abc') AS `sha1('abc')`,soundex(_latin1'') AS `soundex('')`,(soundex(_latin1'mood') = soundex(_latin1'mud')) AS `'mood' sounds like 'mud'`,aes_decrypt(aes_encrypt(_latin1'abc',_latin1'1'),_latin1'1') AS `aes_decrypt(aes_encrypt('abc','1'),'1')`,concat(_latin1'*',repeat(_latin1' ',5),_latin1'*') AS `concat('*',space(5),'*')`,reverse(_latin1'abc') AS `reverse('abc')`,rpad(_latin1'a',4,_latin1'1') AS `rpad('a',4,'1')`,lpad(_latin1'a',4,_latin1'1') AS `lpad('a',4,'1')`,concat_ws(_latin1',',_latin1'',NULL,_latin1'a') AS `concat_ws(',','',NULL,'a')`,make_set(255,_latin2'a',_latin2'b',_latin2'c') AS `make_set(255,_latin2'a',_latin2'b',_latin2'c')`,elt(2,1) AS `elt(2,1)`,locate(_latin1'a',_latin1'b',2) AS `locate("a","b",2)`,format(130,10) AS `format(130,10)`,char(0) AS `char(0)`,conv(130,16,10) AS `conv(130,16,10)`,hex(130) AS `hex(130)`,cast(_latin1'HE' as char charset binary) AS `binary 'HE'`,export_set(255,_latin2'y',_latin2'n',_latin2' ') AS `export_set(255,_latin2'y',_latin2'n',_latin2' ')`,field((_latin1'b' collate _latin1'latin1_bin'),_latin1'A',_latin1'B') AS `FIELD('b' COLLATE latin1_bin,'A','B')`,find_in_set(_latin1'B',_latin1'a,b,c,d') AS `FIND_IN_SET(_latin1'B',_latin1'a,b,c,d')`,collation(conv(130,16,10)) AS `collation(conv(130,16,10))`,coercibility(conv(130,16,10)) AS `coercibility(conv(130,16,10))`,length(_latin1'\n \r\0\\_\\%\\') AS `length('\n\t\r\b\0\_\%\\')`,bit_length(_latin1'\n \r\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`,bit_length(_latin1'\n \r\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`,concat(_latin1'monty',_latin1' was here ',_latin1'again') AS `concat('monty',' was here ','again')`,length(_latin1'hello') AS `length('hello')`,char(ascii(_latin1'h')) AS `char(ascii('h'))`,ord(_latin1'h') AS `ord('h')`,quote((1 / 0)) AS `quote(1/0)`,crc32(_latin1'123') AS `crc32("123")`,replace(_latin1'aaaa',_latin1'a',_latin1'b') AS `replace('aaaa','a','b')`,insert(_latin1'txs',2,1,_latin1'hi') AS `insert('txs',2,1,'hi')`,left(_latin2'a',1) AS `left(_latin2'a',1)`,right(_latin2'a',1) AS `right(_latin2'a',1)`,lcase(_latin2'a') AS `lcase(_latin2'a')`,ucase(_latin2'a') AS `ucase(_latin2'a')`,substr(_latin1'abcdefg',3,2) AS `SUBSTR('abcdefg',3,2)`,substr_index(_latin1'1abcd;2abcd;3abcd;4abcd',_latin1';',2) AS `substring_index("1abcd;2abcd;3abcd;4abcd", ';', 2)`,trim(_latin2' a ') AS `trim(_latin2' a ')`,ltrim(_latin2' a ') AS `ltrim(_latin2' a ')`,rtrim(_latin2' a ') AS `rtrim(_latin2' a ')`,decode(encode(repeat(_latin1'a',100000))) AS `decode(encode(repeat("a",100000),"monty"),"monty")`
SELECT lpad(12345, 5, "#");
lpad(12345, 5, "#")
12345
@@ -673,3 +673,6 @@ c1 c2
2147483647 4294967295
-2147483648 0
drop table t1;
+select left(1234, 3) + 0;
+left(1234, 3) + 0
+123
diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result
index d8ea18f9744..864d9d04ff5 100644
--- a/mysql-test/r/gis.result
+++ b/mysql-test/r/gis.result
@@ -466,3 +466,22 @@ insert IGNORE into t1 values ('Garbage');
ERROR HY000: Unknown error
alter table t1 add spatial index(a);
drop table t1;
+create table t1(a geometry not null, spatial index(a));
+insert into t1 values
+(GeomFromText('POINT(1 1)')), (GeomFromText('POINT(3 3)')),
+(GeomFromText('POINT(4 4)')), (GeomFromText('POINT(6 6)'));
+select AsText(a) from t1 where
+MBRContains(GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a)
+or
+MBRContains(GeomFromText('Polygon((2 2, 2 5, 5 5, 5 2, 2 2))'), a);
+AsText(a)
+POINT(1 1)
+POINT(3 3)
+POINT(4 4)
+select AsText(a) from t1 where
+MBRContains(GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a)
+and
+MBRContains(GeomFromText('Polygon((0 0, 0 7, 7 7, 7 0, 0 0))'), a);
+AsText(a)
+POINT(1 1)
+drop table t1;
diff --git a/mysql-test/r/grant.result b/mysql-test/r/grant.result
index 5d18c3f483f..866c19155db 100644
--- a/mysql-test/r/grant.result
+++ b/mysql-test/r/grant.result
@@ -229,6 +229,25 @@ GRANT SELECT () ON ``.`` TO ''@'localhost'
REVOKE SELECT () ON . FROM @localhost;
DROP DATABASE ;
SET NAMES latin1;
+insert into mysql.user (host, user) values ('localhost', 'test11');
+insert into mysql.db (host, db, user, select_priv) values
+('localhost', 'a%', 'test11', 'Y'), ('localhost', 'ab%', 'test11', 'Y');
+alter table mysql.db order by db asc;
+flush privileges;
+show grants for test11@localhost;
+Grants for test11@localhost
+GRANT USAGE ON *.* TO 'test11'@'localhost'
+GRANT SELECT ON `ab%`.* TO 'test11'@'localhost'
+GRANT SELECT ON `a%`.* TO 'test11'@'localhost'
+alter table mysql.db order by db desc;
+flush privileges;
+show grants for test11@localhost;
+Grants for test11@localhost
+GRANT USAGE ON *.* TO 'test11'@'localhost'
+GRANT SELECT ON `ab%`.* TO 'test11'@'localhost'
+GRANT SELECT ON `a%`.* TO 'test11'@'localhost'
+delete from mysql.user where user='test11';
+delete from mysql.db where user='test11';
SHOW PRIVILEGES;
Privilege Context Comment
Alter Tables To alter the table
diff --git a/mysql-test/r/have_csv.require b/mysql-test/r/have_csv.require
new file mode 100644
index 00000000000..cc2fb28289c
--- /dev/null
+++ b/mysql-test/r/have_csv.require
@@ -0,0 +1,2 @@
+Variable_name Value
+have_csv YES
diff --git a/mysql-test/r/have_exampledb.require b/mysql-test/r/have_exampledb.require
new file mode 100644
index 00000000000..4b0938660fe
--- /dev/null
+++ b/mysql-test/r/have_exampledb.require
@@ -0,0 +1,2 @@
+Variable_name Value
+have_exampledb YES
diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result
index 0556aa3a2b7..2e94974e953 100644
--- a/mysql-test/r/having.result
+++ b/mysql-test/r/having.result
@@ -87,3 +87,44 @@ sqty
5
9
drop table t1;
+CREATE TABLE t1 (
+`id` bigint(20) NOT NULL default '0',
+`description` text
+) ENGINE=MyISAM;
+CREATE TABLE t2 (
+`id` bigint(20) NOT NULL default '0',
+`description` varchar(20)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1, 'test');
+INSERT INTO t2 VALUES (1, 'test');
+CREATE TABLE t3 (
+`id` bigint(20) NOT NULL default '0',
+`order_id` bigint(20) NOT NULL default '0'
+) ENGINE=MyISAM;
+select
+a.id, a.description,
+count(b.id) as c
+from t1 a left join t3 b on a.id=b.order_id
+group by a.id, a.description
+having (a.description is not null) and (c=0);
+id description c
+1 test 0
+select
+a.*,
+count(b.id) as c
+from t2 a left join t3 b on a.id=b.order_id
+group by a.id, a.description
+having (a.description is not null) and (c=0);
+id description c
+1 test 0
+INSERT INTO t1 VALUES (2, 'test2');
+select
+a.id, a.description,
+count(b.id) as c
+from t1 a left join t3 b on a.id=b.order_id
+group by a.id, a.description
+having (a.description is not null) and (c=0);
+id description c
+1 test 0
+2 test2 0
+drop table t1,t2,t3;
diff --git a/mysql-test/r/heap.result b/mysql-test/r/heap.result
index 6100301b2c6..a33d26f9efa 100644
--- a/mysql-test/r/heap.result
+++ b/mysql-test/r/heap.result
@@ -217,3 +217,13 @@ DELETE from t1 where a < 100;
SELECT * from t1;
a
DROP TABLE t1;
+CREATE TABLE `job_titles` (
+`job_title_id` int(6) unsigned NOT NULL default '0',
+`job_title` char(18) NOT NULL default '',
+PRIMARY KEY (`job_title_id`),
+UNIQUE KEY `job_title_id` (`job_title_id`,`job_title`)
+) ENGINE=HEAP;
+SELECT MAX(job_title_id) FROM job_titles;
+MAX(job_title_id)
+NULL
+DROP TABLE job_titles;
diff --git a/mysql-test/r/join.result b/mysql-test/r/join.result
index db9b051a58f..dc763472b0e 100644
--- a/mysql-test/r/join.result
+++ b/mysql-test/r/join.result
@@ -283,6 +283,12 @@ ID Value1 Value2
SELECT * FROM t1 NATURAL JOIN t2 WHERE (Value1 = 'A' AND Value2 <> 'B') AND 1;
ID Value1 Value2
drop table t1,t2;
+CREATE TABLE t1 (a int);
+CREATE TABLE t2 (b int);
+CREATE TABLE t3 (c int);
+SELECT * FROM t1 NATURAL JOIN t2 NATURAL JOIN t3;
+a b c
+DROP TABLE t1, t2, t3;
create table t1 (i int);
create table t2 (i int);
create table t3 (i int);
diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result
index 7915521a9b7..ec377b92371 100644
--- a/mysql-test/r/join_outer.result
+++ b/mysql-test/r/join_outer.result
@@ -628,7 +628,7 @@ insert into t2 values (10,1),(20,2),(30,3);
explain select * from t2 left join t1 on t1.fooID = t2.fooID and t1.fooID = 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL PRIMARY 4 NULL 3 Using index
-1 SIMPLE t1 eq_ref PRIMARY PRIMARY 2 const 1 Using index
+1 SIMPLE t1 const PRIMARY PRIMARY 2 const 1 Using where; Using index
select * from t2 left join t1 on t1.fooID = t2.fooID and t1.fooID = 30;
fooID barID fooID
10 1 NULL
@@ -676,3 +676,72 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2
1 SIMPLE t3 ALL NULL NULL NULL NULL 2
drop table t1, t2, t3;
+create table t1 (
+match_id tinyint(3) unsigned not null auto_increment,
+home tinyint(3) unsigned default '0',
+unique key match_id (match_id),
+key match_id_2 (match_id)
+);
+insert into t1 values("1", "2");
+create table t2 (
+player_id tinyint(3) unsigned default '0',
+match_1_h tinyint(3) unsigned default '0',
+key player_id (player_id)
+);
+insert into t2 values("1", "5");
+insert into t2 values("2", "9");
+insert into t2 values("3", "3");
+insert into t2 values("4", "7");
+insert into t2 values("5", "6");
+insert into t2 values("6", "8");
+insert into t2 values("7", "4");
+insert into t2 values("8", "12");
+insert into t2 values("9", "11");
+insert into t2 values("10", "10");
+explain select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+(t2 s left join t1 m on m.match_id = 1)
+order by m.match_id desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE s ALL NULL NULL NULL NULL 10
+1 SIMPLE m const match_id,match_id_2 match_id 1 const 1 Using where
+explain select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+(t2 s left join t1 m on m.match_id = 1)
+order by UUX desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE s ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
+1 SIMPLE m const match_id,match_id_2 match_id 1 const 1 Using where
+select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+(t2 s left join t1 m on m.match_id = 1)
+order by UUX desc;
+player_id match_1_h * match_id home UUX
+8 12 * 1 2 10
+9 11 * 1 2 9
+10 10 * 1 2 8
+2 9 * 1 2 7
+6 8 * 1 2 6
+4 7 * 1 2 5
+5 6 * 1 2 4
+1 5 * 1 2 3
+7 4 * 1 2 2
+3 3 * 1 2 1
+explain select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+t2 s straight_join t1 m where m.match_id = 1
+order by UUX desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE s ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
+1 SIMPLE m const match_id,match_id_2 match_id 1 const 1 Using where
+select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+t2 s straight_join t1 m where m.match_id = 1
+order by UUX desc;
+player_id match_1_h * match_id home UUX
+8 12 * 1 2 10
+9 11 * 1 2 9
+10 10 * 1 2 8
+2 9 * 1 2 7
+6 8 * 1 2 6
+4 7 * 1 2 5
+5 6 * 1 2 4
+1 5 * 1 2 3
+7 4 * 1 2 2
+3 3 * 1 2 1
+drop table t1, t2;
diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result
index dd03ef73883..bb553825caa 100644
--- a/mysql-test/r/key.result
+++ b/mysql-test/r/key.result
@@ -76,13 +76,12 @@ CCident varchar(50) DEFAULT '' NOT NULL,
PRIMARY KEY (name,author,category)
);
INSERT INTO t1 VALUES
-('patnom','patauteur',0,'p.favre@cryo-networks.fr',NULL,NULL,'#p2sndnq6ae5g1u6t','essai\nsalut','scol://195.242.78.119:patauteur.patnom',NULL,NULL,NULL,950036174,-882087474,NULL,3,0,3,'1','Pub/patnom/futur_divers.scs',NULL,'pat','CC1');
+('patnom','patauteur',0,'p.favre@cryo-networks.fr',NULL,NULL,'#p2sndnq6ae5g1u6t','essai salut','scol://195.242.78.119:patauteur.patnom',NULL,NULL,NULL,950036174,-882087474,NULL,3,0,3,'1','Pub/patnom/futur_divers.scs',NULL,'pat','CC1');
INSERT INTO t1 VALUES
('LeNomDeMonSite','Marc',0,'m.barilley@cryo-networks.fr',NULL,NULL,NULL,NULL,'scol://195.242.78.119:Marc.LeNomDeMonSite',NULL,NULL,NULL,950560434,-881563214,NULL,3,0,3,'1','Pub/LeNomDeMonSite/domus_hibere.scs',NULL,'Marq','CC1');
select * from t1 where name='patnom' and author='patauteur' and category=0;
name author category email password proxy bitmap msg urlscol urlhttp timeout nbcnx creation livinguntil lang type subcat subtype reg scs capacity userISP CCident
-patnom patauteur 0 p.favre@cryo-networks.fr NULL NULL #p2sndnq6ae5g1u6t essai
-salut scol://195.242.78.119:patauteur.patnom NULL NULL NULL 950036174 -882087474 NULL 3 0 3 1 Pub/patnom/futur_divers.scs NULL pat CC1
+patnom patauteur 0 p.favre@cryo-networks.fr NULL NULL #p2sndnq6ae5g1u6t essai salut scol://195.242.78.119:patauteur.patnom NULL NULL NULL 950036174 -882087474 NULL 3 0 3 1 Pub/patnom/futur_divers.scs NULL pat CC1
drop table t1;
create table t1
(
@@ -235,3 +234,36 @@ SELECT numeropost FROM t1 WHERE numreponse='1';
numeropost
1
drop table t1;
+create table t1 (c varchar(30) character set utf8, t text character set utf8, unique (c(2)), unique (t(3))) engine=myisam;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` varchar(30) character set utf8 default NULL,
+ `t` text character set utf8,
+ UNIQUE KEY `c` (`c`(2)),
+ UNIQUE KEY `t` (`t`(3))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert t1 values ('cccc', 'tttt'),
+(0xD0B1212223D0B1D0B1D0B1D0B1D0B1, 0xD0B1D0B1212223D0B1D0B1D0B1D0B1),
+(0xD0B1222123D0B1D0B1D0B1D0B1D0B1, 0xD0B1D0B1222123D0B1D0B1D0B1D0B1);
+insert t1 (c) values ('cc22');
+ERROR 23000: Duplicate entry 'cc22' for key 1
+insert t1 (t) values ('ttt22');
+ERROR 23000: Duplicate entry 'ttt22' for key 2
+insert t1 (c) values (0xD0B1212322D0B1D0B1D0B1D0B1D0B1);
+ERROR 23000: Duplicate entry 'б!#"' for key 1
+insert t1 (t) values (0xD0B1D0B1212322D0B1D0B1D0B1D0B1);
+ERROR 23000: Duplicate entry 'бб!#"б' for key 2
+select c from t1 where c='cccc';
+c
+cccc
+select t from t1 where t='tttt';
+t
+tttt
+select c from t1 where c=0xD0B1212223D0B1D0B1D0B1D0B1D0B1;
+c
+?!"#?????
+select t from t1 where t=0xD0B1D0B1212223D0B1D0B1D0B1D0B1;
+t
+??!"#????
+drop table t1;
diff --git a/mysql-test/r/lowercase_table.result b/mysql-test/r/lowercase_table.result
index d400dcd9795..a8bc2e3d342 100644
--- a/mysql-test/r/lowercase_table.result
+++ b/mysql-test/r/lowercase_table.result
@@ -1,4 +1,6 @@
drop table if exists t1,t2,t3,t4;
+drop table if exists t0,t5,t6,t7,t8,t9;
+drop database if exists mysqltest;
create table T1 (id int primary key, Word varchar(40) not null, Index(Word));
create table t4 (id int primary key, Word varchar(40) not null);
INSERT INTO T1 VALUES (1, 'a'), (2, 'b'), (3, 'c');
@@ -42,6 +44,26 @@ select count(bags.a) from t1 as Bags;
count(bags.a)
0
drop table t1;
+create database mysqltest;
+use MYSQLTEST;
+create table t1 (a int);
+select T1.a from MYSQLTEST.T1;
+a
+select t1.a from MYSQLTEST.T1;
+a
+select mysqltest.t1.* from MYSQLTEST.t1;
+a
+select MYSQLTEST.t1.* from MYSQLTEST.t1;
+a
+select MYSQLTEST.T1.* from MYSQLTEST.T1;
+a
+select MYSQLTEST.T1.* from T1;
+a
+alter table t1 rename to T1;
+select MYSQLTEST.t1.* from MYSQLTEST.t1;
+a
+drop database mysqltest;
+use test;
create table t1 (a int);
create table t2 (a int);
delete p1.*,P2.* from t1 as p1, t2 as p2 where p1.a=P2.a;
diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result
index 83f8230f11c..5755033190b 100644
--- a/mysql-test/r/merge.result
+++ b/mysql-test/r/merge.result
@@ -642,3 +642,12 @@ x y
1 3
1 2
drop table t1,t2,t3;
+create table t1 (a int);
+create table t2 (a int);
+insert into t1 values (0);
+insert into t2 values (1);
+create table t3 engine=merge union=(t1, t2) select * from t1;
+ERROR HY000: You can't specify target table 't1' for update in FROM clause
+create table t3 engine=merge union=(t1, t2) select * from t2;
+ERROR HY000: You can't specify target table 't2' for update in FROM clause
+drop table t1, t2;
diff --git a/mysql-test/r/metadata.result b/mysql-test/r/metadata.result
index 80a01a0ca90..ced3ca61f80 100644
--- a/mysql-test/r/metadata.result
+++ b/mysql-test/r/metadata.result
@@ -21,7 +21,7 @@ def test t1 t1 g g 5 4 0 Y 32768 3 63
def test t1 t1 h h 0 7 0 Y 32768 4 63
def test t1 t1 i i 13 4 0 Y 32864 0 63
def test t1 t1 j j 10 10 0 Y 128 0 63
-def test t1 t1 k k 7 19 0 N 1217 0 63
+def test t1 t1 k k 7 19 0 N 1249 0 63
def test t1 t1 l l 12 19 0 Y 128 0 63
def test t1 t1 m m 254 1 0 Y 256 0 8
def test t1 t1 n n 254 3 0 Y 2048 0 8
diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result
index 9a123729c4b..26dcce43d08 100644
--- a/mysql-test/r/myisam.result
+++ b/mysql-test/r/myisam.result
@@ -428,6 +428,22 @@ select * from t1 where a='807780' and b='477' and c='165';
a b c
807780 477 165
drop table t1;
+DROP TABLE IF EXISTS t1;
+Warnings:
+Note 1051 Unknown table 't1'
+CREATE TABLE t1 (a varchar(150) NOT NULL, KEY (a));
+INSERT t1 VALUES ("can \tcan");
+INSERT t1 VALUES ("can can");
+INSERT t1 VALUES ("can");
+SELECT * FROM t1;
+a
+can can
+can
+can can
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
create table t1 (a blob);
insert into t1 values('a '),('a');
select concat(a,'.') from t1 where a='a';
@@ -529,3 +545,12 @@ show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a NULL NULL NULL NULL YES HASH
drop table t1,t2;
+create table t1 ( a tinytext, b char(1), index idx (a(1),b) );
+insert into t1 values (null,''), (null,'');
+explain select count(*) from t1 where a is null;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref idx idx 4 const 1 Using where
+select count(*) from t1 where a is null;
+count(*)
+2
+drop table t1;
diff --git a/mysql-test/r/mysql_protocols.result b/mysql-test/r/mysql_protocols.result
new file mode 100644
index 00000000000..272e3bda6f0
--- /dev/null
+++ b/mysql-test/r/mysql_protocols.result
@@ -0,0 +1,9 @@
+<default>
+ ok
+TCP
+ ok
+SOCKET
+ ok
+ERROR 2047: Wrong or unknown protocol
+ERROR 2047: Wrong or unknown protocol
+Unknown option to protocol: NullS
diff --git a/mysql-test/r/mysqlbinlog2.result b/mysql-test/r/mysqlbinlog2.result
new file mode 100644
index 00000000000..3c1b85e05a1
--- /dev/null
+++ b/mysql-test/r/mysqlbinlog2.result
@@ -0,0 +1,446 @@
+drop table if exists t1;
+reset master;
+set @a=UNIX_TIMESTAMP("2020-01-21 15:32:22");
+set timestamp=@a;
+create table t1 (a int auto_increment not null primary key, b char(3));
+insert into t1 values(null, "a");
+insert into t1 values(null, "b");
+set timestamp=@a+2;
+insert into t1 values(null, "c");
+set timestamp=@a+4;
+insert into t1 values(null, "d");
+insert into t1 values(null, "e");
+flush logs;
+set timestamp=@a+1;
+insert into t1 values(null, "f");
+
+--- Local --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- offset --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=1;
+use test;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- start-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- stop-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+
+--- start-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=3;
+use test;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- stop-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+
+--- Local with 2 binlogs on command line --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- offset --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=1;
+use test;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- start-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- stop-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+
+--- start-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=3;
+use test;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- stop-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+
+--- Remote --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- offset --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=1;
+use test;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- start-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- stop-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+
+--- start-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=3;
+use test;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+
+--- stop-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+
+--- Remote with 2 binlogs on command line --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- offset --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=1;
+use test;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- start-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- stop-position --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+
+--- start-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+SET INSERT_ID=3;
+use test;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+use test;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- stop-datetime --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+
+--- to-last-log --
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+use test;
+SET TIMESTAMP=1579609942;
+create table t1 (a int auto_increment not null primary key, b char(3));
+SET INSERT_ID=1;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "a");
+SET INSERT_ID=2;
+SET TIMESTAMP=1579609942;
+insert into t1 values(null, "b");
+SET INSERT_ID=3;
+SET TIMESTAMP=1579609944;
+insert into t1 values(null, "c");
+SET INSERT_ID=4;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "d");
+SET INSERT_ID=5;
+SET TIMESTAMP=1579609946;
+insert into t1 values(null, "e");
+SET INSERT_ID=6;
+SET TIMESTAMP=1579609943;
+insert into t1 values(null, "f");
+
+--- end of test --
+drop table t1;
diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result
index 6cc6a89d5ad..4e7f4b2a72b 100644
--- a/mysql-test/r/ndb_alter_table.result
+++ b/mysql-test/r/ndb_alter_table.result
@@ -15,15 +15,60 @@ col2 varchar(30) not null,
col3 varchar (20) not null,
col4 varchar(4) not null,
col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null,
-col6 int not null, to_be_deleted int);
-insert into t1 values (2,4,3,5,"PENDING",1,7);
+col6 int not null, to_be_deleted int) ENGINE=ndbcluster;
+show table status;
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 0 NULL NULL NULL latin1_swedish_ci NULL
+insert into t1 values
+(0,4,3,5,"PENDING",1,7),(NULL,4,3,5,"PENDING",1,7),(31,4,3,5,"PENDING",1,7), (7,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7), (100,4,3,5,"PENDING",1,7), (99,4,3,5,"PENDING",1,7), (8,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7);
+show table status;
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL
+select * from t1 order by col1;
+col1 col2 col3 col4 col5 col6 to_be_deleted
+0 4 3 5 PENDING 1 7
+1 4 3 5 PENDING 1 7
+7 4 3 5 PENDING 1 7
+8 4 3 5 PENDING 1 7
+31 4 3 5 PENDING 1 7
+32 4 3 5 PENDING 1 7
+99 4 3 5 PENDING 1 7
+100 4 3 5 PENDING 1 7
+101 4 3 5 PENDING 1 7
alter table t1
add column col4_5 varchar(20) not null after col4,
add column col7 varchar(30) not null after col5,
add column col8 datetime not null, drop column to_be_deleted,
change column col2 fourth varchar(30) not null after col3,
modify column col6 int not null first;
-select * from t1;
+show table status;
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL
+select * from t1 order by col1;
col6 col1 col3 fourth col4 col4_5 col5 col7 col8
-1 2 3 4 5 PENDING 0000-00-00 00:00:00
+1 0 3 4 5 PENDING 0000-00-00 00:00:00
+1 1 3 4 5 PENDING 0000-00-00 00:00:00
+1 7 3 4 5 PENDING 0000-00-00 00:00:00
+1 8 3 4 5 PENDING 0000-00-00 00:00:00
+1 31 3 4 5 PENDING 0000-00-00 00:00:00
+1 32 3 4 5 PENDING 0000-00-00 00:00:00
+1 99 3 4 5 PENDING 0000-00-00 00:00:00
+1 100 3 4 5 PENDING 0000-00-00 00:00:00
+1 101 3 4 5 PENDING 0000-00-00 00:00:00
+insert into t1 values (2, NULL,4,3,5,99,"PENDING","EXTRA",'2004-01-01 00:00:00');
+show table status;
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 103 NULL NULL NULL latin1_swedish_ci NULL
+select * from t1 order by col1;
+col6 col1 col3 fourth col4 col4_5 col5 col7 col8
+1 0 3 4 5 PENDING 0000-00-00 00:00:00
+1 1 3 4 5 PENDING 0000-00-00 00:00:00
+1 7 3 4 5 PENDING 0000-00-00 00:00:00
+1 8 3 4 5 PENDING 0000-00-00 00:00:00
+1 31 3 4 5 PENDING 0000-00-00 00:00:00
+1 32 3 4 5 PENDING 0000-00-00 00:00:00
+1 99 3 4 5 PENDING 0000-00-00 00:00:00
+1 100 3 4 5 PENDING 0000-00-00 00:00:00
+1 101 3 4 5 PENDING 0000-00-00 00:00:00
+2 102 4 3 5 99 PENDING EXTRA 2004-01-01 00:00:00
drop table t1;
diff --git a/mysql-test/r/ndb_autodiscover.result b/mysql-test/r/ndb_autodiscover.result
index b86575ff2d9..313003f0a1f 100644
--- a/mysql-test/r/ndb_autodiscover.result
+++ b/mysql-test/r/ndb_autodiscover.result
@@ -35,10 +35,10 @@ update t1 set name="Autodiscover" where id = 2;
show status like 'handler_discover%';
Variable_name Value
Handler_discover 4
-select * from t1 order by name;
+select * from t1 order by id;
id name
-2 Autodiscover
1 Autodiscover
+2 Autodiscover
3 Discover 3
show status like 'handler_discover%';
Variable_name Value
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index 0e7b039a5f9..80e1aa7939a 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -1,98 +1,106 @@
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
+drop database if exists mysqltest;
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
-attr1 INT NOT NULL
+attr1 INT NOT NULL,
+attr2 INT,
+attr3 VARCHAR(10)
) ENGINE=ndbcluster;
-INSERT INTO t1 VALUES (9410,9412);
-SELECT pk1 FROM t1;
+INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413');
+SELECT pk1 FROM t1 ORDER BY pk1;
pk1
9410
-SELECT * FROM t1;
-pk1 attr1
-9410 9412
-SELECT t1.* FROM t1;
-pk1 attr1
-9410 9412
+9411
+SELECT * FROM t1 ORDER BY pk1;
+pk1 attr1 attr2 attr3
+9410 9412 NULL 9412
+9411 9413 17 9413
+SELECT t1.* FROM t1 ORDER BY pk1;
+pk1 attr1 attr2 attr3
+9410 9412 NULL 9412
+9411 9413 17 9413
UPDATE t1 SET attr1=1 WHERE pk1=9410;
-SELECT * FROM t1;
-pk1 attr1
-9410 1
+SELECT * FROM t1 ORDER BY pk1;
+pk1 attr1 attr2 attr3
+9410 1 NULL 9412
+9411 9413 17 9413
UPDATE t1 SET pk1=2 WHERE attr1=1;
-ERROR 42000: Table 't1' uses an extension that doesn't exist in this MySQL version
-SELECT * FROM t1;
-pk1 attr1
-9410 1
+SELECT * FROM t1 ORDER BY pk1;
+pk1 attr1 attr2 attr3
+2 1 NULL 9412
+9411 9413 17 9413
+UPDATE t1 SET pk1=pk1 + 1;
+SELECT * FROM t1 ORDER BY pk1;
+pk1 attr1 attr2 attr3
+3 1 NULL 9412
+9412 9413 17 9413
DELETE FROM t1;
SELECT * FROM t1;
-pk1 attr1
-INSERT INTO t1 VALUES (9410,9412), (9411, 9413), (9408, 8765),
-(7,8), (8,9), (9,10), (10,11), (11,12), (12,13), (13,14);
+pk1 attr1 attr2 attr3
+INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9408, 8765, NULL, '8765'),
+(7,8, NULL, NULL), (8,9, NULL, NULL), (9,10, NULL, NULL), (10,11, NULL, NULL), (11,12, NULL, NULL), (12,13, NULL, NULL), (13,14, NULL, NULL);
UPDATE t1 SET attr1 = 9999;
SELECT * FROM t1 ORDER BY pk1;
-pk1 attr1
-7 9999
-8 9999
-9 9999
-10 9999
-11 9999
-12 9999
-13 9999
-9408 9999
-9410 9999
-9411 9999
+pk1 attr1 attr2 attr3
+7 9999 NULL NULL
+8 9999 NULL NULL
+9 9999 NULL NULL
+10 9999 NULL NULL
+11 9999 NULL NULL
+12 9999 NULL NULL
+13 9999 NULL NULL
+9408 9999 NULL 8765
+9410 9999 NULL 9412
UPDATE t1 SET attr1 = 9998 WHERE pk1 < 1000;
SELECT * FROM t1 ORDER BY pk1;
-pk1 attr1
-7 9998
-8 9998
-9 9998
-10 9998
-11 9998
-12 9998
-13 9998
-9408 9999
-9410 9999
-9411 9999
+pk1 attr1 attr2 attr3
+7 9998 NULL NULL
+8 9998 NULL NULL
+9 9998 NULL NULL
+10 9998 NULL NULL
+11 9998 NULL NULL
+12 9998 NULL NULL
+13 9998 NULL NULL
+9408 9999 NULL 8765
+9410 9999 NULL 9412
UPDATE t1 SET attr1 = 9997 WHERE attr1 = 9999;
SELECT * FROM t1 ORDER BY pk1;
-pk1 attr1
-7 9998
-8 9998
-9 9998
-10 9998
-11 9998
-12 9998
-13 9998
-9408 9997
-9410 9997
-9411 9997
+pk1 attr1 attr2 attr3
+7 9998 NULL NULL
+8 9998 NULL NULL
+9 9998 NULL NULL
+10 9998 NULL NULL
+11 9998 NULL NULL
+12 9998 NULL NULL
+13 9998 NULL NULL
+9408 9997 NULL 8765
+9410 9997 NULL 9412
DELETE FROM t1 WHERE pk1 = 9410;
SELECT * FROM t1 ORDER BY pk1;
-pk1 attr1
-7 9998
-8 9998
-9 9998
-10 9998
-11 9998
-12 9998
-13 9998
-9408 9997
-9411 9997
+pk1 attr1 attr2 attr3
+7 9998 NULL NULL
+8 9998 NULL NULL
+9 9998 NULL NULL
+10 9998 NULL NULL
+11 9998 NULL NULL
+12 9998 NULL NULL
+13 9998 NULL NULL
+9408 9997 NULL 8765
DELETE FROM t1;
SELECT * FROM t1;
-pk1 attr1
-INSERT INTO t1 values (1, 4), (2, 4), (3, 5), (4, 4), (5, 5);
+pk1 attr1 attr2 attr3
+INSERT INTO t1 values (1, 4, NULL, NULL), (2, 4, NULL, NULL), (3, 5, NULL, NULL), (4, 4, NULL, NULL), (5, 5, NULL, NULL);
DELETE FROM t1 WHERE attr1=4;
SELECT * FROM t1 order by pk1;
-pk1 attr1
-3 5
-5 5
+pk1 attr1 attr2 attr3
+3 5 NULL NULL
+5 5 NULL NULL
DELETE FROM t1;
-INSERT INTO t1 VALUES (9410,9412), (9411, 9413);
+INSERT INTO t1 VALUES (9410,9412, NULL, NULL), (9411, 9413, NULL, NULL);
DELETE FROM t1 WHERE pk1 = 9410;
SELECT * FROM t1;
-pk1 attr1
-9411 9413
+pk1 attr1 attr2 attr3
+9411 9413 NULL NULL
DROP TABLE t1;
CREATE TABLE t1 (id INT, id2 int) engine=ndbcluster;
INSERT INTO t1 values(3456, 7890);
@@ -108,13 +116,17 @@ SELECT * FROM t1;
id id2
1234 7890
DELETE FROM t1;
-INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890);
-SELECT * FROM t1;
+INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890), (3454, 7890);
+SELECT * FROM t1 ORDER BY id;
id id2
+3454 7890
3456 7890
3456 7890
3456 7890
DELETE FROM t1 WHERE id = 3456;
+SELECT * FROM t1 ORDER BY id;
+id id2
+3454 7890
DROP TABLE t1;
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
@@ -222,6 +234,18 @@ select * from t4 where a = 7 and b = 17 order by a;
a b c d
select * from t4 where a = 7 and b != 16 order by b;
a b c d
+delete from t2 where a > 5;
+select x1.a, x1.b from t2 x1, t2 x2 where x1.b = x2.b order by x1.a;
+a b
+1 10
+3 12
+5 14
+select a, b FROM t2 outer_table where
+a = (select a from t2 where b = outer_table.b ) order by a;
+a b
+1 10
+3 12
+5 14
delete from t2;
delete from t3;
delete from t4;
@@ -338,3 +362,30 @@ select * from t7;
adress a b c
No adress 8 NULL 12
drop table t7;
+CREATE TABLE t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+attr1 INT NOT NULL,
+attr2 INT,
+attr3 VARCHAR(10)
+) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413');
+create database mysqltest;
+use mysqltest;
+CREATE TABLE t2 (
+a bigint unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned
+) engine=ndbcluster;
+insert into t2 select pk1,attr1,attr2 from test.t1;
+select * from t2 order by a;
+a b c
+9410 9412 NULL
+9411 9413 17
+select b from test.t1, t2 where c = test.t1.attr2;
+b
+9413
+select b,test.t1.attr1 from test.t1, t2 where test.t1.pk1 < a;
+b attr1
+9413 9412
+drop table test.t1, t2;
+drop database mysqltest;
diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result
new file mode 100644
index 00000000000..c590815b233
--- /dev/null
+++ b/mysql-test/r/ndb_blob.result
@@ -0,0 +1,324 @@
+drop table if exists t1;
+drop database if exists mysqltest;
+set autocommit=0;
+create table t1 (
+a int not null primary key,
+b text not null,
+c int not null,
+d longblob,
+key (c)
+) engine=ndbcluster;
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+select length(@x0),length(@b1),length(@d1) from dual;
+length(@x0) length(@b1) length(@d1)
+256 2256 3000
+select length(@x0),length(@b2),length(@d2) from dual;
+length(@x0) length(@b2) length(@d2)
+256 20000 30000
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1 where a = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a=1;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+1 2256 b1 3000 dd1
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where a=2;
+a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
+2 20000 b2 30000 dd2
+update t1 set b=@b2,d=@d2 where a=1;
+update t1 set b=@b1,d=@d1 where a=2;
+commit;
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where a=1;
+a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
+1 20000 b2 30000 dd2
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a=2;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+2 2256 b1 3000 dd1
+update t1 set b=concat(b,b),d=concat(d,d) where a=1;
+update t1 set b=concat(b,b),d=concat(d,d) where a=2;
+commit;
+select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
+from t1 where a=1;
+a length(b) substr(b,1+4*9000,2) length(d) substr(d,1+6*9000,3)
+1 40000 b2 60000 dd2
+select a,length(b),substr(b,1+4*900,2),length(d),substr(d,1+6*900,3)
+from t1 where a=2;
+a length(b) substr(b,1+4*900,2) length(d) substr(d,1+6*900,3)
+2 4512 b1 6000 dd1
+update t1 set d=null where a=1;
+commit;
+select a from t1 where d is null;
+a
+1
+delete from t1 where a=1;
+delete from t1 where a=2;
+commit;
+select count(*) from t1;
+count(*)
+0
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1 where c = 111;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref c c 4 const 10 Using where
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where c=111;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+1 2256 b1 3000 dd1
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where c=222;
+a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
+2 20000 b2 30000 dd2
+update t1 set b=@b2,d=@d2 where c=111;
+update t1 set b=@b1,d=@d1 where c=222;
+commit;
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where c=111;
+a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
+1 20000 b2 30000 dd2
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where c=222;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+2 2256 b1 3000 dd1
+update t1 set d=null where c=111;
+commit;
+select a from t1 where d is null;
+a
+1
+delete from t1 where c=111;
+delete from t1 where c=222;
+commit;
+select count(*) from t1;
+count(*)
+0
+insert into t1 values(1,'b1',111,'dd1');
+insert into t1 values(2,'b2',222,'dd2');
+insert into t1 values(3,'b3',333,'dd3');
+insert into t1 values(4,'b4',444,'dd4');
+insert into t1 values(5,'b5',555,'dd5');
+insert into t1 values(6,'b6',666,'dd6');
+insert into t1 values(7,'b7',777,'dd7');
+insert into t1 values(8,'b8',888,'dd8');
+insert into t1 values(9,'b9',999,'dd9');
+commit;
+explain select * from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+select * from t1 order by a;
+a b c d
+1 b1 111 dd1
+2 b2 222 dd2
+3 b3 333 dd3
+4 b4 444 dd4
+5 b5 555 dd5
+6 b6 666 dd6
+7 b7 777 dd7
+8 b8 888 dd8
+9 b9 999 dd9
+update t1 set b=concat(a,'x',b),d=concat(a,'x',d);
+commit;
+select * from t1 order by a;
+a b c d
+1 1xb1 111 1xdd1
+2 2xb2 222 2xdd2
+3 3xb3 333 3xdd3
+4 4xb4 444 4xdd4
+5 5xb5 555 5xdd5
+6 6xb6 666 6xdd6
+7 7xb7 777 7xdd7
+8 8xb8 888 8xdd8
+9 9xb9 999 9xdd9
+delete from t1;
+commit;
+select count(*) from t1;
+count(*)
+0
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 order by a;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+1 2256 b1 3000 dd1
+2 20000 b2 30000 dd2
+update t1 set b=concat(b,b),d=concat(d,d);
+commit;
+select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
+from t1 order by a;
+a length(b) substr(b,1+4*9000,2) length(d) substr(d,1+6*9000,3)
+1 4512 6000
+2 40000 b2 60000 dd2
+delete from t1;
+commit;
+select count(*) from t1;
+count(*)
+0
+insert into t1 values(1,'b1',111,'dd1');
+insert into t1 values(2,'b2',222,'dd2');
+insert into t1 values(3,'b3',333,'dd3');
+insert into t1 values(4,'b4',444,'dd4');
+insert into t1 values(5,'b5',555,'dd5');
+insert into t1 values(6,'b6',666,'dd6');
+insert into t1 values(7,'b7',777,'dd7');
+insert into t1 values(8,'b8',888,'dd8');
+insert into t1 values(9,'b9',999,'dd9');
+commit;
+explain select * from t1 where c >= 100 order by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL 10 Using where; Using filesort
+select * from t1 where c >= 100 order by a;
+a b c d
+1 b1 111 dd1
+2 b2 222 dd2
+3 b3 333 dd3
+4 b4 444 dd4
+5 b5 555 dd5
+6 b6 666 dd6
+7 b7 777 dd7
+8 b8 888 dd8
+9 b9 999 dd9
+update t1 set b=concat(a,'x',b),d=concat(a,'x',d)
+where c >= 100;
+commit;
+select * from t1 where c >= 100 order by a;
+a b c d
+1 1xb1 111 1xdd1
+2 2xb2 222 2xdd2
+3 3xb3 333 3xdd3
+4 4xb4 444 4xdd4
+5 5xb5 555 5xdd5
+6 6xb6 666 6xdd6
+7 7xb7 777 7xdd7
+8 8xb8 888 8xdd8
+9 9xb9 999 9xdd9
+select * from t1 order by a;
+a b c d
+1 1xb1 111 1xdd1
+2 2xb2 222 2xdd2
+3 3xb3 333 3xdd3
+4 4xb4 444 4xdd4
+5 5xb5 555 5xdd5
+6 6xb6 666 6xdd6
+7 7xb7 777 7xdd7
+8 8xb8 888 8xdd8
+9 9xb9 999 9xdd9
+alter table t1 add x int;
+select * from t1 order by a;
+a b c d x
+1 1xb1 111 1xdd1 NULL
+2 2xb2 222 2xdd2 NULL
+3 3xb3 333 3xdd3 NULL
+4 4xb4 444 4xdd4 NULL
+5 5xb5 555 5xdd5 NULL
+6 6xb6 666 6xdd6 NULL
+7 7xb7 777 7xdd7 NULL
+8 8xb8 888 8xdd8 NULL
+9 9xb9 999 9xdd9 NULL
+alter table t1 drop x;
+select * from t1 order by a;
+a b c d
+1 1xb1 111 1xdd1
+2 2xb2 222 2xdd2
+3 3xb3 333 3xdd3
+4 4xb4 444 4xdd4
+5 5xb5 555 5xdd5
+6 6xb6 666 6xdd6
+7 7xb7 777 7xdd7
+8 8xb8 888 8xdd8
+9 9xb9 999 9xdd9
+create database mysqltest;
+use mysqltest;
+CREATE TABLE t2 (
+a bigint unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned
+) engine=ndbcluster;
+insert into t2 values (1,1,1),(2,2,2);
+select * from test.t1,t2 where test.t1.a = t2.a order by test.t1.a;
+a b c d a b c
+1 1xb1 111 1xdd1 1 1 1
+2 2xb2 222 2xdd2 2 2 2
+drop table t2;
+use test;
+delete from t1 where c >= 100;
+commit;
+select count(*) from t1;
+count(*)
+0
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1 where c >= 100 order by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range c c 4 NULL 10 Using where; Using filesort
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where c >= 100 order by a;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+1 2256 b1 3000 dd1
+2 20000 b2 30000 dd2
+update t1 set b=concat(b,b),d=concat(d,d);
+commit;
+select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
+from t1 where c >= 100 order by a;
+a length(b) substr(b,1+4*9000,2) length(d) substr(d,1+6*9000,3)
+1 4512 6000
+2 40000 b2 60000 dd2
+delete from t1 where c >= 100;
+commit;
+select count(*) from t1;
+count(*)
+0
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a = 0;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a = 1;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+1 2256 b1 3000 dd1
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a = 2;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+2 20000 b2 30000 dd2
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 order by a;
+a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
+1 2256 b1 3000 dd1
+2 20000 b2 30000 dd2
+rollback;
+select count(*) from t1;
+count(*)
+0
+drop table t1;
+drop database mysqltest;
diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result
index 0e3bd555b0a..2f1ad251e40 100644
--- a/mysql-test/r/ndb_index_ordered.result
+++ b/mysql-test/r/ndb_index_ordered.result
@@ -82,6 +82,24 @@ a b c
4 5 12
5 6 12
6 7 12
+update t1 set b = b + 1 where b > 4 and b < 7;
+select * from t1 order by a;
+a b c
+1 2 13
+2 3 13
+3 4 12
+4 6 12
+5 7 12
+6 7 12
+update t1 set a = a + 10 where b > 1 and b < 7;
+select * from t1 order by a;
+a b c
+5 7 12
+6 7 12
+11 2 13
+12 3 13
+13 4 12
+14 6 12
drop table t1;
CREATE TABLE t1 (
a int unsigned NOT NULL PRIMARY KEY,
@@ -187,4 +205,55 @@ a b c
select * from t1 where b<=5 and c=0 or b<=5 and c=2;
a b c
19 4 0
+select count(*) from t1 where b = 0;
+count(*)
+0
+select count(*) from t1 where b = 1;
+count(*)
+1
+drop table t1;
+CREATE TABLE t1 (
+a int unsigned NOT NULL PRIMARY KEY,
+b int unsigned,
+c int unsigned,
+KEY bc(b,c)
+) engine = ndb;
+insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL);
+select * from t1 use index (bc) where b IS NULL;
+a b c
+3 NULL NULL
+2 NULL 2
+select * from t1 use index (bc)order by a;
+a b c
+1 1 1
+2 NULL 2
+3 NULL NULL
+4 4 NULL
+select * from t1 use index (bc) order by a;
+a b c
+1 1 1
+2 NULL 2
+3 NULL NULL
+4 4 NULL
+select * from t1 use index (PRIMARY) where b IS NULL order by a;
+a b c
+2 NULL 2
+3 NULL NULL
+select * from t1 use index (bc) where b IS NULL order by a;
+a b c
+2 NULL 2
+3 NULL NULL
+select * from t1 use index (bc) where b IS NULL and c IS NULL order by a;
+a b c
+3 NULL NULL
+select * from t1 use index (bc) where b IS NULL and c = 2 order by a;
+a b c
+2 NULL 2
+select * from t1 use index (bc) where b < 4 order by a;
+a b c
+1 1 1
+select * from t1 use index (bc) where b IS NOT NULL order by a;
+a b c
+1 1 1
+4 4 NULL
drop table t1;
diff --git a/mysql-test/r/ndb_index_unique.result b/mysql-test/r/ndb_index_unique.result
index ed97e0b110a..4362de94b48 100644
--- a/mysql-test/r/ndb_index_unique.result
+++ b/mysql-test/r/ndb_index_unique.result
@@ -21,6 +21,28 @@ insert into t1 values(7,8,3);
select * from t1 where b = 4 order by a;
a b c
3 4 6
+insert into t1 values(8, 2, 3);
+ERROR 23000: Can't write, because of unique constraint, to table 't1'
+select * from t1 order by a;
+a b c
+1 2 3
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+delete from t1 where a = 1;
+insert into t1 values(8, 2, 3);
+select * from t1 order by a;
+a b c
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+8 2 3
drop table t1;
CREATE TABLE t2 (
a int unsigned NOT NULL PRIMARY KEY,
@@ -42,6 +64,28 @@ insert into t2 values(7,8,3);
select * from t2 where b = 4 order by a;
a b c
3 4 6
+insert into t2 values(8, 2, 3);
+ERROR 23000: Can't write, because of unique constraint, to table 't2'
+select * from t2 order by a;
+a b c
+1 2 3
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+delete from t2 where a = 1;
+insert into t2 values(8, 2, 3);
+select * from t2 order by a;
+a b c
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+8 2 3
drop table t2;
CREATE TABLE t3 (
a int unsigned NOT NULL,
@@ -65,6 +109,68 @@ a b c
3 4 6
drop table t3;
CREATE TABLE t1 (
+pk int NOT NULL PRIMARY KEY,
+a int unsigned,
+UNIQUE KEY (a)
+) engine=ndbcluster;
+insert into t1 values (-1,NULL), (0,0), (1,NULL),(2,2),(3,NULL),(4,4);
+select * from t1 order by pk;
+pk a
+-1 NULL
+0 0
+1 NULL
+2 2
+3 NULL
+4 4
+insert into t1 values (5,0);
+ERROR 23000: Can't write, because of unique constraint, to table 't1'
+select * from t1 order by pk;
+pk a
+-1 NULL
+0 0
+1 NULL
+2 2
+3 NULL
+4 4
+delete from t1 where a = 0;
+insert into t1 values (5,0);
+select * from t1 order by pk;
+pk a
+-1 NULL
+1 NULL
+2 2
+3 NULL
+4 4
+5 0
+CREATE TABLE t2 (
+pk int NOT NULL PRIMARY KEY,
+a int unsigned,
+b tinyint NOT NULL,
+c VARCHAR(10),
+UNIQUE KEY si(a, c)
+) engine=ndbcluster;
+insert into t2 values (-1,1,17,NULL),(0,NULL,18,NULL),(1,3,19,'abc');
+select * from t2 order by pk;
+pk a b c
+-1 1 17 NULL
+0 NULL 18 NULL
+1 3 19 abc
+insert into t2 values(2,3,19,'abc');
+ERROR 23000: Can't write, because of unique constraint, to table 't2'
+select * from t2 order by pk;
+pk a b c
+-1 1 17 NULL
+0 NULL 18 NULL
+1 3 19 abc
+delete from t2 where c IS NOT NULL;
+insert into t2 values(2,3,19,'abc');
+select * from t2 order by pk;
+pk a b c
+-1 1 17 NULL
+0 NULL 18 NULL
+2 3 19 abc
+drop table t1, t2;
+CREATE TABLE t1 (
cid smallint(5) unsigned NOT NULL default '0',
cv varchar(250) NOT NULL default '',
PRIMARY KEY (cid),
@@ -74,8 +180,10 @@ INSERT INTO t1 VALUES (8,'dummy');
CREATE TABLE t2 (
cid bigint(20) unsigned NOT NULL auto_increment,
cap varchar(255) NOT NULL default '',
-PRIMARY KEY (cid)
+PRIMARY KEY (cid),
+UNIQUE KEY (cid, cap)
) engine=ndbcluster;
+INSERT INTO t2 VALUES (NULL,'another dummy');
CREATE TABLE t3 (
gid bigint(20) unsigned NOT NULL auto_increment,
gn varchar(255) NOT NULL default '',
@@ -132,6 +240,9 @@ cid cv
8 dummy
select * from t1 where cv = 'test';
cid cv
+select * from t2 where cap = 'another dummy';
+cid cap
+0 another dummy
select * from t4 where uid = 1 and gid=1 and rid=2 and cid=4;
uid gid rid cid
1 1 2 4
diff --git a/mysql-test/r/ndb_limit.result b/mysql-test/r/ndb_limit.result
new file mode 100644
index 00000000000..6574aa0bb1a
--- /dev/null
+++ b/mysql-test/r/ndb_limit.result
@@ -0,0 +1,31 @@
+DROP TABLE IF EXISTS t2;
+CREATE TABLE t2 (
+a bigint unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned
+) engine=ndbcluster;
+select count(*) from t2;
+count(*)
+10000
+delete from t2 limit 1;
+select count(*) from t2;
+count(*)
+9999
+delete from t2 limit 100;
+select count(*) from t2;
+count(*)
+9899
+delete from t2 limit 1000;
+select count(*) from t2;
+count(*)
+8899
+update t2 set c=12345678 limit 100;
+select count(*) from t2 where c=12345678;
+count(*)
+100
+select count(*) from t2 where c=12345678 limit 1000;
+count(*)
+100
+select * from t2 limit 0;
+a b c
+drop table t2;
diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result
new file mode 100644
index 00000000000..56661913e22
--- /dev/null
+++ b/mysql-test/r/ndb_lock.result
@@ -0,0 +1,30 @@
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
+create table t1 (x integer not null primary key, y varchar(32)) engine = ndb;
+insert into t1 values (1,'one'), (2,'two');
+select * from t1 order by x;
+x y
+1 one
+2 two
+select * from t1 order by x;
+x y
+1 one
+2 two
+start transaction;
+insert into t1 values (3,'three');
+select * from t1 order by x;
+x y
+1 one
+2 two
+3 three
+start transaction;
+select * from t1 order by x;
+x y
+1 one
+2 two
+commit;
+select * from t1 order by x;
+x y
+1 one
+2 two
+3 three
+commit;
diff --git a/mysql-test/r/ndb_replace.result b/mysql-test/r/ndb_replace.result
index 45af0f7fcb5..63fd8b55c8e 100644
--- a/mysql-test/r/ndb_replace.result
+++ b/mysql-test/r/ndb_replace.result
@@ -11,7 +11,7 @@ insert into t1 (gesuchnr, benutzer_id) value (3,2);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
insert into t1 (gesuchnr,benutzer_id) values (1,1);
-ERROR 23000: Can't write; duplicate key in table 't1'
+ERROR 23000: Duplicate entry '1-1' for key 1
replace into t1 (gesuchnr,benutzer_id) values (1,1);
select * from t1 order by gesuchnr;
gesuchnr benutzer_id
diff --git a/mysql-test/r/ndb_transaction.result b/mysql-test/r/ndb_transaction.result
new file mode 100644
index 00000000000..691b91b1d36
--- /dev/null
+++ b/mysql-test/r/ndb_transaction.result
@@ -0,0 +1,257 @@
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
+drop database if exists mysqltest;
+CREATE TABLE t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+attr1 INT NOT NULL
+) ENGINE=ndbcluster;
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+select count(*) from t1;
+count(*)
+2
+select * from t1 where pk1 = 1;
+pk1 attr1
+1 1
+select t1.attr1 from t1, t1 as t1x where t1.pk1 = t1x.pk1 + 1;
+attr1
+2
+rollback;
+select count(*) from t1;
+count(*)
+0
+select * from t1 where pk1 = 1;
+pk1 attr1
+select t1.attr1 from t1, t1 as t1x where t1.pk1 = t1x.pk1 + 1;
+attr1
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+commit;
+select count(*) from t1;
+count(*)
+2
+select * from t1 where pk1 = 1;
+pk1 attr1
+1 1
+select t1.attr1 from t1, t1 as t1x where t1.pk1 = t1x.pk1 + 1;
+attr1
+2
+begin;
+update t1 set attr1 = attr1 * 2;
+select count(*) from t1;
+count(*)
+2
+select * from t1 where pk1 = 1;
+pk1 attr1
+1 2
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+pk1 attr1 pk1 attr1
+2 4 1 2
+rollback;
+select count(*) from t1;
+count(*)
+2
+select * from t1 where pk1 = 1;
+pk1 attr1
+1 1
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+pk1 attr1 pk1 attr1
+begin;
+update t1 set attr1 = attr1 * 2;
+commit;
+select count(*) from t1;
+count(*)
+2
+select * from t1 where pk1 = 1;
+pk1 attr1
+1 2
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+pk1 attr1 pk1 attr1
+2 4 1 2
+begin;
+delete from t1 where attr1 = 2;
+select count(*) from t1;
+count(*)
+1
+select * from t1 where pk1 = 1;
+pk1 attr1
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+pk1 attr1 pk1 attr1
+rollback;
+select count(*) from t1;
+count(*)
+2
+select * from t1 where pk1 = 1;
+pk1 attr1
+1 2
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+pk1 attr1 pk1 attr1
+2 4 1 2
+begin;
+delete from t1 where attr1 = 2;
+commit;
+select count(*) from t1;
+count(*)
+1
+select * from t1 where pk1 = 1;
+pk1 attr1
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+pk1 attr1 pk1 attr1
+DROP TABLE t1;
+CREATE TABLE t1 (id INT, id2 int) engine=ndbcluster;
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+select sum(id) from t1;
+sum(id)
+3
+select * from t1 where id = 1;
+id id2
+1 1
+select t1.id from t1, t1 as t1x where t1.id2 = t1x.id2 + 1;
+id
+2
+rollback;
+select sum(id) from t1;
+sum(id)
+NULL
+select * from t1 where id = 1;
+id id2
+select t1.id from t1, t1 as t1x where t1.id2 = t1x.id2 + 1;
+id
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+commit;
+select sum(id) from t1;
+sum(id)
+3
+select * from t1 where id = 1;
+id id2
+1 1
+select t1.id from t1, t1 as t1x where t1.id2 = t1x.id2 + 1;
+id
+2
+begin;
+update t1 set id = id * 2;
+select sum(id) from t1;
+sum(id)
+6
+select * from t1 where id = 2;
+id id2
+2 1
+select * from t1, t1 as t1x where t1x.id = t1.id - 2;
+id id2 id id2
+4 2 2 1
+rollback;
+select sum(id) from t1;
+sum(id)
+3
+select * from t1 where id = 2;
+id id2
+2 2
+select * from t1, t1 as t1x where t1x.id = t1.id - 2;
+id id2 id id2
+begin;
+update t1 set id = id * 2;
+commit;
+select sum(id) from t1;
+sum(id)
+6
+select * from t1 where id = 2;
+id id2
+2 1
+select * from t1, t1 as t1x where t1x.id = t1.id - 2;
+id id2 id id2
+4 2 2 1
+DROP TABLE t1;
+CREATE TABLE t2 (
+a bigint unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned
+) engine=ndbcluster;
+CREATE TABLE t3 (
+a bigint unsigned NOT NULL,
+b bigint unsigned not null,
+c bigint unsigned,
+PRIMARY KEY(a)
+) engine=ndbcluster;
+CREATE TABLE t4 (
+a bigint unsigned NOT NULL,
+b bigint unsigned not null,
+c bigint unsigned NOT NULL,
+d int unsigned,
+PRIMARY KEY(a, b, c)
+) engine=ndbcluster;
+select count(*) from t2;
+count(*)
+0
+select count(*) from t3;
+count(*)
+0
+select count(*) from t4;
+count(*)
+0
+select count(*) from t2;
+count(*)
+100
+select count(*) from t3;
+count(*)
+100
+select count(*) from t4;
+count(*)
+100
+begin;
+begin;
+drop table t2;
+drop table t3;
+drop table t4;
+CREATE TABLE t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+attr1 INT NOT NULL
+) ENGINE=ndbcluster;
+create database mysqltest;
+use mysqltest;
+CREATE TABLE t2 (
+a bigint unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned
+) engine=ndbcluster;
+begin;
+insert into test.t1 values(1,1);
+insert into t2 values(1,1,1);
+insert into test.t1 values(2,2);
+insert into t2 values(2,2,2);
+select count(*) from test.t1;
+count(*)
+2
+select count(*) from t2;
+count(*)
+2
+select * from test.t1 where pk1 = 1;
+pk1 attr1
+1 1
+select * from t2 where a = 1;
+a b c
+1 1 1
+select test.t1.attr1
+from test.t1, test.t1 as t1x where test.t1.pk1 = t1x.pk1 + 1;
+attr1
+2
+select t2.a
+from t2, t2 as t2x where t2.a = t2x.a + 1;
+a
+2
+select test.t1.pk1, a from test.t1,t2 where b > test.t1.attr1;
+pk1 a
+1 2
+rollback;
+select count(*) from test.t1;
+count(*)
+0
+select count(*) from t2;
+count(*)
+0
+drop table test.t1, t2;
+drop database mysqltest;
diff --git a/mysql-test/r/ndb_truncate.result b/mysql-test/r/ndb_truncate.result
new file mode 100644
index 00000000000..38f3a78029c
--- /dev/null
+++ b/mysql-test/r/ndb_truncate.result
@@ -0,0 +1,14 @@
+DROP TABLE IF EXISTS t2;
+CREATE TABLE t2 (
+a bigint unsigned NOT NULL PRIMARY KEY,
+b int unsigned not null,
+c int unsigned
+) engine=ndbcluster;
+select count(*) from t2;
+count(*)
+5000
+truncate table t2;
+select count(*) from t2;
+count(*)
+0
+drop table t2;
diff --git a/mysql-test/r/ndb_types.result b/mysql-test/r/ndb_types.result
new file mode 100644
index 00000000000..9a45b77149b
--- /dev/null
+++ b/mysql-test/r/ndb_types.result
@@ -0,0 +1,36 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+auto int(5) unsigned NOT NULL auto_increment,
+string char(10) default "hello",
+tiny tinyint(4) DEFAULT '0' NOT NULL ,
+short smallint(6) DEFAULT '1' NOT NULL ,
+medium mediumint(8) DEFAULT '0' NOT NULL,
+long_int int(11) DEFAULT '0' NOT NULL,
+longlong bigint(13) DEFAULT '0' NOT NULL,
+real_float float(13,1) DEFAULT 0.0 NOT NULL,
+real_double double(16,4),
+utiny tinyint(3) unsigned DEFAULT '0' NOT NULL,
+ushort smallint(5) unsigned zerofill DEFAULT '00000' NOT NULL,
+umedium mediumint(8) unsigned DEFAULT '0' NOT NULL,
+ulong int(11) unsigned DEFAULT '0' NOT NULL,
+ulonglong bigint(13) unsigned DEFAULT '0' NOT NULL,
+time_stamp timestamp,
+date_field date,
+time_field time,
+date_time datetime,
+options enum('one','two','tree') not null,
+flags set('one','two','tree') not null,
+PRIMARY KEY (auto),
+KEY (utiny),
+KEY (tiny),
+KEY (short),
+KEY any_name (medium),
+KEY (longlong),
+KEY (real_float),
+KEY (ushort),
+KEY (umedium),
+KEY (ulong),
+KEY (ulonglong,ulong),
+KEY (options,flags)
+);
+drop table t1;
diff --git a/mysql-test/r/negation_elimination.result b/mysql-test/r/negation_elimination.result
index a3a2bad7ec6..9193a125cd1 100644
--- a/mysql-test/r/negation_elimination.result
+++ b/mysql-test/r/negation_elimination.result
@@ -375,4 +375,17 @@ a
13
14
15
+delete from t1 where a > 3;
+select a, not(not(a)) from t1;
+a not(not(a))
+NULL NULL
+0 0
+1 1
+2 1
+3 1
+explain extended select a, not(not(a)), not(a <= 2 and not(a)), not(a not like "1"), not (a not in (1,2)), not(a != 2) from t1 where not(not(a)) having not(not(a));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL a 5 NULL 5 Using where; Using index
+Warnings:
+Note 1003 select test.t1.a AS `a`,(test.t1.a <> 0) AS `not(not(a))`,((test.t1.a > 2) or test.t1.a) AS `not(a <= 2 and not(a))`,(test.t1.a like _latin1'1') AS `not(a not like "1")`,(test.t1.a in (1,2)) AS `not (a not in (1,2))`,(test.t1.a = 2) AS `not(a != 2)` from test.t1 where test.t1.a having test.t1.a
drop table t1;
diff --git a/mysql-test/r/null.result b/mysql-test/r/null.result
index 1d76fbf2fb3..bd90b3fe3f3 100644
--- a/mysql-test/r/null.result
+++ b/mysql-test/r/null.result
@@ -156,3 +156,22 @@ drop table t1;
select cast(NULL as signed);
cast(NULL as signed)
NULL
+create table t1(i int, key(i));
+insert into t1 values(1);
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+explain select * from t1 where i=2 or i is null;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref_or_null i i 5 const 10 Using where; Using index
+alter table t1 change i i int not null;
+explain select * from t1 where i=2 or i is null;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref i i 4 const 7 Using where; Using index
+drop table t1;
diff --git a/mysql-test/r/olap.result b/mysql-test/r/olap.result
index c862a020a93..f398c69a424 100644
--- a/mysql-test/r/olap.result
+++ b/mysql-test/r/olap.result
@@ -271,3 +271,39 @@ i i COUNT(*)
100 NULL 2
NULL NULL 2
drop table t1,t2;
+CREATE TABLE user_day(
+user_id INT NOT NULL,
+date DATE NOT NULL,
+UNIQUE INDEX user_date (user_id, date)
+);
+INSERT INTO user_day VALUES
+(1, '2004-06-06' ),
+(1, '2004-06-07' ),
+(2, '2004-06-06' );
+SELECT
+d.date AS day,
+COUNT(d.user_id) as sample,
+COUNT(next_day.user_id) AS not_cancelled
+FROM user_day d
+LEFT JOIN user_day next_day
+ON next_day.user_id=d.user_id AND
+next_day.date= DATE_ADD( d.date, interval 1 day )
+GROUP BY day;
+day sample not_cancelled
+2004-06-06 2 1
+2004-06-07 1 0
+SELECT
+d.date AS day,
+COUNT(d.user_id) as sample,
+COUNT(next_day.user_id) AS not_cancelled
+FROM user_day d
+LEFT JOIN user_day next_day
+ON next_day.user_id=d.user_id AND
+next_day.date= DATE_ADD( d.date, interval 1 day )
+GROUP BY day
+WITH ROLLUP;
+day sample not_cancelled
+2004-06-06 2 1
+2004-06-07 1 0
+NULL 3 1
+DROP TABLE user_day;
diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result
index 6b03dc239e7..a431807a560 100644
--- a/mysql-test/r/order_by.result
+++ b/mysql-test/r/order_by.result
@@ -116,7 +116,7 @@ col1
2
3
2
-select col1 as id from t1 order by t1.id;
+select col1 as id from t1 order by id;
id
1
1
@@ -126,16 +126,16 @@ id
2
2
3
-select concat(col1) as id from t1 order by t1.id;
+select concat(col1) as id from t1 order by id;
id
-2
-2
1
1
1
2
-3
2
+2
+2
+3
drop table t1;
CREATE TABLE t1 (id int auto_increment primary key,aika varchar(40),aikakentta timestamp);
insert into t1 (aika) values ('Keskiviikko');
@@ -660,3 +660,66 @@ a b c d
1 1 12 -1
1 1 2 0
drop table t1, t2;
+create table t1 (col1 int, col int);
+create table t2 (col2 int, col int);
+insert into t1 values (1,1),(2,2),(3,3);
+insert into t2 values (1,3),(2,2),(3,1);
+select t1.* , t2.col as t2_col from t1 left join t2 on (t1.col1=t2.col2)
+order by col;
+col1 col t2_col
+1 1 3
+2 2 2
+3 3 1
+select col1 as col, col from t1 order by col;
+ERROR 23000: Column 'col' in order clause is ambiguous
+select t1.col as c1, t2.col as c2 from t1, t2 where t1.col1=t2.col2
+order by col;
+ERROR 23000: Column 'col' in order clause is ambiguous
+select t1.col as c1, t2.col as c2 from t1, t2 where t1.col1=t2.col2
+order by col;
+ERROR 23000: Column 'col' in order clause is ambiguous
+select col1 from t1, t2 where t1.col1=t2.col2 order by col;
+ERROR 23000: Column 'col' in order clause is ambiguous
+select t1.col as t1_col, t2.col from t1, t2 where t1.col1=t2.col2
+order by col;
+t1_col col
+3 1
+2 2
+1 3
+select col2 as c, col as c from t2 order by col;
+c c
+3 1
+2 2
+1 3
+select col2 as col, col as col2 from t2 order by col;
+col col2
+1 3
+2 2
+3 1
+select t1.col as t1_col, t2.col2 from t1, t2 where t1.col1=t2.col2
+order by col;
+t1_col col2
+1 1
+2 2
+3 3
+select t2.col2, t2.col, t2.col from t2 order by col;
+col2 col col
+3 1 1
+2 2 2
+1 3 3
+select t2.col2 as col from t2 order by t2.col;
+col
+3
+2
+1
+select t2.col2 as col, t2.col from t2 order by t2.col;
+col col
+3 1
+2 2
+1 3
+select t2.col2, t2.col, t2.col from t2 order by t2.col;
+col2 col col
+3 1 1
+2 2 2
+1 3 3
+drop table t1, t2;
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index 27f4ce7f815..321b8894796 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -219,3 +219,54 @@ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length I
t1 MyISAM 9 Dynamic 0 0 0 4294967295 1024 0 NULL # # # latin1_swedish_ci NULL
deallocate prepare stmt1 ;
drop table t1;
+create table t1(a varchar(2), b varchar(3));
+prepare stmt1 from "select a, b from t1 where (not (a='aa' and b < 'zzz'))";
+execute stmt1;
+a b
+execute stmt1;
+a b
+deallocate prepare stmt1;
+drop table t1;
+prepare stmt1 from "select 1 into @var";
+execute stmt1;
+execute stmt1;
+prepare stmt1 from "create table t1 select 1 as i";
+execute stmt1;
+drop table t1;
+execute stmt1;
+prepare stmt1 from "insert into t1 select i from t1";
+execute stmt1;
+execute stmt1;
+prepare stmt1 from "select * from t1 into outfile 'f1.txt'";
+execute stmt1;
+deallocate prepare stmt1;
+drop table t1;
+prepare stmt1 from 'select 1';
+prepare STMT1 from 'select 2';
+execute sTmT1;
+2
+2
+deallocate prepare StMt1;
+deallocate prepare Stmt1;
+ERROR HY000: Unknown prepared statement handler (Stmt1) given to DEALLOCATE PREPARE
+set names utf8;
+prepare `ü` from 'select 1234';
+execute `ü` ;
+1234
+1234
+set names latin1;
+execute ``;
+1234
+1234
+set names default;
+create table t1 (a varchar(10)) charset=utf8;
+insert into t1 (a) values ('yahoo');
+set character_set_connection=latin1;
+prepare stmt from 'select a from t1 where a like ?';
+set @var='google';
+execute stmt using @var;
+a
+execute stmt using @var;
+a
+deallocate prepare stmt;
+drop table t1;
diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result
index 6a21055cd3e..dc67fda7efa 100644
--- a/mysql-test/r/ps_1general.result
+++ b/mysql-test/r/ps_1general.result
@@ -317,6 +317,7 @@ NDBCLUSTER YES/NO Clustered, fault-tolerant, memory-based tables
NDB YES/NO Alias for NDBCLUSTER
EXAMPLE YES/NO Example storage engine
ARCHIVE YES/NO Archive storage engine
+CSV YES/NO CSV storage engine
drop table if exists tx;
prepare stmt1 from ' drop table if exists tx ' ;
execute stmt1 ;
@@ -333,12 +334,12 @@ prepare stmt1 from ' deallocate prepare never_prepared ' ;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'never_prepared' at line 1
prepare stmt4 from ' use test ' ;
ERROR HY000: This command is not supported in the prepared statement protocol yet
-prepare stmt3 from ' create database drop_me ';
+prepare stmt3 from ' create database mysqltest ';
ERROR HY000: This command is not supported in the prepared statement protocol yet
-create database drop_me ;
-prepare stmt3 from ' drop database drop_me ';
+create database mysqltest ;
+prepare stmt3 from ' drop database mysqltest ';
ERROR HY000: This command is not supported in the prepared statement protocol yet
-drop database drop_me ;
+drop database mysqltest ;
prepare stmt3 from ' grant all on test.t1 to drop_user@localhost
identified by ''looser'' ';
ERROR HY000: This command is not supported in the prepared statement protocol yet
diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result
index a6d09202e4e..344f02bb8ab 100644
--- a/mysql-test/r/ps_2myisam.result
+++ b/mysql-test/r/ps_2myisam.result
@@ -870,7 +870,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63
def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63
-def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63
+def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63
def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63
def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63
def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63
diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result
index 606716f6583..162004c7b0c 100644
--- a/mysql-test/r/ps_3innodb.result
+++ b/mysql-test/r/ps_3innodb.result
@@ -870,7 +870,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63
def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63
-def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63
+def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63
def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63
def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63
def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63
diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result
index 5643ec637fc..23874c9b02b 100644
--- a/mysql-test/r/ps_4heap.result
+++ b/mysql-test/r/ps_4heap.result
@@ -871,7 +871,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63
def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63
-def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63
+def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63
def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63
def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63
def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63
diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result
index 97069823c26..d1c3597b22b 100644
--- a/mysql-test/r/ps_5merge.result
+++ b/mysql-test/r/ps_5merge.result
@@ -913,7 +913,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63
def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63
-def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63
+def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63
def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63
def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63
def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63
@@ -2105,7 +2105,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63
def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63
-def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63
+def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63
def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63
def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63
def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63
diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result
index b849d874b9a..31dfdd7bb87 100644
--- a/mysql-test/r/ps_6bdb.result
+++ b/mysql-test/r/ps_6bdb.result
@@ -870,7 +870,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63
def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63
def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63
-def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63
+def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63
def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63
def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63
def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63
diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result
index bdd87919fd9..01ee018a434 100644
--- a/mysql-test/r/range.result
+++ b/mysql-test/r/range.result
@@ -475,3 +475,89 @@ id name uid id name uid
1025 Y 25 1025 Y 25
1026 Z 26 1026 Z 26
drop table t1,t2;
+create table t1 (x bigint unsigned not null);
+insert into t1(x) values (0xfffffffffffffff0);
+insert into t1(x) values (0xfffffffffffffff1);
+select * from t1;
+x
+18446744073709551600
+18446744073709551601
+select count(*) from t1 where x>0;
+count(*)
+2
+select count(*) from t1 where x=0;
+count(*)
+0
+select count(*) from t1 where x<0;
+count(*)
+0
+select count(*) from t1 where x < -16;
+count(*)
+0
+select count(*) from t1 where x = -16;
+count(*)
+0
+select count(*) from t1 where x > -16;
+count(*)
+2
+select count(*) from t1 where x = 18446744073709551601;
+count(*)
+1
+create table t2 (x bigint not null);
+insert into t2(x) values (0xfffffffffffffff0);
+insert into t2(x) values (0xfffffffffffffff1);
+select * from t2;
+x
+-16
+-15
+select count(*) from t2 where x>0;
+count(*)
+0
+select count(*) from t2 where x=0;
+count(*)
+0
+select count(*) from t2 where x<0;
+count(*)
+2
+select count(*) from t2 where x < -16;
+count(*)
+0
+select count(*) from t2 where x = -16;
+count(*)
+1
+select count(*) from t2 where x > -16;
+count(*)
+1
+select count(*) from t2 where x = 18446744073709551601;
+count(*)
+0
+drop table t1;
+create table t1 (x bigint unsigned not null primary key) engine=innodb;
+insert into t1(x) values (0xfffffffffffffff0);
+insert into t1(x) values (0xfffffffffffffff1);
+select * from t1;
+x
+18446744073709551600
+18446744073709551601
+select count(*) from t1 where x>0;
+count(*)
+2
+select count(*) from t1 where x=0;
+count(*)
+0
+select count(*) from t1 where x<0;
+count(*)
+0
+select count(*) from t1 where x < -16;
+count(*)
+0
+select count(*) from t1 where x = -16;
+count(*)
+0
+select count(*) from t1 where x > -16;
+count(*)
+1
+select count(*) from t1 where x = 18446744073709551601;
+count(*)
+1
+drop table t1;
diff --git a/mysql-test/r/rename.result b/mysql-test/r/rename.result
index c2dfd9fa219..c6da8285479 100644
--- a/mysql-test/r/rename.result
+++ b/mysql-test/r/rename.result
@@ -1,4 +1,5 @@
drop table if exists t0,t1,t2,t3,t4;
+drop table if exists t0,t5,t6,t7,t8,t9;
create table t0 SELECT 1,"table 1";
create table t2 SELECT 2,"table 2";
create table t3 SELECT 3,"table 3";
diff --git a/mysql-test/r/rpl_charset.result b/mysql-test/r/rpl_charset.result
index 5522fd3e89b..d961173f849 100644
--- a/mysql-test/r/rpl_charset.result
+++ b/mysql-test/r/rpl_charset.result
@@ -4,41 +4,41 @@ reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
-drop database if exists test2;
-drop database if exists test3;
-create database test2 character set latin2;
+drop database if exists mysqltest2;
+drop database if exists mysqltest3;
+create database mysqltest2 character set latin2;
set @@character_set_server=latin5;
-create database test3;
+create database mysqltest3;
--- --master--
-show create database test2;
+show create database mysqltest2;
Database Create Database
-test2 CREATE DATABASE `test2` /*!40100 DEFAULT CHARACTER SET latin2 */
-show create database test3;
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET latin2 */
+show create database mysqltest3;
Database Create Database
-test3 CREATE DATABASE `test3` /*!40100 DEFAULT CHARACTER SET latin5 */
+mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET latin5 */
--- --slave--
-show create database test2;
+show create database mysqltest2;
Database Create Database
-test2 CREATE DATABASE `test2` /*!40100 DEFAULT CHARACTER SET latin2 */
-show create database test3;
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET latin2 */
+show create database mysqltest3;
Database Create Database
-test3 CREATE DATABASE `test3` /*!40100 DEFAULT CHARACTER SET latin5 */
+mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET latin5 */
set @@collation_server=armscii8_bin;
-drop database test3;
-create database test3;
+drop database mysqltest3;
+create database mysqltest3;
--- --master--
-show create database test3;
+show create database mysqltest3;
Database Create Database
-test3 CREATE DATABASE `test3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
+mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
--- --slave--
-show create database test3;
+show create database mysqltest3;
Database Create Database
-test3 CREATE DATABASE `test3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
-use test2;
+mysqltest3 CREATE DATABASE `mysqltest3` /*!40100 DEFAULT CHARACTER SET armscii8 COLLATE armscii8_bin */
+use mysqltest2;
create table t1 (a int auto_increment primary key, b varchar(100));
set character_set_client=cp850, collation_connection=latin2_croatian_ci;
insert into t1 (b) values(@@character_set_server);
@@ -57,7 +57,7 @@ a b
5 latin2_croatian_ci
--- --slave--
-select * from test2.t1 order by a;
+select * from mysqltest2.t1 order by a;
a b
1 armscii8
2 armscii8_bin
@@ -81,7 +81,7 @@ a b
4 Müller
--- --slave--
-select * from test2.t1 order by a;
+select * from mysqltest2.t1 order by a;
a b
1 latin1_german1_ci
2 Muffler
@@ -98,69 +98,69 @@ a b
1 cp850_general_ci
--- --slave--
-select * from test2.t1 order by a;
+select * from mysqltest2.t1 order by a;
a b
1 cp850_general_ci
-drop database test2;
-drop database test3;
+drop database mysqltest2;
+drop database mysqltest3;
show binlog events from 95;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 95 Query 1 181 use `test`; drop database if exists test2
-master-bin.000001 181 Query 1 267 use `test`; drop database if exists test3
-master-bin.000001 267 Query 1 366 use `test`; create database test2 character set latin2
+master-bin.000001 95 Query 1 181 use `test`; drop database if exists mysqltest2
+master-bin.000001 181 Query 1 267 use `test`; drop database if exists mysqltest3
+master-bin.000001 267 Query 1 366 use `test`; create database mysqltest2 character set latin2
master-bin.000001 366 Query 1 522 use `test`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=8,COLLATION_DATABASE=8,COLLATION_SERVER=30
-master-bin.000001 522 Query 1 600 use `test`; create database test3
+master-bin.000001 522 Query 1 600 use `test`; create database mysqltest3
master-bin.000001 600 Query 1 756 use `test`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=8,COLLATION_DATABASE=8,COLLATION_SERVER=64
-master-bin.000001 756 Query 1 832 use `test`; drop database test3
+master-bin.000001 756 Query 1 832 use `test`; drop database mysqltest3
master-bin.000001 832 Query 1 988 use `test`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=8,COLLATION_DATABASE=8,COLLATION_SERVER=64
-master-bin.000001 988 Query 1 1066 use `test`; create database test3
-master-bin.000001 1066 Query 1 1223 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=8,COLLATION_DATABASE=9,COLLATION_SERVER=64
-master-bin.000001 1223 Query 1 1347 use `test2`; create table t1 (a int auto_increment primary key, b varchar(100))
-master-bin.000001 1347 Query 1 1505 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 988 Query 1 1066 use `test`; create database mysqltest3
+master-bin.000001 1066 Query 1 1223 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=8,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 1223 Query 1 1347 use `mysqltest2`; create table t1 (a int auto_increment primary key, b varchar(100))
+master-bin.000001 1347 Query 1 1505 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 1505 Intvar 1 1533 INSERT_ID=1
-master-bin.000001 1533 Query 1 1640 use `test2`; insert into t1 (b) values(@@character_set_server)
-master-bin.000001 1640 Query 1 1798 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 1533 Query 1 1640 use `mysqltest2`; insert into t1 (b) values(@@character_set_server)
+master-bin.000001 1640 Query 1 1798 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 1798 Intvar 1 1826 INSERT_ID=2
-master-bin.000001 1826 Query 1 1929 use `test2`; insert into t1 (b) values(@@collation_server)
-master-bin.000001 1929 Query 1 2087 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 1826 Query 1 1929 use `mysqltest2`; insert into t1 (b) values(@@collation_server)
+master-bin.000001 1929 Query 1 2087 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 2087 Intvar 1 2115 INSERT_ID=3
-master-bin.000001 2115 Query 1 2222 use `test2`; insert into t1 (b) values(@@character_set_client)
-master-bin.000001 2222 Query 1 2380 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 2115 Query 1 2222 use `mysqltest2`; insert into t1 (b) values(@@character_set_client)
+master-bin.000001 2222 Query 1 2380 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 2380 Intvar 1 2408 INSERT_ID=4
-master-bin.000001 2408 Query 1 2519 use `test2`; insert into t1 (b) values(@@character_set_connection)
-master-bin.000001 2519 Query 1 2677 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 2408 Query 1 2519 use `mysqltest2`; insert into t1 (b) values(@@character_set_connection)
+master-bin.000001 2519 Query 1 2677 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=4,COLLATION_CONNECTION=27,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 2677 Intvar 1 2705 INSERT_ID=5
-master-bin.000001 2705 Query 1 2812 use `test2`; insert into t1 (b) values(@@collation_connection)
-master-bin.000001 2812 Query 1 2969 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=5,COLLATION_DATABASE=9,COLLATION_SERVER=64
-master-bin.000001 2969 Query 1 3044 use `test2`; truncate table t1
-master-bin.000001 3044 Query 1 3201 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=5,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 2705 Query 1 2812 use `mysqltest2`; insert into t1 (b) values(@@collation_connection)
+master-bin.000001 2812 Query 1 2969 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=5,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 2969 Query 1 3044 use `mysqltest2`; truncate table t1
+master-bin.000001 3044 Query 1 3201 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=5,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 3201 Intvar 1 3229 INSERT_ID=1
-master-bin.000001 3229 Query 1 3336 use `test2`; insert into t1 (b) values(@@collation_connection)
-master-bin.000001 3336 Query 1 3493 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=5,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 3229 Query 1 3336 use `mysqltest2`; insert into t1 (b) values(@@collation_connection)
+master-bin.000001 3336 Query 1 3493 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=5,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 3493 Intvar 1 3521 INSERT_ID=2
-master-bin.000001 3521 Query 1 3631 use `test2`; insert into t1 (b) values(LEAST("Müller","Muffler"))
-master-bin.000001 3631 Query 1 3789 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 3521 Query 1 3631 use `mysqltest2`; insert into t1 (b) values(LEAST("Müller","Muffler"))
+master-bin.000001 3631 Query 1 3789 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 3789 Intvar 1 3817 INSERT_ID=3
-master-bin.000001 3817 Query 1 3924 use `test2`; insert into t1 (b) values(@@collation_connection)
-master-bin.000001 3924 Query 1 4082 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 3817 Query 1 3924 use `mysqltest2`; insert into t1 (b) values(@@collation_connection)
+master-bin.000001 3924 Query 1 4082 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 4082 Intvar 1 4110 INSERT_ID=4
-master-bin.000001 4110 Query 1 4220 use `test2`; insert into t1 (b) values(LEAST("Müller","Muffler"))
-master-bin.000001 4220 Query 1 4378 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 4110 Query 1 4220 use `mysqltest2`; insert into t1 (b) values(LEAST("Müller","Muffler"))
+master-bin.000001 4220 Query 1 4378 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 4378 Intvar 1 4406 INSERT_ID=74
-master-bin.000001 4406 Create_file 1 5074 db=test2;table=t1;file_id=1;block_len=581
-master-bin.000001 5074 Query 1 5232 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 4406 Create_file 1 5074 db=mysqltest2;table=t1;file_id=1;block_len=581
+master-bin.000001 5074 Query 1 5232 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 5232 Intvar 1 5260 INSERT_ID=5
master-bin.000001 5260 Exec_load 1 5283 ;file_id=1
-master-bin.000001 5283 Query 1 5441 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
-master-bin.000001 5441 Query 1 5516 use `test2`; truncate table t1
-master-bin.000001 5516 Query 1 5674 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 5283 Query 1 5441 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 5441 Query 1 5516 use `mysqltest2`; truncate table t1
+master-bin.000001 5516 Query 1 5674 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
master-bin.000001 5674 Intvar 1 5702 INSERT_ID=1
master-bin.000001 5702 User var 1 5742 @`a`=_cp850 0x4DFC6C6C6572 COLLATE cp850_general_ci
-master-bin.000001 5742 Query 1 5840 use `test2`; insert into t1 (b) values(collation(@a))
-master-bin.000001 5840 Query 1 5998 use `test2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
-master-bin.000001 5998 Query 1 6075 use `test2`; drop database test2
+master-bin.000001 5742 Query 1 5840 use `mysqltest2`; insert into t1 (b) values(collation(@a))
+master-bin.000001 5840 Query 1 5998 use `mysqltest2`; SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
+master-bin.000001 5998 Query 1 6075 use `mysqltest2`; drop database mysqltest2
master-bin.000001 6075 Query 1 6228 SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=31,COLLATION_DATABASE=9,COLLATION_SERVER=64
-master-bin.000001 6228 Query 1 6300 drop database test3
+master-bin.000001 6228 Query 1 6300 drop database mysqltest3
set global character_set_server=latin2;
ERROR HY000: Binary logging and replication forbid changing the global server character set or collation
set global character_set_server=latin2;
diff --git a/mysql-test/r/rpl_delete_all.result b/mysql-test/r/rpl_delete_all.result
index 97a535490dd..5ed221823e8 100644
--- a/mysql-test/r/rpl_delete_all.result
+++ b/mysql-test/r/rpl_delete_all.result
@@ -4,12 +4,12 @@ reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
-create database test1;
-drop database if exists test1;
+create database mysqltest;
+drop database if exists mysqltest;
Warnings:
-Note 1008 Can't drop database 'test1'; database doesn't exist
-show tables from test1;
-ERROR HY000: Can't read dir of './test1/' (Errcode: X)
+Note 1008 Can't drop database 'mysqltest'; database doesn't exist
+show tables from mysqltest;
+ERROR HY000: Can't read dir of './mysqltest/' (Errcode: X)
create table t1 (a int);
drop table if exists t1;
Warnings:
diff --git a/mysql-test/r/rpl_heap.result b/mysql-test/r/rpl_heap.result
index 1556bcd5f25..1facbcb7676 100644
--- a/mysql-test/r/rpl_heap.result
+++ b/mysql-test/r/rpl_heap.result
@@ -1,22 +1,22 @@
reset master;
drop table if exists t1;
-create table t1 (a int) type=HEAP;
-insert into t1 values(10);
+create table t1 type=HEAP select 10 as a;
+insert into t1 values(11);
show binlog events from 79;
Log_name Pos Event_type Server_id Orig_log_pos Info
-master-bin.001 79 Query 1 79 use `test`; create table t1 (a int) type=HEAP
-master-bin.001 147 Query 1 147 use `test`; DELETE FROM `test`.`t1`
-master-bin.001 205 Query 1 205 use `test`; insert into t1 values(10)
+master-bin.001 79 Query 1 79 use `test`; create table t1 type=HEAP select 10 as a
+master-bin.001 154 Query 1 154 use `test`; insert into t1 values(11)
reset slave;
start slave;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` int(11) default NULL
+ `a` bigint(2) NOT NULL default '0'
) TYPE=HEAP
select * from t1;
a
10
+11
select * from t1;
a
select * from t1 limit 10;
diff --git a/mysql-test/r/select_found.result b/mysql-test/r/select_found.result
index 470a3e8439f..00dbcb54d93 100644
--- a/mysql-test/r/select_found.result
+++ b/mysql-test/r/select_found.result
@@ -81,7 +81,10 @@ email varchar(50) NOT NULL default '',
PRIMARY KEY (id),
UNIQUE KEY e_n (email,name)
);
-INSERT INTO t2 VALUES (1,'name1','email1'),(2,'name2','email2'),(3,'name3','email3'),(4,'name4','email4'),(5,'name5','email5'),(6,'name6','email6'),(7,'name7','email7'),(8,'name8','email8'),(9,'name9','email9'),(10,'name10','email10'),(11,'name11','email11'),(12,'name12','email12'),(13,'name13','email13'),(14,'name14','email14'),(15,'name15','email15'),(16,'name16','email16'),(17,'name17','email17'),(18,'name18','email18'),(19,'name19','email19'),(20,'name20','email20'),(21,'name21','email21'),(22,'name22','email22'),(23,'name23','email23'),(24,'name24','email24'),(25,'name25','email25'),(26,'name26','email26'),(27,'name27','email27'),(28,'name28','email28'),(29,'name29','email29'),(30,'name30','email30'),(31,'name31','email31'),(32,'name32','email32'),(33,'name33','email33'),(34,'name34','email34'),(35,'name35','email35'),(36,'name36','email36'),(37,'name37','email37'),(38,'name38','email38'),(39,'name39','email39'),(40,'name40','email40'),(41,'name41','email41'),(42,'name42','email42'),(43,'name43','email43'),(44,'name44','email44'),(45,'name45','email45'),(46,'name46','email46'),(47,'name47','email47'),(48,'name48','email48'),(49,'name49','email49'),(50,'name50','email50'),(51,'name51','email51'),(52,'name52','email52'),(53,'name53','email53'),(54,'name54','email54'),(55,'name55','email55'),(56,'name56','email56'),(57,'name57','email57'),(58,'name58','email58'),(59,'name59','email59'),(60,'name60','email60'),(61,'name61','email61'),(62,'name62','email62'),(63,'name63','email63'),(64,'name64','email64'),(65,'name65','email65'),(66,'name66','email66'),(67,'name67','email67'),(68,'name68','email68'),(69,'name69','email69'),(70,'name70','email70'),(71,'name71','email71'),(72,'name72','email72'),(73,'name73','email73'),(74,'name74','email74'),(75,'name75','email75'),(76,'name76','email76'),(77,'name77','email77'),(78,'name78','email78'),(79,'name79','email79'),(80,'name80','email80'),(81,'name81','email81'),(82,'name82','email82'),(83,'name83','email83'),(84,'name84','email84'),(85,'name85','email85'),(86,'name86','email86'),(87,'name87','email87'),(88,'name88','email88'),(89,'name89','email89'),(90,'name90','email90'),(91,'name91','email91'),(92,'name92','email92'),(93,'name93','email93'),(94,'name94','email94'),(95,'name95','email95'),(96,'name96','email96'),(97,'name97','email97'),(98,'name98','email98'),(99,'name99','email99'),(100,'name100','email100'),(101,'name101','email101'),(102,'name102','email102'),(103,'name103','email103'),(104,'name104','email104'),(105,'name105','email105'),(106,'name106','email106'),(107,'name107','email107'),(108,'name108','email108'),(109,'name109','email109'),(110,'name110','email110'),(111,'name111','email111'),(112,'name112','email112'),(113,'name113','email113'),(114,'name114','email114'),(115,'name115','email115'),(116,'name116','email116'),(117,'name117','email117'),(118,'name118','email118'),(119,'name119','email119'),(120,'name120','email120'),(121,'name121','email121'),(122,'name122','email122'),(123,'name123','email123'),(124,'name124','email124'),(125,'name125','email125'),(126,'name126','email126'),(127,'name127','email127'),(128,'name128','email128'),(129,'name129','email129'),(130,'name130','email130'),(131,'name131','email131'),(132,'name132','email132'),(133,'name133','email133'),(134,'name134','email134'),(135,'name135','email135'),(136,'name136','email136'),(137,'name137','email137'),(138,'name138','email138'),(139,'name139','email139'),(140,'name140','email140'),(141,'name141','email141'),(142,'name142','email142'),(143,'name143','email143'),(144,'name144','email144'),(145,'name145','email145'),(146,'name146','email146'),(147,'name147','email147'),(148,'name148','email148'),(149,'name149','email149'),(150,'name150','email150'),(151,'name151','email151'),(152,'name152','email152'),(153,'name153','email153'),(154,'name154','email154'),(155,'name155','email155'),(156,'name156','email156'),(157,'name157','email157'),(158,'name158','email158'),(159,'name159','email159'),(160,'name160','email160'),(161,'name161','email161'),(162,'name162','email162'),(163,'name163','email163'),(164,'name164','email164'),(165,'name165','email165'),(166,'name166','email166'),(167,'name167','email167'),(168,'name168','email168'),(169,'name169','email169'),(170,'name170','email170'),(171,'name171','email171'),(172,'name172','email172'),(173,'name173','email173'),(174,'name174','email174'),(175,'name175','email175'),(176,'name176','email176'),(177,'name177','email177'),(178,'name178','email178'),(179,'name179','email179'),(180,'name180','email180'),(181,'name181','email181'),(182,'name182','email182'),(183,'name183','email183'),(184,'name184','email184'),(185,'name185','email185'),(186,'name186','email186'),(187,'name187','email187'),(188,'name188','email188'),(189,'name189','email189'),(190,'name190','email190'),(191,'name191','email191'),(192,'name192','email192'),(193,'name193','email193'),(194,'name194','email194'),(195,'name195','email195'),(196,'name196','email196'),(197,'name197','email197'),(198,'name198','email198'),(199,'name199','email199'),(200,'name200','email200');
+EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 system PRIMARY,kid NULL NULL NULL 0 const row not found
+1 SIMPLE t2 index NULL e_n 100 NULL 200
SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
email
email1
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index aefebad7e49..ecf91fe6f68 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2;
+drop database if exists mysqltest;
create table t1 (a int not null primary key, b int not null,c int not null, key(b,c));
insert into t1 values (1,2,2),(2,2,3),(3,2,4),(4,2,4);
check table t1 fast;
@@ -361,39 +362,39 @@ t1 HEAP 9 Fixed 0 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 HEAP 9 Fixed 0 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 HEAP 9 Fixed 0 9 # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL
drop table t1, t2, t3;
-create database test_$1;
-show create database test_$1;
+create database mysqltest;
+show create database mysqltest;
Database Create Database
-test_$1 CREATE DATABASE `test_$1` /*!40100 DEFAULT CHARACTER SET latin1 */
-create table test_$1.t1(a int);
-insert into test_$1.t1 values(1);
-grant select on `test_$1`.* to mysqltest_1@localhost;
-grant usage on `test_$1`.* to mysqltest_2@localhost;
-grant drop on `test_$1`.* to mysqltest_3@localhost;
+mysqltest CREATE DATABASE `mysqltest` /*!40100 DEFAULT CHARACTER SET latin1 */
+create table mysqltest.t1(a int);
+insert into mysqltest.t1 values(1);
+grant select on `mysqltest`.* to mysqltest_1@localhost;
+grant usage on `mysqltest`.* to mysqltest_2@localhost;
+grant drop on `mysqltest`.* to mysqltest_3@localhost;
select * from t1;
a
1
-show create database test_$1;
+show create database mysqltest;
Database Create Database
-test_$1 CREATE DATABASE `test_$1` /*!40100 DEFAULT CHARACTER SET latin1 */
+mysqltest CREATE DATABASE `mysqltest` /*!40100 DEFAULT CHARACTER SET latin1 */
drop table t1;
-ERROR 42000: Access denied for user 'mysqltest_1'@'localhost' to database 'test_$1'
-drop database test_$1;
-ERROR 42000: Access denied for user 'mysqltest_1'@'localhost' to database 'test_$1'
-select * from test_$1.t1;
-ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'test_$1'
-show create database test_$1;
-ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'test_$1'
-drop table test_$1.t1;
-ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'test_$1'
-drop database test_$1;
-ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'test_$1'
-select * from test_$1.t1;
-ERROR 42000: Access denied for user 'mysqltest_3'@'localhost' to database 'test_$1'
-show create database test_$1;
-ERROR 42000: Access denied for user 'mysqltest_3'@'localhost' to database 'test_$1'
-drop table test_$1.t1;
-drop database test_$1;
+ERROR 42000: Access denied for user 'mysqltest_1'@'localhost' to database 'mysqltest'
+drop database mysqltest;
+ERROR 42000: Access denied for user 'mysqltest_1'@'localhost' to database 'mysqltest'
+select * from mysqltest.t1;
+ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'mysqltest'
+show create database mysqltest;
+ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'mysqltest'
+drop table mysqltest.t1;
+ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'mysqltest'
+drop database mysqltest;
+ERROR 42000: Access denied for user 'mysqltest_2'@'localhost' to database 'mysqltest'
+select * from mysqltest.t1;
+ERROR 42000: Access denied for user 'mysqltest_3'@'localhost' to database 'mysqltest'
+show create database mysqltest;
+ERROR 42000: Access denied for user 'mysqltest_3'@'localhost' to database 'mysqltest'
+drop table mysqltest.t1;
+drop database mysqltest;
set names binary;
delete from mysql.user
where user='mysqltest_1' || user='mysqltest_2' || user='mysqltest_3';
diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result
index 80dd5c4077a..b67989231e0 100644
--- a/mysql-test/r/subselect.result
+++ b/mysql-test/r/subselect.result
@@ -1259,11 +1259,6 @@ a b
2 NULL
3 1
drop table t1, t2;
-create table t1(City VARCHAR(30),Location geometry);
-insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)'));
-select City from t1 where (select intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5 50, 2.5 47, 2 47, 2 50))'))=0);
-City
-drop table t1;
CREATE TABLE `t1` (
`id` mediumint(8) unsigned NOT NULL auto_increment,
`pseudo` varchar(35) NOT NULL default '',
@@ -1907,3 +1902,77 @@ select t000.a, count(*) `C` FROM t1 t000 GROUP BY t000.a HAVING count(*) > ALL (
a C
1 1
drop table t1,t2;
+create table t1 (a int not null auto_increment primary key, b varchar(40), fulltext(b));
+insert into t1 (b) values ('ball'),('ball games'), ('games'), ('foo'), ('foobar'), ('Serg'), ('Sergei'),('Georg'), ('Patrik'),('Hakan');
+create table t2 (a int);
+insert into t2 values (1),(3),(2),(7);
+select a,b from t1 where match(b) against ('Ball') > 0;
+a b
+1 ball
+2 ball games
+select a from t2 where a in (select a from t1 where match(b) against ('Ball') > 0);
+a
+1
+2
+drop table t1,t2;
+CREATE TABLE t1(`IZAVORGANG_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`KUERZEL` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,`IZAANALYSEART_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`IZAPMKZ_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin);
+CREATE INDEX AK01IZAVORGANG ON t1(izaAnalyseart_id,Kuerzel);
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000001','601','D0000000001','I0000000001');
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000002','602','D0000000001','I0000000001');
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000003','603','D0000000001','I0000000001');
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000004','101','D0000000001','I0000000001');
+SELECT `IZAVORGANG_ID` FROM t1 WHERE `KUERZEL` IN(SELECT MIN(`KUERZEL`)`Feld1` FROM t1 WHERE `KUERZEL` LIKE'601%'And`IZAANALYSEART_ID`='D0000000001');
+IZAVORGANG_ID
+D0000000001
+drop table t1;
+CREATE TABLE `t1` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`));
+CREATE TABLE `t2` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`));
+insert into t1 values (1,1),(1,2),(2,1),(2,2);
+insert into t2 values (1,2),(2,2);
+select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid);
+aid bid
+1 1
+2 1
+alter table t2 drop primary key;
+alter table t2 add key KEY1 (aid, bid);
+select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid);
+aid bid
+1 1
+2 1
+alter table t2 drop key KEY1;
+alter table t2 add primary key (bid, aid);
+select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid);
+aid bid
+1 1
+2 1
+drop table t1,t2;
+CREATE TABLE t1 (howmanyvalues bigint, avalue int);
+INSERT INTO t1 VALUES (1, 1),(2, 1),(2, 2),(3, 1),(3, 2),(3, 3),(4, 1),(4, 2),(4, 3),(4, 4);
+SELECT howmanyvalues, count(*) from t1 group by howmanyvalues;
+howmanyvalues count(*)
+1 1
+2 2
+3 3
+4 4
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues;
+howmanyvalues mycount
+1 1
+2 2
+3 3
+4 4
+CREATE INDEX t1_howmanyvalues_idx ON t1 (howmanyvalues);
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues+1 = a.howmanyvalues+1) as mycount from t1 a group by a.howmanyvalues;
+howmanyvalues mycount
+1 1
+2 2
+3 3
+4 4
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues;
+howmanyvalues mycount
+1 1
+2 2
+3 3
+4 4
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.avalue) as mycount from t1 a group by a.howmanyvalues;
+ERROR 42S22: Unknown column 'a.avalue' in 'where clause'
+drop table t1;
diff --git a/mysql-test/r/subselect_gis.result b/mysql-test/r/subselect_gis.result
new file mode 100644
index 00000000000..34ab7748656
--- /dev/null
+++ b/mysql-test/r/subselect_gis.result
@@ -0,0 +1,8 @@
+drop table if exists t1;
+create table t1(City VARCHAR(30),Location geometry);
+insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)'));
+select City from t1 where (select
+intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5
+50, 2.5 47, 2 47, 2 50))'))=0);
+City
+drop table t1;
diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result
index 2d0daae09be..1b5fc4f8b27 100644
--- a/mysql-test/r/system_mysql_db.result
+++ b/mysql-test/r/system_mysql_db.result
@@ -19,77 +19,77 @@ user BASE TABLE
show create table db;
Table Create Table
db CREATE TABLE `db` (
- `Host` char(60) character set latin1 collate latin1_bin NOT NULL default '',
- `Db` char(64) character set latin1 collate latin1_bin NOT NULL default '',
- `User` char(16) character set latin1 collate latin1_bin NOT NULL default '',
- `Select_priv` enum('N','Y') NOT NULL default 'N',
- `Insert_priv` enum('N','Y') NOT NULL default 'N',
- `Update_priv` enum('N','Y') NOT NULL default 'N',
- `Delete_priv` enum('N','Y') NOT NULL default 'N',
- `Create_priv` enum('N','Y') NOT NULL default 'N',
- `Drop_priv` enum('N','Y') NOT NULL default 'N',
- `Grant_priv` enum('N','Y') NOT NULL default 'N',
- `References_priv` enum('N','Y') NOT NULL default 'N',
- `Index_priv` enum('N','Y') NOT NULL default 'N',
- `Alter_priv` enum('N','Y') NOT NULL default 'N',
- `Create_tmp_table_priv` enum('N','Y') NOT NULL default 'N',
- `Lock_tables_priv` enum('N','Y') NOT NULL default 'N',
- `Create_view_priv` enum('N','Y') NOT NULL default 'N',
- `Show_view_priv` enum('N','Y') NOT NULL default 'N',
+ `Host` char(60) collate utf8_bin NOT NULL default '',
+ `Db` char(64) collate utf8_bin NOT NULL default '',
+ `User` char(16) collate utf8_bin NOT NULL default '',
+ `Select_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Insert_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Update_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Delete_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Drop_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Grant_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `References_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Index_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Alter_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_tmp_table_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Lock_tables_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_view_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Show_view_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
PRIMARY KEY (`Host`,`Db`,`User`),
KEY `User` (`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Database privileges'
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Database privileges'
show create table host;
Table Create Table
host CREATE TABLE `host` (
- `Host` char(60) character set latin1 collate latin1_bin NOT NULL default '',
- `Db` char(64) character set latin1 collate latin1_bin NOT NULL default '',
- `Select_priv` enum('N','Y') NOT NULL default 'N',
- `Insert_priv` enum('N','Y') NOT NULL default 'N',
- `Update_priv` enum('N','Y') NOT NULL default 'N',
- `Delete_priv` enum('N','Y') NOT NULL default 'N',
- `Create_priv` enum('N','Y') NOT NULL default 'N',
- `Drop_priv` enum('N','Y') NOT NULL default 'N',
- `Grant_priv` enum('N','Y') NOT NULL default 'N',
- `References_priv` enum('N','Y') NOT NULL default 'N',
- `Index_priv` enum('N','Y') NOT NULL default 'N',
- `Alter_priv` enum('N','Y') NOT NULL default 'N',
- `Create_tmp_table_priv` enum('N','Y') NOT NULL default 'N',
- `Lock_tables_priv` enum('N','Y') NOT NULL default 'N',
- `Create_view_priv` enum('N','Y') NOT NULL default 'N',
- `Show_view_priv` enum('N','Y') NOT NULL default 'N',
+ `Host` char(60) collate utf8_bin NOT NULL default '',
+ `Db` char(64) collate utf8_bin NOT NULL default '',
+ `Select_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Insert_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Update_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Delete_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Drop_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Grant_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `References_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Index_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Alter_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_tmp_table_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Lock_tables_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_view_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Show_view_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
PRIMARY KEY (`Host`,`Db`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Host privileges; Merged with database privileges'
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Host privileges; Merged with database privileges'
show create table user;
Table Create Table
user CREATE TABLE `user` (
- `Host` varchar(60) character set latin1 collate latin1_bin NOT NULL default '',
- `User` varchar(16) character set latin1 collate latin1_bin NOT NULL default '',
- `Password` varchar(41) character set latin1 collate latin1_bin NOT NULL default '',
- `Select_priv` enum('N','Y') NOT NULL default 'N',
- `Insert_priv` enum('N','Y') NOT NULL default 'N',
- `Update_priv` enum('N','Y') NOT NULL default 'N',
- `Delete_priv` enum('N','Y') NOT NULL default 'N',
- `Create_priv` enum('N','Y') NOT NULL default 'N',
- `Drop_priv` enum('N','Y') NOT NULL default 'N',
- `Reload_priv` enum('N','Y') NOT NULL default 'N',
- `Shutdown_priv` enum('N','Y') NOT NULL default 'N',
- `Process_priv` enum('N','Y') NOT NULL default 'N',
- `File_priv` enum('N','Y') NOT NULL default 'N',
- `Grant_priv` enum('N','Y') NOT NULL default 'N',
- `References_priv` enum('N','Y') NOT NULL default 'N',
- `Index_priv` enum('N','Y') NOT NULL default 'N',
- `Alter_priv` enum('N','Y') NOT NULL default 'N',
- `Show_db_priv` enum('N','Y') NOT NULL default 'N',
- `Super_priv` enum('N','Y') NOT NULL default 'N',
- `Create_tmp_table_priv` enum('N','Y') NOT NULL default 'N',
- `Lock_tables_priv` enum('N','Y') NOT NULL default 'N',
- `Execute_priv` enum('N','Y') NOT NULL default 'N',
- `Repl_slave_priv` enum('N','Y') NOT NULL default 'N',
- `Repl_client_priv` enum('N','Y') NOT NULL default 'N',
- `Create_view_priv` enum('N','Y') NOT NULL default 'N',
- `Show_view_priv` enum('N','Y') NOT NULL default 'N',
- `ssl_type` enum('','ANY','X509','SPECIFIED') NOT NULL default '',
+ `Host` varchar(60) collate utf8_bin NOT NULL default '',
+ `User` varchar(16) collate utf8_bin NOT NULL default '',
+ `Password` varchar(41) collate utf8_bin NOT NULL default '',
+ `Select_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Insert_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Update_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Delete_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Drop_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Reload_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Shutdown_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Process_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `File_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Grant_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `References_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Index_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Alter_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Show_db_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Super_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_tmp_table_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Lock_tables_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Execute_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Repl_slave_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Repl_client_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Create_view_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `Show_view_priv` enum('N','Y') collate utf8_bin NOT NULL default 'N',
+ `ssl_type` enum('','ANY','X509','SPECIFIED') collate utf8_bin NOT NULL default '',
`ssl_cipher` blob NOT NULL,
`x509_issuer` blob NOT NULL,
`x509_subject` blob NOT NULL,
@@ -97,41 +97,41 @@ user CREATE TABLE `user` (
`max_updates` int(11) unsigned NOT NULL default '0',
`max_connections` int(11) unsigned NOT NULL default '0',
PRIMARY KEY (`Host`,`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Users and global privileges'
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
show create table func;
Table Create Table
func CREATE TABLE `func` (
- `name` char(64) character set latin1 collate latin1_bin NOT NULL default '',
+ `name` char(64) collate utf8_bin NOT NULL default '',
`ret` tinyint(1) NOT NULL default '0',
- `dl` char(128) NOT NULL default '',
- `type` enum('function','aggregate') NOT NULL default 'function',
+ `dl` char(128) collate utf8_bin NOT NULL default '',
+ `type` enum('function','aggregate') collate utf8_bin NOT NULL default 'function',
PRIMARY KEY (`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='User defined functions'
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='User defined functions'
show create table tables_priv;
Table Create Table
tables_priv CREATE TABLE `tables_priv` (
- `Host` char(60) character set latin1 collate latin1_bin NOT NULL default '',
- `Db` char(64) character set latin1 collate latin1_bin NOT NULL default '',
- `User` char(16) character set latin1 collate latin1_bin NOT NULL default '',
- `Table_name` char(64) character set latin1 collate latin1_bin NOT NULL default '',
- `Grantor` char(77) NOT NULL default '',
+ `Host` char(60) collate utf8_bin NOT NULL default '',
+ `Db` char(64) collate utf8_bin NOT NULL default '',
+ `User` char(16) collate utf8_bin NOT NULL default '',
+ `Table_name` char(64) collate utf8_bin NOT NULL default '',
+ `Grantor` char(77) collate utf8_bin NOT NULL default '',
`Timestamp` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
- `Table_priv` set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') NOT NULL default '',
- `Column_priv` set('Select','Insert','Update','References') NOT NULL default '',
+ `Table_priv` set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') collate utf8_bin NOT NULL default '',
+ `Column_priv` set('Select','Insert','Update','References') collate utf8_bin NOT NULL default '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Table privileges'
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Table privileges'
show create table columns_priv;
Table Create Table
columns_priv CREATE TABLE `columns_priv` (
- `Host` char(60) character set latin1 collate latin1_bin NOT NULL default '',
- `Db` char(64) character set latin1 collate latin1_bin NOT NULL default '',
- `User` char(16) character set latin1 collate latin1_bin NOT NULL default '',
- `Table_name` char(64) character set latin1 collate latin1_bin NOT NULL default '',
- `Column_name` char(64) character set latin1 collate latin1_bin NOT NULL default '',
+ `Host` char(60) collate utf8_bin NOT NULL default '',
+ `Db` char(64) collate utf8_bin NOT NULL default '',
+ `User` char(16) collate utf8_bin NOT NULL default '',
+ `Table_name` char(64) collate utf8_bin NOT NULL default '',
+ `Column_name` char(64) collate utf8_bin NOT NULL default '',
`Timestamp` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP,
- `Column_priv` set('Select','Insert','Update','References') NOT NULL default '',
+ `Column_priv` set('Select','Insert','Update','References') collate utf8_bin NOT NULL default '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Column privileges'
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Column privileges'
show tables;
Tables_in_test table_type
diff --git a/mysql-test/r/timezone2.result b/mysql-test/r/timezone2.result
index 5361ff4ffe6..02406b77a65 100644
--- a/mysql-test/r/timezone2.result
+++ b/mysql-test/r/timezone2.result
@@ -244,3 +244,10 @@ NULL
select convert_tz( NULL, 'MET', 'UTC');
convert_tz( NULL, 'MET', 'UTC')
NULL
+create table t1 (ts timestamp);
+set timestamp=1000000000;
+insert into t1 (ts) values (now());
+select convert_tz(ts, @@time_zone, 'Japan') from t1;
+convert_tz(ts, @@time_zone, 'Japan')
+2001-09-09 10:46:40
+drop table t1;
diff --git a/mysql-test/r/truncate.result b/mysql-test/r/truncate.result
index d777bd184b2..74a6cb72cc6 100644
--- a/mysql-test/r/truncate.result
+++ b/mysql-test/r/truncate.result
@@ -31,4 +31,25 @@ SELECT * from t1;
a
1
2
+delete from t1;
+insert into t1 (a) values (NULL),(NULL);
+SELECT * from t1;
+a
+3
+4
+drop table t1;
+create temporary table t1 (a integer auto_increment primary key);
+insert into t1 (a) values (NULL),(NULL);
+truncate table t1;
+insert into t1 (a) values (NULL),(NULL);
+SELECT * from t1;
+a
+1
+2
+delete from t1;
+insert into t1 (a) values (NULL),(NULL);
+SELECT * from t1;
+a
+3
+4
drop table t1;
diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result
index 580fc9a8d0b..95bba1d4ec7 100644
--- a/mysql-test/r/type_blob.result
+++ b/mysql-test/r/type_blob.result
@@ -593,9 +593,12 @@ create table t1 (id integer primary key auto_increment, txt text, unique index t
insert into t1 (txt) values ('Chevy'), ('Chevy '), (NULL);
select * from t1 where txt='Chevy' or txt is NULL;
id txt
+3 NULL
1 Chevy
2 Chevy
-3 NULL
+explain select * from t1 where txt='Chevy' or txt is NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range txt_index txt_index 23 NULL 2 Using where
select * from t1 where txt='Chevy ';
id txt
1 Chevy
@@ -663,6 +666,21 @@ id txt
1 Chevy
2 Chevy
4 Ford
+alter table t1 modify column txt blob;
+explain select * from t1 where txt='Chevy' or txt is NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref_or_null txt_index txt_index 23 const 2 Using where
+select * from t1 where txt='Chevy' or txt is NULL;
+id txt
+1 Chevy
+3 NULL
+explain select * from t1 where txt='Chevy' or txt is NULL order by txt;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref_or_null txt_index txt_index 23 const 2 Using where; Using filesort
+select * from t1 where txt='Chevy' or txt is NULL order by txt;
+id txt
+3 NULL
+1 Chevy
drop table t1;
CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, PRIMARY KEY (i), KEY (c(1),c(1)));
INSERT t1 VALUES (1,''),(2,''),(3,'asdfh'),(4,'');
diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result
index e77ef9f975b..71d1b9ad381 100644
--- a/mysql-test/r/type_date.result
+++ b/mysql-test/r/type_date.result
@@ -79,3 +79,20 @@ SELECT DATE_FORMAT(f1, "%l.%i %p") , DATE_FORMAT(f2, "%l.%i %p") FROM t1;
DATE_FORMAT(f1, "%l.%i %p") DATE_FORMAT(f2, "%l.%i %p")
9.00 AM 12.00 PM
DROP TABLE t1;
+CREATE TABLE t1 (f1 DATE);
+CREATE TABLE t2 (f2 VARCHAR(8));
+CREATE TABLE t3 (f2 CHAR(8));
+INSERT INTO t1 VALUES ('1978-11-26');
+INSERT INTO t2 SELECT f1+0 FROM t1;
+INSERT INTO t2 SELECT f1+0 FROM t1 UNION SELECT f1+0 FROM t1;
+INSERT INTO t3 SELECT f1+0 FROM t1;
+INSERT INTO t3 SELECT f1+0 FROM t1 UNION SELECT f1+0 FROM t1;
+SELECT * FROM t2;
+f2
+19781126
+19781126
+SELECT * FROM t3;
+f2
+19781126
+19781126
+DROP TABLE t1, t2, t3;
diff --git a/mysql-test/r/type_float.result b/mysql-test/r/type_float.result
index 30de1e62df7..843bdc2bdc5 100644
--- a/mysql-test/r/type_float.result
+++ b/mysql-test/r/type_float.result
@@ -75,14 +75,17 @@ insert t1 values (121,"16");
select c1 + c1 * (c2 / 100) as col from t1;
col
140.36
-create table t2 select c1 + c1 * (c2 / 100) as col from t1;
+create table t2 select c1 + c1 * (c2 / 100) as col1, round(c1, 5) as col2, round(c1, 35) as col3, sqrt(c1*1e-15) col4 from t1;
select * from t2;
-col
-140.36
+col1 col2 col3 col4
+140.36 121.00000 121 3.47850542618522e-07
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
- `col` double default NULL
+ `col1` double default NULL,
+ `col2` double(22,5) default NULL,
+ `col3` double default NULL,
+ `col4` double default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1,t2;
create table t1 (a float);
@@ -132,6 +135,9 @@ t1 CREATE TABLE `t1` (
`b` double(200,30) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
+create table t1 (c20 char);
+insert into t1 values (5000.0);
+drop table t1;
create table t1 (f float(54));
ERROR 42000: Incorrect column specifier for column 'f'
drop table if exists t1;
diff --git a/mysql-test/r/type_ranges.result b/mysql-test/r/type_ranges.result
index e803fde14a6..5a65c90c5c7 100644
--- a/mysql-test/r/type_ranges.result
+++ b/mysql-test/r/type_ranges.result
@@ -272,7 +272,7 @@ auto bigint(17) unsigned NULL PRI 0 select,insert,update,references
t1 bigint(1) NULL 0 select,insert,update,references
t2 char(1) latin1_swedish_ci select,insert,update,references
t3 longtext latin1_swedish_ci select,insert,update,references
-t4 longtext latin1_bin select,insert,update,references
+t4 longblob NULL select,insert,update,references
select * from t2;
auto t1 t2 t3 t4
11 1 a aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index aa8c0903558..425e4a05586 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -365,3 +365,15 @@ select * from t1;
t1 i
2004-04-01 00:00:00 10
drop table t1;
+create table t1 (ts timestamp(19));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `ts` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+set TIMESTAMP=1000000000;
+insert into t1 values ();
+select * from t1;
+ts
+2001-09-09 04:46:40
+drop table t1;
diff --git a/mysql-test/r/type_uint.result b/mysql-test/r/type_uint.result
index 07eb47faa7c..d8edf9085b7 100644
--- a/mysql-test/r/type_uint.result
+++ b/mysql-test/r/type_uint.result
@@ -5,8 +5,12 @@ insert into t1 values (1);
insert into t1 values (-1);
Warnings:
Warning 1264 Data truncated; out of range for column 'this' at row 1
+insert into t1 values ('5000000000');
+Warnings:
+Warning 1265 Data truncated for column 'this' at row 1
select * from t1;
this
1
0
+4294967295
drop table t1;
diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result
index 4a0c500c15c..99e0a69834e 100644
--- a/mysql-test/r/union.result
+++ b/mysql-test/r/union.result
@@ -436,6 +436,14 @@ select length(version()) > 1 as `*` UNION select 2;
*
1
2
+create table t1 (a int);
+insert into t1 values (0), (3), (1), (2);
+explain (select * from t1) union (select * from t1) order by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+2 UNION t1 ALL NULL NULL NULL NULL 4
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL Using filesort
+drop table t1;
CREATE TABLE t1 ( id int(3) unsigned default '0') ENGINE=MyISAM;
INSERT INTO t1 (id) VALUES("1");
CREATE TABLE t2 ( id int(3) unsigned default '0', id_master int(5) default '0', text1 varchar(5) default NULL, text2 varchar(5) default NULL) ENGINE=MyISAM;
diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result
index 498e1634dc4..a84cf733f21 100644
--- a/mysql-test/r/variables.result
+++ b/mysql-test/r/variables.result
@@ -460,3 +460,19 @@ SELECT @@global.session.key_buffer_size;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'key_buffer_size' at line 1
SELECT @@global.local.key_buffer_size;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'key_buffer_size' at line 1
+set @tstlw = @@log_warnings;
+show global variables like 'log_warnings';
+Variable_name Value
+log_warnings 1
+set global log_warnings = 0;
+show global variables like 'log_warnings';
+Variable_name Value
+log_warnings 0
+set global log_warnings = 42;
+show global variables like 'log_warnings';
+Variable_name Value
+log_warnings 42
+set global log_warnings = @tstlw;
+show global variables like 'log_warnings';
+Variable_name Value
+log_warnings 1
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index 07427c616f6..eb35aa90fe2 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -312,3 +312,14 @@ insert into t1 values (1,1), (2,2);
alter table t1 drop key no_such_key;
alter table t1 drop key a;
drop table t1;
+
+#
+# BUG#4717 - check for valid table names
+#
+create table t1 (a int);
+--error 1103
+alter table t1 rename to `t1\\`;
+--error 1103
+rename table t1 to `t1\\`;
+drop table t1;
+
diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test
index 73588a91aac..e5986e6755d 100644
--- a/mysql-test/t/auto_increment.test
+++ b/mysql-test/t/auto_increment.test
@@ -138,6 +138,13 @@ insert into t1(b)values(5);
insert into t1(b)values(6);
insert into t1(b)values(7);
select * from t1 order by b;
+alter table t1 modify b mediumint;
+select * from t1 order by b;
+create table t2 (a int);
+insert t2 values (1),(2);
+alter table t2 add b int auto_increment primary key;
+select * from t2;
+drop table t2;
delete from t1 where a=0;
update t1 set a=0 where b=5;
select * from t1 order by b;
diff --git a/mysql-test/t/binary.test b/mysql-test/t/binary.test
index 20a047e0b26..3e702fd5257 100644
--- a/mysql-test/t/binary.test
+++ b/mysql-test/t/binary.test
@@ -38,6 +38,7 @@ select concat("-",a,"-",b,"-") from t1 where b="hello ";
select concat("-",a,"-",b,"-") from t1 ignore index (b) where b="hello ";
# blob test
alter table t1 modify b tinytext not null, drop key b, add key (b(100));
+select concat("-",a,"-",b,"-") from t1;
select concat("-",a,"-",b,"-") from t1 where b="hello ";
select concat("-",a,"-",b,"-") from t1 ignore index (b) where b="hello ";
drop table t1;
@@ -66,3 +67,16 @@ select * from t1 where lower(b)='bbb';
select charset(a), charset(b), charset(binary 'ccc') from t1 limit 1;
select collation(a), collation(b), collation(binary 'ccc') from t1 limit 1;
drop table t1;
+
+#
+# Bug5134: WHERE x = 'bar' AND x LIKE BINARY 'bar' returns wrong results
+#
+
+create table t1( firstname char(20), lastname char(20));
+insert into t1 values ("john","doe"),("John","Doe");
+select * from t1 where firstname='john' and firstname like binary 'john';
+select * from t1 where firstname='john' and binary 'john' = firstname;
+select * from t1 where firstname='john' and firstname = binary 'john';
+select * from t1 where firstname='John' and firstname like binary 'john';
+select * from t1 where firstname='john' and firstname like binary 'John';
+drop table t1;
diff --git a/mysql-test/t/connect.test b/mysql-test/t/connect.test
index 32c1479ae04..4598ca5ea15 100644
--- a/mysql-test/t/connect.test
+++ b/mysql-test/t/connect.test
@@ -48,6 +48,9 @@ flush privileges;
#connect (con1,localhost,test,gambling2,"");
#show tables;
connect (con1,localhost,test,gambling2,mysql);
+set password="";
+--error 1105
+set password='gambling3';
set password=old_password('gambling3');
show tables;
connect (con1,localhost,test,gambling3,test);
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index 30441fb9aae..26c527ca7cb 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -4,7 +4,7 @@
--disable_warnings
drop table if exists t1,t2,t3;
-drop database if exists test_$1;
+drop database if exists mysqltest;
--enable_warnings
create table t1 (b char(0));
@@ -69,14 +69,14 @@ drop table 1ea10;
create table t1 (t1.index int);
drop table t1;
# Test that we get warning for this
-drop database if exists test_$1;
-create database test_$1;
-create table test_$1.$test1 (a$1 int, $b int, c$ int);
-insert into test_$1.$test1 values (1,2,3);
-select a$1, $b, c$ from test_$1.$test1;
-create table test_$1.test2$ (a int);
-drop table test_$1.test2$;
-drop database test_$1;
+drop database if exists mysqltest;
+create database mysqltest;
+create table mysqltest.$test1 (a$1 int, $b int, c$ int);
+insert into mysqltest.$test1 values (1,2,3);
+select a$1, $b, c$ from mysqltest.$test1;
+create table mysqltest.test2$ (a int);
+drop table mysqltest.test2$;
+drop database mysqltest;
--error 1103
create table `` (a int);
@@ -281,16 +281,16 @@ drop table t3;
show create table t3;
select * from t3;
drop table t2, t3;
-create database test_$1;
-create table test_$1.t3 like t1;
-create temporary table t3 like test_$1.t3;
+create database mysqltest;
+create table mysqltest.t3 like t1;
+create temporary table t3 like mysqltest.t3;
show create table t3;
create table t2 like t3;
show create table t2;
select * from t2;
create table t3 like t1;
--error 1050
-create table t3 like test_$1.t3;
+create table t3 like mysqltest.t3;
--error 1044,1
create table non_existing_database.t1 like t1;
--error 1051
@@ -301,7 +301,7 @@ create temporary table t3 like t1;
create table t3 like `a/a`;
drop table t1, t2, t3;
drop table t3;
-drop database test_$1;
+drop database mysqltest;
#
# Test default table type
@@ -393,10 +393,10 @@ drop table t1, t2, t3;
# Bug #1209
#
-create database test_$1;
-use test_$1;
+create database mysqltest;
+use mysqltest;
select database();
-drop database test_$1;
+drop database mysqltest;
select database();
# Connect without a database
diff --git a/mysql-test/t/create_select_tmp.test b/mysql-test/t/create_select_tmp.test
index 166d32fb17c..d81a3799d98 100644
--- a/mysql-test/t/create_select_tmp.test
+++ b/mysql-test/t/create_select_tmp.test
@@ -12,18 +12,18 @@ drop table if exists t1, t2;
CREATE TABLE t1 ( a int );
INSERT INTO t1 VALUES (1),(2),(1);
--error 1062;
-CREATE TABLE t2 ( PRIMARY KEY (a) ) TYPE=INNODB SELECT a FROM t1;
+CREATE TABLE t2 ( PRIMARY KEY (a) ) ENGINE=INNODB SELECT a FROM t1;
--error 1146;
select * from t2;
--error 1062;
-CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) TYPE=INNODB SELECT a FROM t1;
+CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) ENGINE=INNODB SELECT a FROM t1;
--error 1146;
select * from t2;
--error 1062;
-CREATE TABLE t2 ( PRIMARY KEY (a) ) TYPE=MYISAM SELECT a FROM t1;
+CREATE TABLE t2 ( PRIMARY KEY (a) ) ENGINE=MYISAM SELECT a FROM t1;
--error 1146;
select * from t2;
--error 1062;
-CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) TYPE=MYISAM SELECT a FROM t1;
+CREATE TEMPORARY TABLE t2 ( PRIMARY KEY (a) ) ENGINE=MYISAM SELECT a FROM t1;
--error 1146;
select * from t2;
diff --git a/mysql-test/t/csv.test b/mysql-test/t/csv.test
new file mode 100644
index 00000000000..591fab3961a
--- /dev/null
+++ b/mysql-test/t/csv.test
@@ -0,0 +1,1315 @@
+#
+# Test for the CSV engine
+#
+
+-- source include/have_csv.inc
+
+#
+# Simple select test
+#
+
+--disable_warnings
+drop table if exists t1,t2,t3,t4;
+--enable_warnings
+
+CREATE TABLE t1 (
+ Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+ Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
+) ENGINE = CSV;
+
+INSERT INTO t1 VALUES (9410,9412);
+
+select period from t1;
+select * from t1;
+select t1.* from t1;
+
+#
+# Create test table
+#
+
+CREATE TABLE t2 (
+ auto int not null,
+ fld1 int(6) unsigned zerofill DEFAULT '000000' NOT NULL,
+ companynr tinyint(2) unsigned zerofill DEFAULT '00' NOT NULL,
+ fld3 char(30) DEFAULT '' NOT NULL,
+ fld4 char(35) DEFAULT '' NOT NULL,
+ fld5 char(35) DEFAULT '' NOT NULL,
+ fld6 char(4) DEFAULT '' NOT NULL
+) ENGINE = CSV;
+
+#
+# Populate table
+#
+
+--disable_query_log
+INSERT INTO t2 VALUES (1,000001,00,'Omaha','teethe','neat','');
+INSERT INTO t2 VALUES (2,011401,37,'breaking','dreaded','Steinberg','W');
+INSERT INTO t2 VALUES (3,011402,37,'Romans','scholastics','jarring','');
+INSERT INTO t2 VALUES (4,011403,37,'intercepted','audiology','tinily','');
+INSERT INTO t2 VALUES (5,011501,37,'bewilderingly','wallet','balled','');
+INSERT INTO t2 VALUES (6,011701,37,'astound','parters','persist','W');
+INSERT INTO t2 VALUES (7,011702,37,'admonishing','eschew','attainments','');
+INSERT INTO t2 VALUES (8,011703,37,'sumac','quitter','fanatic','');
+INSERT INTO t2 VALUES (9,012001,37,'flanking','neat','measures','FAS');
+INSERT INTO t2 VALUES (10,012003,37,'combed','Steinberg','rightfulness','');
+INSERT INTO t2 VALUES (11,012004,37,'subjective','jarring','capably','');
+INSERT INTO t2 VALUES (12,012005,37,'scatterbrain','tinily','impulsive','');
+INSERT INTO t2 VALUES (13,012301,37,'Eulerian','balled','starlet','');
+INSERT INTO t2 VALUES (14,012302,36,'dubbed','persist','terminators','');
+INSERT INTO t2 VALUES (15,012303,37,'Kane','attainments','untying','');
+INSERT INTO t2 VALUES (16,012304,37,'overlay','fanatic','announces','FAS');
+INSERT INTO t2 VALUES (17,012305,37,'perturb','measures','featherweight','FAS');
+INSERT INTO t2 VALUES (18,012306,37,'goblins','rightfulness','pessimist','FAS');
+INSERT INTO t2 VALUES (19,012501,37,'annihilates','capably','daughter','');
+INSERT INTO t2 VALUES (20,012602,37,'Wotan','impulsive','decliner','FAS');
+INSERT INTO t2 VALUES (21,012603,37,'snatching','starlet','lawgiver','');
+INSERT INTO t2 VALUES (22,012604,37,'concludes','terminators','stated','');
+INSERT INTO t2 VALUES (23,012605,37,'laterally','untying','readable','');
+INSERT INTO t2 VALUES (24,012606,37,'yelped','announces','attrition','');
+INSERT INTO t2 VALUES (25,012701,37,'grazing','featherweight','cascade','FAS');
+INSERT INTO t2 VALUES (26,012702,37,'Baird','pessimist','motors','FAS');
+INSERT INTO t2 VALUES (27,012703,37,'celery','daughter','interrogate','');
+INSERT INTO t2 VALUES (28,012704,37,'misunderstander','decliner','pests','W');
+INSERT INTO t2 VALUES (29,013601,37,'handgun','lawgiver','stairway','');
+INSERT INTO t2 VALUES (30,013602,37,'foldout','stated','dopers','FAS');
+INSERT INTO t2 VALUES (31,013603,37,'mystic','readable','testicle','W');
+INSERT INTO t2 VALUES (32,013604,37,'succumbed','attrition','Parsifal','W');
+INSERT INTO t2 VALUES (33,013605,37,'Nabisco','cascade','leavings','');
+INSERT INTO t2 VALUES (34,013606,37,'fingerings','motors','postulation','W');
+INSERT INTO t2 VALUES (35,013607,37,'aging','interrogate','squeaking','');
+INSERT INTO t2 VALUES (36,013608,37,'afield','pests','contrasted','');
+INSERT INTO t2 VALUES (37,013609,37,'ammonium','stairway','leftover','');
+INSERT INTO t2 VALUES (38,013610,37,'boat','dopers','whiteners','');
+INSERT INTO t2 VALUES (39,013801,37,'intelligibility','testicle','erases','W');
+INSERT INTO t2 VALUES (40,013802,37,'Augustine','Parsifal','Punjab','W');
+INSERT INTO t2 VALUES (41,013803,37,'teethe','leavings','Merritt','');
+INSERT INTO t2 VALUES (42,013804,37,'dreaded','postulation','Quixotism','');
+INSERT INTO t2 VALUES (43,013901,37,'scholastics','squeaking','sweetish','FAS');
+INSERT INTO t2 VALUES (44,016001,37,'audiology','contrasted','dogging','FAS');
+INSERT INTO t2 VALUES (45,016201,37,'wallet','leftover','scornfully','FAS');
+INSERT INTO t2 VALUES (46,016202,37,'parters','whiteners','bellow','');
+INSERT INTO t2 VALUES (47,016301,37,'eschew','erases','bills','');
+INSERT INTO t2 VALUES (48,016302,37,'quitter','Punjab','cupboard','FAS');
+INSERT INTO t2 VALUES (49,016303,37,'neat','Merritt','sureties','FAS');
+INSERT INTO t2 VALUES (50,016304,37,'Steinberg','Quixotism','puddings','');
+INSERT INTO t2 VALUES (51,018001,37,'jarring','sweetish','tapestry','');
+INSERT INTO t2 VALUES (52,018002,37,'tinily','dogging','fetters','');
+INSERT INTO t2 VALUES (53,018003,37,'balled','scornfully','bivalves','');
+INSERT INTO t2 VALUES (54,018004,37,'persist','bellow','incurring','');
+INSERT INTO t2 VALUES (55,018005,37,'attainments','bills','Adolph','');
+INSERT INTO t2 VALUES (56,018007,37,'fanatic','cupboard','pithed','');
+INSERT INTO t2 VALUES (57,018008,37,'measures','sureties','emergency','');
+INSERT INTO t2 VALUES (58,018009,37,'rightfulness','puddings','Miles','');
+INSERT INTO t2 VALUES (59,018010,37,'capably','tapestry','trimmings','');
+INSERT INTO t2 VALUES (60,018012,37,'impulsive','fetters','tragedies','W');
+INSERT INTO t2 VALUES (61,018013,37,'starlet','bivalves','skulking','W');
+INSERT INTO t2 VALUES (62,018014,37,'terminators','incurring','flint','');
+INSERT INTO t2 VALUES (63,018015,37,'untying','Adolph','flopping','W');
+INSERT INTO t2 VALUES (64,018016,37,'announces','pithed','relaxing','FAS');
+INSERT INTO t2 VALUES (65,018017,37,'featherweight','emergency','offload','FAS');
+INSERT INTO t2 VALUES (66,018018,37,'pessimist','Miles','suites','W');
+INSERT INTO t2 VALUES (67,018019,37,'daughter','trimmings','lists','FAS');
+INSERT INTO t2 VALUES (68,018020,37,'decliner','tragedies','animized','FAS');
+INSERT INTO t2 VALUES (69,018021,37,'lawgiver','skulking','multilayer','W');
+INSERT INTO t2 VALUES (70,018022,37,'stated','flint','standardizes','FAS');
+INSERT INTO t2 VALUES (71,018023,37,'readable','flopping','Judas','');
+INSERT INTO t2 VALUES (72,018024,37,'attrition','relaxing','vacuuming','W');
+INSERT INTO t2 VALUES (73,018025,37,'cascade','offload','dentally','W');
+INSERT INTO t2 VALUES (74,018026,37,'motors','suites','humanness','W');
+INSERT INTO t2 VALUES (75,018027,37,'interrogate','lists','inch','W');
+INSERT INTO t2 VALUES (76,018028,37,'pests','animized','Weissmuller','W');
+INSERT INTO t2 VALUES (77,018029,37,'stairway','multilayer','irresponsibly','W');
+INSERT INTO t2 VALUES (78,018030,37,'dopers','standardizes','luckily','FAS');
+INSERT INTO t2 VALUES (79,018032,37,'testicle','Judas','culled','W');
+INSERT INTO t2 VALUES (80,018033,37,'Parsifal','vacuuming','medical','FAS');
+INSERT INTO t2 VALUES (81,018034,37,'leavings','dentally','bloodbath','FAS');
+INSERT INTO t2 VALUES (82,018035,37,'postulation','humanness','subschema','W');
+INSERT INTO t2 VALUES (83,018036,37,'squeaking','inch','animals','W');
+INSERT INTO t2 VALUES (84,018037,37,'contrasted','Weissmuller','Micronesia','');
+INSERT INTO t2 VALUES (85,018038,37,'leftover','irresponsibly','repetitions','');
+INSERT INTO t2 VALUES (86,018039,37,'whiteners','luckily','Antares','');
+INSERT INTO t2 VALUES (87,018040,37,'erases','culled','ventilate','W');
+INSERT INTO t2 VALUES (88,018041,37,'Punjab','medical','pityingly','');
+INSERT INTO t2 VALUES (89,018042,37,'Merritt','bloodbath','interdependent','');
+INSERT INTO t2 VALUES (90,018043,37,'Quixotism','subschema','Graves','FAS');
+INSERT INTO t2 VALUES (91,018044,37,'sweetish','animals','neonatal','');
+INSERT INTO t2 VALUES (92,018045,37,'dogging','Micronesia','scribbled','FAS');
+INSERT INTO t2 VALUES (93,018046,37,'scornfully','repetitions','chafe','W');
+INSERT INTO t2 VALUES (94,018048,37,'bellow','Antares','honoring','');
+INSERT INTO t2 VALUES (95,018049,37,'bills','ventilate','realtor','');
+INSERT INTO t2 VALUES (96,018050,37,'cupboard','pityingly','elite','');
+INSERT INTO t2 VALUES (97,018051,37,'sureties','interdependent','funereal','');
+INSERT INTO t2 VALUES (98,018052,37,'puddings','Graves','abrogating','');
+INSERT INTO t2 VALUES (99,018053,50,'tapestry','neonatal','sorters','');
+INSERT INTO t2 VALUES (100,018054,37,'fetters','scribbled','Conley','');
+INSERT INTO t2 VALUES (101,018055,37,'bivalves','chafe','lectured','');
+INSERT INTO t2 VALUES (102,018056,37,'incurring','honoring','Abraham','');
+INSERT INTO t2 VALUES (103,018057,37,'Adolph','realtor','Hawaii','W');
+INSERT INTO t2 VALUES (104,018058,37,'pithed','elite','cage','');
+INSERT INTO t2 VALUES (105,018059,36,'emergency','funereal','hushes','');
+INSERT INTO t2 VALUES (106,018060,37,'Miles','abrogating','Simla','');
+INSERT INTO t2 VALUES (107,018061,37,'trimmings','sorters','reporters','');
+INSERT INTO t2 VALUES (108,018101,37,'tragedies','Conley','Dutchman','FAS');
+INSERT INTO t2 VALUES (109,018102,37,'skulking','lectured','descendants','FAS');
+INSERT INTO t2 VALUES (110,018103,37,'flint','Abraham','groupings','FAS');
+INSERT INTO t2 VALUES (111,018104,37,'flopping','Hawaii','dissociate','');
+INSERT INTO t2 VALUES (112,018201,37,'relaxing','cage','coexist','W');
+INSERT INTO t2 VALUES (113,018202,37,'offload','hushes','Beebe','');
+INSERT INTO t2 VALUES (114,018402,37,'suites','Simla','Taoism','');
+INSERT INTO t2 VALUES (115,018403,37,'lists','reporters','Connally','');
+INSERT INTO t2 VALUES (116,018404,37,'animized','Dutchman','fetched','FAS');
+INSERT INTO t2 VALUES (117,018405,37,'multilayer','descendants','checkpoints','FAS');
+INSERT INTO t2 VALUES (118,018406,37,'standardizes','groupings','rusting','');
+INSERT INTO t2 VALUES (119,018409,37,'Judas','dissociate','galling','');
+INSERT INTO t2 VALUES (120,018601,37,'vacuuming','coexist','obliterates','');
+INSERT INTO t2 VALUES (121,018602,37,'dentally','Beebe','traitor','');
+INSERT INTO t2 VALUES (122,018603,37,'humanness','Taoism','resumes','FAS');
+INSERT INTO t2 VALUES (123,018801,37,'inch','Connally','analyzable','FAS');
+INSERT INTO t2 VALUES (124,018802,37,'Weissmuller','fetched','terminator','FAS');
+INSERT INTO t2 VALUES (125,018803,37,'irresponsibly','checkpoints','gritty','FAS');
+INSERT INTO t2 VALUES (126,018804,37,'luckily','rusting','firearm','W');
+INSERT INTO t2 VALUES (127,018805,37,'culled','galling','minima','');
+INSERT INTO t2 VALUES (128,018806,37,'medical','obliterates','Selfridge','');
+INSERT INTO t2 VALUES (129,018807,37,'bloodbath','traitor','disable','');
+INSERT INTO t2 VALUES (130,018808,37,'subschema','resumes','witchcraft','W');
+INSERT INTO t2 VALUES (131,018809,37,'animals','analyzable','betroth','W');
+INSERT INTO t2 VALUES (132,018810,37,'Micronesia','terminator','Manhattanize','');
+INSERT INTO t2 VALUES (133,018811,37,'repetitions','gritty','imprint','');
+INSERT INTO t2 VALUES (134,018812,37,'Antares','firearm','peeked','');
+INSERT INTO t2 VALUES (135,019101,37,'ventilate','minima','swelling','');
+INSERT INTO t2 VALUES (136,019102,37,'pityingly','Selfridge','interrelationships','W');
+INSERT INTO t2 VALUES (137,019103,37,'interdependent','disable','riser','');
+INSERT INTO t2 VALUES (138,019201,37,'Graves','witchcraft','Gandhian','W');
+INSERT INTO t2 VALUES (139,030501,37,'neonatal','betroth','peacock','A');
+INSERT INTO t2 VALUES (140,030502,50,'scribbled','Manhattanize','bee','A');
+INSERT INTO t2 VALUES (141,030503,37,'chafe','imprint','kanji','');
+INSERT INTO t2 VALUES (142,030504,37,'honoring','peeked','dental','');
+INSERT INTO t2 VALUES (143,031901,37,'realtor','swelling','scarf','FAS');
+INSERT INTO t2 VALUES (144,036001,37,'elite','interrelationships','chasm','A');
+INSERT INTO t2 VALUES (145,036002,37,'funereal','riser','insolence','A');
+INSERT INTO t2 VALUES (146,036004,37,'abrogating','Gandhian','syndicate','');
+INSERT INTO t2 VALUES (147,036005,37,'sorters','peacock','alike','');
+INSERT INTO t2 VALUES (148,038001,37,'Conley','bee','imperial','A');
+INSERT INTO t2 VALUES (149,038002,37,'lectured','kanji','convulsion','A');
+INSERT INTO t2 VALUES (150,038003,37,'Abraham','dental','railway','A');
+INSERT INTO t2 VALUES (151,038004,37,'Hawaii','scarf','validate','A');
+INSERT INTO t2 VALUES (152,038005,37,'cage','chasm','normalizes','A');
+INSERT INTO t2 VALUES (153,038006,37,'hushes','insolence','comprehensive','');
+INSERT INTO t2 VALUES (154,038007,37,'Simla','syndicate','chewing','');
+INSERT INTO t2 VALUES (155,038008,37,'reporters','alike','denizen','');
+INSERT INTO t2 VALUES (156,038009,37,'Dutchman','imperial','schemer','');
+INSERT INTO t2 VALUES (157,038010,37,'descendants','convulsion','chronicle','');
+INSERT INTO t2 VALUES (158,038011,37,'groupings','railway','Kline','');
+INSERT INTO t2 VALUES (159,038012,37,'dissociate','validate','Anatole','');
+INSERT INTO t2 VALUES (160,038013,37,'coexist','normalizes','partridges','');
+INSERT INTO t2 VALUES (161,038014,37,'Beebe','comprehensive','brunch','');
+INSERT INTO t2 VALUES (162,038015,37,'Taoism','chewing','recruited','');
+INSERT INTO t2 VALUES (163,038016,37,'Connally','denizen','dimensions','W');
+INSERT INTO t2 VALUES (164,038017,37,'fetched','schemer','Chicana','W');
+INSERT INTO t2 VALUES (165,038018,37,'checkpoints','chronicle','announced','');
+INSERT INTO t2 VALUES (166,038101,37,'rusting','Kline','praised','FAS');
+INSERT INTO t2 VALUES (167,038102,37,'galling','Anatole','employing','');
+INSERT INTO t2 VALUES (168,038103,37,'obliterates','partridges','linear','');
+INSERT INTO t2 VALUES (169,038104,37,'traitor','brunch','quagmire','');
+INSERT INTO t2 VALUES (170,038201,37,'resumes','recruited','western','A');
+INSERT INTO t2 VALUES (171,038202,37,'analyzable','dimensions','relishing','');
+INSERT INTO t2 VALUES (172,038203,37,'terminator','Chicana','serving','A');
+INSERT INTO t2 VALUES (173,038204,37,'gritty','announced','scheduling','');
+INSERT INTO t2 VALUES (174,038205,37,'firearm','praised','lore','');
+INSERT INTO t2 VALUES (175,038206,37,'minima','employing','eventful','');
+INSERT INTO t2 VALUES (176,038208,37,'Selfridge','linear','arteriole','A');
+INSERT INTO t2 VALUES (177,042801,37,'disable','quagmire','disentangle','');
+INSERT INTO t2 VALUES (178,042802,37,'witchcraft','western','cured','A');
+INSERT INTO t2 VALUES (179,046101,37,'betroth','relishing','Fenton','W');
+INSERT INTO t2 VALUES (180,048001,37,'Manhattanize','serving','avoidable','A');
+INSERT INTO t2 VALUES (181,048002,37,'imprint','scheduling','drains','A');
+INSERT INTO t2 VALUES (182,048003,37,'peeked','lore','detectably','FAS');
+INSERT INTO t2 VALUES (183,048004,37,'swelling','eventful','husky','');
+INSERT INTO t2 VALUES (184,048005,37,'interrelationships','arteriole','impelling','');
+INSERT INTO t2 VALUES (185,048006,37,'riser','disentangle','undoes','');
+INSERT INTO t2 VALUES (186,048007,37,'Gandhian','cured','evened','');
+INSERT INTO t2 VALUES (187,048008,37,'peacock','Fenton','squeezes','');
+INSERT INTO t2 VALUES (188,048101,37,'bee','avoidable','destroyer','FAS');
+INSERT INTO t2 VALUES (189,048102,37,'kanji','drains','rudeness','');
+INSERT INTO t2 VALUES (190,048201,37,'dental','detectably','beaner','FAS');
+INSERT INTO t2 VALUES (191,048202,37,'scarf','husky','boorish','');
+INSERT INTO t2 VALUES (192,048203,37,'chasm','impelling','Everhart','');
+INSERT INTO t2 VALUES (193,048204,37,'insolence','undoes','encompass','A');
+INSERT INTO t2 VALUES (194,048205,37,'syndicate','evened','mushrooms','');
+INSERT INTO t2 VALUES (195,048301,37,'alike','squeezes','Alison','A');
+INSERT INTO t2 VALUES (196,048302,37,'imperial','destroyer','externally','FAS');
+INSERT INTO t2 VALUES (197,048303,37,'convulsion','rudeness','pellagra','');
+INSERT INTO t2 VALUES (198,048304,37,'railway','beaner','cult','');
+INSERT INTO t2 VALUES (199,048305,37,'validate','boorish','creek','A');
+INSERT INTO t2 VALUES (200,048401,37,'normalizes','Everhart','Huffman','');
+INSERT INTO t2 VALUES (201,048402,37,'comprehensive','encompass','Majorca','FAS');
+INSERT INTO t2 VALUES (202,048403,37,'chewing','mushrooms','governing','A');
+INSERT INTO t2 VALUES (203,048404,37,'denizen','Alison','gadfly','FAS');
+INSERT INTO t2 VALUES (204,048405,37,'schemer','externally','reassigned','FAS');
+INSERT INTO t2 VALUES (205,048406,37,'chronicle','pellagra','intentness','W');
+INSERT INTO t2 VALUES (206,048407,37,'Kline','cult','craziness','');
+INSERT INTO t2 VALUES (207,048408,37,'Anatole','creek','psychic','');
+INSERT INTO t2 VALUES (208,048409,37,'partridges','Huffman','squabbled','');
+INSERT INTO t2 VALUES (209,048410,37,'brunch','Majorca','burlesque','');
+INSERT INTO t2 VALUES (210,048411,37,'recruited','governing','capped','');
+INSERT INTO t2 VALUES (211,048412,37,'dimensions','gadfly','extracted','A');
+INSERT INTO t2 VALUES (212,048413,37,'Chicana','reassigned','DiMaggio','');
+INSERT INTO t2 VALUES (213,048601,37,'announced','intentness','exclamation','FAS');
+INSERT INTO t2 VALUES (214,048602,37,'praised','craziness','subdirectory','');
+INSERT INTO t2 VALUES (215,048603,37,'employing','psychic','fangs','');
+INSERT INTO t2 VALUES (216,048604,37,'linear','squabbled','buyer','A');
+INSERT INTO t2 VALUES (217,048801,37,'quagmire','burlesque','pithing','A');
+INSERT INTO t2 VALUES (218,050901,37,'western','capped','transistorizing','A');
+INSERT INTO t2 VALUES (219,051201,37,'relishing','extracted','nonbiodegradable','');
+INSERT INTO t2 VALUES (220,056002,37,'serving','DiMaggio','dislocate','');
+INSERT INTO t2 VALUES (221,056003,37,'scheduling','exclamation','monochromatic','FAS');
+INSERT INTO t2 VALUES (222,056004,37,'lore','subdirectory','batting','');
+INSERT INTO t2 VALUES (223,056102,37,'eventful','fangs','postcondition','A');
+INSERT INTO t2 VALUES (224,056203,37,'arteriole','buyer','catalog','FAS');
+INSERT INTO t2 VALUES (225,056204,37,'disentangle','pithing','Remus','');
+INSERT INTO t2 VALUES (226,058003,37,'cured','transistorizing','devices','A');
+INSERT INTO t2 VALUES (227,058004,37,'Fenton','nonbiodegradable','bike','A');
+INSERT INTO t2 VALUES (228,058005,37,'avoidable','dislocate','qualify','');
+INSERT INTO t2 VALUES (229,058006,37,'drains','monochromatic','detained','');
+INSERT INTO t2 VALUES (230,058007,37,'detectably','batting','commended','');
+INSERT INTO t2 VALUES (231,058101,37,'husky','postcondition','civilize','');
+INSERT INTO t2 VALUES (232,058102,37,'impelling','catalog','Elmhurst','');
+INSERT INTO t2 VALUES (233,058103,37,'undoes','Remus','anesthetizing','');
+INSERT INTO t2 VALUES (234,058105,37,'evened','devices','deaf','');
+INSERT INTO t2 VALUES (235,058111,37,'squeezes','bike','Brigham','');
+INSERT INTO t2 VALUES (236,058112,37,'destroyer','qualify','title','');
+INSERT INTO t2 VALUES (237,058113,37,'rudeness','detained','coarse','');
+INSERT INTO t2 VALUES (238,058114,37,'beaner','commended','combinations','');
+INSERT INTO t2 VALUES (239,058115,37,'boorish','civilize','grayness','');
+INSERT INTO t2 VALUES (240,058116,37,'Everhart','Elmhurst','innumerable','FAS');
+INSERT INTO t2 VALUES (241,058117,37,'encompass','anesthetizing','Caroline','A');
+INSERT INTO t2 VALUES (242,058118,37,'mushrooms','deaf','fatty','FAS');
+INSERT INTO t2 VALUES (243,058119,37,'Alison','Brigham','eastbound','');
+INSERT INTO t2 VALUES (244,058120,37,'externally','title','inexperienced','');
+INSERT INTO t2 VALUES (245,058121,37,'pellagra','coarse','hoarder','A');
+INSERT INTO t2 VALUES (246,058122,37,'cult','combinations','scotch','W');
+INSERT INTO t2 VALUES (247,058123,37,'creek','grayness','passport','A');
+INSERT INTO t2 VALUES (248,058124,37,'Huffman','innumerable','strategic','FAS');
+INSERT INTO t2 VALUES (249,058125,37,'Majorca','Caroline','gated','');
+INSERT INTO t2 VALUES (250,058126,37,'governing','fatty','flog','');
+INSERT INTO t2 VALUES (251,058127,37,'gadfly','eastbound','Pipestone','');
+INSERT INTO t2 VALUES (252,058128,37,'reassigned','inexperienced','Dar','');
+INSERT INTO t2 VALUES (253,058201,37,'intentness','hoarder','Corcoran','');
+INSERT INTO t2 VALUES (254,058202,37,'craziness','scotch','flyers','A');
+INSERT INTO t2 VALUES (255,058303,37,'psychic','passport','competitions','W');
+INSERT INTO t2 VALUES (256,058304,37,'squabbled','strategic','suppliers','FAS');
+INSERT INTO t2 VALUES (257,058602,37,'burlesque','gated','skips','');
+INSERT INTO t2 VALUES (258,058603,37,'capped','flog','institutes','');
+INSERT INTO t2 VALUES (259,058604,37,'extracted','Pipestone','troop','A');
+INSERT INTO t2 VALUES (260,058605,37,'DiMaggio','Dar','connective','W');
+INSERT INTO t2 VALUES (261,058606,37,'exclamation','Corcoran','denies','');
+INSERT INTO t2 VALUES (262,058607,37,'subdirectory','flyers','polka','');
+INSERT INTO t2 VALUES (263,060401,36,'fangs','competitions','observations','FAS');
+INSERT INTO t2 VALUES (264,061701,36,'buyer','suppliers','askers','');
+INSERT INTO t2 VALUES (265,066201,36,'pithing','skips','homeless','FAS');
+INSERT INTO t2 VALUES (266,066501,36,'transistorizing','institutes','Anna','');
+INSERT INTO t2 VALUES (267,068001,36,'nonbiodegradable','troop','subdirectories','W');
+INSERT INTO t2 VALUES (268,068002,36,'dislocate','connective','decaying','FAS');
+INSERT INTO t2 VALUES (269,068005,36,'monochromatic','denies','outwitting','W');
+INSERT INTO t2 VALUES (270,068006,36,'batting','polka','Harpy','W');
+INSERT INTO t2 VALUES (271,068007,36,'postcondition','observations','crazed','');
+INSERT INTO t2 VALUES (272,068008,36,'catalog','askers','suffocate','');
+INSERT INTO t2 VALUES (273,068009,36,'Remus','homeless','provers','FAS');
+INSERT INTO t2 VALUES (274,068010,36,'devices','Anna','technically','');
+INSERT INTO t2 VALUES (275,068011,36,'bike','subdirectories','Franklinizations','');
+INSERT INTO t2 VALUES (276,068202,36,'qualify','decaying','considered','');
+INSERT INTO t2 VALUES (277,068302,36,'detained','outwitting','tinnily','');
+INSERT INTO t2 VALUES (278,068303,36,'commended','Harpy','uninterruptedly','');
+INSERT INTO t2 VALUES (279,068401,36,'civilize','crazed','whistled','A');
+INSERT INTO t2 VALUES (280,068501,36,'Elmhurst','suffocate','automate','');
+INSERT INTO t2 VALUES (281,068502,36,'anesthetizing','provers','gutting','W');
+INSERT INTO t2 VALUES (282,068503,36,'deaf','technically','surreptitious','');
+INSERT INTO t2 VALUES (283,068602,36,'Brigham','Franklinizations','Choctaw','');
+INSERT INTO t2 VALUES (284,068603,36,'title','considered','cooks','');
+INSERT INTO t2 VALUES (285,068701,36,'coarse','tinnily','millivolt','FAS');
+INSERT INTO t2 VALUES (286,068702,36,'combinations','uninterruptedly','counterpoise','');
+INSERT INTO t2 VALUES (287,068703,36,'grayness','whistled','Gothicism','');
+INSERT INTO t2 VALUES (288,076001,36,'innumerable','automate','feminine','');
+INSERT INTO t2 VALUES (289,076002,36,'Caroline','gutting','metaphysically','W');
+INSERT INTO t2 VALUES (290,076101,36,'fatty','surreptitious','sanding','A');
+INSERT INTO t2 VALUES (291,076102,36,'eastbound','Choctaw','contributorily','');
+INSERT INTO t2 VALUES (292,076103,36,'inexperienced','cooks','receivers','FAS');
+INSERT INTO t2 VALUES (293,076302,36,'hoarder','millivolt','adjourn','');
+INSERT INTO t2 VALUES (294,076303,36,'scotch','counterpoise','straggled','A');
+INSERT INTO t2 VALUES (295,076304,36,'passport','Gothicism','druggists','');
+INSERT INTO t2 VALUES (296,076305,36,'strategic','feminine','thanking','FAS');
+INSERT INTO t2 VALUES (297,076306,36,'gated','metaphysically','ostrich','');
+INSERT INTO t2 VALUES (298,076307,36,'flog','sanding','hopelessness','FAS');
+INSERT INTO t2 VALUES (299,076402,36,'Pipestone','contributorily','Eurydice','');
+INSERT INTO t2 VALUES (300,076501,36,'Dar','receivers','excitation','W');
+INSERT INTO t2 VALUES (301,076502,36,'Corcoran','adjourn','presumes','FAS');
+INSERT INTO t2 VALUES (302,076701,36,'flyers','straggled','imaginable','FAS');
+INSERT INTO t2 VALUES (303,078001,36,'competitions','druggists','concoct','W');
+INSERT INTO t2 VALUES (304,078002,36,'suppliers','thanking','peering','W');
+INSERT INTO t2 VALUES (305,078003,36,'skips','ostrich','Phelps','FAS');
+INSERT INTO t2 VALUES (306,078004,36,'institutes','hopelessness','ferociousness','FAS');
+INSERT INTO t2 VALUES (307,078005,36,'troop','Eurydice','sentences','');
+INSERT INTO t2 VALUES (308,078006,36,'connective','excitation','unlocks','');
+INSERT INTO t2 VALUES (309,078007,36,'denies','presumes','engrossing','W');
+INSERT INTO t2 VALUES (310,078008,36,'polka','imaginable','Ruth','');
+INSERT INTO t2 VALUES (311,078101,36,'observations','concoct','tying','');
+INSERT INTO t2 VALUES (312,078103,36,'askers','peering','exclaimers','');
+INSERT INTO t2 VALUES (313,078104,36,'homeless','Phelps','synergy','');
+INSERT INTO t2 VALUES (314,078105,36,'Anna','ferociousness','Huey','W');
+INSERT INTO t2 VALUES (315,082101,36,'subdirectories','sentences','merging','');
+INSERT INTO t2 VALUES (316,083401,36,'decaying','unlocks','judges','A');
+INSERT INTO t2 VALUES (317,084001,36,'outwitting','engrossing','Shylock','W');
+INSERT INTO t2 VALUES (318,084002,36,'Harpy','Ruth','Miltonism','');
+INSERT INTO t2 VALUES (319,086001,36,'crazed','tying','hen','W');
+INSERT INTO t2 VALUES (320,086102,36,'suffocate','exclaimers','honeybee','FAS');
+INSERT INTO t2 VALUES (321,086201,36,'provers','synergy','towers','');
+INSERT INTO t2 VALUES (322,088001,36,'technically','Huey','dilutes','W');
+INSERT INTO t2 VALUES (323,088002,36,'Franklinizations','merging','numerals','FAS');
+INSERT INTO t2 VALUES (324,088003,36,'considered','judges','democracy','FAS');
+INSERT INTO t2 VALUES (325,088004,36,'tinnily','Shylock','Ibero-','');
+INSERT INTO t2 VALUES (326,088101,36,'uninterruptedly','Miltonism','invalids','');
+INSERT INTO t2 VALUES (327,088102,36,'whistled','hen','behavior','');
+INSERT INTO t2 VALUES (328,088103,36,'automate','honeybee','accruing','');
+INSERT INTO t2 VALUES (329,088104,36,'gutting','towers','relics','A');
+INSERT INTO t2 VALUES (330,088105,36,'surreptitious','dilutes','rackets','');
+INSERT INTO t2 VALUES (331,088106,36,'Choctaw','numerals','Fischbein','W');
+INSERT INTO t2 VALUES (332,088201,36,'cooks','democracy','phony','W');
+INSERT INTO t2 VALUES (333,088203,36,'millivolt','Ibero-','cross','FAS');
+INSERT INTO t2 VALUES (334,088204,36,'counterpoise','invalids','cleanup','');
+INSERT INTO t2 VALUES (335,088302,37,'Gothicism','behavior','conspirator','');
+INSERT INTO t2 VALUES (336,088303,37,'feminine','accruing','label','FAS');
+INSERT INTO t2 VALUES (337,088305,37,'metaphysically','relics','university','');
+INSERT INTO t2 VALUES (338,088402,37,'sanding','rackets','cleansed','FAS');
+INSERT INTO t2 VALUES (339,088501,36,'contributorily','Fischbein','ballgown','');
+INSERT INTO t2 VALUES (340,088502,36,'receivers','phony','starlet','');
+INSERT INTO t2 VALUES (341,088503,36,'adjourn','cross','aqueous','');
+INSERT INTO t2 VALUES (342,098001,58,'straggled','cleanup','portrayal','A');
+INSERT INTO t2 VALUES (343,098002,58,'druggists','conspirator','despising','W');
+INSERT INTO t2 VALUES (344,098003,58,'thanking','label','distort','W');
+INSERT INTO t2 VALUES (345,098004,58,'ostrich','university','palmed','');
+INSERT INTO t2 VALUES (346,098005,58,'hopelessness','cleansed','faced','');
+INSERT INTO t2 VALUES (347,098006,58,'Eurydice','ballgown','silverware','');
+INSERT INTO t2 VALUES (348,141903,29,'excitation','starlet','assessor','');
+INSERT INTO t2 VALUES (349,098008,58,'presumes','aqueous','spiders','');
+INSERT INTO t2 VALUES (350,098009,58,'imaginable','portrayal','artificially','');
+INSERT INTO t2 VALUES (351,098010,58,'concoct','despising','reminiscence','');
+INSERT INTO t2 VALUES (352,098011,58,'peering','distort','Mexican','');
+INSERT INTO t2 VALUES (353,098012,58,'Phelps','palmed','obnoxious','');
+INSERT INTO t2 VALUES (354,098013,58,'ferociousness','faced','fragile','');
+INSERT INTO t2 VALUES (355,098014,58,'sentences','silverware','apprehensible','');
+INSERT INTO t2 VALUES (356,098015,58,'unlocks','assessor','births','');
+INSERT INTO t2 VALUES (357,098016,58,'engrossing','spiders','garages','');
+INSERT INTO t2 VALUES (358,098017,58,'Ruth','artificially','panty','');
+INSERT INTO t2 VALUES (359,098018,58,'tying','reminiscence','anteater','');
+INSERT INTO t2 VALUES (360,098019,58,'exclaimers','Mexican','displacement','A');
+INSERT INTO t2 VALUES (361,098020,58,'synergy','obnoxious','drovers','A');
+INSERT INTO t2 VALUES (362,098021,58,'Huey','fragile','patenting','A');
+INSERT INTO t2 VALUES (363,098022,58,'merging','apprehensible','far','A');
+INSERT INTO t2 VALUES (364,098023,58,'judges','births','shrieks','');
+INSERT INTO t2 VALUES (365,098024,58,'Shylock','garages','aligning','W');
+INSERT INTO t2 VALUES (366,098025,37,'Miltonism','panty','pragmatism','');
+INSERT INTO t2 VALUES (367,106001,36,'hen','anteater','fevers','W');
+INSERT INTO t2 VALUES (368,108001,36,'honeybee','displacement','reexamines','A');
+INSERT INTO t2 VALUES (369,108002,36,'towers','drovers','occupancies','');
+INSERT INTO t2 VALUES (370,108003,36,'dilutes','patenting','sweats','FAS');
+INSERT INTO t2 VALUES (371,108004,36,'numerals','far','modulators','');
+INSERT INTO t2 VALUES (372,108005,36,'democracy','shrieks','demand','W');
+INSERT INTO t2 VALUES (373,108007,36,'Ibero-','aligning','Madeira','');
+INSERT INTO t2 VALUES (374,108008,36,'invalids','pragmatism','Viennese','W');
+INSERT INTO t2 VALUES (375,108009,36,'behavior','fevers','chillier','W');
+INSERT INTO t2 VALUES (376,108010,36,'accruing','reexamines','wildcats','FAS');
+INSERT INTO t2 VALUES (377,108011,36,'relics','occupancies','gentle','');
+INSERT INTO t2 VALUES (378,108012,36,'rackets','sweats','Angles','W');
+INSERT INTO t2 VALUES (379,108101,36,'Fischbein','modulators','accuracies','');
+INSERT INTO t2 VALUES (380,108102,36,'phony','demand','toggle','');
+INSERT INTO t2 VALUES (381,108103,36,'cross','Madeira','Mendelssohn','W');
+INSERT INTO t2 VALUES (382,108111,50,'cleanup','Viennese','behaviorally','');
+INSERT INTO t2 VALUES (383,108105,36,'conspirator','chillier','Rochford','');
+INSERT INTO t2 VALUES (384,108106,36,'label','wildcats','mirror','W');
+INSERT INTO t2 VALUES (385,108107,36,'university','gentle','Modula','');
+INSERT INTO t2 VALUES (386,108108,50,'cleansed','Angles','clobbering','');
+INSERT INTO t2 VALUES (387,108109,36,'ballgown','accuracies','chronography','');
+INSERT INTO t2 VALUES (388,108110,36,'starlet','toggle','Eskimoizeds','');
+INSERT INTO t2 VALUES (389,108201,36,'aqueous','Mendelssohn','British','W');
+INSERT INTO t2 VALUES (390,108202,36,'portrayal','behaviorally','pitfalls','');
+INSERT INTO t2 VALUES (391,108203,36,'despising','Rochford','verify','W');
+INSERT INTO t2 VALUES (392,108204,36,'distort','mirror','scatter','FAS');
+INSERT INTO t2 VALUES (393,108205,36,'palmed','Modula','Aztecan','');
+INSERT INTO t2 VALUES (394,108301,36,'faced','clobbering','acuity','W');
+INSERT INTO t2 VALUES (395,108302,36,'silverware','chronography','sinking','W');
+INSERT INTO t2 VALUES (396,112101,36,'assessor','Eskimoizeds','beasts','FAS');
+INSERT INTO t2 VALUES (397,112102,36,'spiders','British','Witt','W');
+INSERT INTO t2 VALUES (398,113701,36,'artificially','pitfalls','physicists','FAS');
+INSERT INTO t2 VALUES (399,116001,36,'reminiscence','verify','folksong','A');
+INSERT INTO t2 VALUES (400,116201,36,'Mexican','scatter','strokes','FAS');
+INSERT INTO t2 VALUES (401,116301,36,'obnoxious','Aztecan','crowder','');
+INSERT INTO t2 VALUES (402,116302,36,'fragile','acuity','merry','');
+INSERT INTO t2 VALUES (403,116601,36,'apprehensible','sinking','cadenced','');
+INSERT INTO t2 VALUES (404,116602,36,'births','beasts','alimony','A');
+INSERT INTO t2 VALUES (405,116603,36,'garages','Witt','principled','A');
+INSERT INTO t2 VALUES (406,116701,36,'panty','physicists','golfing','');
+INSERT INTO t2 VALUES (407,116702,36,'anteater','folksong','undiscovered','');
+INSERT INTO t2 VALUES (408,118001,36,'displacement','strokes','irritates','');
+INSERT INTO t2 VALUES (409,118002,36,'drovers','crowder','patriots','A');
+INSERT INTO t2 VALUES (410,118003,36,'patenting','merry','rooms','FAS');
+INSERT INTO t2 VALUES (411,118004,36,'far','cadenced','towering','W');
+INSERT INTO t2 VALUES (412,118005,36,'shrieks','alimony','displease','');
+INSERT INTO t2 VALUES (413,118006,36,'aligning','principled','photosensitive','');
+INSERT INTO t2 VALUES (414,118007,36,'pragmatism','golfing','inking','');
+INSERT INTO t2 VALUES (415,118008,36,'fevers','undiscovered','gainers','');
+INSERT INTO t2 VALUES (416,118101,36,'reexamines','irritates','leaning','A');
+INSERT INTO t2 VALUES (417,118102,36,'occupancies','patriots','hydrant','A');
+INSERT INTO t2 VALUES (418,118103,36,'sweats','rooms','preserve','');
+INSERT INTO t2 VALUES (419,118202,36,'modulators','towering','blinded','A');
+INSERT INTO t2 VALUES (420,118203,36,'demand','displease','interactions','A');
+INSERT INTO t2 VALUES (421,118204,36,'Madeira','photosensitive','Barry','');
+INSERT INTO t2 VALUES (422,118302,36,'Viennese','inking','whiteness','A');
+INSERT INTO t2 VALUES (423,118304,36,'chillier','gainers','pastimes','W');
+INSERT INTO t2 VALUES (424,118305,36,'wildcats','leaning','Edenization','');
+INSERT INTO t2 VALUES (425,118306,36,'gentle','hydrant','Muscat','');
+INSERT INTO t2 VALUES (426,118307,36,'Angles','preserve','assassinated','');
+INSERT INTO t2 VALUES (427,123101,36,'accuracies','blinded','labeled','');
+INSERT INTO t2 VALUES (428,123102,36,'toggle','interactions','glacial','A');
+INSERT INTO t2 VALUES (429,123301,36,'Mendelssohn','Barry','implied','W');
+INSERT INTO t2 VALUES (430,126001,36,'behaviorally','whiteness','bibliographies','W');
+INSERT INTO t2 VALUES (431,126002,36,'Rochford','pastimes','Buchanan','');
+INSERT INTO t2 VALUES (432,126003,36,'mirror','Edenization','forgivably','FAS');
+INSERT INTO t2 VALUES (433,126101,36,'Modula','Muscat','innuendo','A');
+INSERT INTO t2 VALUES (434,126301,36,'clobbering','assassinated','den','FAS');
+INSERT INTO t2 VALUES (435,126302,36,'chronography','labeled','submarines','W');
+INSERT INTO t2 VALUES (436,126402,36,'Eskimoizeds','glacial','mouthful','A');
+INSERT INTO t2 VALUES (437,126601,36,'British','implied','expiring','');
+INSERT INTO t2 VALUES (438,126602,36,'pitfalls','bibliographies','unfulfilled','FAS');
+INSERT INTO t2 VALUES (439,126702,36,'verify','Buchanan','precession','');
+INSERT INTO t2 VALUES (440,128001,36,'scatter','forgivably','nullified','');
+INSERT INTO t2 VALUES (441,128002,36,'Aztecan','innuendo','affects','');
+INSERT INTO t2 VALUES (442,128003,36,'acuity','den','Cynthia','');
+INSERT INTO t2 VALUES (443,128004,36,'sinking','submarines','Chablis','A');
+INSERT INTO t2 VALUES (444,128005,36,'beasts','mouthful','betterments','FAS');
+INSERT INTO t2 VALUES (445,128007,36,'Witt','expiring','advertising','');
+INSERT INTO t2 VALUES (446,128008,36,'physicists','unfulfilled','rubies','A');
+INSERT INTO t2 VALUES (447,128009,36,'folksong','precession','southwest','FAS');
+INSERT INTO t2 VALUES (448,128010,36,'strokes','nullified','superstitious','A');
+INSERT INTO t2 VALUES (449,128011,36,'crowder','affects','tabernacle','W');
+INSERT INTO t2 VALUES (450,128012,36,'merry','Cynthia','silk','A');
+INSERT INTO t2 VALUES (451,128013,36,'cadenced','Chablis','handsomest','A');
+INSERT INTO t2 VALUES (452,128014,36,'alimony','betterments','Persian','A');
+INSERT INTO t2 VALUES (453,128015,36,'principled','advertising','analog','W');
+INSERT INTO t2 VALUES (454,128016,36,'golfing','rubies','complex','W');
+INSERT INTO t2 VALUES (455,128017,36,'undiscovered','southwest','Taoist','');
+INSERT INTO t2 VALUES (456,128018,36,'irritates','superstitious','suspend','');
+INSERT INTO t2 VALUES (457,128019,36,'patriots','tabernacle','relegated','');
+INSERT INTO t2 VALUES (458,128020,36,'rooms','silk','awesome','W');
+INSERT INTO t2 VALUES (459,128021,36,'towering','handsomest','Bruxelles','');
+INSERT INTO t2 VALUES (460,128022,36,'displease','Persian','imprecisely','A');
+INSERT INTO t2 VALUES (461,128023,36,'photosensitive','analog','televise','');
+INSERT INTO t2 VALUES (462,128101,36,'inking','complex','braking','');
+INSERT INTO t2 VALUES (463,128102,36,'gainers','Taoist','true','FAS');
+INSERT INTO t2 VALUES (464,128103,36,'leaning','suspend','disappointing','FAS');
+INSERT INTO t2 VALUES (465,128104,36,'hydrant','relegated','navally','W');
+INSERT INTO t2 VALUES (466,128106,36,'preserve','awesome','circus','');
+INSERT INTO t2 VALUES (467,128107,36,'blinded','Bruxelles','beetles','');
+INSERT INTO t2 VALUES (468,128108,36,'interactions','imprecisely','trumps','');
+INSERT INTO t2 VALUES (469,128202,36,'Barry','televise','fourscore','W');
+INSERT INTO t2 VALUES (470,128203,36,'whiteness','braking','Blackfoots','');
+INSERT INTO t2 VALUES (471,128301,36,'pastimes','true','Grady','');
+INSERT INTO t2 VALUES (472,128302,36,'Edenization','disappointing','quiets','FAS');
+INSERT INTO t2 VALUES (473,128303,36,'Muscat','navally','floundered','FAS');
+INSERT INTO t2 VALUES (474,128304,36,'assassinated','circus','profundity','W');
+INSERT INTO t2 VALUES (475,128305,36,'labeled','beetles','Garrisonian','W');
+INSERT INTO t2 VALUES (476,128307,36,'glacial','trumps','Strauss','');
+INSERT INTO t2 VALUES (477,128401,36,'implied','fourscore','cemented','FAS');
+INSERT INTO t2 VALUES (478,128502,36,'bibliographies','Blackfoots','contrition','A');
+INSERT INTO t2 VALUES (479,128503,36,'Buchanan','Grady','mutations','');
+INSERT INTO t2 VALUES (480,128504,36,'forgivably','quiets','exhibits','W');
+INSERT INTO t2 VALUES (481,128505,36,'innuendo','floundered','tits','');
+INSERT INTO t2 VALUES (482,128601,36,'den','profundity','mate','A');
+INSERT INTO t2 VALUES (483,128603,36,'submarines','Garrisonian','arches','');
+INSERT INTO t2 VALUES (484,128604,36,'mouthful','Strauss','Moll','');
+INSERT INTO t2 VALUES (485,128702,36,'expiring','cemented','ropers','');
+INSERT INTO t2 VALUES (486,128703,36,'unfulfilled','contrition','bombast','');
+INSERT INTO t2 VALUES (487,128704,36,'precession','mutations','difficultly','A');
+INSERT INTO t2 VALUES (488,138001,36,'nullified','exhibits','adsorption','');
+INSERT INTO t2 VALUES (489,138002,36,'affects','tits','definiteness','FAS');
+INSERT INTO t2 VALUES (490,138003,36,'Cynthia','mate','cultivation','A');
+INSERT INTO t2 VALUES (491,138004,36,'Chablis','arches','heals','A');
+INSERT INTO t2 VALUES (492,138005,36,'betterments','Moll','Heusen','W');
+INSERT INTO t2 VALUES (493,138006,36,'advertising','ropers','target','FAS');
+INSERT INTO t2 VALUES (494,138007,36,'rubies','bombast','cited','A');
+INSERT INTO t2 VALUES (495,138008,36,'southwest','difficultly','congresswoman','W');
+INSERT INTO t2 VALUES (496,138009,36,'superstitious','adsorption','Katherine','');
+INSERT INTO t2 VALUES (497,138102,36,'tabernacle','definiteness','titter','A');
+INSERT INTO t2 VALUES (498,138103,36,'silk','cultivation','aspire','A');
+INSERT INTO t2 VALUES (499,138104,36,'handsomest','heals','Mardis','');
+INSERT INTO t2 VALUES (500,138105,36,'Persian','Heusen','Nadia','W');
+INSERT INTO t2 VALUES (501,138201,36,'analog','target','estimating','FAS');
+INSERT INTO t2 VALUES (502,138302,36,'complex','cited','stuck','A');
+INSERT INTO t2 VALUES (503,138303,36,'Taoist','congresswoman','fifteenth','A');
+INSERT INTO t2 VALUES (504,138304,36,'suspend','Katherine','Colombo','');
+INSERT INTO t2 VALUES (505,138401,29,'relegated','titter','survey','A');
+INSERT INTO t2 VALUES (506,140102,29,'awesome','aspire','staffing','');
+INSERT INTO t2 VALUES (507,140103,29,'Bruxelles','Mardis','obtain','');
+INSERT INTO t2 VALUES (508,140104,29,'imprecisely','Nadia','loaded','');
+INSERT INTO t2 VALUES (509,140105,29,'televise','estimating','slaughtered','');
+INSERT INTO t2 VALUES (510,140201,29,'braking','stuck','lights','A');
+INSERT INTO t2 VALUES (511,140701,29,'true','fifteenth','circumference','');
+INSERT INTO t2 VALUES (512,141501,29,'disappointing','Colombo','dull','A');
+INSERT INTO t2 VALUES (513,141502,29,'navally','survey','weekly','A');
+INSERT INTO t2 VALUES (514,141901,29,'circus','staffing','wetness','');
+INSERT INTO t2 VALUES (515,141902,29,'beetles','obtain','visualized','');
+INSERT INTO t2 VALUES (516,142101,29,'trumps','loaded','Tannenbaum','');
+INSERT INTO t2 VALUES (517,142102,29,'fourscore','slaughtered','moribund','');
+INSERT INTO t2 VALUES (518,142103,29,'Blackfoots','lights','demultiplex','');
+INSERT INTO t2 VALUES (519,142701,29,'Grady','circumference','lockings','');
+INSERT INTO t2 VALUES (520,143001,29,'quiets','dull','thugs','FAS');
+INSERT INTO t2 VALUES (521,143501,29,'floundered','weekly','unnerves','');
+INSERT INTO t2 VALUES (522,143502,29,'profundity','wetness','abut','');
+INSERT INTO t2 VALUES (523,148001,29,'Garrisonian','visualized','Chippewa','A');
+INSERT INTO t2 VALUES (524,148002,29,'Strauss','Tannenbaum','stratifications','A');
+INSERT INTO t2 VALUES (525,148003,29,'cemented','moribund','signaled','');
+INSERT INTO t2 VALUES (526,148004,29,'contrition','demultiplex','Italianizes','A');
+INSERT INTO t2 VALUES (527,148005,29,'mutations','lockings','algorithmic','A');
+INSERT INTO t2 VALUES (528,148006,29,'exhibits','thugs','paranoid','FAS');
+INSERT INTO t2 VALUES (529,148007,29,'tits','unnerves','camping','A');
+INSERT INTO t2 VALUES (530,148009,29,'mate','abut','signifying','A');
+INSERT INTO t2 VALUES (531,148010,29,'arches','Chippewa','Patrice','W');
+INSERT INTO t2 VALUES (532,148011,29,'Moll','stratifications','search','A');
+INSERT INTO t2 VALUES (533,148012,29,'ropers','signaled','Angeles','A');
+INSERT INTO t2 VALUES (534,148013,29,'bombast','Italianizes','semblance','');
+INSERT INTO t2 VALUES (535,148023,36,'difficultly','algorithmic','taxed','');
+INSERT INTO t2 VALUES (536,148015,29,'adsorption','paranoid','Beatrice','');
+INSERT INTO t2 VALUES (537,148016,29,'definiteness','camping','retrace','');
+INSERT INTO t2 VALUES (538,148017,29,'cultivation','signifying','lockout','');
+INSERT INTO t2 VALUES (539,148018,29,'heals','Patrice','grammatic','');
+INSERT INTO t2 VALUES (540,148019,29,'Heusen','search','helmsman','');
+INSERT INTO t2 VALUES (541,148020,29,'target','Angeles','uniform','W');
+INSERT INTO t2 VALUES (542,148021,29,'cited','semblance','hamming','');
+INSERT INTO t2 VALUES (543,148022,29,'congresswoman','taxed','disobedience','');
+INSERT INTO t2 VALUES (544,148101,29,'Katherine','Beatrice','captivated','A');
+INSERT INTO t2 VALUES (545,148102,29,'titter','retrace','transferals','A');
+INSERT INTO t2 VALUES (546,148201,29,'aspire','lockout','cartographer','A');
+INSERT INTO t2 VALUES (547,148401,29,'Mardis','grammatic','aims','FAS');
+INSERT INTO t2 VALUES (548,148402,29,'Nadia','helmsman','Pakistani','');
+INSERT INTO t2 VALUES (549,148501,29,'estimating','uniform','burglarized','FAS');
+INSERT INTO t2 VALUES (550,148502,29,'stuck','hamming','saucepans','A');
+INSERT INTO t2 VALUES (551,148503,29,'fifteenth','disobedience','lacerating','A');
+INSERT INTO t2 VALUES (552,148504,29,'Colombo','captivated','corny','');
+INSERT INTO t2 VALUES (553,148601,29,'survey','transferals','megabytes','FAS');
+INSERT INTO t2 VALUES (554,148602,29,'staffing','cartographer','chancellor','');
+INSERT INTO t2 VALUES (555,150701,29,'obtain','aims','bulk','A');
+INSERT INTO t2 VALUES (556,152101,29,'loaded','Pakistani','commits','A');
+INSERT INTO t2 VALUES (557,152102,29,'slaughtered','burglarized','meson','W');
+INSERT INTO t2 VALUES (558,155202,36,'lights','saucepans','deputies','');
+INSERT INTO t2 VALUES (559,155203,29,'circumference','lacerating','northeaster','A');
+INSERT INTO t2 VALUES (560,155204,29,'dull','corny','dipole','');
+INSERT INTO t2 VALUES (561,155205,29,'weekly','megabytes','machining','0');
+INSERT INTO t2 VALUES (562,156001,29,'wetness','chancellor','therefore','');
+INSERT INTO t2 VALUES (563,156002,29,'visualized','bulk','Telefunken','');
+INSERT INTO t2 VALUES (564,156102,29,'Tannenbaum','commits','salvaging','');
+INSERT INTO t2 VALUES (565,156301,29,'moribund','meson','Corinthianizes','A');
+INSERT INTO t2 VALUES (566,156302,29,'demultiplex','deputies','restlessly','A');
+INSERT INTO t2 VALUES (567,156303,29,'lockings','northeaster','bromides','');
+INSERT INTO t2 VALUES (568,156304,29,'thugs','dipole','generalized','A');
+INSERT INTO t2 VALUES (569,156305,29,'unnerves','machining','mishaps','');
+INSERT INTO t2 VALUES (570,156306,29,'abut','therefore','quelling','');
+INSERT INTO t2 VALUES (571,156501,29,'Chippewa','Telefunken','spiritual','A');
+INSERT INTO t2 VALUES (572,158001,29,'stratifications','salvaging','beguiles','FAS');
+INSERT INTO t2 VALUES (573,158002,29,'signaled','Corinthianizes','Trobriand','FAS');
+INSERT INTO t2 VALUES (574,158101,29,'Italianizes','restlessly','fleeing','A');
+INSERT INTO t2 VALUES (575,158102,29,'algorithmic','bromides','Armour','A');
+INSERT INTO t2 VALUES (576,158103,29,'paranoid','generalized','chin','A');
+INSERT INTO t2 VALUES (577,158201,29,'camping','mishaps','provers','A');
+INSERT INTO t2 VALUES (578,158202,29,'signifying','quelling','aeronautic','A');
+INSERT INTO t2 VALUES (579,158203,29,'Patrice','spiritual','voltage','W');
+INSERT INTO t2 VALUES (580,158204,29,'search','beguiles','sash','');
+INSERT INTO t2 VALUES (581,158301,29,'Angeles','Trobriand','anaerobic','A');
+INSERT INTO t2 VALUES (582,158302,29,'semblance','fleeing','simultaneous','A');
+INSERT INTO t2 VALUES (583,158303,29,'taxed','Armour','accumulating','A');
+INSERT INTO t2 VALUES (584,158304,29,'Beatrice','chin','Medusan','A');
+INSERT INTO t2 VALUES (585,158305,29,'retrace','provers','shouted','A');
+INSERT INTO t2 VALUES (586,158306,29,'lockout','aeronautic','freakish','');
+INSERT INTO t2 VALUES (587,158501,29,'grammatic','voltage','index','FAS');
+INSERT INTO t2 VALUES (588,160301,29,'helmsman','sash','commercially','');
+INSERT INTO t2 VALUES (589,166101,50,'uniform','anaerobic','mistiness','A');
+INSERT INTO t2 VALUES (590,166102,50,'hamming','simultaneous','endpoint','');
+INSERT INTO t2 VALUES (591,168001,29,'disobedience','accumulating','straight','A');
+INSERT INTO t2 VALUES (592,168002,29,'captivated','Medusan','flurried','');
+INSERT INTO t2 VALUES (593,168003,29,'transferals','shouted','denotative','A');
+INSERT INTO t2 VALUES (594,168101,29,'cartographer','freakish','coming','FAS');
+INSERT INTO t2 VALUES (595,168102,29,'aims','index','commencements','FAS');
+INSERT INTO t2 VALUES (596,168103,29,'Pakistani','commercially','gentleman','');
+INSERT INTO t2 VALUES (597,168104,29,'burglarized','mistiness','gifted','');
+INSERT INTO t2 VALUES (598,168202,29,'saucepans','endpoint','Shanghais','');
+INSERT INTO t2 VALUES (599,168301,29,'lacerating','straight','sportswriting','A');
+INSERT INTO t2 VALUES (600,168502,29,'corny','flurried','sloping','A');
+INSERT INTO t2 VALUES (601,168503,29,'megabytes','denotative','navies','');
+INSERT INTO t2 VALUES (602,168601,29,'chancellor','coming','leaflet','A');
+INSERT INTO t2 VALUES (603,173001,40,'bulk','commencements','shooter','');
+INSERT INTO t2 VALUES (604,173701,40,'commits','gentleman','Joplin','FAS');
+INSERT INTO t2 VALUES (605,173702,40,'meson','gifted','babies','');
+INSERT INTO t2 VALUES (606,176001,40,'deputies','Shanghais','subdivision','FAS');
+INSERT INTO t2 VALUES (607,176101,40,'northeaster','sportswriting','burstiness','W');
+INSERT INTO t2 VALUES (608,176201,40,'dipole','sloping','belted','FAS');
+INSERT INTO t2 VALUES (609,176401,40,'machining','navies','assails','FAS');
+INSERT INTO t2 VALUES (610,176501,40,'therefore','leaflet','admiring','W');
+INSERT INTO t2 VALUES (611,176601,40,'Telefunken','shooter','swaying','0');
+INSERT INTO t2 VALUES (612,176602,40,'salvaging','Joplin','Goldstine','FAS');
+INSERT INTO t2 VALUES (613,176603,40,'Corinthianizes','babies','fitting','');
+INSERT INTO t2 VALUES (614,178001,40,'restlessly','subdivision','Norwalk','W');
+INSERT INTO t2 VALUES (615,178002,40,'bromides','burstiness','weakening','W');
+INSERT INTO t2 VALUES (616,178003,40,'generalized','belted','analogy','FAS');
+INSERT INTO t2 VALUES (617,178004,40,'mishaps','assails','deludes','');
+INSERT INTO t2 VALUES (618,178005,40,'quelling','admiring','cokes','');
+INSERT INTO t2 VALUES (619,178006,40,'spiritual','swaying','Clayton','');
+INSERT INTO t2 VALUES (620,178007,40,'beguiles','Goldstine','exhausts','');
+INSERT INTO t2 VALUES (621,178008,40,'Trobriand','fitting','causality','');
+INSERT INTO t2 VALUES (622,178101,40,'fleeing','Norwalk','sating','FAS');
+INSERT INTO t2 VALUES (623,178102,40,'Armour','weakening','icon','');
+INSERT INTO t2 VALUES (624,178103,40,'chin','analogy','throttles','');
+INSERT INTO t2 VALUES (625,178201,40,'provers','deludes','communicants','FAS');
+INSERT INTO t2 VALUES (626,178202,40,'aeronautic','cokes','dehydrate','FAS');
+INSERT INTO t2 VALUES (627,178301,40,'voltage','Clayton','priceless','FAS');
+INSERT INTO t2 VALUES (628,178302,40,'sash','exhausts','publicly','');
+INSERT INTO t2 VALUES (629,178401,40,'anaerobic','causality','incidentals','FAS');
+INSERT INTO t2 VALUES (630,178402,40,'simultaneous','sating','commonplace','');
+INSERT INTO t2 VALUES (631,178403,40,'accumulating','icon','mumbles','');
+INSERT INTO t2 VALUES (632,178404,40,'Medusan','throttles','furthermore','W');
+INSERT INTO t2 VALUES (633,178501,40,'shouted','communicants','cautioned','W');
+INSERT INTO t2 VALUES (634,186002,37,'freakish','dehydrate','parametrized','A');
+INSERT INTO t2 VALUES (635,186102,37,'index','priceless','registration','A');
+INSERT INTO t2 VALUES (636,186201,40,'commercially','publicly','sadly','FAS');
+INSERT INTO t2 VALUES (637,186202,40,'mistiness','incidentals','positioning','');
+INSERT INTO t2 VALUES (638,186203,40,'endpoint','commonplace','babysitting','');
+INSERT INTO t2 VALUES (639,186302,37,'straight','mumbles','eternal','A');
+INSERT INTO t2 VALUES (640,188007,37,'flurried','furthermore','hoarder','');
+INSERT INTO t2 VALUES (641,188008,37,'denotative','cautioned','congregates','');
+INSERT INTO t2 VALUES (642,188009,37,'coming','parametrized','rains','');
+INSERT INTO t2 VALUES (643,188010,37,'commencements','registration','workers','W');
+INSERT INTO t2 VALUES (644,188011,37,'gentleman','sadly','sags','A');
+INSERT INTO t2 VALUES (645,188012,37,'gifted','positioning','unplug','W');
+INSERT INTO t2 VALUES (646,188013,37,'Shanghais','babysitting','garage','A');
+INSERT INTO t2 VALUES (647,188014,37,'sportswriting','eternal','boulder','A');
+INSERT INTO t2 VALUES (648,188015,37,'sloping','hoarder','hollowly','A');
+INSERT INTO t2 VALUES (649,188016,37,'navies','congregates','specifics','');
+INSERT INTO t2 VALUES (650,188017,37,'leaflet','rains','Teresa','');
+INSERT INTO t2 VALUES (651,188102,37,'shooter','workers','Winsett','');
+INSERT INTO t2 VALUES (652,188103,37,'Joplin','sags','convenient','A');
+INSERT INTO t2 VALUES (653,188202,37,'babies','unplug','buckboards','FAS');
+INSERT INTO t2 VALUES (654,188301,40,'subdivision','garage','amenities','');
+INSERT INTO t2 VALUES (655,188302,40,'burstiness','boulder','resplendent','FAS');
+INSERT INTO t2 VALUES (656,188303,40,'belted','hollowly','priding','FAS');
+INSERT INTO t2 VALUES (657,188401,37,'assails','specifics','configurations','');
+INSERT INTO t2 VALUES (658,188402,37,'admiring','Teresa','untidiness','A');
+INSERT INTO t2 VALUES (659,188503,37,'swaying','Winsett','Brice','W');
+INSERT INTO t2 VALUES (660,188504,37,'Goldstine','convenient','sews','FAS');
+INSERT INTO t2 VALUES (661,188505,37,'fitting','buckboards','participated','');
+INSERT INTO t2 VALUES (662,190701,37,'Norwalk','amenities','Simon','FAS');
+INSERT INTO t2 VALUES (663,190703,50,'weakening','resplendent','certificates','');
+INSERT INTO t2 VALUES (664,191701,37,'analogy','priding','Fitzpatrick','');
+INSERT INTO t2 VALUES (665,191702,37,'deludes','configurations','Evanston','A');
+INSERT INTO t2 VALUES (666,191703,37,'cokes','untidiness','misted','');
+INSERT INTO t2 VALUES (667,196001,37,'Clayton','Brice','textures','A');
+INSERT INTO t2 VALUES (668,196002,37,'exhausts','sews','save','');
+INSERT INTO t2 VALUES (669,196003,37,'causality','participated','count','');
+INSERT INTO t2 VALUES (670,196101,37,'sating','Simon','rightful','A');
+INSERT INTO t2 VALUES (671,196103,37,'icon','certificates','chaperone','');
+INSERT INTO t2 VALUES (672,196104,37,'throttles','Fitzpatrick','Lizzy','A');
+INSERT INTO t2 VALUES (673,196201,37,'communicants','Evanston','clenched','A');
+INSERT INTO t2 VALUES (674,196202,37,'dehydrate','misted','effortlessly','');
+INSERT INTO t2 VALUES (675,196203,37,'priceless','textures','accessed','');
+INSERT INTO t2 VALUES (676,198001,37,'publicly','save','beaters','A');
+INSERT INTO t2 VALUES (677,198003,37,'incidentals','count','Hornblower','FAS');
+INSERT INTO t2 VALUES (678,198004,37,'commonplace','rightful','vests','A');
+INSERT INTO t2 VALUES (679,198005,37,'mumbles','chaperone','indulgences','FAS');
+INSERT INTO t2 VALUES (680,198006,37,'furthermore','Lizzy','infallibly','A');
+INSERT INTO t2 VALUES (681,198007,37,'cautioned','clenched','unwilling','FAS');
+INSERT INTO t2 VALUES (682,198008,37,'parametrized','effortlessly','excrete','FAS');
+INSERT INTO t2 VALUES (683,198009,37,'registration','accessed','spools','A');
+INSERT INTO t2 VALUES (684,198010,37,'sadly','beaters','crunches','FAS');
+INSERT INTO t2 VALUES (685,198011,37,'positioning','Hornblower','overestimating','FAS');
+INSERT INTO t2 VALUES (686,198012,37,'babysitting','vests','ineffective','');
+INSERT INTO t2 VALUES (687,198013,37,'eternal','indulgences','humiliation','A');
+INSERT INTO t2 VALUES (688,198014,37,'hoarder','infallibly','sophomore','');
+INSERT INTO t2 VALUES (689,198015,37,'congregates','unwilling','star','');
+INSERT INTO t2 VALUES (690,198017,37,'rains','excrete','rifles','');
+INSERT INTO t2 VALUES (691,198018,37,'workers','spools','dialysis','');
+INSERT INTO t2 VALUES (692,198019,37,'sags','crunches','arriving','');
+INSERT INTO t2 VALUES (693,198020,37,'unplug','overestimating','indulge','');
+INSERT INTO t2 VALUES (694,198021,37,'garage','ineffective','clockers','');
+INSERT INTO t2 VALUES (695,198022,37,'boulder','humiliation','languages','');
+INSERT INTO t2 VALUES (696,198023,50,'hollowly','sophomore','Antarctica','A');
+INSERT INTO t2 VALUES (697,198024,37,'specifics','star','percentage','');
+INSERT INTO t2 VALUES (698,198101,37,'Teresa','rifles','ceiling','A');
+INSERT INTO t2 VALUES (699,198103,37,'Winsett','dialysis','specification','');
+INSERT INTO t2 VALUES (700,198105,37,'convenient','arriving','regimented','A');
+INSERT INTO t2 VALUES (701,198106,37,'buckboards','indulge','ciphers','');
+INSERT INTO t2 VALUES (702,198201,37,'amenities','clockers','pictures','A');
+INSERT INTO t2 VALUES (703,198204,37,'resplendent','languages','serpents','A');
+INSERT INTO t2 VALUES (704,198301,53,'priding','Antarctica','allot','A');
+INSERT INTO t2 VALUES (705,198302,53,'configurations','percentage','realized','A');
+INSERT INTO t2 VALUES (706,198303,53,'untidiness','ceiling','mayoral','A');
+INSERT INTO t2 VALUES (707,198304,53,'Brice','specification','opaquely','A');
+INSERT INTO t2 VALUES (708,198401,37,'sews','regimented','hostess','FAS');
+INSERT INTO t2 VALUES (709,198402,37,'participated','ciphers','fiftieth','');
+INSERT INTO t2 VALUES (710,198403,37,'Simon','pictures','incorrectly','');
+INSERT INTO t2 VALUES (711,202101,37,'certificates','serpents','decomposition','FAS');
+INSERT INTO t2 VALUES (712,202301,37,'Fitzpatrick','allot','stranglings','');
+INSERT INTO t2 VALUES (713,202302,37,'Evanston','realized','mixture','FAS');
+INSERT INTO t2 VALUES (714,202303,37,'misted','mayoral','electroencephalography','FAS');
+INSERT INTO t2 VALUES (715,202304,37,'textures','opaquely','similarities','FAS');
+INSERT INTO t2 VALUES (716,202305,37,'save','hostess','charges','W');
+INSERT INTO t2 VALUES (717,202601,37,'count','fiftieth','freest','FAS');
+INSERT INTO t2 VALUES (718,202602,37,'rightful','incorrectly','Greenberg','FAS');
+INSERT INTO t2 VALUES (719,202605,37,'chaperone','decomposition','tinting','');
+INSERT INTO t2 VALUES (720,202606,37,'Lizzy','stranglings','expelled','W');
+INSERT INTO t2 VALUES (721,202607,37,'clenched','mixture','warm','');
+INSERT INTO t2 VALUES (722,202901,37,'effortlessly','electroencephalography','smoothed','');
+INSERT INTO t2 VALUES (723,202902,37,'accessed','similarities','deductions','FAS');
+INSERT INTO t2 VALUES (724,202903,37,'beaters','charges','Romano','W');
+INSERT INTO t2 VALUES (725,202904,37,'Hornblower','freest','bitterroot','');
+INSERT INTO t2 VALUES (726,202907,37,'vests','Greenberg','corset','');
+INSERT INTO t2 VALUES (727,202908,37,'indulgences','tinting','securing','');
+INSERT INTO t2 VALUES (728,203101,37,'infallibly','expelled','environing','FAS');
+INSERT INTO t2 VALUES (729,203103,37,'unwilling','warm','cute','');
+INSERT INTO t2 VALUES (730,203104,37,'excrete','smoothed','Crays','');
+INSERT INTO t2 VALUES (731,203105,37,'spools','deductions','heiress','FAS');
+INSERT INTO t2 VALUES (732,203401,37,'crunches','Romano','inform','FAS');
+INSERT INTO t2 VALUES (733,203402,37,'overestimating','bitterroot','avenge','');
+INSERT INTO t2 VALUES (734,203404,37,'ineffective','corset','universals','');
+INSERT INTO t2 VALUES (735,203901,37,'humiliation','securing','Kinsey','W');
+INSERT INTO t2 VALUES (736,203902,37,'sophomore','environing','ravines','FAS');
+INSERT INTO t2 VALUES (737,203903,37,'star','cute','bestseller','');
+INSERT INTO t2 VALUES (738,203906,37,'rifles','Crays','equilibrium','');
+INSERT INTO t2 VALUES (739,203907,37,'dialysis','heiress','extents','0');
+INSERT INTO t2 VALUES (740,203908,37,'arriving','inform','relatively','');
+INSERT INTO t2 VALUES (741,203909,37,'indulge','avenge','pressure','FAS');
+INSERT INTO t2 VALUES (742,206101,37,'clockers','universals','critiques','FAS');
+INSERT INTO t2 VALUES (743,206201,37,'languages','Kinsey','befouled','');
+INSERT INTO t2 VALUES (744,206202,37,'Antarctica','ravines','rightfully','FAS');
+INSERT INTO t2 VALUES (745,206203,37,'percentage','bestseller','mechanizing','FAS');
+INSERT INTO t2 VALUES (746,206206,37,'ceiling','equilibrium','Latinizes','');
+INSERT INTO t2 VALUES (747,206207,37,'specification','extents','timesharing','');
+INSERT INTO t2 VALUES (748,206208,37,'regimented','relatively','Aden','');
+INSERT INTO t2 VALUES (749,208001,37,'ciphers','pressure','embassies','');
+INSERT INTO t2 VALUES (750,208002,37,'pictures','critiques','males','FAS');
+INSERT INTO t2 VALUES (751,208003,37,'serpents','befouled','shapelessly','FAS');
+INSERT INTO t2 VALUES (752,208004,37,'allot','rightfully','genres','FAS');
+INSERT INTO t2 VALUES (753,208008,37,'realized','mechanizing','mastering','');
+INSERT INTO t2 VALUES (754,208009,37,'mayoral','Latinizes','Newtonian','');
+INSERT INTO t2 VALUES (755,208010,37,'opaquely','timesharing','finishers','FAS');
+INSERT INTO t2 VALUES (756,208011,37,'hostess','Aden','abates','');
+INSERT INTO t2 VALUES (757,208101,37,'fiftieth','embassies','teem','');
+INSERT INTO t2 VALUES (758,208102,37,'incorrectly','males','kiting','FAS');
+INSERT INTO t2 VALUES (759,208103,37,'decomposition','shapelessly','stodgy','FAS');
+INSERT INTO t2 VALUES (760,208104,37,'stranglings','genres','scalps','FAS');
+INSERT INTO t2 VALUES (761,208105,37,'mixture','mastering','feed','FAS');
+INSERT INTO t2 VALUES (762,208110,37,'electroencephalography','Newtonian','guitars','');
+INSERT INTO t2 VALUES (763,208111,37,'similarities','finishers','airships','');
+INSERT INTO t2 VALUES (764,208112,37,'charges','abates','store','');
+INSERT INTO t2 VALUES (765,208113,37,'freest','teem','denounces','');
+INSERT INTO t2 VALUES (766,208201,37,'Greenberg','kiting','Pyle','FAS');
+INSERT INTO t2 VALUES (767,208203,37,'tinting','stodgy','Saxony','');
+INSERT INTO t2 VALUES (768,208301,37,'expelled','scalps','serializations','FAS');
+INSERT INTO t2 VALUES (769,208302,37,'warm','feed','Peruvian','FAS');
+INSERT INTO t2 VALUES (770,208305,37,'smoothed','guitars','taxonomically','FAS');
+INSERT INTO t2 VALUES (771,208401,37,'deductions','airships','kingdom','A');
+INSERT INTO t2 VALUES (772,208402,37,'Romano','store','stint','A');
+INSERT INTO t2 VALUES (773,208403,37,'bitterroot','denounces','Sault','A');
+INSERT INTO t2 VALUES (774,208404,37,'corset','Pyle','faithful','');
+INSERT INTO t2 VALUES (775,208501,37,'securing','Saxony','Ganymede','FAS');
+INSERT INTO t2 VALUES (776,208502,37,'environing','serializations','tidiness','FAS');
+INSERT INTO t2 VALUES (777,208503,37,'cute','Peruvian','gainful','FAS');
+INSERT INTO t2 VALUES (778,208504,37,'Crays','taxonomically','contrary','FAS');
+INSERT INTO t2 VALUES (779,208505,37,'heiress','kingdom','Tipperary','FAS');
+INSERT INTO t2 VALUES (780,210101,37,'inform','stint','tropics','W');
+INSERT INTO t2 VALUES (781,210102,37,'avenge','Sault','theorizers','');
+INSERT INTO t2 VALUES (782,210103,37,'universals','faithful','renew','0');
+INSERT INTO t2 VALUES (783,210104,37,'Kinsey','Ganymede','already','');
+INSERT INTO t2 VALUES (784,210105,37,'ravines','tidiness','terminal','');
+INSERT INTO t2 VALUES (785,210106,37,'bestseller','gainful','Hegelian','');
+INSERT INTO t2 VALUES (786,210107,37,'equilibrium','contrary','hypothesizer','');
+INSERT INTO t2 VALUES (787,210401,37,'extents','Tipperary','warningly','FAS');
+INSERT INTO t2 VALUES (788,213201,37,'relatively','tropics','journalizing','FAS');
+INSERT INTO t2 VALUES (789,213203,37,'pressure','theorizers','nested','');
+INSERT INTO t2 VALUES (790,213204,37,'critiques','renew','Lars','');
+INSERT INTO t2 VALUES (791,213205,37,'befouled','already','saplings','');
+INSERT INTO t2 VALUES (792,213206,37,'rightfully','terminal','foothill','');
+INSERT INTO t2 VALUES (793,213207,37,'mechanizing','Hegelian','labeled','');
+INSERT INTO t2 VALUES (794,216101,37,'Latinizes','hypothesizer','imperiously','FAS');
+INSERT INTO t2 VALUES (795,216103,37,'timesharing','warningly','reporters','FAS');
+INSERT INTO t2 VALUES (796,218001,37,'Aden','journalizing','furnishings','FAS');
+INSERT INTO t2 VALUES (797,218002,37,'embassies','nested','precipitable','FAS');
+INSERT INTO t2 VALUES (798,218003,37,'males','Lars','discounts','FAS');
+INSERT INTO t2 VALUES (799,218004,37,'shapelessly','saplings','excises','FAS');
+INSERT INTO t2 VALUES (800,143503,50,'genres','foothill','Stalin','');
+INSERT INTO t2 VALUES (801,218006,37,'mastering','labeled','despot','FAS');
+INSERT INTO t2 VALUES (802,218007,37,'Newtonian','imperiously','ripeness','FAS');
+INSERT INTO t2 VALUES (803,218008,37,'finishers','reporters','Arabia','');
+INSERT INTO t2 VALUES (804,218009,37,'abates','furnishings','unruly','');
+INSERT INTO t2 VALUES (805,218010,37,'teem','precipitable','mournfulness','');
+INSERT INTO t2 VALUES (806,218011,37,'kiting','discounts','boom','FAS');
+INSERT INTO t2 VALUES (807,218020,37,'stodgy','excises','slaughter','A');
+INSERT INTO t2 VALUES (808,218021,50,'scalps','Stalin','Sabine','');
+INSERT INTO t2 VALUES (809,218022,37,'feed','despot','handy','FAS');
+INSERT INTO t2 VALUES (810,218023,37,'guitars','ripeness','rural','');
+INSERT INTO t2 VALUES (811,218024,37,'airships','Arabia','organizer','');
+INSERT INTO t2 VALUES (812,218101,37,'store','unruly','shipyard','FAS');
+INSERT INTO t2 VALUES (813,218102,37,'denounces','mournfulness','civics','FAS');
+INSERT INTO t2 VALUES (814,218103,37,'Pyle','boom','inaccuracy','FAS');
+INSERT INTO t2 VALUES (815,218201,37,'Saxony','slaughter','rules','FAS');
+INSERT INTO t2 VALUES (816,218202,37,'serializations','Sabine','juveniles','FAS');
+INSERT INTO t2 VALUES (817,218203,37,'Peruvian','handy','comprised','W');
+INSERT INTO t2 VALUES (818,218204,37,'taxonomically','rural','investigations','');
+INSERT INTO t2 VALUES (819,218205,37,'kingdom','organizer','stabilizes','A');
+INSERT INTO t2 VALUES (820,218301,37,'stint','shipyard','seminaries','FAS');
+INSERT INTO t2 VALUES (821,218302,37,'Sault','civics','Hunter','A');
+INSERT INTO t2 VALUES (822,218401,37,'faithful','inaccuracy','sporty','FAS');
+INSERT INTO t2 VALUES (823,218402,37,'Ganymede','rules','test','FAS');
+INSERT INTO t2 VALUES (824,218403,37,'tidiness','juveniles','weasels','');
+INSERT INTO t2 VALUES (825,218404,37,'gainful','comprised','CERN','');
+INSERT INTO t2 VALUES (826,218407,37,'contrary','investigations','tempering','');
+INSERT INTO t2 VALUES (827,218408,37,'Tipperary','stabilizes','afore','FAS');
+INSERT INTO t2 VALUES (828,218409,37,'tropics','seminaries','Galatean','');
+INSERT INTO t2 VALUES (829,218410,37,'theorizers','Hunter','techniques','W');
+INSERT INTO t2 VALUES (830,226001,37,'renew','sporty','error','');
+INSERT INTO t2 VALUES (831,226002,37,'already','test','veranda','');
+INSERT INTO t2 VALUES (832,226003,37,'terminal','weasels','severely','');
+INSERT INTO t2 VALUES (833,226004,37,'Hegelian','CERN','Cassites','FAS');
+INSERT INTO t2 VALUES (834,226005,37,'hypothesizer','tempering','forthcoming','');
+INSERT INTO t2 VALUES (835,226006,37,'warningly','afore','guides','');
+INSERT INTO t2 VALUES (836,226007,37,'journalizing','Galatean','vanish','FAS');
+INSERT INTO t2 VALUES (837,226008,37,'nested','techniques','lied','A');
+INSERT INTO t2 VALUES (838,226203,37,'Lars','error','sawtooth','FAS');
+INSERT INTO t2 VALUES (839,226204,37,'saplings','veranda','fated','FAS');
+INSERT INTO t2 VALUES (840,226205,37,'foothill','severely','gradually','');
+INSERT INTO t2 VALUES (841,226206,37,'labeled','Cassites','widens','');
+INSERT INTO t2 VALUES (842,226207,37,'imperiously','forthcoming','preclude','');
+INSERT INTO t2 VALUES (843,226208,37,'reporters','guides','Jobrel','');
+INSERT INTO t2 VALUES (844,226209,37,'furnishings','vanish','hooker','');
+INSERT INTO t2 VALUES (845,226210,37,'precipitable','lied','rainstorm','');
+INSERT INTO t2 VALUES (846,226211,37,'discounts','sawtooth','disconnects','');
+INSERT INTO t2 VALUES (847,228001,37,'excises','fated','cruelty','');
+INSERT INTO t2 VALUES (848,228004,37,'Stalin','gradually','exponentials','A');
+INSERT INTO t2 VALUES (849,228005,37,'despot','widens','affective','A');
+INSERT INTO t2 VALUES (850,228006,37,'ripeness','preclude','arteries','');
+INSERT INTO t2 VALUES (851,228007,37,'Arabia','Jobrel','Crosby','FAS');
+INSERT INTO t2 VALUES (852,228008,37,'unruly','hooker','acquaint','');
+INSERT INTO t2 VALUES (853,228009,37,'mournfulness','rainstorm','evenhandedly','');
+INSERT INTO t2 VALUES (854,228101,37,'boom','disconnects','percentage','');
+INSERT INTO t2 VALUES (855,228108,37,'slaughter','cruelty','disobedience','');
+INSERT INTO t2 VALUES (856,228109,37,'Sabine','exponentials','humility','');
+INSERT INTO t2 VALUES (857,228110,37,'handy','affective','gleaning','A');
+INSERT INTO t2 VALUES (858,228111,37,'rural','arteries','petted','A');
+INSERT INTO t2 VALUES (859,228112,37,'organizer','Crosby','bloater','A');
+INSERT INTO t2 VALUES (860,228113,37,'shipyard','acquaint','minion','A');
+INSERT INTO t2 VALUES (861,228114,37,'civics','evenhandedly','marginal','A');
+INSERT INTO t2 VALUES (862,228115,37,'inaccuracy','percentage','apiary','A');
+INSERT INTO t2 VALUES (863,228116,37,'rules','disobedience','measures','');
+INSERT INTO t2 VALUES (864,228117,37,'juveniles','humility','precaution','');
+INSERT INTO t2 VALUES (865,228118,37,'comprised','gleaning','repelled','');
+INSERT INTO t2 VALUES (866,228119,37,'investigations','petted','primary','FAS');
+INSERT INTO t2 VALUES (867,228120,37,'stabilizes','bloater','coverings','');
+INSERT INTO t2 VALUES (868,228121,37,'seminaries','minion','Artemia','A');
+INSERT INTO t2 VALUES (869,228122,37,'Hunter','marginal','navigate','');
+INSERT INTO t2 VALUES (870,228201,37,'sporty','apiary','spatial','');
+INSERT INTO t2 VALUES (871,228206,37,'test','measures','Gurkha','');
+INSERT INTO t2 VALUES (872,228207,37,'weasels','precaution','meanwhile','A');
+INSERT INTO t2 VALUES (873,228208,37,'CERN','repelled','Melinda','A');
+INSERT INTO t2 VALUES (874,228209,37,'tempering','primary','Butterfield','');
+INSERT INTO t2 VALUES (875,228210,37,'afore','coverings','Aldrich','A');
+INSERT INTO t2 VALUES (876,228211,37,'Galatean','Artemia','previewing','A');
+INSERT INTO t2 VALUES (877,228212,37,'techniques','navigate','glut','A');
+INSERT INTO t2 VALUES (878,228213,37,'error','spatial','unaffected','');
+INSERT INTO t2 VALUES (879,228214,37,'veranda','Gurkha','inmate','');
+INSERT INTO t2 VALUES (880,228301,37,'severely','meanwhile','mineral','');
+INSERT INTO t2 VALUES (881,228305,37,'Cassites','Melinda','impending','A');
+INSERT INTO t2 VALUES (882,228306,37,'forthcoming','Butterfield','meditation','A');
+INSERT INTO t2 VALUES (883,228307,37,'guides','Aldrich','ideas','');
+INSERT INTO t2 VALUES (884,228308,37,'vanish','previewing','miniaturizes','W');
+INSERT INTO t2 VALUES (885,228309,37,'lied','glut','lewdly','');
+INSERT INTO t2 VALUES (886,228310,37,'sawtooth','unaffected','title','');
+INSERT INTO t2 VALUES (887,228311,37,'fated','inmate','youthfulness','');
+INSERT INTO t2 VALUES (888,228312,37,'gradually','mineral','creak','FAS');
+INSERT INTO t2 VALUES (889,228313,37,'widens','impending','Chippewa','');
+INSERT INTO t2 VALUES (890,228314,37,'preclude','meditation','clamored','');
+INSERT INTO t2 VALUES (891,228401,65,'Jobrel','ideas','freezes','');
+INSERT INTO t2 VALUES (892,228402,65,'hooker','miniaturizes','forgivably','FAS');
+INSERT INTO t2 VALUES (893,228403,65,'rainstorm','lewdly','reduce','FAS');
+INSERT INTO t2 VALUES (894,228404,65,'disconnects','title','McGovern','W');
+INSERT INTO t2 VALUES (895,228405,65,'cruelty','youthfulness','Nazis','W');
+INSERT INTO t2 VALUES (896,228406,65,'exponentials','creak','epistle','W');
+INSERT INTO t2 VALUES (897,228407,65,'affective','Chippewa','socializes','W');
+INSERT INTO t2 VALUES (898,228408,65,'arteries','clamored','conceptions','');
+INSERT INTO t2 VALUES (899,228409,65,'Crosby','freezes','Kevin','');
+INSERT INTO t2 VALUES (900,228410,65,'acquaint','forgivably','uncovering','');
+INSERT INTO t2 VALUES (901,230301,37,'evenhandedly','reduce','chews','FAS');
+INSERT INTO t2 VALUES (902,230302,37,'percentage','McGovern','appendixes','FAS');
+INSERT INTO t2 VALUES (903,230303,37,'disobedience','Nazis','raining','');
+INSERT INTO t2 VALUES (904,018062,37,'humility','epistle','infest','');
+INSERT INTO t2 VALUES (905,230501,37,'gleaning','socializes','compartment','');
+INSERT INTO t2 VALUES (906,230502,37,'petted','conceptions','minting','');
+INSERT INTO t2 VALUES (907,230503,37,'bloater','Kevin','ducks','');
+INSERT INTO t2 VALUES (908,230504,37,'minion','uncovering','roped','A');
+INSERT INTO t2 VALUES (909,230505,37,'marginal','chews','waltz','');
+INSERT INTO t2 VALUES (910,230506,37,'apiary','appendixes','Lillian','');
+INSERT INTO t2 VALUES (911,230507,37,'measures','raining','repressions','A');
+INSERT INTO t2 VALUES (912,230508,37,'precaution','infest','chillingly','');
+INSERT INTO t2 VALUES (913,230509,37,'repelled','compartment','noncritical','');
+INSERT INTO t2 VALUES (914,230901,37,'primary','minting','lithograph','');
+INSERT INTO t2 VALUES (915,230902,37,'coverings','ducks','spongers','');
+INSERT INTO t2 VALUES (916,230903,37,'Artemia','roped','parenthood','');
+INSERT INTO t2 VALUES (917,230904,37,'navigate','waltz','posed','');
+INSERT INTO t2 VALUES (918,230905,37,'spatial','Lillian','instruments','');
+INSERT INTO t2 VALUES (919,230906,37,'Gurkha','repressions','filial','');
+INSERT INTO t2 VALUES (920,230907,37,'meanwhile','chillingly','fixedly','');
+INSERT INTO t2 VALUES (921,230908,37,'Melinda','noncritical','relives','');
+INSERT INTO t2 VALUES (922,230909,37,'Butterfield','lithograph','Pandora','');
+INSERT INTO t2 VALUES (923,230910,37,'Aldrich','spongers','watering','A');
+INSERT INTO t2 VALUES (924,230911,37,'previewing','parenthood','ungrateful','');
+INSERT INTO t2 VALUES (925,230912,37,'glut','posed','secures','');
+INSERT INTO t2 VALUES (926,230913,37,'unaffected','instruments','chastisers','');
+INSERT INTO t2 VALUES (927,230914,37,'inmate','filial','icon','');
+INSERT INTO t2 VALUES (928,231304,37,'mineral','fixedly','reuniting','A');
+INSERT INTO t2 VALUES (929,231305,37,'impending','relives','imagining','A');
+INSERT INTO t2 VALUES (930,231306,37,'meditation','Pandora','abiding','A');
+INSERT INTO t2 VALUES (931,231307,37,'ideas','watering','omnisciently','');
+INSERT INTO t2 VALUES (932,231308,37,'miniaturizes','ungrateful','Britannic','');
+INSERT INTO t2 VALUES (933,231309,37,'lewdly','secures','scholastics','A');
+INSERT INTO t2 VALUES (934,231310,37,'title','chastisers','mechanics','A');
+INSERT INTO t2 VALUES (935,231311,37,'youthfulness','icon','humidly','A');
+INSERT INTO t2 VALUES (936,231312,37,'creak','reuniting','masterpiece','');
+INSERT INTO t2 VALUES (937,231313,37,'Chippewa','imagining','however','');
+INSERT INTO t2 VALUES (938,231314,37,'clamored','abiding','Mendelian','');
+INSERT INTO t2 VALUES (939,231315,37,'freezes','omnisciently','jarred','');
+INSERT INTO t2 VALUES (940,232102,37,'forgivably','Britannic','scolds','');
+INSERT INTO t2 VALUES (941,232103,37,'reduce','scholastics','infatuate','');
+INSERT INTO t2 VALUES (942,232104,37,'McGovern','mechanics','willed','A');
+INSERT INTO t2 VALUES (943,232105,37,'Nazis','humidly','joyfully','');
+INSERT INTO t2 VALUES (944,232106,37,'epistle','masterpiece','Microsoft','');
+INSERT INTO t2 VALUES (945,232107,37,'socializes','however','fibrosities','');
+INSERT INTO t2 VALUES (946,232108,37,'conceptions','Mendelian','Baltimorean','');
+INSERT INTO t2 VALUES (947,232601,37,'Kevin','jarred','equestrian','');
+INSERT INTO t2 VALUES (948,232602,37,'uncovering','scolds','Goodrich','');
+INSERT INTO t2 VALUES (949,232603,37,'chews','infatuate','apish','A');
+INSERT INTO t2 VALUES (950,232605,37,'appendixes','willed','Adlerian','');
+INSERT INTO t2 VALUES (5950,1232605,37,'appendixes','willed','Adlerian','');
+INSERT INTO t2 VALUES (5951,1232606,37,'appendixes','willed','Adlerian','');
+INSERT INTO t2 VALUES (5952,1232607,37,'appendixes','willed','Adlerian','');
+INSERT INTO t2 VALUES (5953,1232608,37,'appendixes','willed','Adlerian','');
+INSERT INTO t2 VALUES (5954,1232609,37,'appendixes','willed','Adlerian','');
+INSERT INTO t2 VALUES (951,232606,37,'raining','joyfully','Tropez','');
+INSERT INTO t2 VALUES (952,232607,37,'infest','Microsoft','nouns','');
+INSERT INTO t2 VALUES (953,232608,37,'compartment','fibrosities','distracting','');
+INSERT INTO t2 VALUES (954,232609,37,'minting','Baltimorean','mutton','');
+INSERT INTO t2 VALUES (955,236104,37,'ducks','equestrian','bridgeable','A');
+INSERT INTO t2 VALUES (956,236105,37,'roped','Goodrich','stickers','A');
+INSERT INTO t2 VALUES (957,236106,37,'waltz','apish','transcontinental','A');
+INSERT INTO t2 VALUES (958,236107,37,'Lillian','Adlerian','amateurish','');
+INSERT INTO t2 VALUES (959,236108,37,'repressions','Tropez','Gandhian','');
+INSERT INTO t2 VALUES (960,236109,37,'chillingly','nouns','stratified','');
+INSERT INTO t2 VALUES (961,236110,37,'noncritical','distracting','chamberlains','');
+INSERT INTO t2 VALUES (962,236111,37,'lithograph','mutton','creditably','');
+INSERT INTO t2 VALUES (963,236112,37,'spongers','bridgeable','philosophic','');
+INSERT INTO t2 VALUES (964,236113,37,'parenthood','stickers','ores','');
+INSERT INTO t2 VALUES (965,238005,37,'posed','transcontinental','Carleton','');
+INSERT INTO t2 VALUES (966,238006,37,'instruments','amateurish','tape','A');
+INSERT INTO t2 VALUES (967,238007,37,'filial','Gandhian','afloat','A');
+INSERT INTO t2 VALUES (968,238008,37,'fixedly','stratified','goodness','A');
+INSERT INTO t2 VALUES (969,238009,37,'relives','chamberlains','welcoming','');
+INSERT INTO t2 VALUES (970,238010,37,'Pandora','creditably','Pinsky','FAS');
+INSERT INTO t2 VALUES (971,238011,37,'watering','philosophic','halting','');
+INSERT INTO t2 VALUES (972,238012,37,'ungrateful','ores','bibliography','');
+INSERT INTO t2 VALUES (973,238013,37,'secures','Carleton','decoding','');
+INSERT INTO t2 VALUES (974,240401,41,'chastisers','tape','variance','A');
+INSERT INTO t2 VALUES (975,240402,41,'icon','afloat','allowed','A');
+INSERT INTO t2 VALUES (976,240901,41,'reuniting','goodness','dire','A');
+INSERT INTO t2 VALUES (977,240902,41,'imagining','welcoming','dub','A');
+INSERT INTO t2 VALUES (978,241801,41,'abiding','Pinsky','poisoning','');
+INSERT INTO t2 VALUES (979,242101,41,'omnisciently','halting','Iraqis','A');
+INSERT INTO t2 VALUES (980,242102,41,'Britannic','bibliography','heaving','');
+INSERT INTO t2 VALUES (981,242201,41,'scholastics','decoding','population','A');
+INSERT INTO t2 VALUES (982,242202,41,'mechanics','variance','bomb','A');
+INSERT INTO t2 VALUES (983,242501,41,'humidly','allowed','Majorca','A');
+INSERT INTO t2 VALUES (984,242502,41,'masterpiece','dire','Gershwins','');
+INSERT INTO t2 VALUES (985,246201,41,'however','dub','explorers','');
+INSERT INTO t2 VALUES (986,246202,41,'Mendelian','poisoning','libretto','A');
+INSERT INTO t2 VALUES (987,246203,41,'jarred','Iraqis','occurred','');
+INSERT INTO t2 VALUES (988,246204,41,'scolds','heaving','Lagos','');
+INSERT INTO t2 VALUES (989,246205,41,'infatuate','population','rats','');
+INSERT INTO t2 VALUES (990,246301,41,'willed','bomb','bankruptcies','A');
+INSERT INTO t2 VALUES (991,246302,41,'joyfully','Majorca','crying','');
+INSERT INTO t2 VALUES (992,248001,41,'Microsoft','Gershwins','unexpected','');
+INSERT INTO t2 VALUES (993,248002,41,'fibrosities','explorers','accessed','A');
+INSERT INTO t2 VALUES (994,248003,41,'Baltimorean','libretto','colorful','A');
+INSERT INTO t2 VALUES (995,248004,41,'equestrian','occurred','versatility','A');
+INSERT INTO t2 VALUES (996,248005,41,'Goodrich','Lagos','cosy','');
+INSERT INTO t2 VALUES (997,248006,41,'apish','rats','Darius','A');
+INSERT INTO t2 VALUES (998,248007,41,'Adlerian','bankruptcies','mastering','A');
+INSERT INTO t2 VALUES (999,248008,41,'Tropez','crying','Asiaticizations','A');
+INSERT INTO t2 VALUES (1000,248009,41,'nouns','unexpected','offerers','A');
+INSERT INTO t2 VALUES (1001,248010,41,'distracting','accessed','uncles','A');
+INSERT INTO t2 VALUES (1002,248011,41,'mutton','colorful','sleepwalk','');
+INSERT INTO t2 VALUES (1003,248012,41,'bridgeable','versatility','Ernestine','');
+INSERT INTO t2 VALUES (1004,248013,41,'stickers','cosy','checksumming','');
+INSERT INTO t2 VALUES (1005,248014,41,'transcontinental','Darius','stopped','');
+INSERT INTO t2 VALUES (1006,248015,41,'amateurish','mastering','sicker','');
+INSERT INTO t2 VALUES (1007,248016,41,'Gandhian','Asiaticizations','Italianization','');
+INSERT INTO t2 VALUES (1008,248017,41,'stratified','offerers','alphabetic','');
+INSERT INTO t2 VALUES (1009,248018,41,'chamberlains','uncles','pharmaceutic','');
+INSERT INTO t2 VALUES (1010,248019,41,'creditably','sleepwalk','creator','');
+INSERT INTO t2 VALUES (1011,248020,41,'philosophic','Ernestine','chess','');
+INSERT INTO t2 VALUES (1012,248021,41,'ores','checksumming','charcoal','');
+INSERT INTO t2 VALUES (1013,248101,41,'Carleton','stopped','Epiphany','A');
+INSERT INTO t2 VALUES (1014,248102,41,'tape','sicker','bulldozes','A');
+INSERT INTO t2 VALUES (1015,248201,41,'afloat','Italianization','Pygmalion','A');
+INSERT INTO t2 VALUES (1016,248202,41,'goodness','alphabetic','caressing','A');
+INSERT INTO t2 VALUES (1017,248203,41,'welcoming','pharmaceutic','Palestine','A');
+INSERT INTO t2 VALUES (1018,248204,41,'Pinsky','creator','regimented','A');
+INSERT INTO t2 VALUES (1019,248205,41,'halting','chess','scars','A');
+INSERT INTO t2 VALUES (1020,248206,41,'bibliography','charcoal','realest','A');
+INSERT INTO t2 VALUES (1021,248207,41,'decoding','Epiphany','diffusing','A');
+INSERT INTO t2 VALUES (1022,248208,41,'variance','bulldozes','clubroom','A');
+INSERT INTO t2 VALUES (1023,248209,41,'allowed','Pygmalion','Blythe','A');
+INSERT INTO t2 VALUES (1024,248210,41,'dire','caressing','ahead','');
+INSERT INTO t2 VALUES (1025,248211,50,'dub','Palestine','reviver','');
+INSERT INTO t2 VALUES (1026,250501,34,'poisoning','regimented','retransmitting','A');
+INSERT INTO t2 VALUES (1027,250502,34,'Iraqis','scars','landslide','');
+INSERT INTO t2 VALUES (1028,250503,34,'heaving','realest','Eiffel','');
+INSERT INTO t2 VALUES (1029,250504,34,'population','diffusing','absentee','');
+INSERT INTO t2 VALUES (1030,250505,34,'bomb','clubroom','aye','');
+INSERT INTO t2 VALUES (1031,250601,34,'Majorca','Blythe','forked','A');
+INSERT INTO t2 VALUES (1032,250602,34,'Gershwins','ahead','Peruvianizes','');
+INSERT INTO t2 VALUES (1033,250603,34,'explorers','reviver','clerked','');
+INSERT INTO t2 VALUES (1034,250604,34,'libretto','retransmitting','tutor','');
+INSERT INTO t2 VALUES (1035,250605,34,'occurred','landslide','boulevard','');
+INSERT INTO t2 VALUES (1036,251001,34,'Lagos','Eiffel','shuttered','');
+INSERT INTO t2 VALUES (1037,251002,34,'rats','absentee','quotes','A');
+INSERT INTO t2 VALUES (1038,251003,34,'bankruptcies','aye','Caltech','');
+INSERT INTO t2 VALUES (1039,251004,34,'crying','forked','Mossberg','');
+INSERT INTO t2 VALUES (1040,251005,34,'unexpected','Peruvianizes','kept','');
+INSERT INTO t2 VALUES (1041,251301,34,'accessed','clerked','roundly','');
+INSERT INTO t2 VALUES (1042,251302,34,'colorful','tutor','features','A');
+INSERT INTO t2 VALUES (1043,251303,34,'versatility','boulevard','imaginable','A');
+INSERT INTO t2 VALUES (1044,251304,34,'cosy','shuttered','controller','');
+INSERT INTO t2 VALUES (1045,251305,34,'Darius','quotes','racial','');
+INSERT INTO t2 VALUES (1046,251401,34,'mastering','Caltech','uprisings','A');
+INSERT INTO t2 VALUES (1047,251402,34,'Asiaticizations','Mossberg','narrowed','A');
+INSERT INTO t2 VALUES (1048,251403,34,'offerers','kept','cannot','A');
+INSERT INTO t2 VALUES (1049,251404,34,'uncles','roundly','vest','');
+INSERT INTO t2 VALUES (1050,251405,34,'sleepwalk','features','famine','');
+INSERT INTO t2 VALUES (1051,251406,34,'Ernestine','imaginable','sugars','');
+INSERT INTO t2 VALUES (1052,251801,34,'checksumming','controller','exterminated','A');
+INSERT INTO t2 VALUES (1053,251802,34,'stopped','racial','belays','');
+INSERT INTO t2 VALUES (1054,252101,34,'sicker','uprisings','Hodges','A');
+INSERT INTO t2 VALUES (1055,252102,34,'Italianization','narrowed','translatable','');
+INSERT INTO t2 VALUES (1056,252301,34,'alphabetic','cannot','duality','A');
+INSERT INTO t2 VALUES (1057,252302,34,'pharmaceutic','vest','recording','A');
+INSERT INTO t2 VALUES (1058,252303,34,'creator','famine','rouses','A');
+INSERT INTO t2 VALUES (1059,252304,34,'chess','sugars','poison','');
+INSERT INTO t2 VALUES (1060,252305,34,'charcoal','exterminated','attitude','');
+INSERT INTO t2 VALUES (1061,252306,34,'Epiphany','belays','dusted','');
+INSERT INTO t2 VALUES (1062,252307,34,'bulldozes','Hodges','encompasses','');
+INSERT INTO t2 VALUES (1063,252308,34,'Pygmalion','translatable','presentation','');
+INSERT INTO t2 VALUES (1064,252309,34,'caressing','duality','Kantian','');
+INSERT INTO t2 VALUES (1065,256001,34,'Palestine','recording','imprecision','A');
+INSERT INTO t2 VALUES (1066,256002,34,'regimented','rouses','saving','');
+INSERT INTO t2 VALUES (1067,256003,34,'scars','poison','maternal','');
+INSERT INTO t2 VALUES (1068,256004,34,'realest','attitude','hewed','');
+INSERT INTO t2 VALUES (1069,256005,34,'diffusing','dusted','kerosene','');
+INSERT INTO t2 VALUES (1070,258001,34,'clubroom','encompasses','Cubans','');
+INSERT INTO t2 VALUES (1071,258002,34,'Blythe','presentation','photographers','');
+INSERT INTO t2 VALUES (1072,258003,34,'ahead','Kantian','nymph','A');
+INSERT INTO t2 VALUES (1073,258004,34,'reviver','imprecision','bedlam','A');
+INSERT INTO t2 VALUES (1074,258005,34,'retransmitting','saving','north','A');
+INSERT INTO t2 VALUES (1075,258006,34,'landslide','maternal','Schoenberg','A');
+INSERT INTO t2 VALUES (1076,258007,34,'Eiffel','hewed','botany','A');
+INSERT INTO t2 VALUES (1077,258008,34,'absentee','kerosene','curs','');
+INSERT INTO t2 VALUES (1078,258009,34,'aye','Cubans','solidification','');
+INSERT INTO t2 VALUES (1079,258010,34,'forked','photographers','inheritresses','');
+INSERT INTO t2 VALUES (1080,258011,34,'Peruvianizes','nymph','stiller','');
+INSERT INTO t2 VALUES (1081,258101,68,'clerked','bedlam','t1','A');
+INSERT INTO t2 VALUES (1082,258102,68,'tutor','north','suite','A');
+INSERT INTO t2 VALUES (1083,258103,34,'boulevard','Schoenberg','ransomer','');
+INSERT INTO t2 VALUES (1084,258104,68,'shuttered','botany','Willy','');
+INSERT INTO t2 VALUES (1085,258105,68,'quotes','curs','Rena','A');
+INSERT INTO t2 VALUES (1086,258106,68,'Caltech','solidification','Seattle','A');
+INSERT INTO t2 VALUES (1087,258107,68,'Mossberg','inheritresses','relaxes','A');
+INSERT INTO t2 VALUES (1088,258108,68,'kept','stiller','exclaim','');
+INSERT INTO t2 VALUES (1089,258109,68,'roundly','t1','implicated','A');
+INSERT INTO t2 VALUES (1090,258110,68,'features','suite','distinguish','');
+INSERT INTO t2 VALUES (1091,258111,68,'imaginable','ransomer','assayed','');
+INSERT INTO t2 VALUES (1092,258112,68,'controller','Willy','homeowner','');
+INSERT INTO t2 VALUES (1093,258113,68,'racial','Rena','and','');
+INSERT INTO t2 VALUES (1094,258201,34,'uprisings','Seattle','stealth','');
+INSERT INTO t2 VALUES (1095,258202,34,'narrowed','relaxes','coinciding','A');
+INSERT INTO t2 VALUES (1096,258203,34,'cannot','exclaim','founder','A');
+INSERT INTO t2 VALUES (1097,258204,34,'vest','implicated','environing','');
+INSERT INTO t2 VALUES (1098,258205,34,'famine','distinguish','jewelry','');
+INSERT INTO t2 VALUES (1099,258301,34,'sugars','assayed','lemons','A');
+INSERT INTO t2 VALUES (1100,258401,34,'exterminated','homeowner','brokenness','A');
+INSERT INTO t2 VALUES (1101,258402,34,'belays','and','bedpost','A');
+INSERT INTO t2 VALUES (1102,258403,34,'Hodges','stealth','assurers','A');
+INSERT INTO t2 VALUES (1103,258404,34,'translatable','coinciding','annoyers','');
+INSERT INTO t2 VALUES (1104,258405,34,'duality','founder','affixed','');
+INSERT INTO t2 VALUES (1105,258406,34,'recording','environing','warbling','');
+INSERT INTO t2 VALUES (1106,258407,34,'rouses','jewelry','seriously','');
+INSERT INTO t2 VALUES (1107,228123,37,'poison','lemons','boasted','');
+INSERT INTO t2 VALUES (1108,250606,34,'attitude','brokenness','Chantilly','');
+INSERT INTO t2 VALUES (1109,208405,37,'dusted','bedpost','Iranizes','');
+INSERT INTO t2 VALUES (1110,212101,37,'encompasses','assurers','violinist','');
+INSERT INTO t2 VALUES (1111,218206,37,'presentation','annoyers','extramarital','');
+INSERT INTO t2 VALUES (1112,150401,37,'Kantian','affixed','spates','');
+INSERT INTO t2 VALUES (1113,248212,41,'imprecision','warbling','cloakroom','');
+INSERT INTO t2 VALUES (1114,128026,00,'saving','seriously','gazer','');
+INSERT INTO t2 VALUES (1115,128024,00,'maternal','boasted','hand','');
+INSERT INTO t2 VALUES (1116,128027,00,'hewed','Chantilly','tucked','');
+INSERT INTO t2 VALUES (1117,128025,00,'kerosene','Iranizes','gems','');
+INSERT INTO t2 VALUES (1118,128109,00,'Cubans','violinist','clinker','');
+INSERT INTO t2 VALUES (1119,128705,00,'photographers','extramarital','refiner','');
+INSERT INTO t2 VALUES (1120,126303,00,'nymph','spates','callus','');
+INSERT INTO t2 VALUES (1121,128308,00,'bedlam','cloakroom','leopards','');
+INSERT INTO t2 VALUES (1122,128204,00,'north','gazer','comfortingly','');
+INSERT INTO t2 VALUES (1123,128205,00,'Schoenberg','hand','generically','');
+INSERT INTO t2 VALUES (1124,128206,00,'botany','tucked','getters','');
+INSERT INTO t2 VALUES (1125,128207,00,'curs','gems','sexually','');
+INSERT INTO t2 VALUES (1126,118205,00,'solidification','clinker','spear','');
+INSERT INTO t2 VALUES (1127,116801,00,'inheritresses','refiner','serums','');
+INSERT INTO t2 VALUES (1128,116803,00,'stiller','callus','Italianization','');
+INSERT INTO t2 VALUES (1129,116804,00,'t1','leopards','attendants','');
+INSERT INTO t2 VALUES (1130,116802,00,'suite','comfortingly','spies','');
+INSERT INTO t2 VALUES (1131,128605,00,'ransomer','generically','Anthony','');
+INSERT INTO t2 VALUES (1132,118308,00,'Willy','getters','planar','');
+INSERT INTO t2 VALUES (1133,113702,00,'Rena','sexually','cupped','');
+INSERT INTO t2 VALUES (1134,113703,00,'Seattle','spear','cleanser','');
+INSERT INTO t2 VALUES (1135,112103,00,'relaxes','serums','commuters','');
+INSERT INTO t2 VALUES (1136,118009,00,'exclaim','Italianization','honeysuckle','');
+INSERT INTO t2 VALUES (5136,1118009,00,'exclaim','Italianization','honeysuckle','');
+INSERT INTO t2 VALUES (1137,138011,00,'implicated','attendants','orphanage','');
+INSERT INTO t2 VALUES (1138,138010,00,'distinguish','spies','skies','');
+INSERT INTO t2 VALUES (1139,138012,00,'assayed','Anthony','crushers','');
+INSERT INTO t2 VALUES (1140,068304,00,'homeowner','planar','Puritan','');
+INSERT INTO t2 VALUES (1141,078009,00,'and','cupped','squeezer','');
+INSERT INTO t2 VALUES (1142,108013,00,'stealth','cleanser','bruises','');
+INSERT INTO t2 VALUES (1143,084004,00,'coinciding','commuters','bonfire','');
+INSERT INTO t2 VALUES (1144,083402,00,'founder','honeysuckle','Colombo','');
+INSERT INTO t2 VALUES (1145,084003,00,'environing','orphanage','nondecreasing','');
+INSERT INTO t2 VALUES (1146,088504,00,'jewelry','skies','innocents','');
+INSERT INTO t2 VALUES (1147,088005,00,'lemons','crushers','masked','');
+INSERT INTO t2 VALUES (1148,088007,00,'brokenness','Puritan','file','');
+INSERT INTO t2 VALUES (1149,088006,00,'bedpost','squeezer','brush','');
+INSERT INTO t2 VALUES (1150,148025,00,'assurers','bruises','mutilate','');
+INSERT INTO t2 VALUES (1151,148024,00,'annoyers','bonfire','mommy','');
+INSERT INTO t2 VALUES (1152,138305,00,'affixed','Colombo','bulkheads','');
+INSERT INTO t2 VALUES (1153,138306,00,'warbling','nondecreasing','undeclared','');
+INSERT INTO t2 VALUES (1154,152701,00,'seriously','innocents','displacements','');
+INSERT INTO t2 VALUES (1155,148505,00,'boasted','masked','nieces','');
+INSERT INTO t2 VALUES (1156,158003,00,'Chantilly','file','coeducation','');
+INSERT INTO t2 VALUES (1157,156201,00,'Iranizes','brush','brassy','');
+INSERT INTO t2 VALUES (1158,156202,00,'violinist','mutilate','authenticator','');
+INSERT INTO t2 VALUES (1159,158307,00,'extramarital','mommy','Washoe','');
+INSERT INTO t2 VALUES (1160,158402,00,'spates','bulkheads','penny','');
+INSERT INTO t2 VALUES (1161,158401,00,'cloakroom','undeclared','Flagler','');
+INSERT INTO t2 VALUES (1162,068013,00,'gazer','displacements','stoned','');
+INSERT INTO t2 VALUES (1163,068012,00,'hand','nieces','cranes','');
+INSERT INTO t2 VALUES (1164,068203,00,'tucked','coeducation','masterful','');
+INSERT INTO t2 VALUES (1165,088205,00,'gems','brassy','biracial','');
+INSERT INTO t2 VALUES (1166,068704,00,'clinker','authenticator','steamships','');
+INSERT INTO t2 VALUES (1167,068604,00,'refiner','Washoe','windmills','');
+INSERT INTO t2 VALUES (1168,158502,00,'callus','penny','exploit','');
+INSERT INTO t2 VALUES (1169,123103,00,'leopards','Flagler','riverfront','');
+INSERT INTO t2 VALUES (1170,148026,00,'comfortingly','stoned','sisterly','');
+INSERT INTO t2 VALUES (1171,123302,00,'generically','cranes','sharpshoot','');
+INSERT INTO t2 VALUES (1172,076503,00,'getters','masterful','mittens','');
+INSERT INTO t2 VALUES (1173,126304,00,'sexually','biracial','interdependency','');
+INSERT INTO t2 VALUES (1174,068306,00,'spear','steamships','policy','');
+INSERT INTO t2 VALUES (1175,143504,00,'serums','windmills','unleashing','');
+INSERT INTO t2 VALUES (1176,160201,00,'Italianization','exploit','pretenders','');
+INSERT INTO t2 VALUES (1177,148028,00,'attendants','riverfront','overstatements','');
+INSERT INTO t2 VALUES (1178,148027,00,'spies','sisterly','birthed','');
+INSERT INTO t2 VALUES (1179,143505,00,'Anthony','sharpshoot','opportunism','');
+INSERT INTO t2 VALUES (1180,108014,00,'planar','mittens','showroom','');
+INSERT INTO t2 VALUES (1181,076104,00,'cupped','interdependency','compromisingly','');
+INSERT INTO t2 VALUES (1182,078106,00,'cleanser','policy','Medicare','');
+INSERT INTO t2 VALUES (1183,126102,00,'commuters','unleashing','corresponds','');
+INSERT INTO t2 VALUES (1184,128029,00,'honeysuckle','pretenders','hardware','');
+INSERT INTO t2 VALUES (1185,128028,00,'orphanage','overstatements','implant','');
+INSERT INTO t2 VALUES (1186,018410,00,'skies','birthed','Alicia','');
+INSERT INTO t2 VALUES (1187,128110,00,'crushers','opportunism','requesting','');
+INSERT INTO t2 VALUES (1188,148506,00,'Puritan','showroom','produced','');
+INSERT INTO t2 VALUES (1189,123303,00,'squeezer','compromisingly','criticizes','');
+INSERT INTO t2 VALUES (1190,123304,00,'bruises','Medicare','backer','');
+INSERT INTO t2 VALUES (1191,068504,00,'bonfire','corresponds','positively','');
+INSERT INTO t2 VALUES (1192,068305,00,'Colombo','hardware','colicky','');
+INSERT INTO t2 VALUES (1193,000000,00,'nondecreasing','implant','thrillingly','');
+--enable_query_log
+
+#
+# Search with a key
+#
+
+select t2.fld3 from t2 where companynr = 58 and fld3 like "%imaginable%";
+select fld3 from t2 where fld3 like "%cultivation";
+
+#
+# Search with a key using sorting and limit at the same time
+#
+
+select t2.fld3,companynr from t2 where companynr = 57+1 order by fld3;
+select fld3,companynr from t2 where companynr = 58 order by fld3;
+
+select fld3 from t2 order by fld3 desc limit 10;
+select fld3 from t2 order by fld3 desc limit 5;
+select fld3 from t2 order by fld3 desc limit 5,5;
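+# (note, not part of the original test: "limit 5,5" skips the first five
+# rows and returns the next five; the equivalent OFFSET form would be
+# "select fld3 from t2 order by fld3 desc limit 5 offset 5")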
+
+#
+# Update
+#
+
+UPDATE t2 SET fld3="foo" WHERE fld3="b%";
+select fld3 from t2;
+
+
+#
+# Update randomly
+#
+
+UPDATE t2 SET fld3="bar" WHERE fld3="s%";
+select fld3 from t2;
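+# (note, not part of the original test: the WHERE clause above compares
+# fld3 for literal equality with the two-character string "s%"; a real
+# pattern-based update would use LIKE, e.g.
+# UPDATE t2 SET fld3="bar" WHERE fld3 LIKE "s%")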
+
+#
+# Delete with constant
+#
+
+DELETE FROM t2 WHERE fld3="r%";
+SELECT fld3 FROM t2;
+
+#
+# Delete with Random
+#
+
+DELETE FROM t2 WHERE fld3="d%" ORDER BY RAND();
+SELECT fld3 FROM t2;
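+# (note, not part of the original test: ORDER BY RAND() here only affects
+# the order in which matching rows are removed; without a LIMIT clause it
+# does not change which rows the DELETE removes)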
+
+#
+# Rename table
+#
+
+DROP TABLE t1;
+ALTER TABLE t2 RENAME t1;
+
+#
+# Drop and recreate
+#
+
+
+DROP TABLE t1;
+CREATE TABLE t1 (
+ Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+ Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
+) ENGINE = CSV;
+
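+# (note, not part of the original test: the CSV engine stores the table
+# rows as a plain comma-separated text file in the database directory)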
+INSERT INTO t1 VALUES (9410,9412);
+
+select period from t1;
+
+drop table if exists t1,t2,t3,t4;
diff --git a/mysql-test/t/ctype_create.test b/mysql-test/t/ctype_create.test
index 6d7ed6fc205..9a5cb025474 100644
--- a/mysql-test/t/ctype_create.test
+++ b/mysql-test/t/ctype_create.test
@@ -71,6 +71,18 @@ SHOW CREATE TABLE t1;
DROP TABLE t1;
#
+# Bug#
+# CREATE TABLE and CREATE DATABASE didn't fail in some cases
+#
+--error 1302
+create table t1 (a char) character set latin1 character set latin2;
+--error 1253
+create table t1 (a char) character set latin1 collate latin2_bin;
+--error 1302
+create database d1 default character set latin1 character set latin2;
+--error 1253
+create database d1 default character set latin1 collate latin2_bin;
+
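+# (illustrative sketch, not part of the original test: when the character
+# set and collation agree, the corresponding statements are expected to
+# succeed)
+create table t1 (a char) character set latin1 collate latin1_bin;
+drop table t1;
+create database d1 default character set latin1 collate latin1_bin;
+drop database d1;
+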
#
#
DROP DATABASE mysqltest1;
diff --git a/mysql-test/t/ctype_recoding.test b/mysql-test/t/ctype_recoding.test
index de6332f272c..82d0643b577 100644
--- a/mysql-test/t/ctype_recoding.test
+++ b/mysql-test/t/ctype_recoding.test
@@ -98,6 +98,20 @@ SET NAMES utf8;
SET character_set_connection=binary;
SELECT 'тест' as s;
+# Bug#4417, another aspect:
+# Check that both "SHOW CREATE TABLE" and "SHOW COLUMNS"
+# return column names and default values in UTF8 after "SET NAMES BINARY"
+
+SET NAMES latin1;
+CREATE TABLE t1 (`` CHAR(128) DEFAULT '', `1` ENUM('1','2') DEFAULT '2');
+SHOW CREATE TABLE t1;
+SHOW COLUMNS FROM t1;
+SET NAMES binary;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS FROM t1;
+DROP TABLE t1;
+
+
#
# Test that we allow only well-formed UTF8 identifiers
#
diff --git a/mysql-test/t/ctype_uca.test b/mysql-test/t/ctype_uca.test
index 0ab46a5a637..187d21f9ab7 100644
--- a/mysql-test/t/ctype_uca.test
+++ b/mysql-test/t/ctype_uca.test
@@ -176,7 +176,7 @@ select group_concat(c1 order by c1) from t1 group by c1 collate utf8_turkish_ci;
select group_concat(c1 order by c1) from t1 group by c1 collate utf8_czech_ci;
select group_concat(c1 order by c1) from t1 group by c1 collate utf8_danish_ci;
select group_concat(c1 order by c1) from t1 group by c1 collate utf8_lithuanian_ci;
---select group_concat(c1 order by c1) from t1 group by c1 collate utf8_slovak_ci;
+select group_concat(c1 order by c1) from t1 group by c1 collate utf8_slovak_ci;
select group_concat(c1 order by c1) from t1 group by c1 collate utf8_spanish2_ci;
select group_concat(c1 order by c1) from t1 group by c1 collate utf8_roman_ci;
diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test
index b8574fb7623..d9ef91496e9 100644
--- a/mysql-test/t/ctype_ucs.test
+++ b/mysql-test/t/ctype_ucs.test
@@ -293,3 +293,25 @@ SELECT HEX(a) FROM t1;
DROP TABLE t1;
-- the same should also be done with enum and set
+
+
+#
+# Bug #5024 Server crashes with queries on fields
+# with certain charset/collation settings
+#
+
+create table t1 (s1 char character set `ucs2` collate `ucs2_czech_ci`);
+insert into t1 values ('0'),('1'),('2'),('a'),('b'),('c');
+select s1 from t1 where s1 > 'a' order by s1;
+drop table t1;
+
+#
+# Bug #5081 : UCS2 fields are filled with '0x2020'
+# after extending field length
+#
+create table t1(a char(1)) default charset = ucs2;
+insert into t1 values ('a'),('b'),('c');
+alter table t1 modify a char(5);
+select a, hex(a) from t1;
+drop table t1;
+
diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test
index 07baee1b3bd..2c531d4e5d2 100644
--- a/mysql-test/t/ctype_utf8.test
+++ b/mysql-test/t/ctype_utf8.test
@@ -3,7 +3,7 @@
#
--disable_warnings
-drop table if exists t1;
+drop table if exists t1,t2;
--enable_warnings
set names utf8;
@@ -45,6 +45,15 @@ SELECT 'a\0' < 'a ';
SELECT 'a\t' < 'a';
SELECT 'a\t' < 'a ';
+#
+# The same for binary collation
+#
+SELECT 'a' = 'a ' collate utf8_bin;
+SELECT 'a\0' < 'a' collate utf8_bin;
+SELECT 'a\0' < 'a ' collate utf8_bin;
+SELECT 'a\t' < 'a' collate utf8_bin;
+SELECT 'a\t' < 'a ' collate utf8_bin;
+
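+# (note, not part of the original test: utf8_bin is still a PAD SPACE
+# collation, so 'a' = 'a ' is expected to hold, while control characters
+# such as '\0' and '\t' sort before the implicit trailing space padding)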
CREATE TABLE t1 (a char(10) character set utf8 not null);
INSERT INTO t1 VALUES ('a'),('a\0'),('a\t'),('a ');
SELECT hex(a),STRCMP(a,'a'), STRCMP(a,'a ') FROM t1;
@@ -63,6 +72,12 @@ select insert('txs',2,1,'hi'),insert('is ',4,0,'a'),insert('txxxxt',2,4,'es');
select insert("aa",100,1,"b"),insert("aa",1,3,"b");
#
+# LEFT() also didn't work well with utf8 in some cases.
+#
+select char_length(left(@a:='тест',5)), length(@a), @a;
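+# (note, not part of the original test: 'тест' is four Cyrillic characters
+# encoded as eight utf8 bytes, so char_length() is expected to report 4
+# while length() reports 8, and left(...,5) returns the whole string)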
+
+
+#
# CREATE ... SELECT
#
create table t1 select date_format("2004-01-19 10:10:10", "%Y-%m-%d");
@@ -165,3 +180,315 @@ select 'zвасяz' rlike '[[:<:]]вася[[:>:]]';
CREATE TABLE t1 (a enum ('Y', 'N') DEFAULT 'N' COLLATE utf8_unicode_ci);
ALTER TABLE t1 ADD COLUMN b CHAR(20);
DROP TABLE t1;
+
+# Customer Support Center issue # 3299
+# ENUM and SET multibyte fields computed their length wrongly
+# when converted into a char field
+set names utf8;
+create table t1 (a enum('aaaa','проба') character set utf8);
+show create table t1;
+insert into t1 values ('проба');
+select * from t1;
+create table t2 select ifnull(a,a) from t1;
+show create table t2;
+select * from t2;
+drop table t1;
+drop table t2;
+
+#
+# Bug 4521: unique key prefix interacts poorly with utf8
+# MYISAM: keys with prefix compression, case insensitive collation.
+#
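+# (explanatory note, not part of the original test: unique(c(10)) indexes
+# only the first ten characters, so any value sharing the same ten-character
+# prefix as an existing row should fail below with a duplicate-key error)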
+create table t1 (c varchar(30) character set utf8, unique(c(10)));
+insert into t1 values ('1'),('2'),('3'),('x'),('y'),('z');
+insert into t1 values ('aaaaaaaaaa');
+--error 1062
+insert into t1 values ('aaaaaaaaaaa');
+--error 1062
+insert into t1 values ('aaaaaaaaaaaa');
+insert into t1 values (repeat('b',20));
+select c c1 from t1 where c='1';
+select c c2 from t1 where c='2';
+select c c3 from t1 where c='3';
+select c cx from t1 where c='x';
+select c cy from t1 where c='y';
+select c cz from t1 where c='z';
+select c ca10 from t1 where c='aaaaaaaaaa';
+select c cb20 from t1 where c=repeat('b',20);
+drop table t1;
+
+#
+# Bug 4521: unique key prefix interacts poorly with utf8
+# MYISAM: fixed length keys, case insensitive collation
+#
+create table t1 (c char(3) character set utf8, unique (c(2)));
+insert into t1 values ('1'),('2'),('3'),('4'),('x'),('y'),('z');
+insert into t1 values ('a');
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('b');
+insert into t1 values ('bb');
+--error 1062
+insert into t1 values ('bbb');
+insert into t1 values ('а');
+insert into t1 values ('аа');
+--error 1062
+insert into t1 values ('ааа');
+insert into t1 values ('б');
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+insert into t1 values ('ꪪ');
+insert into t1 values ('ꪪꪪ');
+--error 1062
+insert into t1 values ('ꪪꪪꪪ');
+drop table t1;
+
+#
+# Bug 4531: unique key prefix interacts poorly with utf8
+# Check HEAP+HASH, case insensitive collation
+#
+create table t1 (
+c char(10) character set utf8,
+unique key a using hash (c(1))
+) engine=heap;
+show create table t1;
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+--error 1062
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('б');
+--error 1062
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+select c as c_all from t1 order by c;
+select c as c_a from t1 where c='a';
+select c as c_a from t1 where c='б';
+drop table t1;
+
+#
+# Bug 4531: unique key prefix interacts poorly with utf8
+# Check HEAP+BTREE, case insensitive collation
+#
+create table t1 (
+c char(10) character set utf8,
+unique key a using btree (c(1))
+) engine=heap;
+show create table t1;
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+--error 1062
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('б');
+--error 1062
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+select c as c_all from t1 order by c;
+select c as c_a from t1 where c='a';
+select c as c_a from t1 where c='б';
+drop table t1;
+
+#
+# Bug 4531: unique key prefix interacts poorly with utf8
+# Check BDB, case insensitive collation
+#
+--disable_warnings
+create table t1 (
+c char(10) character set utf8,
+unique key a (c(1))
+) engine=bdb;
+--enable_warnings
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+--error 1062
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('б');
+--error 1062
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+select c as c_all from t1 order by c;
+select c as c_a from t1 where c='a';
+select c as c_a from t1 where c='б';
+drop table t1;
+
+#
+# Bug 4521: unique key prefix interacts poorly with utf8
+# MYISAM: keys with prefix compression, binary collation.
+#
+create table t1 (c varchar(30) character set utf8 collate utf8_bin, unique(c(10)));
+insert into t1 values ('1'),('2'),('3'),('x'),('y'),('z');
+insert into t1 values ('aaaaaaaaaa');
+--error 1062
+insert into t1 values ('aaaaaaaaaaa');
+--error 1062
+insert into t1 values ('aaaaaaaaaaaa');
+insert into t1 values (repeat('b',20));
+select c c1 from t1 where c='1';
+select c c2 from t1 where c='2';
+select c c3 from t1 where c='3';
+select c cx from t1 where c='x';
+select c cy from t1 where c='y';
+select c cz from t1 where c='z';
+select c ca10 from t1 where c='aaaaaaaaaa';
+select c cb20 from t1 where c=repeat('b',20);
+drop table t1;
+
+#
+# Bug 4521: unique key prefix interacts poorly with utf8
+# MYISAM: fixed length keys, binary collation
+#
+create table t1 (c char(3) character set utf8 collate utf8_bin, unique (c(2)));
+insert into t1 values ('1'),('2'),('3'),('4'),('x'),('y'),('z');
+insert into t1 values ('a');
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('b');
+insert into t1 values ('bb');
+--error 1062
+insert into t1 values ('bbb');
+insert into t1 values ('а');
+insert into t1 values ('аа');
+--error 1062
+insert into t1 values ('ааа');
+insert into t1 values ('б');
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+insert into t1 values ('ꪪ');
+insert into t1 values ('ꪪꪪ');
+--error 1062
+insert into t1 values ('ꪪꪪꪪ');
+drop table t1;
+
+#
+# Bug 4531: unique key prefix interacts poorly with utf8
+# Check HEAP+HASH, binary collation
+#
+create table t1 (
+c char(10) character set utf8 collate utf8_bin,
+unique key a using hash (c(1))
+) engine=heap;
+show create table t1;
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+--error 1062
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('б');
+--error 1062
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+select c as c_all from t1 order by c;
+select c as c_a from t1 where c='a';
+select c as c_a from t1 where c='б';
+drop table t1;
+
+#
+# Bug 4531: unique key prefix interacts poorly with utf8
+# Check HEAP+BTREE, binary collation
+#
+create table t1 (
+c char(10) character set utf8 collate utf8_bin,
+unique key a using btree (c(1))
+) engine=heap;
+show create table t1;
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+--error 1062
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('б');
+--error 1062
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+select c as c_all from t1 order by c;
+select c as c_a from t1 where c='a';
+select c as c_a from t1 where c='б';
+drop table t1;
+
+#
+# Bug 4531: unique key prefix interacts poorly with utf8
+# Check BDB, binary collation
+#
+--disable_warnings
+create table t1 (
+c char(10) character set utf8 collate utf8_bin,
+unique key a (c(1))
+) engine=bdb;
+--enable_warnings
+insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
+--error 1062
+insert into t1 values ('aa');
+--error 1062
+insert into t1 values ('aaa');
+insert into t1 values ('б');
+--error 1062
+insert into t1 values ('бб');
+--error 1062
+insert into t1 values ('ббб');
+select c as c_all from t1 order by c;
+select c as c_a from t1 where c='a';
+select c as c_a from t1 where c='б';
+drop table t1;
+
+
+# Bug#4594: column index make = failed for gbk, but like works
+# Check MYISAM
+#
+create table t1 (
+ str varchar(255) character set utf8 not null,
+ key str (str(2))
+) engine=myisam;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+drop table t1;
+
+# the same for HEAP+BTREE
+#
+
+create table t1 (
+ str varchar(255) character set utf8 not null,
+ key str using btree (str(2))
+) engine=heap;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+drop table t1;
+
+# the same for HEAP+HASH
+#
+
+create table t1 (
+ str varchar(255) character set utf8 not null,
+ key str using hash (str(2))
+) engine=heap;
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+drop table t1;
+
+# the same for BDB
+#
+
+--disable_warnings
+create table t1 (
+ str varchar(255) character set utf8 not null,
+ key str (str(2))
+) engine=bdb;
+--enable_warnings
+INSERT INTO t1 VALUES ('str');
+INSERT INTO t1 VALUES ('str2');
+select * from t1 where str='str';
+drop table t1;
+
diff --git a/mysql-test/t/date_formats.test b/mysql-test/t/date_formats.test
index 1fc04cb907b..f769fe7af04 100644
--- a/mysql-test/t/date_formats.test
+++ b/mysql-test/t/date_formats.test
@@ -132,16 +132,23 @@ insert into t1 values
('2003-01-02 11:11:12Pm', '%Y-%m-%d %h:%i:%S%p'),
('10:20:10', '%H:%i:%s'),
('10:20:10', '%h:%i:%s.%f'),
+('10:20:10', '%T'),
('10:20:10AM', '%h:%i:%s%p'),
+('10:20:10AM', '%r'),
('10:20:10.44AM', '%h:%i:%s.%f%p'),
('15-01-2001 12:59:58', '%d-%m-%Y %H:%i:%S'),
('15 September 2001', '%d %M %Y'),
('15 SEPTEMB 2001', '%d %M %Y'),
('15 MAY 2001', '%d %b %Y'),
+('15th May 2001', '%D %b %Y'),
('Sunday 15 MAY 2001', '%W %d %b %Y'),
('Sund 15 MAY 2001', '%W %d %b %Y'),
('Tuesday 00 2002', '%W %U %Y'),
('Thursday 53 1998', '%W %u %Y'),
+('Sunday 01 2001', '%W %v %x'),
+('Tuesday 52 2001', '%W %V %X'),
+('060 2004', '%j %Y'),
+('4 53 1998', '%w %u %Y'),
('15-01-2001', '%d-%m-%Y %H:%i:%S'),
('15-01-20', '%d-%m-%y'),
('15-2001-1', '%d-%Y-%c');
@@ -156,7 +163,7 @@ select date,format,DATE(str_to_date(date, format)) as date2 from t1;
select date,format,TIME(str_to_date(date, format)) as time from t1;
select date,format,concat(TIME(str_to_date(date, format))) as time2 from t1;
-# Test wrong dates
+# Test wrong dates or conversion specifiers
truncate table t1;
insert into t1 values
@@ -169,10 +176,13 @@ insert into t1 values
('15 Septembei 2001', '%d %M %Y'),
('15 Ju 2001', '%d %M %Y'),
('Sund 15 MA', '%W %d %b %Y'),
-('Sunday 01 2001', '%W %V %X'),
('Thursdai 12 1998', '%W %u %Y'),
-(NULL, get_format(DATE,'USA')),
-('Tuesday 52 2001', '%W %V %X');
+('Sunday 01 2001', '%W %v %X'),
+('Tuesday 52 2001', '%W %V %x'),
+('Tuesday 52 2001', '%W %V %Y'),
+('Tuesday 52 2001', '%W %u %x'),
+('7 53 1998', '%w %u %Y'),
+(NULL, get_format(DATE,'USA'));
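+# (note, not part of the original test: %V is meant to be combined with %X
+# and %v with %x, so the mixed week/year specifier pairs above, and the
+# out-of-range weekday in '7 53 1998', are expected to fail to parse)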
select date,format,str_to_date(date, format) as str_to_date from t1;
select date,format,concat(str_to_date(date, format),'') as con from t1;
@@ -196,6 +206,7 @@ drop table t1;
select get_format(DATE, 'USA') as a;
select get_format(TIME, 'internal') as a;
select get_format(DATETIME, 'eur') as a;
+select get_format(TIMESTAMP, 'eur') as a;
select get_format(DATE, 'TEST') as a;
select str_to_date('15-01-2001 12:59:59', GET_FORMAT(DATE,'USA'));
diff --git a/mysql-test/t/endspace.test b/mysql-test/t/endspace.test
index a9933ff93b5..9ee5e32967a 100644
--- a/mysql-test/t/endspace.test
+++ b/mysql-test/t/endspace.test
@@ -31,19 +31,25 @@ explain select * from t1 order by text1;
alter table t1 modify text1 char(32) binary not null;
check table t1;
select * from t1 ignore key (key1) where text1='teststring' or text1 like 'teststring_%';
-select * from t1 where text1='teststring' or text1 like 'teststring_%';
-select * from t1 where text1='teststring' or text1 > 'teststring\t';
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 like 'teststring_%';
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring\t';
select text1, length(text1) from t1 order by text1;
select text1, length(text1) from t1 order by binary text1;
alter table t1 modify text1 blob not null, drop key key1, add key key1 (text1(20));
insert into t1 values ('teststring ');
select concat('|', text1, '|') from t1 order by text1;
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring\t';
+select concat('|', text1, '|') from t1 where text1='teststring';
+select concat('|', text1, '|') from t1 where text1='teststring ';
alter table t1 modify text1 text not null, pack_keys=1;
+select concat('|', text1, '|') from t1 where text1='teststring';
+select concat('|', text1, '|') from t1 where text1='teststring ';
+explain select concat('|', text1, '|') from t1 where text1='teststring ';
select * from t1 where text1 like 'teststring_%';
select * from t1 where text1='teststring' or text1 like 'teststring_%';
-select * from t1 where text1='teststring' or text1 > 'teststring\t';
+select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring\t';
select concat('|', text1, '|') from t1 order by text1;
drop table t1;
diff --git a/mysql-test/t/exampledb.test b/mysql-test/t/exampledb.test
new file mode 100644
index 00000000000..c60a9d7f930
--- /dev/null
+++ b/mysql-test/t/exampledb.test
@@ -0,0 +1,16 @@
+#
+# Simple test for the example storage engine
+# Taken from the select test
+#
+-- source include/have_exampledb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+ Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
+) ENGINE=example;
+
+drop table t1;
diff --git a/mysql-test/t/flush_block_commit.test b/mysql-test/t/flush_block_commit.test
new file mode 100644
index 00000000000..3d13086f517
--- /dev/null
+++ b/mysql-test/t/flush_block_commit.test
@@ -0,0 +1,52 @@
+# Let's see if FLUSH TABLES WITH READ LOCK blocks COMMIT of existing
+# transactions.
+# We verify that we did not introduce a deadlock.
+
+-- source include/have_innodb.inc
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+connect (con3,localhost,root,,);
+connection con1;
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+create table t1 (a int) engine=innodb;
+
+# blocks COMMIT ?
+
+begin;
+insert into t1 values(1);
+connection con2;
+flush tables with read lock;
+select * from t1;
+connection con1;
+send commit; # blocked by con2
+sleep 1;
+connection con2;
+select * from t1; # verify con1 was blocked and data did not move
+unlock tables;
+connection con1;
+reap;
+
+# No deadlock ?
+
+connection con1;
+begin;
+select * from t1 for update;
+connection con2;
+begin;
+send select * from t1 for update; # blocked by con1
+sleep 1;
+connection con3;
+send flush tables with read lock; # blocked by con2
+connection con1;
+commit; # should not be blocked by con3
+connection con2;
+reap;
+connection con3;
+reap;
+unlock tables;
+connection con1;
+drop table t1;
diff --git a/mysql-test/t/fulltext2.test b/mysql-test/t/fulltext2.test
index cab1d096fe7..1d3a5307412 100644
--- a/mysql-test/t/fulltext2.test
+++ b/mysql-test/t/fulltext2.test
@@ -44,6 +44,9 @@ while ($1)
# converting to two-level
repair table t1 quick;
+check table t1;
+optimize table t1; # BUG#5327 - mi_sort_index() of 2-level tree
+check table t1;
select count(*) from t1 where match a against ('aaaxxx');
select count(*) from t1 where match a against ('aaayyy');
@@ -102,6 +105,11 @@ CREATE TABLE t1 (
FULLTEXT KEY (a)
) ENGINE=MyISAM;
+#
+# now same as above but w/o repair table
+# 2-level tree created by mi_write
+#
+
# two-level entry, second-level tree with depth 2
--disable_query_log
let $1=260;
diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test
index 62343fa2af8..3f671377c4e 100644
--- a/mysql-test/t/func_gconcat.test
+++ b/mysql-test/t/func_gconcat.test
@@ -169,9 +169,11 @@ create table t2 (a int, c int);
insert into t2 values (1, 5), (2, 4), (3, 3), (3,3);
select group_concat(c) from t1;
select group_concat(c order by (select c from t2 where t2.a=t1.a limit 1)) as grp from t1;
-
select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1;
select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1;
+select t1.a, group_concat(c order by (select c from t2 where t2.a=t1.a limit 1)) as grp from t1 group by 1;
+select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1 group by 1;
+select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1;
# The following returns random results as we are sorting on blob addresses
# select group_concat(c order by (select group_concat(c order by a) from t2 where t2.a=t1.a)) as grp from t1;
@@ -201,3 +203,40 @@ SELECT GROUP_CONCAT(a SEPARATOR '||') AS names FROM t1
SELECT GROUP_CONCAT(a SEPARATOR '###') AS names FROM t1
HAVING LEFT(names, 1) ='J';
DROP TABLE t1;
+
+#
+# check blobs
+#
+
+CREATE TABLE t1 ( a int, b TEXT );
+INSERT INTO t1 VALUES (1,'First Row'), (2,'Second Row');
+SELECT GROUP_CONCAT(b ORDER BY b) FROM t1 GROUP BY a;
+DROP TABLE t1;
+
+#
+# check null values #1
+#
+
+--disable_warnings
+CREATE TABLE t1 (a_id tinyint(4) NOT NULL default '0', PRIMARY KEY (a_id)) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES (1),(2),(3);
+CREATE TABLE t2 (b_id tinyint(4) NOT NULL default '0',b_a tinyint(4) NOT NULL default '0', PRIMARY KEY (b_id), KEY (b_a),
+ CONSTRAINT fk_b_a FOREIGN KEY (b_a) REFERENCES t1 (a_id) ON DELETE CASCADE ON UPDATE NO ACTION) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+--enable_warnings
+INSERT INTO t2 VALUES (1,1),(2,1),(3,1),(4,2),(5,2);
+SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN (t2) on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz;
+DROP TABLE t2;
+DROP TABLE t1;
+
+#
+# check null values #2
+#
+
+CREATE TABLE t1 (A_ID INT NOT NULL,A_DESC CHAR(3) NOT NULL,PRIMARY KEY (A_ID));
+INSERT INTO t1 VALUES (1,'ABC'), (2,'EFG'), (3,'HIJ');
+CREATE TABLE t2 (A_ID INT NOT NULL,B_DESC CHAR(3) NOT NULL,PRIMARY KEY (A_ID,B_DESC));
+INSERT INTO t2 VALUES (1,'A'),(1,'B'),(3,'F');
+SELECT t1.A_ID, GROUP_CONCAT(t2.B_DESC) AS B_DESC FROM t1 LEFT JOIN t2 ON t1.A_ID=t2.A_ID GROUP BY t1.A_ID ORDER BY t1.A_DESC;
+DROP TABLE t1;
+DROP TABLE t2;
+
diff --git a/mysql-test/t/func_in.test b/mysql-test/t/func_in.test
index 855a7cbd28f..22079377ad2 100644
--- a/mysql-test/t/func_in.test
+++ b/mysql-test/t/func_in.test
@@ -75,6 +75,13 @@ select * from t1 where 'a' in (a,b,c collate latin1_bin);
explain extended select * from t1 where 'a' in (a,b,c collate latin1_bin);
drop table t1;
+set names utf8;
+create table t1 (a char(10) character set utf8 not null);
+insert into t1 values ('bbbb'),(_koi8r''),(_latin1'');
+select a from t1 where a in ('bbbb',_koi8r'',_latin1'') order by a;
+drop table t1;
+set names latin1;
+
select '1.0' in (1,2);
select 1 in ('1.0',2);
select 1 in (1,'2.0');
diff --git a/mysql-test/t/func_like.test b/mysql-test/t/func_like.test
index ad83202afa0..4ca2f28fa6e 100644
--- a/mysql-test/t/func_like.test
+++ b/mysql-test/t/func_like.test
@@ -90,3 +90,9 @@ select * from t1 where a like '%PES%';
select * from t1 where a like '%PESKA%';
select * from t1 where a like '%ESKA%';
DROP TABLE t1;
+
+#
+# LIKE crashed for binary collations in some cases
+#
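+# 'aaaaaaaaa' matches the pattern, so this should return 1 instead of crashing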
+select _cp866'aaaaaaaaa' like _cp866'%aaaa%' collate cp866_bin;
+
diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test
index 36ad2dfb0a2..e58c097b5a6 100644
--- a/mysql-test/t/func_math.test
+++ b/mysql-test/t/func_math.test
@@ -26,8 +26,8 @@ explain extended select pow(10,log10(10)),power(2,4);
set @@rand_seed1=10000000,@@rand_seed2=1000000;
select rand(999999),rand();
explain extended select rand(999999),rand();
-select pi(),sin(pi()/2),cos(pi()/2),abs(tan(pi())),cot(1),asin(1),acos(0),atan(1);
-explain extended select pi(),sin(pi()/2),cos(pi()/2),abs(tan(pi())),cot(1),asin(1),acos(0),atan(1);
+select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6);
+explain extended select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6);
select degrees(pi()),radians(360);
#
diff --git a/mysql-test/t/func_set.test b/mysql-test/t/func_set.test
index 03843fd3da5..d669739bcb4 100644
--- a/mysql-test/t/func_set.test
+++ b/mysql-test/t/func_set.test
@@ -31,11 +31,19 @@ drop table if exists t1,t2;
--enable_warnings
create table t1 (id int(10) not null unique);
-create table t2 (id int(10) not null primary key,
-val int(10) not null);
+create table t2 (id int(10) not null primary key, val int(10) not null);
insert into t1 values (1),(2),(4);
insert into t2 values (1,1),(2,1),(3,1),(4,2);
select one.id, elt(two.val,'one','two') from t1 one, t2 two where two.id=one.id;
select one.id, elt(two.val,'one','two') from t1 one, t2 two where two.id=one.id order by one.id;
drop table t1,t2;
+
+#
+# Bug4340: find_in_set is case insensitive even on binary operators
+#
+
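+# with a binary argument the comparison should be case sensitive, so each of these should return 0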
+select find_in_set(binary 'a',binary 'A,B,C');
+select find_in_set('a',binary 'A,B,C');
+select find_in_set(binary 'a', 'A,B,C');
+
diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test
index 61d0326f7dd..e7852df40b3 100644
--- a/mysql-test/t/func_str.test
+++ b/mysql-test/t/func_str.test
@@ -402,3 +402,9 @@ insert into t1 values ('-21474836461','-21474836461');
show warnings;
select * from t1;
drop table t1;
+
+#
+# Bug #4878: LEFT() in integer/float context
+#
+
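+# LEFT(1234, 3) is '123', so the result should be the numeric value 123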
+select left(1234, 3) + 0;
diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test
index 590007caba1..e35b9996a44 100644
--- a/mysql-test/t/gis.test
+++ b/mysql-test/t/gis.test
@@ -172,3 +172,21 @@ insert IGNORE into t1 values ('Garbage');
alter table t1 add spatial index(a);
drop table t1;
+
+#
+# Bug #5219: problem with range optimizer
+#
+
+create table t1(a geometry not null, spatial index(a));
+insert into t1 values
+(GeomFromText('POINT(1 1)')), (GeomFromText('POINT(3 3)')),
+(GeomFromText('POINT(4 4)')), (GeomFromText('POINT(6 6)'));
+select AsText(a) from t1 where
+ MBRContains(GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a)
+ or
+ MBRContains(GeomFromText('Polygon((2 2, 2 5, 5 5, 5 2, 2 2))'), a);
+select AsText(a) from t1 where
+ MBRContains(GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a)
+ and
+ MBRContains(GeomFromText('Polygon((0 0, 0 7, 7 7, 7 0, 0 0))'), a);
+drop table t1;
diff --git a/mysql-test/t/grant.test b/mysql-test/t/grant.test
index eb8704ac845..a892b761964 100644
--- a/mysql-test/t/grant.test
+++ b/mysql-test/t/grant.test
@@ -175,6 +175,22 @@ DROP DATABASE ;
SET NAMES latin1;
#
+# Bug #4898: User privileges depending on ORDER BY Settings of table db
+#
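+# the grants shown should be the same for both physical row orders of mysql.db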
+insert into mysql.user (host, user) values ('localhost', 'test11');
+insert into mysql.db (host, db, user, select_priv) values
+('localhost', 'a%', 'test11', 'Y'), ('localhost', 'ab%', 'test11', 'Y');
+alter table mysql.db order by db asc;
+flush privileges;
+show grants for test11@localhost;
+alter table mysql.db order by db desc;
+flush privileges;
+show grants for test11@localhost;
+delete from mysql.user where user='test11';
+delete from mysql.db where user='test11';
+
+
+#
# just SHOW PRIVILEGES test
#
SHOW PRIVILEGES;
diff --git a/mysql-test/t/grant_cache.test b/mysql-test/t/grant_cache.test
index fc06e04f014..1ec4a52fdd1 100644
--- a/mysql-test/t/grant_cache.test
+++ b/mysql-test/t/grant_cache.test
@@ -67,7 +67,8 @@ show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
show status like "Qcache_not_cached";
-connect (unkuser,localhost,,,,$MASTER_MYPORT,$MASTER_MYSOCK);
+# Don't use '' as the user because it will pick up the Unix login
+connect (unkuser,localhost,unkuser,,,$MASTER_MYPORT,$MASTER_MYSOCK);
connection unkuser;
show grants for current_user();
diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test
index 7e4cbe76cca..12a44fd75dc 100644
--- a/mysql-test/t/having.test
+++ b/mysql-test/t/having.test
@@ -75,3 +75,50 @@ select id, sum(qty) as sqty from t1 group by id having sqty>2;
select sum(qty) as sqty from t1 group by id having count(id) > 0;
select sum(qty) as sqty from t1 group by id having count(distinct id) > 0;
drop table t1;
+
+#
+# Test case for Bug #4358 Problem with HAVING clause that uses alias from the
+# select list and TEXT field
+#
+
+CREATE TABLE t1 (
+ `id` bigint(20) NOT NULL default '0',
+ `description` text
+) ENGINE=MyISAM;
+
+CREATE TABLE t2 (
+ `id` bigint(20) NOT NULL default '0',
+ `description` varchar(20)
+) ENGINE=MyISAM;
+
+INSERT INTO t1 VALUES (1, 'test');
+INSERT INTO t2 VALUES (1, 'test');
+
+CREATE TABLE t3 (
+ `id` bigint(20) NOT NULL default '0',
+ `order_id` bigint(20) NOT NULL default '0'
+) ENGINE=MyISAM;
+
+select
+ a.id, a.description,
+ count(b.id) as c
+from t1 a left join t3 b on a.id=b.order_id
+group by a.id, a.description
+having (a.description is not null) and (c=0);
+
+select
+ a.*,
+ count(b.id) as c
+from t2 a left join t3 b on a.id=b.order_id
+group by a.id, a.description
+having (a.description is not null) and (c=0);
+
+INSERT INTO t1 VALUES (2, 'test2');
+
+select
+ a.id, a.description,
+ count(b.id) as c
+from t1 a left join t3 b on a.id=b.order_id
+group by a.id, a.description
+having (a.description is not null) and (c=0);
+drop table t1,t2,t3;
diff --git a/mysql-test/t/heap.test b/mysql-test/t/heap.test
index d867d5f4323..37fc5a43227 100644
--- a/mysql-test/t/heap.test
+++ b/mysql-test/t/heap.test
@@ -150,3 +150,17 @@ INSERT into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11);
DELETE from t1 where a < 100;
SELECT * from t1;
DROP TABLE t1;
+
+#
+# Bug#4411 Server hangs when trying to SELECT MAX(id) from an empty HEAP table
+#
+CREATE TABLE `job_titles` (
+ `job_title_id` int(6) unsigned NOT NULL default '0',
+ `job_title` char(18) NOT NULL default '',
+ PRIMARY KEY (`job_title_id`),
+ UNIQUE KEY `job_title_id` (`job_title_id`,`job_title`)
+) ENGINE=HEAP;
+
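+# the table is empty, so this should return NULL at once instead of hanging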
+SELECT MAX(job_title_id) FROM job_titles;
+
+DROP TABLE job_titles;
diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test
index bba5cdeee58..1d18e020543 100644
--- a/mysql-test/t/join.test
+++ b/mysql-test/t/join.test
@@ -285,6 +285,16 @@ SELECT * FROM t1 NATURAL JOIN t2 WHERE (Value1 = 'A' AND Value2 <> 'B') AND 1;
drop table t1,t2;
#
+# dummy natural join (no common columns) Bug #4807
+#
+
+CREATE TABLE t1 (a int);
+CREATE TABLE t2 (b int);
+CREATE TABLE t3 (c int);
+SELECT * FROM t1 NATURAL JOIN t2 NATURAL JOIN t3;
+DROP TABLE t1, t2, t3;
+
+#
# Test combination of join methods
#
diff --git a/mysql-test/t/join_outer.test b/mysql-test/t/join_outer.test
index 4ffe1c075b6..0c4c9614d88 100644
--- a/mysql-test/t/join_outer.test
+++ b/mysql-test/t/join_outer.test
@@ -451,3 +451,53 @@ select * from t1 left join t2 on b1 = a1 left join t3 on c1 = a1 and b1 is
explain select * from t1 left join t2 on b1 = a1 left join t3 on c1 = a1 and b1 is null;
drop table t1, t2, t3;
+
+# Test for BUG#5088
+
+create table t1 (
+ match_id tinyint(3) unsigned not null auto_increment,
+ home tinyint(3) unsigned default '0',
+ unique key match_id (match_id),
+ key match_id_2 (match_id)
+);
+
+insert into t1 values("1", "2");
+
+create table t2 (
+ player_id tinyint(3) unsigned default '0',
+ match_1_h tinyint(3) unsigned default '0',
+ key player_id (player_id)
+);
+
+insert into t2 values("1", "5");
+insert into t2 values("2", "9");
+insert into t2 values("3", "3");
+insert into t2 values("4", "7");
+insert into t2 values("5", "6");
+insert into t2 values("6", "8");
+insert into t2 values("7", "4");
+insert into t2 values("8", "12");
+insert into t2 values("9", "11");
+insert into t2 values("10", "10");
+
+explain select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+ (t2 s left join t1 m on m.match_id = 1)
+ order by m.match_id desc;
+
+explain select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+ (t2 s left join t1 m on m.match_id = 1)
+ order by UUX desc;
+
+select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+ (t2 s left join t1 m on m.match_id = 1)
+ order by UUX desc;
+
+explain select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+ t2 s straight_join t1 m where m.match_id = 1
+ order by UUX desc;
+
+select s.*, '*', m.*, (s.match_1_h - m.home) UUX from
+ t2 s straight_join t1 m where m.match_id = 1
+ order by UUX desc;
+
+drop table t1, t2;
diff --git a/mysql-test/t/key.test b/mysql-test/t/key.test
index 8d399abfec9..ce10f07cf07 100644
--- a/mysql-test/t/key.test
+++ b/mysql-test/t/key.test
@@ -97,7 +97,7 @@ CREATE TABLE t1 (
PRIMARY KEY (name,author,category)
);
INSERT INTO t1 VALUES
-('patnom','patauteur',0,'p.favre@cryo-networks.fr',NULL,NULL,'#p2sndnq6ae5g1u6t','essai\nsalut','scol://195.242.78.119:patauteur.patnom',NULL,NULL,NULL,950036174,-882087474,NULL,3,0,3,'1','Pub/patnom/futur_divers.scs',NULL,'pat','CC1');
+('patnom','patauteur',0,'p.favre@cryo-networks.fr',NULL,NULL,'#p2sndnq6ae5g1u6t','essai salut','scol://195.242.78.119:patauteur.patnom',NULL,NULL,NULL,950036174,-882087474,NULL,3,0,3,'1','Pub/patnom/futur_divers.scs',NULL,'pat','CC1');
INSERT INTO t1 VALUES
('LeNomDeMonSite','Marc',0,'m.barilley@cryo-networks.fr',NULL,NULL,NULL,NULL,'scol://195.242.78.119:Marc.LeNomDeMonSite',NULL,NULL,NULL,950560434,-881563214,NULL,3,0,3,'1','Pub/LeNomDeMonSite/domus_hibere.scs',NULL,'Marq','CC1');
select * from t1 where name='patnom' and author='patauteur' and category=0;
@@ -228,3 +228,27 @@ EXPLAIN SELECT numeropost FROM t1 WHERE numreponse='1';
FLUSH TABLES;
SELECT numeropost FROM t1 WHERE numreponse='1';
drop table t1;
+
+#
+# UNIQUE prefix keys and multi-byte charsets
+#
+
+create table t1 (c varchar(30) character set utf8, t text character set utf8, unique (c(2)), unique (t(3))) engine=myisam;
+show create table t1;
+insert t1 values ('cccc', 'tttt'),
+ (0xD0B1212223D0B1D0B1D0B1D0B1D0B1, 0xD0B1D0B1212223D0B1D0B1D0B1D0B1),
+ (0xD0B1222123D0B1D0B1D0B1D0B1D0B1, 0xD0B1D0B1222123D0B1D0B1D0B1D0B1);
+--error 1062
+insert t1 (c) values ('cc22');
+--error 1062
+insert t1 (t) values ('ttt22');
+--error 1062
+insert t1 (c) values (0xD0B1212322D0B1D0B1D0B1D0B1D0B1);
+--error 1062
+insert t1 (t) values (0xD0B1D0B1212322D0B1D0B1D0B1D0B1);
+select c from t1 where c='cccc';
+select t from t1 where t='tttt';
+select c from t1 where c=0xD0B1212223D0B1D0B1D0B1D0B1D0B1;
+select t from t1 where t=0xD0B1D0B1212223D0B1D0B1D0B1D0B1;
+drop table t1;
+
diff --git a/mysql-test/t/lowercase_table.test b/mysql-test/t/lowercase_table.test
index db46f3d432d..a9c0c976afc 100644
--- a/mysql-test/t/lowercase_table.test
+++ b/mysql-test/t/lowercase_table.test
@@ -4,6 +4,9 @@
--disable_warnings
drop table if exists t1,t2,t3,t4;
+# Clean up from other tests (to ensure that SHOW TABLES below is right)
+drop table if exists t0,t5,t6,t7,t8,t9;
+drop database if exists mysqltest;
--enable_warnings
create table T1 (id int primary key, Word varchar(40) not null, Index(Word));
@@ -32,6 +35,23 @@ select count(bags.a) from t1 as Bags;
drop table t1;
#
+# Test all caps database name
+#
+create database mysqltest;
+use MYSQLTEST;
+create table t1 (a int);
+select T1.a from MYSQLTEST.T1;
+select t1.a from MYSQLTEST.T1;
+select mysqltest.t1.* from MYSQLTEST.t1;
+select MYSQLTEST.t1.* from MYSQLTEST.t1;
+select MYSQLTEST.T1.* from MYSQLTEST.T1;
+select MYSQLTEST.T1.* from T1;
+alter table t1 rename to T1;
+select MYSQLTEST.t1.* from MYSQLTEST.t1;
+drop database mysqltest;
+use test;
+
+#
# multiupdate/delete & --lower-case-table-names
#
create table t1 (a int);
diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test
index cf55c26fb69..9580c1ab44c 100644
--- a/mysql-test/t/merge.test
+++ b/mysql-test/t/merge.test
@@ -271,3 +271,17 @@ select * from t3 where x = 1 and y < 5 order by y;
# Bug is that the following query returns an empty set while it must be the same as above
select * from t3 where x = 1 and y < 5 order by y desc;
drop table t1,t2,t3;
+
+#
+# Bug#5232: CREATE TABLE ... SELECT
+#
+
+create table t1 (a int);
+create table t2 (a int);
+insert into t1 values (0);
+insert into t2 values (1);
+--error 1093
+create table t3 engine=merge union=(t1, t2) select * from t1;
+--error 1093
+create table t3 engine=merge union=(t1, t2) select * from t2;
+drop table t1, t2;
diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test
index 298a8b1b61b..f9081e8769b 100644
--- a/mysql-test/t/myisam.test
+++ b/mysql-test/t/myisam.test
@@ -423,6 +423,18 @@ select * from t1 where a='807780' and b='477' and c='165';
drop table t1;
#
+# space-stripping in _mi_prefix_search: BUG#5284
+#
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a varchar(150) NOT NULL, KEY (a));
+INSERT t1 VALUES ("can \tcan");
+INSERT t1 VALUES ("can can");
+INSERT t1 VALUES ("can");
+SELECT * FROM t1;
+CHECK TABLE t1;
+DROP TABLE t1;
+
+#
# Verify blob handling
#
create table t1 (a blob);
@@ -502,3 +514,12 @@ alter table t1 disable keys;
show keys from t1;
drop table t1,t2;
+#
+# index search for NULL in blob. Bug #4816
+#
+create table t1 ( a tinytext, b char(1), index idx (a(1),b) );
+insert into t1 values (null,''), (null,'');
+explain select count(*) from t1 where a is null;
+select count(*) from t1 where a is null;
+drop table t1;
+
diff --git a/mysql-test/t/mysql_protocols.test b/mysql-test/t/mysql_protocols.test
new file mode 100644
index 00000000000..942ba2722d8
--- /dev/null
+++ b/mysql-test/t/mysql_protocols.test
@@ -0,0 +1,10 @@
+
+# test for Bug #4998 "--protocol doesn't reject bad values"
+
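+# valid protocol names should connect and print ' ok'; bogus or unsupported ones should fail (errors are caught with 2>&1)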
+--exec echo "select ' ok' as '<default>'" | $MYSQL
+--exec echo "select ' ok' as 'TCP'" | $MYSQL --protocol=TCP
+--exec echo "select ' ok' as 'SOCKET'" | $MYSQL --protocol=SOCKET
+--exec echo "select ' ok' as 'PIPE'" | $MYSQL --protocol=PIPE 2>&1
+--exec echo "select ' ok' as 'MEMORY'" | $MYSQL --protocol=MEMORY 2>&1
+--exec echo "select ' ok' as 'NullS'" | $MYSQL --protocol=NullS 2>&1
+
diff --git a/mysql-test/t/mysqlbinlog2.test b/mysql-test/t/mysqlbinlog2.test
new file mode 100644
index 00000000000..c6cff7558d4
--- /dev/null
+++ b/mysql-test/t/mysqlbinlog2.test
@@ -0,0 +1,156 @@
+# Test for the new options --start-datetime, --stop-datetime,
+# and a few others.
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+reset master;
+
+# We need this to get fixed timestamps inside this test.
+# I use a date in the future to keep a growing timestamp along the
+# binlog (including the Start_log_event). This test will work
+# unchanged everywhere, because mysql-test-run has a fixed TZ, which it
+# exports (so mysqlbinlog has the same fixed TZ).
+set @a=UNIX_TIMESTAMP("2020-01-21 15:32:22");
+set timestamp=@a;
+create table t1 (a int auto_increment not null primary key, b char(3));
+insert into t1 values(null, "a");
+insert into t1 values(null, "b");
+set timestamp=@a+2;
+insert into t1 values(null, "c");
+set timestamp=@a+4;
+insert into t1 values(null, "d");
+insert into t1 values(null, "e");
+
+flush logs;
+set timestamp=@a+1; # this could happen on a slave
+insert into t1 values(null, "f");
+
+# delimiters are for easier debugging in the future
+
+--disable_query_log
+select "--- Local --" as "";
+--enable_query_log
+
+#
+# We should use --short-form everywhere because otherwise the output will
+# be time dependent (the Start events). Better than nothing.
+#
+
+--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001
+
+--disable_query_log
+select "--- offset --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --offset=2 $MYSQL_TEST_DIR/var/log/master-bin.000001
+--disable_query_log
+select "--- start-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --start-position=497 $MYSQL_TEST_DIR/var/log/master-bin.000001
+--disable_query_log
+select "--- stop-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --stop-position=497 $MYSQL_TEST_DIR/var/log/master-bin.000001
+--disable_query_log
+select "--- start-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--start-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001
+--disable_query_log
+select "--- stop-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001
+
+--disable_query_log
+select "--- Local with 2 binlogs on command line --" as "";
+--enable_query_log
+
+# This is to verify that some options apply only to the first or the last binlog
+
+--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002
+
+--disable_query_log
+select "--- offset --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --offset=2 $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002
+--disable_query_log
+select "--- start-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --start-position=497 $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002
+--disable_query_log
+select "--- stop-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --stop-position=32 $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002
+--disable_query_log
+select "--- start-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--start-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002
+--disable_query_log
+select "--- stop-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002
+
+--disable_query_log
+select "--- Remote --" as "";
+--enable_query_log
+
+--exec $MYSQL_BINLOG --short-form --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+
+--disable_query_log
+select "--- offset --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --offset=2 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+--disable_query_log
+select "--- start-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --start-position=497 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+--disable_query_log
+select "--- stop-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --stop-position=497 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+--disable_query_log
+select "--- start-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--start-datetime=2020-01-21 15:32:24" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+--disable_query_log
+select "--- stop-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020-01-21 15:32:24" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+
+--disable_query_log
+select "--- Remote with 2 binlogs on command line --" as "";
+--enable_query_log
+
+--exec $MYSQL_BINLOG --short-form --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+
+--disable_query_log
+select "--- offset --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --offset=2 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+--disable_query_log
+select "--- start-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --start-position=497 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+--disable_query_log
+select "--- stop-position --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form --stop-position=32 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+--disable_query_log
+select "--- start-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--start-datetime=20200121153224" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+--disable_query_log
+select "--- stop-datetime --" as "";
+--enable_query_log
+--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020/01/21 15@32@24" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+
+--disable_query_log
+select "--- to-last-log --" as "";
+--enable_query_log
+
+--exec $MYSQL_BINLOG --short-form --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --to-last-log master-bin.000001
+
+# clean up
+--disable_query_log
+select "--- end of test --" as "";
+--enable_query_log
+drop table t1;
diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test
index f95aa82b7cc..ffcd0b99745 100644
--- a/mysql-test/t/ndb_alter_table.test
+++ b/mysql-test/t/ndb_alter_table.test
@@ -29,13 +29,23 @@ col2 varchar(30) not null,
col3 varchar (20) not null,
col4 varchar(4) not null,
col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null,
-col6 int not null, to_be_deleted int);
-insert into t1 values (2,4,3,5,"PENDING",1,7);
+col6 int not null, to_be_deleted int) ENGINE=ndbcluster;
+show table status;
+insert into t1 values
+(0,4,3,5,"PENDING",1,7),(NULL,4,3,5,"PENDING",1,7),(31,4,3,5,"PENDING",1,7), (7,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7), (100,4,3,5,"PENDING",1,7), (99,4,3,5,"PENDING",1,7), (8,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7);
+show table status;
+select * from t1 order by col1;
alter table t1
add column col4_5 varchar(20) not null after col4,
add column col7 varchar(30) not null after col5,
add column col8 datetime not null, drop column to_be_deleted,
change column col2 fourth varchar(30) not null after col3,
modify column col6 int not null first;
-select * from t1;
+show table status;
+select * from t1 order by col1;
+insert into t1 values (2, NULL,4,3,5,99,"PENDING","EXTRA",'2004-01-01 00:00:00');
+show table status;
+select * from t1 order by col1;
drop table t1;
+
+
diff --git a/mysql-test/t/ndb_autodiscover.test b/mysql-test/t/ndb_autodiscover.test
index d04599f223e..371a130291b 100644
--- a/mysql-test/t/ndb_autodiscover.test
+++ b/mysql-test/t/ndb_autodiscover.test
@@ -50,7 +50,7 @@ flush tables;
system rm var/master-data/test/t1.frm ;
update t1 set name="Autodiscover" where id = 2;
show status like 'handler_discover%';
-select * from t1 order by name;
+select * from t1 order by id;
show status like 'handler_discover%';
#
diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test
index 271357ed561..0487e18cdb9 100644
--- a/mysql-test/t/ndb_basic.test
+++ b/mysql-test/t/ndb_basic.test
@@ -2,6 +2,7 @@
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
+drop database if exists mysqltest;
--enable_warnings
#
@@ -14,31 +15,34 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
#
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
- attr1 INT NOT NULL
+ attr1 INT NOT NULL,
+ attr2 INT,
+ attr3 VARCHAR(10)
) ENGINE=ndbcluster;
-INSERT INTO t1 VALUES (9410,9412);
+INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413');
-SELECT pk1 FROM t1;
-SELECT * FROM t1;
-SELECT t1.* FROM t1;
+SELECT pk1 FROM t1 ORDER BY pk1;
+SELECT * FROM t1 ORDER BY pk1;
+SELECT t1.* FROM t1 ORDER BY pk1;
# Update on record by primary key
UPDATE t1 SET attr1=1 WHERE pk1=9410;
-SELECT * FROM t1;
+SELECT * FROM t1 ORDER BY pk1;
-# Can't UPDATE PK! Test that correct error is returned
--- error 1112
+# Update primary key
UPDATE t1 SET pk1=2 WHERE attr1=1;
-SELECT * FROM t1;
+SELECT * FROM t1 ORDER BY pk1;
+UPDATE t1 SET pk1=pk1 + 1;
+SELECT * FROM t1 ORDER BY pk1;
# Delete the record
DELETE FROM t1;
SELECT * FROM t1;
# Insert more records and update them all at once
-INSERT INTO t1 VALUES (9410,9412), (9411, 9413), (9408, 8765),
-(7,8), (8,9), (9,10), (10,11), (11,12), (12,13), (13,14);
+INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9408, 8765, NULL, '8765'),
+(7,8, NULL, NULL), (8,9, NULL, NULL), (9,10, NULL, NULL), (10,11, NULL, NULL), (11,12, NULL, NULL), (12,13, NULL, NULL), (13,14, NULL, NULL);
UPDATE t1 SET attr1 = 9999;
SELECT * FROM t1 ORDER BY pk1;
@@ -58,13 +62,13 @@ SELECT * FROM t1;
# Insert three records with attr1=4 and two with attr1=5
# Delete all with attr1=4
-INSERT INTO t1 values (1, 4), (2, 4), (3, 5), (4, 4), (5, 5);
+INSERT INTO t1 values (1, 4, NULL, NULL), (2, 4, NULL, NULL), (3, 5, NULL, NULL), (4, 4, NULL, NULL), (5, 5, NULL, NULL);
DELETE FROM t1 WHERE attr1=4;
SELECT * FROM t1 order by pk1;
DELETE FROM t1;
# Insert two records and delete one
-INSERT INTO t1 VALUES (9410,9412), (9411, 9413);
+INSERT INTO t1 VALUES (9410,9412, NULL, NULL), (9411, 9413, NULL, NULL);
DELETE FROM t1 WHERE pk1 = 9410;
SELECT * FROM t1;
DROP TABLE t1;
@@ -82,9 +86,10 @@ UPDATE t1 SET id=1234 WHERE id2=7890;
SELECT * FROM t1;
DELETE FROM t1;
-INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890);
-SELECT * FROM t1;
+INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890), (3454, 7890);
+SELECT * FROM t1 ORDER BY id;
DELETE FROM t1 WHERE id = 3456;
+SELECT * FROM t1 ORDER BY id;
DROP TABLE t1;
@@ -216,6 +221,12 @@ while ($1)
}
enable_query_log;
+delete from t2 where a > 5;
+select x1.a, x1.b from t2 x1, t2 x2 where x1.b = x2.b order by x1.a;
+select a, b FROM t2 outer_table where
+a = (select a from t2 where b = outer_table.b ) order by a;
+
+
delete from t2;
delete from t3;
delete from t4;
@@ -315,3 +326,34 @@ delete from t7 where b=23;
select * from t7;
drop table t7;
+
+#
+# Test multiple databases in one statement
+#
+
+CREATE TABLE t1 (
+ pk1 INT NOT NULL PRIMARY KEY,
+ attr1 INT NOT NULL,
+ attr2 INT,
+ attr3 VARCHAR(10)
+) ENGINE=ndbcluster;
+
+INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413');
+
+create database mysqltest;
+use mysqltest;
+
+CREATE TABLE t2 (
+ a bigint unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned
+) engine=ndbcluster;
+
+insert into t2 select pk1,attr1,attr2 from test.t1;
+select * from t2 order by a;
+select b from test.t1, t2 where c = test.t1.attr2;
+select b,test.t1.attr1 from test.t1, t2 where test.t1.pk1 < a;
+
+drop table test.t1, t2;
+drop database mysqltest;
+
diff --git a/mysql-test/t/ndb_blob.test b/mysql-test/t/ndb_blob.test
new file mode 100644
index 00000000000..36c823bda41
--- /dev/null
+++ b/mysql-test/t/ndb_blob.test
@@ -0,0 +1,275 @@
+--source include/have_ndb.inc
+
+--disable_warnings
+drop table if exists t1;
+drop database if exists mysqltest;
+--enable_warnings
+
+#
+# Minimal NDB blobs test.
+#
+# On NDB API level there is an extensive test program "testBlobs".
+# A prerequisite for this handler test is that "testBlobs" succeeds.
+#
+
+# make test harder with autocommit off
+set autocommit=0;
+
+create table t1 (
+ a int not null primary key,
+ b text not null,
+ c int not null,
+ d longblob,
+ key (c)
+) engine=ndbcluster;
+
+# -- values --
+
+# x0 size 256 (current inline size)
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+
+# b1 length 2000+256 (blob part aligned)
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+# d1 length 3000
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+
+# b2 length 20000
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+# d2 length 30000
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+
+select length(@x0),length(@b1),length(@d1) from dual;
+select length(@x0),length(@b2),length(@d2) from dual;
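+# expected lengths: 256, 2256, 3000 and 256, 20000, 30000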
+
+# -- pk ops --
+
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1 where a = 1;
+
+# pk read
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a=1;
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where a=2;
+
+# pk update
+update t1 set b=@b2,d=@d2 where a=1;
+update t1 set b=@b1,d=@d1 where a=2;
+commit;
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where a=1;
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a=2;
+
+# pk update
+update t1 set b=concat(b,b),d=concat(d,d) where a=1;
+update t1 set b=concat(b,b),d=concat(d,d) where a=2;
+commit;
+select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
+from t1 where a=1;
+select a,length(b),substr(b,1+4*900,2),length(d),substr(d,1+6*900,3)
+from t1 where a=2;
+
+# pk update to null
+update t1 set d=null where a=1;
+commit;
+select a from t1 where d is null;
+
+# pk delete
+delete from t1 where a=1;
+delete from t1 where a=2;
+commit;
+select count(*) from t1;
+
+# -- hash index ops --
+
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1 where c = 111;
+
+# hash key read
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where c=111;
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where c=222;
+
+# hash key update
+update t1 set b=@b2,d=@d2 where c=111;
+update t1 set b=@b1,d=@d1 where c=222;
+commit;
+select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
+from t1 where c=111;
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where c=222;
+
+# hash key update to null
+update t1 set d=null where c=111;
+commit;
+select a from t1 where d is null;
+
+# hash key delete
+delete from t1 where c=111;
+delete from t1 where c=222;
+commit;
+select count(*) from t1;
+
+# -- table scan ops, short values --
+
+insert into t1 values(1,'b1',111,'dd1');
+insert into t1 values(2,'b2',222,'dd2');
+insert into t1 values(3,'b3',333,'dd3');
+insert into t1 values(4,'b4',444,'dd4');
+insert into t1 values(5,'b5',555,'dd5');
+insert into t1 values(6,'b6',666,'dd6');
+insert into t1 values(7,'b7',777,'dd7');
+insert into t1 values(8,'b8',888,'dd8');
+insert into t1 values(9,'b9',999,'dd9');
+commit;
+explain select * from t1;
+
+# table scan read
+select * from t1 order by a;
+
+# table scan update
+update t1 set b=concat(a,'x',b),d=concat(a,'x',d);
+commit;
+select * from t1 order by a;
+
+# table scan delete
+delete from t1;
+commit;
+select count(*) from t1;
+
+# -- table scan ops, long values --
+
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1;
+
+# table scan read
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 order by a;
+
+# table scan update
+update t1 set b=concat(b,b),d=concat(d,d);
+commit;
+select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
+from t1 order by a;
+
+# table scan delete
+delete from t1;
+commit;
+select count(*) from t1;
+
+# -- range scan ops, short values --
+
+insert into t1 values(1,'b1',111,'dd1');
+insert into t1 values(2,'b2',222,'dd2');
+insert into t1 values(3,'b3',333,'dd3');
+insert into t1 values(4,'b4',444,'dd4');
+insert into t1 values(5,'b5',555,'dd5');
+insert into t1 values(6,'b6',666,'dd6');
+insert into t1 values(7,'b7',777,'dd7');
+insert into t1 values(8,'b8',888,'dd8');
+insert into t1 values(9,'b9',999,'dd9');
+commit;
+explain select * from t1 where c >= 100 order by a;
+
+# range scan read
+select * from t1 where c >= 100 order by a;
+
+# range scan update
+update t1 set b=concat(a,'x',b),d=concat(a,'x',d)
+where c >= 100;
+commit;
+select * from t1 where c >= 100 order by a;
+
+# alter table
+
+select * from t1 order by a;
+alter table t1 add x int;
+select * from t1 order by a;
+alter table t1 drop x;
+select * from t1 order by a;
+
+# multi db
+
+create database mysqltest;
+use mysqltest;
+
+CREATE TABLE t2 (
+ a bigint unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned
+) engine=ndbcluster;
+
+insert into t2 values (1,1,1),(2,2,2);
+select * from test.t1,t2 where test.t1.a = t2.a order by test.t1.a;
+
+drop table t2;
+use test;
+
+# range scan delete
+delete from t1 where c >= 100;
+commit;
+select count(*) from t1;
+
+# -- range scan ops, long values --
+
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+commit;
+explain select * from t1 where c >= 100 order by a;
+
+# range scan read
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where c >= 100 order by a;
+
+# range scan update
+update t1 set b=concat(b,b),d=concat(d,d);
+commit;
+select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
+from t1 where c >= 100 order by a;
+
+# range scan delete
+delete from t1 where c >= 100;
+commit;
+select count(*) from t1;
+
+# -- rollback --
+
+insert into t1 values(1,@b1,111,@d1);
+insert into t1 values(2,@b2,222,@d2);
+# 626
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a = 0;
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a = 1;
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 where a = 2;
+select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
+from t1 order by a;
+rollback;
+select count(*) from t1;
+drop table t1;
+drop database mysqltest;
diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test
index feb4476f5e7..00807bfcb98 100644
--- a/mysql-test/t/ndb_index_ordered.test
+++ b/mysql-test/t/ndb_index_ordered.test
@@ -42,7 +42,11 @@ update t1 set c = 12 where b > 0;
select * from t1 order by a;
update t1 set c = 13 where b <= 3;
select * from t1 order by a;
-
+update t1 set b = b + 1 where b > 4 and b < 7;
+select * from t1 order by a;
+# Update primary key
+update t1 set a = a + 10 where b > 1 and b < 7;
+select * from t1 order by a;
#
# Delete using ordered index scan
@@ -109,4 +113,31 @@ select * from t1 where b<=5 and c=0;
select * from t1 where b=4 and c<=5 order by a;
select * from t1 where b<=4 and c<=5 order by a;
select * from t1 where b<=5 and c=0 or b<=5 and c=2;
+
+select count(*) from t1 where b = 0;
+select count(*) from t1 where b = 1;
+drop table t1;
+
+#
+# Indexing NULL values
+#
+
+CREATE TABLE t1 (
+ a int unsigned NOT NULL PRIMARY KEY,
+ b int unsigned,
+ c int unsigned,
+ KEY bc(b,c)
+) engine = ndb;
+
+insert into t1 values(1,1,1),(2,NULL,2),(3,NULL,NULL),(4,4,NULL);
+select * from t1 use index (bc) where b IS NULL;
+
+select * from t1 use index (bc)order by a;
+select * from t1 use index (bc) order by a;
+select * from t1 use index (PRIMARY) where b IS NULL order by a;
+select * from t1 use index (bc) where b IS NULL order by a;
+select * from t1 use index (bc) where b IS NULL and c IS NULL order by a;
+select * from t1 use index (bc) where b IS NULL and c = 2 order by a;
+select * from t1 use index (bc) where b < 4 order by a;
+select * from t1 use index (bc) where b IS NOT NULL order by a;
drop table t1;
diff --git a/mysql-test/t/ndb_index_unique.test b/mysql-test/t/ndb_index_unique.test
index 7cfc9a77452..4a0c689bafb 100644
--- a/mysql-test/t/ndb_index_unique.test
+++ b/mysql-test/t/ndb_index_unique.test
@@ -21,6 +21,13 @@ select * from t1 where b = 4 order by b;
insert into t1 values(7,8,3);
select * from t1 where b = 4 order by a;
+-- error 1169
+insert into t1 values(8, 2, 3);
+select * from t1 order by a;
+delete from t1 where a = 1;
+insert into t1 values(8, 2, 3);
+select * from t1 order by a;
+
drop table t1;
@@ -42,6 +49,13 @@ select * from t2 where c = 6;
insert into t2 values(7,8,3);
select * from t2 where b = 4 order by a;
+-- error 1169
+insert into t2 values(8, 2, 3);
+select * from t2 order by a;
+delete from t2 where a = 1;
+insert into t2 values(8, 2, 3);
+select * from t2 order by a;
+
drop table t2;
#
@@ -65,6 +79,48 @@ select * from t3 where b = 4 order by a;
drop table t3;
#
+# Indexes on NULL-able columns
+#
+
+CREATE TABLE t1 (
+ pk int NOT NULL PRIMARY KEY,
+ a int unsigned,
+ UNIQUE KEY (a)
+) engine=ndbcluster;
+
+insert into t1 values (-1,NULL), (0,0), (1,NULL),(2,2),(3,NULL),(4,4);
+
+select * from t1 order by pk;
+
+--error 1169
+insert into t1 values (5,0);
+select * from t1 order by pk;
+delete from t1 where a = 0;
+insert into t1 values (5,0);
+select * from t1 order by pk;
+
+CREATE TABLE t2 (
+ pk int NOT NULL PRIMARY KEY,
+ a int unsigned,
+ b tinyint NOT NULL,
+ c VARCHAR(10),
+ UNIQUE KEY si(a, c)
+) engine=ndbcluster;
+
+insert into t2 values (-1,1,17,NULL),(0,NULL,18,NULL),(1,3,19,'abc');
+
+select * from t2 order by pk;
+
+--error 1169
+insert into t2 values(2,3,19,'abc');
+select * from t2 order by pk;
+delete from t2 where c IS NOT NULL;
+insert into t2 values(2,3,19,'abc');
+select * from t2 order by pk;
+
+drop table t1, t2;
+
+#
# More complex tables
#
@@ -78,8 +134,10 @@ INSERT INTO t1 VALUES (8,'dummy');
CREATE TABLE t2 (
cid bigint(20) unsigned NOT NULL auto_increment,
cap varchar(255) NOT NULL default '',
- PRIMARY KEY (cid)
+ PRIMARY KEY (cid),
+ UNIQUE KEY (cid, cap)
) engine=ndbcluster;
+INSERT INTO t2 VALUES (NULL,'another dummy');
CREATE TABLE t3 (
gid bigint(20) unsigned NOT NULL auto_increment,
gn varchar(255) NOT NULL default '',
@@ -134,6 +192,7 @@ INSERT INTO t7 VALUES(10, 5, 1, 1, 10);
select * from t1 where cv = 'dummy';
select * from t1 where cv = 'test';
+select * from t2 where cap = 'another dummy';
select * from t4 where uid = 1 and gid=1 and rid=2 and cid=4;
select * from t4 where uid = 1 and gid=1 and rid=1 and cid=4;
select * from t4 where uid = 1 order by cid;
diff --git a/mysql-test/t/ndb_limit.test b/mysql-test/t/ndb_limit.test
new file mode 100644
index 00000000000..b0b6f3c4f17
--- /dev/null
+++ b/mysql-test/t/ndb_limit.test
@@ -0,0 +1,44 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+
+CREATE TABLE t2 (
+ a bigint unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned
+) engine=ndbcluster;
+
+
+#
+# insert records into table
+#
+let $1=1000;
+disable_query_log;
+while ($1)
+{
+ eval insert into t2 values($1*10, $1+9, 5*$1), ($1*10+1, $1+10, 7),($1*10+2, $1+10, 7*$1), ($1*10+3, $1+10, 10+$1), ($1*10+4, $1+10, 70*$1), ($1*10+5, $1+10, 7), ($1*10+6, $1+10, 9), ($1*10+7, $1+299, 899), ($1*10+8, $1+10, 12), ($1*10+9, $1+10, 14*$1);
+ dec $1;
+}
+enable_query_log;
+
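+# 1000 loop iterations inserting 10 rows each, so the count should be 10000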
+select count(*) from t2;
+
+delete from t2 limit 1;
+select count(*) from t2;
+
+delete from t2 limit 100;
+select count(*) from t2;
+
+delete from t2 limit 1000;
+select count(*) from t2;
+
+update t2 set c=12345678 limit 100;
+select count(*) from t2 where c=12345678;
+select count(*) from t2 where c=12345678 limit 1000;
+
+select * from t2 limit 0;
+
+drop table t2;
diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test
new file mode 100644
index 00000000000..c0389dced44
--- /dev/null
+++ b/mysql-test/t/ndb_lock.test
@@ -0,0 +1,41 @@
+-- source include/have_ndb.inc
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
+--enable_warnings
+
+#
+# Transaction lock test to show that the NDB
+# table handler is working properly with
+# transaction locks
+#
+
+#
+# Testing of scan isolation
+#
+connection con1;
+create table t1 (x integer not null primary key, y varchar(32)) engine = ndb;
+insert into t1 values (1,'one'), (2,'two');
+select * from t1 order by x;
+
+connection con2;
+select * from t1 order by x;
+
+connection con1;
+start transaction;
+insert into t1 values (3,'three');
+select * from t1 order by x;
+
+connection con2;
+start transaction;
+select * from t1 order by x;
+
+connection con1;
+commit;
+
+connection con2;
+select * from t1 order by x;
+commit;
diff --git a/mysql-test/t/ndb_replace.test b/mysql-test/t/ndb_replace.test
index 8ba332fc7af..59454b5a9fa 100644
--- a/mysql-test/t/ndb_replace.test
+++ b/mysql-test/t/ndb_replace.test
@@ -20,7 +20,7 @@ replace into t1 (gesuchnr,benutzer_id) values (1,1);
insert into t1 (gesuchnr, benutzer_id) value (3,2);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
---error 1022
+--error 1062
insert into t1 (gesuchnr,benutzer_id) values (1,1);
replace into t1 (gesuchnr,benutzer_id) values (1,1);
select * from t1 order by gesuchnr;
diff --git a/mysql-test/t/ndb_transaction.test b/mysql-test/t/ndb_transaction.test
new file mode 100644
index 00000000000..f8ed22207ea
--- /dev/null
+++ b/mysql-test/t/ndb_transaction.test
@@ -0,0 +1,297 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
+drop database if exists mysqltest;
+--enable_warnings
+
+#
+# Transaction test to show that the NDB
+# table handler is working properly with
+# transactions
+#
+
+#
+# Create a normal table with primary key
+#
+CREATE TABLE t1 (
+ pk1 INT NOT NULL PRIMARY KEY,
+ attr1 INT NOT NULL
+) ENGINE=ndbcluster;
+
+# insert
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select t1.attr1 from t1, t1 as t1x where t1.pk1 = t1x.pk1 + 1;
+rollback;
+
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select t1.attr1 from t1, t1 as t1x where t1.pk1 = t1x.pk1 + 1;
+
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+commit;
+
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select t1.attr1 from t1, t1 as t1x where t1.pk1 = t1x.pk1 + 1;
+
+# update
+begin;
+update t1 set attr1 = attr1 * 2;
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+rollback;
+
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+
+begin;
+update t1 set attr1 = attr1 * 2;
+commit;
+
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+
+# delete
+begin;
+delete from t1 where attr1 = 2;
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+rollback;
+
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+
+begin;
+delete from t1 where attr1 = 2;
+commit;
+
+select count(*) from t1;
+select * from t1 where pk1 = 1;
+select * from t1, t1 as t1x where t1x.attr1 = t1.attr1 - 2;
+
+DROP TABLE t1;
+
+#
+# Create a table without a primary key;
+# a hidden primary key column is created by the handler
+#
+CREATE TABLE t1 (id INT, id2 int) engine=ndbcluster;
+
+# insert
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+select sum(id) from t1;
+select * from t1 where id = 1;
+select t1.id from t1, t1 as t1x where t1.id2 = t1x.id2 + 1;
+rollback;
+
+select sum(id) from t1;
+select * from t1 where id = 1;
+select t1.id from t1, t1 as t1x where t1.id2 = t1x.id2 + 1;
+
+begin;
+insert into t1 values(1,1);
+insert into t1 values(2,2);
+commit;
+
+select sum(id) from t1;
+select * from t1 where id = 1;
+select t1.id from t1, t1 as t1x where t1.id2 = t1x.id2 + 1;
+
+# update
+begin;
+update t1 set id = id * 2;
+select sum(id) from t1;
+select * from t1 where id = 2;
+select * from t1, t1 as t1x where t1x.id = t1.id - 2;
+rollback;
+
+select sum(id) from t1;
+select * from t1 where id = 2;
+select * from t1, t1 as t1x where t1x.id = t1.id - 2;
+
+begin;
+update t1 set id = id * 2;
+commit;
+
+select sum(id) from t1;
+select * from t1 where id = 2;
+select * from t1, t1 as t1x where t1x.id = t1.id - 2;
+
+# delete
+
+DROP TABLE t1;
+
+#
+# A more extensive test with a lot more records
+#
+
+CREATE TABLE t2 (
+ a bigint unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned
+) engine=ndbcluster;
+
+CREATE TABLE t3 (
+ a bigint unsigned NOT NULL,
+ b bigint unsigned not null,
+ c bigint unsigned,
+ PRIMARY KEY(a)
+) engine=ndbcluster;
+
+CREATE TABLE t4 (
+ a bigint unsigned NOT NULL,
+ b bigint unsigned not null,
+ c bigint unsigned NOT NULL,
+ d int unsigned,
+ PRIMARY KEY(a, b, c)
+) engine=ndbcluster;
+
+
+#
+# insert records into tables and rollback
+#
+let $1=100;
+disable_query_log;
+begin;
+while ($1)
+{
+ eval insert into t2 values($1, $1+9, 5);
+ eval insert into t3 values($1, $1+9, 5);
+ eval insert into t4 values($1, $1+9, 5, $1+26000);
+ dec $1;
+}
+rollback;
+enable_query_log;
+
+select count(*) from t2;
+select count(*) from t3;
+select count(*) from t4;
+
+#
+# insert records into tables and commit;
+#
+let $1=100;
+disable_query_log;
+begin;
+while ($1)
+{
+ eval insert into t2 values($1, $1+9, 5);
+ eval insert into t3 values($1, $1+9, 5);
+ eval insert into t4 values($1, $1+9, 5, $1+26000);
+ dec $1;
+}
+commit;
+enable_query_log;
+
+select count(*) from t2;
+select count(*) from t3;
+select count(*) from t4;
+
+#
+# delete every other record in the tables
+#
+let $1=100;
+disable_query_log;
+while ($1)
+{
+ eval delete from t2 where a=$1;
+ eval delete from t3 where a=$1;
+ eval delete from t4 where a=$1 and b=$1+9 and c=5;
+ dec $1;
+ dec $1;
+}
+enable_query_log;
+
+#
+# update records and rollback
+#
+begin;
+let $1=100;
+disable_query_log;
+while ($1)
+{
+ eval update t2 set c=$1 where a=$1;
+ eval update t3 set c=7 where a=$1 and b=$1+9 and c=5;
+ eval update t4 set d=$1+21987 where a=$1 and b=$1+9 and c=5;
+ dec $1;
+ dec $1;
+}
+rollback;
+enable_query_log;
+
+#
+# update records and commit
+#
+begin;
+let $1=100;
+disable_query_log;
+while ($1)
+{
+ eval update t2 set c=$1 where a=$1;
+ eval update t3 set c=7 where a=$1 and b=$1+9 and c=5;
+ eval update t4 set d=$1+21987 where a=$1 and b=$1+9 and c=5;
+ dec $1;
+ dec $1;
+}
+rollback;
+enable_query_log;
+
+drop table t2;
+drop table t3;
+drop table t4;
+
+#
+# Test multiple databases in one transaction
+#
+
+CREATE TABLE t1 (
+ pk1 INT NOT NULL PRIMARY KEY,
+ attr1 INT NOT NULL
+) ENGINE=ndbcluster;
+
+create database mysqltest;
+use mysqltest;
+
+CREATE TABLE t2 (
+ a bigint unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned
+) engine=ndbcluster;
+
+begin;
+insert into test.t1 values(1,1);
+insert into t2 values(1,1,1);
+insert into test.t1 values(2,2);
+insert into t2 values(2,2,2);
+select count(*) from test.t1;
+select count(*) from t2;
+select * from test.t1 where pk1 = 1;
+select * from t2 where a = 1;
+select test.t1.attr1
+from test.t1, test.t1 as t1x where test.t1.pk1 = t1x.pk1 + 1;
+select t2.a
+from t2, t2 as t2x where t2.a = t2x.a + 1;
+select test.t1.pk1, a from test.t1,t2 where b > test.t1.attr1;
+rollback;
+
+select count(*) from test.t1;
+select count(*) from t2;
+
+drop table test.t1, t2;
+drop database mysqltest;
+
+
diff --git a/mysql-test/t/ndb_truncate.test b/mysql-test/t/ndb_truncate.test
new file mode 100644
index 00000000000..63bb8cbefb6
--- /dev/null
+++ b/mysql-test/t/ndb_truncate.test
@@ -0,0 +1,33 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+
+CREATE TABLE t2 (
+ a bigint unsigned NOT NULL PRIMARY KEY,
+ b int unsigned not null,
+ c int unsigned
+) engine=ndbcluster;
+
+
+#
+# insert records into table
+#
+let $1=500;
+disable_query_log;
+while ($1)
+{
+ eval insert into t2 values($1*10, $1+9, 5*$1), ($1*10+1, $1+10, 7),($1*10+2, $1+10, 7*$1), ($1*10+3, $1+10, 10+$1), ($1*10+4, $1+10, 70*$1), ($1*10+5, $1+10, 7), ($1*10+6, $1+10, 9), ($1*10+7, $1+299, 899), ($1*10+8, $1+10, 12), ($1*10+9, $1+10, 14*$1);
+ dec $1;
+}
+enable_query_log;
+
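+# 500 loop iterations inserting 10 rows each give 5000 rows before TRUNCATE, 0 after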
+select count(*) from t2;
+
+truncate table t2;
+
+select count(*) from t2;
+
+drop table t2;
diff --git a/mysql-test/t/ndb_types.test b/mysql-test/t/ndb_types.test
new file mode 100644
index 00000000000..d9f50c8b3fc
--- /dev/null
+++ b/mysql-test/t/ndb_types.test
@@ -0,0 +1,47 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+#
+# Test creation of different column types in NDB
+#
+
+CREATE TABLE t1 (
+ auto int(5) unsigned NOT NULL auto_increment,
+ string char(10) default "hello",
+ tiny tinyint(4) DEFAULT '0' NOT NULL ,
+ short smallint(6) DEFAULT '1' NOT NULL ,
+ medium mediumint(8) DEFAULT '0' NOT NULL,
+ long_int int(11) DEFAULT '0' NOT NULL,
+ longlong bigint(13) DEFAULT '0' NOT NULL,
+ real_float float(13,1) DEFAULT 0.0 NOT NULL,
+ real_double double(16,4),
+ utiny tinyint(3) unsigned DEFAULT '0' NOT NULL,
+ ushort smallint(5) unsigned zerofill DEFAULT '00000' NOT NULL,
+ umedium mediumint(8) unsigned DEFAULT '0' NOT NULL,
+ ulong int(11) unsigned DEFAULT '0' NOT NULL,
+ ulonglong bigint(13) unsigned DEFAULT '0' NOT NULL,
+ time_stamp timestamp,
+ date_field date,
+ time_field time,
+ date_time datetime,
+ options enum('one','two','tree') not null,
+ flags set('one','two','tree') not null,
+ PRIMARY KEY (auto),
+ KEY (utiny),
+ KEY (tiny),
+ KEY (short),
+ KEY any_name (medium),
+ KEY (longlong),
+ KEY (real_float),
+ KEY (ushort),
+ KEY (umedium),
+ KEY (ulong),
+ KEY (ulonglong,ulong),
+ KEY (options,flags)
+);
+
+
+drop table t1;
diff --git a/mysql-test/t/negation_elimination.test b/mysql-test/t/negation_elimination.test
index 49428cc238b..c50a9678edb 100644
--- a/mysql-test/t/negation_elimination.test
+++ b/mysql-test/t/negation_elimination.test
@@ -65,4 +65,8 @@ select * from t1 where not((a < 5 and a < 10) and (not(a > 16) or a > 17));
explain select * from t1 where ((a between 5 and 15) and (not(a like 10)));
select * from t1 where ((a between 5 and 15) and (not(a like 10)));
+delete from t1 where a > 3;
+select a, not(not(a)) from t1;
+explain extended select a, not(not(a)), not(a <= 2 and not(a)), not(a not like "1"), not (a not in (1,2)), not(a != 2) from t1 where not(not(a)) having not(not(a));
+
drop table t1;
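The explain extended added above exercises the optimizer's negation-elimination pass: double negations and negated predicates are rewritten into their positive forms (not(a not like "1") into a like "1", not(a not in (1,2)) into a in (1,2), not(a != 2) into a = 2, and so on), which keeps the index on the column usable. A minimal standalone sketch of the idea; the table name tn is purely illustrative, and after the rewrite both explains below should show the same plan:

  create table tn (a int, key(a));
  insert into tn values (1),(2),(3);
  explain select * from tn where not (a != 2);
  explain select * from tn where a = 2;
  drop table tn;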
diff --git a/mysql-test/t/null.test b/mysql-test/t/null.test
index 9f3b6646e7f..7d30fd06ba7 100644
--- a/mysql-test/t/null.test
+++ b/mysql-test/t/null.test
@@ -98,3 +98,24 @@ explain select * from t1 where a between 2 and 3;
explain select * from t1 where a between 2 and 3 or b is null;
drop table t1;
select cast(NULL as signed);
+
+#
+# Test case for bug #4256
+#
+
+create table t1(i int, key(i));
+insert into t1 values(1);
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+insert into t1 select i*2 from t1;
+explain select * from t1 where i=2 or i is null;
+alter table t1 change i i int not null;
+explain select * from t1 where i=2 or i is null;
+drop table t1;
+
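What the two explains above contrast, informally: while i is nullable, a condition of the form i = const OR i IS NULL can still be served from the index (MySQL's ref_or_null style of lookup); once the column is declared NOT NULL, the IS NULL branch can never match, so the plan should collapse to a plain equality lookup. A standalone sketch of the same check, with the table name t_null used purely for illustration:

  create table t_null (i int, key(i));
  insert into t_null values (1),(2),(4),(8);
  explain select * from t_null where i = 2 or i is null;
  alter table t_null change i i int not null;
  explain select * from t_null where i = 2 or i is null;
  drop table t_null;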
diff --git a/mysql-test/t/olap.test b/mysql-test/t/olap.test
index 7443aeee6f4..674b4ade097 100644
--- a/mysql-test/t/olap.test
+++ b/mysql-test/t/olap.test
@@ -88,3 +88,40 @@ INSERT INTO t2 VALUES (100),(200);
SELECT i, COUNT(*) FROM t1 GROUP BY i WITH ROLLUP;
SELECT t1.i, t2.i, COUNT(*) FROM t1,t2 GROUP BY t1.i,t2.i WITH ROLLUP;
drop table t1,t2;
+
+# Bug #4767: ROLLUP with LEFT JOIN
+
+CREATE TABLE user_day(
+ user_id INT NOT NULL,
+ date DATE NOT NULL,
+ UNIQUE INDEX user_date (user_id, date)
+);
+
+INSERT INTO user_day VALUES
+ (1, '2004-06-06' ),
+ (1, '2004-06-07' ),
+ (2, '2004-06-06' );
+
+SELECT
+ d.date AS day,
+ COUNT(d.user_id) as sample,
+ COUNT(next_day.user_id) AS not_cancelled
+ FROM user_day d
+ LEFT JOIN user_day next_day
+ ON next_day.user_id=d.user_id AND
+ next_day.date= DATE_ADD( d.date, interval 1 day )
+ GROUP BY day;
+
+SELECT
+ d.date AS day,
+ COUNT(d.user_id) as sample,
+ COUNT(next_day.user_id) AS not_cancelled
+ FROM user_day d
+ LEFT JOIN user_day next_day
+ ON next_day.user_id=d.user_id AND
+ next_day.date= DATE_ADD( d.date, interval 1 day )
+ GROUP BY day
+ WITH ROLLUP;
+
+DROP TABLE user_day;
+
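For context, WITH ROLLUP appends super-aggregate rows (NULL in the grouped column) to the ordinary GROUP BY result; the regression above presumably failed when the grouped query was itself a LEFT JOIN. A tiny self-contained illustration of the extra rollup row, with the table name r_demo used only for this sketch:

  create table r_demo (grp int, val int);
  insert into r_demo values (1,10),(1,20),(2,5);
  select grp, sum(val) from r_demo group by grp with rollup;
  -- expected shape: (1,30), (2,5), plus a final super-aggregate row (NULL,35)
  drop table r_demo;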
diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test
index 465920deaed..5131bb8c8b8 100644
--- a/mysql-test/t/order_by.test
+++ b/mysql-test/t/order_by.test
@@ -97,8 +97,8 @@ create table t1 (id int not null,col1 int not null,col2 int not null,index(col1)
insert into t1 values(1,2,2),(2,2,1),(3,1,2),(4,1,1),(5,1,4),(6,2,3),(7,3,1),(8,2,4);
select * from t1 order by col1,col2;
select col1 from t1 order by id;
-select col1 as id from t1 order by t1.id;
-select concat(col1) as id from t1 order by t1.id;
+select col1 as id from t1 order by id;
+select concat(col1) as id from t1 order by id;
drop table t1;
#
@@ -445,3 +445,44 @@ insert into t1 select 1, b, c + (@row:=@row - 1) * 10, d - @row from t2 limit 10
select * from t1 where a=1 and b in (1) order by c, b, a;
select * from t1 where a=1 and b in (1);
drop table t1, t2;
+
+#
+# Bug #4302
+# Ambiguous ORDER BY when a renamed column is identical to another one in the result.
+# Should not fail; the column from t1 should be preferred for sorting.
+#
+create table t1 (col1 int, col int);
+create table t2 (col2 int, col int);
+insert into t1 values (1,1),(2,2),(3,3);
+insert into t2 values (1,3),(2,2),(3,1);
+select t1.* , t2.col as t2_col from t1 left join t2 on (t1.col1=t2.col2)
+ order by col;
+
+#
+# Let us also test various ambiguous and potentially ambiguous cases
+# related to aliases
+#
+--error 1052
+select col1 as col, col from t1 order by col;
+--error 1052
+select t1.col as c1, t2.col as c2 from t1, t2 where t1.col1=t2.col2
+ order by col;
+--error 1052
+select t1.col as c1, t2.col as c2 from t1, t2 where t1.col1=t2.col2
+ order by col;
+--error 1052
+select col1 from t1, t2 where t1.col1=t2.col2 order by col;
+
+select t1.col as t1_col, t2.col from t1, t2 where t1.col1=t2.col2
+ order by col;
+select col2 as c, col as c from t2 order by col;
+select col2 as col, col as col2 from t2 order by col;
+select t1.col as t1_col, t2.col2 from t1, t2 where t1.col1=t2.col2
+ order by col;
+select t2.col2, t2.col, t2.col from t2 order by col;
+
+select t2.col2 as col from t2 order by t2.col;
+select t2.col2 as col, t2.col from t2 order by t2.col;
+select t2.col2, t2.col, t2.col from t2 order by t2.col;
+
+drop table t1, t2;
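Stated informally, the rule these cases exercise: an ORDER BY name is resolved first against the select-list aliases and then against the columns of the underlying tables, and if the name matches several different things at the same stage the reference is ambiguous and should raise error 1052; a table-qualified reference is never ambiguous. A condensed restatement, where the table names o1 and o2 exist only for this sketch:

  create table o1 (col1 int, col int);
  create table o2 (col2 int, col int);
  insert into o1 values (1,1),(2,2);
  insert into o2 values (1,2),(2,1);
  -- ambiguous, raises error 1052: 'col' could be o1.col or o2.col, which differ
  -- select o1.col as c1, o2.col as c2 from o1, o2 where o1.col1 = o2.col2 order by col;
  -- unambiguous: the qualifier pins the column
  select o1.col as c1, o2.col as c2 from o1, o2 where o1.col1 = o2.col2 order by o1.col;
  drop table o1, o2;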
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index 35f9b193fe4..cbc76e02b42 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -206,3 +206,75 @@ execute stmt1;
show table status from test like 't1%' ;
deallocate prepare stmt1 ;
drop table t1;
+
+#
+# Bug#4912 "mysqld crashs in case a statement is executed a second time":
+# negation elimination should work once and not break prepared statements
+#
+
+create table t1(a varchar(2), b varchar(3));
+prepare stmt1 from "select a, b from t1 where (not (a='aa' and b < 'zzz'))";
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+drop table t1;
+
+#
+# Bug#5034 "prepared "select 1 into @arg15", second execute crashes
+# server".
+# Check that descendants of select_result can be reused in prepared
+# statements or are correctly created and deleted on each execute
+#
+
+prepare stmt1 from "select 1 into @var";
+execute stmt1;
+execute stmt1;
+prepare stmt1 from "create table t1 select 1 as i";
+execute stmt1;
+drop table t1;
+execute stmt1;
+prepare stmt1 from "insert into t1 select i from t1";
+execute stmt1;
+execute stmt1;
+prepare stmt1 from "select * from t1 into outfile 'f1.txt'";
+execute stmt1;
+deallocate prepare stmt1;
+drop table t1;
+
+#
+# BUG#5242 "Prepared statement names are case sensitive"
+#
+prepare stmt1 from 'select 1';
+prepare STMT1 from 'select 2';
+execute sTmT1;
+deallocate prepare StMt1;
+
+--error 1243
+deallocate prepare Stmt1;
+
+# Also check that statement names are in the right character set.
+set names utf8;
+prepare `ü` from 'select 1234';
+execute `ü` ;
+set names latin1;
+execute ``;
+set names default;
+
+
+#
+# BUG#4368 "select * from t1 where a like ?" crashes server if a is in utf8
+# and ? is in latin1
+# Check that the Item converting latin1 to utf8 (for the LIKE function) is
+# created in the memory of the prepared statement.
+#
+
+create table t1 (a varchar(10)) charset=utf8;
+insert into t1 (a) values ('yahoo');
+set character_set_connection=latin1;
+prepare stmt from 'select a from t1 where a like ?';
+set @var='google';
+execute stmt using @var;
+execute stmt using @var;
+deallocate prepare stmt;
+drop table t1;
+
diff --git a/mysql-test/t/ps_1general.test b/mysql-test/t/ps_1general.test
index 5483a70f219..cc8cb4c4ba0 100644
--- a/mysql-test/t/ps_1general.test
+++ b/mysql-test/t/ps_1general.test
@@ -350,11 +350,11 @@ prepare stmt4 from ' use test ' ;
## create/drop database
--error 1295
-prepare stmt3 from ' create database drop_me ';
-create database drop_me ;
+prepare stmt3 from ' create database mysqltest ';
+create database mysqltest ;
--error 1295
-prepare stmt3 from ' drop database drop_me ';
-drop database drop_me ;
+prepare stmt3 from ' drop database mysqltest ';
+drop database mysqltest ;
## grant/revoke + drop user
--error 1295
diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test
index 471af8e4a5b..61886221fcf 100644
--- a/mysql-test/t/range.test
+++ b/mysql-test/t/range.test
@@ -383,3 +383,48 @@ select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
drop table t1,t2;
+
+# Fix for bug#4488
+#
+create table t1 (x bigint unsigned not null);
+insert into t1(x) values (0xfffffffffffffff0);
+insert into t1(x) values (0xfffffffffffffff1);
+select * from t1;
+select count(*) from t1 where x>0;
+select count(*) from t1 where x=0;
+select count(*) from t1 where x<0;
+select count(*) from t1 where x < -16;
+select count(*) from t1 where x = -16;
+select count(*) from t1 where x > -16;
+select count(*) from t1 where x = 18446744073709551601;
+
+
+create table t2 (x bigint not null);
+insert into t2(x) values (0xfffffffffffffff0);
+insert into t2(x) values (0xfffffffffffffff1);
+select * from t2;
+select count(*) from t2 where x>0;
+select count(*) from t2 where x=0;
+select count(*) from t2 where x<0;
+select count(*) from t2 where x < -16;
+select count(*) from t2 where x = -16;
+select count(*) from t2 where x > -16;
+select count(*) from t2 where x = 18446744073709551601;
+
+drop table t1;
+--disable_warnings
+create table t1 (x bigint unsigned not null primary key) engine=innodb;
+--enable_warnings
+insert into t1(x) values (0xfffffffffffffff0);
+insert into t1(x) values (0xfffffffffffffff1);
+select * from t1;
+select count(*) from t1 where x>0;
+select count(*) from t1 where x=0;
+select count(*) from t1 where x<0;
+select count(*) from t1 where x < -16;
+select count(*) from t1 where x = -16;
+select count(*) from t1 where x > -16;
+select count(*) from t1 where x = 18446744073709551601;
+
+drop table t1;
+
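The values used above sit just below 2^64, outside the signed BIGINT range, so the point being tested is that comparisons of an unsigned column against negative constants and against literals such as 18446744073709551601 (which is 0xfffffffffffffff1) must not wrap. A one-table sketch of the expected semantics; the name u_demo and the added index are illustrative only:

  create table u_demo (x bigint unsigned not null, key(x));
  insert into u_demo values (0xfffffffffffffff0), (0xfffffffffffffff1);
  select count(*) from u_demo where x > 0;                     -- expect 2
  select count(*) from u_demo where x < -16;                   -- expect 0; nothing is below a negative value
  select count(*) from u_demo where x = 18446744073709551601;  -- expect 1
  drop table u_demo;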
diff --git a/mysql-test/t/rename.test b/mysql-test/t/rename.test
index bea0641ad23..e6dc6ce9456 100644
--- a/mysql-test/t/rename.test
+++ b/mysql-test/t/rename.test
@@ -4,6 +4,8 @@
--disable_warnings
drop table if exists t0,t1,t2,t3,t4;
+# Clean up from other tests (to ensure that SHOW TABLES below is correct)
+drop table if exists t0,t5,t6,t7,t8,t9;
--enable_warnings
create table t0 SELECT 1,"table 1";
diff --git a/mysql-test/t/rpl_charset.test b/mysql-test/t/rpl_charset.test
index 3dc219fef86..ba2ebe03a12 100644
--- a/mysql-test/t/rpl_charset.test
+++ b/mysql-test/t/rpl_charset.test
@@ -6,41 +6,41 @@
source include/master-slave.inc;
--disable_warnings
-drop database if exists test2;
-drop database if exists test3;
+drop database if exists mysqltest2;
+drop database if exists mysqltest3;
--enable_warnings
-create database test2 character set latin2;
+create database mysqltest2 character set latin2;
set @@character_set_server=latin5;
-create database test3;
+create database mysqltest3;
--disable_query_log
select "--- --master--" as "";
--enable_query_log
-show create database test2;
-show create database test3;
+show create database mysqltest2;
+show create database mysqltest3;
sync_slave_with_master;
--disable_query_log
select "--- --slave--" as "";
--enable_query_log
-show create database test2;
-show create database test3;
+show create database mysqltest2;
+show create database mysqltest3;
connection master;
set @@collation_server=armscii8_bin;
-drop database test3;
-create database test3;
+drop database mysqltest3;
+create database mysqltest3;
--disable_query_log
select "--- --master--" as "";
--enable_query_log
-show create database test3;
+show create database mysqltest3;
sync_slave_with_master;
--disable_query_log
select "--- --slave--" as "";
--enable_query_log
-show create database test3;
+show create database mysqltest3;
connection master;
-use test2;
+use mysqltest2;
create table t1 (a int auto_increment primary key, b varchar(100));
set character_set_client=cp850, collation_connection=latin2_croatian_ci;
insert into t1 (b) values(@@character_set_server);
@@ -59,7 +59,7 @@ sync_slave_with_master;
--disable_query_log
select "--- --slave--" as "";
--enable_query_log
-select * from test2.t1 order by a;
+select * from mysqltest2.t1 order by a;
connection master;
set character_set_client=latin1, collation_connection=latin1_german1_ci;
@@ -77,7 +77,7 @@ sync_slave_with_master;
--disable_query_log
select "--- --slave--" as "";
--enable_query_log
-select * from test2.t1 order by a;
+select * from mysqltest2.t1 order by a;
# See if SET ONE_SHOT gets into binlog when LOAD DATA
connection master;
@@ -101,11 +101,11 @@ sync_slave_with_master;
--disable_query_log
select "--- --slave--" as "";
--enable_query_log
-select * from test2.t1 order by a;
+select * from mysqltest2.t1 order by a;
connection master;
-drop database test2;
-drop database test3;
+drop database mysqltest2;
+drop database mysqltest3;
show binlog events from 95;
sync_slave_with_master;
diff --git a/mysql-test/t/rpl_delete_all.test b/mysql-test/t/rpl_delete_all.test
index 6ca98b34caf..23848720107 100644
--- a/mysql-test/t/rpl_delete_all.test
+++ b/mysql-test/t/rpl_delete_all.test
@@ -1,14 +1,14 @@
source include/master-slave.inc;
connection slave;
-create database test1;
+create database mysqltest;
connection master;
-drop database if exists test1;
+drop database if exists mysqltest;
sync_slave_with_master;
# can't read dir
--replace_result "Errcode: 1" "Errcode: X" "Errcode: 2" "Errcode: X"
--error 12
-show tables from test1;
+show tables from mysqltest;
connection slave;
create table t1 (a int);
diff --git a/mysql-test/t/rpl_heap.test b/mysql-test/t/rpl_heap.test
index f122b5ab2a1..3452f3990bf 100644
--- a/mysql-test/t/rpl_heap.test
+++ b/mysql-test/t/rpl_heap.test
@@ -13,8 +13,10 @@ connect (slave,localhost,root,,test,0,slave.sock);
connection master;
reset master;
drop table if exists t1;
-create table t1 (a int) type=HEAP;
-insert into t1 values(10);
+# We use CREATE ... SELECT to verify that the DELETE does not get into the
+# binlog before the CREATE ... SELECT
+create table t1 type=HEAP select 10 as a;
+insert into t1 values(11);
save_master_pos;
show binlog events from 79;
connection slave;
diff --git a/mysql-test/t/rpl_relayrotate.test b/mysql-test/t/rpl_relayrotate.test
index 63986b38456..2fde590356a 100644
--- a/mysql-test/t/rpl_relayrotate.test
+++ b/mysql-test/t/rpl_relayrotate.test
@@ -8,8 +8,7 @@
# The slave is started with max_binlog_size=16384 bytes,
# to force many rotations (approximately 30 rotations)
-# If the master or slave does not support InnoDB, this test will pass
-
+source include/have_innodb.inc;
source include/master-slave.inc;
connection slave;
stop slave;
diff --git a/mysql-test/t/select_found.test b/mysql-test/t/select_found.test
index c20b6e9ab6c..943174462e3 100644
--- a/mysql-test/t/select_found.test
+++ b/mysql-test/t/select_found.test
@@ -54,8 +54,18 @@ CREATE TABLE t2 (
UNIQUE KEY e_n (email,name)
);
-INSERT INTO t2 VALUES (1,'name1','email1'),(2,'name2','email2'),(3,'name3','email3'),(4,'name4','email4'),(5,'name5','email5'),(6,'name6','email6'),(7,'name7','email7'),(8,'name8','email8'),(9,'name9','email9'),(10,'name10','email10'),(11,'name11','email11'),(12,'name12','email12'),(13,'name13','email13'),(14,'name14','email14'),(15,'name15','email15'),(16,'name16','email16'),(17,'name17','email17'),(18,'name18','email18'),(19,'name19','email19'),(20,'name20','email20'),(21,'name21','email21'),(22,'name22','email22'),(23,'name23','email23'),(24,'name24','email24'),(25,'name25','email25'),(26,'name26','email26'),(27,'name27','email27'),(28,'name28','email28'),(29,'name29','email29'),(30,'name30','email30'),(31,'name31','email31'),(32,'name32','email32'),(33,'name33','email33'),(34,'name34','email34'),(35,'name35','email35'),(36,'name36','email36'),(37,'name37','email37'),(38,'name38','email38'),(39,'name39','email39'),(40,'name40','email40'),(41,'name41','email41'),(42,'name42','email42'),(43,'name43','email43'),(44,'name44','email44'),(45,'name45','email45'),(46,'name46','email46'),(47,'name47','email47'),(48,'name48','email48'),(49,'name49','email49'),(50,'name50','email50'),(51,'name51','email51'),(52,'name52','email52'),(53,'name53','email53'),(54,'name54','email54'),(55,'name55','email55'),(56,'name56','email56'),(57,'name57','email57'),(58,'name58','email58'),(59,'name59','email59'),(60,'name60','email60'),(61,'name61','email61'),(62,'name62','email62'),(63,'name63','email63'),(64,'name64','email64'),(65,'name65','email65'),(66,'name66','email66'),(67,'name67','email67'),(68,'name68','email68'),(69,'name69','email69'),(70,'name70','email70'),(71,'name71','email71'),(72,'name72','email72'),(73,'name73','email73'),(74,'name74','email74'),(75,'name75','email75'),(76,'name76','email76'),(77,'name77','email77'),(78,'name78','email78'),(79,'name79','email79'),(80,'name80','email80'),(81,'name81','email81'),(82,'name82','email82'),(83,'name83','email83'),(84,'name84','email84'),(85,'name85','email85'),(86,'name86','email86'),(87,'name87','email87'),(88,'name88','email88'),(89,'name89','email89'),(90,'name90','email90'),(91,'name91','email91'),(92,'name92','email92'),(93,'name93','email93'),(94,'name94','email94'),(95,'name95','email95'),(96,'name96','email96'),(97,'name97','email97'),(98,'name98','email98'),(99,'name99','email99'),(100,'name100','email100'),(101,'name101','email101'),(102,'name102','email102'),(103,'name103','email103'),(104,'name104','email104'),(105,'name105','email105'),(106,'name106','email106'),(107,'name107','email107'),(108,'name108','email108'),(109,'name109','email109'),(110,'name110','email110'),(111,'name111','email111'),(112,'name112','email112'),(113,'name113','email113'),(114,'name114','email114'),(115,'name115','email115'),(116,'name116','email116'),(117,'name117','email117'),(118,'name118','email118'),(119,'name119','email119'),(120,'name120','email120'),(121,'name121','email121'),(122,'name122','email122'),(123,'name123','email123'),(124,'name124','email124'),(125,'name125','email125'),(126,'name126','email126'),(127,'name127','email127'),(128,'name128','email128'),(129,'name129','email129'),(130,'name130','email130'),(131,'name131','email131'),(132,'name132','email132'),(133,'name133','email133'),(134,'name134','email134'),(135,'name135','email135'),(136,'name136','email136'),(137,'name137','email137'),(138,'name138','email138'),(139,'name139','email139'),(140,'name140','email140'),(141,'name141','email141'),(142,'name142','email142'),(143,'name143','email1
43'),(144,'name144','email144'),(145,'name145','email145'),(146,'name146','email146'),(147,'name147','email147'),(148,'name148','email148'),(149,'name149','email149'),(150,'name150','email150'),(151,'name151','email151'),(152,'name152','email152'),(153,'name153','email153'),(154,'name154','email154'),(155,'name155','email155'),(156,'name156','email156'),(157,'name157','email157'),(158,'name158','email158'),(159,'name159','email159'),(160,'name160','email160'),(161,'name161','email161'),(162,'name162','email162'),(163,'name163','email163'),(164,'name164','email164'),(165,'name165','email165'),(166,'name166','email166'),(167,'name167','email167'),(168,'name168','email168'),(169,'name169','email169'),(170,'name170','email170'),(171,'name171','email171'),(172,'name172','email172'),(173,'name173','email173'),(174,'name174','email174'),(175,'name175','email175'),(176,'name176','email176'),(177,'name177','email177'),(178,'name178','email178'),(179,'name179','email179'),(180,'name180','email180'),(181,'name181','email181'),(182,'name182','email182'),(183,'name183','email183'),(184,'name184','email184'),(185,'name185','email185'),(186,'name186','email186'),(187,'name187','email187'),(188,'name188','email188'),(189,'name189','email189'),(190,'name190','email190'),(191,'name191','email191'),(192,'name192','email192'),(193,'name193','email193'),(194,'name194','email194'),(195,'name195','email195'),(196,'name196','email196'),(197,'name197','email197'),(198,'name198','email198'),(199,'name199','email199'),(200,'name200','email200');
-
+disable_query_log;
+let $1=200;
+let $2=0;
+while ($1)
+{
+ inc $2;
+ eval INSERT INTO t2 VALUES ($2,'name$2','email$2');
+ dec $1;
+}
+enable_query_log;
+
+EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
SELECT FOUND_ROWS();
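The hunk above swaps a single multi-kilobyte INSERT for a small mysqltest loop that generates the same 200 rows, with query logging disabled so the generated statements do not bloat the result file. A generic restatement of the looping pattern; the variable names ($rows, $i) are illustrative, the actual hunk uses $1 and $2:

  disable_query_log;
  let $rows= 200;
  let $i= 0;
  while ($rows)
  {
    inc $i;
    eval INSERT INTO t2 VALUES ($i, 'name$i', 'email$i');
    dec $rows;
  }
  enable_query_log;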
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index d69ce8ac75a..9ed1ac2d63e 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -4,6 +4,7 @@
--disable_warnings
drop table if exists t1,t2;
+drop database if exists mysqltest;
--enable_warnings
create table t1 (a int not null primary key, b int not null,c int not null, key(b,c));
@@ -269,42 +270,42 @@ drop table t1, t2, t3;
# Test for bug #3342 SHOW CREATE DATABASE seems to require DROP privilege
#
-create database test_$1;
-show create database test_$1;
-create table test_$1.t1(a int);
-insert into test_$1.t1 values(1);
-grant select on `test_$1`.* to mysqltest_1@localhost;
-grant usage on `test_$1`.* to mysqltest_2@localhost;
-grant drop on `test_$1`.* to mysqltest_3@localhost;
+create database mysqltest;
+show create database mysqltest;
+create table mysqltest.t1(a int);
+insert into mysqltest.t1 values(1);
+grant select on `mysqltest`.* to mysqltest_1@localhost;
+grant usage on `mysqltest`.* to mysqltest_2@localhost;
+grant drop on `mysqltest`.* to mysqltest_3@localhost;
-connect (con1,localhost,mysqltest_1,,test_$1);
+connect (con1,localhost,mysqltest_1,,mysqltest);
connection con1;
select * from t1;
-show create database test_$1;
+show create database mysqltest;
--error 1044
drop table t1;
--error 1044
-drop database test_$1;
+drop database mysqltest;
connect (con2,localhost,mysqltest_2,,test);
connection con2;
--error 1044
-select * from test_$1.t1;
+select * from mysqltest.t1;
--error 1044
-show create database test_$1;
+show create database mysqltest;
--error 1044
-drop table test_$1.t1;
+drop table mysqltest.t1;
--error 1044
-drop database test_$1;
+drop database mysqltest;
connect (con3,localhost,mysqltest_3,,test);
connection con3;
--error 1044
-select * from test_$1.t1;
+select * from mysqltest.t1;
--error 1044
-show create database test_$1;
-drop table test_$1.t1;
-drop database test_$1;
+show create database mysqltest;
+drop table mysqltest.t1;
+drop database mysqltest;
connection default;
set names binary;
@@ -314,3 +315,11 @@ delete from mysql.db
where user='mysqltest_1' || user='mysqltest_2' || user='mysqltest_3';
flush privileges;
+# This test fails on Mac OS X, so it is temporarily disabled.
+# This needs WL#1324 to be done.
+#set names latin1;
+#create database ``;
+#create table ``.`` (a int) engine=heap;
+#--replace_column 7 # 8 # 9 #
+#show table status from `` LIKE '';
+#drop database ``;
diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test
index 5211704f809..3d10b88da5c 100644
--- a/mysql-test/t/subselect.test
+++ b/mysql-test/t/subselect.test
@@ -764,14 +764,6 @@ select * from t1;
drop table t1, t2;
#
-# correct behavoiur for function from reduced subselect
-#
-create table t1(City VARCHAR(30),Location geometry);
-insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)'));
-select City from t1 where (select intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5 50, 2.5 47, 2 47, 2 50))'))=0);
-drop table t1;
-
-#
# reduced subselect in ORDER BY & GROUP BY clauses
#
@@ -1227,3 +1219,56 @@ CREATE TABLE `t2` ( `b` int(11) default NULL, `a` int(11) default NULL) ENGINE=M
insert into t2 values (1,2);
select t000.a, count(*) `C` FROM t1 t000 GROUP BY t000.a HAVING count(*) > ALL (SELECT count(*) FROM t2 t001 WHERE t001.a=1);
drop table t1,t2;
+
+#
+# BUG#4769 - fulltext in subselect
+#
+create table t1 (a int not null auto_increment primary key, b varchar(40), fulltext(b));
+insert into t1 (b) values ('ball'),('ball games'), ('games'), ('foo'), ('foobar'), ('Serg'), ('Sergei'),('Georg'), ('Patrik'),('Hakan');
+create table t2 (a int);
+insert into t2 values (1),(3),(2),(7);
+select a,b from t1 where match(b) against ('Ball') > 0;
+select a from t2 where a in (select a from t1 where match(b) against ('Ball') > 0);
+drop table t1,t2;
+
+#
+# BUG#5003 - like in subselect
+#
+CREATE TABLE t1(`IZAVORGANG_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`KUERZEL` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,`IZAANALYSEART_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin,`IZAPMKZ_ID` VARCHAR(11) CHARACTER SET latin1 COLLATE latin1_bin);
+CREATE INDEX AK01IZAVORGANG ON t1(izaAnalyseart_id,Kuerzel);
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000001','601','D0000000001','I0000000001');
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000002','602','D0000000001','I0000000001');
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000003','603','D0000000001','I0000000001');
+INSERT INTO t1(`IZAVORGANG_ID`,`KUERZEL`,`IZAANALYSEART_ID`,`IZAPMKZ_ID`)VALUES('D0000000004','101','D0000000001','I0000000001');
+SELECT `IZAVORGANG_ID` FROM t1 WHERE `KUERZEL` IN(SELECT MIN(`KUERZEL`)`Feld1` FROM t1 WHERE `KUERZEL` LIKE'601%'And`IZAANALYSEART_ID`='D0000000001');
+drop table t1;
+
+#
+# Optimized IN with compound index
+#
+CREATE TABLE `t1` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`));
+CREATE TABLE `t2` ( `aid` int(11) NOT NULL default '0', `bid` int(11) NOT NULL default '0', PRIMARY KEY (`aid`,`bid`));
+insert into t1 values (1,1),(1,2),(2,1),(2,2);
+insert into t2 values (1,2),(2,2);
+select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid);
+alter table t2 drop primary key;
+alter table t2 add key KEY1 (aid, bid);
+select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid);
+alter table t2 drop key KEY1;
+alter table t2 add primary key (bid, aid);
+select * from t1 where t1.aid not in (select aid from t2 where bid=t1.bid);
+drop table t1,t2;
+
+#
+# resolving fields of grouped outer SELECT
+#
+CREATE TABLE t1 (howmanyvalues bigint, avalue int);
+INSERT INTO t1 VALUES (1, 1),(2, 1),(2, 2),(3, 1),(3, 2),(3, 3),(4, 1),(4, 2),(4, 3),(4, 4);
+SELECT howmanyvalues, count(*) from t1 group by howmanyvalues;
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues;
+CREATE INDEX t1_howmanyvalues_idx ON t1 (howmanyvalues);
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues+1 = a.howmanyvalues+1) as mycount from t1 a group by a.howmanyvalues;
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.howmanyvalues) as mycount from t1 a group by a.howmanyvalues;
+-- error 1054
+SELECT a.howmanyvalues, (SELECT count(*) from t1 b where b.howmanyvalues = a.avalue) as mycount from t1 a group by a.howmanyvalues;
+drop table t1;
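In the "Optimized IN with compound index" block above, the expected result can be worked out by hand: for the t1 rows with bid = 1 the inner select is empty, so NOT IN is true and (1,1) and (2,1) are returned; for bid = 2 the inner select yields {1,2}, so (1,2) and (2,2) are filtered out, and this must stay the same for every index layout tried on t2. A condensed restatement, with table names s1 and s2 used only for this sketch:

  create table s1 (aid int, bid int, primary key (aid, bid));
  create table s2 (aid int, bid int, primary key (aid, bid));
  insert into s1 values (1,1),(1,2),(2,1),(2,2);
  insert into s2 values (1,2),(2,2);
  select * from s1 where s1.aid not in (select aid from s2 where bid = s1.bid);
  -- expected result: rows (1,1) and (2,1)
  drop table s1, s2;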
diff --git a/mysql-test/t/subselect_gis.test b/mysql-test/t/subselect_gis.test
new file mode 100644
index 00000000000..338051029c4
--- /dev/null
+++ b/mysql-test/t/subselect_gis.test
@@ -0,0 +1,15 @@
+-- source include/have_geometry.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# correct behaviour for a function from a reduced subselect
+#
+create table t1(City VARCHAR(30),Location geometry);
+insert into t1 values("Paris",GeomFromText('POINT(2.33 48.87)'));
+select City from t1 where (select
+intersects(GeomFromText(AsText(Location)),GeomFromText('Polygon((2 50, 2.5
+50, 2.5 47, 2 47, 2 50))'))=0);
+drop table t1;
diff --git a/mysql-test/t/timezone2.test b/mysql-test/t/timezone2.test
index 49579421570..15ac3416b29 100644
--- a/mysql-test/t/timezone2.test
+++ b/mysql-test/t/timezone2.test
@@ -187,3 +187,15 @@ select convert_tz('2003-12-31 04:00:00', 'SomeNotExistingTimeZone', 'UTC');
select convert_tz('2003-12-31 04:00:00', 'MET', 'SomeNotExistingTimeZone');
select convert_tz('2003-12-31 04:00:00', 'MET', NULL);
select convert_tz( NULL, 'MET', 'UTC');
+
+#
+# Test for bug #4508 "CONVERT_TZ() function with new time zone as param
+# crashes server." (Was caused by a broken mechanism for dynamically
+# loading time zones.)
+#
+create table t1 (ts timestamp);
+set timestamp=1000000000;
+insert into t1 (ts) values (now());
+select convert_tz(ts, @@time_zone, 'Japan') from t1;
+drop table t1;
+
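The regression above makes the named zone 'Japan' appear for the first time inside the query itself, so it has to be loaded on demand from the mysql.time_zone% tables at execution time, which is what used to crash. A minimal illustration of CONVERT_TZ with a named zone, assuming the time zone tables have been populated (for example with mysql_tzinfo_to_sql):

  select convert_tz('2001-09-09 01:46:40', 'UTC', 'Japan');
  -- expect 2001-09-09 10:46:40, since Japan is UTC+9 with no DST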
diff --git a/mysql-test/t/truncate.test b/mysql-test/t/truncate.test
index 434a1907e42..b7ec506ecf1 100644
--- a/mysql-test/t/truncate.test
+++ b/mysql-test/t/truncate.test
@@ -26,7 +26,7 @@ drop table t1;
truncate non_existing_table;
#
-# test autoincrement with TRUNCATE
+# test autoincrement with TRUNCATE; verifying difference with DELETE
#
create table t1 (a integer auto_increment primary key);
@@ -34,5 +34,19 @@ insert into t1 (a) values (NULL),(NULL);
truncate table t1;
insert into t1 (a) values (NULL),(NULL);
SELECT * from t1;
+delete from t1;
+insert into t1 (a) values (NULL),(NULL);
+SELECT * from t1;
drop table t1;
+# Verifying that temp tables are handled the same way
+
+create temporary table t1 (a integer auto_increment primary key);
+insert into t1 (a) values (NULL),(NULL);
+truncate table t1;
+insert into t1 (a) values (NULL),(NULL);
+SELECT * from t1;
+delete from t1;
+insert into t1 (a) values (NULL),(NULL);
+SELECT * from t1;
+drop table t1;
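The added statements contrast the two ways of emptying a table: TRUNCATE TABLE recreates it, so the AUTO_INCREMENT counter starts again from 1, while DELETE FROM only removes the rows and, for MyISAM at this point in history, leaves the counter where it was, so later inserts continue the old sequence. Condensed below; the table name a_demo is illustrative and the post-DELETE counter behaviour is engine- and version-dependent:

  create table a_demo (a int auto_increment primary key);
  insert into a_demo values (NULL),(NULL);   -- rows 1,2
  truncate table a_demo;
  insert into a_demo values (NULL),(NULL);   -- rows 1,2 again: the counter was reset
  delete from a_demo;
  insert into a_demo values (NULL),(NULL);   -- typically rows 3,4: the counter was kept
  drop table a_demo;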
diff --git a/mysql-test/t/type_blob.test b/mysql-test/t/type_blob.test
index 8c6cabd997b..bd571deff49 100644
--- a/mysql-test/t/type_blob.test
+++ b/mysql-test/t/type_blob.test
@@ -340,6 +340,7 @@ drop table t1;
create table t1 (id integer primary key auto_increment, txt text, unique index txt_index (txt (20)));
insert into t1 (txt) values ('Chevy'), ('Chevy '), (NULL);
select * from t1 where txt='Chevy' or txt is NULL;
+explain select * from t1 where txt='Chevy' or txt is NULL;
select * from t1 where txt='Chevy ';
select * from t1 where txt='Chevy ' or txt='Chevy';
select * from t1 where txt='Chevy' or txt='Chevy ';
@@ -358,7 +359,13 @@ select * from t1 where txt < 'Chevy ' or txt is NULL;
select * from t1 where txt <= 'Chevy';
select * from t1 where txt > 'Chevy';
select * from t1 where txt >= 'Chevy';
+alter table t1 modify column txt blob;
+explain select * from t1 where txt='Chevy' or txt is NULL;
+select * from t1 where txt='Chevy' or txt is NULL;
+explain select * from t1 where txt='Chevy' or txt is NULL order by txt;
+select * from t1 where txt='Chevy' or txt is NULL order by txt;
drop table t1;
+
CREATE TABLE t1 ( i int(11) NOT NULL default '0', c text NOT NULL, PRIMARY KEY (i), KEY (c(1),c(1)));
INSERT t1 VALUES (1,''),(2,''),(3,'asdfh'),(4,'');
select max(i) from t1 where c = '';
diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test
index 8d67802d42a..64420a85189 100644
--- a/mysql-test/t/type_date.test
+++ b/mysql-test/t/type_date.test
@@ -88,3 +88,22 @@ CREATE TABLE t1 (f1 time default NULL, f2 time default NULL);
INSERT INTO t1 (f1, f2) VALUES ('09:00', '12:00');
SELECT DATE_FORMAT(f1, "%l.%i %p") , DATE_FORMAT(f2, "%l.%i %p") FROM t1;
DROP TABLE t1;
+
+#
+# Bug 4937: different date -> string conversion when using SELECT ... UNION
+# and INSERT ... SELECT ... UNION
+#
+
+CREATE TABLE t1 (f1 DATE);
+CREATE TABLE t2 (f2 VARCHAR(8));
+CREATE TABLE t3 (f2 CHAR(8));
+
+INSERT INTO t1 VALUES ('1978-11-26');
+INSERT INTO t2 SELECT f1+0 FROM t1;
+INSERT INTO t2 SELECT f1+0 FROM t1 UNION SELECT f1+0 FROM t1;
+INSERT INTO t3 SELECT f1+0 FROM t1;
+INSERT INTO t3 SELECT f1+0 FROM t1 UNION SELECT f1+0 FROM t1;
+SELECT * FROM t2;
+SELECT * FROM t3;
+
+DROP TABLE t1, t2, t3;
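The f1+0 expressions above force the DATE into its numeric form ('1978-11-26' + 0 becomes 19781126), and the bug was that this number was converted to a string differently depending on whether the INSERT ... SELECT carried a UNION. A standalone look at the conversion itself; the table name d_demo is illustrative:

  create table d_demo (f1 date);
  insert into d_demo values ('1978-11-26');
  select f1, f1 + 0, concat(f1 + 0) from d_demo;   -- expect 1978-11-26, 19781126, '19781126'
  drop table d_demo;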
diff --git a/mysql-test/t/type_float.test b/mysql-test/t/type_float.test
index d3ddecfc314..4b627ea9b99 100644
--- a/mysql-test/t/type_float.test
+++ b/mysql-test/t/type_float.test
@@ -31,10 +31,14 @@ select a from t1 order by a;
select min(a) from t1;
drop table t1;
+#
+# BUG#3612, BUG#4393, BUG#4356, BUG#4394
+#
+
create table t1 (c1 double, c2 varchar(20));
insert t1 values (121,"16");
select c1 + c1 * (c2 / 100) as col from t1;
-create table t2 select c1 + c1 * (c2 / 100) as col from t1;
+create table t2 select c1 + c1 * (c2 / 100) as col1, round(c1, 5) as col2, round(c1, 35) as col3, sqrt(c1*1e-15) col4 from t1;
select * from t2;
show create table t2;
drop table t1,t2;
@@ -68,6 +72,13 @@ select * from t1;
show create table t1;
drop table t1;
+#
+# float in a char(1) field
+#
+create table t1 (c20 char);
+insert into t1 values (5000.0);
+drop table t1;
+
# Errors
--error 1063
@@ -75,3 +86,4 @@ create table t1 (f float(54)); # Should give an error
--disable_warnings
drop table if exists t1;
--enable_warnings
+
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index 9b3abc9f155..a644197f757 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -234,3 +234,13 @@ alter table t1 add i int default 10;
select * from t1;
drop table t1;
+
+# Test for bug #4491, TIMESTAMP(19) should be possible to create and not
+# only read in 4.0
+#
+create table t1 (ts timestamp(19));
+show create table t1;
+set TIMESTAMP=1000000000;
+insert into t1 values ();
+select * from t1;
+drop table t1;
diff --git a/mysql-test/t/type_uint.test b/mysql-test/t/type_uint.test
index ee5f5e8123b..b1f59242e8e 100644
--- a/mysql-test/t/type_uint.test
+++ b/mysql-test/t/type_uint.test
@@ -10,5 +10,6 @@ SET SQL_WARNINGS=1;
create table t1 (this int unsigned);
insert into t1 values (1);
insert into t1 values (-1);
+insert into t1 values ('5000000000');
select * from t1;
drop table t1;
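The new INSERT pushes a value above the 32-bit unsigned maximum into an INT UNSIGNED column; with the SQL_WARNINGS=1 setting in force the statement is expected to succeed with a truncation warning and store the column maximum rather than fail. A condensed sketch, with the table name u32_demo used only for illustration and the exact warning text varying by version:

  create table u32_demo (v int unsigned);
  insert into u32_demo values ('5000000000');   -- above the column maximum of 4294967295
  select * from u32_demo;                       -- expect the clipped value 4294967295
  drop table u32_demo;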
diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test
index 206b40f8ce7..263f631a65f 100644
--- a/mysql-test/t/union.test
+++ b/mysql-test/t/union.test
@@ -265,6 +265,14 @@ drop table t1,t2;
select length(version()) > 1 as `*` UNION select 2;
#
+# Bug #4980: problem with EXPLAIN of (SELECT ...) UNION (SELECT ...) ORDER BY
+#
+
+create table t1 (a int);
+insert into t1 values (0), (3), (1), (2);
+explain (select * from t1) union (select * from t1) order by a;
+drop table t1;
+#
# Test for another bug with UNION and LEFT JOIN
#
CREATE TABLE t1 ( id int(3) unsigned default '0') ENGINE=MyISAM;
diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test
index 5669d11ddac..8c318497c22 100644
--- a/mysql-test/t/variables.test
+++ b/mysql-test/t/variables.test
@@ -336,3 +336,14 @@ SELECT @@global.global.key_buffer_size;
SELECT @@global.session.key_buffer_size;
--error 1064
SELECT @@global.local.key_buffer_size;
+
+# BUG#5135: cannot turn on log_warnings with SET in 4.1 (and 4.0)
+set @tstlw = @@log_warnings;
+show global variables like 'log_warnings';
+set global log_warnings = 0;
+show global variables like 'log_warnings';
+set global log_warnings = 42;
+show global variables like 'log_warnings';
+set global log_warnings = @tstlw;
+show global variables like 'log_warnings';
+
diff --git a/mysys/Makefile.am b/mysys/Makefile.am
index d4290bbc49b..3ffeeab0411 100644
--- a/mysys/Makefile.am
+++ b/mysys/Makefile.am
@@ -17,7 +17,8 @@
MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
-INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include -I$(srcdir)
+INCLUDES = @MT_INCLUDES@ \
+ @ZLIB_INCLUDES@ -I$(top_srcdir)/include -I$(srcdir)
pkglib_LIBRARIES = libmysys.a
LDADD = libmysys.a ../dbug/libdbug.a \
../strings/libmystrings.a
diff --git a/mysys/hash.c b/mysys/hash.c
index 11cbbd6b898..ce25ae89b63 100644
--- a/mysys/hash.c
+++ b/mysys/hash.c
@@ -52,7 +52,7 @@ _hash_init(HASH *hash,CHARSET_INFO *charset,
void (*free_element)(void*),uint flags CALLER_INFO_PROTO)
{
DBUG_ENTER("hash_init");
- DBUG_PRINT("enter",("hash: %lx size: %d",hash,size));
+ DBUG_PRINT("enter",("hash: 0x%lx size: %d",hash,size));
hash->records=0;
if (my_init_dynamic_array_ci(&hash->array,sizeof(HASH_LINK),size,0))
@@ -565,7 +565,7 @@ my_bool hash_check(HASH *hash)
if ((rec_link=hash_rec_mask(hash,hash_info,blength,records)) != i)
{
DBUG_PRINT("error",
- ("Record in wrong link at %d: Start %d Record: %lx Record-link %d", idx,i,hash_info->data,rec_link));
+ ("Record in wrong link at %d: Start %d Record: 0x%lx Record-link %d", idx,i,hash_info->data,rec_link));
error=1;
}
else
diff --git a/mysys/list.c b/mysys/list.c
index 17028e8e183..64fca10dc0b 100644
--- a/mysys/list.c
+++ b/mysys/list.c
@@ -28,7 +28,7 @@
LIST *list_add(LIST *root, LIST *element)
{
DBUG_ENTER("list_add");
- DBUG_PRINT("enter",("root: %lx element: %lx", root, element));
+ DBUG_PRINT("enter",("root: 0x%lx element: %lx", root, element));
if (root)
{
if (root->prev) /* If add in mid of list */
diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c
index f16f2b7ab72..f109df912f1 100644
--- a/mysys/mf_iocache.c
+++ b/mysys/mf_iocache.c
@@ -140,7 +140,7 @@ int init_io_cache(IO_CACHE *info, File file, uint cachesize,
uint min_cache;
my_off_t end_of_file= ~(my_off_t) 0;
DBUG_ENTER("init_io_cache");
- DBUG_PRINT("enter",("cache: %lx type: %d pos: %ld",
+ DBUG_PRINT("enter",("cache: 0x%lx type: %d pos: %ld",
(ulong) info, (int) type, (ulong) seek_offset));
info->file= file;
@@ -290,7 +290,7 @@ my_bool reinit_io_cache(IO_CACHE *info, enum cache_type type,
pbool clear_cache)
{
DBUG_ENTER("reinit_io_cache");
- DBUG_PRINT("enter",("cache: %lx type: %d seek_offset: %lu clear_cache: %d",
+ DBUG_PRINT("enter",("cache: 0x%lx type: %d seek_offset: %lu clear_cache: %d",
(ulong) info, type, (ulong) seek_offset,
(int) clear_cache));
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index 32b3154b8ed..11aadbed6c1 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -29,10 +29,10 @@
to disk, if neccessary. This is handled in find_key_block().
With the new free list, the blocks can have three temperatures:
hot, warm and cold (which is free). This is remembered in the block header
- by the enum BLOCK_TEMPERATURE temperature variable. Remembering the
- temperature is neccessary to correctly count the number of warm blocks,
- which is required to decide when blocks are allowed to become hot. Whenever
- a block is inserted to another (sub-)chain, we take the old and new
+ by the enum BLOCK_TEMPERATURE temperature variable. Remembering the
+ temperature is neccessary to correctly count the number of warm blocks,
+ which is required to decide when blocks are allowed to become hot. Whenever
+ a block is inserted to another (sub-)chain, we take the old and new
temperature into account to decide if we got one more or less warm block.
blocks_unused is the sum of never used blocks in the pool and of currently
free blocks. blocks_used is the number of blocks fetched from the pool and
@@ -401,8 +401,8 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
keycache->waiting_for_hash_link.last_thread= NULL;
keycache->waiting_for_block.last_thread= NULL;
DBUG_PRINT("exit",
- ("disk_blocks: %d block_root: %lx hash_entries: %d\
- hash_root: %lx hash_links: %d hash_link_root %lx",
+ ("disk_blocks: %d block_root: 0x%lx hash_entries: %d\
+ hash_root: 0x%lx hash_links: %d hash_link_root: 0x%lx",
keycache->disk_blocks, keycache->block_root,
keycache->hash_entries, keycache->hash_root,
keycache->hash_links, keycache->hash_link_root));
@@ -475,13 +475,13 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
if (!keycache->key_cache_inited)
DBUG_RETURN(keycache->disk_blocks);
-
+
if(key_cache_block_size == keycache->key_cache_block_size &&
use_mem == keycache->key_cache_mem_size)
{
change_key_cache_param(keycache, division_limit, age_threshold);
DBUG_RETURN(keycache->disk_blocks);
- }
+ }
keycache_pthread_mutex_lock(&keycache->cache_lock);
@@ -504,7 +504,7 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
goto finish;
}
keycache->resize_in_flush= 0;
- keycache->can_be_used= 0;
+ keycache->can_be_used= 0;
while (keycache->cnt_for_resize_op)
{
keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
@@ -540,9 +540,9 @@ static inline void inc_counter_for_resize_op(KEY_CACHE *keycache)
*/
static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
{
- struct st_my_thread_var *last_thread;
+ struct st_my_thread_var *last_thread;
if (!--keycache->cnt_for_resize_op &&
- (last_thread= keycache->resize_queue.last_thread))
+ (last_thread= keycache->resize_queue.last_thread))
keycache_pthread_cond_signal(&last_thread->next->suspend);
}
@@ -551,7 +551,7 @@ static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
SYNOPSIS
change_key_cache_param()
- keycache pointer to a key cache data structure
+ keycache pointer to a key cache data structure
division_limit new division limit (if not zero)
age_threshold new age threshold (if not zero)
@@ -596,7 +596,7 @@ void change_key_cache_param(KEY_CACHE *keycache, uint division_limit,
void end_key_cache(KEY_CACHE *keycache, my_bool cleanup)
{
DBUG_ENTER("end_key_cache");
- DBUG_PRINT("enter", ("key_cache: %lx", keycache));
+ DBUG_PRINT("enter", ("key_cache: 0x%lx", keycache));
if (!keycache->key_cache_inited)
DBUG_VOID_RETURN;
@@ -625,7 +625,7 @@ writes: %ld r_requests: %ld reads: %ld",
if (cleanup)
{
pthread_mutex_destroy(&keycache->cache_lock);
- keycache->key_cache_inited= 0;
+ keycache->key_cache_inited= keycache->can_be_used= 0;
KEYCACHE_DEBUG_CLOSE;
}
DBUG_VOID_RETURN;
@@ -1109,7 +1109,7 @@ static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
{
- KEYCACHE_DBUG_PRINT("unlink_hash", ("file %u, filepos %lu #requests=%u",
+ KEYCACHE_DBUG_PRINT("unlink_hash", ("fd: %u pos: %lu #requests=%u",
(uint) hash_link->file,(ulong) hash_link->diskpos, hash_link->requests));
KEYCACHE_DBUG_ASSERT(hash_link->requests == 0);
if ((*hash_link->prev= hash_link->next))
@@ -1167,7 +1167,7 @@ static HASH_LINK *get_hash_link(KEY_CACHE *keycache,
int cnt;
#endif
- KEYCACHE_DBUG_PRINT("get_hash_link", ("file %u, filepos %lu",
+ KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u pos: %lu",
(uint) file,(ulong) filepos));
restart:
@@ -1193,7 +1193,7 @@ restart:
for (i=0, hash_link= *start ;
i < cnt ; i++, hash_link= hash_link->next)
{
- KEYCACHE_DBUG_PRINT("get_hash_link", ("file %u, filepos %lu",
+ KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u pos: %lu",
(uint) hash_link->file,(ulong) hash_link->diskpos));
}
}
@@ -1285,10 +1285,11 @@ static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
DBUG_ENTER("find_key_block");
KEYCACHE_THREAD_TRACE("find_key_block:begin");
- DBUG_PRINT("enter", ("file %u, filepos %lu, wrmode %lu",
- (uint) file, (ulong) filepos, (uint) wrmode));
- KEYCACHE_DBUG_PRINT("find_key_block", ("file %u, filepos %lu, wrmode %lu",
- (uint) file, (ulong) filepos, (uint) wrmode));
+ DBUG_PRINT("enter", ("fd: %u pos %lu wrmode: %lu",
+ (uint) file, (ulong) filepos, (uint) wrmode));
+ KEYCACHE_DBUG_PRINT("find_key_block", ("fd: %u pos: %lu wrmode: %lu",
+ (uint) file, (ulong) filepos,
+ (uint) wrmode));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
DBUG_EXECUTE("check_keycache2",
test_key_cache(keycache, "start of find_key_block", 0););
@@ -1315,7 +1316,7 @@ restart:
return 0;
}
if (!(block->status & BLOCK_IN_FLUSH))
- {
+ {
hash_link->requests--;
/*
Remove block to invalidate the page in the block buffer
@@ -1326,9 +1327,9 @@ restart:
buffer. Still we are guaranteed not to have any readers
of the key part we are writing into until the block is
removed from the cache as we set the BLOCL_REASSIGNED
- flag (see the code below that handles reading requests).
+ flag (see the code below that handles reading requests).
*/
- free_block(keycache, block);
+ free_block(keycache, block);
return 0;
}
/* Wait intil the page is flushed on disk */
@@ -1348,7 +1349,7 @@ restart:
free_block(keycache, block);
return 0;
}
-
+
if (page_status == PAGE_READ &&
(block->status & (BLOCK_IN_SWITCH | BLOCK_REASSIGNED)))
{
@@ -1542,7 +1543,7 @@ restart:
KEYCACHE_DBUG_ASSERT(page_status != -1);
*page_st=page_status;
KEYCACHE_DBUG_PRINT("find_key_block",
- ("file %u, filepos %lu, page_status %lu",
+ ("fd: %u pos %lu page_status %lu",
(uint) file,(ulong) filepos,(uint) page_status));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
@@ -1678,7 +1679,7 @@ byte *key_cache_read(KEY_CACHE *keycache,
uint offset= 0;
byte *start= buff;
DBUG_ENTER("key_cache_read");
- DBUG_PRINT("enter", ("file %u, filepos %lu, length %u",
+ DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
(uint) file, (ulong) filepos, length));
if (keycache->can_be_used)
@@ -1693,7 +1694,7 @@ byte *key_cache_read(KEY_CACHE *keycache,
do
{
keycache_pthread_mutex_lock(&keycache->cache_lock);
- if (!keycache->can_be_used)
+ if (!keycache->can_be_used)
{
keycache_pthread_mutex_unlock(&keycache->cache_lock);
goto no_key_cache;
@@ -1814,7 +1815,7 @@ int key_cache_insert(KEY_CACHE *keycache,
byte *buff, uint length)
{
DBUG_ENTER("key_cache_insert");
- DBUG_PRINT("enter", ("file %u, filepos %lu, length %u",
+ DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
(uint) file,(ulong) filepos, length));
if (keycache->can_be_used)
@@ -1829,7 +1830,7 @@ int key_cache_insert(KEY_CACHE *keycache,
{
uint offset;
keycache_pthread_mutex_lock(&keycache->cache_lock);
- if (!keycache->can_be_used)
+ if (!keycache->can_be_used)
{
keycache_pthread_mutex_unlock(&keycache->cache_lock);
DBUG_RETURN(0);
@@ -1873,7 +1874,7 @@ int key_cache_insert(KEY_CACHE *keycache,
error= (block->status & BLOCK_ERROR);
- dec_counter_for_resize_op(keycache);
+ dec_counter_for_resize_op(keycache);
keycache_pthread_mutex_unlock(&keycache->cache_lock);
@@ -1926,7 +1927,7 @@ int key_cache_write(KEY_CACHE *keycache,
int error=0;
DBUG_ENTER("key_cache_write");
DBUG_PRINT("enter",
- ("file %u filepos %lu length %u block_length %u key_block_length: %u",
+ ("fd: %u pos: %lu length: %u block_length: %u key_block_length: %u",
(uint) file, (ulong) filepos, length, block_length,
keycache ? keycache->key_cache_block_size : 0));
@@ -1953,7 +1954,7 @@ int key_cache_write(KEY_CACHE *keycache,
{
uint offset;
keycache_pthread_mutex_lock(&keycache->cache_lock);
- if (!keycache->can_be_used)
+ if (!keycache->can_be_used)
{
keycache_pthread_mutex_unlock(&keycache->cache_lock);
goto no_key_cache;
@@ -2028,7 +2029,7 @@ int key_cache_write(KEY_CACHE *keycache,
dec_counter_for_resize_op(keycache);
keycache_pthread_mutex_unlock(&keycache->cache_lock);
-
+
next_block:
buff+= read_length;
filepos+= read_length;
@@ -2149,7 +2150,7 @@ static int flush_cached_blocks(KEY_CACHE *keycache,
if (!last_errno)
last_errno= errno ? errno : -1;
}
- /*
+ /*
Let to proceed for possible waiting requests to write to the block page.
It might happen only during an operation to resize the key cache.
*/
@@ -2396,7 +2397,7 @@ int flush_key_blocks(KEY_CACHE *keycache,
{
int res;
DBUG_ENTER("flush_key_blocks");
- DBUG_PRINT("enter", ("keycache: %lx", keycache));
+ DBUG_PRINT("enter", ("keycache: 0x%lx", keycache));
if (keycache->disk_blocks <= 0)
DBUG_RETURN(0);
diff --git a/mysys/mf_keycaches.c b/mysys/mf_keycaches.c
index 806f83dc7d8..20465f3d23b 100644
--- a/mysys/mf_keycaches.c
+++ b/mysys/mf_keycaches.c
@@ -159,7 +159,7 @@ static byte *safe_hash_search(SAFE_HASH *hash, const byte *key, uint length)
result= hash->default_value;
else
result= ((SAFE_HASH_ENTRY*) result)->data;
- DBUG_PRINT("exit",("data: %lx", result));
+ DBUG_PRINT("exit",("data: 0x%lx", result));
DBUG_RETURN(result);
}
@@ -190,7 +190,7 @@ static my_bool safe_hash_set(SAFE_HASH *hash, const byte *key, uint length,
SAFE_HASH_ENTRY *entry;
my_bool error= 0;
DBUG_ENTER("safe_hash_set");
- DBUG_PRINT("enter",("key: %.*s data: %lx", length, key, data));
+ DBUG_PRINT("enter",("key: %.*s data: 0x%lx", length, key, data));
rw_wrlock(&hash->mutex);
entry= (SAFE_HASH_ENTRY*) hash_search(&hash->hash, key, length);
diff --git a/mysys/mf_tempfile.c b/mysys/mf_tempfile.c
index 0b337a74c19..af9ff0d6711 100644
--- a/mysys/mf_tempfile.c
+++ b/mysys/mf_tempfile.c
@@ -98,7 +98,7 @@ File create_temp_file(char *to, const char *dir, const char *prefix,
if (strlen(dir)+ pfx_len > FN_REFLEN-2)
{
errno=my_errno= ENAMETOOLONG;
- return 1;
+ DBUG_RETURN(file);
}
strmov(convert_dirname(to,dir,NullS),prefix_buff);
org_file=mkstemp(to);
@@ -124,7 +124,7 @@ File create_temp_file(char *to, const char *dir, const char *prefix,
#ifdef OS2
/* changing environ variable doesn't work with VACPP */
char buffer[256], *end;
- buffer[sizeof[buffer)-1]= 0;
+ buffer[sizeof(buffer)-1]= 0;
end= strxnmov(buffer, sizeof(buffer)-1, (char*) "TMP=", dir, NullS);
/* remove ending backslash */
if (end[-1] == '\\')
diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c
index 34a03391bc4..c9784ddc9a0 100644
--- a/mysys/my_alloc.c
+++ b/mysys/my_alloc.c
@@ -26,7 +26,7 @@ void init_alloc_root(MEM_ROOT *mem_root, uint block_size,
uint pre_alloc_size __attribute__((unused)))
{
DBUG_ENTER("init_alloc_root");
- DBUG_PRINT("enter",("root: %lx", mem_root));
+ DBUG_PRINT("enter",("root: 0x%lx", mem_root));
mem_root->free= mem_root->used= mem_root->pre_alloc= 0;
mem_root->min_malloc= 32;
mem_root->block_size= block_size-MALLOC_OVERHEAD-sizeof(USED_MEM)-8;
@@ -121,7 +121,7 @@ gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size)
#if defined(HAVE_purify) && defined(EXTRA_DEBUG)
reg1 USED_MEM *next;
DBUG_ENTER("alloc_root");
- DBUG_PRINT("enter",("root: %lx", mem_root));
+ DBUG_PRINT("enter",("root: 0x%lx", mem_root));
Size+=ALIGN_SIZE(sizeof(USED_MEM));
if (!(next = (USED_MEM*) my_malloc(Size,MYF(MY_WME))))
@@ -222,7 +222,7 @@ void free_root(MEM_ROOT *root, myf MyFlags)
{
reg1 USED_MEM *next,*old;
DBUG_ENTER("free_root");
- DBUG_PRINT("enter",("root: %lx flags: %u", root, (uint) MyFlags));
+ DBUG_PRINT("enter",("root: 0x%lx flags: %u", root, (uint) MyFlags));
if (!root) /* QQ: Should be deleted */
DBUG_VOID_RETURN; /* purecov: inspected */
diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c
index 8906a288b11..e918b7b0de2 100644
--- a/mysys/my_fopen.c
+++ b/mysys/my_fopen.c
@@ -54,7 +54,7 @@ FILE *my_fopen(const char *FileName, int Flags, myf MyFlags)
my_stream_opened++;
my_file_info[fileno(fd)].type = STREAM_BY_FOPEN;
pthread_mutex_unlock(&THR_LOCK_open);
- DBUG_PRINT("exit",("stream: %lx",fd));
+ DBUG_PRINT("exit",("stream: 0x%lx",fd));
DBUG_RETURN(fd);
}
pthread_mutex_unlock(&THR_LOCK_open);
@@ -78,7 +78,7 @@ int my_fclose(FILE *fd, myf MyFlags)
{
int err,file;
DBUG_ENTER("my_fclose");
- DBUG_PRINT("my",("stream: %lx MyFlags: %d",fd, MyFlags));
+ DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d",fd, MyFlags));
pthread_mutex_lock(&THR_LOCK_open);
file=fileno(fd);
@@ -138,7 +138,7 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags)
pthread_mutex_unlock(&THR_LOCK_open);
}
- DBUG_PRINT("exit",("stream: %lx",fd));
+ DBUG_PRINT("exit",("stream: 0x%lx",fd));
DBUG_RETURN(fd);
} /* my_fdopen */
diff --git a/mysys/my_fstream.c b/mysys/my_fstream.c
index 94f3aaf3464..00fe5c7a009 100644
--- a/mysys/my_fstream.c
+++ b/mysys/my_fstream.c
@@ -39,7 +39,7 @@ uint my_fread(FILE *stream, byte *Buffer, uint Count, myf MyFlags)
{
uint readbytes;
DBUG_ENTER("my_fread");
- DBUG_PRINT("my",("stream: %lx Buffer: %lx Count: %u MyFlags: %d",
+ DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d",
stream, Buffer, Count, MyFlags));
if ((readbytes = (uint) fread(Buffer,sizeof(char),(size_t) Count,stream))
@@ -80,7 +80,7 @@ uint my_fwrite(FILE *stream, const byte *Buffer, uint Count, myf MyFlags)
uint errors;
#endif
DBUG_ENTER("my_fwrite");
- DBUG_PRINT("my",("stream: %lx Buffer: %lx Count: %u MyFlags: %d",
+ DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d",
stream, Buffer, Count, MyFlags));
#if !defined(NO_BACKGROUND) && defined(USE_MY_STREAM)
@@ -150,7 +150,7 @@ my_off_t my_fseek(FILE *stream, my_off_t pos, int whence,
myf MyFlags __attribute__((unused)))
{
DBUG_ENTER("my_fseek");
- DBUG_PRINT("my",("stream: %lx pos: %lu whence: %d MyFlags: %d",
+ DBUG_PRINT("my",("stream: 0x%lx pos: %lu whence: %d MyFlags: %d",
stream, pos, whence, MyFlags));
DBUG_RETURN(fseek(stream, (off_t) pos, whence) ?
MY_FILEPOS_ERROR : (my_off_t) ftell(stream));
@@ -164,7 +164,7 @@ my_off_t my_ftell(FILE *stream, myf MyFlags __attribute__((unused)))
{
off_t pos;
DBUG_ENTER("my_ftell");
- DBUG_PRINT("my",("stream: %lx MyFlags: %d",stream, MyFlags));
+ DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d",stream, MyFlags));
pos=ftell(stream);
DBUG_PRINT("exit",("ftell: %lu",(ulong) pos));
DBUG_RETURN((my_off_t) pos);
diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c
index d7a9babe5e7..53e46932167 100644
--- a/mysys/my_getopt.c
+++ b/mysys/my_getopt.c
@@ -17,9 +17,12 @@
#include <my_global.h>
#include <m_string.h>
#include <stdlib.h>
-#include <my_getopt.h>
#include <my_sys.h>
#include <mysys_err.h>
+#include <my_getopt.h>
+
+static void default_reporter(enum loglevel level, const char *format, ...);
+my_error_reporter my_getopt_error_reporter= &default_reporter;
static int findopt(char *optpat, uint length,
const struct my_option **opt_res,
@@ -56,6 +59,14 @@ char *disabled_my_option= (char*) "0";
my_bool my_getopt_print_errors= 1;
+static void default_reporter(enum loglevel level __attribute__((unused)),
+ const char *format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+}
/*
function: handle_options
@@ -76,10 +87,8 @@ void my_getopt_register_get_addr(gptr* (*func_addr)(const char *, uint,
}
int handle_options(int *argc, char ***argv,
- const struct my_option *longopts,
- my_bool (*get_one_option)(int,
- const struct my_option *,
- char *))
+ const struct my_option *longopts,
+ my_get_one_option get_one_option)
{
uint opt_found, argvpos= 0, length, i;
my_bool end_of_options= 0, must_be_var, set_maximum_value,
@@ -118,8 +127,9 @@ int handle_options(int *argc, char ***argv,
if (!*++pos)
{
if (my_getopt_print_errors)
- fprintf(stderr, "%s: Option '-O' requires an argument\n",
- my_progname);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: Option '-O' requires an argument\n",
+ my_progname);
return EXIT_ARGUMENT_REQUIRED;
}
cur_arg= *pos;
@@ -135,9 +145,9 @@ int handle_options(int *argc, char ***argv,
if (!*cur_arg)
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: Option '--set-variable' requires an argument\n",
- my_progname);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: Option '--set-variable' requires an argument\n",
+ my_progname);
return EXIT_ARGUMENT_REQUIRED;
}
}
@@ -149,9 +159,9 @@ int handle_options(int *argc, char ***argv,
if (!*++pos)
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: Option '--set-variable' requires an argument\n",
- my_progname);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: Option '--set-variable' requires an argument\n",
+ my_progname);
return EXIT_ARGUMENT_REQUIRED;
}
cur_arg= *pos;
@@ -210,10 +220,11 @@ int handle_options(int *argc, char ***argv,
if (opt_found > 1)
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: ambiguous option '--%s-%s' (--%s-%s)\n",
- my_progname, special_opt_prefix[i], opt_str,
- special_opt_prefix[i], prev_found);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: ambiguous option '--%s-%s' (--%s-%s)\n",
+ my_progname, special_opt_prefix[i],
+ cur_arg, special_opt_prefix[i],
+ prev_found);
return EXIT_AMBIGUOUS_OPTION;
}
switch (i) {
@@ -245,18 +256,20 @@ int handle_options(int *argc, char ***argv,
if (must_be_var)
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: %s: unknown variable '%s'\n", my_progname,
- option_is_loose ? "WARNING" : "ERROR", opt_str);
+ my_getopt_error_reporter(option_is_loose ?
+ WARNING_LEVEL : ERROR_LEVEL,
+ "%s: unknown variable '%s'\n",
+ my_progname, cur_arg);
if (!option_is_loose)
return EXIT_UNKNOWN_VARIABLE;
}
else
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: %s: unknown option '--%s'\n", my_progname,
- option_is_loose ? "WARNING" : "ERROR", opt_str);
+ my_getopt_error_reporter(option_is_loose ?
+ WARNING_LEVEL : ERROR_LEVEL,
+ "%s: unknown option '--%s'\n",
+ my_progname, cur_arg);
if (!option_is_loose)
return EXIT_UNKNOWN_OPTION;
}
@@ -272,15 +285,18 @@ int handle_options(int *argc, char ***argv,
if (must_be_var)
{
if (my_getopt_print_errors)
- fprintf(stderr, "%s: variable prefix '%s' is not unique\n",
- my_progname, opt_str);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: variable prefix '%s' is not unique\n",
+ my_progname, opt_str);
return EXIT_VAR_PREFIX_NOT_UNIQUE;
}
else
{
if (my_getopt_print_errors)
- fprintf(stderr, "%s: ambiguous option '--%s' (%s, %s)\n",
- my_progname, opt_str, prev_found, optp->name);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: ambiguous option '--%s' (%s, %s)\n",
+ my_progname, opt_str, prev_found,
+ optp->name);
return EXIT_AMBIGUOUS_OPTION;
}
}
@@ -300,8 +316,9 @@ int handle_options(int *argc, char ***argv,
if (must_be_var && (optp->var_type & GET_TYPE_MASK) == GET_NO_ARG)
{
if (my_getopt_print_errors)
- fprintf(stderr, "%s: option '%s' cannot take an argument\n",
- my_progname, optp->name);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: option '%s' cannot take an argument\n",
+ my_progname, optp->name);
return EXIT_NO_ARGUMENT_ALLOWED;
}
value= optp->var_type & GET_ASK_ADDR ?
@@ -312,8 +329,9 @@ int handle_options(int *argc, char ***argv,
if (optend && (optp->var_type & GET_TYPE_MASK) != GET_BOOL)
{
if (my_getopt_print_errors)
- fprintf(stderr, "%s: option '--%s' cannot take an argument\n",
- my_progname, optp->name);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: option '--%s' cannot take an argument\n",
+ my_progname, optp->name);
return EXIT_NO_ARGUMENT_ALLOWED;
}
if ((optp->var_type & GET_TYPE_MASK) == GET_BOOL)
@@ -351,8 +369,9 @@ int handle_options(int *argc, char ***argv,
if (!*++pos)
{
if (my_getopt_print_errors)
- fprintf(stderr, "%s: option '--%s' requires an argument\n",
- my_progname, optp->name);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: option '--%s' requires an argument\n",
+ my_progname, optp->name);
return EXIT_ARGUMENT_REQUIRED;
}
argument= *pos;
@@ -410,9 +429,9 @@ int handle_options(int *argc, char ***argv,
if (!pos[1])
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: option '-%c' requires an argument\n",
- my_progname, optp->id);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: option '-%c' requires an argument\n",
+ my_progname, optp->id);
return EXIT_ARGUMENT_REQUIRED;
}
argument= *++pos;
@@ -423,9 +442,9 @@ int handle_options(int *argc, char ***argv,
if ((error= setval(optp, optp->value, argument,
set_maximum_value)))
{
- fprintf(stderr,
- "%s: Error while setting value '%s' to '%s'\n",
- my_progname, argument, optp->name);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: Error while setting value '%s' to '%s'\n",
+ my_progname, argument, optp->name);
return error;
}
get_one_option(optp->id, optp, argument);
@@ -435,8 +454,9 @@ int handle_options(int *argc, char ***argv,
if (!opt_found)
{
if (my_getopt_print_errors)
- fprintf(stderr,
- "%s: unknown option '-%c'\n", my_progname, *optend);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: unknown option '-%c'\n",
+ my_progname, *optend);
return EXIT_UNKNOWN_OPTION;
}
}
@@ -445,9 +465,9 @@ int handle_options(int *argc, char ***argv,
}
if ((error= setval(optp, value, argument, set_maximum_value)))
{
- fprintf(stderr,
- "%s: Error while setting value '%s' to '%s'\n",
- my_progname, argument, optp->name);
+ my_getopt_error_reporter(ERROR_LEVEL,
+ "%s: Error while setting value '%s' to '%s'\n",
+ my_progname, argument, optp->name);
return error;
}
get_one_option(optp->id, optp, argument);
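
The my_getopt.c hunks above replace direct fprintf(stderr, ...) calls with the pluggable my_getopt_error_reporter callback, so the host program (for example the embedded server) can route option-parsing diagnostics into its own log instead of stderr. A minimal sketch of installing a custom reporter, assuming the loglevel enum and reporter signature that the patched code refers to (verify the exact names in my_sys.h and my_getopt.h):

    /* Sketch only; the enum below is a stand-in for the one in my_sys.h. */
    #include <cstdarg>
    #include <cstdio>

    enum loglevel { ERROR_LEVEL, WARNING_LEVEL, INFORMATION_LEVEL };

    static void my_reporter(enum loglevel level, const char *format, ...)
    {
      va_list args;
      va_start(args, format);
      /* Prefix the severity, then forward the message unchanged. */
      std::fprintf(stderr, "[%s] ", level == ERROR_LEVEL ? "ERROR" : "WARNING");
      std::vfprintf(stderr, format, args);
      va_end(args);
    }

    /* During startup, before handle_options() runs, assign the hook the
       patched code calls:
         my_getopt_error_reporter= my_reporter;  */
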
diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c
index fd47c532cff..d6f647254e8 100644
--- a/mysys/my_getwd.c
+++ b/mysys/my_getwd.c
@@ -45,7 +45,7 @@ int my_getwd(my_string buf, uint size, myf MyFlags)
{
my_string pos;
DBUG_ENTER("my_getwd");
- DBUG_PRINT("my",("buf: %lx size: %d MyFlags %d", buf,size,MyFlags));
+ DBUG_PRINT("my",("buf: 0x%lx size: %d MyFlags %d", buf,size,MyFlags));
#if ! defined(MSDOS)
if (curr_dir[0]) /* Current pos is saved here */
diff --git a/mysys/my_handler.c b/mysys/my_handler.c
index 6003808df25..360a7666e94 100644
--- a/mysys/my_handler.c
+++ b/mysys/my_handler.c
@@ -158,7 +158,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a,
(flag=mi_compare_text(keyseg->charset,a,a_length,b,b_length,
(my_bool) ((nextflag & SEARCH_PREFIX) &&
next_key_length <= 0),
- !(nextflag & SEARCH_PREFIX))))
+ (my_bool)!(nextflag & SEARCH_PREFIX))))
return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag);
a+=a_length;
b+=b_length;
@@ -171,7 +171,7 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a,
(flag= mi_compare_text(keyseg->charset, a, a_length, b, b_length,
(my_bool) ((nextflag & SEARCH_PREFIX) &&
next_key_length <= 0),
- !(nextflag & SEARCH_PREFIX))))
+ (my_bool)!(nextflag & SEARCH_PREFIX))))
return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag);
a=end;
b+=length;
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index 0207d9a3683..c3b0b57e549 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -461,17 +461,6 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
else
finfo.mystat= NULL;
- /*
- If the directory is the root directory of the drive, Windows sometimes
- creates hidden or system files there (like RECYCLER); do not show
- them. We would need to see how this can be achieved with a Borland
- compiler.
- */
-#ifndef __BORLANDC__
- if (attrib & (_A_HIDDEN | _A_SYSTEM))
- continue;
-#endif
-
if (push_dynamic(dir_entries_storage, (gptr)&finfo))
goto error;
@@ -624,7 +613,7 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags)
{
int m_used;
DBUG_ENTER("my_stat");
- DBUG_PRINT("my", ("path: '%s', stat_area: %lx, MyFlags: %d", path,
+ DBUG_PRINT("my", ("path: '%s', stat_area: 0x%lx, MyFlags: %d", path,
(byte *) stat_area, my_flags));
if ((m_used= (stat_area == NULL)))
diff --git a/mysys/my_lwrite.c b/mysys/my_lwrite.c
index e1a3decd053..3b9afdbd71f 100644
--- a/mysys/my_lwrite.c
+++ b/mysys/my_lwrite.c
@@ -23,7 +23,7 @@ uint32 my_lwrite(int Filedes, const byte *Buffer, uint32 Count, myf MyFlags)
{
uint32 writenbytes;
DBUG_ENTER("my_lwrite");
- DBUG_PRINT("my",("Fd: %d Buffer: %lx Count: %ld MyFlags: %d",
+ DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %ld MyFlags: %d",
Filedes, Buffer, Count, MyFlags));
/* Temp hack to get count to int32 while write wants int */
diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c
index df9fe1f9bc4..3f601a42dc9 100644
--- a/mysys/my_malloc.c
+++ b/mysys/my_malloc.c
@@ -44,7 +44,7 @@ gptr my_malloc(unsigned int size, myf my_flags)
}
else if (my_flags & MY_ZEROFILL)
bzero(point,size);
- DBUG_PRINT("exit",("ptr: %lx",point));
+ DBUG_PRINT("exit",("ptr: 0x%lx",point));
DBUG_RETURN(point);
} /* my_malloc */
@@ -55,7 +55,7 @@ gptr my_malloc(unsigned int size, myf my_flags)
void my_no_flags_free(gptr ptr)
{
DBUG_ENTER("my_free");
- DBUG_PRINT("my",("ptr: %lx",ptr));
+ DBUG_PRINT("my",("ptr: 0x%lx",ptr));
if (ptr)
free(ptr);
DBUG_VOID_RETURN;
diff --git a/mysys/my_pread.c b/mysys/my_pread.c
index 661ef48ab3e..6a55a3cd8de 100644
--- a/mysys/my_pread.c
+++ b/mysys/my_pread.c
@@ -29,7 +29,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
uint readbytes;
int error;
DBUG_ENTER("my_pread");
- DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: %lx Count: %u MyFlags: %d",
+ DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: 0x%lx Count: %u MyFlags: %d",
Filedes, (ulong) offset, Buffer, Count, MyFlags));
for (;;)
@@ -82,7 +82,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset,
uint writenbytes,errors;
ulong written;
DBUG_ENTER("my_pwrite");
- DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: %lx Count: %d MyFlags: %d",
+ DBUG_PRINT("my",("Fd: %d Seek: %lu Buffer: 0x%lx Count: %d MyFlags: %d",
Filedes, (ulong) offset,Buffer, Count, MyFlags));
errors=0; written=0L;
diff --git a/mysys/my_read.c b/mysys/my_read.c
index b7621ac99eb..9de070e772d 100644
--- a/mysys/my_read.c
+++ b/mysys/my_read.c
@@ -38,7 +38,7 @@ uint my_read(File Filedes, byte *Buffer, uint Count, myf MyFlags)
{
uint readbytes,save_count;
DBUG_ENTER("my_read");
- DBUG_PRINT("my",("Fd: %d Buffer: %lx Count: %u MyFlags: %d",
+ DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d",
Filedes, Buffer, Count, MyFlags));
save_count=Count;
diff --git a/mysys/my_realloc.c b/mysys/my_realloc.c
index 5190fa75dce..c8edb172890 100644
--- a/mysys/my_realloc.c
+++ b/mysys/my_realloc.c
@@ -27,7 +27,7 @@ gptr my_realloc(gptr oldpoint, uint size, myf my_flags)
{
gptr point;
DBUG_ENTER("my_realloc");
- DBUG_PRINT("my",("ptr: %lx size: %u my_flags: %d",oldpoint, size,
+ DBUG_PRINT("my",("ptr: 0x%lx size: %u my_flags: %d",oldpoint, size,
my_flags));
if (!oldpoint && (my_flags & MY_ALLOW_ZERO_PTR))
@@ -60,6 +60,6 @@ gptr my_realloc(gptr oldpoint, uint size, myf my_flags)
my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_WAITTANG), size);
}
#endif
- DBUG_PRINT("exit",("ptr: %lx",point));
+ DBUG_PRINT("exit",("ptr: 0x%lx",point));
DBUG_RETURN(point);
} /* my_realloc */
diff --git a/mysys/my_write.c b/mysys/my_write.c
index 61fd6097e28..37d885f04cd 100644
--- a/mysys/my_write.c
+++ b/mysys/my_write.c
@@ -26,7 +26,7 @@ uint my_write(int Filedes, const byte *Buffer, uint Count, myf MyFlags)
uint writenbytes,errors;
ulong written;
DBUG_ENTER("my_write");
- DBUG_PRINT("my",("Fd: %d Buffer: %lx Count: %d MyFlags: %d",
+ DBUG_PRINT("my",("Fd: %d Buffer: 0x%lx Count: %d MyFlags: %d",
Filedes, Buffer, Count, MyFlags));
errors=0; written=0L;
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index 07c40fd91b6..6cdf98c5f5f 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -194,7 +194,7 @@ gptr _mymalloc(uint size, const char *filename, uint lineno, myf MyFlags)
if ((MyFlags & MY_ZEROFILL) || !sf_malloc_quick)
bfill(data, size, (char) (MyFlags & MY_ZEROFILL ? 0 : ALLOC_VAL));
/* Return a pointer to the real data */
- DBUG_PRINT("exit",("ptr: %lx", data));
+ DBUG_PRINT("exit",("ptr: 0x%lx", data));
if (sf_min_adress > data)
sf_min_adress= data;
if (sf_max_adress < data)
@@ -259,7 +259,7 @@ void _myfree(gptr ptr, const char *filename, uint lineno, myf myflags)
{
struct st_irem *irem;
DBUG_ENTER("_myfree");
- DBUG_PRINT("enter",("ptr: %lx", ptr));
+ DBUG_PRINT("enter",("ptr: 0x%lx", ptr));
if (!sf_malloc_quick)
(void) _sanity (filename, lineno);
@@ -446,7 +446,7 @@ static int _checkchunk(register struct st_irem *irem, const char *filename,
irem->filename, irem->linenum);
fprintf(stderr, " discovered at %s:%d\n", filename, lineno);
(void) fflush(stderr);
- DBUG_PRINT("safe",("Underrun at %lx, allocated at %s:%d",
+ DBUG_PRINT("safe",("Underrun at 0x%lx, allocated at %s:%d",
data, irem->filename, irem->linenum));
flag=1;
}
@@ -462,7 +462,7 @@ static int _checkchunk(register struct st_irem *irem, const char *filename,
irem->filename, irem->linenum);
fprintf(stderr, " discovered at '%s:%d'\n", filename, lineno);
(void) fflush(stderr);
- DBUG_PRINT("safe",("Overrun at %lx, allocated at %s:%d",
+ DBUG_PRINT("safe",("Overrun at 0x%lx, allocated at %s:%d",
data,
irem->filename,
irem->linenum));
diff --git a/mysys/thr_alarm.c b/mysys/thr_alarm.c
index 84a8e779ae1..caef1caaf3d 100644
--- a/mysys/thr_alarm.c
+++ b/mysys/thr_alarm.c
@@ -257,9 +257,9 @@ void thr_end_alarm(thr_alarm_t *alarmed)
if (!found)
{
if (*alarmed)
- fprintf(stderr,"Warning: Didn't find alarm %lx in queue of %d alarms\n",
+ fprintf(stderr,"Warning: Didn't find alarm 0x%lx in queue of %d alarms\n",
(long) *alarmed, alarm_queue.elements);
- DBUG_PRINT("warning",("Didn't find alarm %lx in queue\n",
+ DBUG_PRINT("warning",("Didn't find alarm 0x%lx in queue\n",
(long) *alarmed));
}
pthread_mutex_unlock(&LOCK_alarm);
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index 0e3ccfc0452..d9e46fe1beb 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -435,7 +435,7 @@ int thr_lock(THR_LOCK_DATA *data,enum thr_lock_type lock_type)
data->thread=pthread_self(); /* Must be reset ! */
data->thread_id=my_thread_id(); /* Must be reset ! */
VOID(pthread_mutex_lock(&lock->mutex));
- DBUG_PRINT("lock",("data: %lx thread: %ld lock: %lx type: %d",
+ DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx type: %d",
data,data->thread_id,lock,(int) lock_type));
check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ?
"enter read_lock" : "enter write_lock",0);
@@ -656,7 +656,7 @@ void thr_unlock(THR_LOCK_DATA *data)
THR_LOCK *lock=data->lock;
enum thr_lock_type lock_type=data->type;
DBUG_ENTER("thr_unlock");
- DBUG_PRINT("lock",("data: %lx thread: %ld lock: %lx",
+ DBUG_PRINT("lock",("data: 0x%lx thread: %ld lock: 0x%lx",
data,data->thread_id,lock));
pthread_mutex_lock(&lock->mutex);
check_locks(lock,"start of release lock",0);
@@ -827,7 +827,7 @@ int thr_multi_lock(THR_LOCK_DATA **data,uint count)
{
THR_LOCK_DATA **pos,**end;
DBUG_ENTER("thr_multi_lock");
- DBUG_PRINT("lock",("data: %lx count: %d",data,count));
+ DBUG_PRINT("lock",("data: 0x%lx count: %d",data,count));
if (count > 1)
sort_locks(data,count);
/* lock everything */
@@ -839,7 +839,7 @@ int thr_multi_lock(THR_LOCK_DATA **data,uint count)
DBUG_RETURN(1);
}
#ifdef MAIN
- printf("Thread: %s Got lock: %lx type: %d\n",my_thread_name(),
+ printf("Thread: %s Got lock: 0x%lx type: %d\n",my_thread_name(),
(long) pos[0]->lock, pos[0]->type); fflush(stdout);
#endif
}
@@ -899,12 +899,12 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count)
{
THR_LOCK_DATA **pos,**end;
DBUG_ENTER("thr_multi_unlock");
- DBUG_PRINT("lock",("data: %lx count: %d",data,count));
+ DBUG_PRINT("lock",("data: 0x%lx count: %d",data,count));
for (pos=data,end=data+count; pos < end ; pos++)
{
#ifdef MAIN
- printf("Thread: %s Rel lock: %lx type: %d\n",
+ printf("Thread: %s Rel lock: 0x%lx type: %d\n",
my_thread_name(), (long) pos[0]->lock, pos[0]->type);
fflush(stdout);
#endif
@@ -912,7 +912,7 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count)
thr_unlock(*pos);
else
{
- DBUG_PRINT("lock",("Free lock: data: %lx thread: %ld lock: %lx",
+ DBUG_PRINT("lock",("Free lock: data: 0x%lx thread: %ld lock: 0x%lx",
*pos,(*pos)->thread_id,(*pos)->lock));
}
}
@@ -1098,7 +1098,7 @@ static void thr_print_lock(const char* name,struct st_lock_list *list)
prev= &list->data;
for (data=list->data; data && count++ < MAX_LOCKS ; data=data->next)
{
- printf("%lx (%lu:%d); ",(ulong) data,data->thread_id,(int) data->type);
+ printf("0x%lx (%lu:%d); ",(ulong) data,data->thread_id,(int) data->type);
if (data->prev != prev)
printf("\nWarning: prev didn't point at previous lock\n");
prev= &data->next;
@@ -1120,7 +1120,7 @@ void thr_print_locks(void)
{
THR_LOCK *lock=(THR_LOCK*) list->data;
VOID(pthread_mutex_lock(&lock->mutex));
- printf("lock: %lx:",(ulong) lock);
+ printf("lock: 0x%lx:",(ulong) lock);
if ((lock->write_wait.data || lock->read_wait.data) &&
(! lock->read.data && ! lock->write.data))
printf(" WARNING: ");
diff --git a/mysys/thr_mutex.c b/mysys/thr_mutex.c
index 8ebe5be22e8..bbcfaa8bba6 100644
--- a/mysys/thr_mutex.c
+++ b/mysys/thr_mutex.c
@@ -210,7 +210,7 @@ int safe_cond_wait(pthread_cond_t *cond, safe_mutex_t *mp, const char *file,
if (mp->count++)
{
fprintf(stderr,
- "safe_mutex: Count was %d in thread %lx when locking mutex at %s, line %d\n",
+ "safe_mutex: Count was %d in thread 0x%lx when locking mutex at %s, line %d\n",
mp->count-1, my_thread_id(), file, line);
fflush(stderr);
abort();
@@ -248,7 +248,7 @@ int safe_cond_timedwait(pthread_cond_t *cond, safe_mutex_t *mp,
if (mp->count++)
{
fprintf(stderr,
- "safe_mutex: Count was %d in thread %lx when locking mutex at %s, line %d (error: %d (%d))\n",
+ "safe_mutex: Count was %d in thread 0x%lx when locking mutex at %s, line %d (error: %d (%d))\n",
mp->count-1, my_thread_id(), file, line, error, error);
fflush(stderr);
abort();
diff --git a/mysys/tree.c b/mysys/tree.c
index 063c8739e58..bec1ec680f1 100644
--- a/mysys/tree.c
+++ b/mysys/tree.c
@@ -89,7 +89,7 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit,
tree_element_free free_element, void *custom_arg)
{
DBUG_ENTER("init_tree");
- DBUG_PRINT("enter",("tree: %lx size: %d",tree,size));
+ DBUG_PRINT("enter",("tree: 0x%lx size: %d",tree,size));
if (default_alloc_size < DEFAULT_ALLOC_SIZE)
default_alloc_size= DEFAULT_ALLOC_SIZE;
@@ -137,7 +137,7 @@ void init_tree(TREE *tree, uint default_alloc_size, uint memory_limit,
static void free_tree(TREE *tree, myf free_flags)
{
DBUG_ENTER("free_tree");
- DBUG_PRINT("enter",("tree: %lx",tree));
+ DBUG_PRINT("enter",("tree: 0x%lx",tree));
if (tree->root) /* If initialized */
{
diff --git a/mysys/typelib.c b/mysys/typelib.c
index 9aaf97d143f..90a093b0b32 100644
--- a/mysys/typelib.c
+++ b/mysys/typelib.c
@@ -49,7 +49,7 @@ int find_type(my_string x, TYPELIB *typelib, uint full_name)
reg1 my_string i;
reg2 const char *j;
DBUG_ENTER("find_type");
- DBUG_PRINT("enter",("x: '%s' lib: %lx",x,typelib));
+ DBUG_PRINT("enter",("x: '%s' lib: 0x%lx",x,typelib));
if (!typelib->count)
{
diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am
index 2565a78238b..b1b7951f216 100644
--- a/ndb/include/Makefile.am
+++ b/ndb/include/Makefile.am
@@ -23,6 +23,7 @@ ndbapi/NdbReceiver.hpp \
ndbapi/NdbResultSet.hpp \
ndbapi/NdbScanFilter.hpp \
ndbapi/NdbScanOperation.hpp \
+ndbapi/NdbIndexScanOperation.hpp \
ndbapi/ndberror.h
mgmapiinclude_HEADERS = \
diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp
index c49bd176ee8..6cd6a83e68d 100644
--- a/ndb/include/debugger/EventLogger.hpp
+++ b/ndb/include/debugger/EventLogger.hpp
@@ -73,13 +73,6 @@ public:
~EventLogger();
/**
- * Open/create the eventlog, the default name is 'cluster.log'.
- *
- * @return true if successful.
- */
- bool open();
-
- /**
* Opens/creates the eventlog with the specified filename.
*
* @param aFileName the eventlog filename.
diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h
index 7b70f4c3ac0..8941fa6b381 100644
--- a/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/ndb/include/kernel/GlobalSignalNumbers.h
@@ -84,7 +84,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_SCAN_NEXTREQ 28
#define GSN_SCAN_TABCONF 29
-#define GSN_SCAN_TABINFO 30
+// 30 unused
#define GSN_SCAN_TABREF 31
#define GSN_SCAN_TABREQ 32
#define GSN_KEYINFO20 33
@@ -897,12 +897,9 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_TUX_MAINT_CONF 678
#define GSN_TUX_MAINT_REF 679
-/*
- * TUP access
- */
-#define GSN_TUP_READ_ATTRS 680
-#define GSN_TUP_QUERY_TH 712
-#define GSN_TUP_STORE_TH 681
+// not used 680
+// not used 712
+// not used 681
/**
* from mgmtsrvr to NDBCNTR
diff --git a/ndb/include/kernel/Interpreter.hpp b/ndb/include/kernel/Interpreter.hpp
index 2c282be361c..74399f5732e 100644
--- a/ndb/include/kernel/Interpreter.hpp
+++ b/ndb/include/kernel/Interpreter.hpp
@@ -83,7 +83,7 @@ public:
static Uint32 LoadConst64(Uint32 Register); // Value in next 2 words
static Uint32 Add(Uint32 DstReg, Uint32 SrcReg1, Uint32 SrcReg2);
static Uint32 Sub(Uint32 DstReg, Uint32 SrcReg1, Uint32 SrcReg2);
- static Uint32 Branch(Uint32 Inst, Uint32 R1, Uint32 R2);
+ static Uint32 Branch(Uint32 Inst, Uint32 Reg1, Uint32 Reg2);
static Uint32 ExitOK();
/**
@@ -184,8 +184,8 @@ Interpreter::Sub(Uint32 Dcoleg, Uint32 SrcReg1, Uint32 SrcReg2){
inline
Uint32
-Interpreter::Branch(Uint32 Inst, Uint32 R1, Uint32 R2){
- return (R1 << 9) + (R2 << 6) + Inst;
+Interpreter::Branch(Uint32 Inst, Uint32 Reg1, Uint32 Reg2){
+ return (Reg1 << 9) + (Reg2 << 6) + Inst;
}
inline
diff --git a/ndb/include/kernel/NodeInfo.hpp b/ndb/include/kernel/NodeInfo.hpp
index 86aca7d6883..5377f001949 100644
--- a/ndb/include/kernel/NodeInfo.hpp
+++ b/ndb/include/kernel/NodeInfo.hpp
@@ -18,6 +18,7 @@
#define NODE_INFO_HPP
#include <NdbOut.hpp>
+#include <mgmapi_config_parameters.h>
class NodeInfo {
public:
@@ -27,10 +28,10 @@ public:
* NodeType
*/
enum NodeType {
- DB = 0, ///< Database node
- API = 1, ///< NDB API node
- MGM = 2, ///< Management node (incl. NDB API)
- REP = 3, ///< Replication node (incl. NDB API)
+ DB = NODE_TYPE_DB, ///< Database node
+ API = NODE_TYPE_API, ///< NDB API node
+ MGM = NODE_TYPE_MGM, ///< Management node (incl. NDB API)
+ REP = NODE_TYPE_REP, ///< Replication node (incl. NDB API)
INVALID = 255 ///< Invalid type
};
NodeType getType() const;
diff --git a/ndb/include/kernel/NodeState.hpp b/ndb/include/kernel/NodeState.hpp
index 1bc7806876d..185e3ea8ea4 100644
--- a/ndb/include/kernel/NodeState.hpp
+++ b/ndb/include/kernel/NodeState.hpp
@@ -18,6 +18,7 @@
#define NODE_STATE_HPP
#include <NdbOut.hpp>
+#include <NodeBitmask.hpp>
class NodeState {
public:
@@ -99,7 +100,7 @@ public:
/**
* Length in 32-bit words
*/
- static const Uint32 DataLength = 8;
+ static const Uint32 DataLength = 8 + NdbNodeBitmask::Size;
/**
* Constructor(s)
@@ -108,7 +109,8 @@ public:
NodeState(StartLevel);
NodeState(StartLevel, bool systemShutdown);
NodeState(StartLevel, Uint32 startPhase, StartType);
-
+ void init();
+
/**
* Current start level
*/
@@ -146,6 +148,8 @@ public:
Uint32 singleUserMode;
Uint32 singleUserApi; //the single user node
+ BitmaskPOD<NdbNodeBitmask::Size> m_connected_nodes;
+
void setDynamicId(Uint32 dynamic);
void setNodeGroup(Uint32 group);
void setSingleUser(Uint32 s);
@@ -177,16 +181,23 @@ public:
inline
NodeState::NodeState(){
+ init();
+}
+
+inline
+void
+NodeState::init(){
startLevel = SL_CMVMI;
nodeGroup = 0xFFFFFFFF;
dynamicId = 0xFFFFFFFF;
singleUserMode = 0;
singleUserApi = 0xFFFFFFFF;
+ m_connected_nodes.clear();
}
inline
NodeState::NodeState(StartLevel sl){
- NodeState::NodeState();
+ init();
startLevel = sl;
singleUserMode = 0;
singleUserApi = 0xFFFFFFFF;
@@ -194,7 +205,7 @@ NodeState::NodeState(StartLevel sl){
inline
NodeState::NodeState(StartLevel sl, Uint32 sp, StartType typeOfStart){
- NodeState::NodeState();
+ init();
startLevel = sl;
starting.startPhase = sp;
starting.restartType = typeOfStart;
@@ -204,7 +215,7 @@ NodeState::NodeState(StartLevel sl, Uint32 sp, StartType typeOfStart){
inline
NodeState::NodeState(StartLevel sl, bool sys){
- NodeState::NodeState();
+ init();
startLevel = sl;
stopping.systemShutdown = sys;
singleUserMode = 0;
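
A note on the init() refactor above: a statement such as NodeState::NodeState(); inside another constructor does not re-run the default constructor on the current object, it only creates and discards an unnamed temporary, so the shared field setup never actually happened. Moving that setup into init() and calling it from every constructor fixes this. A minimal sketch of the pattern, with illustrative names only:

    // Shared setup delegated through a plain member function (pre-C++11 idiom).
    struct Example {
      int level;
      int group;
      Example()       { init(); }
      Example(int l)  { init(); level = l; }  // every constructor reuses init()
      void init()     { level = 0; group = -1; }
    };
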
diff --git a/ndb/include/kernel/kernel_config_parameters.h b/ndb/include/kernel/kernel_config_parameters.h
index 2f63efa4b6c..bb7c6ebd42c 100644
--- a/ndb/include/kernel/kernel_config_parameters.h
+++ b/ndb/include/kernel/kernel_config_parameters.h
@@ -14,10 +14,7 @@
#define CFG_ACC_SCAN (PRIVATE_BASE + 9)
#define CFG_DICT_ATTRIBUTE (PRIVATE_BASE + 10)
-#define CFG_DICT_CONNECT (PRIVATE_BASE + 11)
-#define CFG_DICT_FRAG_CONNECT (PRIVATE_BASE + 12)
#define CFG_DICT_TABLE (PRIVATE_BASE + 13)
-#define CFG_DICT_TC_CONNECT (PRIVATE_BASE + 14)
#define CFG_DIH_API_CONNECT (PRIVATE_BASE + 15)
#define CFG_DIH_CONNECT (PRIVATE_BASE + 16)
@@ -27,10 +24,8 @@
#define CFG_DIH_TABLE (PRIVATE_BASE + 20)
#define CFG_LQH_FRAG (PRIVATE_BASE + 21)
-#define CFG_LQH_CONNECT (PRIVATE_BASE + 22)
#define CFG_LQH_TABLE (PRIVATE_BASE + 23)
#define CFG_LQH_TC_CONNECT (PRIVATE_BASE + 24)
-#define CFG_LQH_REPLICAS (PRIVATE_BASE + 25)
#define CFG_LQH_LOG_FILES (PRIVATE_BASE + 26)
#define CFG_LQH_SCAN (PRIVATE_BASE + 27)
diff --git a/ndb/include/kernel/ndb_limits.h b/ndb/include/kernel/ndb_limits.h
index 68ffe310328..f35cc617e86 100644
--- a/ndb/include/kernel/ndb_limits.h
+++ b/ndb/include/kernel/ndb_limits.h
@@ -88,7 +88,7 @@
* Ordered index constants. Make configurable per index later.
*/
#define MAX_TTREE_NODE_SIZE 64 // total words in node
-#define MAX_TTREE_PREF_SIZE 4 // words in min/max prefix each
+#define MAX_TTREE_PREF_SIZE 4 // words in min prefix
#define MAX_TTREE_NODE_SLACK 3 // diff between max and min occupancy
/*
diff --git a/ndb/include/kernel/signaldata/CreateFragmentation.hpp b/ndb/include/kernel/signaldata/CreateFragmentation.hpp
index a2f45a9580d..7d53dd91154 100644
--- a/ndb/include/kernel/signaldata/CreateFragmentation.hpp
+++ b/ndb/include/kernel/signaldata/CreateFragmentation.hpp
@@ -88,7 +88,7 @@ class CreateFragmentationConf {
friend bool printCREATE_FRAGMENTATION_CONF(FILE *,
const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 3 );
+ STATIC_CONST( SignalLength = 4 );
SECTION( FRAGMENTS = 0 );
private:
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 67610f9d2be..dec7145c897 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -100,6 +100,7 @@ public:
CustomTriggerId = 25,
FrmLen = 26,
FrmData = 27,
+ FragmentCount = 128, // No of fragments in table (!fragment replicas)
TableEnd = 999,
AttributeName = 1000, // String, Mandatory
@@ -277,6 +278,7 @@ public:
Uint32 CustomTriggerId;
Uint32 FrmLen;
char FrmData[MAX_FRM_DATA_SIZE];
+ Uint32 FragmentCount;
void init();
};
@@ -309,7 +311,7 @@ public:
ExtDatetime = NdbSqlUtil::Type::Datetime,
ExtTimespec = NdbSqlUtil::Type::Timespec,
ExtBlob = NdbSqlUtil::Type::Blob,
- ExtClob = NdbSqlUtil::Type::Clob
+ ExtText = NdbSqlUtil::Type::Text
};
// Attribute data interpretation
@@ -433,7 +435,7 @@ public:
AttributeArraySize = 12 * AttributeExtLength;
return true;
case DictTabInfo::ExtBlob:
- case DictTabInfo::ExtClob:
+ case DictTabInfo::ExtText:
AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
// head + inline part [ attr precision ]
diff --git a/ndb/include/kernel/signaldata/DropTab.hpp b/ndb/include/kernel/signaldata/DropTab.hpp
index 906f952d852..dd3946d8cc0 100644
--- a/ndb/include/kernel/signaldata/DropTab.hpp
+++ b/ndb/include/kernel/signaldata/DropTab.hpp
@@ -101,7 +101,8 @@ public:
NoSuchTable = 1,
    DropWoPrep = 2, // Calling Drop without first calling PrepDrop
PrepDropInProgress = 3,
- DropInProgress = 4
+ DropInProgress = 4,
+ NF_FakeErrorREF = 5
};
private:
diff --git a/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/ndb/include/kernel/signaldata/DumpStateOrd.hpp
index 6403a52926f..1e349fad55a 100644
--- a/ndb/include/kernel/signaldata/DumpStateOrd.hpp
+++ b/ndb/include/kernel/signaldata/DumpStateOrd.hpp
@@ -94,6 +94,7 @@ public:
TcDumpOneApiConnectRec = 2505,
TcDumpAllApiConnectRec = 2506,
TcSetTransactionTimeout = 2507,
+ TcSetApplTransactionTimeout = 2508,
CmvmiDumpConnections = 2600,
CmvmiDumpLongSignalMemory = 2601,
CmvmiSetRestartOnErrorInsert = 2602,
diff --git a/ndb/include/kernel/signaldata/KeyInfo.hpp b/ndb/include/kernel/signaldata/KeyInfo.hpp
index b839a2c2035..a4c698f89b2 100644
--- a/ndb/include/kernel/signaldata/KeyInfo.hpp
+++ b/ndb/include/kernel/signaldata/KeyInfo.hpp
@@ -25,6 +25,7 @@ class KeyInfo {
*/
friend class DbUtil;
friend class NdbOperation;
+ friend class NdbScanOperation;
/**
   * Receiver(s)
diff --git a/ndb/include/kernel/signaldata/PrepDropTab.hpp b/ndb/include/kernel/signaldata/PrepDropTab.hpp
index e9cc28fed0c..c54b2474aa3 100644
--- a/ndb/include/kernel/signaldata/PrepDropTab.hpp
+++ b/ndb/include/kernel/signaldata/PrepDropTab.hpp
@@ -88,7 +88,8 @@ public:
NoSuchTable = 1,
PrepDropInProgress = 2,
DropInProgress = 3,
- InvalidTableState = 4
+ InvalidTableState = 4,
+ NF_FakeErrorREF = 5
};
private:
@@ -137,7 +138,8 @@ public:
enum ErrorCode {
NoSuchTable = 1,
IllegalTableState = 2,
- DropInProgress = 3
+ DropInProgress = 3,
+ NF_FakeErrorREF = 4
};
Uint32 tableId;
diff --git a/ndb/include/kernel/signaldata/ScanTab.hpp b/ndb/include/kernel/signaldata/ScanTab.hpp
index efd8a4918ab..ab2978e48da 100644
--- a/ndb/include/kernel/signaldata/ScanTab.hpp
+++ b/ndb/include/kernel/signaldata/ScanTab.hpp
@@ -33,8 +33,8 @@ class ScanTabReq {
/**
* Sender(s)
*/
- friend class NdbOperation;
friend class NdbConnection;
+ friend class NdbScanOperation;
/**
* For printing
@@ -73,6 +73,7 @@ private:
static Uint8 getHoldLockFlag(const UintR & requestInfo);
static Uint8 getReadCommittedFlag(const UintR & requestInfo);
static Uint8 getRangeScanFlag(const UintR & requestInfo);
+ static Uint8 getScanBatch(const UintR & requestInfo);
/**
* Set:ers for requestInfo
@@ -83,7 +84,7 @@ private:
static void setHoldLockFlag(UintR & requestInfo, Uint32 flag);
static void setReadCommittedFlag(UintR & requestInfo, Uint32 flag);
static void setRangeScanFlag(UintR & requestInfo, Uint32 flag);
-
+ static void setScanBatch(Uint32& requestInfo, Uint32 sz);
};
/**
@@ -94,10 +95,11 @@ private:
h = Hold lock mode - 1 Bit 10
c = Read Committed - 1 Bit 11
x = Range Scan (TUX) - 1 Bit 15
+ b = Scan batch - 5 Bit 16-20 (max 31)
1111111111222222222233
01234567890123456789012345678901
- ppppppppl hc x
+ ppppppppl hc xbbbbb
*/
#define PARALLELL_SHIFT (0)
@@ -115,6 +117,9 @@ private:
#define RANGE_SCAN_SHIFT (15)
#define RANGE_SCAN_MASK (1)
+#define SCAN_BATCH_SHIFT (16)
+#define SCAN_BATCH_MASK (31)
+
inline
Uint8
ScanTabReq::getParallelism(const UintR & requestInfo){
@@ -146,6 +151,12 @@ ScanTabReq::getRangeScanFlag(const UintR & requestInfo){
}
inline
+Uint8
+ScanTabReq::getScanBatch(const Uint32 & requestInfo){
+ return (Uint8)((requestInfo >> SCAN_BATCH_SHIFT) & SCAN_BATCH_MASK);
+}
+
+inline
void
ScanTabReq::clearRequestInfo(UintR & requestInfo){
requestInfo = 0;
@@ -186,6 +197,12 @@ ScanTabReq::setRangeScanFlag(UintR & requestInfo, Uint32 flag){
requestInfo |= (flag << RANGE_SCAN_SHIFT);
}
+inline
+void
+ScanTabReq::setScanBatch(Uint32 & requestInfo, Uint32 flag){
+ ASSERT_MAX(flag, SCAN_BATCH_MASK, "ScanTabReq::setScanBatch");
+ requestInfo |= (flag << SCAN_BATCH_SHIFT);
+}
/**
*
@@ -213,7 +230,8 @@ public:
* Length of signal
*/
STATIC_CONST( SignalLength = 4 );
-
+ static const Uint32 EndOfData = (1 << 31);
+
private:
// Type definitions
@@ -225,29 +243,15 @@ private:
UintR requestInfo; // DATA 1
UintR transId1; // DATA 2
UintR transId2; // DATA 3
-#if 0
- UintR operLenAndIdx[16]; // DATA 4-19
-
- /**
- * Get:ers for operLenAndIdx
- */
- static Uint32 getLen(const UintR & operLenAndIdx);
- static Uint8 getIdx(const UintR & operLenAndIdx);
-#endif
-
- /**
- * Get:ers for requestInfo
- */
- static Uint8 getOperations(const UintR & reqInfo);
- static Uint8 getScanStatus(const UintR & reqInfo);
-
- /**
- * Set:ers for requestInfo
- */
- static void setOperations(UintR & reqInfo, Uint32 ops);
- static void setScanStatus(UintR & reqInfo, Uint32 stat);
+ struct OpData {
+ Uint32 apiPtrI;
+ Uint32 tcPtrI;
+ Uint32 info;
+ };
+ static Uint32 getLength(Uint32 opDataInfo) { return opDataInfo >> 5; };
+ static Uint32 getRows(Uint32 opDataInfo) { return opDataInfo & 31;}
};
/**
@@ -267,103 +271,6 @@ private:
#define STATUS_SHIFT (8)
#define STATUS_MASK (0xFF)
-inline
-Uint8
-ScanTabConf::getOperations(const UintR & reqInfo){
- return (Uint8)((reqInfo >> OPERATIONS_SHIFT) & OPERATIONS_MASK);
-}
-
-inline
-void
-ScanTabConf::setOperations(UintR & requestInfo, Uint32 ops){
- ASSERT_MAX(ops, OPERATIONS_MASK, "ScanTabConf::setOperations");
- requestInfo |= (ops << OPERATIONS_SHIFT);
-}
-
-inline
-Uint8
-ScanTabConf::getScanStatus(const UintR & reqInfo){
- return (Uint8)((reqInfo >> STATUS_SHIFT) & STATUS_MASK);
-}
-
-inline
-void
-ScanTabConf::setScanStatus(UintR & requestInfo, Uint32 stat){
- ASSERT_MAX(stat, STATUS_MASK, "ScanTabConf::setScanStatus");
- requestInfo |= (stat << STATUS_SHIFT);
-}
-
-
-/**
- *
- * SENDER: Dbtc, API
- * RECIVER: API, Dbtc
- */
-class ScanTabInfo {
- /**
- * Reciver(s) and Sender(s)
- */
- friend class NdbConnection;
- friend class Dbtc;
-
- /**
- * For printing
- */
- friend bool printSCANTABINFO(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
-
-public:
- /**
- * Length of signal
- */
- STATIC_CONST( SignalLength = 17 );
-
-private:
-
- // Type definitions
-
- /**
- * DATA VARIABLES
- */
- UintR apiConnectPtr; // DATA 0
- UintR operLenAndIdx[16]; // DATA 1-16
-
- /**
- * Get:ers for operLenAndIdx
- */
- static Uint32 getLen(const UintR & operLenAndIdx);
- static Uint8 getIdx(const UintR & operLenAndIdx);
-
-};
-
-
-/**
- * Operation length and index
- *
- l = Length of operation - 24 Bits -> Max 16777215 (Bit 0-24)
- i = Index of operation - 7 Bits -> Max 255 (Bit 25-32)
-
- 1111111111222222222233
- 01234567890123456789012345678901
- llllllllllllllllllllllllliiiiiii
-*/
-
-#define LENGTH_SHIFT (0)
-#define LENGTH_MASK (0xFFFFFF)
-
-#define INDEX_SHIFT (24)
-#define INDEX_MASK (0xFF)
-
-inline
-Uint32
-ScanTabInfo::getLen(const UintR & operLenAndIdx){
- return (Uint32)((operLenAndIdx >> LENGTH_SHIFT) & LENGTH_MASK);
-}
-
-inline
-Uint8
-ScanTabInfo::getIdx(const UintR & operLenAndIdx){
- return (Uint8)((operLenAndIdx >> INDEX_SHIFT) & INDEX_MASK);
-}
/**
*
@@ -390,7 +297,7 @@ public:
/**
* Length of signal
*/
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
private:
@@ -403,7 +310,7 @@ private:
UintR transId1; // DATA 1
UintR transId2; // DATA 2
UintR errorCode; // DATA 3
- // UintR sendScanNextReqWithClose; // DATA 4
+ UintR closeNeeded; // DATA 4
};
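
The requestInfo layout comment above reserves five bits for the new scan-batch field (SCAN_BATCH_SHIFT 16, SCAN_BATCH_MASK 31). A minimal sketch of the packing arithmetic performed by the added setScanBatch/getScanBatch accessors:

    #include <cassert>
    #include <cstdint>

    const unsigned SCAN_BATCH_SHIFT = 16;
    const unsigned SCAN_BATCH_MASK  = 31;   // five bits, values 0..31

    // Pack the batch size into requestInfo (the field is assumed to be zero).
    void setScanBatch(uint32_t &requestInfo, uint32_t sz)
    {
      assert(sz <= SCAN_BATCH_MASK);
      requestInfo |= (sz << SCAN_BATCH_SHIFT);
    }

    // Extract the batch size again.
    uint32_t getScanBatch(uint32_t requestInfo)
    {
      return (requestInfo >> SCAN_BATCH_SHIFT) & SCAN_BATCH_MASK;
    }
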
diff --git a/ndb/include/kernel/signaldata/SignalData.hpp b/ndb/include/kernel/signaldata/SignalData.hpp
index 511e7d30c21..6e5748217b2 100644
--- a/ndb/include/kernel/signaldata/SignalData.hpp
+++ b/ndb/include/kernel/signaldata/SignalData.hpp
@@ -21,20 +21,10 @@
#include <ndb_limits.h>
#include <kernel_types.h>
-#ifndef NDB_ASSERT
-#ifdef VM_TRACE
-#define NDB_ASSERT(test, message) { if(!(test)) { printf(message); exit(-1); }}
-#else
-#define NDB_ASSERT(test, message)
-#endif
-#endif
-
-// Useful ASSERT macros...
-#define ASSERT_BOOL(flag, message) NDB_ASSERT( (flag<=1), (message) )
+#define ASSERT_BOOL(flag, message) assert(flag<=1)
#define ASSERT_RANGE(value, min, max, message) \
- NDB_ASSERT((value) >= (min) && (value) <= (max), (message))
-#define ASSERT_MAX(value, max, message) \
- NDB_ASSERT((value) <= (max), (message))
+ assert((value) >= (min) && (value) <= (max))
+#define ASSERT_MAX(value, max, message) assert((value) <= (max))
#define SECTION(x) STATIC_CONST(x)
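
Note that the ASSERT_* helpers now expand to the standard assert(), so the message argument is ignored and the checks disappear when NDEBUG is defined; the old NDB_ASSERT printed the message and exited, but only in VM_TRACE builds. For example:

    // ASSERT_MAX(sz, SCAN_BATCH_MASK, "ScanTabReq::setScanBatch");
    //   now expands to:  assert((sz) <= (SCAN_BATCH_MASK));
    //   the string literal is dropped, and building with -DNDEBUG removes the check.
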
diff --git a/ndb/include/kernel/signaldata/StartInfo.hpp b/ndb/include/kernel/signaldata/StartInfo.hpp
index da032adba8a..d0850b13ef4 100644
--- a/ndb/include/kernel/signaldata/StartInfo.hpp
+++ b/ndb/include/kernel/signaldata/StartInfo.hpp
@@ -78,7 +78,7 @@ class StartInfoRef {
Uint32 errorCode;
public:
- STATIC_CONST( SignalLength = 2 );
+ STATIC_CONST( SignalLength = 3 );
};
#endif
diff --git a/ndb/include/kernel/signaldata/TcCommit.hpp b/ndb/include/kernel/signaldata/TcCommit.hpp
index 43eb7be1c39..b7f3fbbb361 100644
--- a/ndb/include/kernel/signaldata/TcCommit.hpp
+++ b/ndb/include/kernel/signaldata/TcCommit.hpp
@@ -33,6 +33,7 @@ class TcCommitConf {
   * Receiver(s)
*/
friend class Ndb;
+ friend class NdbConnection;
public:
STATIC_CONST( SignalLength = 3 );
diff --git a/ndb/include/kernel/signaldata/TcKeyReq.hpp b/ndb/include/kernel/signaldata/TcKeyReq.hpp
index df0a00da3e0..f7d3c2e3282 100644
--- a/ndb/include/kernel/signaldata/TcKeyReq.hpp
+++ b/ndb/include/kernel/signaldata/TcKeyReq.hpp
@@ -38,6 +38,7 @@ class TcKeyReq {
friend class Ndbcntr;
friend class NdbOperation;
friend class NdbIndexOperation;
+ friend class NdbScanOperation;
friend class DbUtil;
/**
diff --git a/ndb/include/kernel/signaldata/TupAccess.hpp b/ndb/include/kernel/signaldata/TupAccess.hpp
deleted file mode 100644
index ab56a73322c..00000000000
--- a/ndb/include/kernel/signaldata/TupAccess.hpp
+++ /dev/null
@@ -1,174 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef TUP_ACCESS_HPP
-#define TUP_ACCESS_HPP
-
-#include "SignalData.hpp"
-
-/*
- * Direct signals used by ACC and TUX to access the TUP block in the
- * same thread.
- *
- * NOTE: Caller must set errorCode to RNIL. Signal printer uses this to
- * distinguish between input and output (no better way exists).
- */
-
-/*
- * Read attributes from any table.
- */
-class TupReadAttrs {
- friend class Dbtup;
- friend class Dbacc;
- friend class Dbtux;
- friend bool printTUP_READ_ATTRS(FILE*, const Uint32*, Uint32, Uint16);
-public:
- enum Flag {
- /*
- * Read primary key attributes. No input attribute ids are
- * specified. Instead TUP fills in both input and output sections.
- * Tuple version is not used.
- */
- ReadKeys = (1 << 0)
- };
- STATIC_CONST( SignalLength = 10 );
-private:
- /*
- * Error code set by TUP. Zero means no error.
- */
- Uint32 errorCode;
- /*
- * Request info contains flags (see Flags above).
- */
- Uint32 requestInfo;
- /*
- * Table i-value.
- */
- Uint32 tableId;
- /*
- * Fragment is given by logical id within the table or by direct
- * i-value (faster). Unknown values are given as RNIL. On return TUP
- * fills in both values.
- */
- Uint32 fragId;
- Uint32 fragPtrI;
- /*
- * Logical address ("local key") of "original" tuple (the latest
- * version) consisting of logical fragment page id and tuple index
- * within the page (shifted left by 1).
- */
- Uint32 tupAddr;
- /*
- * Version of the tuple to read. Not used if ReadKeys.
- */
- Uint32 tupVersion;
- /*
- * Real page id and offset of the "original" tuple. Unknown page is
- * given as RNIL. On return TUP fills in these.
- */
- Uint32 pageId;
- Uint32 pageOffset;
- /*
- * Shared buffer id. Currently must be 0 which means to use rest of
- * signal data.
- */
- Uint32 bufferId;
- /*
- * Shared buffer 0 starts after signal class. Input is number of
- * attributes and list of attribute ids in AttributeHeader format.
- * Output is placed after the input and consists of a list of entries
- * where each entry has an AttributeHeader followed by words of data.
- */
-};
-
-/*
- * Query status of tuple version. Used by TUX to decide if a tuple
- * version found in index tree is visible to the transaction.
- */
-class TupQueryTh {
- friend class Dbtup;
- friend class Dbtux;
- friend bool printTUP_QUERY_TH(FILE*, const Uint32*, Uint32, Uint16);
-public:
- enum Flag {
- };
- STATIC_CONST( SignalLength = 7 );
-private:
- /*
- TUX wants to check if tuple is visible to the scan query.
- Input data is tuple address (tableId, fragId, tupAddr, tupVersion),
- and transaction data so that TUP knows how to deduct if tuple is
- visible (transId1, transId2, savePointId).
- returnCode is set in return signal to indicate whether tuple is visible.
- */
- union {
- Uint32 returnCode; // 1 if tuple visible
- Uint32 tableId;
- };
- Uint32 fragId;
- Uint32 tupAddr;
- Uint32 tupVersion;
- Uint32 transId1;
- Uint32 transId2;
- Uint32 savePointId;
-};
-
-/*
- * Operate on entire tuple. Used by TUX where the table has a single
- * Uint32 array attribute representing an index tree node.
- *
- * XXX this signal is no longer used by TUX and can be removed
- */
-class TupStoreTh {
- friend class Dbtup;
- friend class Dbtux;
- friend bool printTUP_STORE_TH(FILE*, const Uint32*, Uint32, Uint16);
-public:
- enum OpCode {
- OpUndefined = 0,
- OpRead = 1,
- OpInsert = 2,
- OpUpdate = 3,
- OpDelete = 4
- };
- STATIC_CONST( SignalLength = 12 );
-private:
- /*
- * These are as in TupReadAttrs (except opCode). Version must be
- * zero. Ordered index tuple (tree node) has only current version.
- */
- Uint32 errorCode;
- Uint32 opCode;
- Uint32 tableId;
- Uint32 fragId;
- Uint32 fragPtrI;
- Uint32 tupAddr;
- Uint32 tupVersion;
- Uint32 pageId;
- Uint32 pageOffset;
- Uint32 bufferId;
- /*
- * Data offset and size in words. Applies to both the buffer and the
- * tuple. Used e.g. to read only node header.
- */
- Uint32 dataOffset;
- Uint32 dataSize;
- /*
- * Shared buffer 0 starts after signal class.
- */
-};
-
-#endif
diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h
index 7b2f728bda8..123297b0d71 100644
--- a/ndb/include/mgmapi/mgmapi.h
+++ b/ndb/include/mgmapi/mgmapi.h
@@ -49,6 +49,8 @@
* @{
*/
+#include "mgmapi_config_parameters.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -81,10 +83,10 @@ extern "C" {
*/
enum ndb_mgm_node_type {
NDB_MGM_NODE_TYPE_UNKNOWN = -1, /*/< Node type not known*/
- NDB_MGM_NODE_TYPE_API = 0, /*/< An application node (API)*/
- NDB_MGM_NODE_TYPE_NDB = 1, /*/< A database node (DB)*/
- NDB_MGM_NODE_TYPE_MGM = 2, /*/< A management server node (MGM)*/
- NDB_MGM_NODE_TYPE_REP = 3, ///< A replication node
+ NDB_MGM_NODE_TYPE_API = NODE_TYPE_API, /*/< An application node (API)*/
+ NDB_MGM_NODE_TYPE_NDB = NODE_TYPE_DB, /*/< A database node (DB)*/
+ NDB_MGM_NODE_TYPE_MGM = NODE_TYPE_MGM, /*/< A management server node (MGM)*/
+ NDB_MGM_NODE_TYPE_REP = NODE_TYPE_REP, ///< A replication node
NDB_MGM_NODE_TYPE_MIN = 0, /*/< Min valid value*/
NDB_MGM_NODE_TYPE_MAX = 3 /*/< Max valid value*/
@@ -666,6 +668,11 @@ extern "C" {
*/
struct ndb_mgm_configuration * ndb_mgm_get_configuration(NdbMgmHandle handle,
unsigned version);
+
+ int ndb_mgm_alloc_nodeid(NdbMgmHandle handle,
+ unsigned version,
+ unsigned *pnodeid,
+ int nodetype);
/**
* Config iterator
*/
diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h
index d3bb44c1523..3eca49055fe 100644
--- a/ndb/include/mgmapi/mgmapi_config_parameters.h
+++ b/ndb/include/mgmapi/mgmapi_config_parameters.h
@@ -6,6 +6,7 @@
#define CFG_SYS_PRIMARY_MGM_NODE 1
#define CFG_SYS_CONFIG_GENERATION 2
#define CFG_SYS_REPLICATION_ROLE 7
+#define CFG_SYS_PORT_BASE 8
#define CFG_NODE_ID 3
#define CFG_NODE_BYTE_ORDER 4
@@ -87,11 +88,11 @@
#define CFG_CONNECTION_CHECKSUM 403
#define CFG_CONNECTION_NODE_1_SYSTEM 404
#define CFG_CONNECTION_NODE_2_SYSTEM 405
+#define CFG_CONNECTION_SERVER_PORT 406
#define CFG_TCP_HOSTNAME_1 450
#define CFG_TCP_HOSTNAME_2 451
#define CFG_TCP_SERVER 452
-#define CFG_TCP_SERVER_PORT 453
#define CFG_TCP_SEND_BUFFER_SIZE 454
#define CFG_TCP_RECEIVE_BUFFER_SIZE 455
#define CFG_TCP_PROXY 456
diff --git a/ndb/include/mgmapi/mgmapi_debug.h b/ndb/include/mgmapi/mgmapi_debug.h
index 2723263e7a7..1c562cd164f 100644
--- a/ndb/include/mgmapi/mgmapi_debug.h
+++ b/ndb/include/mgmapi/mgmapi_debug.h
@@ -106,6 +106,31 @@ extern "C" {
struct ndb_mgm_reply* reply);
+ /**
+ *
+ * @param handle the NDB management handle.
+ * @param nodeId the node id. 0 = all db nodes
+   * @param errorCode the errorCode.
+ * @param reply the reply message.
+ * @return 0 if successful or an error code.
+ */
+ int ndb_mgm_set_int_parameter(NdbMgmHandle handle,
+ int node,
+ int param,
+ unsigned value,
+ struct ndb_mgm_reply* reply);
+
+ int ndb_mgm_set_int64_parameter(NdbMgmHandle handle,
+ int node,
+ int param,
+ unsigned long long value,
+ struct ndb_mgm_reply* reply);
+
+ int ndb_mgm_set_string_parameter(NdbMgmHandle handle,
+ int node,
+ int param,
+ const char * value,
+ struct ndb_mgm_reply* reply);
#ifdef __cplusplus
}
#endif
diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp
index 50d333b54dd..396ce24308c 100644
--- a/ndb/include/mgmcommon/ConfigRetriever.hpp
+++ b/ndb/include/mgmcommon/ConfigRetriever.hpp
@@ -19,6 +19,8 @@
#include <ndb_types.h>
#include <mgmapi.h>
+#include <BaseString.hpp>
+#include <LocalConfig.hpp>
/**
* @class ConfigRetriever
@@ -26,15 +28,16 @@
*/
class ConfigRetriever {
public:
- ConfigRetriever();
- ConfigRetriever(const int id, const char* remoteHost, const int port);
+ ConfigRetriever(Uint32 version, Uint32 nodeType);
~ConfigRetriever();
/**
* Read local config
* @return Own node id, -1 means fail
*/
- int init(bool onlyNodeId = false);
+ int init();
+
+ int do_connect();
/**
* Get configuration for current (nodeId given in local config file) node.
@@ -47,7 +50,7 @@ public:
* @return ndb_mgm_configuration object if succeeded,
* NULL if erroneous local config file or configuration error.
*/
- struct ndb_mgm_configuration * getConfig(int versionId, int nodeType);
+ struct ndb_mgm_configuration * getConfig();
const char * getErrorString();
@@ -62,28 +65,21 @@ public:
void setLocalConfigFileName(const char * connectString);
/**
- * Sets connectstring which can be used instead of local config file
- * environment variables and Ndb.cfg has precidence over this
- */
- void setDefaultConnectString(const char * defaultConnectString);
-
- /**
* @return Node id of this node (as stated in local config or connectString)
*/
- inline Uint32 getOwnNodeId() { return _ownNodeId; }
-
+ Uint32 allocNodeId();
/**
* Get config using socket
*/
- struct ndb_mgm_configuration * getConfig(const char * mgmhost, short port,
- int versionId);
+ struct ndb_mgm_configuration * getConfig(NdbMgmHandle handle);
+
/**
* Get config from file
*/
- struct ndb_mgm_configuration * getConfig(const char * file, int versionId);
+ struct ndb_mgm_configuration * getConfig(const char * file);
private:
- char * errorString;
+ BaseString errorString;
enum ErrorType {
CR_ERROR = 0,
CR_RETRY = 1
@@ -91,18 +87,21 @@ private:
ErrorType latestErrorType;
void setError(ErrorType, const char * errorMsg);
-
- char * _localConfigFileName;
- struct LocalConfig * _localConfig;
+
+ BaseString _localConfigFileName;
+ struct LocalConfig _localConfig;
int _ownNodeId;
-
- char * m_connectString;
- char * m_defaultConnectString;
+ BaseString m_connectString;
+
+ Uint32 m_version;
+ Uint32 m_node_type;
+ NdbMgmHandle m_handle;
+
/**
* Verify config
*/
- bool verifyConfig(const struct ndb_mgm_configuration *, int type);
+ bool verifyConfig(const struct ndb_mgm_configuration *);
};
#endif
diff --git a/ndb/src/common/mgmcommon/LocalConfig.hpp b/ndb/include/mgmcommon/LocalConfig.hpp
index eb676bf9bed..c741b35f482 100644
--- a/ndb/src/common/mgmcommon/LocalConfig.hpp
+++ b/ndb/include/mgmcommon/LocalConfig.hpp
@@ -32,49 +32,35 @@ enum MgmtSrvrId_Type {
struct MgmtSrvrId {
MgmtSrvrId_Type type;
- union {
- struct {
- char * remoteHost;
- unsigned int port;
- } tcp;
- struct {
- char * filename;
- } file;
- } data;
+ BaseString name;
+ unsigned int port;
};
struct LocalConfig {
int _ownNodeId;
-
- int size;
- int items;
- MgmtSrvrId ** ids;
-
+ Vector<MgmtSrvrId> ids;
+
int error_line;
char error_msg[256];
LocalConfig();
~LocalConfig();
- bool init(bool onlyNodeId = false,
- const char *connectString = 0,
- const char *fileName = 0,
- const char *defaultConnectString = 0);
-
- void add(MgmtSrvrId *i);
+ bool init(const char *connectString = 0,
+ const char *fileName = 0);
void printError() const;
void printUsage() const;
void setError(int lineNumber, const char * _msg);
- bool readConnectString(const char * connectString, bool onlyNodeId = false);
- bool readFile(const char * filename, bool &fopenError, bool onlyNodeId = false);
+ bool readConnectString(const char *);
+ bool readFile(const char * file, bool &fopenError);
bool parseLine(char * line, int lineNumber);
-
+
bool parseNodeId(const char *buf);
bool parseHostName(const char *buf);
bool parseFileName(const char *buf);
- bool parseString(const char *buf, bool onlyNodeId, char *line);
+ bool parseString(const char *buf, char *line);
};
#endif // LocalConfig_H
diff --git a/ndb/include/mgmcommon/MgmtErrorReporter.hpp b/ndb/include/mgmcommon/MgmtErrorReporter.hpp
index 925d9e6407a..0d980aa7245 100644
--- a/ndb/include/mgmcommon/MgmtErrorReporter.hpp
+++ b/ndb/include/mgmcommon/MgmtErrorReporter.hpp
@@ -63,12 +63,6 @@
// Returns: -
//****************************************************************************
-#ifndef NDB_ASSERT
-#define NDB_ASSERT(trueToContinue, message) \
- if ( !(trueToContinue) ) { \
-ndbout << "ASSERT FAILED. FILE: " << __FILE__ << ", LINE: " << __LINE__ << ", MSG: " << message << endl;exit(-1);}
-#endif
-
#define MGM_REQUIRE(x) \
if (!(x)) { ndbout << __FILE__ << " " << __LINE__ \
<< ": Warning! Requirement failed" << endl; }
diff --git a/ndb/include/mgmcommon/NdbConfig.h b/ndb/include/mgmcommon/NdbConfig.h
index d9b484edcc5..5c83a348df2 100644
--- a/ndb/include/mgmcommon/NdbConfig.h
+++ b/ndb/include/mgmcommon/NdbConfig.h
@@ -21,11 +21,14 @@
extern "C" {
#endif
-const char* NdbConfig_HomePath(char* buf, int buflen);
-
-const char* NdbConfig_NdbCfgName(char* buf, int buflen, int with_ndb_home);
-const char* NdbConfig_ErrorFileName(char* buf, int buflen);
-const char* NdbConfig_ClusterLogFileName(char* buf, int buflen);
+char* NdbConfig_NdbCfgName(int with_ndb_home);
+char* NdbConfig_ErrorFileName(int node_id);
+char* NdbConfig_ClusterLogFileName(int node_id);
+char* NdbConfig_SignalLogFileName(int node_id);
+char* NdbConfig_TraceFileName(int node_id, int file_no);
+char* NdbConfig_NextTraceFileName(int node_id);
+char* NdbConfig_PidFileName(int node_id);
+char* NdbConfig_StdoutFileName(int node_id);
#ifdef __cplusplus
}
diff --git a/ndb/include/ndb_global.h b/ndb/include/ndb_global.h
index f871acbc075..b8fcca6dbb1 100644
--- a/ndb/include/ndb_global.h
+++ b/ndb/include/ndb_global.h
@@ -3,6 +3,10 @@
#define NDBGLOBAL_H
#include <my_global.h>
+#define NDB_BASE_PORT 2200
+
+/** signal & SIG_PIPE */
+#include <my_alarm.h>
#if defined(_WIN32) || defined(_WIN64) || defined(__WIN32__) || defined(WIN32)
#define NDB_WIN32
@@ -90,6 +94,14 @@ extern int strcasecmp(const char *s1, const char *s2);
extern int strncasecmp(const char *s1, const char *s2, size_t n);
#endif
+#ifdef SCO
+
+#ifndef PATH_MAX
+#define PATH_MAX 1024
+#endif
+
+#endif /* SCO */
+
#ifdef __cplusplus
}
#endif
diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp
index 27da5c3fa39..7904ecef305 100644
--- a/ndb/include/ndbapi/Ndb.hpp
+++ b/ndb/include/ndbapi/Ndb.hpp
@@ -867,6 +867,7 @@ class NdbObjectIdMap;
class NdbOperation;
class NdbEventOperationImpl;
class NdbScanOperation;
+class NdbIndexScanOperation;
class NdbIndexOperation;
class NdbConnection;
class NdbApiSignal;
@@ -875,7 +876,6 @@ class NdbLabel;
class NdbBranch;
class NdbSubroutine;
class NdbCall;
-class NdbScanReceiver;
class Table;
class BaseString;
class NdbEventOperation;
@@ -961,8 +961,9 @@ class Ndb
friend class NdbConnection;
friend class Table;
friend class NdbApiSignal;
- friend class NdbScanReceiver;
friend class NdbIndexOperation;
+ friend class NdbScanOperation;
+ friend class NdbIndexScanOperation;
friend class NdbDictionaryImpl;
friend class NdbDictInterface;
friend class NdbBlob;
@@ -1413,12 +1414,24 @@ public:
*
* @return tuple id or 0 on error
*/
- Uint64 getAutoIncrementValue(const char* aTableName, Uint32 cacheSize = 1);
- bool setAutoIncrementValue(const char* aTableName, Uint64 val);
- Uint64 getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize = 1000 );
- Uint64 getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize = 1000 );
- bool setTupleIdInNdb(const char* aTableName, Uint64 val);
- bool setTupleIdInNdb(Uint32 aTableId, Uint64 val);
+ Uint64 getAutoIncrementValue(const char* aTableName,
+ Uint32 cacheSize = 1);
+ Uint64 getAutoIncrementValue(NdbDictionary::Table * aTable,
+ Uint32 cacheSize = 1);
+ Uint64 readAutoIncrementValue(const char* aTableName);
+ Uint64 readAutoIncrementValue(NdbDictionary::Table * aTable);
+ bool setAutoIncrementValue(const char* aTableName, Uint64 val,
+ bool increase = false);
+ bool setAutoIncrementValue(NdbDictionary::Table * aTable, Uint64 val,
+ bool increase = false);
+ Uint64 getTupleIdFromNdb(const char* aTableName,
+ Uint32 cacheSize = 1000);
+ Uint64 getTupleIdFromNdb(Uint32 aTableId,
+ Uint32 cacheSize = 1000);
+ Uint64 readTupleIdFromNdb(Uint32 aTableId);
+ bool setTupleIdInNdb(const char* aTableName, Uint64 val,
+ bool increase);
+ bool setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase);
Uint64 opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op);
#endif
@@ -1441,7 +1454,7 @@ private:
NdbConnection* doConnect(Uint32 nodeId);
void doDisconnect();
- NdbScanReceiver* getNdbScanRec();// Get a NdbScanReceiver from idle list
+ NdbReceiver* getNdbScanRec();// Get a NdbScanReceiver from idle list
NdbLabel* getNdbLabel(); // Get a NdbLabel from idle list
NdbBranch* getNdbBranch(); // Get a NdbBranch from idle list
NdbSubroutine* getNdbSubroutine();// Get a NdbSubroutine from idle
@@ -1450,7 +1463,7 @@ private:
NdbRecAttr* getRecAttr(); // Get a receeive attribute object from
// idle list of the Ndb object.
NdbOperation* getOperation(); // Get an operation from idle list
- NdbScanOperation* getScanOperation(); // Get a scan operation from idle
+ NdbIndexScanOperation* getScanOperation(); // Get a scan operation from idle
NdbIndexOperation* getIndexOperation();// Get an index operation from idle
class NdbGlobalEventBufferHandle* getGlobalEventBufferHandle();
@@ -1458,14 +1471,14 @@ private:
void releaseSignal(NdbApiSignal* anApiSignal);
void releaseSignalsInList(NdbApiSignal** pList);
- void releaseNdbScanRec(NdbScanReceiver* aNdbScanRec);
+ void releaseNdbScanRec(NdbReceiver* aNdbScanRec);
void releaseNdbLabel(NdbLabel* anNdbLabel);
void releaseNdbBranch(NdbBranch* anNdbBranch);
void releaseNdbSubroutine(NdbSubroutine* anNdbSubroutine);
void releaseNdbCall(NdbCall* anNdbCall);
void releaseRecAttr (NdbRecAttr* aRecAttr);
void releaseOperation(NdbOperation* anOperation);
- void releaseScanOperation(NdbScanOperation* aScanOperation);
+ void releaseScanOperation(NdbIndexScanOperation*);
void releaseNdbBlob(NdbBlob* aBlob);
void check_send_timeout();
@@ -1565,7 +1578,6 @@ private:
void* int2void (Uint32 val);
NdbReceiver* void2rec (void* val);
NdbConnection* void2con (void* val);
- NdbScanReceiver* void2rec_srec(void* val);
NdbOperation* void2rec_op (void* val);
NdbIndexOperation* void2rec_iop (void* val);
@@ -1607,7 +1619,7 @@ private:
NdbOperation* theOpIdleList; // First operation in the idle list.
- NdbScanOperation* theScanOpIdleList; // First scan operation in the idle list.
+ NdbIndexScanOperation* theScanOpIdleList; // First scan operation in the idle list.
NdbIndexOperation* theIndexOpIdleList; // First index operation in the idle list.
NdbConnection* theTransactionList;
NdbConnection** theConnectionArray;
@@ -1617,7 +1629,7 @@ private:
NdbBranch* theBranchList; // First branch descriptor in list
NdbSubroutine* theSubroutineList; // First subroutine descriptor in
NdbCall* theCallList; // First call descriptor in list
- NdbScanReceiver* theScanList;
+ NdbReceiver* theScanList;
NdbBlob* theNdbBlobIdleList;
Uint32 theMyRef; // My block reference
diff --git a/ndb/include/ndbapi/NdbApi.hpp b/ndb/include/ndbapi/NdbApi.hpp
index 515f39433e4..add733cccd7 100644
--- a/ndb/include/ndbapi/NdbApi.hpp
+++ b/ndb/include/ndbapi/NdbApi.hpp
@@ -23,6 +23,8 @@
#include "NdbOperation.hpp"
#include "NdbScanOperation.hpp"
#include "NdbIndexOperation.hpp"
+#include "NdbIndexScanOperation.hpp"
+#include "NdbScanFilter.hpp"
#include "NdbRecAttr.hpp"
#include "NdbResultSet.hpp"
#include "NdbDictionary.hpp"
diff --git a/ndb/include/ndbapi/NdbBlob.hpp b/ndb/include/ndbapi/NdbBlob.hpp
index 9398f77c474..dc47115d16f 100644
--- a/ndb/include/ndbapi/NdbBlob.hpp
+++ b/ndb/include/ndbapi/NdbBlob.hpp
@@ -50,24 +50,33 @@ class NdbColumnImpl;
* - closed: after transaction commit
* - invalid: after rollback or transaction close
*
- * NdbBlob supports 2 styles of data access:
+ * NdbBlob supports 3 styles of data access:
*
* - in prepare phase, NdbBlob methods getValue and setValue are used to
- * prepare a read or write of a single blob value of known size
+ * prepare a read or write of a blob value of known size
*
- * - in active phase, NdbBlob methods readData and writeData are used to
- * read or write blob data of undetermined size
+ * - in prepare phase, setActiveHook is used to define a routine which
+ * is invoked as soon as the handle becomes active
+ *
+ * - in active phase, readData and writeData are used to read or write
+ * blob data of arbitrary size
+ *
+ * The styles can be applied in combination (in above order).
+ *
+ * Blob operations take effect at next transaction execute. In some
+ * cases NdbBlob is forced to do implicit executes. To avoid this,
+ * operate on complete blob parts.
+ *
+ * Use NdbConnection::executePendingBlobOps to flush your reads and
+ * writes. It avoids execute penalty if nothing is pending. It is not
+ * needed after execute (obviously) or after next scan result.
*
* NdbBlob methods return -1 on error and 0 on success, and use output
* parameters when necessary.
*
* Notes:
* - table and its blob part tables are not created atomically
- * - blob data operations take effect at next transaction execute
- * - NdbBlob may need to do implicit executes on the transaction
- * - read and write of complete parts is much more efficient
* - scan must use the "new" interface NdbScanOperation
- * - scan with blobs applies hold-read-lock (at minimum)
* - to update a blob in a read op requires exclusive tuple lock
* - update op in scan must do its own getBlobHandle
* - delete creates implicit, not-accessible blob handles
@@ -78,12 +87,16 @@ class NdbColumnImpl;
* - scan must use exclusive locking for now
*
* Todo:
- * - add scan method hold-read-lock-until-next + return-keyinfo
- * - better check of keyinfo length when setting keys
- * - better check of allowed blob op vs locking mode
+ * - add scan method hold-read-lock + return-keyinfo
+ * - check keyinfo length when setting keys
+ * - check allowed blob ops vs locking mode
+ * - overload control (too many pending ops)
*/
class NdbBlob {
public:
+ /**
+ * State.
+ */
enum State {
Idle = 0,
Prepared = 1,
@@ -93,8 +106,14 @@ public:
};
State getState();
/**
+ * Inline blob header.
+ */
+ struct Head {
+ Uint64 length;
+ };
+ /**
* Prepare to read blob value. The value is available after execute.
- * Use isNull to check for NULL and getLength to get the real length
+ * Use getNull to check for NULL and getLength to get the real length
* and to check for truncation. Sets current read/write position to
* after the data read.
*/
@@ -107,6 +126,20 @@ public:
*/
int setValue(const void* data, Uint32 bytes);
/**
+ * Callback for setActiveHook. Invoked immediately when the prepared
+ * operation has been executed (but not committed). Any getValue or
+ * setValue is done first. The blob handle is active so readData or
+ * writeData etc can be used to manipulate blob value. A user-defined
+ * argument is passed along. Returns non-zero on error.
+ */
+ typedef int ActiveHook(NdbBlob* me, void* arg);
+ /**
+ * Define callback for blob handle activation. The queue of prepared
+ * operations will be executed in no commit mode up to this point and
+ * then the callback is invoked.
+ */
+ int setActiveHook(ActiveHook* activeHook, void* arg);
+ /**
* Check if blob is null.
*/
int getNull(bool& isNull);
@@ -115,7 +148,7 @@ public:
*/
int setNull();
/**
- * Get current length in bytes. Use isNull to distinguish between
+ * Get current length in bytes. Use getNull to distinguish between
* length 0 blob and NULL blob.
*/
int getLength(Uint64& length);
@@ -180,6 +213,13 @@ public:
static const int ErrAbort = 4268;
// "Unknown blob error"
static const int ErrUnknown = 4269;
+ /**
+ * Return info about all blobs in this operation.
+ */
+ // Get first blob in list
+ NdbBlob* blobsFirstBlob();
+ // Get next blob in list after this one
+ NdbBlob* blobsNextBlob();
private:
friend class Ndb;
@@ -187,20 +227,20 @@ private:
friend class NdbOperation;
friend class NdbScanOperation;
friend class NdbDictionaryImpl;
+ friend class NdbResultSet; // atNextResult
// state
State theState;
void setState(State newState);
// define blob table
static void getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnImpl* c);
static void getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c);
- // table name
- char theBlobTableName[BlobTableNameSize];
// ndb api stuff
Ndb* theNdb;
NdbConnection* theNdbCon;
NdbOperation* theNdbOp;
NdbTableImpl* theTable;
NdbTableImpl* theAccessTable;
+ NdbTableImpl* theBlobTable;
const NdbColumnImpl* theColumn;
char theFillChar;
// sizes
@@ -213,10 +253,11 @@ private:
bool theSetFlag;
const char* theSetBuf;
Uint32 theGetSetBytes;
- // head
- struct Head {
- Uint64 length;
- };
+ // pending ops
+ Uint8 thePendingBlobOps;
+ // activation callback
+ ActiveHook* theActiveHook;
+ void* theActiveHookArg;
// buffers
struct Buf {
char* data;
@@ -234,7 +275,6 @@ private:
char* theInlineData;
NdbRecAttr* theHeadInlineRecAttr;
bool theHeadInlineUpdateFlag;
- bool theNewPartFlag;
// length and read/write position
int theNullFlag;
Uint64 theLength;
@@ -275,6 +315,11 @@ private:
int insertParts(const char* buf, Uint32 part, Uint32 count);
int updateParts(const char* buf, Uint32 part, Uint32 count);
int deleteParts(Uint32 part, Uint32 count);
+ // pending ops
+ int executePendingBlobReads();
+ int executePendingBlobWrites();
+ // callbacks
+ int invokeActiveHook();
// blob handle maintenance
int atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn);
int preExecute(ExecType anExecType, bool& batch);
@@ -286,6 +331,7 @@ private:
void setErrorCode(NdbOperation* anOp, bool invalidFlag = true);
void setErrorCode(NdbConnection* aCon, bool invalidFlag = true);
#ifdef VM_TRACE
+ int getOperationType() const;
friend class NdbOut& operator<<(NdbOut&, const NdbBlob&);
#endif
};
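For orientation, a minimal usage sketch of the blob handle API declared above. It assumes an already initialized Ndb object and a table "t" with an integer primary key "a" and a blob column "b"; all names are illustrative and error handling is trimmed.

    // Read a blob value via NdbBlob (hedged sketch, not the canonical example).
    #include <NdbApi.hpp>

    int read_blob(Ndb* ndb)
    {
      NdbConnection* con = ndb->startTransaction();
      NdbOperation* op = con->getNdbOperation("t");
      op->readTuple();
      op->equal("a", (Uint32)1);
      NdbBlob* bh = op->getBlobHandle("b");   // handle is Prepared here
      char head[256];
      bh->getValue(head, sizeof(head));       // filled at execute
      if (con->execute(NoCommit) == -1)       // handle becomes Active
        return -1;
      bool isNull;
      Uint64 length;
      bh->getNull(isNull);                    // use getNull, not isNull
      bh->getLength(length);                  // real length, detects truncation
      ndb->closeTransaction(con);
      return 0;
    }

The setActiveHook/ActiveHook pair fits the same flow: the callback fires when the handle becomes active during execute, so larger values can be streamed with readData/writeData from inside the hook instead of a fixed getValue buffer.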
diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp
index c620578cabd..c268f9aab04 100644
--- a/ndb/include/ndbapi/NdbConnection.hpp
+++ b/ndb/include/ndbapi/NdbConnection.hpp
@@ -19,15 +19,15 @@
#include <ndb_types.h>
#include <NdbError.hpp>
+#include <NdbDictionary.hpp>
class NdbConnection;
class NdbOperation;
-class NdbCursorOperation;
class NdbScanOperation;
+class NdbIndexScanOperation;
class NdbIndexOperation;
class NdbApiSignal;
class Ndb;
-class NdbScanReceiver;
class NdbBlob;
@@ -160,7 +160,7 @@ class NdbConnection
friend class NdbOperation;
friend class NdbScanOperation;
friend class NdbIndexOperation;
- friend class NdbScanReceiver;
+ friend class NdbIndexScanOperation;
friend class NdbBlob;
public:
@@ -178,56 +178,31 @@ public:
NdbOperation* getNdbOperation(const char* aTableName);
/**
- * Get an NdbOperation for index scan of a table.
- * Note that the operation has to be defined before it is executed.
- *
- * @note All operations within the same transaction need to
- * be initialized with this method.
- *
- * @param anIndexName The index name.
- * @param aTableName The table name.
- * @return Pointer to an NdbOperation object if successful, otherwise NULL.
- */
- NdbOperation* getNdbOperation(const char* anIndexName,
- const char* aTableName);
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- /**
* Get an operation from NdbScanOperation idlelist and
* get the NdbConnection object which
* was fetched by startTransaction pointing to this operation.
- * This operation will set the theTableId
- * in the NdbOperation object.synchronous.
*
* @param aTableName a table name.
* @return pointer to an NdbOperation object if successful, otherwise NULL
*/
NdbScanOperation* getNdbScanOperation(const char* aTableName);
-#endif
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Get an operation from NdbScanOperation idlelist and
* get the NdbConnection object which
* was fetched by startTransaction pointing to this operation.
- * This operation will set the theTableId
- * in the NdbOperation object.synchronous.
*
* @param anIndexName The index name.
* @param aTableName a table name.
* @return pointer to an NdbOperation object if successful, otherwise NULL
*/
- NdbScanOperation* getNdbScanOperation(const char* anIndexName,
- const char* aTableName);
-#endif
-
-
+ NdbIndexScanOperation* getNdbIndexScanOperation(const char* anIndexName,
+ const char* aTableName);
+
/**
* Get an operation from NdbIndexOperation idlelist and
* get the NdbConnection object that
* was fetched by startTransaction pointing to this operation.
- * This operation will set the theTableId
- * in the NdbOperation object. Synchronous.
*
* @param indexName An index name (as created by createIndex).
* @param tableName A table name.
@@ -337,76 +312,17 @@ public:
*/
void close();
- /** @} *********************************************************************/
-
- /**
- * @name Scan Transactions
- * @{
- */
-
/**
- * Execute a scan transaction. This will define
- * and start the scan transaction in the NDB kernel.
- *
- * @return 0 if successful otherwise -1.
- */
- int executeScan();
-
- /**
- * Get the next tuple in a scan transaction.
- *
- * After each call to NdbConnection::nextScanResult
- * the buffers and NdbRecAttr objects defined in
- * NdbOperation::getValue are updated with values
- * from the scanned tuple.
- *
- * @param fetchAllowed If set to false, then fetching is disabled
- *
- * The NDB API will contact the NDB Kernel for more tuples
- * when necessary to do so unless you set the fetchAllowed
- * to false.
- * This will force NDB to process any records it
- * already has in it's caches. When there are no more cached
- * records it will return 2. You must then call nextScanResult
- * with fetchAllowed = true in order to contact NDB for more
- * records.
- *
- * fetchAllowed = false is useful when you want to update or
- * delete all the records fetched in one transaction(This will save a
- * lot of round trip time and make updates or deletes of scanned
- * records a lot faster).
- * While nextScanResult(false)
- * returns 0 take over the record to another transaction. When
- * nextScanResult(false) returns 2 you must execute and commit the other
- * transaction. This will cause the locks to be transferred to the
- * other transaction, updates or deletes will be made and then the
- * locks will be released.
- * After that, call nextScanResult(true) which will fetch new records and
- * cache them in the NdbApi.
- *
- * @note If you don't take over the records to another transaction the
- * locks on those records will be released the next time NDB Kernel
- * is contacted for more records.
+ * Restart transaction
*
- * @note Please contact for examples of efficient scan
- * updates and deletes.
+ * Once a transaction has completed successfully,
+ * it can be started again without calling closeTransaction/startTransaction.
*
- * @return
- * - -1: if unsuccessful,<br>
- * - 0: if another tuple was received, and<br>
- * - 1: if there are no more tuples to scan.
- * - 2: if there are no more cached records in NdbApi
+ * Note that this method also releases completed operations.
*/
- int nextScanResult(bool fetchAllowed = true);
+ int restart();
- /**
- * Stops the scan. Used if no more tuples are wanted.
- * The transaction should still be closed with
- * Ndb::closeTransaction.
- *
- * @return 0 if successful otherwise -1.
- */
- int stopScan();
+ /** @} *********************************************************************/
/**
* @name Meta Information
@@ -514,15 +430,30 @@ public:
*/
const NdbOperation * getNextCompletedOperation(const NdbOperation * op)const;
+ /** @} *********************************************************************/
+
/**
- * Release completed operations
+ * Execute the transaction in NoCommit mode if there are any not-yet
+ * executed blob part operations of the given types. Otherwise do
+ * nothing. The flags argument is a bitwise OR of (1 << optype) where
+ * optype comes from NdbOperation::OperationType. Only the basic PK
+ * ops are used (read, insert, update, delete).
*/
- void releaseCompletedOperations();
+ int executePendingBlobOps(Uint8 flags = 0xFF);
-
- /** @} *********************************************************************/
+ // Fast path calls for MySQL ha_ndbcluster
+ NdbOperation* getNdbOperation(NdbDictionary::Table * table);
+ NdbIndexOperation* getNdbIndexOperation(NdbDictionary::Index * index,
+ NdbDictionary::Table * table);
+ NdbScanOperation* getNdbScanOperation(NdbDictionary::Table * table);
+ NdbIndexScanOperation* getNdbIndexScanOperation(NdbDictionary::Index * index,
+ NdbDictionary::Table * table);
private:
+ /**
+ * Release completed operations
+ */
+ void releaseCompletedOperations();
typedef Uint64 TimeMillis_t;
/**************************************************************************
@@ -587,13 +518,7 @@ private:
int receiveTCINDXCONF(const class TcIndxConf *, Uint32 aDataLength);
int receiveTCINDXREF(NdbApiSignal*);
int receiveSCAN_TABREF(NdbApiSignal*);
- int receiveSCAN_TABCONF(NdbApiSignal*);
- int receiveSCAN_TABINFO(NdbApiSignal*);
-
- int checkNextScanResultComplete();
- int sendScanStart();
- int sendScanNext(bool stopScanFlag);
- int fetchNextScanResult();
+ int receiveSCAN_TABCONF(NdbApiSignal*, const Uint32*, Uint32 len);
int doSend(); // Send all operations
int sendROLLBACK(); // Send of an ROLLBACK
@@ -616,7 +541,7 @@ private:
// Release all cursor operations in connection
void releaseOps(NdbOperation*);
- void releaseCursorOperations(NdbCursorOperation*);
+ void releaseScanOperations(NdbIndexScanOperation*);
// Set the transaction identity of the transaction
void setTransactionId(Uint64 aTransactionId);
@@ -633,10 +558,12 @@ private:
int checkMagicNumber(); // Verify correct object
NdbOperation* getNdbOperation(class NdbTableImpl* aTable,
NdbOperation* aNextOp = 0);
- NdbScanOperation* getNdbScanOperation(class NdbTableImpl* aTable);
+ NdbIndexScanOperation* getNdbScanOperation(class NdbTableImpl* aTable);
NdbIndexOperation* getNdbIndexOperation(class NdbIndexImpl* anIndex,
class NdbTableImpl* aTable,
NdbOperation* aNextOp = 0);
+ NdbIndexScanOperation* getNdbIndexScanOperation(NdbIndexImpl* index,
+ NdbTableImpl* table);
void handleExecuteCompletion();
@@ -687,7 +614,6 @@ private:
Uint32 theNoOfOpSent; // How many operations have been sent
Uint32 theNoOfOpCompleted; // How many operations have completed
Uint32 theNoOfOpFetched; // How many operations was actually fetched
- Uint32 theNoOfSCANTABCONFRecv; // How many SCAN_TABCONF have been received
Uint32 theMyRef; // Our block reference
Uint32 theTCConPtr; // Transaction Co-ordinator connection pointer.
Uint64 theTransactionId; // theTransactionId of the transaction
@@ -723,23 +649,20 @@ private:
Uint32 theNodeSequence; // The sequence no of the db node
bool theReleaseOnClose;
- // Cursor operations
+ // Scan operations
bool m_waitForReply;
- NdbCursorOperation* m_theFirstCursorOperation;
- NdbCursorOperation* m_theLastCursorOperation;
+ NdbIndexScanOperation* m_theFirstScanOperation;
+ NdbIndexScanOperation* m_theLastScanOperation;
- NdbCursorOperation* m_firstExecutedCursorOp;
- // Scan operations
- bool theScanFinished;
+ NdbIndexScanOperation* m_firstExecutedScanOp;
- NdbScanReceiver* theCurrentScanRec; // The current operation to
- // distribute to the app.
- NdbScanReceiver* thePreviousScanRec; // The previous operation read by
- // nextScanResult.
- NdbOperation* theScanningOp; // The operation actually performing the scan
+ // Scan operations
+ // The operation actually performing the scan
+ NdbScanOperation* theScanningOp;
Uint32 theBuddyConPtr;
// optim: any blobs
bool theBlobFlag;
+ Uint8 thePendingBlobOps;
static void sendTC_COMMIT_ACK(NdbApiSignal *,
Uint32 transId1, Uint32 transId2,
@@ -749,6 +672,7 @@ private:
#ifdef VM_TRACE
void printState();
#endif
+ bool checkState_TransId(const Uint32 * transId) const;
};
inline
@@ -783,6 +707,16 @@ NdbConnection::checkMagicNumber()
}
}
+inline
+bool
+NdbConnection::checkState_TransId(const Uint32 * transId) const {
+ const Uint32 tTmp1 = transId[0];
+ const Uint32 tTmp2 = transId[1];
+ Uint64 tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
+ bool b = theStatus == Connected && theTransactionId == tRecTransId;
+ return b;
+}
+
/************************************************************************************************
void setTransactionId(Uint64 aTransactionId);
@@ -956,6 +890,21 @@ NdbConnection::OpSent()
theNoOfOpSent++;
}
+/******************************************************************************
+void executePendingBlobOps();
+******************************************************************************/
+#include <stdlib.h>
+inline
+int
+NdbConnection::executePendingBlobOps(Uint8 flags)
+{
+ if (thePendingBlobOps & flags) {
+ // not executeNoBlobs because there can be new ops with blobs
+ return execute(NoCommit);
+ }
+ return 0;
+}
+
inline
Uint32
NdbConnection::ptr2int(){
@@ -963,5 +912,3 @@ NdbConnection::ptr2int(){
}
#endif
-
-
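A rough sketch of how the new restart() entry point is meant to be used, re-using one transaction object for many primary-key operations. Table and column names are invented; error paths are omitted.

    // Bulk PK reads without closeTransaction/startTransaction per key.
    int bulk_read(Ndb* ndb, const Uint32* keys, int n)
    {
      NdbConnection* con = ndb->startTransaction();
      for (int i = 0; i < n; i++) {
        NdbOperation* op = con->getNdbOperation("t");
        op->readTuple();
        op->equal("a", keys[i]);
        op->getValue("c");                       // value available after execute
        if (con->execute(Commit) == -1)
          return -1;
        if (i + 1 < n && con->restart() == -1)   // re-arm the same transaction
          return -1;
      }
      ndb->closeTransaction(con);
      return 0;
    }

executePendingBlobOps(flags) is the cheap variant of execute(NoCommit): it only round-trips when blob part operations of the given types are actually queued, which is why the NdbBlob comments recommend it over unconditional executes.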
diff --git a/ndb/include/ndbapi/NdbCursorOperation.hpp b/ndb/include/ndbapi/NdbCursorOperation.hpp
index cd76b045ea2..e7eeb54ba2d 100644
--- a/ndb/include/ndbapi/NdbCursorOperation.hpp
+++ b/ndb/include/ndbapi/NdbCursorOperation.hpp
@@ -17,77 +17,4 @@
#ifndef NdbCursorOperation_H
#define NdbCursorOperation_H
-#include <NdbOperation.hpp>
-
-class NdbResultSet;
-
-/**
- * @class NdbCursorOperation
- * @brief Operation using cursors
- */
-class NdbCursorOperation : public NdbOperation
-{
- friend class NdbResultSet;
- friend class NdbConnection;
-
-public:
- /**
- * Type of cursor
- */
- enum CursorType {
- NoCursor = 0,
- ScanCursor = 1,
- IndexCursor = 2
- };
-
- /**
- * Lock when performing scan
- */
- enum LockMode {
- LM_Read = 0,
- LM_Exclusive = 1,
- LM_CommittedRead = 2,
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- LM_Dirty = 2
-#endif
- };
-
- virtual CursorType cursorType() = 0;
-
- /**
- * readTuples returns a NdbResultSet where tuples are stored.
- * Tuples are not stored in NdbResultSet until execute(NoCommit)
- * has been executed and nextResult has been called.
- *
- * @param parallel Scan parallelism
- * @param LockMode Scan lock handling
- * @returns NdbResultSet.
- */
- virtual NdbResultSet* readTuples(unsigned parallel = 0,
- LockMode = LM_Read ) = 0;
-
- inline NdbResultSet* readTuplesExclusive(int parallell = 0){
- return readTuples(parallell, LM_Exclusive);
- }
-
-protected:
- NdbCursorOperation(Ndb* aNdb);
-
- ~NdbCursorOperation();
-
- void cursInit();
-
- virtual int executeCursor(int ProcessorId) = 0;
-
- NdbResultSet* getResultSet();
- NdbResultSet* m_resultSet;
-
-private:
-
- virtual int nextResult(bool fetchAllowed) = 0;
-
- virtual void closeScan() = 0;
-};
-
-
#endif
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 3b38e33ec91..b5c3985c6cb 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -144,6 +144,8 @@ public:
FragAllLarge = 4 ///< Eight fragments per node group.
};
};
+
+ class Table; // forward declaration
/**
* @class Column
@@ -183,7 +185,7 @@ public:
Datetime, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
Timespec, ///< Precision down to 1 nsec(sizeof(Datetime) == 12 bytes )
Blob, ///< Binary large object (see NdbBlob)
- Clob ///< Text blob
+ Text ///< Text blob
};
/**
@@ -309,7 +311,8 @@ public:
/**
* For blob, set or get "part size" i.e. number of bytes to store in
- * each tuple of the "blob table". Must be less than 64k.
+ * each tuple of the "blob table". Can be set to zero to omit parts
+ * and to allow only inline bytes ("tinyblob").
*/
void setPartSize(int size) { setScale(size); }
int getPartSize() const { return getScale(); }
@@ -324,7 +327,7 @@ public:
/**
* Get size of element
*/
- int Column::getSize() const;
+ int getSize() const;
/**
* Set distribution key
@@ -364,6 +367,8 @@ public:
void setIndexOnlyStorage(bool);
bool getIndexOnlyStorage() const;
+ const Table * getBlobTable() const;
+
/**
* @name ODBC Specific methods
* @{
@@ -1060,6 +1065,6 @@ public:
};
};
-class NdbOut& operator <<(class NdbOut& ndbout, const NdbDictionary::Column::Type type);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Column& col);
#endif
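To illustrate the blob column attributes (inline size, part size) referred to above, a hedged table-creation sketch. setInlineSize, setNullable and the Dictionary calls are assumed to exist alongside the setPartSize shown in this hunk; all names are illustrative.

    // Create a table with a Text column: 256 bytes kept inline in the
    // main tuple, the rest in 2000-byte parts in the hidden blob table.
    #include <NdbApi.hpp>

    int create_doc_table(Ndb* ndb)
    {
      NdbDictionary::Table tab("doc");

      NdbDictionary::Column id("id");
      id.setType(NdbDictionary::Column::Unsigned);
      id.setPrimaryKey(true);
      tab.addColumn(id);

      NdbDictionary::Column body("body");
      body.setType(NdbDictionary::Column::Text);
      body.setInlineSize(256);   // bytes stored in the main tuple
      body.setPartSize(2000);    // bytes per blob part tuple; 0 = inline only
      body.setNullable(true);
      tab.addColumn(body);

      return ndb->getDictionary()->createTable(tab);
    }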
diff --git a/ndb/include/ndbapi/NdbIndexOperation.hpp b/ndb/include/ndbapi/NdbIndexOperation.hpp
index baf31dca0ee..2d873c52e56 100644
--- a/ndb/include/ndbapi/NdbIndexOperation.hpp
+++ b/ndb/include/ndbapi/NdbIndexOperation.hpp
@@ -29,7 +29,7 @@
#ifndef NdbIndexOperation_H
#define NdbIndexOperation_H
-#include <NdbCursorOperation.hpp>
+#include "NdbOperation.hpp"
class Index;
class NdbResultSet;
diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
new file mode 100644
index 00000000000..82aed04a9fc
--- /dev/null
+++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -0,0 +1,140 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NdbIndexScanOperation_H
+#define NdbIndexScanOperation_H
+
+#include <NdbScanOperation.hpp>
+
+/**
+ * @class NdbIndexScanOperation
+ * @brief Class of scan operations for use to scan ordered index
+ */
+class NdbIndexScanOperation : public NdbScanOperation {
+ friend class Ndb;
+ friend class NdbConnection;
+ friend class NdbResultSet;
+ friend class NdbOperation;
+ friend class NdbScanOperation;
+public:
+ /**
+ * readTuples returns a NdbResultSet where tuples are stored.
+ * Tuples are not stored in NdbResultSet until execute(NoCommit)
+ * has been executed and nextResult has been called.
+ *
+ * @param parallel Scan parallelism
+ * @param batch Number of rows to fetch from each fragment at a time
+ * @param LockMode Scan lock handling
+ * @param order_by Order result set in index order
+ * @returns NdbResultSet.
+ * @see NdbScanOperation::readTuples
+ */
+ NdbResultSet* readTuples(LockMode = LM_Read,
+ Uint32 batch = 0,
+ Uint32 parallel = 0,
+ bool order_by = false);
+
+ inline NdbResultSet* readTuples(int parallell){
+ return readTuples(LM_Read, 0, parallell, false);
+ }
+
+ inline NdbResultSet* readTuplesExclusive(int parallell = 0){
+ return readTuples(LM_Exclusive, 0, parallell, false);
+ }
+
+ /**
+ * @name Define Range Scan
+ *
+ * A range scan is a scan on an ordered index. The operation is on
+ * the index table but tuples are returned from the primary table.
+ * The index contains all tuples where at least one index key has a
+ * non-NULL value.
+ *
+ * A range scan is currently opened via a normal open scan method.
+ * Bounds can be defined for each index key. After setting bounds,
+ * usual scan methods can be used (get value, interpreter, take over).
+ * These operate on the primary table.
+ *
+ * @{
+ */
+
+ /**
+ * Type of ordered index key bound. The values (0-4) will not change
+ * and can be used explicitly (e.g. they could be computed).
+ */
+ enum BoundType {
+ BoundLE = 0, ///< lower bound,
+ BoundLT = 1, ///< lower bound, strict
+ BoundGE = 2, ///< upper bound
+ BoundGT = 3, ///< upper bound, strict
+ BoundEQ = 4 ///< equality
+ };
+
+ /**
+ * Define bound on index key in range scan.
+ *
+ * Each index key can have a lower and/or upper bound, or can be set
+ * equal to a value. The bounds can be defined in any order, but
+ * a duplicate definition is an error.
+ *
+ * The bounds must specify a single range i.e. they are on an initial
+ * sequence of index keys and the condition is equality for all but
+ * (at most) the last key which has a lower and/or upper bound.
+ *
+ * NULL is treated like a normal value which is less than any non-NULL
+ * value and equal to another NULL value. To search for NULL, use
+ * setBound with a null pointer (0).
+ *
+ * An index also stores all-NULL keys (this may become optional).
+ * Doing an index scan with an empty bound set returns all table tuples.
+ *
+ * @param attrName Attribute name, alternatively:
+ * @param anAttrId Index column id (starting from 0)
+ * @param type Type of bound
+ * @param value Pointer to bound value, 0 for NULL
+ * @param len Value length in bytes.
+ * Fixed per datatype and can be omitted
+ * @return 0 if successful otherwise -1
+ */
+ int setBound(const char* attr, int type, const void* aValue, Uint32 len = 0);
+
+ /**
+ * Define bound on index key in range scan using index column id.
+ * See the other setBound() method for details.
+ */
+ int setBound(Uint32 anAttrId, int type, const void* aValue, Uint32 len = 0);
+
+ /** @} *********************************************************************/
+
+private:
+ NdbIndexScanOperation(Ndb* aNdb);
+ virtual ~NdbIndexScanOperation();
+
+ int setBound(const NdbColumnImpl*, int type, const void* aValue, Uint32 len);
+ int saveBoundATTRINFO();
+
+ virtual int equal_impl(const NdbColumnImpl*, const char*, Uint32);
+ virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*);
+
+ void fix_get_values();
+ int next_result_ordered(bool fetchAllowed);
+ int send_next_scan_ordered(Uint32 idx);
+ int compare(Uint32 key, Uint32 cols, const NdbReceiver*, const NdbReceiver*);
+
+ Uint32 m_sort_columns;
+};
+
+#endif
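A hedged example of the range-scan bounds described above: equality on the first index key and a lower bound on the second, returned in index order. Index, table and column names are invented; the NdbResultSet loop follows the pattern from NdbScanOperation.hpp further down.

    // Ordered index range scan over index "idx_ab" on table "t".
    int range_scan(Ndb* ndb)
    {
      NdbConnection* con = ndb->startTransaction();
      NdbIndexScanOperation* op =
        con->getNdbIndexScanOperation("idx_ab", "t");
      NdbResultSet* rs = op->readTuples(NdbScanOperation::LM_Read,
                                        0,      // batch: 0 = let NDB choose
                                        0,      // parallel: 0 = max
                                        true);  // order_by: index order
      Uint32 a = 10, b = 100;
      op->setBound("a", NdbIndexScanOperation::BoundEQ, &a);
      op->setBound("b", NdbIndexScanOperation::BoundLE, &b);  // lower bound per enum comment
      NdbRecAttr* c = op->getValue("c");
      if (con->execute(NoCommit) == -1)
        return -1;
      while (rs->nextResult(true) == 0) {
        // use c->u_32_value() etc. per returned row
      }
      ndb->closeTransaction(con);
      return 0;
    }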
diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp
index cfa656cb2d5..c48dccd4864 100644
--- a/ndb/include/ndbapi/NdbOperation.hpp
+++ b/ndb/include/ndbapi/NdbOperation.hpp
@@ -42,8 +42,8 @@ class NdbOperation
friend class NdbScanReceiver;
friend class NdbScanFilter;
friend class NdbScanFilterImpl;
+ friend class NdbReceiver;
friend class NdbBlob;
-
public:
/**
* @name Define Standard Operation Type
@@ -196,196 +196,7 @@ public:
*/
virtual int interpretedDeleteTuple();
- /**
- * Scan a table to read tuples.
- *
- * The operation only sets a temporary read lock while
- * reading the tuple.
- * The tuple lock is released when the result of the read reaches the
- * application.
- *
- * @param Parallelism Number of parallel tuple reads are performed
- * in the scan.
- * Currently a maximum of 256 parallel tuple
- * reads are allowed.
- * The parallelism can in reality be lower
- * than specified
- * depending on the number of nodes
- * in the cluster
- * @return 0 if successful otherwise -1.
- */
- int openScanRead(Uint32 Parallelism = 16 );
-
- /**
- * Scan a table to write or update tuples.
- *
- * The operation sets an exclusive lock on the tuple and sends the result
- * to the application.
- * Thus when the application reads the data, the tuple is
- * still locked with an exclusive lock.
- *
- * @param parallelism Number of parallel tuple reads are performed
- * in the scan.
- * Currently a maximum of 256 parallel tuple
- * reads are allowed.
- * The parallelism can in reality be lower
- * than specified depending on the number
- * of nodes in the cluster
- * @return 0 if successful otherwise -1.
- *
- */
- int openScanExclusive(Uint32 parallelism = 16);
-
- /**
- * Scan a table to read tuples.
- *
- * The operation only sets a read lock while
- * reading the tuple.
- * Thus when the application reads the data, the tuple is
- * still locked with a read lock.
- *
- * @param parallelism Number of parallel tuple reads are performed
- * in the scan.
- * Currently a maximum of 256 parallel tuple
- * reads are allowed.
- * The parallelism can in reality be lower
- * than specified
- * depending on the number of nodes
- * in the cluster
- * @return 0 if successful otherwise -1.
- */
- int openScanReadHoldLock(Uint32 parallelism = 16);
-
- /**
- * Scan a table to read tuples.
- *
- * The operation does not wait for locks held by other transactions
- * but returns the latest committed tuple instead.
- *
- * @param parallelism Number of parallel tuple reads are performed
- * in the scan.
- * Currently a maximum of 256 parallel tuple
- * reads are allowed.
- * The parallelism can in reality be lower
- * than specified
- * depending on the number of nodes
- * in the cluster
- * @return 0 if successful otherwise -1.
- */
- int openScanReadCommitted(Uint32 parallelism = 16);
-
- /** @} *********************************************************************/
-
- /**
- * @name Define Range Scan
- *
- * A range scan is a scan on an ordered index. The operation is on
- * the index table but tuples are returned from the primary table.
- * The index contains all tuples where at least one index key has not
- * null value.
- *
- * A range scan is currently opened via a normal open scan method.
- * Bounds can be defined for each index key. After setting bounds,
- * usual scan methods can be used (get value, interpreter, take over).
- * These operate on the primary table.
- *
- * @{
- */
-
- /**
- * Type of ordered index key bound. The values (0-4) will not change
- * and can be used explicitly (e.g. they could be computed).
- */
- enum BoundType {
- BoundLE = 0, ///< lower bound,
- BoundLT = 1, ///< lower bound, strict
- BoundGE = 2, ///< upper bound
- BoundGT = 3, ///< upper bound, strict
- BoundEQ = 4 ///< equality
- };
-
- /**
- * Define bound on index key in range scan.
- *
- * Each index key can have not null lower and/or upper bound, or can
- * be set equal to not null value. The bounds can be defined in any
- * order but a duplicate definition is an error.
- *
- * The scan is most effective when bounds are given for an initial
- * sequence of non-nullable index keys, and all but the last one is an
- * equality. In this case the scan returns a contiguous range from
- * each ordered index fragment.
- *
- * @note This release implements only the case described above,
- * except for the non-nullable limitation. Other sets of
- * bounds return error or empty result set.
- *
- * @note In this release a null key value satisfies any lower
- * bound and no upper bound. This may change.
- *
- * @param attrName Attribute name, alternatively:
- * @param anAttrId Index column id (starting from 0).
- * @param type Type of bound
- * @param value Pointer to bound value
- * @param len Value length in bytes.
- * Fixed per datatype and can be omitted
- * @return 0 if successful otherwise -1
- */
- int setBound(const char* anAttrName, int type, const void* aValue, Uint32 len = 0);
-
- /**
- * Define bound on index key in range scan using index column id.
- * See the other setBound() method for details.
- */
- int setBound(Uint32 anAttrId, int type, const void* aValue, Uint32 len = 0);
-
/** @} *********************************************************************/
-
- /**
- * Validate parallelism parameter by checking the number
- * against number of executing Ndb nodes.
- *
- * @param Parallelism
- * @return 0 if correct parallelism value, otherwise -1.
- *
- */
- int checkParallelism(Uint32 Parallelism);
-
- /**
- * Transfer scan operation to an updating transaction. Use this function
- * when a scan has found a record that you want to update.
- * 1. Start a new transaction.
- * 2. Call the function takeOverForUpdate using your new transaction
- * as parameter, all the properties of the found record will be copied
- * to the new transaction.
- * 3. When you execute the new transaction, the lock held by the scan will
- * be transferred to the new transaction(it's taken over).
- *
- * @note You must have started the scan with openScanExclusive
- * to be able to update the found tuple.
- *
- * @param updateTrans the update transaction connection.
- * @return an NdbOperation or NULL.
- */
- NdbOperation* takeOverForUpdate(NdbConnection* updateTrans);
-
- /**
- * Transfer scan operation to a deleting transaction. Use this function
- * when a scan has found a record that you want to delete.
- * 1. Start a new transaction.
- * 2. Call the function takeOverForDelete using your new transaction
- * as parameter, all the properties of the found record will be copied
- * to the new transaction.
- * 3. When you execute the new transaction, the lock held by the scan will
- * be transferred to the new transaction(its taken over).
- *
- * @note You must have started the scan with openScanExclusive
- * to be able to delete the found tuple.
- *
- * @param deleteTrans the delete transaction connection.
- * @return an NdbOperation or NULL.
- */
- NdbOperation* takeOverForDelete(NdbConnection* deleteTrans);
/**
* @name Specify Search Conditions
@@ -883,16 +694,7 @@ protected:
// Initialise after allocating operation to a transaction
//--------------------------------------------------------------
int init(class NdbTableImpl*, NdbConnection* aCon);
-
- void initScan(); // Initialise after allocating operation
- // to a scan transaction
- virtual void releaseScan(); // Release scan parts of transaction
- void releaseSignals();
- void releaseScanSignals();
- void prepareNextScanResult();
-
- // Common part for Read and Exclusive
- int openScan(Uint32 aParallelism, bool, bool, bool);
+ void initInterpreter();
void next(NdbOperation*); // Set next pointer
@@ -938,19 +740,12 @@ protected:
*****************************************************************************/
int doSend(int ProcessorId, Uint32 lastFlag);
- int doSendScan(int ProcessorId);
-
- int prepareSendScan(Uint32 TC_ConnectPtr,
- Uint64 TransactionId);
-
virtual int prepareSend(Uint32 TC_ConnectPtr,
Uint64 TransactionId);
virtual void setLastFlag(NdbApiSignal* signal, Uint32 lastFlag);
int prepareSendInterpreted(); // Help routine to prepare*
- void TCOPCONF(Uint32 anNdbColumnImplLen); // Handle TC[KEY/INDX]CONF signal
-
int receiveTCKEYREF(NdbApiSignal*);
@@ -958,7 +753,7 @@ protected:
int receiveREAD_CONF(const Uint32* aDataPtr, Uint32 aDataLength);
- int checkMagicNumber(); // Verify correct object
+ int checkMagicNumber(bool b = true); // Verify correct object
int checkState_TransId(NdbApiSignal* aSignal);
@@ -966,10 +761,8 @@ protected:
* These are support methods only used locally in this class.
******************************************************************************/
- virtual int equal_impl(const NdbColumnImpl* anAttrObject,
- const char* aValue,
- Uint32 len);
- NdbRecAttr* getValue(const NdbColumnImpl* anAttrObject, char* aValue = 0);
+ virtual int equal_impl(const NdbColumnImpl*,const char* aValue, Uint32 len);
+ virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char* aValue = 0);
int setValue(const NdbColumnImpl* anAttrObject, const char* aValue, Uint32 len);
NdbBlob* getBlobHandle(NdbConnection* aCon, const NdbColumnImpl* anAttrObject);
int incValue(const NdbColumnImpl* anAttrObject, Uint32 aValue);
@@ -981,15 +774,12 @@ protected:
int branch_reg_reg(Uint32 type, Uint32, Uint32, Uint32);
int branch_col(Uint32 type, Uint32, const char *, Uint32, bool, Uint32 Label);
int branch_col_null(Uint32 type, Uint32 col, Uint32 Label);
- int setBound(const NdbColumnImpl* anAttrObject, int type, const void* aValue, Uint32 len);
// Handle ATTRINFO signals
int receiveREAD_AI(Uint32* aDataPtr, Uint32 aLength);
int insertATTRINFO(Uint32 aData);
int insertATTRINFOloop(const Uint32* aDataPtr, Uint32 aLength);
- int getFirstATTRINFOScan();
- int saveBoundATTRINFO();
int insertKEYINFO(const char* aValue,
Uint32 aStartPosition,
@@ -1013,12 +803,8 @@ protected:
Uint32 ptr2int() { return theReceiver.getId(); };
- NdbOperation*
- takeOverScanOp(OperationType opType, NdbConnection* updateTrans);
-
// get table or index key from prepared signals
int getKeyFromTCREQ(Uint32* data, unsigned size);
- int getKeyFromKEYINFO20(Uint32* data, unsigned size);
/******************************************************************************
* These are the private variables that are defined in the operation objects.
@@ -1032,7 +818,6 @@ protected:
Ndb* theNdb; // Point back to the Ndb object.
NdbConnection* theNdbCon; // Point back to the connection object.
NdbOperation* theNext; // Next pointer to operation.
- NdbOperation* theNextScanOp;
NdbApiSignal* theTCREQ; // The TC[KEY/INDX]REQ signal object
NdbApiSignal* theFirstATTRINFO; // The first ATTRINFO signal object
NdbApiSignal* theCurrentATTRINFO; // The current ATTRINFO signal object
@@ -1043,9 +828,6 @@ protected:
NdbApiSignal* theFirstKEYINFO; // The first KEYINFO signal object
NdbApiSignal* theLastKEYINFO; // The first KEYINFO signal object
- NdbRecAttr* theFirstRecAttr; // The first receive attribute object
- NdbRecAttr* theCurrentRecAttr; // The current receive attribute object
-
class NdbLabel* theFirstLabel;
class NdbLabel* theLastLabel;
class NdbBranch* theFirstBranch;
@@ -1060,11 +842,6 @@ protected:
Uint32* theKEYINFOptr; // Pointer to where to write KEYINFO
Uint32* theATTRINFOptr; // Pointer to where to write ATTRINFO
- Uint32 theTotalRecAI_Len; // The total length received according
- // to the TCKEYCONF signal
- Uint32 theCurrRecAI_Len; // The currently received length
- Uint32 theAI_ElementLen; // How many words long is this element
- Uint32* theCurrElemPtr; // The current pointer to the element
class NdbTableImpl* m_currentTable; // The current table
class NdbTableImpl* m_accessTable;
@@ -1106,15 +883,6 @@ protected:
Uint16 m_keyInfoGSN;
Uint16 m_attrInfoGSN;
- // Scan related variables
- Uint32 theParallelism;
- NdbScanReceiver** theScanReceiversArray;
- NdbApiSignal* theSCAN_TABREQ;
- NdbApiSignal* theFirstSCAN_TABINFO_Send;
- NdbApiSignal* theLastSCAN_TABINFO_Send;
- NdbApiSignal* theFirstSCAN_TABINFO_Recv;
- NdbApiSignal* theLastSCAN_TABINFO_Recv;
- NdbApiSignal* theSCAN_TABCONF_Recv;
// saveBoundATTRINFO() moves ATTRINFO here when setBound() is ready
NdbApiSignal* theBoundATTRINFO;
Uint32 theTotalBoundAI_Len;
@@ -1130,11 +898,11 @@ protected:
inline
int
-NdbOperation::checkMagicNumber()
+NdbOperation::checkMagicNumber(bool b)
{
if (theMagicNumber != 0xABCDEF01){
#ifdef NDB_NO_DROPPED_SIGNAL
- abort();
+ if(b) abort();
#endif
return -1;
}
diff --git a/ndb/include/ndbapi/NdbRecAttr.hpp b/ndb/include/ndbapi/NdbRecAttr.hpp
index 36b54035d96..da03df13027 100644
--- a/ndb/include/ndbapi/NdbRecAttr.hpp
+++ b/ndb/include/ndbapi/NdbRecAttr.hpp
@@ -73,8 +73,9 @@ class NdbOperation;
class NdbRecAttr
{
friend class NdbOperation;
+ friend class NdbIndexScanOperation;
friend class NdbEventOperationImpl;
- friend class NdbScanReceiver;
+ friend class NdbReceiver;
friend class Ndb;
friend class NdbOut& operator<<(class NdbOut&, const class AttributeS&);
@@ -244,9 +245,8 @@ private:
NdbRecAttr();
Uint32 attrId() const; /* Get attribute id */
- void setNULL(); /* Set NULL indicator */
- void setNotNULL(); /* Set Not NULL indicator */
- void setUNDEFINED(); /* Set UNDEFINED indicator */
+ bool setNULL(); /* Set NULL indicator */
+ bool receive_data(const Uint32*, Uint32);
void release(); /* Release memory if allocated */
void init(); /* Initialise object when allocated */
@@ -269,6 +269,7 @@ private:
Uint32 theAttrId; /* The attribute id */
int theNULLind;
+ bool m_nullable;
Uint32 theAttrSize;
Uint32 theArraySize;
const NdbDictionary::Column* m_column;
@@ -289,29 +290,7 @@ NdbRecAttr::getColumn() const {
inline
Uint32
NdbRecAttr::attrSize() const {
-
- switch(getType()){
- case NdbDictionary::Column::Int:
- case NdbDictionary::Column::Unsigned:
- case NdbDictionary::Column::Float:
- return 4;
- case NdbDictionary::Column::Decimal:
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:
- return 1;
- case NdbDictionary::Column::Bigint:
- case NdbDictionary::Column::Bigunsigned:
- case NdbDictionary::Column::Double:
- case NdbDictionary::Column::Datetime:
- return 8;
- case NdbDictionary::Column::Timespec:
- return 12;
- case NdbDictionary::Column::Undefined:
- default:
- return 0;
- }
+ return theAttrSize;
}
inline
@@ -449,24 +428,11 @@ NdbRecAttr::attrId() const
}
inline
-void
+bool
NdbRecAttr::setNULL()
{
theNULLind = 1;
-}
-
-inline
-void
-NdbRecAttr::setNotNULL()
-{
- theNULLind = 0;
-}
-
-inline
-void
-NdbRecAttr::setUNDEFINED()
-{
- theNULLind = -1;
+ return m_nullable;
}
inline
diff --git a/ndb/include/ndbapi/NdbReceiver.hpp b/ndb/include/ndbapi/NdbReceiver.hpp
index a1a08a9735a..13898fc8e5f 100644
--- a/ndb/include/ndbapi/NdbReceiver.hpp
+++ b/ndb/include/ndbapi/NdbReceiver.hpp
@@ -23,6 +23,12 @@
class Ndb;
class NdbReceiver
{
+ friend class Ndb;
+ friend class NdbOperation;
+ friend class NdbScanOperation;
+ friend class NdbIndexOperation;
+ friend class NdbIndexScanOperation;
+ friend class NdbConnection;
public:
enum ReceiverType { NDB_UNINITIALIZED,
NDB_OPERATION = 1,
@@ -32,6 +38,7 @@ public:
NdbReceiver(Ndb *aNdb);
void init(ReceiverType type, void* owner);
+ void release();
~NdbReceiver();
Uint32 getId(){
@@ -42,18 +49,51 @@ public:
return m_type;
}
+ inline NdbConnection * getTransaction();
void* getOwner(){
return m_owner;
}
bool checkMagicNumber() const;
+ inline void next(NdbReceiver* next) { m_next = next;}
+ inline NdbReceiver* next() { return m_next; }
+
private:
Uint32 theMagicNumber;
Ndb* m_ndb;
Uint32 m_id;
+ Uint32 m_tcPtrI;
+ Uint32 m_key_info;
ReceiverType m_type;
void* m_owner;
+ NdbReceiver* m_next;
+
+ /**
+ * At setup
+ */
+ class NdbRecAttr * getValue(const class NdbColumnImpl*, char * user_dst_ptr);
+ void do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size);
+ void prepareSend();
+
+ int execKEYINFO20(Uint32 info, const Uint32* ptr, Uint32 len);
+ int execTRANSID_AI(const Uint32* ptr, Uint32 len);
+ int execTCOPCONF(Uint32 len);
+ int execSCANOPCONF(Uint32 tcPtrI, Uint32 len, Uint32 rows);
+ class NdbRecAttr* theFirstRecAttr;
+ class NdbRecAttr* theCurrentRecAttr;
+ class NdbRecAttr** m_rows;
+
+ Uint32 m_list_index; // When using multiple
+ Uint32 m_current_row;
+ Uint32 m_result_rows;
+ Uint32 m_defined_rows;
+
+ Uint32 m_expected_result_length;
+ Uint32 m_received_result_length;
+
+ bool nextResult() const { return m_current_row < m_result_rows; }
+ void copyout(NdbReceiver&);
};
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -72,5 +112,32 @@ NdbReceiver::checkMagicNumber() const {
return retVal;
}
+inline
+void
+NdbReceiver::prepareSend(){
+ m_current_row = 0;
+ m_received_result_length = 0;
+ m_expected_result_length = 0;
+ theCurrentRecAttr = theFirstRecAttr;
+}
+
+inline
+int
+NdbReceiver::execTCOPCONF(Uint32 len){
+ Uint32 tmp = m_received_result_length;
+ m_expected_result_length = len;
+ return (tmp == len ? 1 : 0);
+}
+
+inline
+int
+NdbReceiver::execSCANOPCONF(Uint32 tcPtrI, Uint32 len, Uint32 rows){
+ m_tcPtrI = tcPtrI;
+ m_result_rows = rows;
+ Uint32 tmp = m_received_result_length;
+ m_expected_result_length = len;
+ return (tmp == len ? 1 : 0);
+}
+
#endif
#endif
diff --git a/ndb/include/ndbapi/NdbResultSet.hpp b/ndb/include/ndbapi/NdbResultSet.hpp
index d48df01214e..483e08179c0 100644
--- a/ndb/include/ndbapi/NdbResultSet.hpp
+++ b/ndb/include/ndbapi/NdbResultSet.hpp
@@ -30,17 +30,15 @@
#define NdbResultSet_H
-#include <NdbCursorOperation.hpp>
-#include <NdbIndexOperation.hpp>
#include <NdbScanOperation.hpp>
/**
* @class NdbResultSet
- * @brief NdbResultSet contains a NdbCursorOperation.
+ * @brief NdbResultSet contains a NdbScanOperation.
*/
class NdbResultSet
{
- friend class NdbCursorOperation;
+ friend class NdbScanOperation;
public:
@@ -93,22 +91,62 @@ public:
*/
int nextResult(bool fetchAllowed = true);
+ /**
+ * Close result set (scan)
+ */
void close();
- NdbOperation* updateTuple();
- NdbOperation* updateTuple(NdbConnection* takeOverTransaction);
+ /**
+ * Restart
+ */
+ int restart();
+ /**
+ * Transfer scan operation to an updating transaction. Use this function
+ * when a scan has found a record that you want to update.
+ * 1. Start a new transaction.
+ * 2. Call the function takeOverForUpdate using your new transaction
+ * as parameter, all the properties of the found record will be copied
+ * to the new transaction.
+ * 3. When you execute the new transaction, the lock held by the scan will
+ * be transferred to the new transaction (it's taken over).
+ *
+ * @note You must have started the scan with openScanExclusive
+ * to be able to update the found tuple.
+ *
+ * @param updateTrans the update transaction connection.
+ * @return an NdbOperation or NULL.
+ */
+ NdbOperation* updateTuple();
+ NdbOperation* updateTuple(NdbConnection* updateTrans);
+
+ /**
+ * Transfer scan operation to a deleting transaction. Use this function
+ * when a scan has found a record that you want to delete.
+ * 1. Start a new transaction.
+ * 2. Call the function takeOverForDelete using your new transaction
+ * as parameter, all the properties of the found record will be copied
+ * to the new transaction.
+ * 3. When you execute the new transaction, the lock held by the scan will
+ * be transferred to the new transaction (it's taken over).
+ *
+ * @note You must have started the scan with openScanExclusive
+ * to be able to delete the found tuple.
+ *
+ * @param deleteTrans the delete transaction connection.
+ * @return an NdbOperation or NULL.
+ */
int deleteTuple();
int deleteTuple(NdbConnection* takeOverTransaction);
private:
- NdbResultSet(NdbCursorOperation*);
+ NdbResultSet(NdbScanOperation*);
~NdbResultSet();
void init();
- NdbCursorOperation* m_operation;
+ NdbScanOperation* m_operation;
};
#endif
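The take-over pattern described in the comments above, sketched end to end. This is a hedged sketch: names are invented, error paths are trimmed, and the 0/1/2 return codes of nextResult follow the old NdbConnection::nextScanResult documentation removed earlier in this patch.

    // Scan with exclusive locks and update each row in a take-over
    // transaction, batching once per cached result set.
    int touch_all_rows(Ndb* ndb)
    {
      NdbConnection* scanCon = ndb->startTransaction();
      NdbScanOperation* scanOp = scanCon->getNdbScanOperation("t");
      NdbResultSet* rs = scanOp->readTuples(NdbScanOperation::LM_Exclusive);
      if (scanCon->execute(NoCommit) == -1)
        return -1;

      int ret;
      while ((ret = rs->nextResult(true)) == 0) {
        NdbConnection* updCon = ndb->startTransaction();
        do {
          NdbOperation* upd = rs->updateTuple(updCon);  // take over the lock
          upd->setValue("c", (Uint32)0);
        } while ((ret = rs->nextResult(false)) == 0);   // only cached rows
        if (updCon->execute(Commit) == -1)
          return -1;
        ndb->closeTransaction(updCon);
        if (ret != 2)                                   // 2 = cache empty, fetch more
          break;
      }
      rs->close();
      ndb->closeTransaction(scanCon);
      return 0;
    }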
diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp
index 151dc5bb99f..c7ae029e742 100644
--- a/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbScanOperation.hpp
@@ -29,9 +29,7 @@
#ifndef NdbScanOperation_H
#define NdbScanOperation_H
-
#include <NdbOperation.hpp>
-#include <NdbCursorOperation.hpp>
class NdbBlob;
@@ -39,63 +37,71 @@ class NdbBlob;
* @class NdbScanOperation
* @brief Class of scan operations for use in transactions.
*/
-class NdbScanOperation : public NdbCursorOperation
-{
+class NdbScanOperation : public NdbOperation {
friend class Ndb;
friend class NdbConnection;
friend class NdbResultSet;
friend class NdbOperation;
-
+ friend class NdbBlob;
public:
/**
+ * Type of cursor
+ */
+ enum CursorType {
+ NoCursor = 0,
+ ScanCursor = 1,
+ IndexCursor = 2
+ };
+
+ /**
+ * Lock when performing scan
+ */
+ enum LockMode {
+ LM_Read = 0,
+ LM_Exclusive = 1,
+ LM_CommittedRead = 2,
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ LM_Dirty = 2
+#endif
+ };
+
+ /**
+ * Type of cursor
+ */
+ CursorType get_cursor_type() const;
+
+ /**
* readTuples returns a NdbResultSet where tuples are stored.
* Tuples are not stored in NdbResultSet until execute(NoCommit)
* has been executed and nextResult has been called.
*
* @param parallel Scan parallelism
+ * @param batch Number of rows to fetch from each fragment at a time
* @param LockMode Scan lock handling
* @returns NdbResultSet.
+ * @note Specifying 0 for batch and parallel means maximum performance
*/
- virtual NdbResultSet* readTuples(unsigned parallel = 0,
- LockMode = LM_Read );
+ NdbResultSet* readTuples(LockMode = LM_Read,
+ Uint32 batch = 0, Uint32 parallel = 0);
+
+ inline NdbResultSet* readTuples(int parallell){
+ return readTuples(LM_Read, 0, parallell);
+ }
+
+ inline NdbResultSet* readTuplesExclusive(int parallell = 0){
+ return readTuples(LM_Exclusive, 0, parallell);
+ }
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
-
- int updateTuples();
- int updateTuples(Uint32 parallelism);
-
- int deleteTuples();
- int deleteTuples(Uint32 parallelism);
-
- // Overload setValue for updateTuples
- int setValue(const char* anAttrName, const char* aValue, Uint32 len = 0);
- int setValue(const char* anAttrName, Int32 aValue);
- int setValue(const char* anAttrName, Uint32 aValue);
- int setValue(const char* anAttrName, Int64 aValue);
- int setValue(const char* anAttrName, Uint64 aValue);
- int setValue(const char* anAttrName, float aValue);
- int setValue(const char* anAttrName, double aValue);
-
- int setValue(Uint32 anAttrId, const char* aValue, Uint32 len = 0);
- int setValue(Uint32 anAttrId, Int32 aValue);
- int setValue(Uint32 anAttrId, Uint32 aValue);
- int setValue(Uint32 anAttrId, Int64 aValue);
- int setValue(Uint32 anAttrId, Uint64 aValue);
- int setValue(Uint32 anAttrId, float aValue);
- int setValue(Uint32 anAttrId, double aValue);
-#endif
-
NdbBlob* getBlobHandle(const char* anAttrName);
NdbBlob* getBlobHandle(Uint32 anAttrId);
-private:
- NdbScanOperation(Ndb* aNdb);
+protected:
+ CursorType m_cursor_type;
+ NdbScanOperation(Ndb* aNdb);
~NdbScanOperation();
- NdbCursorOperation::CursorType cursorType();
-
- virtual int nextResult(bool fetchAllowed = true);
+ int nextResult(bool fetchAllowed = true);
virtual void release();
void closeScan();
@@ -111,125 +117,54 @@ private:
virtual void setErrorCode(int aErrorCode);
virtual void setErrorCodeAbort(int aErrorCode);
- virtual int equal_impl(const NdbColumnImpl* anAttrObject,
- const char* aValue,
- Uint32 len);
-private:
+ NdbResultSet * m_resultSet;
+ NdbResultSet* getResultSet();
NdbConnection *m_transConnection;
- bool m_autoExecute;
- bool m_updateOp;
- bool m_writeOp;
- bool m_deleteOp;
- class SetValueRecList* m_setValueList;
-};
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
-class AttrInfo;
-class SetValueRecList;
+ // Scan related variables
+ Uint32 theBatchSize;
+ Uint32 theParallelism;
+ Uint32 m_keyInfo;
+ NdbApiSignal* theSCAN_TABREQ;
-class SetValueRec {
- friend class SetValueRecList;
-public:
- SetValueRec();
- ~SetValueRec();
-
- enum SetValueType {
- SET_STRING_ATTR1 = 0,
- SET_INT32_ATTR1 = 1,
- SET_UINT32_ATTR1 = 2,
- SET_INT64_ATTR1 = 3,
- SET_UINT64_ATTR1 = 4,
- SET_FLOAT_ATTR1 = 5,
- SET_DOUBLE_ATTR1 = 6,
- SET_STRING_ATTR2 = 7,
- SET_INT32_ATTR2 = 8,
- SET_UINT32_ATTR2 = 9,
- SET_INT64_ATTR2 = 10,
- SET_UINT64_ATTR2 = 11,
- SET_FLOAT_ATTR2 = 12,
- SET_DOUBLE_ATTR2 = 13
- };
-
- SetValueType stype;
- union {
- char* anAttrName;
- Uint32 anAttrId;
- };
- struct String {
- char* aStringValue;
- Uint32 len;
- };
- union {
- String stringStruct;
- Int32 anInt32Value;
- Uint32 anUint32Value;
- Int64 anInt64Value;
- Uint64 anUint64Value;
- float aFloatValue;
- double aDoubleValue;
- };
-private:
- SetValueRec* next;
-};
-
-inline
-SetValueRec::SetValueRec() :
- next(0)
-{
-}
+ int getFirstATTRINFOScan();
+ int doSendScan(int ProcessorId);
+ int prepareSendScan(Uint32 TC_ConnectPtr, Uint64 TransactionId);
+
+ int fix_receivers(Uint32 parallel);
+ Uint32* m_array; // containing all arrays below
+ Uint32 m_allocated_receivers;
+ NdbReceiver** m_receivers; // All receivers
-class SetValueRecList {
-public:
- SetValueRecList();
- ~SetValueRecList();
-
- void add(const char* anAttrName, const char* aValue, Uint32 len = 0);
- void add(const char* anAttrName, Int32 aValue);
- void add(const char* anAttrName, Uint32 aValue);
- void add(const char* anAttrName, Int64 aValue);
- void add(const char* anAttrName, Uint64 aValue);
- void add(const char* anAttrName, float aValue);
- void add(const char* anAttrName, double aValue);
- void add(Uint32 anAttrId, const char* aValue, Uint32 len = 0);
- void add(Uint32 anAttrId, Int32 aValue);
- void add(Uint32 anAttrId, Uint32 aValue);
- void add(Uint32 anAttrId, Int64 aValue);
- void add(Uint32 anAttrId, Uint64 aValue);
- void add(Uint32 anAttrId, float aValue);
- void add(Uint32 anAttrId, double aValue);
-
- typedef void(* IterateFn)(SetValueRec&, NdbOperation&);
- static void callSetValueFn(SetValueRec&, NdbOperation&);
- void iterate(IterateFn nextfn, NdbOperation&);
-private:
- SetValueRec* first;
- SetValueRec* last;
-};
+ Uint32* m_prepared_receivers; // These are to be sent
+
+ Uint32 m_current_api_receiver;
+ Uint32 m_api_receivers_count;
+ NdbReceiver** m_api_receivers; // These are currently used by api
+
+ Uint32 m_conf_receivers_count; // NOTE needs mutex to access
+ NdbReceiver** m_conf_receivers; // receive thread puts them here
+
+ Uint32 m_sent_receivers_count; // NOTE needs mutex to access
+ NdbReceiver** m_sent_receivers; // receive thread puts them here
+
+ int send_next_scan(Uint32 cnt, bool close);
+ void receiver_delivered(NdbReceiver*);
+ void receiver_completed(NdbReceiver*);
+ void execCLOSE_SCAN_REP();
-inline
-SetValueRecList::SetValueRecList() :
- first(0),
- last(0)
-{
-}
+ int getKeyFromKEYINFO20(Uint32* data, unsigned size);
+ NdbOperation* takeOverScanOp(OperationType opType, NdbConnection*);
-inline
-SetValueRecList::~SetValueRecList() {
- if (first) delete first;
- first = last = 0;
-}
+ Uint32 m_ordered;
+ int restart();
+};
inline
-void SetValueRecList::iterate(SetValueRecList::IterateFn nextfn, NdbOperation& oper)
-{
- SetValueRec* recPtr = first;
- while(recPtr) {
- (*nextfn)(*recPtr, oper);
- recPtr = recPtr->next; // Move to next in list - MASV
- }
+NdbScanOperation::CursorType
+NdbScanOperation::get_cursor_type() const {
+ return m_cursor_type;
}
#endif
-
-#endif
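As a hedged sketch, the plain table-scan replacement for the removed openScanRead family, using the new readTuples signature (0 for batch/parallel per the note above). Table and column names are invented.

    // Count rows with a full table scan using committed read (no locks),
    // roughly what openScanReadCommitted used to provide.
    int count_rows(Ndb* ndb, Uint64& count)
    {
      count = 0;
      NdbConnection* con = ndb->startTransaction();
      NdbScanOperation* op = con->getNdbScanOperation("t");
      NdbResultSet* rs = op->readTuples(NdbScanOperation::LM_CommittedRead);
      if (con->execute(NoCommit) == -1)
        return -1;
      int ret;
      while ((ret = rs->nextResult(true)) == 0)
        count++;
      ndb->closeTransaction(con);
      return ret == 1 ? 0 : -1;   // 1 = scan finished cleanly
    }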
diff --git a/ndb/include/portlib/NdbTCP.h b/ndb/include/portlib/NdbTCP.h
index 42c34855c39..4dc8435eef1 100644
--- a/ndb/include/portlib/NdbTCP.h
+++ b/ndb/include/portlib/NdbTCP.h
@@ -64,7 +64,7 @@ typedef int socklen_t;
#define NDB_NONBLOCK O_NONBLOCK
#define NDB_SOCKET_TYPE int
#define NDB_INVALID_SOCKET -1
-#define NDB_CLOSE_SOCKET(x) close(x)
+#define NDB_CLOSE_SOCKET(x) ::close(x)
#define InetErrno errno
diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/ndb/include/transporter/TransporterDefinitions.hpp
index cb859e310db..0301d12348f 100644
--- a/ndb/include/transporter/TransporterDefinitions.hpp
+++ b/ndb/include/transporter/TransporterDefinitions.hpp
@@ -69,6 +69,7 @@ struct TCP_TransporterConfiguration {
* SHM Transporter Configuration
*/
struct SHM_TransporterConfiguration {
+ Uint32 port;
NodeId remoteNodeId;
NodeId localNodeId;
bool compression;
diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/ndb/include/transporter/TransporterRegistry.hpp
index 6c979777f18..3c6c307406c 100644
--- a/ndb/include/transporter/TransporterRegistry.hpp
+++ b/ndb/include/transporter/TransporterRegistry.hpp
@@ -29,20 +29,10 @@
#define TransporterRegistry_H
#include "TransporterDefinitions.hpp"
+#include <SocketServer.hpp>
#include <NdbTCP.h>
-// A transporter is always in a PerformState.
-// PerformIO is used initially and as long as any of the events
-// PerformConnect, ...
-enum PerformState {
- PerformNothing = 4, // Does nothing
- PerformIO = 0, // Is connected
- PerformConnect = 1, // Is trying to connect
- PerformDisconnect = 2, // Trying to disconnect
- RemoveTransporter = 3 // Will be removed
-};
-
// A transporter is always in an IOState.
// NoHalt is used initially and as long as it is no restrictions on
// sending or receiving.
@@ -60,18 +50,45 @@ enum TransporterType {
tt_OSE_TRANSPORTER = 4
};
+static const char *performStateString[] =
+ { "is connected",
+ "is trying to connect",
+ "does nothing",
+ "is trying to disconnect" };
+
class Transporter;
class TCP_Transporter;
class SCI_Transporter;
class SHM_Transporter;
class OSE_Transporter;
+class TransporterRegistry;
+class SocketAuthenticator;
+
+class TransporterService : public SocketServer::Service {
+ SocketAuthenticator * m_auth;
+ TransporterRegistry * m_transporter_registry;
+public:
+ TransporterService(SocketAuthenticator *auth= 0)
+ {
+ m_auth= auth;
+ m_transporter_registry= 0;
+ }
+ void setTransporterRegistry(TransporterRegistry *t)
+ {
+ m_transporter_registry= t;
+ }
+ SocketServer::Session * newSession(NDB_SOCKET_TYPE socket);
+};
+
/**
* @class TransporterRegistry
* @brief ...
*/
class TransporterRegistry {
friend class OSE_Receiver;
+ friend class Transporter;
+ friend class TransporterService;
public:
/**
* Constructor
@@ -98,6 +115,12 @@ public:
*/
~TransporterRegistry();
+ bool start_service(SocketServer& server);
+ bool start_clients();
+ bool stop_clients();
+ void start_clients_thread();
+ void update_connections();
+
/**
* Start/Stop receiving
*/
@@ -110,16 +133,26 @@ public:
void startSending();
void stopSending();
+ // A transporter is always in a PerformState.
+ // PerformIO is used initially and as long as any of the events
+ // PerformConnect, ...
+ enum PerformState {
+ CONNECTED = 0,
+ CONNECTING = 1,
+ DISCONNECTED = 2,
+ DISCONNECTING = 3
+ };
+ const char *getPerformStateString(NodeId nodeId) const
+ { return performStateString[(unsigned)performStates[nodeId]]; };
+
/**
* Get and set methods for PerformState
*/
- PerformState performState(NodeId nodeId);
- void setPerformState(NodeId nodeId, PerformState state);
-
- /**
- * Set perform state for all transporters
- */
- void setPerformState(PerformState state);
+ void do_connect(NodeId node_id);
+ void do_disconnect(NodeId node_id);
+ bool is_connected(NodeId node_id) { return performStates[node_id] == CONNECTED; };
+ void report_connect(NodeId node_id);
+ void report_disconnect(NodeId node_id, int errnum);
/**
* Get and set methods for IOState
@@ -174,8 +207,6 @@ public:
void performReceive();
void performSend();
- void checkConnections();
-
/**
* Force sending if more than or equal to sendLimit
* number have asked for send. Returns 0 if not sending
@@ -187,11 +218,18 @@ public:
void printState();
#endif
+ unsigned short m_service_port;
+
protected:
private:
void * callbackObj;
+ TransporterService *m_transporter_service;
+ char *m_interface_name;
+ struct NdbThread *m_start_clients_thread;
+ bool m_run_start_clients_thread;
+
int sendCounter;
NodeId localNodeId;
bool nodeIdSpecified;
@@ -202,11 +240,6 @@ private:
int nSHMTransporters;
int nOSETransporters;
- int m_ccCount;
- int m_ccIndex;
- int m_ccStep;
- int m_nTransportersPerformConnect;
- bool m_ccReady;
/**
* Arrays holding all transporters in the order they are created
*/
diff --git a/ndb/include/util/BaseString.hpp b/ndb/include/util/BaseString.hpp
index 8755c13e9bb..a1bb91ea9c5 100644
--- a/ndb/include/util/BaseString.hpp
+++ b/ndb/include/util/BaseString.hpp
@@ -176,7 +176,7 @@ public:
/**
* Trim string from <i>delim</i>
*/
- static char* trim(char * src, const char * delim = " \t");
+ static char* trim(char * src, const char * delim);
private:
char* m_chr;
unsigned m_len;
diff --git a/ndb/include/util/Bitmask.hpp b/ndb/include/util/Bitmask.hpp
index 7355742f845..bb217adab5f 100644
--- a/ndb/include/util/Bitmask.hpp
+++ b/ndb/include/util/Bitmask.hpp
@@ -19,11 +19,6 @@
#include <ndb_global.h>
-#ifndef NDB_ASSERT
-#define NDB_ASSERT(x, s) \
- do { if (!(x)) { printf("%s\n", s); abort(); } } while (0)
-#endif
-
/**
* Bitmask implementation. Size is given explicitly
* (as first argument). All methods are static.
@@ -140,7 +135,7 @@ public:
inline bool
BitmaskImpl::get(unsigned size, const Uint32 data[], unsigned n)
{
- NDB_ASSERT(n < (size << 5), "bit get out of range");
+ assert(n < (size << 5));
return (data[n >> 5] & (1 << (n & 31))) != 0;
}
@@ -153,7 +148,7 @@ BitmaskImpl::set(unsigned size, Uint32 data[], unsigned n, bool value)
inline void
BitmaskImpl::set(unsigned size, Uint32 data[], unsigned n)
{
- NDB_ASSERT(n < (size << 5), "bit set out of range");
+ assert(n < (size << 5));
data[n >> 5] |= (1 << (n & 31));
}
@@ -176,7 +171,7 @@ BitmaskImpl::assign(unsigned size, Uint32 dst[], const Uint32 src[])
inline void
BitmaskImpl::clear(unsigned size, Uint32 data[], unsigned n)
{
- NDB_ASSERT(n < (size << 5), "bit clear out of range");
+ assert(n < (size << 5));
data[n >> 5] &= ~(1 << (n & 31));
}
@@ -326,7 +321,7 @@ BitmaskImpl::getText(unsigned size, const Uint32 data[], char* buf)
* XXX replace size by length in bits
*/
template <unsigned size>
-class Bitmask {
+struct BitmaskPOD {
public:
/**
* POD data representation
@@ -334,7 +329,7 @@ public:
struct Data {
Uint32 data[size];
#if 0
- Data & operator=(const Bitmask<size> & src) {
+ Data & operator=(const BitmaskPOD<size> & src) {
src.copyto(size, data);
return *this;
}
@@ -348,19 +343,17 @@ public:
STATIC_CONST( NotFound = BitmaskImpl::NotFound );
STATIC_CONST( TextLength = size * 8 );
- Bitmask() { clear();}
-
/**
* assign - Set all bits in <em>dst</em> to corresponding in <em>src/<em>
*/
- void assign(const typename Bitmask<size>::Data & src);
+ void assign(const typename BitmaskPOD<size>::Data & src);
/**
* assign - Set all bits in <em>dst</em> to corresponding in <em>src/<em>
*/
static void assign(Uint32 dst[], const Uint32 src[]);
- static void assign(Uint32 dst[], const Bitmask<size> & src);
- void assign(const Bitmask<size> & src);
+ static void assign(Uint32 dst[], const BitmaskPOD<size> & src);
+ void assign(const BitmaskPOD<size> & src);
/**
* copy this to <em>dst</em>
@@ -432,43 +425,43 @@ public:
* equal - Bitwise equal.
*/
static bool equal(const Uint32 data[], const Uint32 data2[]);
- bool equal(const Bitmask<size>& mask2) const;
+ bool equal(const BitmaskPOD<size>& mask2) const;
/**
* bitOR - Bitwise (x | y) into first operand.
*/
static void bitOR(Uint32 data[], const Uint32 data2[]);
- Bitmask<size>& bitOR(const Bitmask<size>& mask2);
+ BitmaskPOD<size>& bitOR(const BitmaskPOD<size>& mask2);
/**
* bitAND - Bitwise (x & y) into first operand.
*/
static void bitAND(Uint32 data[], const Uint32 data2[]);
- Bitmask<size>& bitAND(const Bitmask<size>& mask2);
+ BitmaskPOD<size>& bitAND(const BitmaskPOD<size>& mask2);
/**
* bitANDC - Bitwise (x & ~y) into first operand.
*/
static void bitANDC(Uint32 data[], const Uint32 data2[]);
- Bitmask<size>& bitANDC(const Bitmask<size>& mask2);
+ BitmaskPOD<size>& bitANDC(const BitmaskPOD<size>& mask2);
/**
* bitXOR - Bitwise (x ^ y) into first operand.
*/
static void bitXOR(Uint32 data[], const Uint32 data2[]);
- Bitmask<size>& bitXOR(const Bitmask<size>& mask2);
+ BitmaskPOD<size>& bitXOR(const BitmaskPOD<size>& mask2);
/**
* contains - Check if all bits set in data2 (that) are also set in data (this)
*/
static bool contains(Uint32 data[], const Uint32 data2[]);
- bool contains(Bitmask<size> that);
+ bool contains(BitmaskPOD<size> that);
/**
- * overlaps - Check if any bit set in this Bitmask (data) is also set in that (data2)
+ * overlaps - Check if any bit set in this BitmaskPOD (data) is also set in that (data2)
*/
static bool overlaps(Uint32 data[], const Uint32 data2[]);
- bool overlaps(Bitmask<size> that);
+ bool overlaps(BitmaskPOD<size> that);
/**
* getText - Return as hex-digits (only for debug routines).
@@ -479,286 +472,292 @@ public:
template <unsigned size>
inline void
-Bitmask<size>::assign(Uint32 dst[], const Uint32 src[])
+BitmaskPOD<size>::assign(Uint32 dst[], const Uint32 src[])
{
BitmaskImpl::assign(size, dst, src);
}
template <unsigned size>
inline void
-Bitmask<size>::assign(Uint32 dst[], const Bitmask<size> & src)
+BitmaskPOD<size>::assign(Uint32 dst[], const BitmaskPOD<size> & src)
{
BitmaskImpl::assign(size, dst, src.rep.data);
}
template <unsigned size>
inline void
-Bitmask<size>::assign(const typename Bitmask<size>::Data & src)
+BitmaskPOD<size>::assign(const typename BitmaskPOD<size>::Data & src)
{
- assign(rep.data, src.data);
+ BitmaskPOD<size>::assign(rep.data, src.data);
}
template <unsigned size>
inline void
-Bitmask<size>::assign(const Bitmask<size> & src)
+BitmaskPOD<size>::assign(const BitmaskPOD<size> & src)
{
- assign(rep.data, src.rep.data);
+ BitmaskPOD<size>::assign(rep.data, src.rep.data);
}
template <unsigned size>
inline void
-Bitmask<size>::copyto(unsigned sz, Uint32 dst[]) const
+BitmaskPOD<size>::copyto(unsigned sz, Uint32 dst[]) const
{
BitmaskImpl::assign(sz, dst, rep.data);
}
template <unsigned size>
inline void
-Bitmask<size>::assign(unsigned sz, const Uint32 src[])
+BitmaskPOD<size>::assign(unsigned sz, const Uint32 src[])
{
BitmaskImpl::assign(sz, rep.data, src);
}
template <unsigned size>
inline bool
-Bitmask<size>::get(const Uint32 data[], unsigned n)
+BitmaskPOD<size>::get(const Uint32 data[], unsigned n)
{
return BitmaskImpl::get(size, data, n);
}
template <unsigned size>
inline bool
-Bitmask<size>::get(unsigned n) const
+BitmaskPOD<size>::get(unsigned n) const
{
- return get(rep.data, n);
+ return BitmaskPOD<size>::get(rep.data, n);
}
template <unsigned size>
inline void
-Bitmask<size>::set(Uint32 data[], unsigned n, bool value)
+BitmaskPOD<size>::set(Uint32 data[], unsigned n, bool value)
{
BitmaskImpl::set(size, data, n, value);
}
template <unsigned size>
inline void
-Bitmask<size>::set(unsigned n, bool value)
+BitmaskPOD<size>::set(unsigned n, bool value)
{
- set(rep.data, n, value);
+ BitmaskPOD<size>::set(rep.data, n, value);
}
template <unsigned size>
inline void
-Bitmask<size>::set(Uint32 data[], unsigned n)
+BitmaskPOD<size>::set(Uint32 data[], unsigned n)
{
BitmaskImpl::set(size, data, n);
}
template <unsigned size>
inline void
-Bitmask<size>::set(unsigned n)
+BitmaskPOD<size>::set(unsigned n)
{
- set(rep.data, n);
+ BitmaskPOD<size>::set(rep.data, n);
}
template <unsigned size>
inline void
-Bitmask<size>::set(Uint32 data[])
+BitmaskPOD<size>::set(Uint32 data[])
{
BitmaskImpl::set(size, data);
}
template <unsigned size>
inline void
-Bitmask<size>::set()
+BitmaskPOD<size>::set()
{
- set(rep.data);
+ BitmaskPOD<size>::set(rep.data);
}
template <unsigned size>
inline void
-Bitmask<size>::clear(Uint32 data[], unsigned n)
+BitmaskPOD<size>::clear(Uint32 data[], unsigned n)
{
BitmaskImpl::clear(size, data, n);
}
template <unsigned size>
inline void
-Bitmask<size>::clear(unsigned n)
+BitmaskPOD<size>::clear(unsigned n)
{
- clear(rep.data, n);
+ BitmaskPOD<size>::clear(rep.data, n);
}
template <unsigned size>
inline void
-Bitmask<size>::clear(Uint32 data[])
+BitmaskPOD<size>::clear(Uint32 data[])
{
BitmaskImpl::clear(size, data);
}
template <unsigned size>
inline void
-Bitmask<size>::clear()
+BitmaskPOD<size>::clear()
{
- clear(rep.data);
+ BitmaskPOD<size>::clear(rep.data);
}
template <unsigned size>
inline bool
-Bitmask<size>::isclear(const Uint32 data[])
+BitmaskPOD<size>::isclear(const Uint32 data[])
{
return BitmaskImpl::isclear(size, data);
}
template <unsigned size>
inline bool
-Bitmask<size>::isclear() const
+BitmaskPOD<size>::isclear() const
{
- return isclear(rep.data);
+ return BitmaskPOD<size>::isclear(rep.data);
}
template <unsigned size>
unsigned
-Bitmask<size>::count(const Uint32 data[])
+BitmaskPOD<size>::count(const Uint32 data[])
{
return BitmaskImpl::count(size, data);
}
template <unsigned size>
inline unsigned
-Bitmask<size>::count() const
+BitmaskPOD<size>::count() const
{
- return count(rep.data);
+ return BitmaskPOD<size>::count(rep.data);
}
template <unsigned size>
unsigned
-Bitmask<size>::find(const Uint32 data[], unsigned n)
+BitmaskPOD<size>::find(const Uint32 data[], unsigned n)
{
return BitmaskImpl::find(size, data, n);
}
template <unsigned size>
inline unsigned
-Bitmask<size>::find(unsigned n) const
+BitmaskPOD<size>::find(unsigned n) const
{
- return find(rep.data, n);
+ return BitmaskPOD<size>::find(rep.data, n);
}
template <unsigned size>
inline bool
-Bitmask<size>::equal(const Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::equal(const Uint32 data[], const Uint32 data2[])
{
return BitmaskImpl::equal(size, data, data2);
}
template <unsigned size>
inline bool
-Bitmask<size>::equal(const Bitmask<size>& mask2) const
+BitmaskPOD<size>::equal(const BitmaskPOD<size>& mask2) const
{
- return equal(rep.data, mask2.rep.data);
+ return BitmaskPOD<size>::equal(rep.data, mask2.rep.data);
}
template <unsigned size>
inline void
-Bitmask<size>::bitOR(Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::bitOR(Uint32 data[], const Uint32 data2[])
{
BitmaskImpl::bitOR(size,data, data2);
}
template <unsigned size>
-inline Bitmask<size>&
-Bitmask<size>::bitOR(const Bitmask<size>& mask2)
+inline BitmaskPOD<size>&
+BitmaskPOD<size>::bitOR(const BitmaskPOD<size>& mask2)
{
- bitOR(rep.data, mask2.rep.data);
+ BitmaskPOD<size>::bitOR(rep.data, mask2.rep.data);
return *this;
}
template <unsigned size>
inline void
-Bitmask<size>::bitAND(Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::bitAND(Uint32 data[], const Uint32 data2[])
{
BitmaskImpl::bitAND(size,data, data2);
}
template <unsigned size>
-inline Bitmask<size>&
-Bitmask<size>::bitAND(const Bitmask<size>& mask2)
+inline BitmaskPOD<size>&
+BitmaskPOD<size>::bitAND(const BitmaskPOD<size>& mask2)
{
- bitAND(rep.data, mask2.rep.data);
+ BitmaskPOD<size>::bitAND(rep.data, mask2.rep.data);
return *this;
}
template <unsigned size>
inline void
-Bitmask<size>::bitANDC(Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::bitANDC(Uint32 data[], const Uint32 data2[])
{
BitmaskImpl::bitANDC(size,data, data2);
}
template <unsigned size>
-inline Bitmask<size>&
-Bitmask<size>::bitANDC(const Bitmask<size>& mask2)
+inline BitmaskPOD<size>&
+BitmaskPOD<size>::bitANDC(const BitmaskPOD<size>& mask2)
{
- bitANDC(rep.data, mask2.rep.data);
+ BitmaskPOD<size>::bitANDC(rep.data, mask2.rep.data);
return *this;
}
template <unsigned size>
inline void
-Bitmask<size>::bitXOR(Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::bitXOR(Uint32 data[], const Uint32 data2[])
{
BitmaskImpl::bitXOR(size,data, data2);
}
template <unsigned size>
-inline Bitmask<size>&
-Bitmask<size>::bitXOR(const Bitmask<size>& mask2)
+inline BitmaskPOD<size>&
+BitmaskPOD<size>::bitXOR(const BitmaskPOD<size>& mask2)
{
- bitXOR(rep.data, mask2.rep.data);
+ BitmaskPOD<size>::bitXOR(rep.data, mask2.rep.data);
return *this;
}
template <unsigned size>
char *
-Bitmask<size>::getText(const Uint32 data[], char* buf)
+BitmaskPOD<size>::getText(const Uint32 data[], char* buf)
{
return BitmaskImpl::getText(size, data, buf);
}
template <unsigned size>
inline char *
-Bitmask<size>::getText(char* buf) const
+BitmaskPOD<size>::getText(char* buf) const
{
- return getText(rep.data, buf);
+ return BitmaskPOD<size>::getText(rep.data, buf);
}
template <unsigned size>
inline bool
-Bitmask<size>::contains(Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::contains(Uint32 data[], const Uint32 data2[])
{
return BitmaskImpl::contains(size, data, data2);
}
template <unsigned size>
inline bool
-Bitmask<size>::contains(Bitmask<size> that)
+BitmaskPOD<size>::contains(BitmaskPOD<size> that)
{
- return contains(this->rep.data, that.rep.data);
+ return BitmaskPOD<size>::contains(this->rep.data, that.rep.data);
}
template <unsigned size>
inline bool
-Bitmask<size>::overlaps(Uint32 data[], const Uint32 data2[])
+BitmaskPOD<size>::overlaps(Uint32 data[], const Uint32 data2[])
{
return BitmaskImpl::overlaps(size, data, data2);
}
template <unsigned size>
inline bool
-Bitmask<size>::overlaps(Bitmask<size> that)
+BitmaskPOD<size>::overlaps(BitmaskPOD<size> that)
{
- return overlaps(this->rep.data, that.rep.data);
+ return BitmaskPOD<size>::overlaps(this->rep.data, that.rep.data);
}
+template <unsigned size>
+class Bitmask : public BitmaskPOD<size> {
+public:
+ Bitmask() { this->clear();}
+};
+
#endif
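
The large rename above splits the old Bitmask template in two: BitmaskPOD carries the data and all the (now explicitly qualified) static helpers but has no constructor, so it stays a plain aggregate that can sit inside signal structs and be copied bytewise, while the new Bitmask subclass at the end only adds a default constructor that calls clear(). A minimal sketch of the same pattern with hypothetical names, assuming nothing beyond what the header shows:

    // POD part: no constructors, safe to embed in plain signal structs.
    struct FlagsPOD {
      Uint32 data[2];
      void clear() { data[0] = data[1] = 0; }
    };

    // Convenience wrapper: same layout, but default-initialises itself.
    struct Flags : public FlagsPOD {
      Flags() { clear(); }
    };

A struct that embeds FlagsPOD keeps its aggregate status; local variables that want automatic clearing use Flags instead.
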
diff --git a/ndb/include/util/ConfigValues.hpp b/ndb/include/util/ConfigValues.hpp
index 3fbeedb25a0..457488e3c42 100644
--- a/ndb/include/util/ConfigValues.hpp
+++ b/ndb/include/util/ConfigValues.hpp
@@ -32,9 +32,8 @@ public:
class ConstIterator {
friend class ConfigValuesFactory;
const ConfigValues & m_cfg;
- protected:
- Uint32 m_currentSection;
public:
+ Uint32 m_currentSection;
ConstIterator(const ConfigValues&c) : m_cfg(c) { m_currentSection = 0;}
bool openSection(Uint32 key, Uint32 no);
@@ -57,6 +56,9 @@ public:
ConfigValues & m_cfg;
public:
Iterator(ConfigValues&c) : ConstIterator(c), m_cfg(c) {}
+ Iterator(ConfigValues&c, const ConstIterator& i):ConstIterator(c),m_cfg(c){
+ m_currentSection = i.m_currentSection;
+ }
bool set(Uint32 key, Uint32 value);
bool set(Uint32 key, Uint64 value);
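
The new Iterator constructor above copies m_currentSection from a ConstIterator (which is why that member was moved to the public section), so a writable iterator can be opened at whatever section a read-only scan has already found. A hedged usage sketch, assuming the iterators are nested in ConfigValues as the hunk context suggests, with cfg an existing ConfigValues instance and invented key values:

    ConfigValues::ConstIterator cit(cfg);
    if (cit.openSection(some_section_key, 0)) {
      ConfigValues::Iterator it(cfg, cit);   // writable view at the same section
      it.set(some_key, (Uint32)42);          // updates the section cit located
    }
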
diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp
index 841da513d4a..a79245868e0 100644
--- a/ndb/include/util/NdbSqlUtil.hpp
+++ b/ndb/include/util/NdbSqlUtil.hpp
@@ -17,8 +17,7 @@
#ifndef NDB_SQL_UTIL_HPP
#define NDB_SQL_UTIL_HPP
-#include <string.h>
-#include <ndb_types.h>
+#include <ndb_global.h>
#include <kernel/ndb_limits.h>
class NdbSqlUtil {
@@ -80,7 +79,7 @@ public:
Datetime, // Precision down to 1 sec (size 8 bytes)
Timespec, // Precision down to 1 nsec (size 12 bytes)
Blob, // Blob
- Clob // Text blob
+ Text // Text blob
};
Enum m_typeId;
Cmp* m_cmp; // set to NULL if cmp not implemented
@@ -125,12 +124,13 @@ private:
static Cmp cmpDatetime;
static Cmp cmpTimespec;
static Cmp cmpBlob;
- static Cmp cmpClob;
+ static Cmp cmpText;
};
inline int
NdbSqlUtil::cmp(Uint32 typeId, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
+ // XXX require size >= 1
if (size > full)
return CmpError;
switch ((Type::Enum)typeId) {
@@ -192,10 +192,38 @@ NdbSqlUtil::cmp(Uint32 typeId, const Uint32* p1, const Uint32* p2, Uint32 full,
}
return CmpUnknown;
}
- case Type::Mediumint: // XXX fix these
- break;
+ case Type::Mediumint:
+ {
+ if (size >= 1) {
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ Int32 v1 = sint3korr(u1.v);
+ Int32 v2 = sint3korr(u2.v);
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ return CmpUnknown;
+ }
case Type::Mediumunsigned:
- break;
+ {
+ if (size >= 1) {
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ Uint32 v1 = uint3korr(u1.v);
+ Uint32 v2 = uint3korr(u2.v);
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ return CmpUnknown;
+ }
case Type::Int:
{
if (size >= 1) {
@@ -287,6 +315,7 @@ NdbSqlUtil::cmp(Uint32 typeId, const Uint32* p1, const Uint32* p2, Uint32 full,
return CmpUnknown;
}
case Type::Decimal:
+ // XXX not used by MySQL or NDB
break;
case Type::Char:
{
@@ -317,10 +346,28 @@ NdbSqlUtil::cmp(Uint32 typeId, const Uint32* p1, const Uint32* p2, Uint32 full,
}
return CmpUnknown;
}
- case Type::Binary: // XXX fix these
- break;
+ case Type::Binary:
+ {
+ // compare byte wise
+ union { const Uint32* p; const char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ int k = memcmp(u1.v, u2.v, size << 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ }
case Type::Varbinary:
- break;
+ {
+ // assume correctly padded and compare byte wise
+ if (size >= 1) {
+ union { const Uint32* p; const char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // length in first 2 bytes
+ int k = memcmp(u1.v + 2, u2.v + 2, (size << 2) - 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ }
+ return CmpUnknown;
+ }
case Type::Datetime:
{
/*
@@ -331,29 +378,66 @@ NdbSqlUtil::cmp(Uint32 typeId, const Uint32* p1, const Uint32* p2, Uint32 full,
u1.p = p1;
u2.p = p2;
// skip format check
- int k = strncmp(u1.v, u2.v, 4);
+ int k = memcmp(u1.v, u2.v, 4);
if (k != 0)
- return k;
+ return k < 0 ? -1 : +1;
if (size >= 2) {
- return strncmp(u1.v + 4, u2.v + 4, 4);
+ k = memcmp(u1.v + 4, u2.v + 4, 4);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
}
}
return CmpUnknown;
}
- case Type::Timespec: // XXX fix this
- break;
- case Type::Blob: // XXX fix
- break;
- case Type::Clob:
+ case Type::Timespec:
+ {
+ /*
+ * Timespec is CC YY MM DD hh mm ss \0 NN NN NN NN
+ */
+ if (size >= 1) {
+ union { const Uint32* p; const char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // skip format check
+ int k = memcmp(u1.v, u2.v, 4);
+ if (k != 0)
+ return k < 0 ? -1 : +1;
+ if (size >= 2) {
+ k = memcmp(u1.v + 4, u2.v + 4, 4);
+ if (k != 0)
+ return k < 0 ? -1 : +1;
+ Uint32 n1 = *(const Uint32*)(u1.v + 8);
+ Uint32 n2 = *(const Uint32*)(u2.v + 8);
+ if (n1 < n2)
+ return -1;
+ if (n1 > n2)
+ return +1;
+ return 0;
+ }
+ }
+ return CmpUnknown;
+ }
+ case Type::Blob:
{
- // skip blob head, the rest is varchar
+ // skip blob head, the rest is binary
const unsigned skip = NDB_BLOB_HEAD_SIZE;
if (size >= skip + 1) {
union { const Uint32* p; const char* v; } u1, u2;
u1.p = p1 + skip;
u2.p = p2 + skip;
- // length in first 2 bytes
- int k = strncmp(u1.v + 2, u2.v + 2, ((size - skip) << 2) - 2);
+ int k = memcmp(u1.v, u2.v, (size - 1) << 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ }
+ return CmpUnknown;
+ }
+ case Type::Text:
+ {
+ // skip blob head, the rest is char
+ const unsigned skip = NDB_BLOB_HEAD_SIZE;
+ if (size >= skip + 1) {
+ union { const Uint32* p; const char* v; } u1, u2;
+ u1.p = p1 + skip;
+ u2.p = p2 + skip;
+ int k = memcmp(u1.v, u2.v, (size - 1) << 2);
return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
}
return CmpUnknown;
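
The rewritten comparison routine above stops treating fixed-size values as C strings (memcmp instead of strncmp, so embedded NUL bytes no longer cut the comparison short) and fills in the previously unimplemented Mediumint branches by decoding the 3-byte column value with sint3korr/uint3korr before comparing. A self-contained sketch of the kind of little-endian 3-byte decode those macros perform; this is an illustration, not the MySQL macro definitions:

    #include <stdint.h>

    // Read a little-endian 3-byte unsigned value.
    static inline uint32_t read_u24(const unsigned char* p) {
      return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
    }

    // Sign-extend the 24-bit value for the signed (Mediumint) case.
    static inline int32_t read_s24(const unsigned char* p) {
      uint32_t v = read_u24(p);
      return (v & 0x800000) ? (int32_t)(v | 0xFF000000u) : (int32_t)v;
    }
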
diff --git a/ndb/include/util/Properties.hpp b/ndb/include/util/Properties.hpp
index 2c30f7f7e3c..df8e2887001 100644
--- a/ndb/include/util/Properties.hpp
+++ b/ndb/include/util/Properties.hpp
@@ -55,7 +55,7 @@ public:
static const char delimiter = ':';
static const char version[];
- Properties();
+ Properties(bool case_insensitive= false);
Properties(const Properties &);
Properties(const Property *, int len);
virtual ~Properties();
diff --git a/ndb/include/util/SocketAuthenticator.hpp b/ndb/include/util/SocketAuthenticator.hpp
new file mode 100644
index 00000000000..1b82567feaa
--- /dev/null
+++ b/ndb/include/util/SocketAuthenticator.hpp
@@ -0,0 +1,39 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SOCKET_AUTHENTICATOR_HPP
+#define SOCKET_AUTHENTICATOR_HPP
+
+class SocketAuthenticator
+{
+public:
+ virtual ~SocketAuthenticator() {};
+ virtual bool client_authenticate(int sockfd) = 0;
+ virtual bool server_authenticate(int sockfd) = 0;
+};
+
+class SocketAuthSimple : public SocketAuthenticator
+{
+ const char *m_passwd;
+ const char *m_username;
+public:
+ SocketAuthSimple(const char *username, const char *passwd);
+ virtual ~SocketAuthSimple();
+ virtual bool client_authenticate(int sockfd);
+ virtual bool server_authenticate(int sockfd);
+};
+
+#endif // SOCKET_AUTHENTICATOR_HPP
diff --git a/ndb/include/util/SocketClient.hpp b/ndb/include/util/SocketClient.hpp
new file mode 100644
index 00000000000..de9a081464a
--- /dev/null
+++ b/ndb/include/util/SocketClient.hpp
@@ -0,0 +1,38 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SOCKET_CLIENT_HPP
+#define SOCKET_CLIENT_HPP
+
+#include <NdbTCP.h>
+class SocketAuthenticator;
+
+class SocketClient
+{
+ NDB_SOCKET_TYPE m_sockfd;
+ struct sockaddr_in m_servaddr;
+ unsigned short m_port;
+ char *m_server_name;
+ SocketAuthenticator *m_auth;
+public:
+ SocketClient(const char *server_name, unsigned short port, SocketAuthenticator *sa = 0);
+ ~SocketClient();
+ bool init();
+ NDB_SOCKET_TYPE connect();
+ bool close();
+};
+
+#endif // SOCKET_CLIENT_HPP
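
The two new headers above introduce a pluggable authenticator interface and a small TCP client wrapper that carries one. How they are wired together is not shown here, so the following is only a hedged usage sketch: the host, port and credentials are invented, and the headers do not say whether connect() drives the authenticator or who owns it.

    SocketAuthenticator* auth = new SocketAuthSimple("ndb", "");   // placeholder credentials
    SocketClient client("mgmd.example.com", 2200, auth);           // invented host/port
    if (client.init()) {
      NDB_SOCKET_TYPE sockfd = client.connect();   // presumably authenticates via auth
      // ... use sockfd ...
      client.close();
    }
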
diff --git a/ndb/src/common/debugger/DebuggerNames.cpp b/ndb/src/common/debugger/DebuggerNames.cpp
index ebe94a6059f..2142138e435 100644
--- a/ndb/src/common/debugger/DebuggerNames.cpp
+++ b/ndb/src/common/debugger/DebuggerNames.cpp
@@ -29,10 +29,11 @@ static const char * localBlockNames[NO_OF_BLOCKS];
static
int
initSignalNames(const char * dst[], const GsnName src[], unsigned short len){
- for(int i = 0; i<=MAX_GSN; i++)
+ int i;
+ for(i = 0; i<=MAX_GSN; i++)
dst[i] = 0;
- for(int i = 0; i<len; i++){
+ for(i = 0; i<len; i++){
unsigned short gsn = src[i].gsn;
const char * name = src[i].name;
@@ -54,10 +55,11 @@ int
initSignalPrinters(SignalDataPrintFunction dst[],
const NameFunctionPair src[],
unsigned short len){
- for(int i = 0; i<=MAX_GSN; i++)
+ int i;
+ for(i = 0; i<=MAX_GSN; i++)
dst[i] = 0;
- for(int i = 0; i<len; i++){
+ for(i = 0; i<len; i++){
unsigned short gsn = src[i].gsn;
SignalDataPrintFunction fun = src[i].function;
@@ -79,10 +81,11 @@ int
initBlockNames(const char * dst[],
const BlockName src[],
unsigned len){
- for(int i = 0; i<NO_OF_BLOCKS; i++)
+ int i;
+ for(i = 0; i<NO_OF_BLOCKS; i++)
dst[i] = 0;
- for(unsigned i = 0; i<len; i++){
+ for(i = 0; i<len; i++){
const int index = src[i].number - MIN_BLOCK_NO;
if(index < 0 && index >= NO_OF_BLOCKS || dst[index] != 0){
fprintf(stderr,
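
The three hunks above hoist the loop counter out of the for statements. Each function declared i once per loop, which trips over the old pre-standard for-scope rules (and their compiler workarounds) where the variable leaks out of the first loop and conflicts with the second declaration. A minimal sketch of the rewrite pattern; the function name is invented and MAX_GSN is the constant used in the file above:

    static void clear_and_fill(const char* dst[], unsigned len) {
      int i;                          // one declaration shared by both loops
      for (i = 0; i <= MAX_GSN; i++)
        dst[i] = 0;
      for (i = 0; i < (int)len; i++) {
        /* copy entries from the source table into dst */
      }
    }
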
diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp
index dd957d67383..50c3b778731 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/ndb/src/common/debugger/EventLogger.cpp
@@ -135,7 +135,7 @@ EventLogger::defEventLogMatrixSize = sizeof(EventLogger::defEventLogMatrix)/
*/
const EventLogger::EventCategoryName EventLogger::eventCategoryNames[] = {
{ LogLevel::llStartUp, "STARTUP" },
- { LogLevel::llStatistic, "STATISTIC" },
+ { LogLevel::llStatistic, "STATISTICS" },
{ LogLevel::llCheckpoint, "CHECKPOINT" },
{ LogLevel::llNodeRestart, "NODERESTART" },
{ LogLevel::llConnection, "CONNECTION" },
@@ -1303,14 +1303,15 @@ bool
EventLogger::matchEventCategory(const char * str,
LogLevel::EventCategory * cat,
bool exactMatch){
+ unsigned i;
if(cat == 0 || str == 0)
return false;
char * tmp = strdup(str);
- for(size_t i = 0; i<strlen(tmp); i++)
+ for(i = 0; i<strlen(tmp); i++)
tmp[i] = toupper(tmp[i]);
- for(Uint32 i = 0; i<noOfEventCategoryNames; i++){
+ for(i = 0; i<noOfEventCategoryNames; i++){
if(strcmp(tmp, eventCategoryNames[i].name) == 0){
* cat = eventCategoryNames[i].category;
free(tmp);
@@ -1350,15 +1351,6 @@ EventLogger::EventLogger() : Logger(), m_logLevel(), m_filterLevel(15)
EventLogger::~EventLogger()
{
-
-}
-
-bool
-EventLogger::open()
-{
- char clusterLog[128];
- NdbConfig_ClusterLogFileName(clusterLog, 128);
- return open(clusterLog);
}
bool
diff --git a/ndb/src/common/debugger/SignalLoggerManager.cpp b/ndb/src/common/debugger/SignalLoggerManager.cpp
index 3839a348222..d642ed09a68 100644
--- a/ndb/src/common/debugger/SignalLoggerManager.cpp
+++ b/ndb/src/common/debugger/SignalLoggerManager.cpp
@@ -488,31 +488,6 @@ SignalLoggerManager::printLinearSection(FILE * output,
}
void
-SignalLoggerManager::printSegmentedSection(FILE * output,
- const SignalHeader & sh,
- const SegmentedSectionPtr ptr[3],
- unsigned i)
-{
- fprintf(output, "SECTION %u type=segmented", i);
- if (i >= 3) {
- fprintf(output, " *** invalid ***\n");
- return;
- }
- const Uint32 len = ptr[i].sz;
- SectionSegment * ssp = ptr[i].p;
- Uint32 pos = 0;
- fprintf(output, " size=%u\n", (unsigned)len);
- while (pos < len) {
- if (pos > 0 && pos % SectionSegment::DataLength == 0) {
- ssp = g_sectionSegmentPool.getPtr(ssp->m_nextSegment);
- }
- printDataWord(output, pos, ssp->theData[pos % SectionSegment::DataLength]);
- }
- if (len > 0)
- putc('\n', output);
-}
-
-void
SignalLoggerManager::printDataWord(FILE * output, Uint32 & pos, const Uint32 data)
{
const char* const hex = "0123456789abcdef";
diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index a0e0195adad..7e7bf87e2db 100644
--- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -48,6 +48,7 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, CustomTriggerId, CustomTriggerId),
DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE),
DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen),
+ DTIMAP(Table, FragmentCount, FragmentCount),
DTIBREAK(AttributeName)
};
@@ -128,6 +129,7 @@ DictTabInfo::Table::init(){
CustomTriggerId = RNIL;
FrmLen = 0;
memset(FrmData, 0, sizeof(FrmData));
+ FragmentCount = 0;
}
void
diff --git a/ndb/src/common/debugger/signaldata/LCP.cpp b/ndb/src/common/debugger/signaldata/LCP.cpp
index 825659d13b3..6b4bb13e2cd 100644
--- a/ndb/src/common/debugger/signaldata/LCP.cpp
+++ b/ndb/src/common/debugger/signaldata/LCP.cpp
@@ -25,7 +25,8 @@ printSTART_LCP_REQ(FILE * output, const Uint32 * theData,
const StartLcpReq * const sig = (StartLcpReq *) theData;
- char buf1[sig->participatingDIH.TextLength+1], buf2[sig->participatingLQH.TextLength+1];
+ char buf1[8*_NDB_NODE_BITMASK_SIZE+1];
+ char buf2[8*_NDB_NODE_BITMASK_SIZE+1];
fprintf(output,
" Sender: %d LcpId: %d\n"
" ParticipatingDIH = %s\n"
diff --git a/ndb/src/common/debugger/signaldata/Makefile.am b/ndb/src/common/debugger/signaldata/Makefile.am
index 0d6ed45dcef..0a5806e1e00 100644
--- a/ndb/src/common/debugger/signaldata/Makefile.am
+++ b/ndb/src/common/debugger/signaldata/Makefile.am
@@ -22,7 +22,7 @@ libsignaldataprint_la_SOURCES = \
CopyGCI.cpp SystemError.cpp StartRec.cpp NFCompleteRep.cpp \
FailRep.cpp DisconnectRep.cpp SignalDroppedRep.cpp \
SumaImpl.cpp NdbSttor.cpp CreateFragmentation.cpp \
- UtilLock.cpp TuxMaint.cpp TupAccess.cpp AccLock.cpp \
+ UtilLock.cpp TuxMaint.cpp AccLock.cpp \
LqhTrans.cpp ReadNodesConf.cpp CntrStart.cpp
include $(top_srcdir)/ndb/config/common.mk.am
@@ -30,3 +30,4 @@ include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
diff --git a/ndb/src/common/debugger/signaldata/ScanTab.cpp b/ndb/src/common/debugger/signaldata/ScanTab.cpp
index b4246059f6a..4b057171963 100644
--- a/ndb/src/common/debugger/signaldata/ScanTab.cpp
+++ b/ndb/src/common/debugger/signaldata/ScanTab.cpp
@@ -30,20 +30,34 @@ printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiv
fprintf(output, " apiConnectPtr: H\'%.8x\n",
sig->apiConnectPtr);
fprintf(output, " requestInfo: H\'%.8x:\n", requestInfo);
- fprintf(output, " Parallellism: %u, LockMode: %u, Holdlock: %u, RangeScan: %u\n",
- sig->getParallelism(requestInfo), sig->getLockMode(requestInfo), sig->getHoldLockFlag(requestInfo), sig->getRangeScanFlag(requestInfo));
-
+ fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Holdlock: %u, RangeScan: %u\n",
+ sig->getParallelism(requestInfo),
+ sig->getScanBatch(requestInfo),
+ sig->getLockMode(requestInfo),
+ sig->getHoldLockFlag(requestInfo),
+ sig->getRangeScanFlag(requestInfo));
+
fprintf(output, " attrLen: %d, tableId: %d, tableSchemaVer: %d\n",
sig->attrLen, sig->tableId, sig->tableSchemaVersion);
fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x) storedProcId: H\'%.8x\n",
sig->transId1, sig->transId2, sig->storedProcId);
- fprintf(output, " OperationPtr(s):\n");
- for(int i = 0; i<16; i=i+4){
- fprintf(output, " H\'%.8x, H\'%.8x, H\'%.8x, H\'%.8x\n",
- sig->apiOperationPtr[i], sig->apiOperationPtr[i+1],
- sig->apiOperationPtr[i+2], sig->apiOperationPtr[i+3]);
+ fprintf(output, " OperationPtr(s):\n ");
+ Uint32 restLen = (len - 9);
+ const Uint32 * rest = &sig->apiOperationPtr[0];
+ while(restLen >= 7){
+ fprintf(output,
+ " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n",
+ rest[0], rest[1], rest[2], rest[3],
+ rest[4], rest[5], rest[6]);
+ restLen -= 7;
+ rest += 7;
+ }
+ if(restLen > 0){
+ for(Uint32 i = 0; i<restLen; i++)
+ fprintf(output, " H\'%.8x", rest[i]);
+ fprintf(output, "\n");
}
return false;
}
@@ -60,51 +74,28 @@ printSCANTABCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recei
fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n",
sig->transId1, sig->transId2);
- fprintf(output, " requestInfo: H\'%.8x(Operations: %u, ScanStatus: %u(\"",
- requestInfo, sig->getOperations(requestInfo), sig->getScanStatus(requestInfo));
- switch(sig->getScanStatus(requestInfo)){
- case 0:
- fprintf(output, "ZFALSE");
- break;
- case 1:
- fprintf(output, "ZTRUE");
- break;
- case 2:
- fprintf(output, "ZCLOSED");
- break;
- default:
- fprintf(output, "UNKNOWN");
- break;
+ fprintf(output, " requestInfo: Eod: %d OpCount: %d\n",
+ ((requestInfo & ScanTabConf::EndOfData) == ScanTabConf::EndOfData),
+ (requestInfo & (~ScanTabConf::EndOfData)));
+ size_t op_count= requestInfo & (~ScanTabConf::EndOfData);
+ if(op_count){
+ fprintf(output, " Operation(s) [api tc rows len]:\n");
+ ScanTabConf::OpData * op = (ScanTabConf::OpData*)
+ (theData + ScanTabConf::SignalLength);
+ for(int i = 0; i<op_count; i++){
+ if(op->info != ScanTabConf::EndOfData)
+ fprintf(output, " [0x%x 0x%x %d %d]",
+ op->apiPtrI, op->tcPtrI,
+ ScanTabConf::getRows(op->info),
+ ScanTabConf::getLength(op->info));
+ else
+ fprintf(output, " [0x%x 0x%x eod]",
+ op->apiPtrI, op->tcPtrI);
+
+ op++;
+ }
+ fprintf(output, "\n");
}
- fprintf(output, "\"))\n");
-#if 0
- fprintf(output, " Operation(s):\n");
- for(int i = 0; i<16; i++){
- fprintf(output, " [%.2u]ix=%d l=%.2d,",
- i, sig->getIdx(sig->operLenAndIdx[i]), sig->getLen(sig->operLenAndIdx[i]));
- if (((i+1) % 4) == 0)
- fprintf(output, "\n");
- }
-#endif
- return false;
-}
-
-bool
-printSCANTABINFO(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){
-
- const ScanTabInfo * const sig = (ScanTabInfo *) theData;
-
- fprintf(output, " apiConnectPtr: H\'%.8x\n",
- sig->apiConnectPtr);
-
- fprintf(output, " Operation(s):\n");
- for(int i = 0; i<16; i++){
- fprintf(output, " [%.2u]ix=%d l=%.2d,",
- i, sig->getIdx(sig->operLenAndIdx[i]), sig->getLen(sig->operLenAndIdx[i]));
- if (((i+1) % 4) == 0)
- fprintf(output, "\n");
- }
-
return false;
}
@@ -120,8 +111,8 @@ printSCANTABREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiv
sig->transId1, sig->transId2);
fprintf(output, " Errorcode: %u\n", sig->errorCode);
-
- // fprintf(output, " sendScanNextReqWithClose: %u\n", sig->sendScanNextReqWithClose);
+
+ fprintf(output, " closeNeeded: %u\n", sig->closeNeeded);
return false;
}
@@ -147,13 +138,21 @@ printSCANNEXTREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recei
if(receiverBlockNo == DBTC){
const ScanNextReq * const sig = (ScanNextReq *) theData;
- fprintf(output, " aipConnectPtr: H\'%.8x\n",
+ fprintf(output, " apiConnectPtr: H\'%.8x\n",
sig->apiConnectPtr);
- fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n",
+ fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x) ",
sig->transId1, sig->transId2);
fprintf(output, " Stop this scan: %u\n", sig->stopScan);
+
+ const Uint32 * ops = theData + ScanNextReq::SignalLength;
+ if(len > ScanNextReq::SignalLength){
+ fprintf(output, " tcFragPtr(s): ");
+ for(size_t i = ScanNextReq::SignalLength; i<len; i++)
+ fprintf(output, " 0x%x", * ops++);
+ fprintf(output, "\n");
+ }
}
if (receiverBlockNo == DBLQH){
return printSCANFRAGNEXTREQ(output, theData, len, receiverBlockNo);
diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
index d49e316ad38..65351663789 100644
--- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
@@ -73,7 +73,6 @@
#include <signaldata/CntrStart.hpp>
#include <signaldata/ReadNodesConf.hpp>
#include <signaldata/TuxMaint.hpp>
-#include <signaldata/TupAccess.hpp>
#include <signaldata/AccLock.hpp>
bool printCONTINUEB(FILE *, const Uint32 *, Uint32, Uint16);
@@ -249,14 +248,15 @@ SignalDataPrintFunctions[] = {
,{ GSN_READ_NODESCONF, printREAD_NODES_CONF }
,{ GSN_TUX_MAINT_REQ, printTUX_MAINT_REQ }
- ,{ GSN_TUP_READ_ATTRS, printTUP_READ_ATTRS }
- ,{ GSN_TUP_QUERY_TH, printTUP_QUERY_TH }
- ,{ GSN_TUP_STORE_TH, printTUP_STORE_TH }
,{ GSN_ACC_LOCKREQ, printACC_LOCKREQ }
,{ GSN_LQH_TRANSCONF, printLQH_TRANSCONF }
};
const unsigned short NO_OF_PRINT_FUNCTIONS = sizeof(SignalDataPrintFunctions)/sizeof(NameFunctionPair);
-
-
+template class Bitmask<1>;
+template class Bitmask<2>;
+template class Bitmask<4>;
+template struct BitmaskPOD<1>;
+template struct BitmaskPOD<2>;
+template struct BitmaskPOD<4>;
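
The template class Bitmask<N> / template struct BitmaskPOD<N> lines added above are explicit instantiations: they force the compiler to emit every member of those template specialisations in this translation unit, so other objects can link against them without having the definitions visible. A generic sketch of the idiom, with hypothetical names:

    // Declaration visible to all users (normally in a header).
    template <unsigned size> struct Counter {
      unsigned v[size];
      void clear();                 // defined below, not in the header
    };

    // Out-of-line definition, normally in exactly one .cpp file.
    template <unsigned size> void Counter<size>::clear() {
      for (unsigned i = 0; i < size; i++) v[i] = 0;
    }

    template struct Counter<2>;     // explicit instantiation: emits Counter<2>::clear() here
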
diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 377a588dbb0..9d4d5bdf6f5 100644
--- a/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -32,7 +32,6 @@ const GsnName SignalNames [] = {
,{ GSN_READCONF, "READCONF" }
,{ GSN_SCAN_NEXTREQ, "SCAN_NEXTREQ" }
,{ GSN_SCAN_TABCONF, "SCAN_TABCONF" }
- ,{ GSN_SCAN_TABINFO, "SCAN_TABINFO" }
,{ GSN_SCAN_TABREF, "SCAN_TABREF" }
,{ GSN_SCAN_TABREQ, "SCAN_TABREQ" }
,{ GSN_TC_COMMITCONF, "TC_COMMITCONF" }
@@ -641,9 +640,6 @@ const GsnName SignalNames [] = {
,{ GSN_TUX_MAINT_REQ, "TUX_MAINT_REQ" }
,{ GSN_TUX_MAINT_CONF, "TUX_MAINT_CONF" }
,{ GSN_TUX_MAINT_REF, "TUX_MAINT_REF" }
- ,{ GSN_TUP_READ_ATTRS, "TUP_READ_ATTRS" }
- ,{ GSN_TUP_QUERY_TH, "TUP_QUERY_TH" }
- ,{ GSN_TUP_STORE_TH, "TUP_STORE_TH" }
,{ GSN_TUX_BOUND_INFO, "TUX_BOUND_INFO" }
,{ GSN_ACC_LOCKREQ, "ACC_LOCKREQ" }
diff --git a/ndb/src/common/debugger/signaldata/TupAccess.cpp b/ndb/src/common/debugger/signaldata/TupAccess.cpp
deleted file mode 100644
index e94d4636cf5..00000000000
--- a/ndb/src/common/debugger/signaldata/TupAccess.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <signaldata/TupAccess.hpp>
-#include <SignalLoggerManager.hpp>
-#include <AttributeHeader.hpp>
-
-bool
-printTUP_READ_ATTRS(FILE* output, const Uint32* theData, Uint32 len, Uint16 rbn)
-{
- const TupReadAttrs* const sig = (const TupReadAttrs*)theData;
- if (sig->errorCode == RNIL)
- fprintf(output, " errorCode=RNIL flags=%x\n", sig->requestInfo);
- else
- fprintf(output, " errorCode=%u flags=%x\n", sig->errorCode, sig->requestInfo);
- fprintf(output, " table: id=%u", sig->tableId);
- fprintf(output, " fragment: id=%u ptr=0x%x\n", sig->fragId, sig->fragPtrI);
- fprintf(output, " tuple: addr=0x%x version=%u", sig->tupAddr, sig->tupVersion);
- fprintf(output, " realPage=0x%x offset=%u\n", sig->pageId, sig->pageOffset);
- const Uint32* buffer = (const Uint32*)sig + TupReadAttrs::SignalLength;
- Uint32 attrCount = buffer[0];
- bool readKeys = (sig->requestInfo & TupReadAttrs::ReadKeys);
- if (sig->errorCode == RNIL && ! readKeys ||
- sig->errorCode == 0 && readKeys) {
- fprintf(output, " input: attrCount=%u\n", attrCount);
- for (unsigned i = 0; i < attrCount; i++) {
- AttributeHeader ah(buffer[1 + i]);
- fprintf(output, " %u: attrId=%u\n", i, ah.getAttributeId());
- }
- }
- if (sig->errorCode == 0) {
- fprintf(output, " output: attrCount=%u\n", attrCount);
- Uint32 pos = 1 + attrCount;
- for (unsigned i = 0; i < attrCount; i++) {
- AttributeHeader ah(buffer[pos++]);
- fprintf(output, " %u: attrId=%u dataSize=%u\n", i, ah.getAttributeId(), ah.getDataSize());
- Uint32 next = pos + ah.getDataSize();
- Uint32 printpos = 0;
- while (pos < next) {
- SignalLoggerManager::printDataWord(output, printpos, buffer[pos]);
- pos++;
- }
- if (ah.getDataSize() > 0)
- fprintf(output, "\n");
- }
- }
- return true;
-}
-
-bool
-printTUP_QUERY_TH(FILE* output, const Uint32* theData, Uint32 len, Uint16 rbn)
-{
- const TupQueryTh* const sig = (const TupQueryTh*)theData;
- fprintf(output, "tableId = %u, fragId = %u ", sig->tableId, sig->fragId);
- fprintf(output, "tuple: addr = 0x%x version = %u\n", sig->tupAddr,
- sig->tupVersion);
- fprintf(output, "transId1 = 0x%x, transId2 = 0x%x, savePointId = %u\n",
- sig->transId1, sig->transId2, sig->savePointId);
- return true;
-}
-
-bool
-printTUP_STORE_TH(FILE* output, const Uint32* theData, Uint32 len, Uint16 rbn)
-{
- const TupStoreTh* const sig = (const TupStoreTh*)theData;
- if (sig->errorCode == RNIL)
- fprintf(output, " errorCode=RNIL\n");
- else
- fprintf(output, " errorCode=%u\n", sig->errorCode);
- fprintf(output, " table: id=%u", sig->tableId);
- fprintf(output, " fragment: id=%u ptr=0x%x\n", sig->fragId, sig->fragPtrI);
- fprintf(output, " tuple: addr=0x%x", sig->tupAddr);
- if ((sig->tupAddr & 0x1) == 0) {
- fprintf(output, " fragPage=0x%x index=%u",
- sig->tupAddr >> MAX_TUPLES_BITS,
- (sig->tupAddr & ((1 <<MAX_TUPLES_BITS) - 1)) >> 1);
- fprintf(output, " realPage=0x%x offset=%u\n", sig->pageId, sig->pageOffset);
- } else {
- fprintf(output, " cacheId=%u\n",
- sig->tupAddr >> 1);
- }
- if (sig->tupVersion != 0) {
- fprintf(output, " version=%u ***invalid***\n", sig->tupVersion);
- }
- bool showdata = true;
- switch (sig->opCode) {
- case TupStoreTh::OpRead:
- fprintf(output, " operation=Read\n");
- showdata = false;
- break;
- case TupStoreTh::OpInsert:
- fprintf(output, " operation=Insert\n");
- break;
- case TupStoreTh::OpUpdate:
- fprintf(output, " operation=Update\n");
- break;
- case TupStoreTh::OpDelete:
- fprintf(output, " operation=Delete\n");
- showdata = false;
- break;
- default:
- fprintf(output, " operation=%u ***invalid***\n", sig->opCode);
- break;
- }
- fprintf(output, " data: offset=%u size=%u", sig->dataOffset, sig->dataSize);
- if (! showdata) {
- fprintf(output, " [not printed]\n");
- } else {
- fprintf(output, "\n");
- const Uint32* buffer = (const Uint32*)sig + TupStoreTh::SignalLength;
- Uint32 pos = 0;
- while (pos < sig->dataSize)
- SignalLoggerManager::printDataWord(output, pos, buffer[sig->dataOffset + pos]);
- if (sig->dataSize > 0)
- fprintf(output, "\n");
- }
- return true;
-};
diff --git a/ndb/src/common/logger/FileLogHandler.cpp b/ndb/src/common/logger/FileLogHandler.cpp
index d13dd7b2a78..632db71db15 100644
--- a/ndb/src/common/logger/FileLogHandler.cpp
+++ b/ndb/src/common/logger/FileLogHandler.cpp
@@ -14,8 +14,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
#include <FileLogHandler.hpp>
-
#include <File.hpp>
//
@@ -146,7 +146,7 @@ FileLogHandler::createNewFile()
{
bool rc = true;
int fileNo = 1;
- char newName[MAXPATHLEN];
+ char newName[PATH_MAX];
do
{
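
MAXPATHLEN is a BSD name that is not defined on every platform, which is why the buffer above now uses the POSIX PATH_MAX (made available through the newly included ndb_global.h). A minimal sketch, assuming the target platform defines PATH_MAX in <limits.h>:

    #include <limits.h>
    char newName[PATH_MAX];   // portable upper bound for a file-system path
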
diff --git a/ndb/src/common/logger/Logger.cpp b/ndb/src/common/logger/Logger.cpp
index 9c9f1eece18..c2fdecb642b 100644
--- a/ndb/src/common/logger/Logger.cpp
+++ b/ndb/src/common/logger/Logger.cpp
@@ -350,3 +350,4 @@ Logger::log(LoggerLevel logLevel, const char* pMsg, va_list ap) const
// PRIVATE
//
+template class Vector<LogHandler*>;
diff --git a/ndb/src/common/mgmcommon/ConfigInfo.cpp b/ndb/src/common/mgmcommon/ConfigInfo.cpp
index c2b5fdabf01..552b49727fb 100644
--- a/ndb/src/common/mgmcommon/ConfigInfo.cpp
+++ b/ndb/src/common/mgmcommon/ConfigInfo.cpp
@@ -14,15 +14,26 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <NdbTCP.h>
#include "ConfigInfo.hpp"
#include <mgmapi_config_parameters.h>
+#include <ndb_limits.h>
#define MAX_LINE_LENGTH 255
#define KEY_INTERNAL 0
-
+#define MAX_INT_RNIL (RNIL - 1)
/****************************************************************************
* Section names
****************************************************************************/
+
+const ConfigInfo::AliasPair
+ConfigInfo::m_sectionNameAliases[]={
+ {"API", "MYSQLD"},
+ {"DB", "NDBD"},
+ {"MGM", "NDB_MGMD"},
+ {0, 0}
+};
+
const char*
ConfigInfo::m_sectionNames[]={
"SYSTEM",
@@ -47,24 +58,25 @@ sizeof(m_sectionNames)/sizeof(char*);
/****************************************************************************
* Section Rules declarations
****************************************************************************/
-bool transformComputer(InitConfigFileParser::Context & ctx, const char *);
-bool transformSystem(InitConfigFileParser::Context & ctx, const char *);
-bool transformExternalSystem(InitConfigFileParser::Context & ctx, const char *);
-bool transformNode(InitConfigFileParser::Context & ctx, const char *);
-bool transformExtNode(InitConfigFileParser::Context & ctx, const char *);
-bool transformConnection(InitConfigFileParser::Context & ctx, const char *);
-bool applyDefaultValues(InitConfigFileParser::Context & ctx, const char *);
-bool checkMandatory(InitConfigFileParser::Context & ctx, const char *);
-bool fixPortNumber(InitConfigFileParser::Context & ctx, const char *);
-bool fixShmkey(InitConfigFileParser::Context & ctx, const char *);
-bool checkDbConstraints(InitConfigFileParser::Context & ctx, const char *);
-bool checkConnectionConstraints(InitConfigFileParser::Context &, const char *);
-bool fixNodeHostname(InitConfigFileParser::Context & ctx, const char * data);
-bool fixHostname(InitConfigFileParser::Context & ctx, const char * data);
-bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data);
-bool fixExtConnection(InitConfigFileParser::Context & ctx, const char * data);
-bool fixDepricated(InitConfigFileParser::Context & ctx, const char *);
-bool saveInConfigValues(InitConfigFileParser::Context & ctx, const char *);
+static bool transformComputer(InitConfigFileParser::Context & ctx, const char *);
+static bool transformSystem(InitConfigFileParser::Context & ctx, const char *);
+static bool transformExternalSystem(InitConfigFileParser::Context & ctx, const char *);
+static bool transformNode(InitConfigFileParser::Context & ctx, const char *);
+static bool transformExtNode(InitConfigFileParser::Context & ctx, const char *);
+static bool transformConnection(InitConfigFileParser::Context & ctx, const char *);
+static bool applyDefaultValues(InitConfigFileParser::Context & ctx, const char *);
+static bool checkMandatory(InitConfigFileParser::Context & ctx, const char *);
+static bool fixPortNumber(InitConfigFileParser::Context & ctx, const char *);
+static bool fixShmkey(InitConfigFileParser::Context & ctx, const char *);
+static bool checkDbConstraints(InitConfigFileParser::Context & ctx, const char *);
+static bool checkConnectionConstraints(InitConfigFileParser::Context &, const char *);
+static bool checkTCPConstraints(InitConfigFileParser::Context &, const char *);
+static bool fixNodeHostname(InitConfigFileParser::Context & ctx, const char * data);
+static bool fixHostname(InitConfigFileParser::Context & ctx, const char * data);
+static bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data);
+static bool fixExtConnection(InitConfigFileParser::Context & ctx, const char * data);
+static bool fixDepricated(InitConfigFileParser::Context & ctx, const char *);
+static bool saveInConfigValues(InitConfigFileParser::Context & ctx, const char *);
const ConfigInfo::SectionRule
ConfigInfo::m_SectionRules[] = {
@@ -83,9 +95,6 @@ ConfigInfo::m_SectionRules[] = {
{ "SCI", transformConnection, 0 },
{ "OSE", transformConnection, 0 },
- { "TCP", fixPortNumber, 0 },
- //{ "SHM", fixShmKey, 0 },
-
{ "DB", fixNodeHostname, 0 },
{ "API", fixNodeHostname, 0 },
{ "MGM", fixNodeHostname, 0 },
@@ -106,6 +115,10 @@ ConfigInfo::m_SectionRules[] = {
{ "OSE", fixHostname, "HostName1" },
{ "OSE", fixHostname, "HostName2" },
+ { "TCP", fixPortNumber, 0 }, // has to come after fixHostName
+ { "SHM", fixPortNumber, 0 }, // has to come after fixHostName
+ //{ "SHM", fixShmKey, 0 },
+
/**
* fixExtConnection must be after fixNodeId
*/
@@ -128,7 +141,9 @@ ConfigInfo::m_SectionRules[] = {
{ "SCI", checkConnectionConstraints, 0 },
{ "OSE", checkConnectionConstraints, 0 },
-
+ { "TCP", checkTCPConstraints, "HostName1" },
+ { "TCP", checkTCPConstraints, "HostName2" },
+
{ "*", checkMandatory, 0 },
{ "DB", saveInConfigValues, 0 },
@@ -146,13 +161,21 @@ const int ConfigInfo::m_NoOfRules = sizeof(m_SectionRules)/sizeof(SectionRule);
/****************************************************************************
* Config Rules declarations
****************************************************************************/
-bool addNodeConnections(Vector<ConfigInfo::ConfigRuleSection>&sections,
- struct InitConfigFileParser::Context &ctx,
- const char * ruleData);
+static bool add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data);
+static bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data);
+static bool check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data);
const ConfigInfo::ConfigRule
ConfigInfo::m_ConfigRules[] = {
- { addNodeConnections, 0 },
+ { add_node_connections, 0 },
+ { add_server_ports, 0 },
+ { check_node_vs_replicas, 0 },
{ 0, 0 }
};
@@ -166,24 +189,8 @@ struct DepricationTransform {
static
const DepricationTransform f_deprication[] = {
- { "DB", "NoOfIndexPages", "IndexMemory", 0, 8192 }
- ,{ "DB", "MemorySpaceIndexes", "IndexMemory", 0, 8192 }
- ,{ "DB", "NoOfDataPages", "DataMemory", 0, 8192 }
- ,{ "DB", "MemorySpaceTuples", "DataMemory", 0, 8192 }
- ,{ "DB", "TransactionInactiveTimeBeforeAbort", "TransactionInactiveTimeout",
- 0, 1 }
- ,{ "TCP", "ProcessId1", "NodeId1", 0, 1}
- ,{ "TCP", "ProcessId2", "NodeId2", 0, 1}
- ,{ "TCP", "SendBufferSize", "SendBufferMemory", 0, 16384 }
- ,{ "TCP", "MaxReceiveSize", "ReceiveBufferMemory", 0, 16384 }
-
- ,{ "SHM", "ProcessId1", "NodeId1", 0, 1}
- ,{ "SHM", "ProcessId2", "NodeId2", 0, 1}
- ,{ "SCI", "ProcessId1", "NodeId1", 0, 1}
- ,{ "SCI", "ProcessId2", "NodeId2", 0, 1}
- ,{ "OSE", "ProcessId1", "NodeId1", 0, 1}
- ,{ "OSE", "ProcessId2", "NodeId2", 0, 1}
- ,{ 0, 0, 0, 0, 0}
+ { "DB", "Discless", "Diskless", 0, 1 },
+ { 0, 0, 0, 0, 0}
};
/**
@@ -241,27 +248,14 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
KEY_INTERNAL,
"HostName",
"COMPUTER",
- "Hostname of computer (e.g. alzato.com)",
+ "Hostname of computer (e.g. mysql.com)",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
- {
- CFG_NODE_BYTE_ORDER,
- "ByteOrder",
- "COMPUTER",
- "Not yet implemented",
- ConfigInfo::USED, // Actually not used, but since it is MANDATORY,
- // we don't want any warning message
- false,
- ConfigInfo::STRING,
- MANDATORY, // Big == 0, Little == 1, NotSet == 2 (?)
- 0,
- 1 },
-
/****************************************************************************
* SYSTEM
***************************************************************************/
@@ -311,7 +305,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
0,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SYS_CONFIG_GENERATION,
@@ -323,7 +317,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
0,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/***************************************************************************
* DB
@@ -350,7 +344,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_SYSTEM,
@@ -362,7 +356,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_ID,
@@ -377,6 +371,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
(MAX_NODES - 1) },
{
+ KEY_INTERNAL,
+ "ServerPort",
+ "DB",
+ "Port used to setup transporter",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ UNDEFINED,
+ 1,
+ 65535 },
+
+ {
CFG_DB_NO_REPLICAS,
"NoOfReplicas",
"DB",
@@ -386,7 +392,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
1,
- 2 },
+ 4 },
{
CFG_DB_NO_ATTRIBUTES,
@@ -398,7 +404,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1000,
32,
- 4096 },
+ MAX_INT_RNIL/16 },
{
CFG_DB_NO_TABLES,
@@ -408,9 +414,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 32,
+ 128,
8,
- 128 },
+ MAX_INT_RNIL },
{
CFG_DB_NO_INDEXES,
@@ -422,7 +428,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
128,
0,
- 2048 },
+ MAX_INT_RNIL },
{
CFG_DB_NO_INDEX_OPS,
@@ -434,8 +440,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
8192,
0,
- 1000000
- },
+ MAX_INT_RNIL
+ },
{
CFG_DB_NO_TRIGGERS,
@@ -447,7 +453,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
768,
0,
- 2432 },
+ MAX_INT_RNIL },
{
CFG_DB_NO_TRIGGER_OPS,
@@ -457,9 +463,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1000,
+ 4000,
0,
- 1000000 },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -471,7 +477,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_NO_SAVE_MSGS,
@@ -483,7 +489,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
25,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_MEMLOCK,
@@ -495,33 +501,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
false,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "SleepWhenIdle",
- "DB",
- 0,
- ConfigInfo::DEPRICATED,
- true,
- ConfigInfo::BOOL,
- true,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
- KEY_INTERNAL,
- "NoOfSignalsToExecuteBetweenCommunicationInterfacePoll",
- "DB",
- 0,
- ConfigInfo::DEPRICATED,
- true,
- ConfigInfo::INT,
- 20,
- 1,
- 0x7FFFFFFF },
-
- {
CFG_DB_WATCHDOG_INTERVAL,
"TimeBetweenWatchDogCheck",
"DB",
@@ -529,9 +511,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 4000,
+ 6000,
70,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_STOP_ON_ERROR,
@@ -543,7 +525,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
true,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_STOP_ON_ERROR_INSERT,
@@ -565,9 +547,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 8192,
+ 32768,
32,
- 1000000 },
+ MAX_INT_RNIL },
{
CFG_DB_NO_TRANSACTIONS,
@@ -579,7 +561,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
4096,
32,
- 1000000 },
+ MAX_INT_RNIL },
{
CFG_DB_NO_SCANS,
@@ -589,7 +571,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 25,
+ 256,
2,
500 },
@@ -601,9 +583,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1024000,
+ (1024 * 1024),
1024,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_INDEX_MEM,
@@ -613,33 +595,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT64,
- 3000 * 8192,
+ 3 * 1024 * 8192,
128 * 8192,
- ((Uint64)192000) * ((Uint64)8192) },
-
- {
- KEY_INTERNAL,
- "NoOfIndexPages",
- "DB",
- "IndexMemory",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- 3000,
- 128,
- 192000 },
-
- {
- KEY_INTERNAL,
- "MemorySpaceIndexes",
- "DB",
- "IndexMemory",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 128,
- 192000 },
+ ((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },
{
CFG_DB_DATA_MEM,
@@ -649,35 +607,11 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT64,
- 10000 * 8192,
+ 10 * 1024 * 8192,
128 * 8192,
- ((Uint64)400000) * ((Uint64)8192) },
+ ((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },
{
- KEY_INTERNAL,
- "NoOfDataPages",
- "DB",
- "DataMemory",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- 10000,
- 128,
- 400000 },
-
- {
- KEY_INTERNAL,
- "MemorySpaceTuples",
- "DB",
- "DataMemory",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 128,
- 400000 },
-
- {
CFG_DB_START_PARTIAL_TIMEOUT,
"StartPartialTimeout",
"DB",
@@ -709,23 +643,11 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 5*60000,
+ 0,
0,
~0 },
{
- KEY_INTERNAL,
- "TimeToWaitAlive",
- "DB",
- "Start{Partial/Partitioned/Failure}Time",
- ConfigInfo::DEPRICATED,
- true,
- ConfigInfo::INT,
- 25,
- 2,
- 4000 },
-
- {
CFG_DB_HEARTBEAT_INTERVAL,
"HeartbeatIntervalDbDb",
"DB",
@@ -735,7 +657,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1500,
10,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_API_HEARTBEAT_INTERVAL,
@@ -747,7 +669,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1500,
100,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_LCP_INTERVAL,
@@ -783,7 +705,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
8,
1,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -808,7 +730,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1000,
1000,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
@@ -822,9 +744,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 3000,
+ MAX_INT_RNIL,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
@@ -837,33 +759,21 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 3000,
+ 1200,
50,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
- "TransactionInactiveTimeBeforeAbort",
- "DB",
- "TransactionInactiveTimeout",
- ConfigInfo::DEPRICATED,
- true,
- ConfigInfo::INT,
- 3000,
- 20,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
"NoOfDiskPagesToDiskDuringRestartTUP",
"DB",
"?",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 50,
+ 40,
1,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -873,9 +783,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 10,
+ 40,
1,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -885,9 +795,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 25,
+ 20,
1,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -897,14 +807,14 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 5,
+ 20,
1,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_DISCLESS,
- "Discless",
+ "Diskless",
"DB",
"Run wo/ disk",
ConfigInfo::USED,
@@ -913,6 +823,20 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
0,
0,
1},
+
+ {
+ KEY_INTERNAL,
+ "Discless",
+ "DB",
+ "Diskless",
+ ConfigInfo::DEPRICATED,
+ true,
+ ConfigInfo::BOOL,
+ 0,
+ 0,
+ 1},
+
+
{
CFG_DB_ARBIT_TIMEOUT,
@@ -922,9 +846,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1000,
+ 3000,
10,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_FILESYSTEM_PATH,
@@ -936,7 +860,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_LOGLEVEL_STARTUP,
@@ -1059,7 +983,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
(2 * 1024 * 1024) + (2 * 1024 * 1024), // sum of BackupDataBufferSize and BackupLogBufferSize
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_BACKUP_DATA_BUFFER_MEM,
@@ -1071,7 +995,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
(2 * 1024 * 1024), // remember to change BackupMemory
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_BACKUP_LOG_BUFFER_MEM,
@@ -1083,7 +1007,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
(2 * 1024 * 1024), // remember to change BackupMemory
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_DB_BACKUP_WRITE_SIZE,
@@ -1095,7 +1019,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
32768,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/***************************************************************************
* REP
@@ -1122,7 +1046,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_SYSTEM,
@@ -1134,7 +1058,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_ID,
@@ -1158,7 +1082,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_REP_HEARTBEAT_INTERVAL,
@@ -1170,7 +1094,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
3000,
100,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/***************************************************************************
* API
@@ -1197,7 +1121,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_SYSTEM,
@@ -1209,7 +1133,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_ID,
@@ -1231,9 +1155,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::STRING,
- MANDATORY,
0,
- 0x7FFFFFFF },
+ 0,
+ MAX_INT_RNIL },
{
CFG_NODE_ARBIT_RANK,
@@ -1243,7 +1167,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 2,
+ 0,
0,
2 },
@@ -1257,7 +1181,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
0,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/****************************************************************************
* MGM
@@ -1284,7 +1208,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_SYSTEM,
@@ -1296,7 +1220,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_ID,
@@ -1320,7 +1244,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
0,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -1330,9 +1254,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::STRING,
- MANDATORY,
0,
- 0x7FFFFFFF },
+ 0,
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -1344,7 +1268,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
100,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_MGM_PORT,
@@ -1354,9 +1278,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 2200,
+ NDB_BASE_PORT,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
KEY_INTERNAL,
@@ -1368,7 +1292,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
2199,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_NODE_ARBIT_RANK,
@@ -1378,7 +1302,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 2,
+ 1,
0,
2 },
@@ -1392,7 +1316,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
0,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/****************************************************************************
* TCP
@@ -1419,7 +1343,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_TCP_HOSTNAME_2,
@@ -1431,7 +1355,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_1,
@@ -1443,19 +1367,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId1",
- "TCP",
- "NodeId1",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_2,
@@ -1467,43 +1379,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId2",
- "TCP",
- "NodeId2",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "IpAddress1",
- "TCP",
- "HostName1",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::STRING,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "IpAddress2",
- "TCP",
- "HostName2",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::STRING,
- UNDEFINED,
- 0,
- 0 },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1515,7 +1391,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
true,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
@@ -1528,19 +1404,19 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
false,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
- CFG_TCP_SERVER_PORT,
+ CFG_CONNECTION_SERVER_PORT,
"PortNumber",
"TCP",
"Port used for this transporter",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 2202,
+ MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_TCP_SEND_BUFFER_SIZE,
@@ -1552,21 +1428,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
16 * 16384,
1 * 16384,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
- KEY_INTERNAL,
- "SendBufferSize",
- "TCP",
- "SendBufferMemory",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- 16,
- 1,
- 0x7FFFFFFF },
-
- {
CFG_TCP_RECEIVE_BUFFER_SIZE,
"ReceiveBufferMemory",
"TCP",
@@ -1576,19 +1440,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
4 * 16384,
1 * 16384,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "MaxReceiveSize",
- "TCP",
- "ReceiveBufferMemory",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- 4,
- 1,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_TCP_PROXY,
@@ -1603,19 +1455,6 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
0 },
{
- KEY_INTERNAL,
- "Compression",
- "TCP",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::BOOL,
- false,
- 0,
- 0x7FFFFFFF },
-
-
- {
CFG_CONNECTION_NODE_1_SYSTEM,
"NodeId1_System",
"TCP",
@@ -1625,7 +1464,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -1637,7 +1476,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/****************************************************************************
@@ -1665,20 +1504,20 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
- KEY_INTERNAL,
- "ProcessId1",
+ CFG_CONNECTION_SERVER_PORT,
+ "PortNumber",
"SHM",
- "NodeId1",
- ConfigInfo::DEPRICATED,
+ "Port used for this transporter",
+ ConfigInfo::USED,
false,
- ConfigInfo::STRING,
- UNDEFINED,
+ ConfigInfo::INT,
+ MANDATORY,
0,
- 0x7FFFFFFF },
-
+ MAX_INT_RNIL },
+
{
CFG_CONNECTION_NODE_2,
"NodeId2",
@@ -1689,19 +1528,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
MANDATORY,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId2",
- "SHM",
- "NodeId1",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::STRING,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1713,7 +1540,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
false,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
@@ -1726,7 +1553,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
true,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SHM_KEY,
@@ -1738,7 +1565,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SHM_BUFFER_MEM,
@@ -1750,21 +1577,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1048576,
4096,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
- KEY_INTERNAL,
- "Compression",
- "SHM",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::BOOL,
- false,
- 0,
- 0x7FFFFFFF },
-
- {
CFG_CONNECTION_NODE_1_SYSTEM,
"NodeId1_System",
"SHM",
@@ -1774,7 +1589,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -1786,7 +1601,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/****************************************************************************
* SCI
@@ -1813,19 +1628,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId1",
- "SCI",
- "NodeId1",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_2,
@@ -1837,19 +1640,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId2",
- "SCI",
- "NodeId2",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SCI_ID_0,
@@ -1861,7 +1652,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SCI_ID_1,
@@ -1873,7 +1664,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1885,7 +1676,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
true,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_CHECKSUM,
@@ -1897,7 +1688,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
false,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SCI_SEND_LIMIT,
@@ -1909,7 +1700,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
2048,
512,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_SCI_BUFFER_MEM,
@@ -1921,67 +1712,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1048576,
262144,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "Node1_NoOfAdapters",
- "SCI",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "Node2_NoOfAdapters",
- "SCI",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "Node1_Adapter",
- "SCI",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "Node2_Adapter",
- "SCI",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "Compression",
- "SCI",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::BOOL,
- false,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_1_SYSTEM,
@@ -1993,7 +1724,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -2005,7 +1736,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
/****************************************************************************
* OSE
@@ -2032,7 +1763,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_OSE_HOSTNAME_2,
@@ -2044,7 +1775,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_1,
@@ -2056,19 +1787,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
MANDATORY,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId1",
- "OSE",
- "NodeId1",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- UNDEFINED,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_2,
@@ -2080,19 +1799,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
UNDEFINED,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "ProcessId2",
- "OSE",
- "NodeId2",
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::INT,
- MANDATORY,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -2104,7 +1811,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
true,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_CHECKSUM,
@@ -2116,7 +1823,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::BOOL,
false,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_OSE_PRIO_A_SIZE,
@@ -2128,7 +1835,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1000,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_OSE_PRIO_B_SIZE,
@@ -2140,7 +1847,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
1000,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_OSE_RECEIVE_ARRAY_SIZE,
@@ -2152,19 +1859,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::INT,
10,
0,
- 0x7FFFFFFF },
-
- {
- KEY_INTERNAL,
- "Compression",
- "OSE",
- 0,
- ConfigInfo::DEPRICATED,
- false,
- ConfigInfo::BOOL,
- false,
- 0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
{
CFG_CONNECTION_NODE_1_SYSTEM,
@@ -2176,7 +1871,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL},
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -2188,7 +1883,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::STRING,
UNDEFINED,
0,
- 0x7FFFFFFF },
+ MAX_INT_RNIL },
};
const int ConfigInfo::m_NoOfParams = sizeof(m_ParamInfo) / sizeof(ParamInfo);
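
Throughout the parameter table above, the hard-coded upper bound 0x7FFFFFFF is replaced by the MAX_INT_RNIL constant, several defaults change (ArbitrationTimeout 1000 to 3000, the management port to NDB_BASE_PORT, connection PortNumber to MANDATORY), and the old aliases (Discless, ProcessId1/2, IpAddress1/2, Compression, SendBufferSize, MaxReceiveSize) are either dropped or kept only as DEPRICATED redirects. A minimal sketch of the table-driven range check this array feeds; the struct fields, the example rows, and the MAX_INT_RNIL value are illustrative assumptions, not copied from the hunk:

    #include <cstdint>
    #include <cstdio>

    // Assumed sentinel meaning "no practical upper limit" for 32-bit parameters;
    // the real constant is defined in the NDB headers.
    static const uint64_t MAX_INT_RNIL = 0xFFFFFEFF;

    struct ParamInfo {          // trimmed-down sketch of one table row
      const char *fname;        // parameter name as written in config.ini
      const char *section;      // section it belongs to (DB, MGM, API, TCP, ...)
      uint64_t    def;          // default value
      uint64_t    min;          // lower bound
      uint64_t    max;          // upper bound (MAX_INT_RNIL = effectively unbounded)
    };

    static const ParamInfo params[] = {
      { "ArbitrationTimeout", "DB",  3000, 10, MAX_INT_RNIL },
      { "ArbitrationRank",    "API", 0,    0,  2            },
    };

    // Table-driven range check, the same idea ConfigInfo::verify() applies
    // to the real entries.
    static bool verify(const ParamInfo &p, uint64_t value) {
      return value >= p.min && value <= p.max;
    }

    int main() {
      std::printf("%d\n", verify(params[0], 5));      // 0: below minimum
      std::printf("%d\n", verify(params[0], 60000));  // 1: inside range
      return 0;
    }
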
@@ -2197,22 +1892,21 @@ const int ConfigInfo::m_NoOfParams = sizeof(m_ParamInfo) / sizeof(ParamInfo);
/****************************************************************************
* Ctor
****************************************************************************/
-inline void require(bool v) { if(!v) abort();}
+static void require(bool v) { if(!v) abort();}
-ConfigInfo::ConfigInfo() {
+ConfigInfo::ConfigInfo()
+ : m_info(true), m_systemDefaults(true)
+{
+ int i;
Properties *section;
const Properties *oldpinfo;
- m_info.setCaseInsensitiveNames(true);
- m_systemDefaults.setCaseInsensitiveNames(true);
-
- for (int i=0; i<m_NoOfParams; i++) {
+ for (i=0; i<m_NoOfParams; i++) {
const ParamInfo & param = m_ParamInfo[i];
// Create new section if it did not exist
if (!m_info.getCopy(param._section, &section)) {
- Properties newsection;
- newsection.setCaseInsensitiveNames(true);
+ Properties newsection(true);
m_info.put(param._section, &newsection);
}
@@ -2220,7 +1914,7 @@ ConfigInfo::ConfigInfo() {
m_info.getCopy(param._section, &section);
// Create pinfo (parameter info) entry
- Properties pinfo;
+ Properties pinfo(true);
pinfo.put("Id", param._paramId);
pinfo.put("Fname", param._fname);
pinfo.put("Description", param._description);
@@ -2248,8 +1942,7 @@ ConfigInfo::ConfigInfo() {
if(param._type != ConfigInfo::SECTION){
Properties * p;
if(!m_systemDefaults.getCopy(param._section, &p)){
- p = new Properties();
- p->setCaseInsensitiveNames(true);
+ p = new Properties(true);
}
if(param._type != STRING &&
param._default != UNDEFINED &&
@@ -2261,7 +1954,7 @@ ConfigInfo::ConfigInfo() {
}
}
- for (int i=0; i<m_NoOfParams; i++) {
+ for (i=0; i<m_NoOfParams; i++) {
if(m_ParamInfo[i]._section == NULL){
ndbout << "Check that each entry has a section failed." << endl;
ndbout << "Parameter \"" << m_ParamInfo[i]._fname << endl;
@@ -2376,6 +2069,14 @@ ConfigInfo::isSection(const char * section) const {
return false;
}
+const char*
+ConfigInfo::getAlias(const char * section) const {
+ for (int i = 0; m_sectionNameAliases[i].name != 0; i++)
+ if(!strcmp(section, m_sectionNameAliases[i].alias))
+ return m_sectionNameAliases[i].name;
+ return 0;
+}
+
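
The new ConfigInfo::getAlias() maps an alternative section name back to its canonical one; the parser change further below upper-cases the header it read and consults this function before the section lookup. The concrete alias pairs live in m_sectionNameAliases, which this hunk does not show, so the table contents in the sketch below are made up purely to illustrate the zero-terminated table plus linear scan:

    #include <cstring>
    #include <cstdio>

    struct AliasPair { const char *name; const char *alias; };

    // Illustrative pairs only; the real table is defined elsewhere in ConfigInfo.cpp.
    static const AliasPair sectionNameAliases[] = {
      { "DB",  "NDBD" },
      { "MGM", "MGMD" },
      { 0, 0 }                      // zero-terminated, as getAlias() expects
    };

    // Same shape as ConfigInfo::getAlias(): return the canonical name, or 0
    // if the argument is not a known alias.
    static const char *getAlias(const char *section) {
      for (int i = 0; sectionNameAliases[i].name != 0; i++)
        if (!strcmp(section, sectionNameAliases[i].alias))
          return sectionNameAliases[i].name;
      return 0;
    }

    int main() {
      const char *s = getAlias("NDBD");
      std::printf("%s\n", s ? s : "(no alias)");
      return 0;
    }
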
bool
ConfigInfo::verify(const Properties * section, const char* fname,
Uint64 value) const {
@@ -2489,11 +2190,27 @@ transformNode(InitConfigFileParser::Context & ctx, const char * data){
Uint32 id;
if(!ctx.m_currentSection->get("Id", &id)){
+ Uint32 nextNodeId= 1;
+ ctx.m_userProperties.get("NextNodeId", &nextNodeId);
+ id= nextNodeId;
+ while (ctx.m_userProperties.get("AllocatedNodeId_", id, &id))
+ id++;
+ ctx.m_userProperties.put("NextNodeId", id+1, true);
+ ctx.m_currentSection->put("Id", id);
+#if 0
ctx.reportError("Mandatory parameter Id missing from section "
"[%s] starting at line: %d",
ctx.fname, ctx.m_sectionLineno);
return false;
+#endif
+ } else if(ctx.m_userProperties.get("AllocatedNodeId_", id, &id)) {
+ ctx.reportError("Duplicate Id in section "
+ "[%s] starting at line: %d",
+ ctx.fname, ctx.m_sectionLineno);
+ return false;
}
+
+ ctx.m_userProperties.put("AllocatedNodeId_", id, id);
snprintf(ctx.pname, sizeof(ctx.pname), "Node_%d", id);
ctx.m_currentSection->put("Type", ctx.fname);
@@ -2502,6 +2219,13 @@ transformNode(InitConfigFileParser::Context & ctx, const char * data){
ctx.m_userProperties.get("NoOfNodes", &nodes);
ctx.m_userProperties.put("NoOfNodes", ++nodes, true);
+ /**
+ * Update count (per type)
+ */
+ nodes = 0;
+ ctx.m_userProperties.get(ctx.fname, &nodes);
+ ctx.m_userProperties.put(ctx.fname, ++nodes, true);
+
return true;
}
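
transformNode no longer rejects a node section without an explicit Id: it hands out the next free node id, records it under an AllocatedNodeId_ key so duplicates can be detected, and keeps a per-type counter keyed by the section name. A self-contained sketch of that allocation scheme, using std::map in place of the Properties store (the Properties API itself is not reproduced here):

    #include <map>
    #include <string>
    #include <cstdio>

    struct UserProps {
      unsigned nextNodeId = 1;
      std::map<unsigned, bool> allocated;       // stands in for "AllocatedNodeId_"
      std::map<std::string, unsigned> perType;  // node count per section type
    };

    // Returns the node id to use, or 0 on a duplicate explicit id.
    static unsigned allocNodeId(UserProps &p, const std::string &type,
                                unsigned explicitId)
    {
      unsigned id = explicitId;
      if (id == 0) {                            // no Id given: pick the next free one
        id = p.nextNodeId;
        while (p.allocated.count(id)) id++;
        p.nextNodeId = id + 1;
      } else if (p.allocated.count(id)) {
        return 0;                               // duplicate Id in the file
      }
      p.allocated[id] = true;
      p.perType[type]++;                        // "Update count (per type)"
      return id;
    }

    int main() {
      UserProps p;
      std::printf("%u\n", allocNodeId(p, "DB", 0));   // 1: first free id
      std::printf("%u\n", allocNodeId(p, "DB", 3));   // 3: explicit id accepted
      std::printf("%u\n", allocNodeId(p, "API", 0));  // 2: next free id
      std::printf("%u\n", allocNodeId(p, "DB", 3));   // 0: duplicate rejected
      return 0;
    }
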
@@ -2510,10 +2234,16 @@ fixNodeHostname(InitConfigFileParser::Context & ctx, const char * data){
const char * compId;
if(!ctx.m_currentSection->get("ExecuteOnComputer", &compId)){
- ctx.reportError("Parameter \"ExecuteOnComputer\" missing from section "
- "[%s] starting at line: %d",
- ctx.fname, ctx.m_sectionLineno);
- return false;
+ require(ctx.m_currentSection->put("HostName", ""));
+
+ const char * type;
+ if(ctx.m_currentSection->get("Type", &type) && strcmp(type,"DB") == 0) {
+ ctx.reportError("Parameter \"ExecuteOnComputer\" missing from DB section"
+ " [%s] starting at line: %d",
+ ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
+ return true;
}
const Properties * computer;
@@ -2598,6 +2328,9 @@ transformSystem(InitConfigFileParser::Context & ctx, const char * data){
ctx.fname, ctx.m_sectionLineno);
return false;
}
+
+ ndbout << "transformSystem " << name << endl;
+
snprintf(ctx.pname, sizeof(ctx.pname), "SYSTEM_%s", name);
return true;
@@ -2638,6 +2371,22 @@ transformComputer(InitConfigFileParser::Context & ctx, const char * data){
ctx.m_userProperties.get("NoOfComputers", &computers);
ctx.m_userProperties.put("NoOfComputers", ++computers, true);
+ const char * hostname = 0;
+ ctx.m_currentSection->get("HostName", &hostname);
+ if(!hostname){
+ return true;
+ }
+
+ if(!strcmp(hostname, "localhost") || !strcmp(hostname, "127.0.0.1")){
+ if(ctx.m_userProperties.get("$computer-localhost", &hostname)){
+ ctx.reportError("Mixing of localhost with other hostname(%s) is illegal",
+ hostname);
+ return false;
+ }
+ } else {
+ ctx.m_userProperties.put("$computer-localhost", hostname);
+ }
+
return true;
}
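
transformComputer now refuses configurations that mix "localhost"/"127.0.0.1" with real hostnames across [COMPUTER] sections, since a loopback address is only reachable from the same machine. A compact sketch of the rule, assuming a plain string in place of the "$computer-localhost" property; like the hunk, the check is order-dependent (a loopback entry is rejected once a real hostname has been recorded):

    #include <string>
    #include <cstdio>

    static bool isLoopback(const std::string &h) {
      return h == "localhost" || h == "127.0.0.1";
    }

    // The first non-loopback hostname seen is remembered, and any later
    // localhost/127.0.0.1 computer is rejected as an illegal mix.
    static bool addComputerHost(std::string &firstRealHost, const std::string &h) {
      if (isLoopback(h))
        return firstRealHost.empty();
      firstRealHost = h;
      return true;
    }

    int main() {
      std::string seen;
      std::printf("%d\n", addComputerHost(seen, "ndb1"));       // 1: ok
      std::printf("%d\n", addComputerHost(seen, "localhost"));  // 0: illegal mix
      return 0;
    }
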
@@ -2725,7 +2474,7 @@ checkMandatory(InitConfigFileParser::Context & ctx, const char * data){
* Transform a string "NodeidX" (e.g. "uppsala.32")
* into a Uint32 "NodeIdX" (e.g. 32) and a string "SystemX" (e.g. "uppsala").
*/
-bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data)
+static bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data)
{
char buf[] = "NodeIdX"; buf[6] = data[sizeof("NodeI")];
char sysbuf[] = "SystemX"; sysbuf[6] = data[sizeof("NodeI")];
@@ -2761,7 +2510,7 @@ bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data)
* - name of external system in parameter extSystemName, and
* - nodeId of external node in parameter extSystemNodeId.
*/
-bool
+static bool
isExtConnection(InitConfigFileParser::Context & ctx,
const char **extSystemName, Uint32 * extSystemNodeId){
@@ -2789,7 +2538,7 @@ isExtConnection(InitConfigFileParser::Context & ctx,
* If connection is to an external system, then move connection into
* external system configuration (i.e. a sub-property).
*/
-bool
+static bool
fixExtConnection(InitConfigFileParser::Context & ctx, const char * data){
const char * extSystemName;
@@ -2844,7 +2593,7 @@ fixExtConnection(InitConfigFileParser::Context & ctx, const char * data){
* -# Via Node's ExecuteOnComputer lookup Hostname
* -# Add HostName to Connection
*/
-bool
+static bool
fixHostname(InitConfigFileParser::Context & ctx, const char * data){
char buf[] = "NodeIdX"; buf[6] = data[sizeof("HostNam")];
@@ -2867,28 +2616,61 @@ fixHostname(InitConfigFileParser::Context & ctx, const char * data){
/**
* Connection rule: Fix port number (using a port number adder)
*/
-bool
+static bool
fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
- if(!ctx.m_currentSection->contains("PortNumber")){
- Uint32 adder = 0;
- ctx.m_userProperties.get("PortNumberAdder", &adder);
- Uint32 base = 0;
- if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) &&
- !ctx.m_systemDefaults->get("PortNumber", &base)){
- return false;
+ Uint32 id1= 0, id2= 0;
+ require(ctx.m_currentSection->get("NodeId1", &id1));
+ require(ctx.m_currentSection->get("NodeId2", &id2));
+ id1 = id1 < id2 ? id1 : id2;
+
+ const Properties * node;
+ require(ctx.m_config->get("Node", id1, &node));
+ BaseString hostname;
+ require(node->get("HostName", hostname));
+
+ if (hostname.c_str()[0] == 0) {
+ ctx.reportError("Hostname required on nodeid %d since it will act as server.", id1);
+ return false;
+ }
+
+ Uint32 port= 0;
+ if (!node->get("ServerPort", &port) && !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
+ Uint32 adder= 0;
+ {
+ BaseString server_port_adder(hostname);
+ server_port_adder.append("_ServerPortAdder");
+ ctx.m_userProperties.get(server_port_adder.c_str(), &adder);
+ ctx.m_userProperties.put(server_port_adder.c_str(), adder+1, true);
+ }
+
+ Uint32 base= 0;
+ if (!ctx.m_userProperties.get("ServerPortBase", &base)){
+ if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) &&
+ !ctx.m_systemDefaults->get("PortNumber", &base)) {
+ base= NDB_BASE_PORT+2;
+ // ctx.reportError("Cannot retrieve base port number");
+ // return false;
+ }
+ ctx.m_userProperties.put("ServerPortBase", base);
}
- ctx.m_currentSection->put("PortNumber", base + adder);
- adder++;
- ctx.m_userProperties.put("PortNumberAdder", adder, true);
+ port= base + adder;
+ ctx.m_userProperties.put("ServerPort_", id1, port);
}
+
+ if(ctx.m_currentSection->contains("PortNumber")) {
+ ndbout << "PortNumber should no longer be specificied per connection, please remove from config. Will be changed to " << port << endl;
+ ctx.m_currentSection->put("PortNumber", port, true);
+ } else
+ ctx.m_currentSection->put("PortNumber", port);
+
return true;
}
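
With this change PortNumber stops being a per-connection setting: on every connection the lower node id acts as server, each server node gets exactly one listening port, and new ports are derived from a per-hostname adder on top of a single base (ServerPortBase, falling back to NDB_BASE_PORT+2 when nothing is configured). A sketch of that numbering, with the NDB_BASE_PORT value assumed and std::map standing in for the Properties objects:

    #include <map>
    #include <string>
    #include <algorithm>
    #include <cstdio>

    static const unsigned NDB_BASE_PORT = 2200;      // assumed value of the macro

    struct PortState {
      std::map<std::string, unsigned> adderPerHost;  // "<host>_ServerPortAdder"
      std::map<unsigned, unsigned> portPerServer;    // "ServerPort_" keyed by node id
      unsigned base = NDB_BASE_PORT + 2;             // "ServerPortBase"
    };

    // Returns the port for connection (id1,id2); serverHost is the hostname of
    // the lower node id, which is the listening side.
    static unsigned fixPortNumber(PortState &s, unsigned id1, unsigned id2,
                                  const std::string &serverHost)
    {
      unsigned server = std::min(id1, id2);
      auto it = s.portPerServer.find(server);
      if (it != s.portPerServer.end())
        return it->second;                           // reuse the port already handed out
      unsigned adder = s.adderPerHost[serverHost]++; // one new port per server node
      unsigned port = s.base + adder;
      s.portPerServer[server] = port;
      return port;
    }

    int main() {
      PortState s;
      std::printf("%u\n", fixPortNumber(s, 2, 4, "ndb1"));  // 2202
      std::printf("%u\n", fixPortNumber(s, 4, 2, "ndb1"));  // 2202: same server node
      std::printf("%u\n", fixPortNumber(s, 3, 4, "ndb1"));  // 2203: next server on host
      return 0;
    }
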
/**
* DB Node rule: Check various constraints
*/
-bool
+static bool
checkDbConstraints(InitConfigFileParser::Context & ctx, const char *){
Uint32 t1 = 0, t2 = 0;
@@ -2921,7 +2703,7 @@ checkDbConstraints(InitConfigFileParser::Context & ctx, const char *){
/**
* Connection rule: Check varius constraints
*/
-bool
+static bool
checkConnectionConstraints(InitConfigFileParser::Context & ctx, const char *){
Uint32 id1 = 0, id2 = 0;
@@ -2977,6 +2759,22 @@ checkConnectionConstraints(InitConfigFileParser::Context & ctx, const char *){
ctx.fname, ctx.m_sectionLineno);
return false;
}
+
+ return true;
+}
+
+static bool
+checkTCPConstraints(InitConfigFileParser::Context & ctx, const char * data){
+
+ const char * host;
+ struct in_addr addr;
+ if(ctx.m_currentSection->get(data, &host) && strlen(host) &&
+ Ndb_getInAddr(&addr, host)){
+ ctx.reportError("Unable to lookup/illegal hostname %s"
+ " - [%s] starting at line: %d",
+ host, ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
return true;
}
@@ -3000,7 +2798,8 @@ transform(InitConfigFileParser::Context & ctx,
require(ctx.m_currentSection->getTypeOf(oldName, &oldType));
ConfigInfo::Type newType = ctx.m_info->getType(ctx.m_currentInfo, newName);
if(!((oldType == PropertiesType_Uint32 || oldType == PropertiesType_Uint64)
- && (newType == ConfigInfo::INT || newType == ConfigInfo::INT64))){
+ && (newType == ConfigInfo::INT || newType == ConfigInfo::INT64 || newType == ConfigInfo::BOOL))){
+ ndbout << "oldType: " << (int)oldType << ", newType: " << (int)newType << endl;
ctx.reportError("Unable to handle type conversion w.r.t deprication %s %s"
"- [%s] starting at line: %d",
oldName, newName,
@@ -3019,23 +2818,24 @@ transform(InitConfigFileParser::Context & ctx,
return false;
}
- if(newType == ConfigInfo::INT){
+ if(newType == ConfigInfo::INT || newType == ConfigInfo::BOOL){
require(dst.put(newName, (Uint32)newVal));
- } else {
+ } else if(newType == ConfigInfo::INT64) {
require(dst.put64(newName, newVal));
}
return true;
}
-bool
+static bool
fixDepricated(InitConfigFileParser::Context & ctx, const char * data){
+ const char * name;
/**
* Transform old values to new values
* Transform new values to old values (backward compatible)
*/
- Properties tmp;
+ Properties tmp(true);
Properties::Iterator it(ctx.m_currentSection);
- for (const char* name = it.first(); name != NULL; name = it.next()) {
+ for (name = it.first(); name != NULL; name = it.next()) {
const DepricationTransform * p = &f_deprication[0];
while(p->m_section != 0){
if(strcmp(p->m_section, ctx.fname) == 0){
@@ -3056,7 +2856,7 @@ fixDepricated(InitConfigFileParser::Context & ctx, const char * data){
}
Properties::Iterator it2(&tmp);
- for (const char* name = it2.first(); name != NULL; name = it2.next()) {
+ for (name = it2.first(); name != NULL; name = it2.next()) {
PropertiesType type;
require(tmp.getTypeOf(name, &type));
switch(type){
@@ -3086,7 +2886,7 @@ fixDepricated(InitConfigFileParser::Context & ctx, const char * data){
return true;
}
-bool
+static bool
saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){
const Properties * sec;
if(!ctx.m_currentInfo->get(ctx.fname, &sec)){
@@ -3151,22 +2951,24 @@ saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){
default:
abort();
}
+ require(ok);
}
ctx.m_configValues.closeSection();
} while(0);
return true;
}
-bool
-addNodeConnections(Vector<ConfigInfo::ConfigRuleSection>&sections,
+static bool
+add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
struct InitConfigFileParser::Context &ctx,
- const char * ruleData)
+ const char * rule_data)
{
+ Uint32 i;
Properties * props= ctx.m_config;
- Properties p_connections;
- Properties p_connections2;
+ Properties p_connections(true);
+ Properties p_connections2(true);
- for (Uint32 i = 0;; i++){
+ for (i = 0;; i++){
const Properties * tmp;
Uint32 nodeId1, nodeId2;
@@ -3184,11 +2986,11 @@ addNodeConnections(Vector<ConfigInfo::ConfigRuleSection>&sections,
Uint32 nNodes;
ctx.m_userProperties.get("NoOfNodes", &nNodes);
- Properties p_db_nodes;
- Properties p_api_mgm_nodes;
+ Properties p_db_nodes(true);
+ Properties p_api_mgm_nodes(true);
- Uint32 i_db= 0, i_api_mgm= 0;
- for (Uint32 i= 0, n= 0; n < nNodes; i++){
+ Uint32 i_db= 0, i_api_mgm= 0, n;
+ for (i= 0, n= 0; n < nNodes; i++){
const Properties * tmp;
if(!props->get("Node", i, &tmp)) continue;
n++;
@@ -3205,13 +3007,13 @@ addNodeConnections(Vector<ConfigInfo::ConfigRuleSection>&sections,
Uint32 nodeId1, nodeId2, dummy;
- for (Uint32 i= 0; p_db_nodes.get("", i, &nodeId1); i++){
+ for (i= 0; p_db_nodes.get("", i, &nodeId1); i++){
for (Uint32 j= i+1;; j++){
if(!p_db_nodes.get("", j, &nodeId2)) break;
if(!p_connections2.get("", nodeId1+nodeId2<<16, &dummy)) {
ConfigInfo::ConfigRuleSection s;
s.m_sectionType= BaseString("TCP");
- s.m_sectionData= new Properties;
+ s.m_sectionData= new Properties(true);
char buf[16];
snprintf(buf, sizeof(buf), "%u", nodeId1);
s.m_sectionData->put("NodeId1", buf);
@@ -3222,13 +3024,13 @@ addNodeConnections(Vector<ConfigInfo::ConfigRuleSection>&sections,
}
}
- for (Uint32 i= 0; p_api_mgm_nodes.get("", i, &nodeId1); i++){
+ for (i= 0; p_api_mgm_nodes.get("", i, &nodeId1); i++){
if(!p_connections.get("", nodeId1, &dummy)) {
for (Uint32 j= 0;; j++){
if(!p_db_nodes.get("", j, &nodeId2)) break;
ConfigInfo::ConfigRuleSection s;
s.m_sectionType= BaseString("TCP");
- s.m_sectionData= new Properties;
+ s.m_sectionData= new Properties(true);
char buf[16];
snprintf(buf, sizeof(buf), "%u", nodeId1);
s.m_sectionData->put("NodeId1", buf);
@@ -3241,3 +3043,63 @@ addNodeConnections(Vector<ConfigInfo::ConfigRuleSection>&sections,
return true;
}
+
+
+static bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data)
+{
+#if 0
+ Properties * props= ctx.m_config;
+ Properties computers(true);
+ Uint32 port_base = NDB_BASE_PORT+2;
+
+ Uint32 nNodes;
+ ctx.m_userProperties.get("NoOfNodes", &nNodes);
+
+ for (Uint32 i= 0, n= 0; n < nNodes; i++){
+ Properties * tmp;
+ if(!props->get("Node", i, &tmp)) continue;
+ n++;
+
+ const char * type;
+ if(!tmp->get("Type", &type)) continue;
+
+ Uint32 port;
+ if (tmp->get("ServerPort", &port)) continue;
+
+ Uint32 computer;
+ if (!tmp->get("ExecuteOnComputer", &computer)) continue;
+
+ Uint32 adder= 0;
+ computers.get("",computer, &adder);
+
+ if (strcmp(type,"DB") == 0) {
+ adder++;
+ tmp->put("ServerPort", port_base+adder);
+ computers.put("",computer, adder);
+ }
+ }
+#endif
+ return true;
+}
+
+static bool
+check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data)
+{
+ Uint32 db_nodes = 0;
+ Uint32 replicas = 0;
+ ctx.m_userProperties.get("DB", &db_nodes);
+ ctx.m_userProperties.get("NoOfReplicas", &replicas);
+ if((db_nodes % replicas) != 0){
+ ctx.reportError("Invalid no of db nodes wrt no of replicas.\n"
+ "No of nodes must be dividable with no or replicas");
+ return false;
+ }
+
+ return true;
+}
+
+template class Vector<ConfigInfo::ConfigRuleSection>;
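
Of the two new config rules, add_server_ports is compiled out with #if 0 (the per-connection fixPortNumber rule above already assigns ports), while check_node_vs_replicas enforces that the number of DB nodes is an exact multiple of NoOfReplicas, since the data nodes are partitioned into node groups of NoOfReplicas members each ("dividable" in the error text means divisible). Minimal sketch of the rule:

    #include <cstdio>

    // DB nodes form node groups of NoOfReplicas members each, so the node
    // count must divide evenly by the replica count.
    static bool check_node_vs_replicas(unsigned dbNodes, unsigned replicas) {
      return replicas != 0 && dbNodes % replicas == 0;
    }

    int main() {
      std::printf("%d\n", check_node_vs_replicas(4, 2));  // 1: two node groups
      std::printf("%d\n", check_node_vs_replicas(3, 2));  // 0: invalid layout
      return 0;
    }
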
diff --git a/ndb/src/common/mgmcommon/ConfigInfo.hpp b/ndb/src/common/mgmcommon/ConfigInfo.hpp
index 79c17b436fe..9a954fe78d5 100644
--- a/ndb/src/common/mgmcommon/ConfigInfo.hpp
+++ b/ndb/src/common/mgmcommon/ConfigInfo.hpp
@@ -61,6 +61,11 @@ public:
Uint64 _max;
};
+ struct AliasPair{
+ const char * name;
+ const char * alias;
+ };
+
/**
* Entry for one section rule
*/
@@ -100,6 +105,7 @@ public:
* @note Result is not defined if section/name are wrong!
*/
bool verify(const Properties* secti, const char* fname, Uint64 value) const;
+ const char* getAlias(const char*) const;
bool isSection(const char*) const;
const char* getDescription(const Properties * sec, const char* fname) const;
@@ -123,6 +129,7 @@ private:
static const ParamInfo m_ParamInfo[];
static const int m_NoOfParams;
+ static const AliasPair m_sectionNameAliases[];
static const char* m_sectionNames[];
static const int m_noOfSectionNames;
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index d2c622593de..2e809907058 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -37,37 +37,26 @@
#include <mgmapi.h>
#include <mgmapi_config_parameters.h>
+#include <mgmapi_configuration.hpp>
#include <ConfigValues.hpp>
#include <NdbHost.h>
//****************************************************************************
//****************************************************************************
-ConfigRetriever::ConfigRetriever() {
+ConfigRetriever::ConfigRetriever(Uint32 version, Uint32 node_type) {
- _localConfigFileName = NULL;
- m_defaultConnectString = NULL;
-
-
- errorString = 0;
- _localConfig = new LocalConfig();
- m_connectString = NULL;
+ m_handle= 0;
+ m_version = version;
+ m_node_type = node_type;
}
ConfigRetriever::~ConfigRetriever(){
- if(_localConfigFileName != 0)
- free(_localConfigFileName);
-
- if(m_defaultConnectString != 0)
- free(m_defaultConnectString);
- if(m_connectString != 0)
- free(m_connectString);
-
- if(errorString != 0)
- free(errorString);
-
- delete _localConfig;
+ if (m_handle) {
+ ndb_mgm_disconnect(m_handle);
+ ndb_mgm_destroy_handle(&m_handle);
+ }
}
@@ -75,67 +64,51 @@ ConfigRetriever::~ConfigRetriever(){
//****************************************************************************
int
-ConfigRetriever::init(bool onlyNodeId) {
- if (_localConfig->init(onlyNodeId, m_connectString, _localConfigFileName, m_defaultConnectString)) {
- return _ownNodeId = (*_localConfig)._ownNodeId;
+ConfigRetriever::init() {
+ if (!_localConfig.init(m_connectString.c_str(),
+ _localConfigFileName.c_str())){
+
+ setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr");
+ _localConfig.printError();
+ _localConfig.printUsage();
+ return -1;
}
-
- setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr");
- _localConfig->printError();
- _localConfig->printUsage();
-
- return -1;
-}
-//****************************************************************************
-//****************************************************************************
-//****************************************************************************
-//****************************************************************************
-struct ndb_mgm_configuration*
-ConfigRetriever::getConfig(int verId, int nodeType) {
+ return _ownNodeId = _localConfig._ownNodeId;
+}
- int res = init();
- if (res == -1) {
- return 0;
- }
+int
+ConfigRetriever::do_connect(){
- if (_localConfig->items == 0){
- setError(CR_ERROR,"No Management Servers configured in local config file");
- return 0;
+ if(!m_handle)
+ m_handle= ndb_mgm_create_handle();
+
+ if (m_handle == 0) {
+ setError(CR_ERROR, "Unable to allocate mgm handle");
+ return -1;
}
int retry = 1;
int retry_max = 12; // Max number of retry attempts
int retry_interval= 5; // Seconds between each retry
- do {
+ while(retry < retry_max){
Uint32 type = CR_ERROR;
- for (int i = 0; i<_localConfig->items; i++){
- MgmtSrvrId * m = _localConfig->ids[i];
- struct ndb_mgm_configuration * p = 0;
+ BaseString tmp;
+ for (int i = 0; i<_localConfig.ids.size(); i++){
+ MgmtSrvrId * m = &_localConfig.ids[i];
switch(m->type){
case MgmId_TCP:
- p = getConfig(m->data.tcp.remoteHost, m->data.tcp.port, verId);
- break;
- case MgmId_File:
- p = getConfig(m->data.file.filename, verId);
- break;
- default:
- setError(CR_ERROR, "Unknown error type");
- break;
- }
-
- if (p != 0) {
- if(!verifyConfig(p, nodeType)){
- free(p);
+ tmp.assfmt("%s:%d", m->name.c_str(), m->port);
+ if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) {
return 0;
}
- return p;
+ setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle));
+ case MgmId_File:
+ break;
}
- if(latestErrorType == CR_RETRY)
- type = CR_RETRY;
- } // for
-
- if(type == CR_RETRY){
+ }
+
+ if(latestErrorType == CR_RETRY){
REPORT_WARNING("Failed to retrieve cluster configuration");
ndbout << "(Cause of failure: " << getErrorString() << ")" << endl;
ndbout << "Attempt " << retry << " of " << retry_max << ". "
@@ -146,60 +119,63 @@ ConfigRetriever::getConfig(int verId, int nodeType) {
break;
}
retry++;
-
- } while (retry <= retry_max);
+ }
- return 0;
+ ndb_mgm_destroy_handle(&m_handle);
+ m_handle= 0;
+ return -1;
}
-ndb_mgm_configuration *
-ConfigRetriever::getConfig(const char * mgmhost,
- short port,
- int versionId){
-
- NdbMgmHandle h;
- h = ndb_mgm_create_handle();
- if (h == NULL) {
- setError(CR_ERROR, "Unable to allocate mgm handle");
- return 0;
- }
+//****************************************************************************
+//****************************************************************************
+//****************************************************************************
+//****************************************************************************
+struct ndb_mgm_configuration*
+ConfigRetriever::getConfig() {
+
+ struct ndb_mgm_configuration * p = 0;
- BaseString tmp;
- tmp.assfmt("%s:%d", mgmhost, port);
- if (ndb_mgm_connect(h, tmp.c_str()) != 0) {
- setError(CR_RETRY, ndb_mgm_get_latest_error_desc(h));
- ndb_mgm_destroy_handle(&h);
+ if(m_handle != 0){
+ p = getConfig(m_handle);
+ } else {
+ for (int i = 0; i<_localConfig.ids.size(); i++){
+ MgmtSrvrId * m = &_localConfig.ids[i];
+ switch(m->type){
+ case MgmId_File:
+ p = getConfig(m->name.c_str());
+ break;
+ case MgmId_TCP:
+ break;
+ }
+ if(p)
+ break;
+ }
+ }
+ if(p == 0)
return 0;
+
+ if(!verifyConfig(p)){
+ free(p);
+ p= 0;
}
+
+ return p;
+}
- ndb_mgm_configuration * conf = ndb_mgm_get_configuration(h, versionId);
+ndb_mgm_configuration *
+ConfigRetriever::getConfig(NdbMgmHandle m_handle){
+
+ ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle,m_version);
if(conf == 0){
- setError(CR_ERROR, ndb_mgm_get_latest_error_desc(h));
- }
-
- ndb_mgm_disconnect(h);
- ndb_mgm_destroy_handle(&h);
-
- return conf;
-#if 0
- bool compatible;
- if (global_ndb_check)
- compatible = ndbCompatible_ndb_mgmt(versionId, version);
- else
- compatible = ndbCompatible_api_mgmt(versionId, version);
-
- if(!compatible){ // if(version != versionId){
- NDB_CLOSE_SOCKET(sockfd);
- snprintf(err_buf, sizeof(err_buf), "Management Server: Invalid version. "
- "Version from server: %d Own version: %d", version, versionId);
- setError(CR_ERROR, err_buf);
+ setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
return 0;
}
-#endif
+
+ return conf;
}
ndb_mgm_configuration *
-ConfigRetriever::getConfig(const char * filename, int versionId){
+ConfigRetriever::getConfig(const char * filename){
struct stat sbuf;
const int res = stat(filename, &sbuf);
@@ -242,60 +218,29 @@ ConfigRetriever::getConfig(const char * filename, int versionId){
void
ConfigRetriever::setError(ErrorType et, const char * s){
- if(errorString != 0){
- free(errorString);
- }
- if(s == 0)
- errorString = 0;
- else
- errorString = strdup(s);
+ errorString.assign(s ? s : "");
latestErrorType = et;
}
const char *
ConfigRetriever::getErrorString(){
- return errorString;
+ return errorString.c_str();
}
void
ConfigRetriever::setLocalConfigFileName(const char * localConfigFileName) {
- if(_localConfigFileName != 0)
- free(_localConfigFileName);
- if(localConfigFileName != 0)
- _localConfigFileName = strdup(localConfigFileName);
- else
- _localConfigFileName = 0;
+ _localConfigFileName.assign(localConfigFileName ? localConfigFileName : "");
}
void
ConfigRetriever::setConnectString(const char * connectString) {
- if(m_connectString != 0)
- free(m_connectString);
- if (connectString != 0) {
- m_connectString = strdup(connectString);
- } else {
- m_connectString = 0;
- }
-}
-
-/**
- * @note Do not use! Use the one above if possible. /elathal
- */
-void
-ConfigRetriever::setDefaultConnectString(const char * defaultConnectString) {
- if(m_defaultConnectString != 0)
- free(m_defaultConnectString);
- if (defaultConnectString != 0) {
- m_defaultConnectString = strdup(defaultConnectString);
- } else {
- m_defaultConnectString = 0;
- }
+ m_connectString.assign(connectString ? connectString : "");
}
bool
-ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf,
- int type){
+ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf){
+
char buf[255];
ndb_mgm_configuration_iterator * it;
it = ndb_mgm_create_configuration_iterator((struct ndb_mgm_configuration *)conf, CFG_SECTION_NODE);
@@ -308,8 +253,8 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf,
}
NdbAutoPtr<ndb_mgm_configuration_iterator> ptr(it);
- if(ndb_mgm_find(it, CFG_NODE_ID, getOwnNodeId()) != 0){
- snprintf(buf, 255, "Unable to find node with id: %d", getOwnNodeId());
+ if(ndb_mgm_find(it, CFG_NODE_ID, _ownNodeId) != 0){
+ snprintf(buf, 255, "Unable to find node with id: %d", _ownNodeId);
setError(CR_ERROR, buf);
return false;
}
@@ -323,12 +268,15 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf,
char localhost[MAXHOSTNAMELEN];
if(NdbHost_GetHostName(localhost) != 0){
- snprintf(buf, 255, "Unable to own hostname");
+ snprintf(buf, 255, "Unable to get own hostname");
setError(CR_ERROR, buf);
return false;
}
do {
+ if(strlen(hostname) == 0)
+ break;
+
if(strcasecmp(hostname, localhost) == 0)
break;
@@ -363,11 +311,67 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf,
return false;
}
- if(_type != type){
+ if(_type != m_node_type){
snprintf(buf, 255, "Supplied node type(%d) and config node type(%d) "
- " don't match", type, _type);
+ " don't match", m_node_type, _type);
setError(CR_ERROR, buf);
return false;
}
+
+ /**
+ * Check hostnames
+ */
+ ndb_mgm_configuration_iterator iter(* conf, CFG_SECTION_CONNECTION);
+ for(iter.first(); iter.valid(); iter.next()){
+
+ Uint32 type = CONNECTION_TYPE_TCP + 1;
+ if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
+ if(type != CONNECTION_TYPE_TCP) continue;
+
+ Uint32 nodeId1, nodeId2, remoteNodeId;
+ if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue;
+ if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue;
+
+ if(nodeId1 != _ownNodeId && nodeId2 != _ownNodeId) continue;
+ remoteNodeId = (_ownNodeId == nodeId1 ? nodeId2 : nodeId1);
+
+ const char * name;
+ struct in_addr addr;
+ BaseString tmp;
+ if(!iter.get(CFG_TCP_HOSTNAME_1, &name) && strlen(name)){
+ if(Ndb_getInAddr(&addr, name) != 0){
+ tmp.assfmt("Unable to lookup/illegal hostname %s, "
+ "connection from node %d to node %d",
+ name, _ownNodeId, remoteNodeId);
+ setError(CR_ERROR, tmp.c_str());
+ return false;
+ }
+ }
+
+ if(!iter.get(CFG_TCP_HOSTNAME_2, &name) && strlen(name)){
+ if(Ndb_getInAddr(&addr, name) != 0){
+ tmp.assfmt("Unable to lookup/illegal hostname %s, "
+ "connection from node %d to node %d",
+ name, _ownNodeId, remoteNodeId);
+ setError(CR_ERROR, tmp.c_str());
+ return false;
+ }
+ }
+ }
return true;
}
+
+Uint32
+ConfigRetriever::allocNodeId(){
+ unsigned nodeid= _ownNodeId;
+
+ if(m_handle != 0){
+ int res= ndb_mgm_alloc_nodeid(m_handle, m_version, &nodeid, m_node_type);
+ if(res != 0) {
+ setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ return 0;
+ }
+ }
+
+ return _ownNodeId= nodeid;
+}
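
ConfigRetriever now carries the version and node type in the object, keeps one NdbMgmHandle alive across calls, and splits the old getConfig(verId, nodeType) into init() (parse the connect string / Ndb.cfg), do_connect() (retry loop over the configured management servers), getConfig() (fetch and verifyConfig, including the new TCP hostname checks), and allocNodeId(). A hedged usage sketch follows; the call order shown is one plausible sequence, and the numeric node type for an API client is an assumption, not something this hunk states:

    #include <ndb_global.h>
    #include <ndb_version.h>
    #include "ConfigRetriever.hpp"

    int fetch_config_example(const char *connect_string)
    {
      ConfigRetriever cr(NDB_VERSION, 2 /* assumed: API node type */);
      cr.setConnectString(connect_string);          // e.g. "host=mgmhost:2200"

      if (cr.init() == -1)                          // parse connect string / Ndb.cfg
        return -1;
      if (cr.do_connect() == -1)                    // retry loop over mgm servers
        return -1;
      if (cr.allocNodeId() == 0)                    // ask mgmd for a node id
        return -1;

      struct ndb_mgm_configuration *conf = cr.getConfig();  // fetch + verifyConfig()
      if (conf == 0)
        return -1;                                  // cr.getErrorString() has details

      free(conf);                                   // packed config is malloc'ed
      return 0;
    }
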
diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp
index ba5fe7ace80..a76c541f3f6 100644
--- a/ndb/src/common/mgmcommon/IPCConfig.cpp
+++ b/ndb/src/common/mgmcommon/IPCConfig.cpp
@@ -339,12 +339,13 @@ IPCConfig::getNodeType(NodeId id) const {
return out;
}
+#include <mgmapi.h>
Uint32
IPCConfig::configureTransporters(Uint32 nodeId,
const class ndb_mgm_configuration & config,
class TransporterRegistry & tr){
- Uint32 noOfTransportersCreated = 0;
+ Uint32 noOfTransportersCreated= 0, server_port= 0;
ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION);
for(iter.first(); iter.valid(); iter.next()){
@@ -364,6 +365,16 @@ IPCConfig::configureTransporters(Uint32 nodeId,
Uint32 type = ~0;
if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
+ Uint32 tmp_server_port= 0;
+ if(iter.get(CFG_CONNECTION_SERVER_PORT, &tmp_server_port)) break;
+ if (nodeId <= nodeId1 && nodeId <= nodeId2) {
+ if (server_port && server_port != tmp_server_port) {
+ ndbout << "internal error in config setup of server ports line= " << __LINE__ << endl;
+ exit(-1);
+ }
+ server_port= tmp_server_port;
+ }
+
switch(type){
case CONNECTION_TYPE_SHM:{
SHM_TransporterConfiguration conf;
@@ -377,6 +388,8 @@ IPCConfig::configureTransporters(Uint32 nodeId,
if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break;
if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break;
+ conf.port= tmp_server_port;
+
if(!tr.createTransporter(&conf)){
ndbout << "Failed to create SHM Transporter from: "
<< conf.localNodeId << " to: " << conf.remoteNodeId << endl;
@@ -428,10 +441,10 @@ IPCConfig::configureTransporters(Uint32 nodeId,
if(iter.get(CFG_TCP_HOSTNAME_1, &host1)) break;
if(iter.get(CFG_TCP_HOSTNAME_2, &host2)) break;
- if(iter.get(CFG_TCP_SERVER_PORT, &conf.port)) break;
if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break;
if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break;
+ conf.port= tmp_server_port;
const char * proxy;
if (!iter.get(CFG_TCP_PROXY, &proxy)) {
if (strlen(proxy) > 0 && nodeId2 == nodeId) {
@@ -490,6 +503,8 @@ IPCConfig::configureTransporters(Uint32 nodeId,
}
}
+ tr.m_service_port= server_port;
+
return noOfTransportersCreated;
}
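
configureTransporters no longer reads a per-connection CFG_TCP_SERVER_PORT; every connection section carries CFG_CONNECTION_SERVER_PORT, and the node that is the lower id on its connections listens on a single port, which is also handed to the registry via m_service_port. The hunk treats disagreeing ports as an internal error; a sketch of that invariant check, with a plain struct standing in for the configuration iterator:

    #include <vector>
    #include <cstdio>

    struct Conn { unsigned node1, node2, serverPort; };

    // Returns the single listening port for `self`, or 0 if the generated
    // config is inconsistent (the case the hunk aborts on).
    static unsigned servicePort(unsigned self, const std::vector<Conn> &conns)
    {
      unsigned port = 0;
      for (const Conn &c : conns) {
        if (c.node1 != self && c.node2 != self) continue;
        if (self <= c.node1 && self <= c.node2) {      // this node is the server side
          if (port && port != c.serverPort) return 0;  // all its ports must agree
          port = c.serverPort;
        }
      }
      return port;
    }

    int main() {
      std::vector<Conn> conns = { {2, 3, 2202}, {2, 4, 2202}, {3, 4, 2203} };
      std::printf("%u\n", servicePort(2, conns));  // 2202
      std::printf("%u\n", servicePort(3, conns));  // 2203
      return 0;
    }
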
diff --git a/ndb/src/common/mgmcommon/InitConfigFileParser.cpp b/ndb/src/common/mgmcommon/InitConfigFileParser.cpp
index d52bc54db52..7c842508491 100644
--- a/ndb/src/common/mgmcommon/InitConfigFileParser.cpp
+++ b/ndb/src/common/mgmcommon/InitConfigFileParser.cpp
@@ -43,10 +43,10 @@ InitConfigFileParser::~InitConfigFileParser() {
// Read Config File
//****************************************************************************
InitConfigFileParser::Context::Context(const ConfigInfo * info)
- : m_configValues(1000, 20) {
+ : m_configValues(1000, 20), m_userProperties(true) {
- m_config = new Properties();
- m_defaults = new Properties();
+ m_config = new Properties(true);
+ m_defaults = new Properties(true);
}
InitConfigFileParser::Context::~Context(){
@@ -115,7 +115,7 @@ InitConfigFileParser::parseConfig(FILE * file) {
snprintf(ctx.fname, sizeof(ctx.fname), section); free(section);
ctx.type = InitConfigFileParser::DefaultSection;
ctx.m_sectionLineno = ctx.m_lineno;
- ctx.m_currentSection = new Properties();
+ ctx.m_currentSection = new Properties(true);
ctx.m_userDefaults = NULL;
ctx.m_currentInfo = m_info->getInfo(ctx.fname);
ctx.m_systemDefaults = m_info->getDefaults(ctx.fname);
@@ -137,7 +137,7 @@ InitConfigFileParser::parseConfig(FILE * file) {
free(section);
ctx.type = InitConfigFileParser::Section;
ctx.m_sectionLineno = ctx.m_lineno;
- ctx.m_currentSection = new Properties();
+ ctx.m_currentSection = new Properties(true);
ctx.m_userDefaults = getSection(ctx.fname, ctx.m_defaults);
ctx.m_currentInfo = m_info->getInfo(ctx.fname);
ctx.m_systemDefaults = m_info->getDefaults(ctx.fname);
@@ -222,6 +222,8 @@ bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) {
char tmpLine[MAX_LINE_LENGTH];
char fname[MAX_LINE_LENGTH], rest[MAX_LINE_LENGTH];
char* t;
+ const char *separator_list[]= {":", "=", 0};
+ const char *separator= 0;
if (ctx.m_currentSection == NULL){
ctx.reportError("Value specified outside section");
@@ -233,7 +235,14 @@ bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) {
// *************************************
// Check if a separator exists in line
// *************************************
- if (!strchr(tmpLine, ':')) {
+ for(int i= 0; separator_list[i] != 0; i++) {
+ if(strchr(tmpLine, separator_list[i][0])) {
+ separator= separator_list[i];
+ break;
+ }
+ }
+
+ if (separator == 0) {
ctx.reportError("Parse error");
return false;
}
@@ -241,13 +250,13 @@ bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) {
// *******************************************
// Get pointer to substring before separator
// *******************************************
- t = strtok(tmpLine, ":");
+ t = strtok(tmpLine, separator);
// *****************************************
// Count number of tokens before separator
// *****************************************
if (sscanf(t, "%120s%120s", fname, rest) != 1) {
- ctx.reportError("Multiple names before \':\'");
+ ctx.reportError("Multiple names before \'%c\'", separator[0]);
return false;
}
if (!ctx.m_currentInfo->contains(fname)) {
@@ -475,8 +484,24 @@ InitConfigFileParser::parseSectionHeader(const char* line) const {
tmp[0] = ' ';
trim(tmp);
+ // Convert section header to upper
+ for(int i= strlen(tmp)-1; i >= 0; i--)
+ tmp[i]= toupper(tmp[i]);
+
+ // Get the correct header name if an alias
+ {
+ const char *tmp_alias= m_info->getAlias(tmp);
+ if (tmp_alias) {
+ free(tmp);
+ tmp= strdup(tmp_alias);
+ }
+ }
+
// Lookup token among sections
- if(!m_info->isSection(tmp)) return NULL;
+ if(!m_info->isSection(tmp)) {
+ free(tmp);
+ return NULL;
+ }
if(m_info->getInfo(tmp)) return tmp;
free(tmp);
@@ -497,7 +522,7 @@ InitConfigFileParser::parseDefaultSectionHeader(const char* line) const {
if (no != 2) return NULL;
// Not correct keyword at end
- if (!strcmp(token2, "DEFAULT") == 0) return NULL;
+ if (!strcasecmp(token2, "DEFAULT") == 0) return NULL;
if(m_info->getInfo(token1)){
return strdup(token1);
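
The parser changes accept both ':' and '=' as name/value separators, upper-case section headers, resolve aliases through getAlias() before the section lookup, and match the DEFAULT keyword case-insensitively. A small sketch of the separator detection, which probes a zero-terminated list in order and uses the first candidate found anywhere in the line:

    #include <cstring>
    #include <cstdio>

    // Same idea as parseNameValuePair(): remember which separator the line uses,
    // or report a parse error when none is present.
    static char findSeparator(const char *line) {
      const char *separator_list[] = { ":", "=", 0 };
      for (int i = 0; separator_list[i] != 0; i++)
        if (std::strchr(line, separator_list[i][0]))
          return separator_list[i][0];
      return 0;                                  // no separator: parse error
    }

    int main() {
      std::printf("%c\n", findSeparator("NoOfReplicas: 2"));   // ':'
      std::printf("%c\n", findSeparator("NoOfReplicas=2"));    // '='
      std::printf("%d\n", findSeparator("NoOfReplicas 2"));    // 0
      return 0;
    }
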
diff --git a/ndb/src/common/mgmcommon/LocalConfig.cpp b/ndb/src/common/mgmcommon/LocalConfig.cpp
index 12e685ced34..0440ce84dba 100644
--- a/ndb/src/common/mgmcommon/LocalConfig.cpp
+++ b/ndb/src/common/mgmcommon/LocalConfig.cpp
@@ -17,17 +17,16 @@
#include "LocalConfig.hpp"
#include <NdbEnv.h>
#include <NdbConfig.h>
+#include <NdbAutoPtr.hpp>
LocalConfig::LocalConfig(){
- ids = 0; size = 0; items = 0;
error_line = 0; error_msg[0] = 0;
+ _ownNodeId= 0;
}
bool
-LocalConfig::init(bool onlyNodeId,
- const char *connectString,
- const char *fileName,
- const char *defaultConnectString) {
+LocalConfig::init(const char *connectString,
+ const char *fileName) {
/**
* Escalation:
* 1. Check connectString
@@ -39,8 +38,8 @@ LocalConfig::init(bool onlyNodeId,
*/
//1. Check connectString
- if(connectString != 0) {
- if(readConnectString(connectString, onlyNodeId)){
+ if(connectString != 0 && connectString[0] != 0){
+ if(readConnectString(connectString)){
return true;
}
return false;
@@ -49,7 +48,7 @@ LocalConfig::init(bool onlyNodeId,
//2. Check given filename
if (fileName && strlen(fileName) > 0) {
bool fopenError;
- if(readFile(fileName, fopenError, onlyNodeId)){
+ if(readFile(fileName, fopenError)){
return true;
}
return false;
@@ -59,7 +58,7 @@ LocalConfig::init(bool onlyNodeId,
char buf[255];
if(NdbEnv_GetEnv("NDB_CONNECTSTRING", buf, sizeof(buf)) &&
strlen(buf) != 0){
- if(readConnectString(buf, onlyNodeId)){
+ if(readConnectString(buf)){
return true;
}
return false;
@@ -68,10 +67,10 @@ LocalConfig::init(bool onlyNodeId,
//4. Check Ndb.cfg in NDB_HOME
{
bool fopenError;
- char buf[256];
- if(readFile(NdbConfig_NdbCfgName(buf, sizeof(buf), 1 /*true*/), fopenError, onlyNodeId)){
+ char *buf= NdbConfig_NdbCfgName(1 /*true*/);
+ NdbAutoPtr<char> tmp_aptr(buf);
+ if(readFile(buf, fopenError))
return true;
- }
if (!fopenError)
return false;
}
@@ -79,20 +78,20 @@ LocalConfig::init(bool onlyNodeId,
//5. Check Ndb.cfg in cwd
{
bool fopenError;
- char buf[256];
- if(readFile(NdbConfig_NdbCfgName(buf, sizeof(buf), 0 /*false*/), fopenError, onlyNodeId)){
+ char *buf= NdbConfig_NdbCfgName(0 /*false*/);
+ NdbAutoPtr<char> tmp_aptr(buf);
+ if(readFile(buf, fopenError))
return true;
- }
if (!fopenError)
return false;
}
- //6. Check defaultConnectString
- if(defaultConnectString != 0) {
- if(readConnectString(defaultConnectString, onlyNodeId)){
+ //7. Check
+ {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "host=localhost:%u", NDB_BASE_PORT);
+ if(readConnectString(buf))
return true;
- }
- return false;
}
setError(0, "");
@@ -101,30 +100,8 @@ LocalConfig::init(bool onlyNodeId,
}
LocalConfig::~LocalConfig(){
- for(int i = 0; i<items; i++){
- if(ids[i]->type == MgmId_TCP)
- free(ids[i]->data.tcp.remoteHost);
- else if(ids[i]->type == MgmId_File)
- free(ids[i]->data.file.filename);
- delete ids[i];
- }
- if(ids != 0)
- delete[] ids;
}
-void LocalConfig::add(MgmtSrvrId * i){
- if(items == size){
- MgmtSrvrId ** tmp = new MgmtSrvrId * [size+10];
- if(ids != 0){
- memcpy(tmp, ids, items*sizeof(MgmtSrvrId *));
- delete []ids;
- }
- ids = tmp;
- }
- ids[items] = i;
- items++;
-}
-
void LocalConfig::setError(int lineNumber, const char * _msg) {
error_line = lineNumber;
strncpy(error_msg, _msg, sizeof(error_msg));
@@ -144,22 +121,22 @@ void LocalConfig::printUsage() const {
ndbout << "1. Put a Ndb.cfg file in the directory where you start"<<endl
<< " the node. "<< endl
<< " Ex: Ndb.cfg" << endl
- << " | nodeid=11;host=localhost:2200"<<endl<<endl;
+ << " | host=localhost:"<<NDB_BASE_PORT<<endl;
ndbout << "2. Use the environment variable NDB_CONNECTSTRING to "<<endl
<< " provide this information." <<endl
<< " Ex: " << endl
- << " >export NDB_CONNECTSTRING=\"nodeid=11;host=localhost:2200\""
+ << " >export NDB_CONNECTSTRING=\"host=localhost:"<<NDB_BASE_PORT<<"\""
<<endl<<endl;
}
-char *nodeIdTokens[] = {
+const char *nodeIdTokens[] = {
"OwnProcessId %i",
"nodeid=%i",
0
};
-char *hostNameTokens[] = {
+const char *hostNameTokens[] = {
"host://%[^:]:%i",
"host=%[^:]:%i",
"%[^:]:%i",
@@ -167,7 +144,7 @@ char *hostNameTokens[] = {
0
};
-char *fileNameTokens[] = {
+const char *fileNameTokens[] = {
"file://%s",
"file=%s",
0
@@ -183,15 +160,15 @@ LocalConfig::parseNodeId(const char * buf){
bool
LocalConfig::parseHostName(const char * buf){
- char tempString[100];
+ char tempString[1024];
int port;
for(int i = 0; hostNameTokens[i] != 0; i++) {
if (sscanf(buf, hostNameTokens[i], tempString, &port) == 2) {
- MgmtSrvrId* mgmtSrvrId = new MgmtSrvrId();
- mgmtSrvrId->type = MgmId_TCP;
- mgmtSrvrId->data.tcp.remoteHost = strdup(tempString);
- mgmtSrvrId->data.tcp.port = port;
- add(mgmtSrvrId);
+ MgmtSrvrId mgmtSrvrId;
+ mgmtSrvrId.type = MgmId_TCP;
+ mgmtSrvrId.name.assign(tempString);
+ mgmtSrvrId.port = port;
+ ids.push_back(mgmtSrvrId);
return true;
}
}
@@ -200,13 +177,13 @@ LocalConfig::parseHostName(const char * buf){
bool
LocalConfig::parseFileName(const char * buf){
- char tempString[100];
+ char tempString[1024];
for(int i = 0; fileNameTokens[i] != 0; i++) {
if (sscanf(buf, fileNameTokens[i], tempString) == 1) {
- MgmtSrvrId* mgmtSrvrId = new MgmtSrvrId();
- mgmtSrvrId->type = MgmId_File;
- mgmtSrvrId->data.file.filename = strdup(tempString);
- add(mgmtSrvrId);
+ MgmtSrvrId mgmtSrvrId;
+ mgmtSrvrId.type = MgmId_File;
+ mgmtSrvrId.name.assign(tempString);
+ ids.push_back(mgmtSrvrId);
return true;
}
}
@@ -214,17 +191,15 @@ LocalConfig::parseFileName(const char * buf){
}
bool
-LocalConfig::parseString(const char * connectString, bool onlyNodeId, char *line){
- bool return_value = true;
-
+LocalConfig::parseString(const char * connectString, char *line){
char * for_strtok;
char * copy = strdup(connectString);
+ NdbAutoPtr<char> tmp_aptr(copy);
bool b_nodeId = false;
bool found_other = false;
- for (char *tok = strtok_r(copy,";",&for_strtok);
- tok != 0 && !(onlyNodeId && b_nodeId);
+ for (char *tok = strtok_r(copy,";",&for_strtok); tok != 0;
tok = strtok_r(NULL, ";", &for_strtok)) {
if (tok[0] == '#') continue;
@@ -232,28 +207,27 @@ LocalConfig::parseString(const char * connectString, bool onlyNodeId, char *line
if (!b_nodeId) // only one nodeid definition allowed
if (b_nodeId = parseNodeId(tok))
continue;
- if (onlyNodeId)
- continue;
if (found_other = parseHostName(tok))
continue;
if (found_other = parseFileName(tok))
continue;
- snprintf(line, 150, "Unexpected entry: \"%s\"", tok);
- return_value = false;
- break;
+ if (line)
+ snprintf(line, 150, "Unexpected entry: \"%s\"", tok);
+ return false;
}
- if (return_value && !onlyNodeId && !found_other) {
- return_value = false;
- snprintf(line, 150, "Missing host/file name extry in \"%s\"", connectString);
+ if (!found_other) {
+ if (line)
+ snprintf(line, 150, "Missing host/file name extry in \"%s\"",
+ connectString);
+ return false;
}
- free(copy);
- return return_value;
+ return true;
}
-bool LocalConfig::readFile(const char * filename, bool &fopenError, bool onlyNodeId)
+bool LocalConfig::readFile(const char * filename, bool &fopenError)
{
char line[150], line2[150];
@@ -284,7 +258,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError, bool onlyNod
strcat(theString, line);
}
- bool return_value = parseString(theString, onlyNodeId, line);
+ bool return_value = parseString(theString, line);
if (!return_value) {
snprintf(line2, 150, "Reading %s: %s", filename, line);
@@ -297,12 +271,14 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError, bool onlyNod
}
bool
-LocalConfig::readConnectString(const char * connectString, bool onlyNodeId){
+LocalConfig::readConnectString(const char * connectString){
char line[150], line2[150];
- bool return_value = parseString(connectString, onlyNodeId, line);
+ bool return_value = parseString(connectString, line);
if (!return_value) {
snprintf(line2, 150, "Reading NDB_CONNECTSTRING \"%s\": %s", connectString, line);
setError(0,line2);
}
return return_value;
}
+
+template class Vector<MgmtSrvrId>;
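
LocalConfig drops the hand-rolled MgmtSrvrId array in favour of Vector<MgmtSrvrId> with BaseString names, removes the onlyNodeId mode, and adds a final fallback of "host=localhost:NDB_BASE_PORT" when neither a connect string, a file, nor the environment provides one. A sketch of the escalation order as a plain function; the file-based steps are stubbed out and the NDB_BASE_PORT value is assumed:

    #include <cstdlib>
    #include <cstdio>
    #include <string>

    static const unsigned NDB_BASE_PORT = 2200;    // assumed value of the macro

    // Returns the connect string LocalConfig::init() would end up parsing.
    static std::string pickConnectString(const char *connectString)
    {
      if (connectString && connectString[0])       // 1. explicit connect string
        return connectString;
      // 2. a given file name, then Ndb.cfg in NDB_HOME and in the cwd would be
      //    read and parsed around these steps; omitted in this sketch.
      if (const char *env = std::getenv("NDB_CONNECTSTRING"))
        if (env[0])                                // 3. environment variable
          return env;
      char buf[64];                                // last resort: local mgm server
      std::snprintf(buf, sizeof(buf), "host=localhost:%u", NDB_BASE_PORT);
      return buf;
    }

    int main() {
      std::printf("%s\n", pickConnectString(0).c_str());
      return 0;
    }
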
diff --git a/ndb/src/common/mgmcommon/NdbConfig.c b/ndb/src/common/mgmcommon/NdbConfig.c
index 827ef34a840..6b609b22fa4 100644
--- a/ndb/src/common/mgmcommon/NdbConfig.c
+++ b/ndb/src/common/mgmcommon/NdbConfig.c
@@ -18,43 +18,93 @@
#include <NdbConfig.h>
#include <NdbEnv.h>
-const char*
-NdbConfig_HomePath(char* buf, int buflen){
- const char* p;
- p = NdbEnv_GetEnv("NDB_HOME", buf, buflen);
- if (p == NULL){
- strlcpy(buf, "", buflen);
- p = buf;
- } else {
- const int len = strlen(buf);
- if(len != 0 && buf[len-1] != '/'){
- buf[len] = '/';
- buf[len+1] = 0;
- }
- }
- return p;
-}
-
-const char*
-NdbConfig_NdbCfgName(char* buf, int buflen, int with_ndb_home){
- if (with_ndb_home)
- NdbConfig_HomePath(buf, buflen);
+static char*
+NdbConfig_AllocHomePath(int _len)
+{
+ const char *path= NdbEnv_GetEnv("NDB_HOME", 0, 0);
+ int len= _len;
+ int path_len= 0;
+ char *buf;
+
+ if (path)
+ path_len= strlen(path);
+
+ len+= path_len;
+ buf= malloc(len);
+ if (path_len > 0)
+ snprintf(buf, len, "%s%s", path, DIR_SEPARATOR);
else
- buf[0] = 0;
- strlcat(buf, "Ndb.cfg", buflen);
+ buf[0]= 0;
+
+ return buf;
+}
+
+char*
+NdbConfig_NdbCfgName(int with_ndb_home){
+ char *buf;
+ int len= 0;
+
+ if (with_ndb_home) {
+ buf= NdbConfig_AllocHomePath(128);
+ len= strlen(buf);
+ } else
+ buf= malloc(128);
+ snprintf(buf+len, 128, "Ndb.cfg");
+ return buf;
+}
+
+char*
+NdbConfig_ErrorFileName(int node_id){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u_error.log", node_id);
+ return buf;
+}
+
+char*
+NdbConfig_ClusterLogFileName(int node_id){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u_cluster.log", node_id);
+ return buf;
+}
+
+char*
+NdbConfig_SignalLogFileName(int node_id){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u_signal.log", node_id);
+ return buf;
+}
+
+char*
+NdbConfig_TraceFileName(int node_id, int file_no){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u_trace.log.%u", node_id, file_no);
+ return buf;
+}
+
+char*
+NdbConfig_NextTraceFileName(int node_id){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u_trace.log.next", node_id);
return buf;
}
-const char*
-NdbConfig_ErrorFileName(char* buf, int buflen){
- NdbConfig_HomePath(buf, buflen);
- strlcat(buf, "error.log", buflen);
+char*
+NdbConfig_PidFileName(int node_id){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u.pid", node_id);
return buf;
}
-const char*
-NdbConfig_ClusterLogFileName(char* buf, int buflen){
- NdbConfig_HomePath(buf, buflen);
- strlcat(buf, "cluster.log", buflen);
+char*
+NdbConfig_StdoutFileName(int node_id){
+ char *buf= NdbConfig_AllocHomePath(128);
+ int len= strlen(buf);
+ snprintf(buf+len, 128, "ndb_%u_out.log", node_id);
return buf;
}
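
The fixed caller-supplied buffers in NdbConfig.c give way to allocating helpers: NdbConfig_AllocHomePath() malloc's a buffer prefixed with $NDB_HOME and a separator when the variable is set, and each name builder appends its own per-node suffix (ndb_<nodeid>_error.log, ndb_<nodeid>_cluster.log, and so on), so callers now own and free the returned string. A sketch of that allocation pattern; the 128-byte suffix budget mirrors the hunk, the helper name is illustrative:

    #include <cstdlib>
    #include <cstdio>
    #include <cstring>

    // Returns a malloc'ed path, optionally under $NDB_HOME; caller frees it.
    static char *alloc_node_file_name(int node_id, const char *suffix)
    {
      const char *home = std::getenv("NDB_HOME");
      size_t len = (home ? std::strlen(home) + 1 : 0) + 128;
      char *buf = static_cast<char *>(std::malloc(len));
      size_t pos = home ? (size_t)std::snprintf(buf, len, "%s/", home) : 0;
      std::snprintf(buf + pos, len - pos, "ndb_%d_%s", node_id, suffix);
      return buf;
    }

    int main() {
      char *err = alloc_node_file_name(3, "error.log");   // e.g. ndb_3_error.log
      std::printf("%s\n", err);
      std::free(err);                                     // caller owns the buffer
      return 0;
    }
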
diff --git a/ndb/src/common/portlib/Makefile.am b/ndb/src/common/portlib/Makefile.am
index e6ecb30fe04..6f3a3fe01a9 100644
--- a/ndb/src/common/portlib/Makefile.am
+++ b/ndb/src/common/portlib/Makefile.am
@@ -4,7 +4,7 @@ noinst_LTLIBRARIES = libportlib.la
libportlib_la_SOURCES = \
NdbCondition.c NdbMutex.c NdbSleep.c NdbTick.c \
- NdbEnv.c NdbThread.c NdbHost.c NdbTCP.c \
+ NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp \
NdbDaemon.c NdbMem.c
include $(top_srcdir)/ndb/config/common.mk.am
diff --git a/ndb/src/common/portlib/NdbTCP.c b/ndb/src/common/portlib/NdbTCP.cpp
index 287dc6c2ecd..4bf4936aa30 100644
--- a/ndb/src/common/portlib/NdbTCP.c
+++ b/ndb/src/common/portlib/NdbTCP.cpp
@@ -16,14 +16,15 @@
#include <NdbMutex.h>
-#include "NdbTCP.h"
+#include <NdbTCP.h>
-#ifdef NDB_WIN32
+#if defined NDB_WIN32 || defined SCO
static NdbMutex & LOCK_gethostbyname = * NdbMutex_Create();
#else
static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER;
#endif
+extern "C"
int
Ndb_getInAddr(struct in_addr * dst, const char *address) {
struct hostent * hostPtr;
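NdbTCP.c becomes NdbTCP.cpp here, presumably because the NDB_WIN32/SCO branch initialises LOCK_gethostbyname through a C++ reference, and Ndb_getInAddr is marked extern "C" so existing C callers still link against an unmangled symbol. A small self-contained sketch of that idiom (the demo function name is made up):

    #include <cstring>

    // Compiled as C++, but exported with C linkage: C code that declares
    //   int demo_getInAddr(const char* address);
    // keeps working after the .c -> .cpp rename.
    extern "C" int demo_getInAddr(const char* address)
    {
      return (address && std::strlen(address) > 0) ? 0 : -1;
    }

    int main() { return demo_getInAddr("localhost"); }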
diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp
index fa72af12dac..645517a4b1a 100644
--- a/ndb/src/common/transporter/Packer.cpp
+++ b/ndb/src/common/transporter/Packer.cpp
@@ -391,6 +391,7 @@ Packer::pack(Uint32 * insertPtr,
const SignalHeader * header,
const Uint32 * theData,
const LinearSectionPtr ptr[3]) const {
+ Uint32 i;
Uint32 dataLen32 = header->theLength;
Uint32 no_segs = header->m_noOfSections;
@@ -400,7 +401,7 @@ Packer::pack(Uint32 * insertPtr,
checksumUsed + signalIdUsed + (sizeof(Protocol6)/4);
- for(Uint32 i = 0; i<no_segs; i++){
+ for(i = 0; i<no_segs; i++){
len32 += ptr[i].sz;
}
@@ -429,12 +430,12 @@ Packer::pack(Uint32 * insertPtr,
memcpy(tmpInserPtr, theData, 4 * dataLen32);
tmpInserPtr += dataLen32;
- for(Uint32 i = 0; i<no_segs; i++){
+ for(i = 0; i<no_segs; i++){
tmpInserPtr[i] = ptr[i].sz;
}
tmpInserPtr += no_segs;
- for(Uint32 i = 0; i<no_segs; i++){
+ for(i = 0; i<no_segs; i++){
import(tmpInserPtr, ptr[i]);
}
@@ -450,6 +451,7 @@ Packer::pack(Uint32 * insertPtr,
const Uint32 * theData,
class SectionSegmentPool & thePool,
const SegmentedSectionPtr ptr[3]) const {
+ Uint32 i;
Uint32 dataLen32 = header->theLength;
Uint32 no_segs = header->m_noOfSections;
@@ -458,7 +460,7 @@ Packer::pack(Uint32 * insertPtr,
dataLen32 + no_segs +
checksumUsed + signalIdUsed + (sizeof(Protocol6)/4);
- for(Uint32 i = 0; i<no_segs; i++){
+ for(i = 0; i<no_segs; i++){
len32 += ptr[i].sz;
}
@@ -487,12 +489,12 @@ Packer::pack(Uint32 * insertPtr,
memcpy(tmpInserPtr, theData, 4 * dataLen32);
tmpInserPtr += dataLen32;
- for(Uint32 i = 0; i<no_segs; i++){
+ for(i = 0; i<no_segs; i++){
tmpInserPtr[i] = ptr[i].sz;
}
tmpInserPtr += no_segs;
- for(Uint32 i = 0; i<no_segs; i++){
+ for(i = 0; i<no_segs; i++){
copy(tmpInserPtr, thePool, ptr[i]);
}
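The only functional content of the Packer.cpp hunks is hoisting the loop counter out of the for-statements (the same change appears later in ConfigValues.cpp and TransporterRegistry.cpp), presumably to appease compilers that give for-init declarations the enclosing scope and then reject the next for(Uint32 i = ...) as a redeclaration. The hoisted form in isolation:

    #include <cstdio>

    int main()
    {
      unsigned i;                       // declared once, reused by both loops
      unsigned sum = 0;
      for (i = 0; i < 3; i++) sum += i;
      for (i = 0; i < 3; i++) sum += i; // no redeclaration to object to
      printf("%u\n", sum);              // 6
      return 0;
    }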
diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp
index 7c673f93c22..aa6b650afa8 100644
--- a/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/ndb/src/common/transporter/SHM_Transporter.cpp
@@ -23,26 +23,22 @@
#include <NdbSleep.h>
#include <NdbOut.hpp>
-#ifndef NDB_WIN32
-#include <sys/ipc.h>
-#include <sys/shm.h>
-#endif
-
+#include <InputStream.hpp>
+#include <OutputStream.hpp>
-SHM_Transporter::SHM_Transporter(NodeId lNodeId,
+SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
+ const char *lHostName,
+ const char *rHostName,
+ int r_port,
+ NodeId lNodeId,
NodeId rNodeId,
- key_t _shmKey,
- Uint32 _shmSize,
bool compression,
bool checksum,
- bool signalId) :
- Transporter(lNodeId,
- rNodeId,
- 0,
- compression,
- checksum,
- signalId),
- isServer(lNodeId < rNodeId),
+ bool signalId,
+ key_t _shmKey,
+ Uint32 _shmSize) :
+ Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId,
+ 0, compression, checksum, signalId),
shmKey(_shmKey),
shmSize(_shmSize)
{
@@ -68,16 +64,6 @@ SHM_Transporter::initTransporter(){
return true;
}
-bool
-SHM_Transporter::connectImpl(Uint32 timeOutMillis){
- bool res;
- if(isServer)
- res = connectServer(timeOutMillis);
- else
- res = connectClient(timeOutMillis);
- return res;
-}
-
void
SHM_Transporter::setupBuffers(){
Uint32 sharedSize = 0;
@@ -233,3 +219,127 @@ SHM_Transporter::prepareSend(const SignalHeader * const signalHeader,
return SEND_DISCONNECTED;
}
#endif
+
+
+bool
+SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
+{
+ SocketOutputStream s_output(sockfd);
+ SocketInputStream s_input(sockfd);
+ char buf[256];
+
+ // Create
+ if(!_shmSegCreated){
+ if (!ndb_shm_create()) {
+ report_error(TE_SHM_UNABLE_TO_CREATE_SEGMENT);
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ _shmSegCreated = true;
+ }
+
+ // Attach
+ if(!_attached){
+ if (!ndb_shm_attach()) {
+ report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ _attached = true;
+ }
+
+ // Send ok to client
+ s_output.println("shm server 1 ok");
+
+ // Wait for ok from client
+ if (s_input.gets(buf, 256) == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+
+ int r= connect_common(sockfd);
+
+ if (r) {
+ // Send ok to client
+ s_output.println("shm server 2 ok");
+ // Wait for ok from client
+ if (s_input.gets(buf, 256) == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ }
+
+ NDB_CLOSE_SOCKET(sockfd);
+ return r;
+}
+
+bool
+SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
+{
+ SocketInputStream s_input(sockfd);
+ SocketOutputStream s_output(sockfd);
+ char buf[256];
+
+ // Wait for server to create and attach
+ if (s_input.gets(buf, 256) == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+
+ // Create
+ if(!_shmSegCreated){
+ if (!ndb_shm_get()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ _shmSegCreated = true;
+ }
+
+ // Attach
+ if(!_attached){
+ if (!ndb_shm_attach()) {
+ report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ _attached = true;
+ }
+
+ // Send ok to server
+ s_output.println("shm client 1 ok");
+
+ int r= connect_common(sockfd);
+
+ if (r) {
+ // Wait for ok from server
+ if (s_input.gets(buf, 256) == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ // Send ok to server
+ s_output.println("shm client 2 ok");
+ }
+
+ NDB_CLOSE_SOCKET(sockfd);
+ return r;
+}
+
+bool
+SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
+{
+ if (!checkConnected())
+ return false;
+
+ if(!setupBuffersDone) {
+ setupBuffers();
+ setupBuffersDone=true;
+ }
+
+ if(setupBuffersDone) {
+ NdbSleep_MilliSleep(m_timeOutMillis);
+ if(*serverStatusFlag == 1 && *clientStatusFlag == 1)
+ return true;
+ }
+
+ return false;
+}
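connect_server_impl() and connect_client_impl() above coordinate the shared-memory setup over a plain socket: the server creates and attaches the segment and sends "shm server 1 ok", the client waits for that line, attaches, and answers "shm client 1 ok", and a second round confirms connect_common() on both ends before the socket is closed (the data path from then on is the segment itself). A minimal sketch of that line-based two-round handshake, using socketpair()/fork() in place of the real listener (POSIX only, names illustrative):

    #include <cstdio>
    #include <sys/socket.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main()
    {
      int sv[2];
      char buf[256];
      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
        return 1;

      if (fork() == 0) {                                  // client role
        FILE* in  = fdopen(sv[1], "r");
        FILE* out = fdopen(sv[1], "w");
        fgets(buf, sizeof(buf), in);                      // wait: "shm server 1 ok"
        // ...ndb_shm_get() + ndb_shm_attach() would happen here...
        fprintf(out, "shm client 1 ok\n"); fflush(out);
        fgets(buf, sizeof(buf), in);                      // wait: "shm server 2 ok"
        fprintf(out, "shm client 2 ok\n"); fflush(out);
        _exit(0);                                         // socket done, shm takes over
      }

      FILE* in  = fdopen(sv[0], "r");                     // server role
      FILE* out = fdopen(sv[0], "w");
      // ...ndb_shm_create() + ndb_shm_attach() would happen here...
      fprintf(out, "shm server 1 ok\n"); fflush(out);
      fgets(buf, sizeof(buf), in);                        // wait: "shm client 1 ok"
      fprintf(out, "shm server 2 ok\n"); fflush(out);     // connect_common() succeeded
      fgets(buf, sizeof(buf), in);                        // wait: "shm client 2 ok"
      wait(0);
      return 0;
    }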
diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp
index da4566515e3..be54d0daa2a 100644
--- a/ndb/src/common/transporter/SHM_Transporter.hpp
+++ b/ndb/src/common/transporter/SHM_Transporter.hpp
@@ -32,13 +32,17 @@ typedef Uint32 key_t;
class SHM_Transporter : public Transporter {
friend class TransporterRegistry;
public:
- SHM_Transporter(NodeId lNodeId,
+ SHM_Transporter(TransporterRegistry &,
+ const char *lHostName,
+ const char *rHostName,
+ int r_port,
+ NodeId lNodeId,
NodeId rNodeId,
- key_t shmKey,
- Uint32 shmSize,
bool compression,
bool checksum,
- bool signalId);
+ bool signalId,
+ key_t shmKey,
+ Uint32 shmSize);
/**
* SHM destructor
@@ -75,14 +79,6 @@ protected:
void disconnectImpl();
/**
- * Invokes the connectServer or connectClient.
- * @param timeOutMillis - the timeout the connect thread waits before
- * retrying.
- * @return True if connectImpl successful, otherwise false.
- */
- bool connectImpl(Uint32 timeOutMillis);
-
- /**
* Blocking
*
* -# Create shm segment
@@ -94,7 +90,7 @@ protected:
* i.e., both agrees that the other one has setup the segment.
* Otherwise false.
*/
- bool connectServer(Uint32 timeOutMillis);
+ virtual bool connect_server_impl(NDB_SOCKET_TYPE sockfd);
/**
* Blocking
@@ -108,8 +104,13 @@ protected:
* i.e., both agrees that the other one has setup the segment.
* Otherwise false.
*/
- bool connectClient(Uint32 timeOutMillis);
+ virtual bool connect_client_impl(NDB_SOCKET_TYPE sockfd);
+
+ bool connect_common(NDB_SOCKET_TYPE sockfd);
+ bool ndb_shm_create();
+ bool ndb_shm_get();
+ bool ndb_shm_attach();
/**
* Check if there are two processes attached to the segment (a connection)
@@ -127,7 +128,6 @@ private:
bool _shmSegCreated;
bool _attached;
- const bool isServer;
key_t shmKey;
volatile Uint32 * serverStatusFlag;
volatile Uint32 * clientStatusFlag;
diff --git a/ndb/src/common/transporter/SHM_Transporter.unix.cpp b/ndb/src/common/transporter/SHM_Transporter.unix.cpp
index afbf124432e..28882324fc0 100644
--- a/ndb/src/common/transporter/SHM_Transporter.unix.cpp
+++ b/ndb/src/common/transporter/SHM_Transporter.unix.cpp
@@ -26,114 +26,37 @@
#include <sys/ipc.h>
#include <sys/shm.h>
-
-
bool
-SHM_Transporter::connectServer(Uint32 timeOutMillis){
- if(!_shmSegCreated){
- shmId = shmget(shmKey, shmSize, IPC_CREAT | 960);
- if(shmId == -1){
- perror("shmget: ");
- reportThreadError(remoteNodeId, TE_SHM_UNABLE_TO_CREATE_SEGMENT);
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
- _shmSegCreated = true;
- }
-
- if(!_attached){
- shmBuf = (char *)shmat(shmId, 0, 0);
- if(shmBuf == 0){
- perror("shmat: ");
- reportThreadError(remoteNodeId, TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
- _attached = true;
- }
-
- struct shmid_ds info;
- const int res = shmctl(shmId, IPC_STAT, &info);
- if(res == -1){
- perror("shmctl: ");
- reportThreadError(remoteNodeId, TE_SHM_IPC_STAT);
- NdbSleep_MilliSleep(timeOutMillis);
+SHM_Transporter::ndb_shm_create()
+{
+ shmId = shmget(shmKey, shmSize, IPC_CREAT | 960);
+ if(shmId == -1) {
+ perror("shmget: ");
return false;
}
-
- if(info.shm_nattch == 2 && !setupBuffersDone) {
- setupBuffers();
- setupBuffersDone=true;
- }
-
- if(setupBuffersDone) {
- NdbSleep_MilliSleep(timeOutMillis);
- if(*serverStatusFlag==1 && *clientStatusFlag==1)
- return true;
- }
-
-
- if(info.shm_nattch > 2){
- reportThreadError(remoteNodeId, TE_SHM_DISCONNECT);
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
-
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
+ return true;
}
bool
-SHM_Transporter::connectClient(Uint32 timeOutMillis){
- if(!_shmSegCreated){
-
- shmId = shmget(shmKey, shmSize, 0);
- if(shmId == -1){
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
- _shmSegCreated = true;
- }
-
- if(!_attached){
- shmBuf = (char *)shmat(shmId, 0, 0);
- if(shmBuf == 0){
- reportThreadError(remoteNodeId, TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
- _attached = true;
- }
-
- struct shmid_ds info;
-
- const int res = shmctl(shmId, IPC_STAT, &info);
- if(res == -1){
- reportThreadError(remoteNodeId, TE_SHM_IPC_STAT);
- NdbSleep_MilliSleep(timeOutMillis);
+SHM_Transporter::ndb_shm_get()
+{
+ shmId = shmget(shmKey, shmSize, 0);
+ if(shmId == -1) {
+ perror("shmget: ");
return false;
}
-
-
- if(info.shm_nattch == 2 && !setupBuffersDone) {
- setupBuffers();
- setupBuffersDone=true;
- }
-
- if(setupBuffersDone) {
- NdbSleep_MilliSleep(timeOutMillis);
- if(*serverStatusFlag==1 && *clientStatusFlag==1)
- return true;
- }
+ return true;
+}
- if(info.shm_nattch > 2){
- reportThreadError(remoteNodeId, TE_SHM_DISCONNECT);
- NdbSleep_MilliSleep(timeOutMillis);
+bool
+SHM_Transporter::ndb_shm_attach()
+{
+ shmBuf = (char *)shmat(shmId, 0, 0);
+ if(shmBuf == 0) {
+ perror("shmat: ");
return false;
}
-
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
+ return true;
}
bool
@@ -141,12 +64,12 @@ SHM_Transporter::checkConnected(){
struct shmid_ds info;
const int res = shmctl(shmId, IPC_STAT, &info);
if(res == -1){
- reportError(callbackObj, remoteNodeId, TE_SHM_IPC_STAT);
+ report_error(TE_SHM_IPC_STAT);
return false;
}
if(info.shm_nattch != 2){
- reportError(callbackObj, remoteNodeId, TE_SHM_DISCONNECT);
+ report_error(TE_SHM_DISCONNECT);
return false;
}
return true;
@@ -168,11 +91,10 @@ SHM_Transporter::disconnectImpl(){
if(isServer && _shmSegCreated){
const int res = shmctl(shmId, IPC_RMID, 0);
if(res == -1){
- reportError(callbackObj, remoteNodeId, TE_SHM_UNABLE_TO_REMOVE_SEGMENT);
+ report_error(TE_SHM_UNABLE_TO_REMOVE_SEGMENT);
return;
}
_shmSegCreated = false;
}
setupBuffersDone=false;
}
-
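The unix implementation is reduced to three thin wrappers: ndb_shm_create() (shmget with IPC_CREAT on the creating side), ndb_shm_get() (plain shmget on the attaching side) and ndb_shm_attach() (shmat), with checkConnected() using shmctl(IPC_STAT) to verify that exactly two processes are attached. A standalone SysV sketch of that lifecycle in a single process (error handling trimmed; key and mode are arbitrary demo values, not the patch's IPC_CREAT | 960):

    #include <cstdio>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main()
    {
      const key_t key = 0x4e4442;                      // arbitrary demo key
      const size_t size = 4096;

      int shmid = shmget(key, size, IPC_CREAT | 0600); // ndb_shm_create() side
      if (shmid == -1) { perror("shmget(create)"); return 1; }

      int shmid2 = shmget(key, size, 0);               // ndb_shm_get() side
      if (shmid2 == -1) { perror("shmget(get)"); return 1; }

      void* buf = shmat(shmid2, 0, 0);                 // ndb_shm_attach()
      if (buf == (void*)-1) { perror("shmat"); return 1; }

      struct shmid_ds info;                            // checkConnected(): count attachers
      if (shmctl(shmid, IPC_STAT, &info) == 0)
        printf("attached: %lu\n", (unsigned long)info.shm_nattch);

      shmdt(buf);                                      // detach
      shmctl(shmid, IPC_RMID, 0);                      // remove, as in disconnectImpl()
      return 0;
    }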
diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp
index 99b6a137797..8833b51e236 100644
--- a/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -63,27 +63,23 @@ ndbstrerror::~ndbstrerror(void)
#define ndbstrerror strerror
#endif
-TCP_Transporter::TCP_Transporter(int sendBufSize, int maxRecvSize,
- int portNo,
- const char *rHostName,
+TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg,
+ int sendBufSize, int maxRecvSize,
const char *lHostName,
- NodeId rNodeId, NodeId lNodeId,
+ const char *rHostName,
+ int r_port,
+ NodeId lNodeId,
+ NodeId rNodeId,
int byte_order,
bool compr, bool chksm, bool signalId,
Uint32 _reportFreq) :
- Transporter(lNodeId, rNodeId, byte_order, compr, chksm, signalId),
- m_sendBuffer(sendBufSize),
- isServer(lNodeId < rNodeId),
- port(portNo)
+ Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId,
+ byte_order, compr, chksm, signalId),
+ m_sendBuffer(sendBufSize)
{
maxReceiveSize = maxRecvSize;
- strncpy(remoteHostName, rHostName, sizeof(remoteHostName));
-
// Initialize member variables
- Ndb_getInAddr(&remoteHostAddress, rHostName);
-
- Ndb_getInAddr(&localHostAddress, lHostName);
theSocket = NDB_INVALID_SOCKET;
sendCount = receiveCount = 0;
@@ -108,6 +104,24 @@ TCP_Transporter::~TCP_Transporter() {
receiveBuffer.destroy();
}
+bool TCP_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
+{
+ return connect_common(sockfd);
+}
+
+bool TCP_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
+{
+ return connect_common(sockfd);
+}
+
+bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
+{
+ theSocket = sockfd;
+ setSocketOptions();
+ setSocketNonBlocking(theSocket);
+ return true;
+}
+
bool
TCP_Transporter::initTransporter() {
@@ -316,7 +330,7 @@ TCP_Transporter::doSend() {
sendCount ++;
sendSize += nBytesSent;
if(sendCount == reportFreq){
- reportSendLen(callbackObj,remoteNodeId, sendCount, sendSize);
+ reportSendLen(get_callback_obj(), remoteNodeId, sendCount, sendSize);
sendCount = 0;
sendSize = 0;
}
@@ -331,7 +345,7 @@ TCP_Transporter::doSend() {
#endif
if(DISCONNECT_ERRNO(InetErrno, nBytesSent)){
doDisconnect();
- reportDisconnect(callbackObj, remoteNodeId, InetErrno);
+ report_disconnect(InetErrno);
}
return false;
@@ -361,14 +375,15 @@ TCP_Transporter::doReceive() {
#endif
ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
- reportError(callbackObj, remoteNodeId, TE_INVALID_MESSAGE_LENGTH);
+ report_error(TE_INVALID_MESSAGE_LENGTH);
return 0;
}
receiveCount ++;
receiveSize += nBytesRead;
+
if(receiveCount == reportFreq){
- reportReceiveLen(callbackObj, remoteNodeId, receiveCount, receiveSize);
+ reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize);
receiveCount = 0;
receiveSize = 0;
}
@@ -384,60 +399,17 @@ TCP_Transporter::doReceive() {
if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){
// The remote node has closed down
doDisconnect();
- reportDisconnect(callbackObj, remoteNodeId,InetErrno);
+ report_disconnect(InetErrno);
}
}
return nBytesRead;
}
-bool
-TCP_Transporter::connectImpl(Uint32 timeOutMillis){
- struct timeval timeout = {0, 0};
- timeout.tv_sec = timeOutMillis / 1000;
- timeout.tv_usec = (timeOutMillis % 1000)*1000;
-
- bool retVal = false;
-
- if(isServer){
- if(theSocket == NDB_INVALID_SOCKET){
- startTCPServer();
- }
- if(theSocket == NDB_INVALID_SOCKET)
- {
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
- retVal = acceptClient(&timeout);
- } else {
- // Is client
- retVal = connectClient(&timeout);
- }
-
- if(!retVal) {
- NdbSleep_MilliSleep(timeOutMillis);
- return false;
- }
-
-#if defined NDB_OSE || defined NDB_SOFTOSE
- if(setsockopt(theSocket, SOL_SOCKET, SO_OSEOWNER,
- &theReceiverPid, sizeof(PROCESS)) != 0){
-
- ndbout << "Failed to transfer ownership of socket" << endl;
- NDB_CLOSE_SOCKET(theSocket);
- theSocket = -1;
- return false;
- }
-#endif
-
- return true;
-}
-
-
void
-TCP_Transporter::disconnectImpl() {
+TCP_Transporter::disconnectImpl() {
if(theSocket != NDB_INVALID_SOCKET){
if(NDB_CLOSE_SOCKET(theSocket) < 0){
- reportError(callbackObj, remoteNodeId, TE_ERROR_CLOSING_SOCKET);
+ report_error(TE_ERROR_CLOSING_SOCKET);
}
}
@@ -447,155 +419,3 @@ TCP_Transporter::disconnectImpl() {
theSocket = NDB_INVALID_SOCKET;
}
-
-bool
-TCP_Transporter::startTCPServer() {
-
- int bindResult, listenResult;
-
- // The server variable is the remote server when we are a client
- // htonl and htons returns the parameter in network byte order
- // INADDR_ANY tells the OS kernel to choose the IP address
- struct sockaddr_in server;
- memset((void*)&server, 0, sizeof(server));
- server.sin_family = AF_INET;
- server.sin_addr.s_addr = localHostAddress.s_addr;
- server.sin_port = htons(port);
-
- if (theSocket != NDB_INVALID_SOCKET) {
- return true; // Server socket is already initialized
- }
-
- // Create the socket
- theSocket = socket(AF_INET, SOCK_STREAM, 0);
- if (theSocket == NDB_INVALID_SOCKET) {
- reportThreadError(remoteNodeId, TE_COULD_NOT_CREATE_SOCKET);
- return false;
- }
-
- // Set the socket reuse addr to true, so we are sure we can bind the
- // socket
- int reuseAddr = 1;
- setsockopt(theSocket, SOL_SOCKET, SO_REUSEADDR,
- (char*)&reuseAddr, sizeof(reuseAddr));
-
- // Set the TCP_NODELAY option so also small packets are sent
- // as soon as possible
- int nodelay = 1;
- setsockopt(theSocket, IPPROTO_TCP, TCP_NODELAY,
- (char*)&nodelay, sizeof(nodelay));
-
- // Bind the socket
- bindResult = bind(theSocket, (struct sockaddr *) &server,
- sizeof(server));
- if (bindResult < 0) {
- reportThreadError(remoteNodeId, TE_COULD_NOT_BIND_SOCKET);
- NDB_CLOSE_SOCKET(theSocket);
- theSocket = NDB_INVALID_SOCKET;
- return false;
- }
-
- // Perform listen.
- listenResult = listen(theSocket, 1);
- if (listenResult == 1) {
- reportThreadError(remoteNodeId, TE_LISTEN_FAILED);
- NDB_CLOSE_SOCKET(theSocket);
- theSocket = NDB_INVALID_SOCKET;
- return false;
- }
-
- return true;
-}
-
-
-bool
-TCP_Transporter::acceptClient (struct timeval * timeout){
-
- struct sockaddr_in clientAddress;
-
- fd_set readset;
- FD_ZERO(&readset);
- FD_SET(theSocket, &readset);
- const int res = select(theSocket + 1, &readset, 0, 0, timeout);
- if(res == 0)
- return false;
-
- if(res < 0){
- reportThreadError(remoteNodeId, TE_ERROR_IN_SELECT_BEFORE_ACCEPT);
- return false;
- }
-
- NDB_SOCKLEN_T clientAddressLen = sizeof(clientAddress);
- const NDB_SOCKET_TYPE clientSocket = accept(theSocket,
- (struct sockaddr*)&clientAddress,
- &clientAddressLen);
- if (clientSocket == NDB_INVALID_SOCKET) {
- reportThreadError(remoteNodeId, TE_ACCEPT_RETURN_ERROR);
- return false;
- }
-
- if (clientAddress.sin_addr.s_addr != remoteHostAddress.s_addr) {
- ndbout_c("Wrong client connecting!");
- ndbout_c("connecting address: %s", inet_ntoa(clientAddress.sin_addr));
- ndbout_c("expecting address: %s", inet_ntoa(remoteHostAddress));
- // The newly connected host is not the remote host
- // we wanted to connect to. Disconnect it.
- // XXX This is not valid. We cannot disconnect it.
- NDB_CLOSE_SOCKET(clientSocket);
- return false;
- } else {
- NDB_CLOSE_SOCKET(theSocket);
- theSocket = clientSocket;
- setSocketOptions();
- setSocketNonBlocking(theSocket);
- return true;
- }
-}
-
-bool
-TCP_Transporter::connectClient (struct timeval * timeout){
-
- // Create the socket
- theSocket = socket(AF_INET, SOCK_STREAM, 0);
- if (theSocket == NDB_INVALID_SOCKET) {
- reportThreadError(remoteNodeId, TE_COULD_NOT_CREATE_SOCKET);
- return false;
- }
-
- struct sockaddr_in server;
- memset((void*)&server, 0, sizeof(server));
- server.sin_family = AF_INET;
- server.sin_addr = remoteHostAddress;
- server.sin_port = htons(port);
-
- struct sockaddr_in client;
- memset((void*)&client, 0, sizeof(client));
- client.sin_family = AF_INET;
- client.sin_addr = localHostAddress;
- client.sin_port = 0; // Any port
-
- // Bind the socket
- const int bindResult = bind(theSocket, (struct sockaddr *) &client,
- sizeof(client));
- if (bindResult < 0) {
- reportThreadError(remoteNodeId, TE_COULD_NOT_BIND_SOCKET);
- NDB_CLOSE_SOCKET(theSocket);
- theSocket = NDB_INVALID_SOCKET;
- return false;
- }
-
- const int connectRes = ::connect(theSocket, (struct sockaddr *) &server,
- sizeof(server));
- if(connectRes == 0){
- setSocketOptions();
- setSocketNonBlocking(theSocket);
- return true;
- }
-
- NDB_CLOSE_SOCKET(theSocket);
- theSocket = NDB_INVALID_SOCKET;
- return false;
-}
-
-
-
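All of the listen/bind/accept and connect code is deleted from TCP_Transporter: the TransporterRegistry's SocketServer session (server side) or SocketClient (client side) now delivers an already-connected socket, and connect_common() only has to adopt it, apply the socket options and switch it to non-blocking mode. A rough standalone sketch of that adoption step (POSIX; the helper name is made up and the real setSocketOptions() is not shown in this patch):

    #include <fcntl.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    // Take a socket somebody else connected, tune it, report success.
    static bool adopt_connected_socket(int sockfd)
    {
      int nodelay = 1;                                  // flush small packets promptly
      setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY,
                 &nodelay, sizeof(nodelay));            // best effort

      int flags = fcntl(sockfd, F_GETFL, 0);            // non-blocking I/O
      if (flags == -1 || fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1)
        return false;
      return true;
    }

    int main()
    {
      int sv[2];                                        // stand-in for an accepted socket
      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
        return 1;
      return adopt_connected_socket(sv[0]) ? 0 : 1;
    }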
diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp
index 30b730a5b1c..958cfde03a1 100644
--- a/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -14,24 +14,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-//****************************************************************************
-//
-// AUTHOR
-// Åsa Fransson
-//
-// NAME
-// TCP_Transporter
-//
-// DESCRIPTION
-// A TCP_Transporter instance is created when TCP/IP-communication
-// shall be used (user specified). It handles connect, disconnect,
-// send and receive.
-//
-//
-//
-//***************************************************************************/
-#ifndef TCP_Transporter_H
-#define TCP_Transporter_H
+#ifndef TCP_TRANSPORTER_HPP
+#define TCP_TRANSPORTER_HPP
#include "Transporter.hpp"
#include "SendBuffer.hpp"
@@ -61,11 +45,13 @@ class TCP_Transporter : public Transporter {
friend class TransporterRegistry;
private:
// Initialize member variables
- TCP_Transporter(int sendBufferSize, int maxReceiveSize,
- int port,
- const char *rHostName,
+ TCP_Transporter(TransporterRegistry&,
+ int sendBufferSize, int maxReceiveSize,
const char *lHostName,
- NodeId rHostId, NodeId lHostId,
+ const char *rHostName,
+ int r_port,
+ NodeId lHostId,
+ NodeId rHostId,
int byteorder,
bool compression, bool checksum, bool signalId,
Uint32 reportFreq = 4096);
@@ -121,12 +107,14 @@ protected:
* A client connects to the remote server
* A server accepts any new connections
*/
- bool connectImpl(Uint32 timeOutMillis);
+ virtual bool connect_server_impl(NDB_SOCKET_TYPE sockfd);
+ virtual bool connect_client_impl(NDB_SOCKET_TYPE sockfd);
+ bool connect_common(NDB_SOCKET_TYPE sockfd);
/**
* Disconnects a TCP/IP node. Empty send and receivebuffer.
*/
- void disconnectImpl();
+ virtual void disconnectImpl();
private:
/**
@@ -134,22 +122,12 @@ private:
*/
SendBuffer m_sendBuffer;
- const bool isServer;
- const unsigned int port;
-
// Sending/Receiving socket used by both client and server
NDB_SOCKET_TYPE theSocket;
Uint32 maxReceiveSize;
/**
- * Remote host name/and address
- */
- char remoteHostName[256];
- struct in_addr remoteHostAddress;
- struct in_addr localHostAddress;
-
- /**
* Socket options
*/
int sockOptRcvBufSize;
@@ -164,43 +142,6 @@ private:
bool sendIsPossible(struct timeval * timeout);
/**
- * startTCPServer - None blocking
- *
- * create a server socket
- * bind
- * listen
- *
- * Note: Does not call accept
- */
- bool startTCPServer();
-
- /**
- * acceptClient - Blocking
- *
- * Accept a connection
- * checks if "right" client has connected
- * if so
- * close server socket
- * else
- * close newly created socket and goto begin
- */
- bool acceptClient(struct timeval * timeout);
-
- /**
- * Creates a client socket
- *
- * Note does not call connect
- */
- bool createClientSocket();
-
- /**
- * connectClient - Blocking
- *
- * connects to remote host
- */
- bool connectClient(struct timeval * timeout);
-
- /**
* Statistics
*/
Uint32 reportFreq;
diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp
index 5ca523d5185..7a469252c00 100644
--- a/ndb/src/common/transporter/Transporter.cpp
+++ b/ndb/src/common/transporter/Transporter.cpp
@@ -15,132 +15,118 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <TransporterRegistry.hpp>
+#include <TransporterCallback.hpp>
#include "Transporter.hpp"
#include "TransporterInternalDefinitions.hpp"
#include <NdbSleep.h>
-
-Transporter::Transporter(NodeId lNodeId, NodeId rNodeId,
+#include <SocketAuthenticator.hpp>
+#include <InputStream.hpp>
+#include <OutputStream.hpp>
+
+Transporter::Transporter(TransporterRegistry &t_reg,
+ const char *lHostName,
+ const char *rHostName,
+ int r_port,
+ NodeId lNodeId,
+ NodeId rNodeId,
int _byteorder,
bool _compression, bool _checksum, bool _signalId)
- : localNodeId(lNodeId), remoteNodeId(rNodeId),
- m_packer(_signalId, _checksum)
+ : m_r_port(r_port), localNodeId(lNodeId), remoteNodeId(rNodeId),
+ isServer(lNodeId < rNodeId),
+ m_packer(_signalId, _checksum),
+ m_transporter_registry(t_reg)
{
+ if (rHostName && strlen(rHostName) > 0){
+ strncpy(remoteHostName, rHostName, sizeof(remoteHostName));
+ Ndb_getInAddr(&remoteHostAddress, rHostName);
+ }
+ else
+ {
+ if (!isServer) {
+ ndbout << "Unable to setup transporter. Node " << rNodeId
+ << " must have hostname. Update configuration." << endl;
+ exit(-1);
+ }
+ remoteHostName[0]= 0;
+ }
+ strncpy(localHostName, lHostName, sizeof(localHostName));
+
+ if (strlen(lHostName) > 0)
+ Ndb_getInAddr(&localHostAddress, lHostName);
+
byteOrder = _byteorder;
compressionUsed = _compression;
checksumUsed = _checksum;
signalIdUsed = _signalId;
- _threadError = TE_NO_ERROR;
+ m_connected = false;
+ m_timeOutMillis = 1000;
- _connecting = false;
- _disconnecting = false;
- _connected = false;
- _timeOutMillis = 1000;
- theThreadPtr = NULL;
- theMutexPtr = NdbMutex_Create();
+ if (isServer)
+ m_socket_client= 0;
+ else
+ m_socket_client= new SocketClient(remoteHostName, r_port,
+ new SocketAuthSimple("ndbd", "ndbd passwd"));
}
Transporter::~Transporter(){
- NdbMutex_Destroy(theMutexPtr);
-
- if(theThreadPtr != 0){
- void * retVal;
- NdbThread_WaitFor(theThreadPtr, &retVal);
- NdbThread_Destroy(&theThreadPtr);
- }
+ if (m_socket_client)
+ delete m_socket_client;
}
-extern "C"
-void *
-runConnect_C(void * me)
-{
- runConnect(me);
- NdbThread_Exit(0);
- return NULL;
-}
-
-void *
-runConnect(void * me){
- Transporter * t = (Transporter *) me;
-
- DEBUG("Connect thread to " << t->remoteNodeId << " started");
-
- while(true){
- NdbMutex_Lock(t->theMutexPtr);
- if(t->_disconnecting){
- t->_connecting = false;
- NdbMutex_Unlock(t->theMutexPtr);
- DEBUG("Connect Thread " << t->remoteNodeId << " stop due to disconnect");
- return 0;
- }
- NdbMutex_Unlock(t->theMutexPtr);
-
- bool res = t->connectImpl(t->_timeOutMillis); // 1000 ms
- DEBUG("Waiting for " << t->remoteNodeId << "...");
- if(res){
- t->_connected = true;
- t->_connecting = false;
- t->_errorCount = 0;
- t->_threadError = TE_NO_ERROR;
- DEBUG("Connect Thread " << t->remoteNodeId << " stop due to connect");
- return 0;
- }
- }
-}
-
-void
-Transporter::doConnect() {
+bool
+Transporter::connect_server(NDB_SOCKET_TYPE sockfd) {
+ if(m_connected)
+ return true; // TODO assert(0);
- NdbMutex_Lock(theMutexPtr);
- if(_connecting || _disconnecting || _connected){
- NdbMutex_Unlock(theMutexPtr);
- return;
+ bool res = connect_server_impl(sockfd);
+ if(res){
+ m_connected = true;
+ m_errorCount = 0;
}
-
- _connecting = true;
- _threadError = TE_NO_ERROR;
+ return res;
+}
- // Start thread
+bool
+Transporter::connect_client() {
+ if(m_connected)
+ return true;
+ NDB_SOCKET_TYPE sockfd = m_socket_client->connect();
- char buf[16];
- snprintf(buf, sizeof(buf), "ndb_con_%d", remoteNodeId);
-
- if(theThreadPtr != 0){
- void * retVal;
- NdbThread_WaitFor(theThreadPtr, &retVal);
- NdbThread_Destroy(&theThreadPtr);
+ if (sockfd < 0)
+ return false;
+
+ // send info about own id
+ SocketOutputStream s_output(sockfd);
+ s_output.println("%d", localNodeId);
+ // get remote id
+ int nodeId;
+ SocketInputStream s_input(sockfd);
+ char buf[256];
+ if (s_input.gets(buf, 256) == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
}
-
- theThreadPtr = NdbThread_Create(runConnect_C,
- (void**)this,
- 32768,
- buf,
- NDB_THREAD_PRIO_LOW);
-
- NdbSleep_MilliSleep(100); // Let thread start
-
- NdbMutex_Unlock(theMutexPtr);
+ if (sscanf(buf, "%d", &nodeId) != 1) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return false;
+ }
+ bool res = connect_client_impl(sockfd);
+ if(res){
+ m_connected = true;
+ m_errorCount = 0;
+ }
+ return res;
}
void
-Transporter::doDisconnect() {
-
- NdbMutex_Lock(theMutexPtr);
- _disconnecting = true;
- while(_connecting){
- DEBUG("Waiting for connect to finish...");
-
- NdbMutex_Unlock(theMutexPtr);
- NdbSleep_MilliSleep(500);
- NdbMutex_Lock(theMutexPtr);
- }
-
- _connected = false;
-
+Transporter::doDisconnect() {
+
+ if(!m_connected)
+ return; //assert(0); TODO will fail
+
+ m_connected= false;
disconnectImpl();
- _threadError = TE_NO_ERROR;
- _disconnecting = false;
-
- NdbMutex_Unlock(theMutexPtr);
}
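Transporter::connect_client() replaces the old per-transporter connect thread with a simple greeting over the new socket: the client prints its own node id as a decimal line, reads the peer's line back, parses it with sscanf, and only then hands the socket to connect_client_impl(). A small sketch of that parsing step; fmemopen() (POSIX) stands in for the socket stream here:

    #include <cstdio>

    // Read one "<node id>\n" line, as connect_client() does after sending
    // its own id; returns -1 on a lost or malformed greeting.
    static int parse_node_id(FILE* in)
    {
      char buf[256];
      int node_id;
      if (fgets(buf, sizeof(buf), in) == 0)
        return -1;
      if (sscanf(buf, "%d", &node_id) != 1)
        return -1;
      return node_id;
    }

    int main()
    {
      char greeting[] = "7\n";                           // what the peer would send
      FILE* in = fmemopen(greeting, sizeof(greeting) - 1, "r");
      printf("peer node id: %d\n", parse_node_id(in));   // 7
      fclose(in);
      return 0;
    }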
diff --git a/ndb/src/common/transporter/Transporter.hpp b/ndb/src/common/transporter/Transporter.hpp
index 43b26d45899..9a39f8788bc 100644
--- a/ndb/src/common/transporter/Transporter.hpp
+++ b/ndb/src/common/transporter/Transporter.hpp
@@ -19,6 +19,9 @@
#include <ndb_global.h>
+#include <SocketClient.hpp>
+
+#include <TransporterRegistry.hpp>
#include <TransporterCallback.hpp>
#include "TransporterDefinitions.hpp"
#include "Packer.hpp"
@@ -40,8 +43,9 @@ public:
* None blocking
* Use isConnected() to check status
*/
- virtual void doConnect();
-
+ bool connect_client();
+ bool connect_server(NDB_SOCKET_TYPE socket);
+
/**
* Blocking
*/
@@ -60,14 +64,17 @@ public:
*/
NodeId getRemoteNodeId() const;
-
/**
- * Set callback object
+ * Local (own) Node Id
*/
- void setCallbackObject(void * callback);
+ NodeId getLocalNodeId() const;
protected:
- Transporter(NodeId lNodeId,
+ Transporter(TransporterRegistry &,
+ const char *lHostName,
+ const char *rHostName,
+ int r_port,
+ NodeId lNodeId,
NodeId rNodeId,
int byteorder,
bool compression,
@@ -78,58 +85,59 @@ protected:
* Blocking, for max timeOut milli seconds
* Returns true if connect succeded
*/
- virtual bool connectImpl(Uint32 timeOut) = 0;
+ virtual bool connect_server_impl(NDB_SOCKET_TYPE sockfd) = 0;
+ virtual bool connect_client_impl(NDB_SOCKET_TYPE sockfd) = 0;
/**
* Blocking
*/
virtual void disconnectImpl() = 0;
- const NodeId localNodeId;
+ /**
+ * Remote host name/and address
+ */
+ char remoteHostName[256];
+ char localHostName[256];
+ struct in_addr remoteHostAddress;
+ struct in_addr localHostAddress;
+
+ const unsigned int m_r_port;
+
const NodeId remoteNodeId;
+ const NodeId localNodeId;
+ const bool isServer;
+
unsigned createIndex;
int byteOrder;
bool compressionUsed;
bool checksumUsed;
bool signalIdUsed;
- Packer m_packer;
-
+ Packer m_packer;
private:
- /**
- * Thread and mutex for connect
- */
- NdbThread* theThreadPtr;
- friend void* runConnect(void * me);
+
+ SocketClient *m_socket_client;
protected:
- /**
- * Error reporting from connect thread(s)
- */
- void reportThreadError(NodeId nodeId,
- TransporterError errorCode);
Uint32 getErrorCount();
- TransporterError getThreadError();
- void resetThreadError();
- TransporterError _threadError;
- Uint32 _timeOutMillis;
- Uint32 _errorCount;
-
-protected:
- NdbMutex* theMutexPtr;
- bool _connected; // Are we connected
- bool _connecting; // Connect thread is running
- bool _disconnecting; // We are disconnecting
-
- void * callbackObj;
+ Uint32 m_errorCount;
+ Uint32 m_timeOutMillis;
+
+protected:
+ bool m_connected; // Are we connected
+
+ TransporterRegistry &m_transporter_registry;
+ void *get_callback_obj() { return m_transporter_registry.callbackObj; };
+ void report_disconnect(int err){m_transporter_registry.report_disconnect(remoteNodeId,err);};
+ void report_error(enum TransporterError err){reportError(get_callback_obj(),remoteNodeId,err);};
};
inline
bool
Transporter::isConnected() const {
- return _connected;
+ return m_connected;
}
inline
@@ -138,42 +146,17 @@ Transporter::getRemoteNodeId() const {
return remoteNodeId;
}
-inline
-void
-Transporter::reportThreadError(NodeId nodeId, TransporterError errorCode)
-{
-#if 0
- ndbout_c("Transporter::reportThreadError (NodeId: %d, Error code: %d)",
- nodeId, errorCode);
-#endif
- _threadError = errorCode;
- _errorCount++;
-}
-
inline
-TransporterError
-Transporter::getThreadError(){
- return _threadError;
+NodeId
+Transporter::getLocalNodeId() const {
+ return remoteNodeId;
}
inline
Uint32
Transporter::getErrorCount()
{
- return _errorCount;
-}
-
-inline
-void
-Transporter::resetThreadError()
-{
- _threadError = TE_NO_ERROR;
-}
-
-inline
-void
-Transporter::setCallbackObject(void * callback) {
- callbackObj = callback;
+ return m_errorCount;
}
#endif // Define of Transporter_H
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp
index 3f98eeed89e..01f1f74f053 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -16,10 +16,11 @@
#include <ndb_global.h>
-#include "TransporterRegistry.hpp"
+#include <TransporterRegistry.hpp>
#include "TransporterInternalDefinitions.hpp"
#include "Transporter.hpp"
+#include <SocketAuthenticator.hpp>
#ifdef NDB_TCP_TRANSPORTER
#include "TCP_Transporter.hpp"
@@ -42,20 +43,67 @@
#include "NdbOut.hpp"
#include <NdbSleep.h>
#include <NdbTick.h>
-#define STEPPING 1
+#include <InputStream.hpp>
+#include <OutputStream.hpp>
+
+SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
+{
+ if (m_auth && !m_auth->server_authenticate(sockfd)){
+ NDB_CLOSE_SOCKET(sockfd);
+ return 0;
+ }
+
+ {
+ // read node id from client
+ int nodeId;
+ SocketInputStream s_input(sockfd);
+ char buf[256];
+ if (s_input.gets(buf, 256) == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return 0;
+ }
+ if (sscanf(buf, "%d", &nodeId) != 1) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return 0;
+ }
+
+ //check that nodeid is valid and that there is an allocated transporter
+ if ( nodeId < 0 || nodeId >= m_transporter_registry->maxTransporters) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return 0;
+ }
+ if (m_transporter_registry->theTransporters[nodeId] == 0) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return 0;
+ }
+
+ //check that the transporter should be connected
+ if (m_transporter_registry->performStates[nodeId] != TransporterRegistry::CONNECTING) {
+ NDB_CLOSE_SOCKET(sockfd);
+ return 0;
+ }
+
+ Transporter *t= m_transporter_registry->theTransporters[nodeId];
+
+ // send info about own id (just as response to acknowledge connection)
+ SocketOutputStream s_output(sockfd);
+ s_output.println("%d", t->getLocalNodeId());
+
+ // setup transporter (transporter responsible for closing sockfd)
+ t->connect_server(sockfd);
+ }
+
+ return 0;
+}
TransporterRegistry::TransporterRegistry(void * callback,
unsigned _maxTransporters,
unsigned sizeOfLongSignalMemory) {
+ m_transporter_service= 0;
nodeIdSpecified = false;
maxTransporters = _maxTransporters;
sendCounter = 1;
- m_ccCount = 0;
- m_ccIndex = 0;
- m_ccStep = STEPPING;
- m_ccReady = false;
- m_nTransportersPerformConnect=0;
callbackObj=callback;
@@ -82,7 +130,7 @@ TransporterRegistry::TransporterRegistry(void * callback,
theSHMTransporters[i] = NULL;
theOSETransporters[i] = NULL;
theTransporters[i] = NULL;
- performStates[i] = PerformNothing;
+ performStates[i] = DISCONNECTED;
ioStates[i] = NoHalt;
}
theOSEReceiver = 0;
@@ -152,15 +200,15 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
if(theTransporters[config->remoteNodeId] != NULL)
return false;
-
-
- TCP_Transporter * t = new TCP_Transporter(config->sendBufferSize,
- config->maxReceiveSize,
- config->port,
- config->remoteHostName,
+
+ TCP_Transporter * t = new TCP_Transporter(*this,
+ config->sendBufferSize,
+ config->maxReceiveSize,
config->localHostName,
- config->remoteNodeId,
+ config->remoteHostName,
+ config->port,
localNodeId,
+ config->remoteNodeId,
config->byteOrder,
config->compression,
config->checksum,
@@ -172,13 +220,11 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
return false;
}
- t->setCallbackObject(callbackObj);
-
// Put the transporter in the transporter arrays
theTCPTransporters[nTCPTransporters] = t;
theTransporters[t->getRemoteNodeId()] = t;
theTransporterTypes[t->getRemoteNodeId()] = tt_TCP_TRANSPORTER;
- performStates[t->getRemoteNodeId()] = PerformNothing;
+ performStates[t->getRemoteNodeId()] = DISCONNECTED;
nTransporters++;
nTCPTransporters++;
@@ -228,12 +274,11 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) {
delete t;
return false;
}
- t->setCallbackObject(callbackObj);
// Put the transporter in the transporter arrays
theOSETransporters[nOSETransporters] = t;
theTransporters[t->getRemoteNodeId()] = t;
theTransporterTypes[t->getRemoteNodeId()] = tt_OSE_TRANSPORTER;
- performStates[t->getRemoteNodeId()] = PerformNothing;
+ performStates[t->getRemoteNodeId()] = DISCONNECTED;
nTransporters++;
nOSETransporters++;
@@ -279,12 +324,11 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) {
delete t;
return false;
}
- t->setCallbackObject(callbackObj);
// Put the transporter in the transporter arrays
theSCITransporters[nSCITransporters] = t;
theTransporters[t->getRemoteNodeId()] = t;
theTransporterTypes[t->getRemoteNodeId()] = tt_SCI_TRANSPORTER;
- performStates[t->getRemoteNodeId()] = PerformNothing;
+ performStates[t->getRemoteNodeId()] = DISCONNECTED;
nTransporters++;
nSCITransporters++;
@@ -307,13 +351,17 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
if(theTransporters[config->remoteNodeId] != NULL)
return false;
- SHM_Transporter * t = new SHM_Transporter(config->localNodeId,
+ SHM_Transporter * t = new SHM_Transporter(*this,
+ "localhost",
+ "localhost",
+ config->port,
+ localNodeId,
config->remoteNodeId,
- config->shmKey,
- config->shmSize,
config->compression,
config->checksum,
- config->signalId
+ config->signalId,
+ config->shmKey,
+ config->shmSize
);
if (t == NULL)
return false;
@@ -321,12 +369,11 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
delete t;
return false;
}
- t->setCallbackObject(callbackObj);
// Put the transporter in the transporter arrays
theSHMTransporters[nSHMTransporters] = t;
theTransporters[t->getRemoteNodeId()] = t;
theTransporterTypes[t->getRemoteNodeId()] = tt_SHM_TRANSPORTER;
- performStates[t->getRemoteNodeId()] = PerformNothing;
+ performStates[t->getRemoteNodeId()] = DISCONNECTED;
nTransporters++;
nSHMTransporters++;
@@ -781,7 +828,7 @@ TransporterRegistry::performReceive(){
TCP_Transporter *t = theTCPTransporters[i];
const NodeId nodeId = t->getRemoteNodeId();
const NDB_SOCKET_TYPE socket = t->getSocket();
- if(performStates[nodeId] == PerformIO){
+ if(is_connected(nodeId)){
if(t->isConnected() && FD_ISSET(socket, &tcpReadset)) {
const int receiveSize = t->doReceive();
if(receiveSize > 0){
@@ -804,7 +851,7 @@ TransporterRegistry::performReceive(){
checkJobBuffer();
SCI_Transporter *t = theSCITransporters[i];
const NodeId nodeId = t->getRemoteNodeId();
- if(performStates[nodeId] == PerformIO){
+ if(is_connected(nodeId)){
if(t->isConnected() && t->checkConnected()){
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
@@ -819,7 +866,7 @@ TransporterRegistry::performReceive(){
checkJobBuffer();
SHM_Transporter *t = theSHMTransporters[i];
const NodeId nodeId = t->getRemoteNodeId();
- if(performStates[nodeId] == PerformIO){
+ if(is_connected(nodeId)){
if(t->isConnected() && t->checkConnected()){
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
@@ -834,13 +881,13 @@ TransporterRegistry::performReceive(){
static int x = 0;
void
TransporterRegistry::performSend(){
-
+ int i;
sendCounter = 1;
#ifdef NDB_OSE_TRANSPORTER
for (int i = 0; i < nOSETransporters; i++){
OSE_Transporter *t = theOSETransporters[i];
- if((performStates[t->getRemoteNodeId()] == PerformIO) &&
+ if((is_connected(t->getRemoteNodeId()) &&
(t->isConnected())) {
t->doSend();
}//if
@@ -858,7 +905,7 @@ TransporterRegistry::performSend(){
FD_ZERO(&writeset);
// Prepare for sending and receiving
- for (int i = 0; i < nTCPTransporters; i++) {
+ for (i = 0; i < nTCPTransporters; i++) {
TCP_Transporter * t = theTCPTransporters[i];
// If the transporter is connected
@@ -883,11 +930,11 @@ TransporterRegistry::performSend(){
if (tmp == 0) {
return;
}//if
- for (int i = 0; i < nTCPTransporters; i++) {
+ for (i = 0; i < nTCPTransporters; i++) {
TCP_Transporter *t = theTCPTransporters[i];
const NodeId nodeId = t->getRemoteNodeId();
const int socket = t->getSocket();
- if(performStates[nodeId] == PerformIO){
+ if(is_connected(nodeId)){
if(t->isConnected() && FD_ISSET(socket, &writeset)) {
t->doSend();
}//if
@@ -896,21 +943,21 @@ TransporterRegistry::performSend(){
}
#endif
#ifdef NDB_TCP_TRANSPORTER
- for (int i = x; i < nTCPTransporters; i++) {
+ for (i = x; i < nTCPTransporters; i++) {
TCP_Transporter *t = theTCPTransporters[i];
if (t &&
(t->hasDataToSend()) &&
(t->isConnected()) &&
- (performStates[t->getRemoteNodeId()] == PerformIO)) {
+ (is_connected(t->getRemoteNodeId()))) {
t->doSend();
}//if
}//for
- for (int i = 0; i < x && i < nTCPTransporters; i++) {
+ for (i = 0; i < x && i < nTCPTransporters; i++) {
TCP_Transporter *t = theTCPTransporters[i];
if (t &&
(t->hasDataToSend()) &&
(t->isConnected()) &&
- (performStates[t->getRemoteNodeId()] == PerformIO)) {
+ (is_connected(t->getRemoteNodeId()))) {
t->doSend();
}//if
}//for
@@ -921,11 +968,11 @@ TransporterRegistry::performSend(){
#ifdef NDB_SCI_TRANSPORTER
//scroll through the SCI transporters,
// get each transporter, check if connected, send data
- for (int i=0; i<nSCITransporters; i++) {
+ for (i=0; i<nSCITransporters; i++) {
SCI_Transporter *t = theSCITransporters[i];
const NodeId nodeId = t->getRemoteNodeId();
- if(performStates[nodeId] == PerformIO){
+ if(is_connected(nodeId)){
if(t->isConnected() && t->hasDataToSend()) {
t->doSend();
} //if
@@ -961,70 +1008,212 @@ TransporterRegistry::printState(){
}
#endif
-PerformState
-TransporterRegistry::performState(NodeId nodeId) {
- return performStates[nodeId];
+IOState
+TransporterRegistry::ioState(NodeId nodeId) {
+ return ioStates[nodeId];
}
-#ifdef DEBUG_TRANSPORTER
-const char *
-performStateString(PerformState state){
- switch(state){
- case PerformNothing:
- return "PerformNothing";
- break;
- case PerformIO:
- return "PerformIO";
+void
+TransporterRegistry::setIOState(NodeId nodeId, IOState state) {
+ DEBUG("TransporterRegistry::setIOState("
+ << nodeId << ", " << state << ")");
+ ioStates[nodeId] = state;
+}
+
+static void *
+run_start_clients_C(void * me)
+{
+ ((TransporterRegistry*) me)->start_clients_thread();
+ NdbThread_Exit(0);
+ return me;
+}
+
+// Run by kernel thread
+void
+TransporterRegistry::do_connect(NodeId node_id)
+{
+ PerformState &curr_state = performStates[node_id];
+ switch(curr_state){
+ case DISCONNECTED:
break;
- case PerformConnect:
- return "PerformConnect";
+ case CONNECTED:
+ return;
+ case CONNECTING:
+ return;
+ case DISCONNECTING:
break;
- case PerformDisconnect:
- return "PerformDisconnect";
+ }
+ curr_state= CONNECTING;
+}
+void
+TransporterRegistry::do_disconnect(NodeId node_id)
+{
+ PerformState &curr_state = performStates[node_id];
+ switch(curr_state){
+ case DISCONNECTED:
+ return;
+ case CONNECTED:
break;
- case RemoveTransporter:
- return "RemoveTransporter";
+ case CONNECTING:
break;
+ case DISCONNECTING:
+ return;
}
- return "Unknown";
+ curr_state= DISCONNECTING;
}
-#endif
void
-TransporterRegistry::setPerformState(NodeId nodeId, PerformState state) {
- DEBUG("TransporterRegistry::setPerformState("
- << nodeId << ", " << performStateString(state) << ")");
-
- performStates[nodeId] = state;
+TransporterRegistry::report_connect(NodeId node_id)
+{
+ performStates[node_id] = CONNECTED;
+ reportConnect(callbackObj, node_id);
+}
+
+void
+TransporterRegistry::report_disconnect(NodeId node_id, int errnum)
+{
+ performStates[node_id] = DISCONNECTED;
+ reportDisconnect(callbackObj, node_id, errnum);
}
void
-TransporterRegistry::setPerformState(PerformState state) {
- int count = 0;
- int index = 0;
- while(count < nTransporters){
- if(theTransporters[index] != 0){
- setPerformState(theTransporters[index]->getRemoteNodeId(), state);
- count ++;
+TransporterRegistry::update_connections()
+{
+ for (int i= 0, n= 0; n < nTransporters; i++){
+ Transporter * t = theTransporters[i];
+ if (!t)
+ continue;
+ n++;
+
+ const NodeId nodeId = t->getRemoteNodeId();
+ switch(performStates[nodeId]){
+ case CONNECTED:
+ case DISCONNECTED:
+ break;
+ case CONNECTING:
+ if(t->isConnected())
+ report_connect(nodeId);
+ break;
+ case DISCONNECTING:
+ if(!t->isConnected())
+ report_disconnect(nodeId, 0);
+ break;
}
- index ++;
}
}
-IOState
-TransporterRegistry::ioState(NodeId nodeId) {
- return ioStates[nodeId];
+// run as own thread
+void
+TransporterRegistry::start_clients_thread()
+{
+ while (m_run_start_clients_thread) {
+ NdbSleep_MilliSleep(100);
+ for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){
+ Transporter * t = theTransporters[i];
+ if (!t)
+ continue;
+ n++;
+
+ const NodeId nodeId = t->getRemoteNodeId();
+ switch(performStates[nodeId]){
+ case CONNECTING:
+ if(!t->isConnected() && !t->isServer)
+ t->connect_client();
+ break;
+ case DISCONNECTING:
+ if(t->isConnected())
+ t->doDisconnect();
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
-void
-TransporterRegistry::setIOState(NodeId nodeId, IOState state) {
- DEBUG("TransporterRegistry::setIOState("
- << nodeId << ", " << state << ")");
- ioStates[nodeId] = state;
+bool
+TransporterRegistry::start_clients()
+{
+ m_run_start_clients_thread= true;
+ m_start_clients_thread= NdbThread_Create(run_start_clients_C,
+ (void**)this,
+ 32768,
+ "ndb_start_clients",
+ NDB_THREAD_PRIO_LOW);
+ if (m_start_clients_thread == 0) {
+ m_run_start_clients_thread= false;
+ return false;
+ }
+ return true;
+}
+
+bool
+TransporterRegistry::stop_clients()
+{
+ if (m_start_clients_thread) {
+ m_run_start_clients_thread= false;
+ void* status;
+ int r= NdbThread_WaitFor(m_start_clients_thread, &status);
+ NdbThread_Destroy(&m_start_clients_thread);
+ }
+ return true;
+}
+
+bool
+TransporterRegistry::start_service(SocketServer& socket_server)
+{
+#if 0
+ for (int i= 0, n= 0; n < nTransporters; i++){
+ Transporter * t = theTransporters[i];
+ if (!t)
+ continue;
+ n++;
+ if (t->isServer) {
+ t->m_service = new TransporterService(new SocketAuthSimple("ndbd passwd"));
+ if(!socket_server.setup(t->m_service, t->m_r_port, 0))
+ {
+ ndbout_c("Unable to setup transporter service port: %d!\n"
+ "Please check if the port is already used,\n"
+ "(perhaps a mgmtsrvrserver is already running)",
+ m_service_port);
+ delete t->m_service;
+ return false;
+ }
+ }
+ }
+#endif
+
+ if (m_service_port != 0) {
+
+ m_transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
+
+ if (nodeIdSpecified != true) {
+ ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
+ return false;
+ }
+
+ //m_interface_name = "ndbd";
+ m_interface_name = 0;
+
+ if(!socket_server.setup(m_transporter_service, m_service_port, m_interface_name))
+ {
+ ndbout_c("Unable to setup transporter service port: %d!\n"
+ "Please check if the port is already used,\n"
+ "(perhaps a mgmtsrvrserver is already running)",
+ m_service_port);
+ delete m_transporter_service;
+ return false;
+ }
+ m_transporter_service->setTransporterRegistry(this);
+ } else
+ m_transporter_service= 0;
+
+ return true;
}
void
-TransporterRegistry::startReceiving(){
+TransporterRegistry::startReceiving()
+{
#ifdef NDB_OSE_TRANSPORTER
if(theOSEReceiver != NULL){
theOSEReceiver->createPhantom();
@@ -1081,99 +1270,6 @@ TransporterRegistry::stopSending(){
#endif
}
-/**
- * The old implementation did not scale with a large
- * number of nodes. (Watchdog killed NDB because
- * it took too long time to allocated threads in
- * doConnect.
- *
- * The new implementation only checks the connection
- * for a number of transporters (STEPPING), until to
- * the point where all transporters has executed
- * doConnect once. After that, the behaviour is as
- * in the old implemenation, i.e, checking the connection
- * for all transporters.
- * @todo: instead of STEPPING, maybe we should only
- * allow checkConnections to execute for a certain
- * time that somehow factors in heartbeat times and
- * watchdog times.
- *
- */
-
-void
-TransporterRegistry::checkConnections(){
- if(m_ccStep > nTransporters)
- m_ccStep = nTransporters;
-
- while(m_ccCount < m_ccStep){
- if(theTransporters[m_ccIndex] != 0){
- Transporter * t = theTransporters[m_ccIndex];
- const NodeId nodeId = t->getRemoteNodeId();
- if(t->getThreadError() != 0) {
- reportError(callbackObj, nodeId, t->getThreadError());
- t->resetThreadError();
- }
-
- switch(performStates[nodeId]){
- case PerformConnect:
- if(!t->isConnected()){
- t->doConnect();
- if(m_nTransportersPerformConnect!=nTransporters)
- m_nTransportersPerformConnect++;
-
- } else {
- performStates[nodeId] = PerformIO;
- reportConnect(callbackObj, nodeId);
- }
- break;
- case PerformDisconnect:
- {
- bool wasConnected = t->isConnected();
- t->doDisconnect();
- performStates[nodeId] = PerformNothing;
- if(wasConnected){
- reportDisconnect(callbackObj, nodeId,0);
- }
- }
- break;
- case RemoveTransporter:
- removeTransporter(nodeId);
- break;
- case PerformNothing:
- case PerformIO:
- break;
- }
- m_ccCount ++;
- }
- m_ccIndex ++;
- }
-
- if(!m_ccReady) {
- if(m_ccCount < nTransporters) {
- if(nTransporters - m_ccStep < STEPPING)
- m_ccStep += nTransporters-m_ccStep;
- else
- m_ccStep += STEPPING;
-
- // ndbout_c("count %d step %d ", m_ccCount, m_ccStep);
- }
- else {
- m_ccCount = 0;
- m_ccIndex = 0;
- m_ccStep = STEPPING;
- // ndbout_c("count %d step %d ", m_ccCount, m_ccStep);
- }
- }
- if((nTransporters == m_nTransportersPerformConnect) || m_ccReady) {
- m_ccReady = true;
- m_ccCount = 0;
- m_ccIndex = 0;
- m_ccStep = nTransporters;
- // ndbout_c("alla count %d step %d ", m_ccCount, m_ccStep);
- }
-
-}//TransporterRegistry::checkConnections()
-
NdbOut & operator <<(NdbOut & out, SignalHeader & sh){
out << "-- Signal Header --" << endl;
out << "theLength: " << sh.theLength << endl;
diff --git a/ndb/src/common/util/BaseString.cpp b/ndb/src/common/util/BaseString.cpp
index d15249adf72..8b7df485f77 100644
--- a/ndb/src/common/util/BaseString.cpp
+++ b/ndb/src/common/util/BaseString.cpp
@@ -412,3 +412,6 @@ int main()
}
#endif
+
+template class Vector<char *>;
+template class Vector<BaseString>;
diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp
index 7fc99bc526c..8a14882550c 100644
--- a/ndb/src/common/util/ConfigValues.cpp
+++ b/ndb/src/common/util/ConfigValues.cpp
@@ -105,19 +105,19 @@ ConfigValues::getByPos(Uint32 pos, Entry * result) const {
Uint64 *
ConfigValues::get64(Uint32 index) const {
assert(index < m_int64Count);
- const Uint32 * data = m_values + (m_size << 1);
+ const Uint32 * data = m_values + (m_size << 1);
Uint64 * ptr = (Uint64*)data;
- ptr += index;
+ ptr += index;
return ptr;
}
char **
ConfigValues::getString(Uint32 index) const {
assert(index < m_stringCount);
- const Uint32 * data = m_values + (m_size << 1);
- char * ptr = (char*)data;
+ const Uint32 * data = m_values + (m_size << 1);
+ char * ptr = (char*)data;
ptr += m_dataSize;
- ptr -= (index * sizeof(char *));
+ ptr -= (index * sizeof(char *));
return (char**)ptr;
}
@@ -261,9 +261,9 @@ directory(Uint32 sz){
ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){
m_sectionCounter = (1 << KP_SECTION_SHIFT);
m_freeKeys = directory(keys);
- m_freeData = data;
+ m_freeData = (data + 7) & ~7;
m_currentSection = 0;
- m_cfg = create(m_freeKeys, data);
+ m_cfg = create(m_freeKeys, m_freeData);
}
ConfigValuesFactory::ConfigValuesFactory(ConfigValues * cfg){
@@ -316,7 +316,8 @@ ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){
m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size);
m_freeData = (m_freeData >= fs ? m_cfg->m_dataSize : fs + m_cfg->m_dataSize);
m_freeKeys = directory(m_freeKeys);
-
+ m_freeData = (m_freeData + 7) & ~7;
+
ConfigValues * m_tmp = m_cfg;
m_cfg = create(m_freeKeys, m_freeData);
put(* m_tmp);
@@ -333,6 +334,7 @@ ConfigValuesFactory::shrink(){
m_freeKeys = m_cfg->m_size - m_freeKeys;
m_freeData = m_cfg->m_dataSize - m_freeData;
m_freeKeys = directory(m_freeKeys);
+ m_freeData = (m_freeData + 7) & ~7;
ConfigValues * m_tmp = m_cfg;
m_cfg = create(m_freeKeys, m_freeData);
@@ -462,7 +464,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){
case ConfigValues::StringType:{
Uint32 index = m_cfg->m_stringCount++;
m_cfg->m_values[pos+1] = index;
- char ** ref = m_cfg->getString(index);
+ char ** ref = m_cfg->getString(index);
* ref = strdup(entry.m_string ? entry.m_string : "");
m_freeKeys--;
m_freeData -= sizeof(char *);
@@ -578,11 +580,11 @@ ConfigValues::getPackedSize() const {
Uint32
ConfigValues::pack(void * _dst, Uint32 _len) const {
-
+ Uint32 i;
char * dst = (char*)_dst;
memcpy(dst, Magic, sizeof(Magic)); dst += sizeof(Magic);
- for(Uint32 i = 0; i < 2 * m_size; i += 2){
+ for(i = 0; i < 2 * m_size; i += 2){
Uint32 key = m_values[i];
Uint32 val = m_values[i+1];
if(key != CFV_KEY_FREE){
@@ -621,7 +623,7 @@ ConfigValues::pack(void * _dst, Uint32 _len) const {
const Uint32 * sum = (Uint32*)_dst;
const Uint32 len = ((Uint32*)dst) - sum;
Uint32 chk = 0;
- for(Uint32 i = 0; i<len; i++){
+ for(i = 0; i<len; i++){
chk ^= htonl(sum[i]);
}
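The ConfigValuesFactory changes round the data-area size up to a multiple of 8 with (n + 7) & ~7, presumably so the Uint64 entries that get64() reads right behind the key/value table stay 8-byte aligned. A quick check of the round-up expression:

    #include <cstdio>

    int main()
    {
      const unsigned samples[] = { 0, 1, 7, 8, 9, 15, 16 };
      for (unsigned n : samples)
        printf("%2u -> %2u\n", n, (n + 7) & ~7u);   // 0 8 8 8 16 16 16
      return 0;
    }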
diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am
index 59d9775b8e3..678added01e 100644
--- a/ndb/src/common/util/Makefile.am
+++ b/ndb/src/common/util/Makefile.am
@@ -3,7 +3,8 @@ noinst_LTLIBRARIES = libgeneral.la
libgeneral_la_SOURCES = \
File.cpp md5_hash.cpp Properties.cpp socket_io.cpp \
- SimpleProperties.cpp Parser.cpp InputStream.cpp SocketServer.cpp \
+ SimpleProperties.cpp Parser.cpp InputStream.cpp \
+ SocketServer.cpp SocketClient.cpp SocketAuthenticator.cpp\
OutputStream.cpp NdbOut.cpp BaseString.cpp Base64.cpp \
NdbSqlUtil.cpp new.cpp \
uucode.c random.c getarg.c version.c \
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index e34d6d18539..9d05fc7fb02 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -98,11 +98,11 @@ NdbSqlUtil::m_typeList[] = {
},
{
Type::Mediumint,
- NULL // cmpMediumint
+ cmpMediumint
},
{
Type::Mediumunsigned,
- NULL // cmpMediumunsigned
+ cmpMediumunsigned
},
{
Type::Int,
@@ -130,7 +130,7 @@ NdbSqlUtil::m_typeList[] = {
},
{
Type::Decimal,
- NULL // cmpDecimal
+ NULL // cmpDecimal
},
{
Type::Char,
@@ -142,11 +142,11 @@ NdbSqlUtil::m_typeList[] = {
},
{
Type::Binary,
- NULL // cmpBinary
+ cmpBinary
},
{
Type::Varbinary,
- NULL // cmpVarbinary
+ cmpVarbinary
},
{
Type::Datetime,
@@ -154,15 +154,15 @@ NdbSqlUtil::m_typeList[] = {
},
{
Type::Timespec,
- NULL // cmpTimespec
+ cmpTimespec
},
{
Type::Blob,
- NULL // cmpDatetime
+ cmpBlob
},
{
- Type::Clob,
- cmpClob
+ Type::Text,
+ cmpText
}
};
@@ -299,9 +299,9 @@ NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size
}
int
-NdbSqlUtil::cmpClob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Clob, p1, p2, full, size);
+ return cmp(Type::Text, p1, p2, full, size);
}
#ifdef NDB_SQL_UTIL_TEST
diff --git a/ndb/src/common/util/Parser.cpp b/ndb/src/common/util/Parser.cpp
index 2e8120f88ce..dea128ccf66 100644
--- a/ndb/src/common/util/Parser.cpp
+++ b/ndb/src/common/util/Parser.cpp
@@ -347,3 +347,4 @@ ParserImpl::checkMandatory(Context* ctx, const Properties* props){
return true;
}
+template class Vector<const ParserRow<ParserImpl::Dummy>*>;
diff --git a/ndb/src/common/util/Properties.cpp b/ndb/src/common/util/Properties.cpp
index 3e41056ac18..80fb0027830 100644
--- a/ndb/src/common/util/Properties.cpp
+++ b/ndb/src/common/util/Properties.cpp
@@ -56,7 +56,7 @@ class PropertiesImpl {
PropertiesImpl(const PropertiesImpl &); // Not implemented
PropertiesImpl& operator=(const PropertiesImpl&); // Not implemented
public:
- PropertiesImpl(Properties *);
+ PropertiesImpl(Properties *, bool case_insensitive);
PropertiesImpl(Properties *, const PropertiesImpl &);
~PropertiesImpl();
@@ -69,6 +69,7 @@ public:
bool m_insensitive;
int (* compare)(const char *s1, const char *s2);
+ void setCaseInsensitiveNames(bool value);
void grow(int sizeToAdd);
PropertyImpl * get(const char * name) const;
@@ -113,9 +114,9 @@ Property::~Property(){
/**
* Methods for Properties
*/
-Properties::Properties(){
+Properties::Properties(bool case_insensitive){
parent = 0;
- impl = new PropertiesImpl(this);
+ impl = new PropertiesImpl(this, case_insensitive);
}
Properties::Properties(const Properties & org){
@@ -124,7 +125,7 @@ Properties::Properties(const Properties & org){
}
Properties::Properties(const Property * anArray, int arrayLen){
- impl = new PropertiesImpl(this);
+ impl = new PropertiesImpl(this, false);
put(anArray, arrayLen);
}
@@ -169,6 +170,7 @@ put(PropertiesImpl * impl, const char * name, T value, bool replace){
return tmp->put(new PropertyImpl(short_name, value));
}
+
bool
Properties::put(const char * name, Uint32 value, bool replace){
return ::put(impl, name, value, replace);
@@ -478,13 +480,12 @@ Properties::unpack(const Uint32 * buf, Uint32 bufLen){
/**
* Methods for PropertiesImpl
*/
-PropertiesImpl::PropertiesImpl(Properties * p){
+PropertiesImpl::PropertiesImpl(Properties * p, bool case_insensitive){
this->properties = p;
items = 0;
size = 25;
content = new PropertyImpl * [size];
- this->m_insensitive = false;
- this->compare = strcmp;
+ setCaseInsensitiveNames(case_insensitive);
}
PropertiesImpl::PropertiesImpl(Properties * p, const PropertiesImpl & org){
@@ -505,6 +506,15 @@ PropertiesImpl::~PropertiesImpl(){
delete [] content;
}
+void
+PropertiesImpl::setCaseInsensitiveNames(bool value){
+ m_insensitive = value;
+ if(value)
+ compare = strcasecmp;
+ else
+ compare = strcmp;
+}
+
void
PropertiesImpl::grow(int sizeToAdd){
PropertyImpl ** newContent = new PropertyImpl * [size + sizeToAdd];
@@ -522,9 +532,11 @@ PropertiesImpl::get(const char * name) const {
return 0;
}
- for(unsigned int i = 0; i<tmp->items; i++)
+ for(unsigned int i = 0; i<tmp->items; i++) {
if((* compare)(tmp->content[i]->name, short_name) == 0)
return tmp->content[i];
+ }
+
return 0;
}
@@ -1109,14 +1121,15 @@ Properties::getCopy(const char * name, Uint32 no, Properties ** value) const {
void
Properties::setCaseInsensitiveNames(bool value){
- impl->m_insensitive = value;
- if(value)
- impl->compare = strcasecmp;
- else
- impl->compare = strcmp;
+ impl->setCaseInsensitiveNames(value);
}
bool
Properties::getCaseInsensitiveNames() const {
return impl->m_insensitive;
}
+
+template bool put(PropertiesImpl *, const char *, Uint32, bool);
+template bool put(PropertiesImpl *, const char *, Uint64, bool);
+template bool put(PropertiesImpl *, const char *, const char *, bool);
+template bool put(PropertiesImpl *, const char *, const Properties*, bool);
diff --git a/ndb/src/common/util/SocketAuthenticator.cpp b/ndb/src/common/util/SocketAuthenticator.cpp
new file mode 100644
index 00000000000..aed4db39231
--- /dev/null
+++ b/ndb/src/common/util/SocketAuthenticator.cpp
@@ -0,0 +1,91 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <ndb_global.h>
+
+#include <SocketClient.hpp>
+#include <SocketAuthenticator.hpp>
+#include <InputStream.hpp>
+#include <OutputStream.hpp>
+#include <NdbOut.hpp>
+
+SocketAuthSimple::SocketAuthSimple(const char *username, const char *passwd) {
+ if (username)
+ m_username= strdup(username);
+ else
+ m_username= 0;
+ if (passwd)
+ m_passwd= strdup(passwd);
+ else
+ m_passwd= 0;
+}
+
+SocketAuthSimple::~SocketAuthSimple()
+{
+ if (m_passwd)
+ free((void*)m_passwd);
+ if (m_username)
+ free((void*)m_username);
+}
+
+bool SocketAuthSimple::client_authenticate(int sockfd)
+{
+ SocketOutputStream s_output(sockfd);
+ SocketInputStream s_input(sockfd);
+
+ if (m_username)
+ s_output.println("%s", m_username);
+ else
+ s_output.println("");
+
+ if (m_passwd)
+ s_output.println("%s", m_passwd);
+ else
+ s_output.println("");
+
+ char buf[16];
+ if (s_input.gets(buf, 16) == 0) return false;
+ if (strncmp("ok", buf, 2) == 0)
+ return true;
+
+ return false;
+}
+
+bool SocketAuthSimple::server_authenticate(int sockfd)
+{
+
+ SocketOutputStream s_output(sockfd);
+ SocketInputStream s_input(sockfd);
+
+ char buf[256];
+
+ if (s_input.gets(buf, 256) == 0) return false;
+ buf[255]= 0;
+ if (m_username)
+ free((void*)m_username);
+ m_username= strdup(buf);
+
+ if (s_input.gets(buf, 256) == 0) return false;
+ buf[255]= 0;
+ if (m_passwd)
+ free((void*)m_passwd);
+ m_passwd= strdup(buf);
+
+ s_output.println("ok");
+
+ return true;
+}
diff --git a/ndb/src/common/util/SocketClient.cpp b/ndb/src/common/util/SocketClient.cpp
new file mode 100644
index 00000000000..ec837babc24
--- /dev/null
+++ b/ndb/src/common/util/SocketClient.cpp
@@ -0,0 +1,92 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <ndb_global.h>
+#include <NdbOut.hpp>
+
+#include <SocketClient.hpp>
+#include <SocketAuthenticator.hpp>
+
+SocketClient::SocketClient(const char *server_name, unsigned short port, SocketAuthenticator *sa)
+{
+ m_auth= sa;
+ m_port= port;
+ m_server_name= strdup(server_name);
+ m_sockfd= -1;
+}
+
+SocketClient::~SocketClient()
+{
+ if (m_server_name)
+ free(m_server_name);
+ if (m_sockfd >= 0)
+ NDB_CLOSE_SOCKET(m_sockfd);
+ if (m_auth)
+ delete m_auth;
+}
+
+bool
+SocketClient::init()
+{
+ if (m_sockfd >= 0)
+ NDB_CLOSE_SOCKET(m_sockfd);
+
+ memset(&m_servaddr, 0, sizeof(m_servaddr));
+ m_servaddr.sin_family = AF_INET;
+ m_servaddr.sin_port = htons(m_port);
+ // Convert ip address presentation format to numeric format
+ if (Ndb_getInAddr(&m_servaddr.sin_addr, m_server_name))
+ return false;
+
+ m_sockfd= socket(AF_INET, SOCK_STREAM, 0);
+ if (m_sockfd == NDB_INVALID_SOCKET) {
+ return false;
+ }
+
+ return true;
+}
+
+NDB_SOCKET_TYPE
+SocketClient::connect()
+{
+ if (m_sockfd < 0)
+ {
+ if (!init()) {
+ ndbout << "SocketClient::connect() failed " << m_server_name << " " << m_port << endl;
+ return -1;
+ }
+ }
+ const int r = ::connect(m_sockfd, (struct sockaddr*) &m_servaddr, sizeof(m_servaddr));
+ if (r == -1) {
+ NDB_CLOSE_SOCKET(m_sockfd);
+ m_sockfd= -1;
+ return -1;
+ }
+
+ if (m_auth) {
+ if (!m_auth->client_authenticate(m_sockfd))
+ {
+ NDB_CLOSE_SOCKET(m_sockfd);
+ m_sockfd= -1;
+ return -1;
+ }
+ }
+ NDB_SOCKET_TYPE sockfd= m_sockfd;
+ m_sockfd= -1;
+
+ return sockfd;
+}
diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp
index a0ec0aaa676..0cc06a54496 100644
--- a/ndb/src/common/util/SocketServer.cpp
+++ b/ndb/src/common/util/SocketServer.cpp
@@ -17,7 +17,7 @@
#include <ndb_global.h>
-#include "SocketServer.hpp"
+#include <SocketServer.hpp>
#include <NdbTCP.h>
#include <NdbOut.hpp>
@@ -36,10 +36,11 @@ SocketServer::SocketServer(int maxSessions) :
}
SocketServer::~SocketServer() {
- for(unsigned i = 0; i<m_sessions.size(); i++){
+ unsigned i;
+ for(i = 0; i<m_sessions.size(); i++){
delete m_sessions[i].m_session;
}
- for(unsigned i = 0; i<m_services.size(); i++){
+ for(i = 0; i<m_services.size(); i++){
delete m_services[i].m_service;
}
}
@@ -146,7 +147,6 @@ SocketServer::doAccept(){
ServiceInstance & si = m_services[i];
if(FD_ISSET(si.m_socket, &readSet)){
-
NDB_SOCKET_TYPE childSock = accept(si.m_socket, 0, 0);
if(childSock == NDB_INVALID_SOCKET){
continue;
@@ -265,10 +265,11 @@ SocketServer::checkSessions(){
void
SocketServer::stopSessions(bool wait){
- for(int i = m_sessions.size() - 1; i>=0; i--)
+ int i;
+ for(i = m_sessions.size() - 1; i>=0; i--)
m_sessions[i].m_session->m_stop = true;
- for(int i = m_services.size() - 1; i>=0; i--)
+ for(i = m_services.size() - 1; i>=0; i--)
m_services[i].m_service->stopSessions();
if(wait){
@@ -303,3 +304,6 @@ sessionThread_C(void* _sc){
NdbThread_Exit(0);
return 0;
}
+
+template class MutexVector<SocketServer::ServiceInstance>;
+template class MutexVector<SocketServer::SessionInstance>;
diff --git a/ndb/src/common/util/socket_io.cpp b/ndb/src/common/util/socket_io.cpp
index 97bb4863a67..b2f4ef91031 100644
--- a/ndb/src/common/util/socket_io.cpp
+++ b/ndb/src/common/util/socket_io.cpp
@@ -93,8 +93,8 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
FD_ZERO(&readset);
FD_SET(socket, &readset);
- timeout.tv_sec = 1;
- timeout.tv_usec = 0; // 1 s
+ timeout.tv_sec = (timeout_millis / 1000);
+ timeout.tv_usec = (timeout_millis % 1000) * 1000;
const int selectRes = select(socket + 1, &readset, 0, 0, &timeout);
if(selectRes != 1){
return -1;
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp
index caf19ddba0e..46b043c7004 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/ndb/src/cw/cpcd/APIService.cpp
@@ -382,3 +382,5 @@ CPCDAPISession::listProcesses(Parser_t::Context & /* unused */,
m_cpcd.m_processes.unlock();
}
+
+template class Vector<ParserRow<CPCDAPISession> const*>;
diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/ndb/src/cw/cpcd/CPCD.cpp
index f2878b7dea1..44db10422b9 100644
--- a/ndb/src/cw/cpcd/CPCD.cpp
+++ b/ndb/src/cw/cpcd/CPCD.cpp
@@ -351,8 +351,9 @@ CPCD::loadProcessList(){
sess.loadFile();
loadingProcessList = false;
+ size_t i;
Vector<int> temporary;
- for(size_t i = 0; i<m_processes.size(); i++){
+ for(i = 0; i<m_processes.size(); i++){
Process * proc = m_processes[i];
proc->readPid();
if(proc->m_processType == TEMPORARY){
@@ -360,7 +361,7 @@ CPCD::loadProcessList(){
}
}
- for(size_t i = 0; i<temporary.size(); i++){
+ for(i = 0; i<temporary.size(); i++){
RequestStatus rs;
undefineProcess(&rs, temporary[i]);
}
@@ -430,3 +431,5 @@ CPCD::report(int id, CPCEvent::EventType t){
}
m_subscribers.unlock();
}
+
+template class MutexVector<EventSubscriber*>;
diff --git a/ndb/src/cw/cpcd/Makefile.am b/ndb/src/cw/cpcd/Makefile.am
index 6345bae9bbe..1f7b0d88448 100644
--- a/ndb/src/cw/cpcd/Makefile.am
+++ b/ndb/src/cw/cpcd/Makefile.am
@@ -1,5 +1,5 @@
-ndbtools_PROGRAMS = ndb_cpcd
+ndbbin_PROGRAMS = ndb_cpcd
ndb_cpcd_SOURCES = main.cpp CPCD.cpp Process.cpp APIService.cpp Monitor.cpp common.cpp
diff --git a/ndb/src/cw/cpcd/Monitor.cpp b/ndb/src/cw/cpcd/Monitor.cpp
index 2935cd0a648..141de926d4d 100644
--- a/ndb/src/cw/cpcd/Monitor.cpp
+++ b/ndb/src/cw/cpcd/Monitor.cpp
@@ -75,3 +75,5 @@ void
CPCD::Monitor::signal() {
NdbCondition_Signal(m_changeCondition);
}
+
+template class MutexVector<CPCD::Process*>;
diff --git a/ndb/src/cw/cpcd/Process.cpp b/ndb/src/cw/cpcd/Process.cpp
index 74426306a88..0a986f63fda 100644
--- a/ndb/src/cw/cpcd/Process.cpp
+++ b/ndb/src/cw/cpcd/Process.cpp
@@ -15,8 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <signal.h>
-
#include <BaseString.hpp>
#include <InputStream.hpp>
@@ -209,54 +207,50 @@ int
set_ulimit(const BaseString & pair){
#ifdef HAVE_GETRLIMIT
errno = 0;
- do {
- Vector<BaseString> list;
- pair.split(list, ":");
- if(list.size() != 2){
- break;
- }
-
- int res;
- rlim_t value = RLIM_INFINITY;
- if(!(list[1].trim() == "unlimited")){
- value = atoi(list[1].c_str());
- }
-
- struct rlimit rlp;
+ Vector<BaseString> list;
+ pair.split(list, ":");
+ if(list.size() != 2){
+ logger.error("Unable to process ulimit: split >%s< list.size()=%d",
+ pair.c_str(), list.size());
+ return -1;
+ }
+
+ int res;
+ rlim_t value = RLIM_INFINITY;
+ if(!(list[1].trim() == "unlimited")){
+ value = atoi(list[1].c_str());
+ }
+
+ struct rlimit rlp;
#define _RLIMIT_FIX(x) { res = getrlimit(x,&rlp); if(!res){ rlp.rlim_cur = value; res = setrlimit(x, &rlp); }}
-
- if(list[0].trim() == "c"){
- _RLIMIT_FIX(RLIMIT_CORE);
- } else if(list[0] == "d"){
- _RLIMIT_FIX(RLIMIT_DATA);
- } else if(list[0] == "f"){
- _RLIMIT_FIX(RLIMIT_FSIZE);
- } else if(list[0] == "n"){
- _RLIMIT_FIX(RLIMIT_NOFILE);
- } else if(list[0] == "s"){
- _RLIMIT_FIX(RLIMIT_STACK);
- } else if(list[0] == "t"){
- _RLIMIT_FIX(RLIMIT_CPU);
- } else {
- errno = EINVAL;
- break;
- }
- if(!res)
- break;
-
- return 0;
- } while(false);
- logger.error("Unable to process ulimit: %s(%s)",
- pair.c_str(), strerror(errno));
- return -1;
-#else
- return 0; // Maybe it's ok anyway...
+
+ if(list[0].trim() == "c"){
+ _RLIMIT_FIX(RLIMIT_CORE);
+ } else if(list[0] == "d"){
+ _RLIMIT_FIX(RLIMIT_DATA);
+ } else if(list[0] == "f"){
+ _RLIMIT_FIX(RLIMIT_FSIZE);
+ } else if(list[0] == "n"){
+ _RLIMIT_FIX(RLIMIT_NOFILE);
+ } else if(list[0] == "s"){
+ _RLIMIT_FIX(RLIMIT_STACK);
+ } else if(list[0] == "t"){
+ _RLIMIT_FIX(RLIMIT_CPU);
+ } else {
+ errno = EINVAL;
+ }
+ if(res){
+ logger.error("Unable to process ulimit: %s res=%d error=%d(%s)",
+ pair.c_str(), res, errno, strerror(errno));
+ return -1;
+ }
#endif
+ return 0;
}
void
CPCD::Process::do_exec() {
-
+ size_t i;
setup_environment(m_env.c_str());
char **argv = BaseString::argify(m_path.c_str(), m_args.c_str());
@@ -272,7 +266,7 @@ CPCD::Process::do_exec() {
Vector<BaseString> ulimit;
m_ulimit.split(ulimit);
- for(size_t i = 0; i<ulimit.size(); i++){
+ for(i = 0; i<ulimit.size(); i++){
if(ulimit[i].trim().length() > 0 && set_ulimit(ulimit[i]) != 0){
_exit(1);
}
@@ -286,7 +280,7 @@ CPCD::Process::do_exec() {
BaseString * redirects[] = { &m_stdin, &m_stdout, &m_stderr };
int fds[3];
- for(int i = 0; i<3; i++){
+ for(i = 0; i<3; i++){
if(redirects[i]->empty()){
#ifndef DEBUG
dup2(fd, i);
@@ -319,7 +313,7 @@ CPCD::Process::do_exec() {
}
/* Close all filedescriptors */
- for(int i = STDERR_FILENO+1; i < getdtablesize(); i++)
+ for(i = STDERR_FILENO+1; i < getdtablesize(); i++)
close(i);
execv(m_path.c_str(), argv);
diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp
index 11f6238d5f7..913c31de1f7 100644
--- a/ndb/src/cw/cpcd/main.cpp
+++ b/ndb/src/cw/cpcd/main.cpp
@@ -15,7 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h> /* Needed for mkdir(2) */
-#include <signal.h>
#include "CPCD.hpp"
#include "APIService.hpp"
diff --git a/ndb/src/kernel/Makefile.am b/ndb/src/kernel/Makefile.am
index b2aa5f2e074..60284f6a369 100644
--- a/ndb/src/kernel/Makefile.am
+++ b/ndb/src/kernel/Makefile.am
@@ -4,7 +4,7 @@ include $(top_srcdir)/ndb/config/common.mk.am
ndbbin_PROGRAMS = ndbd
-ndbd_SOURCES = Main.cpp SimBlockList.cpp
+ndbd_SOURCES = main.cpp SimBlockList.cpp
include $(top_srcdir)/ndb/config/type_kernel.mk.am
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index 92dbfd067f7..af575de4f62 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -3,7 +3,7 @@ Next NDBCNTR 1000
Next NDBFS 2000
Next DBACC 3001
Next DBTUP 4007
-Next DBLQH 5036
+Next DBLQH 5040
Next DBDICT 6006
Next DBDIH 7173
Next DBTC 8035
@@ -190,6 +190,10 @@ Delay execution of ABORTREQ signal 2 seconds to generate time-out.
5035: Delay ACC_CONTOPCONT
+5038: Drop LQHKEYREQ + set 5039
+5039: Drop ABORT + set 5003
+
+
ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
-------------------------------------------------
8040:
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index 4342a9d6d94..52a543dbcdc 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -3332,7 +3332,8 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
req->transId1 = 0;
req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
- for(unsigned int i = 0; i<parallelism; i++) {
+ Uint32 i;
+ for(i = 0; i<parallelism; i++) {
jam();
req->clientOpPtr[i] = filePtr.i;
}//for
@@ -3350,7 +3351,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
signal->theData[7] = 0;
Uint32 dataPos = 8;
- for(Uint32 i = 0; i<table.noOfAttributes; i++) {
+ for(i = 0; i<table.noOfAttributes; i++) {
jam();
AttributePtr attr;
table.attributes.getPtr(attr, i);
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index 77669e759d3..1e2100251be 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -696,6 +696,7 @@ Uint32 *
Backup::OperationRecord::newVariableKey(Uint32 sz){
attrLeft--;
attrSzLeft = 0;
+ attrSzTotal += sz;
dst = &dst_VariableData->Data[0];
dst_VariableData->Sz = htonl(sz);
@@ -712,7 +713,7 @@ Backup::OperationRecord::finished(){
return false;
}
- attrLen[opNoDone] = attrSzTotal;
+ attrLen[opNoDone] = attrSzTotal + sz_FixedKeys;
opNoDone++;
scanStop = dst = (Uint32 *)dst_VariableData;
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 36ce1857144..d8cbb36df62 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -213,3 +213,6 @@ Backup::~Backup()
BLOCK_FUNCTIONS(Backup);
+template class ArrayPool<Backup::Page32>;
+template class ArrayPool<Backup::Attribute>;
+template class ArrayPool<Backup::Fragment>;
diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.cpp b/ndb/src/kernel/blocks/backup/restore/Restore.cpp
index 24d2cfbfe35..f0ca54884be 100644
--- a/ndb/src/kernel/blocks/backup/restore/Restore.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/Restore.cpp
@@ -34,6 +34,7 @@ Uint64 Twiddle64(Uint64 in); // Byte shift 64-bit data
bool
BackupFile::Twiddle(const AttributeDesc* attr_desc, AttributeData* attr_data, Uint32 arraySize){
+ Uint32 i;
if(m_hostByteOrder)
return true;
@@ -47,17 +48,17 @@ BackupFile::Twiddle(const AttributeDesc* attr_desc, AttributeData* attr_data, Ui
return true;
case 16:
- for(unsigned i = 0; i<arraySize; i++){
+ for(i = 0; i<arraySize; i++){
attr_data->u_int16_value[i] = Twiddle16(attr_data->u_int16_value[i]);
}
return true;
case 32:
- for(unsigned i = 0; i<arraySize; i++){
+ for(i = 0; i<arraySize; i++){
attr_data->u_int32_value[i] = Twiddle32(attr_data->u_int32_value[i]);
}
return true;
case 64:
- for(unsigned i = 0; i<arraySize; i++){
+ for(i = 0; i<arraySize; i++){
attr_data->u_int64_value[i] = Twiddle64(attr_data->u_int64_value[i]);
}
return true;
@@ -333,8 +334,8 @@ RestoreDataIterator::getNextTuple(int & res)
Uint32 *buf_ptr = (Uint32*)_buf_ptr, *ptr = buf_ptr;
ptr += m_currentTable->m_nullBitmaskSize;
-
- for(Uint32 i= 0; i < m_currentTable->m_fixedKeys.size(); i++){
+ Uint32 i;
+ for(i= 0; i < m_currentTable->m_fixedKeys.size(); i++){
assert(ptr < buf_ptr + dataLength);
const Uint32 attrId = m_currentTable->m_fixedKeys[i]->attrId;
@@ -355,7 +356,7 @@ RestoreDataIterator::getNextTuple(int & res)
ptr += sz;
}
- for(Uint32 i = 0; i < m_currentTable->m_fixedAttribs.size(); i++){
+ for(i = 0; i < m_currentTable->m_fixedAttribs.size(); i++){
assert(ptr < buf_ptr + dataLength);
const Uint32 attrId = m_currentTable->m_fixedAttribs[i]->attrId;
@@ -377,7 +378,7 @@ RestoreDataIterator::getNextTuple(int & res)
ptr += sz;
}
- for(Uint32 i = 0; i < m_currentTable->m_variableAttribs.size(); i++){
+ for(i = 0; i < m_currentTable->m_variableAttribs.size(); i++){
const Uint32 attrId = m_currentTable->m_variableAttribs[i]->attrId;
AttributeData * attr_data = m_tuple.getData(attrId);
@@ -936,3 +937,8 @@ operator<<(NdbOut& ndbout, const TableS & table){
} // for
return ndbout;
}
+
+template class Vector<TableS*>;
+template class Vector<AttributeS*>;
+template class Vector<AttributeDesc*>;
+
diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.hpp b/ndb/src/kernel/blocks/backup/restore/Restore.hpp
index e9149e38e44..5a705740c69 100644
--- a/ndb/src/kernel/blocks/backup/restore/Restore.hpp
+++ b/ndb/src/kernel/blocks/backup/restore/Restore.hpp
@@ -301,9 +301,10 @@ public:
}
~LogEntry()
{
- for(Uint32 i= 0; i< m_values.size(); i++)
+ Uint32 i;
+ for(i= 0; i< m_values.size(); i++)
delete m_values[i];
- for(Uint32 i= 0; i< m_values_e.size(); i++)
+ for(i= 0; i< m_values_e.size(); i++)
delete m_values_e[i];
}
Uint32 size() const { return m_values.size(); }
diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp
index 99deeb3115c..23805173484 100644
--- a/ndb/src/kernel/blocks/backup/restore/main.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/main.cpp
@@ -250,8 +250,8 @@ main(int argc, const char** argv)
return -1;
}
-
- for(Uint32 i= 0; i < g_consumers.size(); i++)
+ Uint32 i;
+ for(i= 0; i < g_consumers.size(); i++)
{
if (!g_consumers[i]->init())
{
@@ -261,7 +261,7 @@ main(int argc, const char** argv)
}
- for(Uint32 i = 0; i<metaData.getNoOfTables(); i++)
+ for(i = 0; i<metaData.getNoOfTables(); i++)
{
if (checkSysTable(metaData[i]->getTableName()))
{
@@ -345,7 +345,7 @@ main(int argc, const char** argv)
return -1;
}
logIter.validateFooter(); //not implemented
- for (Uint32 i= 0; i < g_consumers.size(); i++)
+ for (i= 0; i < g_consumers.size(); i++)
g_consumers[i]->endOfLogEntrys();
}
}
@@ -353,3 +353,4 @@ main(int argc, const char** argv)
return 1;
} // main
+template class Vector<BackupConsumer*>;
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 694007c8508..0f25391fccb 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -114,6 +114,8 @@ Cmvmi::Cmvmi(const Configuration & conf) :
}
setNodeInfo(nodeId).m_type = nodeType;
}
+
+ setNodeInfo(getOwnNodeId()).m_connected = true;
}
Cmvmi::~Cmvmi()
@@ -360,7 +362,7 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
globalTransporterRegistry.setIOState(i, HaltIO);
- globalTransporterRegistry.setPerformState(i, PerformDisconnect);
+ globalTransporterRegistry.do_disconnect(i);
/**
* Cancel possible event subscription
@@ -388,7 +390,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
const Uint32 len = signal->getLength();
if(len == 2){
- globalTransporterRegistry.setPerformState(tStartingNode, PerformConnect);
+ globalTransporterRegistry.do_connect(tStartingNode);
globalTransporterRegistry.setIOState(tStartingNode, HaltIO);
//-----------------------------------------------------
@@ -403,7 +405,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
jam();
if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2){
jam();
- globalTransporterRegistry.setPerformState(i, PerformConnect);
+ globalTransporterRegistry.do_connect(i);
globalTransporterRegistry.setIOState(i, HaltIO);
signal->theData[0] = EventReport::CommunicationOpened;
@@ -454,34 +456,21 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
const NodeInfo::NodeType type = getNodeInfo(hostId).getType();
ndbrequire(type != NodeInfo::INVALID);
- if (globalTransporterRegistry.performState(hostId) != PerformDisconnect) {
+ if(type == NodeInfo::DB || globalData.theStartLevel == NodeState::SL_STARTED){
jam();
-
- // -------------------------------------------------------------------
- // We do not report the disconnection when disconnection is already ongoing.
- // This reporting should be looked into but this secures that we avoid
- // crashes due to too quick re-reporting of disconnection.
- // -------------------------------------------------------------------
- if(type == NodeInfo::DB || globalData.theStartLevel == NodeState::SL_STARTED){
- jam();
- DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
- rep->nodeId = hostId;
- rep->err = errNo;
- sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
- DisconnectRep::SignalLength, JBA);
- globalTransporterRegistry.setPerformState(hostId, PerformDisconnect);
- } else if(globalData.theStartLevel == NodeState::SL_CMVMI ||
- globalData.theStartLevel == NodeState::SL_STARTING) {
- /**
- * Someone disconnected during cmvmi period
- */
- if(type == NodeInfo::MGM){
- jam();
- globalTransporterRegistry.setPerformState(hostId, PerformConnect);
- } else {
- globalTransporterRegistry.setPerformState(hostId, PerformDisconnect);
- }
- }
+ DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
+ rep->nodeId = hostId;
+ rep->err = errNo;
+ sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
+ DisconnectRep::SignalLength, JBA);
+ } else if((globalData.theStartLevel == NodeState::SL_CMVMI ||
+ globalData.theStartLevel == NodeState::SL_STARTING)
+ && type == NodeInfo::MGM) {
+ /**
+ * Someone disconnected during cmvmi period
+ */
+ jam();
+ globalTransporterRegistry.do_connect(hostId);
}
signal->theData[0] = EventReport::Disconnected;
@@ -491,7 +480,6 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
void Cmvmi::execCONNECT_REP(Signal *signal){
const Uint32 hostId = signal->theData[0];
-
jamEntry();
const NodeInfo::NodeType type = (NodeInfo::NodeType)getNodeInfo(hostId).m_type;
@@ -520,7 +508,8 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
/**
* Dont allow api nodes to connect
*/
- globalTransporterRegistry.setPerformState(hostId, PerformDisconnect);
+ abort();
+ globalTransporterRegistry.do_disconnect(hostId);
}
}
@@ -754,8 +743,8 @@ Cmvmi::execSTART_ORD(Signal* signal) {
*/
for(unsigned int i = 1; i < MAX_NODES; i++ ){
if (getNodeInfo(i).m_type == NodeInfo::MGM){
- if(globalTransporterRegistry.performState(i) != PerformIO){
- globalTransporterRegistry.setPerformState(i, PerformConnect);
+ if(!globalTransporterRegistry.is_connected(i)){
+ globalTransporterRegistry.do_connect(i);
globalTransporterRegistry.setIOState(i, NoHalt);
}
}
@@ -781,7 +770,7 @@ Cmvmi::execSTART_ORD(Signal* signal) {
// without any connected nodes.
for(unsigned int i = 1; i < MAX_NODES; i++ ){
if (i != getOwnNodeId() && getNodeInfo(i).m_type != NodeInfo::MGM){
- globalTransporterRegistry.setPerformState(i, PerformDisconnect);
+ globalTransporterRegistry.do_disconnect(i);
globalTransporterRegistry.setIOState(i, HaltIO);
}
}
@@ -1060,29 +1049,10 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
if(nodeTypeStr == 0)
continue;
- const char* actionStr = "";
- switch (globalTransporterRegistry.performState(i)){
- case PerformNothing:
- actionStr = "does nothing";
- break;
- case PerformIO:
- actionStr = "is connected";
- break;
- case PerformConnect:
- actionStr = "is trying to connect";
- break;
- case PerformDisconnect:
- actionStr = "is trying to disconnect";
- break;
- case RemoveTransporter:
- actionStr = "will be removed";
- break;
- }
-
infoEvent("Connection to %d (%s) %s",
i,
nodeTypeStr,
- actionStr);
+ globalTransporterRegistry.getPerformStateString(i));
}
}
@@ -1100,14 +1070,15 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
}
if (dumpState->args[0] == DumpStateOrd::CmvmiTestLongSigWithDelay) {
+ unsigned i;
Uint32 loopCount = dumpState->args[1];
const unsigned len0 = 11;
const unsigned len1 = 123;
Uint32 sec0[len0];
Uint32 sec1[len1];
- for (unsigned i = 0; i < len0; i++)
+ for (i = 0; i < len0; i++)
sec0[i] = i;
- for (unsigned i = 0; i < len1; i++)
+ for (i = 0; i < len1; i++)
sec1[i] = 16 * i;
Uint32* sig = signal->getDataPtrSend();
sig[0] = reference();
@@ -1160,6 +1131,7 @@ static LinearSectionPtr g_test[3];
void
Cmvmi::execTESTSIG(Signal* signal){
+ Uint32 i;
/**
* Test of SafeCounter
*/
@@ -1184,14 +1156,14 @@ Cmvmi::execTESTSIG(Signal* signal){
getOwnNodeId(),
true);
ndbout_c("-- Fixed section --");
- for(Uint32 i = 0; i<signal->length(); i++){
+ for(i = 0; i<signal->length(); i++){
fprintf(stdout, "H'0x%.8x ", signal->theData[i]);
if(((i + 1) % 6) == 0)
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
- for(Uint32 i = 0; i<signal->header.m_noOfSections; i++){
+ for(i = 0; i<signal->header.m_noOfSections; i++){
SegmentedSectionPtr ptr;
ndbout_c("-- Section %d --", i);
signal->getSection(ptr, i);
@@ -1204,7 +1176,7 @@ Cmvmi::execTESTSIG(Signal* signal){
/**
* Validate length:s
*/
- for(Uint32 i = 0; i<signal->header.m_noOfSections; i++){
+ for(i = 0; i<signal->header.m_noOfSections; i++){
SegmentedSectionPtr ptr;
signal->getSection(ptr, i);
ndbrequire(ptr.p != 0);
@@ -1249,7 +1221,7 @@ Cmvmi::execTESTSIG(Signal* signal){
case 4:{
LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections();
- for(Uint32 i = 0; i<secs; i++){
+ for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr;
signal->getSection(sptr, i);
ptr[i].sz = sptr.sz;
@@ -1298,7 +1270,7 @@ Cmvmi::execTESTSIG(Signal* signal){
case 8:{
LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections();
- for(Uint32 i = 0; i<secs; i++){
+ for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr;
signal->getSection(sptr, i);
ptr[i].sz = sptr.sz;
@@ -1332,7 +1304,7 @@ Cmvmi::execTESTSIG(Signal* signal){
sendNextLinearFragment(signal, fragSend);
}
- for(Uint32 i = 0; i<secs; i++){
+ for(i = 0; i<secs; i++){
delete[] ptr[i].p;
}
break;
@@ -1364,7 +1336,7 @@ Cmvmi::execTESTSIG(Signal* signal){
const Uint32 secs = signal->getNoOfSections();
memset(g_test, 0, sizeof(g_test));
- for(Uint32 i = 0; i<secs; i++){
+ for(i = 0; i<secs; i++){
SegmentedSectionPtr sptr;
signal->getSection(sptr, i);
g_test[i].sz = sptr.sz;
@@ -1408,7 +1380,7 @@ Cmvmi::execTESTSIG(Signal* signal){
case 14:{
Uint32 count = signal->theData[8];
signal->theData[10] = count * rg.m_nodes.count();
- for(Uint32 i = 0; i<count; i++){
+ for(i = 0; i<count; i++){
sendSignal(rg, GSN_TESTSIG, signal, signal->length(), JBB);
}
return;
diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index 6ba2d083e58..cc3e646f219 100644
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -218,6 +218,7 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZREL_FRAG 6
#define ZREL_DIR 7
#define ZREPORT_MEMORY_USAGE 8
+#define ZLCP_OP_WRITE_RT_BREAK 9
/* ------------------------------------------------------------------------- */
/* ERROR CODES */
@@ -1190,6 +1191,8 @@ private:
void zpagesize_error(const char* where);
void reportMemoryUsage(Signal* signal, int gth);
+ void lcp_write_op_to_undolog(Signal* signal);
+ void reenable_expand_after_redo_log_exection_complete(Signal*);
// Initialisation
@@ -1559,7 +1562,7 @@ private:
Uint32 cexcPrevforward;
Uint32 clocalkey[32];
Uint32 ckeys[2048];
-
+
Uint32 c_errorInsert3000_TableId;
Uint32 cSrUndoRecords[5];
};
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index 90e914987c3..b22fd6ce641 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -94,7 +94,8 @@ void Dbacc::initRecords()
page8 = (Page8*)allocRecord("Page8",
sizeof(Page8),
- cpagesize);
+ cpagesize,
+ false);
rootfragmentrec = (Rootfragmentrec*)allocRecord("Rootfragmentrec",
sizeof(Rootfragmentrec),
@@ -114,7 +115,8 @@ void Dbacc::initRecords()
undopage = (Undopage*)allocRecord("Undopage",
sizeof(Undopage),
- cundopagesize);
+ cundopagesize,
+ false);
// Initialize BAT for interface to file system
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 933ee2cf8e1..9cfac0ad2a2 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -46,13 +46,17 @@ Dbacc::remainingUndoPages(){
ndbrequire(HeadPage>=TailPage);
Uint32 UsedPages = HeadPage - TailPage;
- Uint32 Remaining = cundopagesize - UsedPages;
+ Int32 Remaining = cundopagesize - UsedPages;
// There can not be more than cundopagesize remaining
- ndbrequire(Remaining<=cundopagesize);
-
+ if (Remaining <= 0){
+ // No more undolog, crash node
+ progError(__LINE__,
+ ERR_NO_MORE_UNDOLOG,
+ "There are more than 1Mbyte undolog writes outstanding");
+ }
return Remaining;
-}//Dbacc::remainingUndoPages()
+}
void
Dbacc::updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue){
@@ -193,6 +197,17 @@ void Dbacc::execCONTINUEB(Signal* signal)
return;
}
+ case ZLCP_OP_WRITE_RT_BREAK:
+ {
+ operationRecPtr.i= signal->theData[1];
+ fragrecptr.i= signal->theData[2];
+ lcpConnectptr.i= signal->theData[3];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ lcp_write_op_to_undolog(signal);
+ return;
+ }
default:
ndbrequire(false);
break;
@@ -742,6 +757,7 @@ void Dbacc::initialiseDirRec(Signal* signal)
DirectoryarrayPtr idrDirptr;
ndbrequire(cdirarraysize > 0);
for (idrDirptr.i = 0; idrDirptr.i < cdirarraysize; idrDirptr.i++) {
+ refresh_watch_dog();
ptrAss(idrDirptr, directoryarray);
for (Uint32 i = 0; i <= 255; i++) {
idrDirptr.p->pagep[i] = RNIL;
@@ -761,6 +777,7 @@ void Dbacc::initialiseDirRangeRec(Signal* signal)
ndbrequire(cdirrangesize > 0);
for (idrDirRangePtr.i = 0; idrDirRangePtr.i < cdirrangesize; idrDirRangePtr.i++) {
+ refresh_watch_dog();
ptrAss(idrDirRangePtr, dirRange);
idrDirRangePtr.p->dirArray[0] = idrDirRangePtr.i + 1;
for (Uint32 i = 1; i < 256; i++) {
@@ -783,6 +800,7 @@ void Dbacc::initialiseFragRec(Signal* signal)
ndbrequire(cfragmentsize > 0);
for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) {
jam();
+ refresh_watch_dog();
ptrAss(regFragPtr, fragmentrec);
initFragGeneral(regFragPtr);
regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
@@ -861,6 +879,7 @@ void Dbacc::initialiseOperationRec(Signal* signal)
{
ndbrequire(coprecsize > 0);
for (operationRecPtr.i = 0; operationRecPtr.i < coprecsize; operationRecPtr.i++) {
+ refresh_watch_dog();
ptrAss(operationRecPtr, operationrec);
operationRecPtr.p->transactionstate = IDLE;
operationRecPtr.p->operation = ZUNDEFINED_OP;
@@ -883,6 +902,7 @@ void Dbacc::initialiseOverflowRec(Signal* signal)
ndbrequire(coverflowrecsize > 0);
for (iorOverflowRecPtr.i = 0; iorOverflowRecPtr.i < coverflowrecsize; iorOverflowRecPtr.i++) {
+ refresh_watch_dog();
ptrAss(iorOverflowRecPtr, overflowRecord);
iorOverflowRecPtr.p->nextfreeoverrec = iorOverflowRecPtr.i + 1;
}//for
@@ -943,6 +963,7 @@ void Dbacc::initialiseRootfragRec(Signal* signal)
{
ndbrequire(crootfragmentsize > 0);
for (rootfragrecptr.i = 0; rootfragrecptr.i < crootfragmentsize; rootfragrecptr.i++) {
+ refresh_watch_dog();
ptrAss(rootfragrecptr, rootfragmentrec);
rootfragrecptr.p->nextroot = rootfragrecptr.i + 1;
rootfragrecptr.p->fragmentptr[0] = RNIL;
@@ -998,6 +1019,7 @@ void Dbacc::initialiseTableRec(Signal* signal)
{
ndbrequire(ctablesize > 0);
for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
+ refresh_watch_dog();
ptrAss(tabptr, tabrec);
for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
tabptr.p->fragholder[i] = RNIL;
@@ -2324,13 +2346,14 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
if (fragrecptr.p->slack > fragrecptr.p->slackCheck) { /* TIME FOR JOIN BUCKETS PROCESS */
if (fragrecptr.p->expandCounter > 0) {
- if (fragrecptr.p->expandFlag == 0) {
+ if (fragrecptr.p->expandFlag < 2) {
jam();
- fragrecptr.p->expandFlag = 1;
signal->theData[0] = fragrecptr.i;
signal->theData[1] = fragrecptr.p->p;
signal->theData[2] = fragrecptr.p->maxp;
- sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 3, JBB);
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
}//if
}//if
}//if
@@ -2344,7 +2367,7 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
if (fragrecptr.p->slack >= (Uint32)(1 << 31)) { /* IT MEANS THAT IF SLACK < ZERO */
if (fragrecptr.p->expandFlag == 0) {
jam();
- fragrecptr.p->expandFlag = 1;
+ fragrecptr.p->expandFlag = 2;
signal->theData[0] = fragrecptr.i;
signal->theData[1] = fragrecptr.p->p;
signal->theData[2] = fragrecptr.p->maxp;
@@ -6316,9 +6339,16 @@ Uint32 Dbacc::checkScanExpand(Signal* signal)
void Dbacc::execEXPANDCHECK2(Signal* signal)
{
+ jamEntry();
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
+ jam();
+ reenable_expand_after_redo_log_exection_complete(signal);
+ return;
+ }
+
DirectoryarrayPtr newDirptr;
- jamEntry();
fragrecptr.i = signal->theData[0];
tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
Uint32 tmp = 1;
@@ -6375,6 +6405,7 @@ void Dbacc::execEXPANDCHECK2(Signal* signal)
return;
}//if
}//if
+
/*--------------------------------------------------------------------------*/
/* WE START BY FINDING THE PAGE, THE PAGE INDEX AND THE PAGE DIRECTORY*/
/* OF THE NEW BUCKET WHICH SHALL RECEIVE THE ELEMENT WHICH HAVE A 1 IN*/
@@ -6428,6 +6459,7 @@ void Dbacc::execEXPANDCHECK2(Signal* signal)
} else {
ptrCheckGuard(expPageptr, cpagesize, page8);
}//if
+
fragrecptr.p->expReceivePageptr = expPageptr.i;
fragrecptr.p->expReceiveIndex = texpReceivedBucket & ((1 << fragrecptr.p->k) - 1);
/*--------------------------------------------------------------------------*/
@@ -6455,7 +6487,7 @@ void Dbacc::execEXPANDCHECK2(Signal* signal)
endofexpLab(signal);
return;
}//Dbacc::execEXPANDCHECK2()
-
+
void Dbacc::endofexpLab(Signal* signal)
{
fragrecptr.p->p++;
@@ -6478,7 +6510,7 @@ void Dbacc::endofexpLab(Signal* signal)
/* IT IS STILL NECESSARY TO EXPAND THE FRAGMENT EVEN MORE. START IT FROM HERE */
/* WITHOUT WAITING FOR NEXT COMMIT ON THE FRAGMENT. */
/* --------------------------------------------------------------------------------- */
- fragrecptr.p->expandFlag = 1;
+ fragrecptr.p->expandFlag = 2;
signal->theData[0] = fragrecptr.i;
signal->theData[1] = fragrecptr.p->p;
signal->theData[2] = fragrecptr.p->maxp;
@@ -6487,6 +6519,47 @@ void Dbacc::endofexpLab(Signal* signal)
return;
}//Dbacc::endofexpLab()
+void Dbacc::reenable_expand_after_redo_log_exection_complete(Signal* signal){
+
+ tabptr.i = signal->theData[0];
+ Uint32 fragId = signal->theData[1];
+
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, fragId));
+#if 0
+ ndbout_c("reenable expand check for table %d fragment: %d",
+ tabptr.i, fragId);
+#endif
+
+ for (Uint32 i = 0; i < 2; i++) {
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ switch(fragrecptr.p->expandFlag){
+ case 0:
+ /**
+ * Hmm... this means that it has already been reenabled...
+ */
+ ndbassert(false);
+ continue;
+ case 1:
+ /**
+ * Nothing is going on, start expand check
+ */
+ case 2:
+ /**
+ * A shrink is running, do expand check anyway
+ * (to reset expandFlag)
+ */
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ break;
+ }
+ }
+}
+
void Dbacc::execDEBUG_SIG(Signal* signal)
{
jamEntry();
@@ -6882,9 +6955,10 @@ void Dbacc::execSHRINKCHECK2(Signal* signal)
jamEntry();
fragrecptr.i = signal->theData[0];
- tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ Uint32 oldFlag = signal->theData[3];
ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- fragrecptr.p->expandFlag = 0;
+ fragrecptr.p->expandFlag = oldFlag;
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
if (fragrecptr.p->slack <= fragrecptr.p->slackCheck) {
jam();
/* TIME FOR JOIN BUCKETS PROCESS */
@@ -6965,6 +7039,7 @@ void Dbacc::execSHRINKCHECK2(Signal* signal)
jam();
fragrecptr.p->p--;
}//if
+
/*--------------------------------------------------------------------------*/
/* WE START BY FINDING THE NECESSARY INFORMATION OF THE BUCKET TO BE */
/* REMOVED WHICH WILL SEND ITS ELEMENTS TO THE RECEIVING BUCKET. */
@@ -7157,11 +7232,13 @@ void Dbacc::endofshrinkbucketLab(Signal* signal)
/* SHRINKING BELOW 2^K - 1 (NOW 63). THIS WAS A BUG THAT */
/* WAS REMOVED 2000-05-12. */
/*--------------------------------------------------------------*/
- fragrecptr.p->expandFlag = 1;
signal->theData[0] = fragrecptr.i;
signal->theData[1] = fragrecptr.p->p;
signal->theData[2] = fragrecptr.p->maxp;
- sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 3, JBB);
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ ndbrequire(fragrecptr.p->expandFlag < 2);
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
}//if
}//if
}//if
@@ -7697,32 +7774,70 @@ void Dbacc::execACC_LCPREQ(Signal* signal)
fragrecptr.p->lcpMaxOverDirIndex = fragrecptr.p->lastOverIndex;
fragrecptr.p->createLcp = ZTRUE;
operationRecPtr.i = fragrecptr.p->lockOwnersList;
- while (operationRecPtr.i != RNIL) {
+ lcp_write_op_to_undolog(signal);
+}
+
+void
+Dbacc::lcp_write_op_to_undolog(Signal* signal)
+{
+ bool delay_continueb= false;
+ Uint32 i, j;
+ for (i= 0; i < 16; i++) {
jam();
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ if (remainingUndoPages() <= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ delay_continueb= true;
+ break;
+ }
+ for (j= 0; j < 32; j++) {
+ if (operationRecPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- if ((operationRecPtr.p->operation == ZINSERT) ||
- (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
+ if ((operationRecPtr.p->operation == ZINSERT) ||
+ (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
/*******************************************************************
* Only log inserts and elements that are marked as dissapeared.
* All other operations update the element header and that is handled
* when pages are written to disk
********************************************************************/
- undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
-
- writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/
- /* IN OP REC, IS WRITTEN AT UNDO PAGES */
- cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROTINE */
- writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
- checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */
- /* UNDO PAGES,CURRENTLY 8, IS FILLED */
- }//if
+ undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
- operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
- }//while
+ writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/
+ /* IN OP REC, IS WRITTEN AT UNDO PAGES */
+ cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROTINE */
+ writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
+ checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */
+ /* UNDO PAGES,CURRENTLY 8, IS FILLED */
+ }
+ operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
+ }
+ if (operationRecPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ }
+ if (operationRecPtr.i != RNIL) {
+ jam();
+ signal->theData[0]= ZLCP_OP_WRITE_RT_BREAK;
+ signal->theData[1]= operationRecPtr.i;
+ signal->theData[2]= fragrecptr.i;
+ signal->theData[3]= lcpConnectptr.i;
+ if (delay_continueb) {
+ jam();
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 10, 4);
+ } else {
+ jam();
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
+ }
+ return;
+ }
signal->theData[0] = fragrecptr.p->lcpLqhPtr;
sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPSTARTED,
@@ -7735,8 +7850,7 @@ void Dbacc::execACC_LCPREQ(Signal* signal)
signal->theData[0] = lcpConnectptr.i;
signal->theData[1] = fragrecptr.i;
sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
- return;
-}//Dbacc::execACC_LCPREQ()
+}
/* ******************--------------------------------------------------------------- */
/* ACC_SAVE_PAGES A GROUP OF PAGES IS ALLOCATED. THE PAGES AND OVERFLOW */
@@ -8595,12 +8709,6 @@ void Dbacc::checkUndoPages(Signal* signal)
* RECORDS IN
*/
Uint16 nextUndoPageId = tundoPageId + 1;
- if (nextUndoPageId > (clastUndoPageIdWritten + cundopagesize)){
- // No more undolog, crash node
- progError(__LINE__,
- ERR_NO_MORE_UNDOLOG,
- "There are more than 1Mbyte undolog writes outstanding");
- }
updateUndoPositionPage(signal, nextUndoPageId << ZUNDOPAGEINDEXBITS);
if ((tundoPageId & (ZWRITE_UNDOPAGESIZE - 1)) == (ZWRITE_UNDOPAGESIZE - 1)) {
@@ -9146,7 +9254,14 @@ void Dbacc::initFragAdd(Signal* signal,
ndbrequire(req->kValue == 6);
regFragPtr.p->k = req->kValue; /* TK_SIZE = 6 IN THIS VERSION */
regFragPtr.p->expandCounter = 0;
- regFragPtr.p->expandFlag = 0;
+
+ /**
+ * Only allow shrink during SR
+ * - to make sure we don't run out of pages during REDO log execution
+ *
+ * Is later restored to 0 by LQH at end of REDO log execution
+ */
+ regFragPtr.p->expandFlag = (getNodeState().getSystemRestartInProgress()?1:0);
regFragPtr.p->p = 0;
regFragPtr.p->maxp = (1 << req->kValue) - 1;
regFragPtr.p->minloadfactor = minLoadFactor;
@@ -9199,8 +9314,8 @@ void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)
for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
regFragPtr.p->datapages[i] = RNIL;
}//for
- for (Uint32 i = 0; i < 4; i++) {
- regFragPtr.p->longKeyPageArray[i] = RNIL;
+ for (Uint32 j = 0; j < 4; j++) {
+ regFragPtr.p->longKeyPageArray[j] = RNIL;
}//for
}//Dbacc::initFragGeneral()
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 2ef9e721e22..7126842459e 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -75,7 +75,6 @@
#include <signaldata/AlterTab.hpp>
#include <signaldata/CreateFragmentation.hpp>
#include <signaldata/CreateTab.hpp>
-#include "../dbtc/Dbtc.hpp"
#include <NdbSleep.h>
#define ZNOT_FOUND 626
@@ -254,6 +253,7 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
w.add(DictTabInfo::FragmentKeyTypeVal, tablePtr.p->fragmentKeyType);
w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+ w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
if (tablePtr.p->primaryTableId != RNIL){
TableRecordPtr primTab;
@@ -1313,6 +1313,7 @@ void Dbdict::initTableRecords()
TableRecordPtr tablePtr;
while (1) {
jam();
+ refresh_watch_dog();
c_tableRecordPool.seize(tablePtr);
if (tablePtr.i == RNIL) {
jam();
@@ -1373,6 +1374,7 @@ void Dbdict::initTriggerRecords()
TriggerRecordPtr triggerPtr;
while (1) {
jam();
+ refresh_watch_dog();
c_triggerRecordPool.seize(triggerPtr);
if (triggerPtr.i == RNIL) {
jam();
@@ -3599,30 +3601,37 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
SegmentedSectionPtr fragDataPtr;
signal->getSection(fragDataPtr, CreateFragmentationConf::FRAGMENTS);
-
signal->header.m_noOfSections = 0;
/**
- * Correct table
+ * Get table
*/
TableRecordPtr tabPtr;
c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ /**
+ * Save fragment count
+ */
+ tabPtr.p->fragmentCount = conf->noOfFragments;
+
+ /**
+ * Update table version
+ */
PageRecordPtr pagePtr;
c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
SchemaFile::TableEntry * tabEntry = getTableEntry(pagePtr.p, tabPtr.i);
+ tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
+
/**
- * Update table version
+ * Pack
*/
- tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
-
SimplePropertiesSectionWriter w(getSectionSegmentPool());
packTableIntoPagesImpl(w, tabPtr);
SegmentedSectionPtr spDataPtr;
w.getPtr(spDataPtr);
-
+
signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION);
@@ -3749,6 +3758,10 @@ Dbdict::createTab_reply(Signal* signal,
ref->senderRef = reference();
ref->senderData = createTabPtr.p->m_senderData;
ref->errorCode = createTabPtr.p->m_errorCode;
+ ref->masterNodeId = c_masterNodeId;
+ ref->status = 0;
+ ref->errorKey = 0;
+ ref->errorLine = 0;
//@todo check api failed
sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal,
@@ -4252,7 +4265,9 @@ Dbdict::execDIADDTABCONF(Signal* signal){
/**
* No local fragment (i.e. no LQHFRAGREQ)
*/
- sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+ execute(signal, createTabPtr.p->m_callback, 0);
+ return;
+ //sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
}
}
@@ -4637,6 +4652,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->fragmentKeyType = (DictTabInfo::FragmentKeyType)tableDesc.FragmentKeyType;
tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
tablePtr.p->kValue = tableDesc.TableKValue;
+ tablePtr.p->fragmentCount = tableDesc.FragmentCount;
tablePtr.p->frmLen = tableDesc.FrmLen;
memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
@@ -5080,8 +5096,20 @@ Dbdict::execPREP_DROP_TAB_REF(Signal* signal){
Uint32 nodeId = refToNode(prep->senderRef);
dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
-
- dropTabPtr.p->setErrorCode((Uint32)prep->errorCode);
+
+ Uint32 block = refToBlock(prep->senderRef);
+ if((prep->errorCode == PrepDropTabRef::NoSuchTable && block == DBLQH) ||
+ (prep->errorCode == PrepDropTabRef::NF_FakeErrorREF)){
+ jam();
+ /**
+ * Ignore errors:
+ * 1) no such table and LQH, it might not exists in different LQH's
+ * 2) node failure...
+ */
+ } else {
+ dropTabPtr.p->setErrorCode((Uint32)prep->errorCode);
+ }
+
if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
jam();
return;
@@ -5112,6 +5140,19 @@ void
Dbdict::execDROP_TAB_REF(Signal* signal){
jamEntry();
+ DropTabRef * const req = (DropTabRef*)signal->getDataPtr();
+
+ Uint32 block = refToBlock(req->senderRef);
+ ndbrequire(req->errorCode == DropTabRef::NF_FakeErrorREF ||
+ (req->errorCode == DropTabRef::NoSuchTable &&
+ (block == DBTUP || block == DBACC || block == DBLQH)));
+
+ if(block != DBDICT){
+ jam();
+ ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
+ dropTab_localDROP_TAB_CONF(signal);
+ return;
+ }
ndbrequire(false);
}
@@ -5619,7 +5660,7 @@ void Dbdict::execGET_TABINFOREQ(Signal* signal)
signal->getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
r0.reset(); // undo implicit first()
- if(r0.getWords((Uint32*)tableName, len))
+ if(r0.getWords((Uint32*)tableName, ((len + 3)/4)))
memcpy(keyRecord.tableName, tableName, len);
else {
jam();
@@ -5734,6 +5775,7 @@ void
Dbdict::execLIST_TABLES_REQ(Signal* signal)
{
jamEntry();
+ Uint32 i;
ListTablesReq * req = (ListTablesReq*)signal->getDataPtr();
Uint32 senderRef = req->senderRef;
Uint32 senderData = req->senderData;
@@ -5747,7 +5789,7 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal)
conf->senderData = senderData;
conf->counter = 0;
Uint32 pos = 0;
- for (Uint32 i = 0; i < c_tableRecordPool.getSize(); i++) {
+ for (i = 0; i < c_tableRecordPool.getSize(); i++) {
TableRecordPtr tablePtr;
c_tableRecordPool.getPtr(tablePtr, i);
// filter
@@ -5827,12 +5869,12 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal)
conf->counter++;
pos = 0;
}
- Uint32 i = 0;
- while (i < size) {
+ Uint32 k = 0;
+ while (k < size) {
char* p = (char*)&conf->tableData[pos];
for (Uint32 j = 0; j < 4; j++) {
- if (i < size)
- *p++ = tablePtr.p->tableName[i++];
+ if (k < size)
+ *p++ = tablePtr.p->tableName[k++];
else
*p++ = 0;
}
@@ -5846,7 +5888,7 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal)
}
}
// XXX merge with above somehow
- for (Uint32 i = 0; i < c_triggerRecordPool.getSize(); i++) {
+ for (i = 0; i < c_triggerRecordPool.getSize(); i++) {
if (reqListIndexes)
break;
TriggerRecordPtr triggerPtr;
@@ -5890,12 +5932,12 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal)
conf->counter++;
pos = 0;
}
- Uint32 i = 0;
- while (i < size) {
+ Uint32 k = 0;
+ while (k < size) {
char* p = (char*)&conf->tableData[pos];
for (Uint32 j = 0; j < 4; j++) {
- if (i < size)
- *p++ = triggerPtr.p->triggerName[i++];
+ if (k < size)
+ *p++ = triggerPtr.p->triggerName[k++];
else
*p++ = 0;
}
@@ -6132,6 +6174,7 @@ Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr)
void
Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
{
+ Uint32 k;
jam();
const CreateIndxReq* const req = &opPtr.p->m_request;
// signal data writer
@@ -6201,7 +6244,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
}
// hash index attributes must currently be in table order
Uint32 prevAttrId = RNIL;
- for (Uint32 k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
jam();
bool found = false;
for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
@@ -6212,16 +6255,6 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
jam();
found = true;
const Uint32 a = aRec->attributeDescriptor;
- bool isNullable = AttributeDescriptor::getNullable(a);
- // We do not allow more than one NULLable attribute for hash index
- if (isNullable &&
- indexPtr.p->isHashIndex() &&
- (opPtr.p->m_attrList.sz > 1)) {
- jam();
- opPtr.p->m_errorCode = CreateIndxRef::AttributeNullable;
- opPtr.p->m_errorLine = __LINE__;
- return;
- }
if (indexPtr.p->isHashIndex()) {
const Uint32 s1 = AttributeDescriptor::getSize(a);
const Uint32 s2 = AttributeDescriptor::getArraySize(a);
@@ -6261,7 +6294,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
// write index key attributes
AttributeRecordPtr aRecPtr;
c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
- for (Uint32 k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
jam();
for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 68214785234..de1d9757b2a 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -230,7 +230,7 @@ public:
Uint32 frmLen;
char frmData[MAX_FRM_DATA_SIZE];
-
+ Uint32 fragmentCount;
};
typedef Ptr<TableRecord> TableRecordPtr;
diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index e029af70574..14fa262f871 100644
--- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -269,7 +269,7 @@ public:
};
struct NodeRecord {
- NodeRecord() { m_nodefailSteps.clear();}
+ NodeRecord();
enum NodeStatus {
NOT_IN_CLUSTER = 0,
@@ -943,6 +943,7 @@ private:
void ndbStartReqLab(Signal *, BlockReference ref);
void nodeRestartStartRecConfLab(Signal *);
void dihCopyCompletedLab(Signal *);
+ void release_connect(ConnectRecordPtr ptr);
void copyTableNode(Signal *,
CopyTableNode* ctn,
NodeRecordPtr regNodePtr);
@@ -1127,7 +1128,6 @@ private:
void setAllowNodeStart(Uint32 nodeId, bool newState);
bool getNodeCopyCompleted(Uint32 nodeId);
void setNodeCopyCompleted(Uint32 nodeId, bool newState);
- void initNodeState(NodeRecordPtr regNodePtr);
bool checkNodeAlive(Uint32 nodeId);
// Initialisation
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index df47237ae59..7ca45ef4b43 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -50,17 +50,18 @@ void Dbdih::initData()
nodeRecord = (NodeRecord*)
allocRecord("NodeRecord", sizeof(NodeRecord), MAX_NDB_NODES);
- for(Uint32 i = 0; i<MAX_NDB_NODES; i++){
+ Uint32 i;
+ for(i = 0; i<MAX_NDB_NODES; i++){
new (&nodeRecord[i]) NodeRecord();
}
takeOverRecord = (TakeOverRecord*)allocRecord("TakeOverRecord",
sizeof(TakeOverRecord),
MAX_NDB_NODES);
- for(Uint32 i = 0; i<MAX_NDB_NODES; i++)
+ for(i = 0; i<MAX_NDB_NODES; i++)
new (&takeOverRecord[i]) TakeOverRecord();
- for(Uint32 i = 0; i<MAX_NDB_NODES; i++)
+ for(i = 0; i<MAX_NDB_NODES; i++)
new (&takeOverRecord[i]) TakeOverRecord();
waitGCPProxyPool.setSize(ZPROXY_FILE_SIZE);
@@ -254,6 +255,7 @@ Dbdih::Dbdih(const class Configuration & config):
addRecSignal(GSN_UPDATE_TOCONF, &Dbdih::execUPDATE_TOCONF);
addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdih::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_WAIT_DROP_TAB_REF, &Dbdih::execWAIT_DROP_TAB_REF);
addRecSignal(GSN_WAIT_DROP_TAB_CONF, &Dbdih::execWAIT_DROP_TAB_CONF);
addRecSignal(GSN_DROP_TAB_REQ, &Dbdih::execDROP_TAB_REQ);
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 0ce1f1e4bbe..0a8abe59aed 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -1401,6 +1401,7 @@ void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref)
void Dbdih::execREAD_NODESCONF(Signal* signal)
{
+ unsigned i;
ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
jamEntry();
Uint32 nodeArray[MAX_NDB_NODES];
@@ -1408,9 +1409,10 @@ void Dbdih::execREAD_NODESCONF(Signal* signal)
csystemnodes = readNodes->noOfNodes;
cmasterNodeId = readNodes->masterNodeId;
int index = 0;
- for (unsigned i = 1; i < MAX_NDB_NODES; i++){
+ NdbNodeBitmask tmp; tmp.assign(2, readNodes->allNodes);
+ for (i = 1; i < MAX_NDB_NODES; i++){
jam();
- if(NodeBitmask::get(readNodes->allNodes, i)){
+ if(tmp.get(i)){
jam();
nodeArray[index] = i;
if(NodeBitmask::get(readNodes->inactiveNodes, i) == false){
@@ -1420,6 +1422,32 @@ void Dbdih::execREAD_NODESCONF(Signal* signal)
index++;
}//if
}//for
+
+ if(cstarttype == NodeState::ST_SYSTEM_RESTART ||
+ cstarttype == NodeState::ST_NODE_RESTART){
+
+ for(i = 1; i<MAX_NDB_NODES; i++){
+ const Uint32 stat = Sysfile::getNodeStatus(i, SYSFILE->nodeStatus);
+ if(stat == Sysfile::NS_NotDefined && !tmp.get(i)){
+ jam();
+ continue;
+ }
+
+ if(tmp.get(i) && stat != Sysfile::NS_NotDefined){
+ jam();
+ continue;
+ }
+ char buf[255];
+ snprintf(buf, sizeof(buf),
+ "Illegal configuration change."
+ " Initial start needs to be performed "
+ " when changing no of storage nodes (node %d)", i);
+ progError(__LINE__,
+ ERR_INVALID_CONFIG,
+ buf);
+ }
+ }
+
ndbrequire(csystemnodes >= 1 && csystemnodes < MAX_NDB_NODES);
if (cstarttype == NodeState::ST_INITIAL_START) {
jam();
@@ -1534,11 +1562,12 @@ void Dbdih::execSTART_MECONF(Signal* signal)
StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
Uint32 nodeId = startMe->startingNodeId;
const Uint32 startWord = startMe->startWord;
+ Uint32 i;
CRASH_INSERTION(7130);
ndbrequire(nodeId == cownNodeId);
arrGuard(startWord + StartMeConf::DATA_SIZE, sizeof(cdata)/4);
- for(Uint32 i = 0; i < StartMeConf::DATA_SIZE; i++)
+ for(i = 0; i < StartMeConf::DATA_SIZE; i++)
cdata[startWord+i] = startMe->data[i];
if(startWord + StartMeConf::DATA_SIZE < Sysfile::SYSFILE_SIZE32){
@@ -1556,12 +1585,12 @@ void Dbdih::execSTART_MECONF(Signal* signal)
* But dont copy lastCompletedGCI:s
*/
Uint32 tempGCP[MAX_NDB_NODES];
- for(Uint32 i = 0; i < MAX_NDB_NODES; i++)
+ for(i = 0; i < MAX_NDB_NODES; i++)
tempGCP[i] = SYSFILE->lastCompletedGCI[i];
- for(Uint32 i = 0; i < Sysfile::SYSFILE_SIZE32; i++)
+ for(i = 0; i < Sysfile::SYSFILE_SIZE32; i++)
sysfileData[i] = cdata[i];
- for(Uint32 i = 0; i < MAX_NDB_NODES; i++)
+ for(i = 0; i < MAX_NDB_NODES; i++)
SYSFILE->lastCompletedGCI[i] = tempGCP[i];
setNodeActiveStatus();
@@ -1981,9 +2010,11 @@ void Dbdih::execSTART_INFOREQ(Signal* signal)
(ERROR_INSERTED(7124))) {
jam();
StartInfoRef *const ref =(StartInfoRef*)&signal->theData[0];
+ ref->startingNodeId = startNode;
ref->sendingNodeId = cownNodeId;
ref->errorCode = ZNODE_START_DISALLOWED_ERROR;
- sendSignal(cmasterdihref, GSN_START_INFOREF, signal, 2, JBB);
+ sendSignal(cmasterdihref, GSN_START_INFOREF, signal,
+ StartInfoRef::SignalLength, JBB);
return;
}//if
setNodeStatus(startNode, NodeRecord::STARTING);
@@ -2053,7 +2084,7 @@ void Dbdih::execINCL_NODEREQ(Signal* signal)
Sysfile::ActiveStatus TsaveState = nodePtr.p->activeStatus;
Uint32 TnodeGroup = nodePtr.p->nodeGroup;
- initNodeState(nodePtr);
+ new (nodePtr.p) NodeRecord();
nodePtr.p->nodeGroup = TnodeGroup;
nodePtr.p->activeStatus = TsaveState;
nodePtr.p->nodeStatus = NodeRecord::ALIVE;
@@ -3448,10 +3479,37 @@ void Dbdih::selectMasterCandidateAndSend(Signal* signal)
}//if
}//for
ndbrequire(masterCandidateId != 0);
+ setNodeGroups();
signal->theData[0] = masterCandidateId;
signal->theData[1] = gci;
sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB);
- setNodeGroups();
+
+ Uint32 node_groups[MAX_NDB_NODES];
+ memset(node_groups, 0, sizeof(node_groups));
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ const Uint32 ng = Sysfile::getNodeGroup(nodePtr.i, SYSFILE->nodeGroups);
+ if(ng != NO_NODE_GROUP_ID){
+ ndbrequire(ng < MAX_NDB_NODES);
+ node_groups[ng]++;
+ }
+ }
+
+ for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ Uint32 count = node_groups[nodePtr.i];
+ if(count != 0 && count != cnoReplicas){
+ char buf[255];
+ snprintf(buf, sizeof(buf),
+ "Illegal configuration change."
+ " Initial start needs to be performed "
+ " when changing no of replicas (%d != %d)",
+ node_groups[nodePtr.i], cnoReplicas);
+ progError(__LINE__,
+ ERR_INVALID_CONFIG,
+ buf);
+ }
+ }
}//Dbdih::selectMasterCandidate()
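The replica check added in this hunk recounts, from the Sysfile, how many nodes belong to each node group and refuses to start when a used group does not contain exactly cnoReplicas members. A self-contained sketch of that validation follows; the types and names (checkReplicaCount, groupOf) are simplified stand-ins, not part of the patch.

    #include <array>
    #include <cstdio>
    #include <cstdlib>

    static const unsigned MAX_NODES = 49;      // plays the role of MAX_NDB_NODES
    static const unsigned NO_GROUP  = 0xFFFF;  // plays the role of NO_NODE_GROUP_ID

    // Abort startup unless every used node group has exactly `replicas` members.
    // groupOf[n] stands in for Sysfile::getNodeGroup(n, SYSFILE->nodeGroups).
    void checkReplicaCount(const std::array<unsigned, MAX_NODES>& groupOf,
                           unsigned replicas)
    {
      std::array<unsigned, MAX_NODES> perGroup{};        // zero-initialised counters
      for (unsigned n = 1; n < MAX_NODES; n++)
        if (groupOf[n] != NO_GROUP)
          perGroup.at(groupOf[n])++;                      // nodes seen per group
      for (unsigned g = 0; g < MAX_NODES; g++)
        if (perGroup[g] != 0 && perGroup[g] != replicas) {
          std::fprintf(stderr, "Illegal configuration change (%u != %u)\n",
                       perGroup[g], replicas);
          std::abort();                                   // progError() in Dbdih
        }
    }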
/* ------------------------------------------------------------------------- */
@@ -3599,6 +3657,7 @@ void Dbdih::writeInitGcpLab(Signal* signal, FileRecordPtr filePtr)
/*---------------------------------------------------------------------------*/
void Dbdih::execNODE_FAILREP(Signal* signal)
{
+ Uint32 i;
Uint32 failedNodes[MAX_NDB_NODES];
jamEntry();
NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
@@ -3611,7 +3670,7 @@ void Dbdih::execNODE_FAILREP(Signal* signal)
// The first step is to convert from a bit mask to an array of failed nodes.
/*-------------------------------------------------------------------------*/
Uint32 index = 0;
- for (Uint32 i = 1; i < MAX_NDB_NODES; i++) {
+ for (i = 1; i < MAX_NDB_NODES; i++) {
jam();
if(NodeBitmask::get(nodeFail->theNodes, i)){
jam();
@@ -3629,7 +3688,7 @@ void Dbdih::execNODE_FAILREP(Signal* signal)
// We also set certain state variables ensuring that the node no longer is
// used in transactions and also mark that we received this signal.
/*-------------------------------------------------------------------------*/
- for (Uint32 i = 0; i < noOfFailedNodes; i++) {
+ for (i = 0; i < noOfFailedNodes; i++) {
jam();
NodeRecordPtr TNodePtr;
TNodePtr.i = failedNodes[i];
@@ -3671,7 +3730,7 @@ void Dbdih::execNODE_FAILREP(Signal* signal)
const bool masterTakeOver = (oldMasterId != newMasterId);
- for(Uint32 i = 0; i < noOfFailedNodes; i++) {
+ for(i = 0; i < noOfFailedNodes; i++) {
NodeRecordPtr failedNodePtr;
failedNodePtr.i = failedNodes[i];
ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
@@ -6071,13 +6130,9 @@ void Dbdih::execDIRELEASEREQ(Signal* signal)
ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
ndbrequire(connectPtr.p->connectState != ConnectRecord::FREE);
ndbrequire(connectPtr.p->userblockref == userRef);
- connectPtr.p->connectState = ConnectRecord::FREE;
signal->theData[0] = connectPtr.p->userpointer;
sendSignal(connectPtr.p->userblockref, GSN_DIRELEASECONF, signal, 1, JBB);
- connectPtr.p->nfConnect = cfirstconnect;
- cfirstconnect = connectPtr.i;
- connectPtr.p->userblockref = ZNIL;
- connectPtr.p->userpointer = RNIL;
+ release_connect(connectPtr);
}//Dbdih::execDIRELEASEREQ()
/*
@@ -6514,11 +6569,16 @@ Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
DiAddTabConf::SignalLength, JBB);
// Release
- connectPtr.p->userblockref = ZNIL;
- connectPtr.p->userpointer = RNIL;
- connectPtr.p->connectState = ConnectRecord::FREE;
- connectPtr.p->nfConnect = cfirstconnect;
- cfirstconnect = connectPtr.i;
+ release_connect(connectPtr);
+}
+void
+Dbdih::release_connect(ConnectRecordPtr ptr)
+{
+ ptr.p->userblockref = ZNIL;
+ ptr.p->userpointer = RNIL;
+ ptr.p->connectState = ConnectRecord::FREE;
+ ptr.p->nfConnect = cfirstconnect;
+ cfirstconnect = ptr.i;
}
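The new Dbdih::release_connect() above factors out the record-release sequence that previously appeared in three places: clear the record and push it onto an index-linked free list headed by cfirstconnect. A standalone sketch of that free-list idiom, using simplified stand-in types (ConnectRec, ConnectPool) rather than the real Dbdih structures:

    #include <cstdint>
    #include <vector>

    static const uint32_t RNIL = 0xFFFFFF00;   // "no record" marker, as in NDB

    struct ConnectRec {
      uint32_t userpointer  = RNIL;
      uint32_t userblockref = RNIL;
      uint32_t nfConnect    = RNIL;            // link used by the free list
    };

    struct ConnectPool {
      std::vector<ConnectRec> records;
      uint32_t firstFree = RNIL;               // plays the role of cfirstconnect

      // Equivalent of release_connect(): reset the record, push it on the list.
      void release(uint32_t i) {
        ConnectRec& r  = records.at(i);
        r.userpointer  = RNIL;
        r.userblockref = RNIL;
        r.nfConnect    = firstFree;            // old head becomes our successor
        firstFree      = i;                    // released record is the new head
      }
    };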
void
@@ -6555,11 +6615,7 @@ Dbdih::execADD_FRAGREF(Signal* signal){
}
// Release
- connectPtr.p->userblockref = ZNIL;
- connectPtr.p->userpointer = RNIL;
- connectPtr.p->connectState = ConnectRecord::FREE;
- connectPtr.p->nfConnect = cfirstconnect;
- cfirstconnect = connectPtr.i;
+ release_connect(connectPtr);
}
/*
@@ -6568,10 +6624,10 @@ Dbdih::execADD_FRAGREF(Signal* signal){
*/
void Dbdih::addtabrefuseLab(Signal* signal, ConnectRecordPtr connectPtr, Uint32 errorCode)
{
- connectPtr.p->connectState = ConnectRecord::INUSE;
signal->theData[0] = connectPtr.p->userpointer;
signal->theData[1] = errorCode;
sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal, 2, JBB);
+ release_connect(connectPtr);
return;
}//Dbdih::addtabrefuseLab()
@@ -6882,8 +6938,9 @@ void Dbdih::releaseFragments(TabRecordPtr tabPtr)
void Dbdih::initialiseFragstore()
{
+ Uint32 i;
FragmentstorePtr fragPtr;
- for (Uint32 i = 0; i < cfragstoreFileSize; i++) {
+ for (i = 0; i < cfragstoreFileSize; i++) {
fragPtr.i = i;
ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
initFragstore(fragPtr);
@@ -6892,7 +6949,8 @@ void Dbdih::initialiseFragstore()
fragPtr.i = 0;
cfirstfragstore = RNIL;
cremainingfrags = 0;
- for (Uint32 i = 0; i < noOfChunks; i++) {
+ for (i = 0; i < noOfChunks; i++) {
+ refresh_watch_dog();
ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
fragPtr.p->nextFragmentChunk = cfirstfragstore;
cfirstfragstore = fragPtr.i;
@@ -10231,11 +10289,12 @@ void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr,
ReplicaRecordPtr& newReplicaPtr,
Uint32 nodeId)
{
+ Uint32 i;
ReplicaRecordPtr arrReplicaPtr;
ReplicaRecordPtr arrPrevReplicaPtr;
seizeReplicaRec(newReplicaPtr);
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ for (i = 0; i < MAX_LCP_STORED; i++) {
newReplicaPtr.p->maxGciCompleted[i] = 0;
newReplicaPtr.p->maxGciStarted[i] = 0;
newReplicaPtr.p->lcpId[i] = 0;
@@ -10243,7 +10302,7 @@ void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr,
}//for
newReplicaPtr.p->noCrashedReplicas = 0;
newReplicaPtr.p->initialGci = currentgcp;
- for (Uint32 i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
newReplicaPtr.p->replicaLastGci[i] = (Uint32)-1;
newReplicaPtr.p->createGci[i] = 0;
}//for
@@ -10354,7 +10413,8 @@ void Dbdih::checkEscalation()
{
Uint32 TnodeGroup[MAX_NDB_NODES];
NodeRecordPtr nodePtr;
- for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
TnodeGroup[i] = ZFALSE;
}//for
for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
@@ -10366,7 +10426,7 @@ void Dbdih::checkEscalation()
TnodeGroup[nodePtr.p->nodeGroup] = ZTRUE;
}
}
- for (Uint32 i = 0; i < cnoOfNodeGroups; i++) {
+ for (i = 0; i < cnoOfNodeGroups; i++) {
jam();
if (TnodeGroup[i] == ZFALSE) {
jam();
@@ -10900,27 +10960,6 @@ void Dbdih::initFragstore(FragmentstorePtr fragPtr)
fragPtr.p->distributionKey = 0;
}//Dbdih::initFragstore()
-void Dbdih::initNodeState(NodeRecordPtr nodePtr)
-{
- nodePtr.p->gcpstate = NodeRecord::READY;
-
- nodePtr.p->activeStatus = Sysfile::NS_NotDefined;
- nodePtr.p->recNODE_FAILREP = ZFALSE;
- nodePtr.p->nodeGroup = ZNIL;
- nodePtr.p->dbtcFailCompleted = ZTRUE;
- nodePtr.p->dbdictFailCompleted = ZTRUE;
- nodePtr.p->dbdihFailCompleted = ZTRUE;
- nodePtr.p->dblqhFailCompleted = ZTRUE;
- nodePtr.p->noOfStartedChkpt = 0;
- nodePtr.p->noOfQueuedChkpt = 0;
- nodePtr.p->lcpStateAtTakeOver = (MasterLCPConf::State)255;
-
- nodePtr.p->activeTabptr = RNIL;
- nodePtr.p->nodeStatus = NodeRecord::NOT_IN_CLUSTER;
- nodePtr.p->useInTransactions = false;
- nodePtr.p->copyCompleted = false;
-}//Dbdih::initNodeState()
-
/*************************************************************************/
/* */
/* MODULE: INIT_RESTART_INFO */
@@ -10929,7 +10968,8 @@ void Dbdih::initNodeState(NodeRecordPtr nodePtr)
/*************************************************************************/
void Dbdih::initRestartInfo()
{
- for (int i = 0; i < MAX_NDB_NODES; i++) {
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
SYSFILE->lastCompletedGCI[i] = 0;
}//for
NodeRecordPtr nodePtr;
@@ -10950,10 +10990,10 @@ void Dbdih::initRestartInfo()
SYSFILE->oldestRestorableGCI = 1;
SYSFILE->newestRestorableGCI = 1;
SYSFILE->systemRestartBits = 0;
- for (Uint32 i = 0; i < NodeBitmask::Size; i++) {
+ for (i = 0; i < NodeBitmask::Size; i++) {
SYSFILE->lcpActive[0] = 0;
}//for
- for (Uint32 i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) {
+ for (i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) {
SYSFILE->takeOver[i] = 0;
}//for
Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
@@ -11032,10 +11072,11 @@ void Dbdih::initTable(TabRecordPtr tabPtr)
tabPtr.p->tabFile[1] = RNIL;
tabPtr.p->m_dropTab.tabUserRef = 0;
tabPtr.p->m_dropTab.tabUserPtr = RNIL;
- for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
tabPtr.p->startFid[i] = RNIL;
}//for
- for (Uint32 i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
tabPtr.p->pageRef[i] = RNIL;
}//for
tabPtr.p->tableType = DictTabInfo::UndefTableType;
@@ -11100,6 +11141,7 @@ void Dbdih::initialiseRecordsLab(Signal* signal,
jam();
/******** INTIALIZING API CONNECT RECORDS ********/
for (apiConnectptr.i = 0; apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++) {
+ refresh_watch_dog();
ptrAss(apiConnectptr, apiConnectRecord);
apiConnectptr.p->nextApi = RNIL;
}//for
@@ -11111,6 +11153,7 @@ void Dbdih::initialiseRecordsLab(Signal* signal,
jam();
/****** CONNECT ******/
for (connectPtr.i = 0; connectPtr.i < cconnectFileSize; connectPtr.i++) {
+ refresh_watch_dog();
ptrAss(connectPtr, connectRecord);
connectPtr.p->userpointer = RNIL;
connectPtr.p->userblockref = ZNIL;
@@ -11165,7 +11208,7 @@ void Dbdih::initialiseRecordsLab(Signal* signal,
NodeRecordPtr nodePtr;
for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
ptrAss(nodePtr, nodeRecord);
- initNodeState(nodePtr);
+ new (nodePtr.p) NodeRecord();
}//for
break;
}
@@ -11175,6 +11218,7 @@ void Dbdih::initialiseRecordsLab(Signal* signal,
jam();
/******* PAGE RECORD ******/
for (pagePtr.i = 0; pagePtr.i < cpageFileSize; pagePtr.i++) {
+ refresh_watch_dog();
ptrAss(pagePtr, pageRecord);
pagePtr.p->nextfreepage = pagePtr.i + 1;
}//for
@@ -11191,6 +11235,7 @@ void Dbdih::initialiseRecordsLab(Signal* signal,
/******* REPLICA RECORD ******/
for (initReplicaPtr.i = 0; initReplicaPtr.i < creplicaFileSize;
initReplicaPtr.i++) {
+ refresh_watch_dog();
ptrAss(initReplicaPtr, replicaRecord);
initReplicaPtr.p->lcpIdStarted = 0;
initReplicaPtr.p->lcpOngoingFlag = false;
@@ -11210,6 +11255,7 @@ void Dbdih::initialiseRecordsLab(Signal* signal,
/********* TAB-DESCRIPTOR ********/
for (loopTabptr.i = 0; loopTabptr.i < ctabFileSize; loopTabptr.i++) {
ptrAss(loopTabptr, tabRecord);
+ refresh_watch_dog();
initTable(loopTabptr);
}//for
break;
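Several of the long initialisation loops in this file gain a refresh_watch_dog() call, because initialising tens of thousands of records in one pass can otherwise trip the data node's watchdog. A minimal sketch of the idea; the Watchdog type here is hypothetical, the real call is a member of the block:

    #include <atomic>
    #include <cstddef>

    struct Watchdog {
      std::atomic<unsigned> counter{0};
      void refresh() { counter.fetch_add(1, std::memory_order_relaxed); }
    };

    // Initialise a large array of records, poking the watchdog each iteration
    // so a separate watchdog thread can see the block is still making progress.
    template <class Rec>
    void initRecords(Rec* recs, std::size_t n, Watchdog& wd)
    {
      for (std::size_t i = 0; i < n; i++) {
        wd.refresh();        // corresponds to refresh_watch_dog() in the patch
        recs[i] = Rec();     // reset the record to its default state
      }
    }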
@@ -11367,6 +11413,7 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[])
Uint32 tmngNodeGroup;
Uint32 tmngReplica;
Uint32 tmngLimit;
+ Uint32 i;
/**-----------------------------------------------------------------------
* ASSIGN ALL ACTIVE NODES INTO NODE GROUPS. HOT SPARE NODES ARE ASSIGNED
@@ -11376,7 +11423,7 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[])
tmngReplica = 0;
tmngLimit = csystemnodes - cnoHotSpare;
ndbrequire(tmngLimit < MAX_NDB_NODES);
- for (Uint32 i = 0; i < tmngLimit; i++) {
+ for (i = 0; i < tmngLimit; i++) {
NodeGroupRecordPtr NGPtr;
jam();
tmngNode = nodeArray[i];
@@ -11396,14 +11443,14 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[])
}//for
cnoOfNodeGroups = tmngNodeGroup;
ndbrequire(csystemnodes < MAX_NDB_NODES);
- for (Uint32 i = tmngLimit + 1; i < csystemnodes; i++) {
+ for (i = tmngLimit + 1; i < csystemnodes; i++) {
jam();
tmngNode = nodeArray[i];
mngNodeptr.i = tmngNode;
ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
mngNodeptr.p->nodeGroup = ZNIL;
}//for
- for(int i = 0; i < MAX_NDB_NODES; i++){
+ for(i = 0; i < MAX_NDB_NODES; i++){
jam();
Sysfile::setNodeGroup(i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
}//for
@@ -11521,7 +11568,7 @@ void Dbdih::makePrnList(ReadNodesConf * readNodes, Uint32 nodeArray[])
jam();
nodePtr.i = nodeArray[i];
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
- initNodeState(nodePtr);
+ new (nodePtr.p) NodeRecord();
if (NodeBitmask::get(readNodes->inactiveNodes, nodePtr.i) == false){
jam();
nodePtr.p->nodeStatus = NodeRecord::ALIVE;
@@ -11690,12 +11737,13 @@ Uint32 Dbdih::readPageWord(RWFragment* rf)
void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
{
+ Uint32 i;
readReplicaPtr.p->procNode = readPageWord(rf);
readReplicaPtr.p->initialGci = readPageWord(rf);
readReplicaPtr.p->noCrashedReplicas = readPageWord(rf);
readReplicaPtr.p->nextLcp = readPageWord(rf);
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ for (i = 0; i < MAX_LCP_STORED; i++) {
readReplicaPtr.p->maxGciCompleted[i] = readPageWord(rf);
readReplicaPtr.p->maxGciStarted[i] = readPageWord(rf);
readReplicaPtr.p->lcpId[i] = readPageWord(rf);
@@ -11703,13 +11751,13 @@ void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
}//for
const Uint32 noCrashedReplicas = readReplicaPtr.p->noCrashedReplicas;
ndbrequire(noCrashedReplicas < 8);
- for (Uint32 i = 0; i < noCrashedReplicas; i++) {
+ for (i = 0; i < noCrashedReplicas; i++) {
readReplicaPtr.p->createGci[i] = readPageWord(rf);
readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
ndbrequire(readReplicaPtr.p->createGci[i] != 0xF1F1F1F1);
ndbrequire(readReplicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
}//for
- for(Uint32 i = noCrashedReplicas; i<8; i++){
+ for(i = noCrashedReplicas; i<8; i++){
readReplicaPtr.p->createGci[i] = readPageWord(rf);
readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
// They are not initialized...
@@ -11732,7 +11780,7 @@ void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
/* WE ALSO HAVE TO INVALIDATE ANY LOCAL CHECKPOINTS THAT HAVE BEEN */
/* INVALIDATED BY MOVING BACK THE RESTART GCI. */
/* ---------------------------------------------------------------------- */
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ for (i = 0; i < MAX_LCP_STORED; i++) {
jam();
if ((readReplicaPtr.p->lcpStatus[i] == ZVALID) &&
(readReplicaPtr.p->maxGciStarted[i] > SYSFILE->newestRestorableGCI)) {
@@ -11764,6 +11812,7 @@ void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr)
{
+ Uint32 i;
ReplicaRecordPtr newReplicaPtr;
Uint32 noStoredReplicas = fragPtr.p->noStoredReplicas;
Uint32 noOldStoredReplicas = fragPtr.p->noOldStoredReplicas;
@@ -11775,7 +11824,7 @@ void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr)
fragPtr.p->noOldStoredReplicas = 0;
Uint32 replicaIndex = 0;
ndbrequire(noStoredReplicas + noOldStoredReplicas <= MAX_REPLICAS);
- for (Uint32 i = 0; i < noStoredReplicas; i++) {
+ for (i = 0; i < noStoredReplicas; i++) {
seizeReplicaRec(newReplicaPtr);
readReplica(rf, newReplicaPtr);
if (checkNodeAlive(newReplicaPtr.p->procNode)) {
@@ -11790,7 +11839,7 @@ void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr)
}//if
}//for
fragPtr.p->fragReplicas = noStoredReplicas;
- for (Uint32 i = 0; i < noOldStoredReplicas; i++) {
+ for (i = 0; i < noOldStoredReplicas; i++) {
jam();
seizeReplicaRec(newReplicaPtr);
readReplica(rf, newReplicaPtr);
@@ -12640,11 +12689,11 @@ void Dbdih::setNodeRestartInfoBits()
NodeRecordPtr nodePtr;
Uint32 tsnrNodeGroup;
Uint32 tsnrNodeActiveStatus;
-
- for(int i = 1; i < MAX_NDB_NODES; i++){
+ Uint32 i;
+ for(i = 1; i < MAX_NDB_NODES; i++){
Sysfile::setNodeStatus(i, SYSFILE->nodeStatus, Sysfile::NS_Active);
}//for
- for(Uint32 i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){
+ for(i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){
SYSFILE->nodeGroups[i] = 0;
}//for
NdbNodeBitmask::clear(SYSFILE->lcpActive);
@@ -12786,13 +12835,14 @@ void Dbdih::writeReplicas(RWFragment* wf, Uint32 replicaStartIndex)
writePageWord(wf, wfReplicaPtr.p->initialGci);
writePageWord(wf, wfReplicaPtr.p->noCrashedReplicas);
writePageWord(wf, wfReplicaPtr.p->nextLcp);
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ Uint32 i;
+ for (i = 0; i < MAX_LCP_STORED; i++) {
writePageWord(wf, wfReplicaPtr.p->maxGciCompleted[i]);
writePageWord(wf, wfReplicaPtr.p->maxGciStarted[i]);
writePageWord(wf, wfReplicaPtr.p->lcpId[i]);
writePageWord(wf, wfReplicaPtr.p->lcpStatus[i]);
}//if
- for (Uint32 i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
writePageWord(wf, wfReplicaPtr.p->createGci[i]);
writePageWord(wf, wfReplicaPtr.p->replicaLastGci[i]);
}//if
@@ -13003,7 +13053,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}
if(signal->theData[0] == 7012){
- char buf[c_lcpState.m_participatingDIH.TextLength+1];
+ char buf[8*_NDB_NODE_BITMASK_SIZE+1];
infoEvent("ParticipatingDIH = %s", c_lcpState.m_participatingDIH.getText(buf));
infoEvent("ParticipatingLQH = %s", c_lcpState.m_participatingLQH.getText(buf));
infoEvent("m_LCP_COMPLETE_REP_Counter_DIH = %s",
@@ -13020,8 +13070,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
jam();
ptrAss(nodePtr, nodeRecord);
if(nodePtr.p->nodeStatus == NodeRecord::ALIVE){
-
- for(Uint32 i = 0; i<nodePtr.p->noOfStartedChkpt; i++){
+ Uint32 i;
+ for(i = 0; i<nodePtr.p->noOfStartedChkpt; i++){
infoEvent("Node %d: started: table=%d fragment=%d replica=%d",
nodePtr.i,
nodePtr.p->startedChkpt[i].tableId,
@@ -13029,7 +13079,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
nodePtr.p->startedChkpt[i].replicaPtr);
}
- for(Uint32 i = 0; i<nodePtr.p->noOfQueuedChkpt; i++){
+ for(i = 0; i<nodePtr.p->noOfQueuedChkpt; i++){
infoEvent("Node %d: queued: table=%d fragment=%d replica=%d",
nodePtr.i,
nodePtr.p->queuedChkpt[i].tableId,
@@ -13360,6 +13410,25 @@ Dbdih::checkPrepDropTabComplete(Signal* signal, TabRecordPtr tabPtr){
}
void
+Dbdih::execWAIT_DROP_TAB_REF(Signal* signal){
+ jamEntry();
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = ref->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ Uint32 nodeId = refToNode(ref->senderRef);
+
+ ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
+ ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
+
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
Dbdih::execWAIT_DROP_TAB_CONF(Signal* signal){
jamEntry();
WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
@@ -14137,3 +14206,25 @@ bool Dbdih::isActiveMaster()
{
return ((reference() == cmasterdihref) && (cmasterState == MASTER_ACTIVE));
}//Dbdih::isActiveMaster()
+
+Dbdih::NodeRecord::NodeRecord(){
+ m_nodefailSteps.clear();
+ gcpstate = NodeRecord::READY;
+
+ activeStatus = Sysfile::NS_NotDefined;
+ recNODE_FAILREP = ZFALSE;
+ nodeGroup = ZNIL;
+ dbtcFailCompleted = ZTRUE;
+ dbdictFailCompleted = ZTRUE;
+ dbdihFailCompleted = ZTRUE;
+ dblqhFailCompleted = ZTRUE;
+ noOfStartedChkpt = 0;
+ noOfQueuedChkpt = 0;
+ lcpStateAtTakeOver = (MasterLCPConf::State)255;
+
+ activeTabptr = RNIL;
+ nodeStatus = NodeRecord::NOT_IN_CLUSTER;
+ useInTransactions = false;
+ copyCompleted = false;
+ allowNodeStart = true;
+}
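With the constructor above, the old initNodeState() defaults live in one place and call sites re-initialise a record in place with "new (nodePtr.p) NodeRecord()". A short sketch of that placement-new reset idiom on a simplified record type (Rec is illustrative, not the real NodeRecord):

    #include <new>      // placement new

    struct Rec {
      int  state;
      bool alive;
      Rec() : state(0), alive(false) {}   // all defaults in one place
    };

    // Re-run the constructor on already-allocated storage, as the patch does
    // with "new (nodePtr.p) NodeRecord()" instead of a separate init routine.
    inline void resetInPlace(Rec* r)
    {
      r->~Rec();       // trivial here, but keeps the idiom explicit
      new (r) Rec();   // construct into the existing storage
    }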
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 824f74c59af..9fcb6faf3e3 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -410,7 +410,6 @@
*/
class Dblqh: public SimulatedBlock {
public:
-
enum LcpCloseState {
LCP_IDLE = 0,
LCP_RUNNING = 1, // LCP is running
@@ -1990,7 +1989,6 @@ public:
UintR nextTcLogQueue;
UintR nextTc;
UintR nextTcConnectrec;
- Uint16 nodeAfterNext[2];
UintR prevHashRec;
UintR prevLogTcrec;
UintR prevTc;
@@ -2027,6 +2025,7 @@ public:
Uint16 nextReplica;
Uint16 primKeyLen;
Uint16 save1;
+ Uint16 nodeAfterNext[3];
Uint8 activeCreat;
Uint8 apiVersionNo;
@@ -2765,6 +2764,11 @@ private:
/* ------------------------------------------------------------------------- */
UintR cfirstCompletedFragSr;
+ /**
+ * List of fragment that the log execution is completed for
+ */
+ Uint32 c_redo_log_complete_frags;
+
/* ------------------------------------------------------------------------- */
/*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */
/*FROM AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG TAIL. */
@@ -2859,8 +2863,9 @@ private:
/* ------------------------------------------------------------------------- */
UintR preComputedRequestInfoMask;
UintR ctransidHash[1024];
-
-
+
+ Uint32 c_diskless;
+
public:
/**
*
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index d5f40ec143c..4bb31185cfe 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -65,6 +65,7 @@ void Dblqh::initData()
cLqhTimeOutCount = 0;
cLqhTimeOutCheckCount = 0;
cbookedAccOps = 0;
+ c_redo_log_complete_frags = RNIL;
}//Dblqh::initData()
void Dblqh::initRecords()
@@ -120,7 +121,8 @@ void Dblqh::initRecords()
logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord",
sizeof(LogPageRecord),
- clogPageFileSize);
+ clogPageFileSize,
+ false);
pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
sizeof(PageRefRecord),
@@ -322,6 +324,31 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = {
+ &addfragptr,
+ &attrinbufptr,
+ &databufptr,
+ &fragptr,
+ &gcpPtr,
+ &lcpPtr,
+ &lcpLocptr,
+ &logPartPtr,
+ &logFilePtr,
+ &lfoPtr,
+ &logPagePtr,
+ &pageRefPtr,
+ &scanptr,
+ &tabptr,
+ &tcConnectptr,
+ &tcNodeFailptr,
+ };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
+
}//Dblqh::Dblqh()
Dblqh::~Dblqh()
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 1abf4b3a7e9..6b4a78380be 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -60,10 +60,55 @@
// seen only when we debug the product
#ifdef VM_TRACE
#define DEBUG(x) ndbout << "DBLQH: "<< x << endl;
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::TransactionState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::LogWriteState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::ListState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::AbortState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::ScanRecord::ScanState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::LogFileOperationRecord::LfoState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){
+ out << (int)state;
+ return out;
+}
+
#else
#define DEBUG(x)
#endif
+//#define MARKER_TRACE 1
+//#define TRACE_SCAN_TAKEOVER 1
+
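The NdbOut operator<< overloads added above let the DEBUG() statements stream the Dblqh state enums directly, printing them as their numeric values. The same idiom against std::ostream, with a hypothetical State enum:

    #include <iostream>

    enum class State { IDLE = 0, RUNNING = 1, CLOSING = 2 };

    // Stream the enum as its numeric value, exactly what the NdbOut overloads do.
    inline std::ostream& operator<<(std::ostream& out, State s)
    {
      return out << static_cast<int>(s);
    }

    int main()
    {
      std::cout << "state=" << State::RUNNING << "\n";   // prints "state=1"
    }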
const Uint32 NR_ScanNo = 0;
void Dblqh::execACC_COM_BLOCK(Signal* signal)
@@ -847,6 +892,8 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_SCANS_PER_FRAG;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
+
initRecords();
initialiseRecordsLab(signal, 0, ref, senderData);
@@ -957,7 +1004,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
} else {
fragptr.p->tableFragptr = fragptr.i;
}
-
+
if (tempTable) {
//--------------------------------------------
// reqinfo bit 3-4 = 2 means temporary table
@@ -2416,6 +2463,9 @@ Dblqh::execREMOVE_MARKER_ORD(Signal* signal)
CommitAckMarkerPtr removedPtr;
m_commitAckMarkerHash.release(removedPtr, key);
ndbrequire(removedPtr.i != RNIL);
+#ifdef MARKER_TRACE
+ ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2);
+#endif
}
@@ -3140,6 +3190,13 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
return;
}//if
+
+ if(ERROR_INSERTED(5038) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5039);
+ return;
+ }
c_Counters.operations++;
@@ -3208,10 +3265,17 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
const NodeId tcNodeId = refToNode(sig5);
markerPtr.p->tcNodeId = tcNodeId;
+ CommitAckMarkerPtr tmp;
+#ifdef VM_TRACE
+#ifdef MARKER_TRACE
+ ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2);
+#endif
+ ndbrequire(!m_commitAckMarkerHash.find(tmp, * markerPtr.p));
+#endif
m_commitAckMarkerHash.add(markerPtr);
regTcPtr->commitAckMarker = markerPtr.i;
- }
-
+ }
+
regTcPtr->reqinfo = Treqinfo;
regTcPtr->lastReplicaNo = LqhKeyReq::getLastReplicaNo(Treqinfo);
regTcPtr->lockType = LqhKeyReq::getLockType(Treqinfo);
@@ -3510,6 +3574,7 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
/* -------------------------------------------------------------------------- */
/* ALSO AFTER NORMAL PROCEDURE WE CONTINUE HERE */
/* -------------------------------------------------------------------------- */
+ Uint32 tc_ptr_i = tcConnectptr.i;
TcConnectionrec * const regTcPtr = tcConnectptr.p;
if (regTcPtr->indTakeOver == ZTRUE) {
jam();
@@ -3520,7 +3585,10 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo);
key.fragPtrI = fragptr.i;
c_scanTakeOverHash.find(scanptr, key);
- ndbassert(scanptr.i != RNIL);
+#ifdef TRACE_SCAN_TAKEOVER
+ if(scanptr.i == RNIL)
+ ndbout_c("not finding (%d %d)", key.scanNumber, key.fragPtrI);
+#endif
}
if (scanptr.i == RNIL) {
jam();
@@ -3610,14 +3678,14 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACCKEYREQ,
signal, 7 + regTcPtr->primKeyLen);
if (signal->theData[0] < RNIL) {
- signal->theData[0] = tcConnectptr.i;
+ signal->theData[0] = tc_ptr_i;
execACCKEYCONF(signal);
return;
} else if (signal->theData[0] == RNIL) {
;
} else {
ndbrequire(signal->theData[0] == (UintR)-1);
- signal->theData[0] = tcConnectptr.i;
+ signal->theData[0] = tc_ptr_i;
execACCKEYREF(signal);
}//if
return;
@@ -5632,9 +5700,7 @@ void Dblqh::execABORT(Signal* signal)
BlockReference tcBlockref = signal->theData[1];
Uint32 transid1 = signal->theData[2];
Uint32 transid2 = signal->theData[3];
- if (ERROR_INSERTED(5003)) {
- systemErrorLab(signal);
- }
+ CRASH_INSERTION(5003);
if (ERROR_INSERTED(5015)) {
CLEAR_ERROR_INSERT_VALUE;
sendSignalWithDelay(cownref, GSN_ABORT, signal, 2000, 4);
@@ -5644,6 +5710,21 @@ void Dblqh::execABORT(Signal* signal)
transid2,
tcOprec) != ZOK) {
jam();
+
+ if(ERROR_INSERTED(5039) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5040);
+ return;
+ }
+
+ if(ERROR_INSERTED(5040) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5003);
+ return;
+ }
+
/* ------------------------------------------------------------------------- */
// SEND ABORTED EVEN IF NOT FOUND.
//THE TRANSACTION MIGHT NEVER HAVE ARRIVED HERE.
@@ -5676,7 +5757,23 @@ void Dblqh::execABORT(Signal* signal)
}//if
regTcPtr->abortState = TcConnectionrec::ABORT_FROM_TC;
regTcPtr->activeCreat = ZFALSE;
+
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(commitAckMarker != RNIL){
+ jam();
+#ifdef MARKER_TRACE
+ {
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
+ ndbout_c("Ab2 marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
+ }
+#endif
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ }
+
abortStateHandlerLab(signal);
+
return;
}//Dblqh::execABORT()
@@ -5815,10 +5912,18 @@ void Dblqh::execACCKEYREF(Signal* signal)
* Only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND
*
* Unless it's a simple or dirty read
+ *
+ * NOT TRUE!
+ * 1) op1 - primary insert ok
+   * 2) op1 - backup insert fail (log full or whatever)
+ * 3) op1 - delete ok @ primary
+ * 4) op1 - delete fail @ backup
+ *
+ * -> ZNO_TUPLE_FOUND is possible
*/
ndbrequire
(tcPtr->seqNoReplica == 0 ||
- (errCode != ZTUPLE_ALREADY_EXIST && errCode != ZNO_TUPLE_FOUND) ||
+ errCode != ZTUPLE_ALREADY_EXIST ||
(tcPtr->operation == ZREAD && (tcPtr->dirtyOp || tcPtr->opSimple)));
}
tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
@@ -5925,10 +6030,15 @@ void Dblqh::abortStateHandlerLab(Signal* signal)
break;
case TcConnectionrec::STOPPED:
jam();
-/* ------------------------------------------------------------------------- */
-/*WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY A LOCAL CHECKPOINT. */
-/* ------------------------------------------------------------------------- */
+ /* ---------------------------------------------------------------------
+   * WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY AN LCP.
+   * Since nothing has been done, just release the operation:
+   * no prepare log record has been written,
+   * so no abort log record needs to be written either.
+ */
releaseWaitQueue(signal);
+ continueAfterLogAbortWriteLab(signal);
+ return;
break;
case TcConnectionrec::WAIT_AI_AFTER_ABORT:
jam();
@@ -6034,7 +6144,13 @@ void Dblqh::abortCommonLab(Signal* signal)
* There is no NR ongoing and we have a marker
*/
jam();
-
+#ifdef MARKER_TRACE
+ {
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
+ ndbout_c("Abo marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
+ }
+#endif
m_commitAckMarkerHash.release(commitAckMarker);
regTcPtr->commitAckMarker = RNIL;
}
@@ -6307,12 +6423,13 @@ void Dblqh::execNODE_FAILREP(Signal* signal)
UintR TfoundNodes = 0;
UintR TnoOfNodes;
UintR Tdata[MAX_NDB_NODES];
+ Uint32 i;
NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
TnoOfNodes = nodeFail->noOfNodes;
UintR index = 0;
- for (Uint32 i = 1; i < MAX_NDB_NODES; i++) {
+ for (i = 1; i < MAX_NDB_NODES; i++) {
jam();
if(NodeBitmask::get(nodeFail->theNodes, i)){
jam();
@@ -6326,7 +6443,7 @@ void Dblqh::execNODE_FAILREP(Signal* signal)
ndbrequire(index == TnoOfNodes);
ndbrequire(cnoOfNodes - 1 < MAX_NDB_NODES);
- for (Uint32 i = 0; i < TnoOfNodes; i++) {
+ for (i = 0; i < TnoOfNodes; i++) {
const Uint32 nodeId = Tdata[i];
lcpPtr.p->m_EMPTY_LCP_REQ.clear(nodeId);
@@ -6524,7 +6641,7 @@ Dblqh::scanMarkers(Signal* signal,
}
const Uint32 RT_BREAK = 256;
- for(Uint32 i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
+ for(i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
jam();
if(iter.curr.i == RNIL){
@@ -6793,7 +6910,8 @@ void Dblqh::execSCAN_NEXTREQ(Signal* signal)
if (findTransaction(transid1, transid2, senderData) != ZOK){
jam();
- DEBUG("Received SCAN_NEXTREQ in LQH with close flag when closed");
+ DEBUG(senderData <<
+ " Received SCAN_NEXTREQ in LQH with close flag when closed");
ndbrequire(nextReq->closeFlag == ZTRUE);
return;
}
@@ -6833,6 +6951,10 @@ void Dblqh::execSCAN_NEXTREQ(Signal* signal)
return;
}//if
+ if(ERROR_INSERTED(5036)){
+ return;
+ }
+
scanptr.i = tcConnectptr.p->tcScanRec;
ndbrequire(scanptr.i != RNIL);
c_scanRecordPool.getPtr(scanptr);
@@ -6849,6 +6971,10 @@ void Dblqh::execSCAN_NEXTREQ(Signal* signal)
if(ERROR_INSERTED(5034)){
CLEAR_ERROR_INSERT_VALUE;
}
+ if(ERROR_INSERTED(5036)){
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
closeScanRequestLab(signal);
return;
}//if
@@ -7134,7 +7260,7 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
ScanFragRef * ref;
const Uint32 transid1 = scanFragReq->transId1;
const Uint32 transid2 = scanFragReq->transId2;
- Uint32 errorCode;
+ Uint32 errorCode= 0;
Uint32 senderData;
Uint32 hashIndex;
TcConnectionrecPtr nextHashptr;
@@ -7862,27 +7988,10 @@ void Dblqh::scanTupkeyConfLab(Signal* signal)
}//if
if (scanptr.p->scanKeyinfoFlag) {
jam();
- DatabufPtr TdataBuf;
- TdataBuf.i = tcConnectptr.p->firstTupkeybuf;
- const Uint32 keyLen = tcConnectptr.p->primKeyLen;
- const Uint32 dataBufSz = cdatabufFileSize;
-
- /**
- * Note that this code requires signal->theData to be big enough for
- * a entire key
- */
- ndbrequire(keyLen * 4 <= sizeof(signal->theData));
- KeyInfo20 * keyInfo = (KeyInfo20*)&signal->theData[0];
- for(Uint32 i = 0; i < keyLen; i += 4){
- ptrCheckGuard(TdataBuf, dataBufSz, databuf);
- keyInfo->keyData[i + 0] = TdataBuf.p->data[0];
- keyInfo->keyData[i + 1] = TdataBuf.p->data[1];
- keyInfo->keyData[i + 2] = TdataBuf.p->data[2];
- keyInfo->keyData[i + 3] = TdataBuf.p->data[3];
- TdataBuf.i = TdataBuf.p->nextDatabuf;
- }
sendKeyinfo20(signal, scanptr.p, tcConnectptr.p);
releaseOprec(signal);
+
+    tdata4 += tcConnectptr.p->primKeyLen; // Inform API about keyinfo len as well
}//if
ndbrequire(scanptr.p->scanCompletedOperations < MAX_PARALLEL_OP_PER_SCAN);
scanptr.p->scanOpLength[scanptr.p->scanCompletedOperations] = tdata4;
@@ -8200,7 +8309,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
scanptr.p->scanLocalref[1] = 0;
scanptr.p->scanLocalFragid = 0;
scanptr.p->scanTcWaiting = ZTRUE;
- scanptr.p->scanNumber = ZNIL;
+ scanptr.p->scanNumber = ~0;
for (Uint32 i = 0; i < scanConcurrentOperations; i++) {
jam();
@@ -8256,6 +8365,11 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
ScanRecordPtr tmp;
ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p));
#endif
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d",
+ scanptr.p->scanNumber, scanptr.p->fragPtrI,
+ tabptr.i, scanFragReq->fragmentNo, fragptr.i, fragptr.p->tableFragptr);
+#endif
c_scanTakeOverHash.add(scanptr);
}
return ZOK;
@@ -8318,7 +8432,8 @@ void Dblqh::initScanTc(Signal* signal,
tcConnectptr.p->opExec = 1;
tcConnectptr.p->operation = ZREAD;
tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
-
+ tcConnectptr.p->commitAckMarker = RNIL;
+
tabptr.p->usageCount++;
}//Dblqh::initScanTc()
@@ -8345,6 +8460,9 @@ void Dblqh::finishScanrec(Signal* signal)
if(scanptr.p->scanKeyinfoFlag){
jam();
ScanRecordPtr tmp;
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("removing (%d %d)", scanptr.p->scanNumber, scanptr.p->fragPtrI);
+#endif
c_scanTakeOverHash.remove(tmp, * scanptr.p);
ndbrequire(tmp.p == scanptr.p);
}
@@ -8388,6 +8506,9 @@ void Dblqh::finishScanrec(Signal* signal)
ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p));
#endif
c_scanTakeOverHash.add(restart);
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI);
+#endif
}
scanptr = restart;
@@ -8422,78 +8543,131 @@ void Dblqh::sendKeyinfo20(Signal* signal,
ndbrequire(scanP->scanCompletedOperations < MAX_PARALLEL_OP_PER_SCAN);
KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0];
+ DatabufPtr TdataBuf;
+ TdataBuf.i = tcConP->firstTupkeybuf;
+ Uint32 keyLen = tcConP->primKeyLen;
+ const Uint32 dataBufSz = cdatabufFileSize;
+
+ /**
+ * Note that this code requires signal->theData to be big enough for
+ * a entire key
+ */
+ ndbrequire(keyLen * 4 <= sizeof(signal->theData));
+ const BlockReference ref = scanP->scanApiBlockref;
const Uint32 scanOp = scanP->scanCompletedOperations;
+ const Uint32 nodeId = refToNode(ref);
+ const bool connectedToNode = getNodeInfo(nodeId).m_connected;
+ const Uint32 type = getNodeInfo(nodeId).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
+ const bool longable = true; // TODO is_api && !old_dest;
+
+ Uint32 * dst = keyInfo->keyData;
+ dst += nodeId == getOwnNodeId() ? 0 : KeyInfo20::DataLength;
+
+ /**
+ * Copy keydata from data buffer into signal
+ *
+ */
+ for(Uint32 i = 0; i < keyLen; i += 4){
+ ptrCheckGuard(TdataBuf, dataBufSz, databuf);
+ * dst++ = TdataBuf.p->data[0];
+ * dst++ = TdataBuf.p->data[1];
+ * dst++ = TdataBuf.p->data[2];
+ * dst++ = TdataBuf.p->data[3];
+ TdataBuf.i = TdataBuf.p->nextDatabuf;
+ }
+
keyInfo->clientOpPtr = scanP->scanApiOpPtr[scanOp];
- keyInfo->keyLen = tcConP->primKeyLen;
+ keyInfo->keyLen = keyLen;
keyInfo->scanInfo_Node = KeyInfo20::setScanInfo(scanOp,
scanP->scanNumber)+
(getOwnNodeId() << 16);
-
keyInfo->transId1 = tcConP->transid[0];
keyInfo->transId2 = tcConP->transid[1];
-
- const BlockReference ref = scanP->scanApiBlockref;
- const Uint32 keyLen = tcConP->primKeyLen;
- if(refToNode(ref) == getOwnNodeId()){
+
+ Uint32 * src = signal->theData+25;
+ if(connectedToNode){
jam();
- EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal, 5 + keyLen);
+
+ if(nodeId != getOwnNodeId()){
+ jam();
+
+ if(keyLen <= KeyInfo20::DataLength || !longable) {
+ while(keyLen > KeyInfo20::DataLength){
+ jam();
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength);
+ sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB);
+ src += KeyInfo20::DataLength;;
+ keyLen -= KeyInfo20::DataLength;
+ } while(keyLen >= KeyInfo20::DataLength);
+
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
+ sendSignal(ref, GSN_KEYINFO20, signal,
+ KeyInfo20::HeaderLength+keyLen, JBB);
+ return;
+ }
+
+ LinearSectionPtr ptr[3];
+ ptr[0].p = src;
+ ptr[0].sz = keyLen;
+ sendSignal(ref, GSN_KEYINFO20, signal, KeyInfo20::HeaderLength,
+ JBB, ptr, 1);
+ return;
+ }
+
+ EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal,
+ KeyInfo20::HeaderLength + keyLen);
jamEntry();
return;
- }
-
- bool connectedToNode = getNodeInfo(refToNode(ref)).m_connected;
-
- if (ERROR_INSERTED(5029)){
- // Use error insert to turn routing on
- jam();
- connectedToNode = false;
}
- if (connectedToNode){
- jam();
- Uint32 keyLenLeft = keyLen;
- Uint32 keyDataIndex = 20;
- for(; keyLenLeft > 20; keyLenLeft -= 20, keyDataIndex += 20){
- jam();
- sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB);
- for(Uint32 i = 0; i<20; i++)
- keyInfo->keyData[i] = keyInfo->keyData[keyDataIndex + i];
- }//for
- sendSignal(ref, GSN_KEYINFO20, signal, 5 + keyLenLeft, JBB);
- } else {
- /**
- * If this node does not have a direct connection
- * to the receiving node we want to send the signals
- * routed via the control node
- */
+ /**
+ * If this node does not have a direct connection
+ * to the receiving node we want to send the signals
+ * routed via the node that controls this read
+ */
+ Uint32 routeBlockref = tcConP->clientBlockref;
+
+ if(keyLen < KeyInfo20::DataLength || !longable){
jam();
- Uint32 keyLenLeft = keyLen;
- Uint32 keyDataIndex = 19;
- BlockReference routeBlockref = tcConP->clientBlockref;
- for(; keyLenLeft > 19; keyLenLeft -= 19, keyDataIndex += 19){
- jam();
- // store final destination, but save original value
- Uint32 saveOne = keyInfo->keyData[19];
- keyInfo->keyData[19] = ref;
+ while (keyLen > (KeyInfo20::DataLength - 1)) {
+ jam();
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength - 1);
+ keyInfo->keyData[KeyInfo20::DataLength-1] = ref;
sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, 25, JBB);
- keyInfo->keyData[19] = saveOne;
- for(Uint32 i = 0; i<19; i++){
- keyInfo->keyData[i] = keyInfo->keyData[keyDataIndex + i];
- }
- }//for
- keyInfo->keyData[keyLenLeft] = ref;
- sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, 5 + keyLenLeft + 1, JBB);
- }
+ src += KeyInfo20::DataLength - 1;
+ keyLen -= KeyInfo20::DataLength - 1;
+ }
-}//Dblqh::sendKeyinfo20()
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
+ keyInfo->keyData[keyLen] = ref;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
+ KeyInfo20::HeaderLength+keyLen+1, JBB);
+ return;
+ }
+ keyInfo->keyData[0] = ref;
+ LinearSectionPtr ptr[3];
+ ptr[0].p = src;
+ ptr[0].sz = keyLen;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
+ KeyInfo20::HeaderLength+1, JBB, ptr, 1);
+ return;
+}
+
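The rewritten sendKeyinfo20() above copies the key into the signal buffer and then either streams it in fixed-size KEYINFO20 chunks or, when the receiver can handle it, sends one signal with the key attached as a linear section. A self-contained sketch of just that chunk-versus-long-signal decision; the send callbacks are hypothetical and DATA_WORDS stands in for KeyInfo20::DataLength:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    static const std::size_t DATA_WORDS = 20;   // KeyInfo20::DataLength stand-in

    // Send `key` either as one long message or as a train of short chunks,
    // mirroring the structure of the new sendKeyinfo20().
    void sendKey(const std::vector<uint32_t>& key,
                 bool receiverTakesLongSignals,
                 const std::function<void(const uint32_t*, std::size_t)>& sendChunk,
                 const std::function<void(const uint32_t*, std::size_t)>& sendLong)
    {
      if (receiverTakesLongSignals && key.size() > DATA_WORDS) {
        sendLong(key.data(), key.size());               // one signal + linear section
        return;
      }
      std::size_t off = 0;
      while (key.size() - off > DATA_WORDS) {           // full chunks first
        sendChunk(key.data() + off, DATA_WORDS);
        off += DATA_WORDS;
      }
      sendChunk(key.data() + off, key.size() - off);    // final (possibly short) chunk
    }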
/* ------------------------------------------------------------------------
* ------- SEND SCAN_FRAGCONF TO TC THAT CONTROLS THE SCAN -------
*
* ------------------------------------------------------------------------ */
void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
{
+ if(ERROR_INSERTED(5037)){
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+
scanptr.p->scanTcWaiting = ZFALSE;
ScanFragConf * conf = (ScanFragConf*)&signal->theData[0];
@@ -8868,7 +9042,7 @@ void Dblqh::execTRANSID_AI(Signal* signal)
ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::COPY_TUPKEY);
Uint32 * src = &signal->theData[3];
while(length > 22){
- if (saveTupattrbuf(signal, &signal->theData[3], 22) == ZOK) {
+ if (saveTupattrbuf(signal, src, 22) == ZOK) {
;
} else {
jam();
@@ -9832,9 +10006,11 @@ void Dblqh::execLCP_HOLDOPCONF(Signal* signal)
return;
} else {
jam();
+
/* NO MORE HOLDOPS NEEDED */
lcpLocptr.p->lcpLocstate = LcpLocRecord::HOLDOP_READY;
checkLcpHoldop(signal);
+
if (lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH) {
if (fragptr.p->activeList == RNIL) {
jam();
@@ -9852,6 +10028,7 @@ void Dblqh::execLCP_HOLDOPCONF(Signal* signal)
}//if
}//if
}//if
+
/* ----------------------- */
/* ELSE */
/* ------------------------------------------------------------------------
@@ -9924,7 +10101,6 @@ void Dblqh::execTUP_LCPSTARTED(Signal* signal)
void Dblqh::lcpStartedLab(Signal* signal)
{
checkLcpStarted(signal);
-
if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
jam();
/* ----------------------------------------------------------------------
@@ -9943,7 +10119,7 @@ void Dblqh::lcpStartedLab(Signal* signal)
sendAccContOp(signal); /* START OPERATIONS IN ACC */
moveAccActiveFrag(signal); /* MOVE FROM ACC BLOCKED LIST TO ACTIVE LIST
ON FRAGMENT */
- }//if
+ }
/*---------------*/
/* ELSE */
/*-------------------------------------------------------------------------*/
@@ -10004,32 +10180,27 @@ void Dblqh::execLQH_RESTART_OP(Signal* signal)
lcpPtr.i = signal->theData[1];
ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
- if (fragptr.p->fragStatus == Fragrecord::BLOCKED) {
- if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
- jam();
- /***********************************************************************/
- /* THIS SIGNAL CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND
- * THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE
- * REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED.
- ***********************************************************************/
- restartOperationsLab(signal);
- return;
- } else {
- jam();
- if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
- jam();
- /*******************************************************************>
- * THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP
- * ALL OPERATIONS AGAIN.
- * WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT
- * FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS.
- *******************************************************************> */
- restartOperationsLab(signal);
- return;
- }//if
- }//if
- }//if
- ndbrequire(false);
+ ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
+ jam();
+ /***********************************************************************/
+ /* THIS SIGNAL CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND
+ * THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE
+ * REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED.
+ ***********************************************************************/
+ restartOperationsLab(signal);
+ } else if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
+ jam();
+ /*******************************************************************>
+ * THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP
+ * ALL OPERATIONS AGAIN.
+ * WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT
+ * FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS.
+ *******************************************************************> */
+ restartOperationsLab(signal);
+ } else {
+ ndbrequire(false);
+ }
}//Dblqh::execLQH_RESTART_OP()
void Dblqh::restartOperationsLab(Signal* signal)
@@ -10082,13 +10253,13 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
* WHEN ARRIVING HERE THE OPERATION IS ALREADY SET IN THE ACTIVE LIST.
* THUS WE CAN IMMEDIATELY CALL THE METHODS THAT EXECUTE FROM WHERE
* THE OPERATION WAS STOPPED.
- *------------------------------------------------------------------------- */
+ *------------------------------------------------------------------------ */
switch (tcConnectptr.p->transactionState) {
case TcConnectionrec::STOPPED:
jam();
/*-----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND ACCKEYREQ
- *----------------------------------------------------------------------- */
+ *---------------------------------------------------------------------- */
prepareContinueAfterBlockedLab(signal);
return;
break;
@@ -10096,7 +10267,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND ACC_COMMITREQ
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
releaseActiveFrag(signal);
commitContinueAfterBlockedLab(signal);
return;
@@ -10105,7 +10276,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND ACC_ABORTREQ
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
abortContinueAfterBlockedLab(signal, true);
return;
break;
@@ -10113,7 +10284,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
continueCopyAfterBlockedLab(signal);
return;
break;
@@ -10121,7 +10292,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
continueFirstCopyAfterBlockedLab(signal);
return;
break;
@@ -10129,7 +10300,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
continueFirstScanAfterBlockedLab(signal);
return;
@@ -10138,7 +10309,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
continueAfterCheckLcpStopBlocked(signal);
return;
@@ -10147,7 +10318,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
continueScanAfterBlockedLab(signal);
return;
@@ -10157,7 +10328,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING RELEASE
* LOCKS IN SCAN
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
continueScanReleaseAfterBlockedLab(signal);
return;
@@ -10166,7 +10337,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF SCAN
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
continueCloseScanAfterBlockedLab(signal);
return;
break;
@@ -10174,7 +10345,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal)
jam();
/* ----------------------------------------------------------------------
* STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF COPY
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
continueCloseCopyAfterBlockedLab(signal);
return;
break;
@@ -10300,7 +10471,12 @@ void Dblqh::contChkpNextFragLab(Signal* signal)
* ----------------------------------------------------------------------- */
if (fragptr.p->fragStatus == Fragrecord::BLOCKED) {
jam();
+ /**
+     * The LCP of this fragment is complete,
+     * but the restarting of operations is not
+ */
lcpPtr.p->lcpState = LcpRecord::LCP_BLOCKED_COMP;
+ //restartOperationsLab(signal);
return;
}//if
@@ -10471,6 +10647,8 @@ void Dblqh::execEND_LCPCONF(Signal* signal)
clcpCompletedState = LCP_IDLE;
}//if
}//if
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
sendLCP_COMPLETE_REP(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId);
}//Dblqh::execEND_LCPCONF()
@@ -10577,25 +10755,25 @@ void Dblqh::checkLcpStarted(Signal* signal)
terrorCode = ZOK;
clsLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ int i = 0;
do {
ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (clsLcpLocptr.p->lcpLocstate != LcpLocRecord::ACC_STARTED) {
- ndbrequire((clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED) ||
- (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED));
+ if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){
return;
}//if
clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
+ i++;
} while (clsLcpLocptr.i != RNIL);
+ i = 0;
clsLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
do {
ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
- if (clsLcpLocptr.p->lcpLocstate != LcpLocRecord::TUP_STARTED) {
- ndbrequire((clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_COMPLETED) ||
- (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED));
+ if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){
return;
}//if
clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
+ i++;
} while (clsLcpLocptr.i != RNIL);
lcpPtr.p->lcpState = LcpRecord::LCP_STARTED;
}//Dblqh::checkLcpStarted()
@@ -10753,18 +10931,28 @@ void Dblqh::sendAccContOp(Signal* signal)
{
LcpLocRecordPtr sacLcpLocptr;
+ int count = 0;
sacLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
do {
ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
sacLcpLocptr.p->accContCounter = 0;
-/* ------------------------------------------------------------------------- */
-/*SEND START OPERATIONS TO ACC AGAIN */
-/* ------------------------------------------------------------------------- */
- signal->theData[0] = lcpPtr.p->lcpAccptr;
- signal->theData[1] = sacLcpLocptr.p->locFragid;
- sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
+ if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED){
+ /* ------------------------------------------------------------------- */
+ /*SEND START OPERATIONS TO ACC AGAIN */
+ /* ------------------------------------------------------------------- */
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = sacLcpLocptr.p->locFragid;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
+ count++;
+ } else if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED){
+ signal->theData[0] = sacLcpLocptr.i;
+ sendSignal(reference(), GSN_ACC_CONTOPCONF, signal, 1, JBB);
+ } else {
+ ndbrequire(false);
+ }
sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc;
} while (sacLcpLocptr.i != RNIL);
+
}//Dblqh::sendAccContOp()
/* ------------------------------------------------------------------------- */
@@ -11897,18 +12085,18 @@ void Dblqh::writeLogfileLab(Signal* signal)
/* WRITE. */
/*---------------------------------------------------------------------------*/
switch (logFilePtr.p->fileChangeState) {
-#if 0
- case LogFileRecord::BOTH_WRITES_ONGOING:
- jam();
- ndbout_c("not crashing!!");
- // Fall-through
-#endif
case LogFileRecord::NOT_ONGOING:
jam();
checkGcpCompleted(signal,
((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
lfoPtr.p->lfoWordWritten);
break;
+#if 0
+ case LogFileRecord::BOTH_WRITES_ONGOING:
+ jam();
+ ndbout_c("not crashing!!");
+ // Fall-through
+#endif
case LogFileRecord::WRITE_PAGE_ZERO_ONGOING:
case LogFileRecord::LAST_WRITE_ONGOING:
jam();
@@ -12996,20 +13184,11 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
if (!getFragmentrec(signal, fragId)) {
- jam();
- /* ----------------------------------------------------------------------
- * FRAGMENT WAS NOT DEFINED YET. PUT IT IN. IF NO LOCAL CHECKPOINT EXISTED
- * THEN THE FRAGMENT HAS ALREADY BEEN ADDED.
- * ---------------------------------------------------------------------- */
- if (!insertFragrec(signal, fragId)) {
- jam();
- startFragRefLab(signal);
- return;
- }//if
+ startFragRefLab(signal);
+ return;
}//if
tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
- initFragrec(signal, tabptr.i, fragId, ZPRIMARY_NODE);
initFragrecSr(signal);
if (startFragReq->lcpNo == ZNIL) {
jam();
@@ -13085,11 +13264,12 @@ void Dblqh::execSR_FRAGIDCONF(Signal* signal)
Uint32 noLocFrag = srFragidConf->noLocFrag;
ndbrequire(noLocFrag == 2);
Uint32 fragid[2];
- for (Uint32 i = 0; i < noLocFrag; i++) {
+ Uint32 i;
+ for (i = 0; i < noLocFrag; i++) {
fragid[i] = srFragidConf->fragId[i];
}//for
- for (Uint32 i = 0; i < noLocFrag; i++) {
+ for (i = 0; i < noLocFrag; i++) {
jam();
Uint32 fragId = fragid[i];
/* ----------------------------------------------------------------------
@@ -13495,14 +13675,22 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal)
* ALSO SEND START_FRAGCONF TO DIH AND SET THE STATE TO ACTIVE ON THE
* FRAGMENT.
* ------------------------------------------------------------------- */
+ Uint32 next = fragptr.p->nextFrag;
if (prevFragptr.i != RNIL) {
jam();
ptrCheckGuard(prevFragptr, cfragrecFileSize, fragrecord);
- prevFragptr.p->nextFrag = fragptr.p->nextFrag;
+ prevFragptr.p->nextFrag = next;
} else {
jam();
- cfirstCompletedFragSr = fragptr.p->nextFrag;
+ cfirstCompletedFragSr = next;
}//if
+
+ /**
+ * Put the fragment on the list of fragments that have completed the REDO log
+ */
+ fragptr.p->nextFrag = c_redo_log_complete_frags;
+ c_redo_log_complete_frags = fragptr.i;
+
fragptr.p->fragStatus = Fragrecord::FSACTIVE;
fragptr.p->logFlag = Fragrecord::STATE_TRUE;
signal->theData[0] = fragptr.p->srUserptr;
@@ -13514,7 +13702,7 @@ void Dblqh::execSTART_EXEC_SR(Signal* signal)
* THIS IS PERFORMED BY KEEPING PREV_FRAGPTR AS PREV_FRAGPTR BUT MOVING
* FRAGPTR TO THE NEXT FRAGMENT IN THE LIST.
* ------------------------------------------------------------------- */
- fragptr.i = fragptr.p->nextFrag;
+ fragptr.i = next;
}//if
signal->theData[0] = fragptr.i;
signal->theData[1] = prevFragptr.i;
@@ -14933,6 +15121,11 @@ void Dblqh::openSrFourthPhaseLab(Signal* signal)
void Dblqh::readSrFourthPhaseLab(Signal* signal)
{
+ if(c_diskless){
+ jam();
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ }
+
/* ------------------------------------------------------------------------
* INITIALISE ALL LOG PART INFO AND LOG FILE INFO THAT IS NEEDED TO
* START UP THE SYSTEM.
@@ -14961,6 +15154,7 @@ void Dblqh::readSrFourthPhaseLab(Signal* signal)
logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
logFilePtr.p->currentFilepage = logPartPtr.p->headPageNo;
logFilePtr.p->currentLogpage = logPagePtr.i;
+
initLogpage(signal);
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->headPageIndex;
logFilePtr.p->remainingWordsInMbyte =
@@ -15120,6 +15314,17 @@ void Dblqh::srFourthComp(Signal* signal)
conf->startingNodeId = getOwnNodeId();
sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
StartRecConf::SignalLength, JBB);
+
+ if(cstartType == NodeState::ST_SYSTEM_RESTART){
+ fragptr.i = c_redo_log_complete_frags;
+ while(fragptr.i != RNIL){
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ signal->theData[0] = fragptr.p->tabRef;
+ signal->theData[1] = fragptr.p->fragId;
+ sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
+ fragptr.i = fragptr.p->nextFrag;
+ }
+ }
} else {
ndbrequire(false);
}//if
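The two hunks above thread fragments onto c_redo_log_complete_frags through Fragrecord::nextFrag as each one finishes executing the REDO log, and srFourthComp then walks that list to send one EXPANDCHECK2 per fragment. What follows is a minimal, self-contained sketch of that index-linked-list pattern; a plain std::vector stands in for the block's fragment record pool, so the names are illustrative only, not the block's actual code.

#include <cstdio>
#include <vector>

// Sketch only: an index-linked list threaded through the records themselves,
// the way c_redo_log_complete_frags is threaded through Fragrecord::nextFrag.
// RNIL marks "no record"; the vector stands in for the fragment record pool.
static const unsigned RNIL = 0xFFFFFFFF;

struct Frag { unsigned fragId; unsigned nextFrag; };

int main() {
  std::vector<Frag> pool = { {10, RNIL}, {11, RNIL}, {12, RNIL} };
  unsigned head = RNIL;

  // Push each fragment onto the front of the list, as execSTART_EXEC_SR does
  // when a fragment has completed its part of the REDO log.
  for (unsigned i = 0; i < pool.size(); i++) {
    pool[i].nextFrag = head;
    head = i;
  }

  // Walk the list, as srFourthComp does when sending EXPANDCHECK2 per fragment.
  for (unsigned i = head; i != RNIL; i = pool[i].nextFrag)
    std::printf("EXPANDCHECK2 for fragment %u\n", pool[i].fragId);
  return 0;
}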
@@ -15657,7 +15862,7 @@ void Dblqh::completedLogPage(Signal* signal, Uint32 clpType)
/* ---------------------------------------------------------------- */
void Dblqh::deleteFragrec(Uint32 fragId)
{
- Uint32 indexFound;
+ Uint32 indexFound= RNIL;
fragptr.i = RNIL;
for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
@@ -15810,6 +16015,7 @@ void Dblqh::initialiseAttrbuf(Signal* signal)
for (attrinbufptr.i = 0;
attrinbufptr.i < cattrinbufFileSize;
attrinbufptr.i++) {
+ refresh_watch_dog();
ptrAss(attrinbufptr, attrbuf);
attrinbufptr.p->attrbuf[ZINBUF_NEXT] = attrinbufptr.i + 1;
}//for
@@ -15832,6 +16038,7 @@ void Dblqh::initialiseDatabuf(Signal* signal)
{
if (cdatabufFileSize != 0) {
for (databufptr.i = 0; databufptr.i < cdatabufFileSize; databufptr.i++) {
+ refresh_watch_dog();
ptrAss(databufptr, databuf);
databufptr.p->nextDatabuf = databufptr.i + 1;
}//for
@@ -15853,6 +16060,7 @@ void Dblqh::initialiseFragrec(Signal* signal)
{
if (cfragrecFileSize != 0) {
for (fragptr.i = 0; fragptr.i < cfragrecFileSize; fragptr.i++) {
+ refresh_watch_dog();
ptrAss(fragptr, fragrecord);
fragptr.p->fragStatus = Fragrecord::FREE;
fragptr.p->fragActiveStatus = ZFALSE;
@@ -15985,6 +16193,7 @@ void Dblqh::initialiseLogPage(Signal* signal)
{
if (clogPageFileSize != 0) {
for (logPagePtr.i = 0; logPagePtr.i < clogPageFileSize; logPagePtr.i++) {
+ refresh_watch_dog();
ptrAss(logPagePtr, logPageRecord);
logPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i + 1;
}//for
@@ -16040,17 +16249,18 @@ void Dblqh::initialisePageRef(Signal* signal)
void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data,
Uint32 retRef, Uint32 retData)
{
+ Uint32 i;
switch (data) {
case 0:
jam();
- for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ for (i = 0; i < MAX_NDB_NODES; i++) {
cnodeSrState[i] = ZSTART_SR;
cnodeExecSrState[i] = ZSTART_SR;
}//for
- for (Uint32 i = 0; i < 1024; i++) {
+ for (i = 0; i < 1024; i++) {
ctransidHash[i] = RNIL;
}//for
- for (Uint32 i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++) {
cactiveCopy[i] = RNIL;
}//for
cnoActiveCopy = 0;
@@ -16162,6 +16372,7 @@ void Dblqh::initialiseScanrec(Signal* signal)
DLList<ScanRecord> tmp(c_scanRecordPool);
while (tmp.seize(scanptr)){
//new (scanptr.p) ScanRecord();
+ refresh_watch_dog();
scanptr.p->scanType = ScanRecord::ST_IDLE;
scanptr.p->scanState = ScanRecord::SCAN_FREE;
scanptr.p->scanTcWaiting = ZFALSE;
@@ -16179,6 +16390,7 @@ void Dblqh::initialiseTabrec(Signal* signal)
{
if (ctabrecFileSize != 0) {
for (tabptr.i = 0; tabptr.i < ctabrecFileSize; tabptr.i++) {
+ refresh_watch_dog();
ptrAss(tabptr, tablerec);
tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
tabptr.p->usageCount = 0;
@@ -16200,6 +16412,7 @@ void Dblqh::initialiseTcrec(Signal* signal)
for (tcConnectptr.i = 0;
tcConnectptr.i < ctcConnectrecFileSize;
tcConnectptr.i++) {
+ refresh_watch_dog();
ptrAss(tcConnectptr, tcConnectionrec);
tcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
tcConnectptr.p->tcScanRec = RNIL;
@@ -16277,6 +16490,7 @@ void Dblqh::initFragrec(Signal* signal,
fragptr.p->execSrNoReplicas = 0;
fragptr.p->fragDistributionKey = 0;
fragptr.p->activeTcCounter = 0;
+ fragptr.p->tableFragptr = RNIL;
}//Dblqh::initFragrec()
/* ==========================================================================
@@ -18004,7 +18218,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
infoEvent(" lcpQueued=%d reportEmpty=%d",
TlcpPtr.p->lcpQueued,
TlcpPtr.p->reportEmpty);
- char buf[TlcpPtr.p->m_EMPTY_LCP_REQ.TextLength+1];
+ char buf[8*_NDB_NODE_BITMASK_SIZE+1];
infoEvent(" m_EMPTY_LCP_REQ=%d",
TlcpPtr.p->m_EMPTY_LCP_REQ.getText(buf));
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index c87712e1887..6e32216557c 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -139,6 +139,7 @@
#define ZNOT_FOUND 626
#define ZALREADYEXIST 630
#define ZINCONSISTENTHASHINDEX 892
+#define ZNOTUNIQUE 893
#endif
class Dbtc: public SimulatedBlock {
@@ -235,7 +236,6 @@ public:
enum ReturnSignal {
RS_NO_RETURN = 0,
RS_TCKEYCONF = 1,
- RS_TCKEYREF = 2,
RS_TC_COMMITCONF = 3,
RS_TCROLLBACKCONF = 4,
RS_TCROLLBACKREP = 5
@@ -699,7 +699,7 @@ public:
UintR lqhkeyreqrec;
AbortState abortState;
Uint32 buddyPtr;
- Uint8 unused;
+ Uint8 m_exec_flag;
Uint8 unused2;
Uint8 takeOverRec;
Uint8 currentReplicaNo;
@@ -993,11 +993,94 @@ public:
typedef Ptr<TableRecord> TableRecordPtr;
/**
+ * There are at most 16 ScanFragRec's for
+ * each scan started in TC. Each ScanFragRec is used by
+ * a scan fragment "process" that scans one fragment at a time.
+ * It will receive at most 16 tuples in each request.
+ */
+ struct ScanFragRec {
+ ScanFragRec(){
+ stopFragTimer();
+ lqhBlockref = 0;
+ scanFragState = IDLE;
+ scanRec = RNIL;
+ }
+ /**
+ * ScanFragState
+ * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new
+ * fragment scan
+ * LQH_ACTIVE : The scan process has sent a command to LQH and is
+ * waiting for the response
+ * LQH_ACTIVE_CLOSE : The scan process has sent close to LQH and is
+ * waiting for the response
+ * DELIVERED : The result has been delivered; this scan frag process
+ * is waiting for a SCAN_NEXTREQ to tell us to continue scanning
+ * RETURNING_FROM_DELIVERY : SCAN_NEXTREQ received and continuing scan
+ * soon
+ * QUEUED_FOR_DELIVERY : Result queued in TC and waiting for delivery
+ * to API
+ * COMPLETED : The fragment scan process has completed and finally
+ * sent a SCAN_PROCCONF
+ */
+ enum ScanFragState {
+ IDLE = 0,
+ WAIT_GET_PRIMCONF = 1,
+ LQH_ACTIVE = 2,
+ DELIVERED = 4,
+ QUEUED_FOR_DELIVERY = 6,
+ COMPLETED = 7
+ };
+ // Timer for checking timeout of this fragment scan
+ Uint32 scanFragTimer;
+
+ // Id of the current scanned fragment
+ Uint32 scanFragId;
+
+ // Blockreference of LQH
+ BlockReference lqhBlockref;
+
+ // getNodeInfo.m_connectCount, set at seize used so that
+ // I don't accidentally kill a starting node
+ Uint32 m_connectCount;
+
+ // State of this fragment scan
+ ScanFragState scanFragState;
+
+ // Id of the ScanRecord this fragment scan belongs to
+ Uint32 scanRec;
+
+ // The maximum number of operations that can be scanned before
+ // returning to TC
+ Uint16 scanFragConcurrency;
+
+ inline void startFragTimer(Uint32 timeVal){
+ scanFragTimer = timeVal;
+ }
+ inline void stopFragTimer(void){
+ scanFragTimer = 0;
+ }
+
+ Uint32 m_ops;
+ Uint32 m_chksum;
+ Uint32 m_apiPtr;
+ Uint32 m_totalLen;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+
+ typedef Ptr<ScanFragRec> ScanFragRecPtr;
+ typedef LocalDLList<ScanFragRec> ScanFragList;
+
+ /**
* Each scan allocates one ScanRecord to store information
* about the current scan
*
*/
struct ScanRecord {
+ ScanRecord() {}
/** NOTE! This is the old comment for ScanState. - MASV
* STATE TRANSITIONS OF SCAN_STATE. SCAN_STATE IS THE STATE
* VARIABLE OF THE RECEIVE AND DELIVERY PROCESS.
@@ -1057,161 +1140,71 @@ public:
WAIT_SCAN_TAB_INFO = 1,
WAIT_AI = 2,
WAIT_FRAGMENT_COUNT = 3,
- SCAN_NEXT_ORDERED = 4,
- QUEUED_DELIVERED = 5,
- DELIVERED = 6,
- CLOSING_SCAN = 7
+ RUNNING = 4,
+ CLOSING_SCAN = 5
};
+
// State of this scan
ScanState scanState;
- // References to ScanFragRecs
- Uint32 scanFragrec[16];
- // Refrences to ScanOperationRecords
- Uint32 scanOprec[16];
- // Number of ScanOperationRecords allocated
- Uint32 noScanOprec;
+
+ DLList<ScanFragRec>::Head m_running_scan_frags; // Currently in LQH
+ union { Uint32 m_queued_count; Uint32 scanReceivedOperations; };
+ DLList<ScanFragRec>::Head m_queued_scan_frags; // In TC, not yet sent to API
+ DLList<ScanFragRec>::Head m_delivered_scan_frags;// Delivered to API
+ DLList<ScanFragRec>::Head m_completed_scan_frags;// Completed
+
// Id of the next fragment to be scanned. Used by scan fragment
// processes when they are ready for the next fragment
Uint32 scanNextFragId;
+
// Total number of fragments in the table we are scanning
Uint32 scanNoFrag;
+
// Index of next ScanRecords when in free list
Uint32 nextScan;
+
// Length of expected attribute information
Uint32 scanAiLength;
+
// Reference to ApiConnectRecord
Uint32 scanApiRec;
+
// Reference to TcConnectRecord
Uint32 scanTcrec;
+
// Number of scan frag processes that belong to this scan
Uint32 scanParallel;
- // The number of recieved operations so far
- Uint32 scanReceivedOperations;
+
// Schema version used by this scan
Uint32 scanSchemaVersion;
+
// Index of stored procedure belonging to this scan
Uint32 scanStoredProcId;
+
// The index of table that is scanned
Uint32 scanTableref;
+
// Number of operation records per scanned fragment
Uint16 noOprecPerFrag;
- // The number of SCAN_TABINFO to receive
- Uint16 noScanTabInfo;
- // The number of SCAN_TABINFO received so far
- Uint16 scanTabInfoReceived;
- // apiIsClosed indicates if it's ok to release all resources
- // and send a response to the API
- // If it's false resources should not be released wait for API
- // to close the scan
- bool apiIsClosed;
- // The number of scan frag processes that have completed their task
- Uint8 scanProcessesCompleted;
- // This variable is ZFALSE as long as any scan process is still alive
- // It is ZTRUE as soon as all scan processes have been stopped
- Uint8 scanCompletedStatus;
+
// Shall the locks be held until the application have read the
// records
Uint8 scanLockHold;
+
// Shall the locks be read or write locks
Uint8 scanLockMode;
+
// Skip locks by other transactions and read latest committed
Uint8 readCommitted;
+
// Scan is on ordered index
Uint8 rangeScan;
+
+ // Close is ordered
+ bool m_close_scan_req;
};
typedef Ptr<ScanRecord> ScanRecordPtr;
- /**
- * Each scan has max 16 ScanOperationRecords
- * they are used for storing data to be sent to the api
- */
- struct ScanOperationRecord {
- // Reference to the scan operation in api
- Uint32 apiOpptr[16];
- // Index and length of all recieved operations
- // They will be cached here until SCAN_TABCONF is sent to api
- Uint32 scanOpLength[16];
- // Next ScanOperationRecord when in free list
- Uint32 nextScanOp;
- }; /* p2c: size = 132 bytes */
-
- typedef Ptr<ScanOperationRecord> ScanOperationRecordPtr;
-
- /**
- * There is max 16 ScanFragRec's for
- * each scan started in TC. Each ScanFragRec is used by
- * a scan fragment "process" that scans one fragment at a time.
- * It will receive max 16 tuples in each request
- */
- struct ScanFragRec {
- /**
- * ScanFragState
- * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new
- * fragment scan
- * LQH_ACTIVE : The scan process has sent a command to LQH and is
- * waiting for the response
- * LQH_ACTIVE_CLOSE : The scan process has sent close to LQH and is
- * waiting for the response
- * DELIVERED : The result have been delivered, this scan frag process
- * are waiting for a SCAN_NEXTREQ to tell us to continue scanning
- * RETURNING_FROM_DELIVERY : SCAN_NEXTREQ received and continuing scan
- * soon
- * QUEUED_FOR_DELIVERY : Result queued in TC and waiting for delivery
- * to API
- * COMPLETED : The fragment scan processes has completed and finally
- * sent a SCAN_PROCCONF
- */
- enum ScanFragState {
- IDLE = 0,
- WAIT_GET_PRIMCONF = 1,
- LQH_ACTIVE = 2,
- LQH_ACTIVE_CLOSE = 3,
- DELIVERED = 4,
- RETURNING_FROM_DELIVERY = 5,
- QUEUED_FOR_DELIVERY = 6,
- COMPLETED = 7
- };
- // Timer for checking timeout of this fragment scan
- Uint32 scanFragTimer;
- // Id of the current scanned fragment
- Uint32 scanFragId;
- // Blockreference of LQH
- BlockReference lqhBlockref;
- // getNodeInfo.m_connectCount, set at seize used so that
- // I don't accidently kill a starting node
- Uint32 m_connectCount;
- // State of this fragment scan
- ScanFragState scanFragState;
- // Id of the ScanRecord this fragment scan belongs to
- Uint32 scanRec;
- // Index of next ScanFragRec, when in list of
- // free ScanFragRec's
- Uint32 nextScanFrag;
- // Process id of this scan process within the total scan
- Uint32 scanFragProcId;
- // Node where current fragment resides
- NodeId scanFragNodeId;
- // Index of where to store the result in ScanRecord
- Uint16 scanIndividual;
- // The maximum number of operations that can be scanned before
- // returning to TC
- Uint16 scanFragConcurrency;
- // Current status of the fragment scan
- // * 0 = NOT COMPLETED
- // * 1 = COMPLETED
- // * 2 = CLOSED
- Uint8 scanFragCompletedStatus;
-
- inline void startFragTimer(Uint32 timeVal){
- scanFragTimer = timeVal;
- }
- inline void stopFragTimer(void){
- scanFragTimer = 0;
- }
- };
-
- typedef Ptr<ScanFragRec> ScanFragRecPtr;
-
/* **********************************************************************$ */
/* ******$ DATA BUFFER ******$ */
/* */
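The rewritten ScanRecord above drops the fixed scanFragrec[16]/scanOprec[16] arrays: every ScanFragRec now lives on exactly one of the four list heads (m_running_scan_frags, m_queued_scan_frags, m_delivered_scan_frags, m_completed_scan_frags) and moves between them as its ScanFragState changes. Below is a minimal sketch of that bookkeeping; std::list stands in for the pool-backed DLList and the type names are simplified, so this is an illustration of the pattern, not the block's code.

#include <cassert>
#include <cstdio>
#include <list>

enum class FragState { LQH_ACTIVE, QUEUED_FOR_DELIVERY, DELIVERED, COMPLETED };

struct ScanFrag {            // simplified stand-in for ScanFragRec
  unsigned fragId;
  FragState state;
};

struct Scan {                // simplified stand-in for ScanRecord
  std::list<ScanFrag> running;    // m_running_scan_frags   (currently in LQH)
  std::list<ScanFrag> queued;     // m_queued_scan_frags    (in TC, not yet sent to API)
  std::list<ScanFrag> delivered;  // m_delivered_scan_frags (delivered to API)
  std::list<ScanFrag> completed;  // m_completed_scan_frags

  // Move one record from 'from' to 'to' and record its new state.
  static void move(std::list<ScanFrag>& from, std::list<ScanFrag>& to,
                   std::list<ScanFrag>::iterator it, FragState s) {
    it->state = s;
    to.splice(to.end(), from, it);
  }
};

int main() {
  Scan scan;
  scan.running.push_back({0, FragState::LQH_ACTIVE});
  scan.running.push_back({1, FragState::LQH_ACTIVE});

  // A batch arrives from LQH for fragment 0: running -> queued.
  Scan::move(scan.running, scan.queued, scan.running.begin(),
             FragState::QUEUED_FOR_DELIVERY);
  // TC forwards the batch to the API: queued -> delivered.
  Scan::move(scan.queued, scan.delivered, scan.queued.begin(),
             FragState::DELIVERED);
  // The fragment scan finishes: delivered -> completed.
  Scan::move(scan.delivered, scan.completed, scan.delivered.begin(),
             FragState::COMPLETED);

  assert(scan.completed.size() == 1 && scan.running.size() == 1);
  std::printf("running=%zu completed=%zu\n",
              scan.running.size(), scan.completed.size());
  return 0;
}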
@@ -1369,6 +1362,7 @@ private:
void execCREATE_TAB_REQ(Signal* signal);
void execPREP_DROP_TAB_REQ(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REF(Signal* signal);
void execWAIT_DROP_TAB_CONF(Signal* signal);
void checkWaitDropTabFailedLqh(Signal*, Uint32 nodeId, Uint32 tableId);
void execALTER_TAB_REQ(Signal* signal);
@@ -1403,7 +1397,7 @@ private:
void sendCompleteLqh(Signal* signal,
TcConnectRecord * const regTcPtr);
void sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord *);
- void sendTCKEY_FAILCONF(Signal* signal, const ApiConnectRecord *);
+ void sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord *);
void checkStartTimeout(Signal* signal);
void checkStartFragTimeout(Signal* signal);
void timeOutFoundFragLab(Signal* signal, Uint32 TscanConPtr);
@@ -1428,23 +1422,17 @@ private:
Uint32 buddyPtr,
UintR transid1,
UintR transid2);
- void initScanOprec(Signal* signal);
- void initScanrec(Signal* signal,
+ void initScanrec(ScanRecordPtr, const class ScanTabReq*,
const UintR scanParallel,
const UintR noOprecPerFrag);
void initScanfragrec(Signal* signal);
- void releaseScanrec(Signal* signal);
- void releaseScanResources(Signal* signal);
- void releaseScanFragrec(Signal* signal);
- void releaseScanOprec(Signal* signal);
- void seizeScanrec(Signal* signal);
- void seizeScanFragrec(Signal* signal);
- void seizeScanOprec(Signal* signal);
- void sendScanFragReq(Signal* signal);
- void sendScanTabConf(Signal* signal);
- void sendScanProcConf(Signal* signal);
- void setScanReceived(Signal* signal, Uint32 noCompletedOps);
-
+ void releaseScanResources(ScanRecordPtr);
+ ScanRecordPtr seizeScanrec(Signal* signal);
+ void sendScanFragReq(Signal* signal, ScanRecord*, ScanFragRec*);
+ void sendScanTabConf(Signal* signal, ScanRecord*);
+ void close_scan_req(Signal*, ScanRecordPtr, bool received_req);
+ void close_scan_req_send_conf(Signal*, ScanRecordPtr);
+
void checkGcp(Signal* signal);
void commitGciHandling(Signal* signal, UintR Tgci);
void copyApi(Signal* signal);
@@ -1473,12 +1461,12 @@ private:
void releaseApiCon(Signal* signal, UintR aApiConnectPtr);
void releaseApiConCopy(Signal* signal);
void releaseApiConnectFail(Signal* signal);
- void releaseAttrinfo(Signal* signal);
+ void releaseAttrinfo();
void releaseGcp(Signal* signal);
- void releaseKeys(Signal* signal);
+ void releaseKeys();
void releaseSimpleRead(Signal* signal);
void releaseDirtyWrite(Signal* signal);
- void releaseTcCon(Signal* signal);
+ void releaseTcCon();
void releaseTcConnectFail(Signal* signal);
void releaseTransResources(Signal* signal);
void saveAttrbuf(Signal* signal);
@@ -1577,11 +1565,11 @@ private:
void systemErrorLab(Signal* signal);
void sendSignalErrorRefuseLab(Signal* signal);
void scanTabRefLab(Signal* signal, Uint32 errCode);
- void diFcountReqLab(Signal* signal);
+ void diFcountReqLab(Signal* signal, ScanRecordPtr);
void signalErrorRefuseLab(Signal* signal);
void abort080Lab(Signal* signal);
void packKeyData000Lab(Signal* signal, BlockReference TBRef);
- void abortScanLab(Signal* signal, Uint32 errCode);
+ void abortScanLab(Signal* signal, ScanRecordPtr, Uint32 errCode);
void sendAbortedAfterTimeout(Signal* signal, int Tcheck);
void abort010Lab(Signal* signal);
void abort015Lab(Signal* signal);
@@ -1609,7 +1597,7 @@ private:
void attrinfo020Lab(Signal* signal);
void scanReleaseResourcesLab(Signal* signal);
void scanCompletedLab(Signal* signal);
- void scanFragError(Signal* signal, Uint32 errorCode);
+ void scanError(Signal* signal, ScanRecordPtr, Uint32 errorCode);
void diverify010Lab(Signal* signal);
void intstartphase2x010Lab(Signal* signal);
void intstartphase3x010Lab(Signal* signal);
@@ -1638,6 +1626,8 @@ private:
void checkScanActiveInFailedLqh(Signal* signal,
Uint32 scanPtrI,
Uint32 failedNodeId);
+ void checkScanFragList(Signal*, Uint32 failedNodeId, ScanRecord * scanP,
+ LocalDLList<ScanFragRec>::Head&);
// Initialisation
void initData();
@@ -1717,20 +1707,12 @@ private:
ApiConnectRecordPtr timeOutptr;
ScanRecord *scanRecord;
- ScanRecordPtr scanptr;
UintR cscanrecFileSize;
- ScanOperationRecord *scanOperationRecord;
- ScanOperationRecordPtr scanOpptr;
- UintR cscanOprecFileSize;
-
- ScanFragRec *scanFragmentRecord;
+ UnsafeArrayPool<ScanFragRec> c_scan_frag_pool;
ScanFragRecPtr scanFragptr;
- UintR cscanFragrecFileSize;
- UintR cfirstfreeScanOprec;
- UintR cnoFreeScanOprec;
- UintR cfirstfreeScanFragrec;
+ UintR cscanFragrecFileSize;
UintR cdatabufFilesize;
BlockReference cdictblockref;
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index 61ecca513f0..6803c3609ed 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -35,7 +35,6 @@ void Dbtc::initData()
cgcpFilesize = ZGCP_FILESIZE;
cscanrecFileSize = ZSCANREC_FILE_SIZE;
cscanFragrecFileSize = ZSCAN_FRAGREC_FILE_SIZE;
- cscanOprecFileSize = ZSCAN_OPREC_FILE_SIZE;
ctabrecFilesize = ZTABREC_FILESIZE;
ctcConnectFilesize = ZTC_CONNECT_FILESIZE;
cdihblockref = DBDIH_REF;
@@ -49,8 +48,6 @@ void Dbtc::initData()
hostRecord = 0;
tableRecord = 0;
scanRecord = 0;
- scanOperationRecord = 0;
- scanFragmentRecord = 0;
databufRecord = 0;
attrbufRecord = 0;
gcpRecord = 0;
@@ -143,16 +140,19 @@ void Dbtc::initRecords()
sizeof(ScanRecord),
cscanrecFileSize);
- scanOperationRecord = (ScanOperationRecord*)
- allocRecord("ScanOperationRecord",
- sizeof(ScanOperationRecord),
- cscanOprecFileSize);
- scanFragmentRecord = (ScanFragRec*)
- allocRecord("ScanFragRec",
- sizeof(ScanFragRec),
- cscanFragrecFileSize);
+ c_scan_frag_pool.setSize(cscanFragrecFileSize);
+ {
+ ScanFragRecPtr ptr;
+ SLList<ScanFragRec> tmp(c_scan_frag_pool);
+ while(tmp.seize(ptr)) {
+ new (ptr.p) ScanFragRec();
+ }
+ tmp.release();
+ }
+ indexOps.release();
+
databufRecord = (DatabufRecord*)allocRecord("DatabufRecord",
sizeof(DatabufRecord),
cdatabufFilesize);
@@ -213,10 +213,7 @@ Dbtc::Dbtc(const class Configuration & conf):
addRecSignal(GSN_ATTRINFO, &Dbtc::execATTRINFO);
addRecSignal(GSN_CONTINUEB, &Dbtc::execCONTINUEB);
addRecSignal(GSN_KEYINFO, &Dbtc::execKEYINFO);
- addRecSignal(GSN_SCAN_TABINFO, &Dbtc::execSCAN_TABINFO);
addRecSignal(GSN_SCAN_NEXTREQ, &Dbtc::execSCAN_NEXTREQ);
- addRecSignal(GSN_SCAN_PROCREQ, &Dbtc::execSCAN_PROCREQ);
- addRecSignal(GSN_SCAN_PROCCONF, &Dbtc::execSCAN_PROCCONF);
addRecSignal(GSN_TAKE_OVERTCREQ, &Dbtc::execTAKE_OVERTCREQ);
addRecSignal(GSN_TAKE_OVERTCCONF, &Dbtc::execTAKE_OVERTCCONF);
addRecSignal(GSN_LQHKEYREF, &Dbtc::execLQHKEYREF);
@@ -290,11 +287,29 @@ Dbtc::Dbtc(const class Configuration & conf):
//addRecSignal(GSN_CREATE_TAB_REQ, &Dbtc::execCREATE_TAB_REQ);
addRecSignal(GSN_DROP_TAB_REQ, &Dbtc::execDROP_TAB_REQ);
addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbtc::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_WAIT_DROP_TAB_REF, &Dbtc::execWAIT_DROP_TAB_REF);
addRecSignal(GSN_WAIT_DROP_TAB_CONF, &Dbtc::execWAIT_DROP_TAB_CONF);
addRecSignal(GSN_ALTER_TAB_REQ, &Dbtc::execALTER_TAB_REQ);
initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = { &apiConnectptr,
+ &tcConnectptr,
+ &cachePtr,
+ &attrbufptr,
+ &hostptr,
+ &gcpPtr,
+ &tmpApiConnectptr,
+ &timeOutptr,
+ &scanFragptr,
+ &databufptr,
+ &tmpDatabufptr };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
}//Dbtc::Dbtc()
Dbtc::~Dbtc()
@@ -323,17 +338,7 @@ Dbtc::~Dbtc()
deallocRecord((void **)&scanRecord, "ScanRecord",
sizeof(ScanRecord),
cscanrecFileSize);
-
- deallocRecord((void **)&scanOperationRecord,
- "ScanOperationRecord",
- sizeof(ScanOperationRecord),
- cscanOprecFileSize);
-
- deallocRecord((void **)&scanFragmentRecord,
- "ScanFragRec",
- sizeof(ScanFragRec),
- cscanFragrecFileSize);
-
+
deallocRecord((void **)&databufRecord, "DatabufRecord",
sizeof(DatabufRecord),
cdatabufFilesize);
@@ -360,5 +365,3 @@ Dbtc::~Dbtc()
BLOCK_FUNCTIONS(Dbtc);
-
-
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 5afd79687a1..7e1db71faee 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -65,6 +65,7 @@
#include <signaldata/DictTabInfo.hpp>
#include <NdbOut.hpp>
+#include <DebuggerNames.hpp>
// Use DEBUG to print messages that should be
// seen only when we debug the product
@@ -76,6 +77,68 @@
#define INTERNAL_TRIGGER_TCKEYREQ_JBA 0
+#ifdef VM_TRACE
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ConnectionState state){
+ switch(state){
+ case Dbtc::CS_CONNECTED: out << "CS_CONNECTED"; break;
+ case Dbtc::CS_DISCONNECTED: out << "CS_DISCONNECTED"; break;
+ case Dbtc::CS_STARTED: out << "CS_STARTED"; break;
+ case Dbtc::CS_RECEIVING: out << "CS_RECEIVING"; break;
+ case Dbtc::CS_PREPARED: out << "CS_PREPARED"; break;
+ case Dbtc::CS_START_PREPARING: out << "CS_START_PREPARING"; break;
+ case Dbtc::CS_REC_PREPARING: out << "CS_REC_PREPARING"; break;
+ case Dbtc::CS_RESTART: out << "CS_RESTART"; break;
+ case Dbtc::CS_ABORTING: out << "CS_ABORTING"; break;
+ case Dbtc::CS_COMPLETING: out << "CS_COMPLETING"; break;
+ case Dbtc::CS_COMPLETE_SENT: out << "CS_COMPLETE_SENT"; break;
+ case Dbtc::CS_PREPARE_TO_COMMIT: out << "CS_PREPARE_TO_COMMIT"; break;
+ case Dbtc::CS_COMMIT_SENT: out << "CS_COMMIT_SENT"; break;
+ case Dbtc::CS_START_COMMITTING: out << "CS_START_COMMITTING"; break;
+ case Dbtc::CS_COMMITTING: out << "CS_COMMITTING"; break;
+ case Dbtc::CS_REC_COMMITTING: out << "CS_REC_COMMITTING"; break;
+ case Dbtc::CS_WAIT_ABORT_CONF: out << "CS_WAIT_ABORT_CONF"; break;
+ case Dbtc::CS_WAIT_COMPLETE_CONF: out << "CS_WAIT_COMPLETE_CONF"; break;
+ case Dbtc::CS_WAIT_COMMIT_CONF: out << "CS_WAIT_COMMIT_CONF"; break;
+ case Dbtc::CS_FAIL_ABORTING: out << "CS_FAIL_ABORTING"; break;
+ case Dbtc::CS_FAIL_ABORTED: out << "CS_FAIL_ABORTED"; break;
+ case Dbtc::CS_FAIL_PREPARED: out << "CS_FAIL_PREPARED"; break;
+ case Dbtc::CS_FAIL_COMMITTING: out << "CS_FAIL_COMMITTING"; break;
+ case Dbtc::CS_FAIL_COMMITTED: out << "CS_FAIL_COMMITTED"; break;
+ case Dbtc::CS_FAIL_COMPLETED: out << "CS_FAIL_COMPLETED"; break;
+ case Dbtc::CS_START_SCAN: out << "CS_START_SCAN"; break;
+ default:
+ out << "Unknown: " << (int)state; break;
+ }
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::OperationState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::AbortState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ReturnSignal state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ScanRecord::ScanState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ScanFragRec::ScanFragState state){
+ out << (int)state;
+ return out;
+}
+#endif
+
void
Dbtc::updateBuddyTimer(ApiConnectRecordPtr apiPtr)
{
@@ -110,13 +173,7 @@ void Dbtc::execCONTINUEB(Signal* signal)
switch (tcase) {
case TcContinueB::ZRETURN_FROM_QUEUED_DELIVERY:
jam();
- scanptr.i = Tdata0;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- scanFragptr.i = Tdata1;
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- ndbrequire(scanFragptr.p->scanFragState ==
- ScanFragRec::RETURNING_FROM_DELIVERY);
- returnFromQueuedDeliveryLab(signal);
+ ndbrequire(false);
return;
case TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER:
jam();
@@ -204,6 +261,7 @@ void Dbtc::execCONTINUEB(Signal* signal)
tcConnectptr.i = Tdata0;
apiConnectptr.i = Tdata1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->counter--;
sendAbortedAfterTimeout(signal, 1);
return;
case TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS:
@@ -375,6 +433,39 @@ Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal)
}
void
+Dbtc::execWAIT_DROP_TAB_REF(Signal* signal)
+{
+ jamEntry();
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = ref->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ ndbrequire(tabPtr.p->dropping == true);
+ Uint32 nodeId = refToNode(ref->senderRef);
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
+
+ ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
+ ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
+
+ if(!tabPtr.p->dropTable.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ {
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->dropTable.senderData;
+ sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->dropTable.senderRef = 0;
+ }
+}
+
+void
Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId)
{
@@ -526,7 +617,6 @@ void Dbtc::execREAD_CONFIG_REQ(Signal* signal)
ctcConnectFilesize = tcConnect;
ctabrecFilesize = tables;
cscanrecFileSize = tcScan;
- cscanOprecFileSize = localScan;
cscanFragrecFileSize = localScan;
initRecords();
@@ -882,7 +972,15 @@ Dbtc::handleFailedApiNode(Signal* signal,
// sending several signals we will increase the loop count by 64.
/*********************************************************************/
jam();
- handleScanStop(signal, TapiFailedNode);
+
+ apiConnectptr.p->apiFailState = ZTRUE;
+ capiConnectClosing[TapiFailedNode]++;
+
+ ScanRecordPtr scanPtr;
+ scanPtr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
+ close_scan_req(signal, scanPtr, true);
+
TloopCount += 64;
break;
case CS_CONNECTED:
@@ -952,13 +1050,12 @@ Dbtc::handleFailedApiNode(Signal* signal,
apiConnectptr.i++;
if (apiConnectptr.i > ((capiConnectFilesize / 3) - 1)) {
jam();
- capiConnectClosing[TapiFailedNode]--;
/**
* Finished with scanning connection record
*
* Now scan markers
*/
- removeMarkerForFailedAPI(signal, TapiFailedNode, RNIL); // RNIL = first
+ removeMarkerForFailedAPI(signal, TapiFailedNode, 0);
return;
}//if
} while (TloopCount++ < 256);
@@ -973,15 +1070,26 @@ Dbtc::removeMarkerForFailedAPI(Signal* signal,
Uint32 nodeId,
Uint32 startBucket)
{
- CommitAckMarkerIterator iter;
- if(startBucket == RNIL){
- jam();
- capiConnectClosing[nodeId]++;
- m_commitAckMarkerHash.next(0, iter);
- } else {
+ TcFailRecordPtr node_fail_ptr;
+ node_fail_ptr.i = 0;
+ ptrAss(node_fail_ptr, tcFailRecord);
+ if(node_fail_ptr.p->failStatus != FS_IDLE) {
jam();
- m_commitAckMarkerHash.next(startBucket, iter);
+ DEBUG("Restarting removeMarkerForFailedAPI");
+ /**
+ * TC take-over is in progress; we need to restart later,
+ * since the take-over creates new commit-ack markers
+ */
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = 0;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 500, 3);
+ return;
}
+
+ CommitAckMarkerIterator iter;
+ m_commitAckMarkerHash.next(startBucket, iter);
const Uint32 RT_BREAK = 256;
for(Uint32 i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
@@ -1012,7 +1120,6 @@ Dbtc::removeMarkerForFailedAPI(Signal* signal,
/**
* Check so that the record is not still in use
*
- * (This can happen when NF and API Fail happens at the same time)
*/
ApiConnectRecordPtr apiConnectPtr;
apiConnectPtr.i = iter.curr.p->apiConnectPtr;
@@ -1024,9 +1131,8 @@ Dbtc::removeMarkerForFailedAPI(Signal* signal,
*
* Don't remove it, but continueb instead
*/
- break;
+ break;
}
-
sendRemoveMarkers(signal, iter.curr.p);
m_commitAckMarkerHash.release(iter.curr);
@@ -1061,136 +1167,6 @@ void Dbtc::handleApiFailState(Signal* signal, UintR TapiConnectptr)
}//if
}//Dbtc::handleApiFailState()
-/**
- * Dbtc::handleScanStop
- * This function is called when an entire scan should be stopped
- * Check state of the scan and take appropriate action.
- * The parameter TapiFailedNode indicates if the scan is stopped
- * because an API node has failed or if it has been stopped because
- * the scan has timed out.
- *
- */
-void Dbtc::handleScanStop(Signal* signal, UintR TapiFailedNode)
-{
- arrGuard(TapiFailedNode, MAX_NODES);
-
- scanptr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- // If api has failed we must release all resources
- bool apiNodeHasFailed = (TapiFailedNode != 0);
-
- DEBUG("handleScanStop: scanState = "<< scanptr.p->scanState);
-
- switch (scanptr.p->scanState) {
- case ScanRecord::WAIT_SCAN_TAB_INFO:
- case ScanRecord::WAIT_AI:
- jam();
- /**
- * The scan process is still in the definition phase.
- * We will release the resources and then release the connection
- * to the failed API.
- */
- releaseScanResources(signal);
- if (apiNodeHasFailed) {
- jam();
- releaseApiCon(signal, apiConnectptr.i);
- }//if
- break;
-
- case ScanRecord::WAIT_FRAGMENT_COUNT:
- jam();
- if (!apiNodeHasFailed) {
- jam();
- /**
- * Time-out waiting for a local signal can only happen
- * if we have a serious problem.
- */
- systemErrorLab(signal);
- }//if
- capiConnectClosing[TapiFailedNode]++;
- apiConnectptr.p->apiFailState = ZTRUE;
- scanptr.p->apiIsClosed = true;
- break;
-
- case ScanRecord::CLOSING_SCAN:
- jam();
- /**
- * With CLOSING_SCAN it is enough to set the
- * fail state such that the connection is released at the end of the
- * closing process. The close process is already ongoing.
- * Set apiIsClosed to true to indicate that resources should be released
- * at the end of the close process.
- **/
-
- if (apiNodeHasFailed) {
- jam();
- capiConnectClosing[TapiFailedNode]++;
- apiConnectptr.p->apiFailState = ZTRUE;
- scanptr.p->apiIsClosed = true;
- }//if
- if (apiConnectptr.p->apiFailState == ZTRUE) {
- jam();
- handleApiFailState(signal, apiConnectptr.i);
- return;
- }//if
- break;
-
- case ScanRecord::SCAN_NEXT_ORDERED:
- /**
- * In the SCAN_NEXT_ORDERED state we will wait for the next natural place
- * to receive some action from the API and instead of waiting for the
- * API here we will start the abort process.
-
- * After the abort process is completed we will release the connection.
- */
- if (apiNodeHasFailed) {
- jam();
- capiConnectClosing[TapiFailedNode]++;
- apiConnectptr.p->apiFailState = ZTRUE;
- }//if
- // Release resources and send a response to API
- scanptr.p->apiIsClosed = true;
- scanCompletedLab(signal);
- break;
-
- case ScanRecord::DELIVERED:
- case ScanRecord::QUEUED_DELIVERED:
- /**
- * A response has been sent to the api but it has not responded
- */
-
- if (apiNodeHasFailed) {
- jam();
- capiConnectClosing[TapiFailedNode]++;
- apiConnectptr.p->apiFailState = ZTRUE;
- scanptr.p->apiIsClosed = true;
- } else {
- jam();
- /*
- In this case we have received a time-out caused by the application
- waiting too long to continue the scan. We will check the application
- time-out instead of the deadlock detetection time-out. If the
- application time-out hasn't fired we will simply ignore the condition.
- */
- if ((ctcTimer - getApiConTimer(apiConnectptr.i)) <= c_appl_timeout_value) {
- jam();
- return;
- }//if
- // Dont' release, wait until api responds or fails
- scanptr.p->apiIsClosed = false;
- }
- scanCompletedLab(signal);
- break;
-
- default:
- jam();
- systemErrorLab(signal);
- break;
-
- }//switch
-}//Dbtc::handleScanStop()
-
/****************************************************************************
* T C S E I Z E R E Q
* THE APPLICATION SENDS A REQUEST TO SEIZE A CONNECT RECORD TO CARRY OUT A
@@ -1303,13 +1279,13 @@ void Dbtc::execTCRELEASEREQ(Signal* signal)
jam(); /* JUST REPLY OK */
releaseApiCon(signal, apiConnectptr.i);
signal->theData[0] = tuserpointer;
- sendSignal(apiConnectptr.p->ndbapiBlockref,
+ sendSignal(tapiBlockref,
GSN_TCRELEASECONF, signal, 1, JBB);
} else {
jam();
signal->theData[0] = tuserpointer;
signal->theData[1] = ZINVALID_CONNECTION;
- sendSignal(apiConnectptr.p->ndbapiBlockref,
+ sendSignal(tapiBlockref,
GSN_TCRELEASEREF, signal, 2, JBB);
}
} else {
@@ -1341,6 +1317,7 @@ void Dbtc::sendSignalErrorRefuseLab(Signal* signal)
ptrGuard(apiConnectptr);
if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
jam();
+ ndbrequire(false);
signal->theData[0] = apiConnectptr.p->ndbapiConnect;
signal->theData[1] = signal->theData[ttransid_ptr];
signal->theData[2] = signal->theData[ttransid_ptr + 1];
@@ -1372,7 +1349,7 @@ void Dbtc::printState(Signal* signal, int place)
<< " counter = " << apiConnectptr.p->counter
<< " lqhkeyconfrec = " << apiConnectptr.p->lqhkeyconfrec
<< " lqhkeyreqrec = " << apiConnectptr.p->lqhkeyreqrec << endl;
- ndbout << "abortState = " << (int)apiConnectptr.p->abortState
+ ndbout << "abortState = " << apiConnectptr.p->abortState
<< " apiScanRec = " << apiConnectptr.p->apiScanRec
<< " returncode = " << apiConnectptr.p->returncode << endl;
ndbout << "tckeyrec = " << apiConnectptr.p->tckeyrec
@@ -1424,6 +1401,7 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
signal->theData[1] = t1;
signal->theData[2] = t2;
signal->theData[3] = ZABORT_ERROR;
+ ndbrequire(false);
sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
signal, 4, JBB);
return;
@@ -1747,6 +1725,13 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
return;
}
+ case 59:{
+ jam();
+ terrorCode = ZABORTINPROGRESS;
+ abortErrorLab(signal);
+ return;
+ }
+
default:
jam();
systemErrorLab(signal);
@@ -1881,7 +1866,7 @@ void Dbtc::packKeyData000Lab(Signal* signal,
/* THERE WERE UNSENT INFORMATION, SEND IT. */
/*---------------------------------------------------------------------*/
sendKeyinfo(signal, TBRef, tdataPos);
- releaseKeys(signal);
+ releaseKeys();
return;
}//if
databufptr.i = databufptr.p->nextDatabuf;
@@ -2264,6 +2249,8 @@ void Dbtc::initApiConnectRec(Signal* signal,
UintR Ttransid0 = tcKeyReq->transId1;
UintR Ttransid1 = tcKeyReq->transId2;
+ regApiPtr->m_exec_flag = 0;
+ regApiPtr->returncode = 0;
regApiPtr->returnsignal = RS_TCKEYCONF;
regApiPtr->firstTcConnect = RNIL;
regApiPtr->lastTcConnect = RNIL;
@@ -2424,9 +2411,12 @@ void Dbtc::execTCKEYREQ(Signal* signal)
apiConnectptr.p = regApiPtr;
Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo);
+ Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo);
+
bool isIndexOp = regApiPtr->isIndexOp;
bool isIndexOpReturn = regApiPtr->indexOpReturn;
regApiPtr->isIndexOp = false; // Reset marker
+ regApiPtr->m_exec_flag |= TexecFlag;
switch (regApiPtr->apiConnectstate) {
case CS_CONNECTED:{
if (TstartFlag == 1 && getAllowStartTransaction() == true){
@@ -2435,6 +2425,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
//---------------------------------------------------------------------
jam();
initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
} else {
if(getAllowStartTransaction() == true){
/*------------------------------------------------------------------
@@ -2477,14 +2468,18 @@ void Dbtc::execTCKEYREQ(Signal* signal)
//--------------------------------------------------------------------
jam();
initApiConnectRec(signal, regApiPtr);
- } else {
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else if(TexecFlag) {
+ TCKEY_abort(signal, 59);
+ return;
+ } else {
//--------------------------------------------------------------------
// The current transaction was aborted successfully.
// We will not do anything before we receive an operation
// with a start indicator. We will ignore this signal.
//--------------------------------------------------------------------
- jam();
- // DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, ==AS_IDLE");
+ jam();
+ DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, ==AS_IDLE");
return;
}//if
} else {
@@ -2499,11 +2494,14 @@ void Dbtc::execTCKEYREQ(Signal* signal)
//--------------------------------------------------------------------
TCKEY_abort(signal, 2);
return;
- }//if
+ } else if(TexecFlag) {
+ TCKEY_abort(signal, 59);
+ return;
+ }
//----------------------------------------------------------------------
// Ignore signals without start indicator set when aborting transaction.
//----------------------------------------------------------------------
- // DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, !=AS_IDLE");
+ DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, !=AS_IDLE");
return;
}//if
break;
@@ -2593,7 +2591,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
regTcPtr->triggeringOperation = TsenderData;
}
- if (TcKeyReq::getExecuteFlag(Treqinfo)){
+ if (TexecFlag){
Uint32 currSPId = regApiPtr->currSavePointId;
regApiPtr->currSavePointId = ++currSPId;
}
@@ -2614,7 +2612,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint8 TDistrGroupFlag = tcKeyReq->getDistributionGroupFlag(Treqinfo);
Uint8 TDistrGroupTypeFlag = tcKeyReq->getDistributionGroupTypeFlag(Treqinfo);
Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo);
- Uint8 TexecuteFlag = tcKeyReq->getExecuteFlag(Treqinfo);
+ Uint8 TexecuteFlag = TexecFlag;
//RONM_TEST Disable simple reads temporarily
regCachePtr->opSimple = 0;
@@ -3238,7 +3236,7 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
/*--------------------------------------------------------------------
* WE HAVE SENT ALL THE SIGNALS OF THIS OPERATION. SET STATE AND EXIT.
*---------------------------------------------------------------------*/
- releaseAttrinfo(signal);
+ releaseAttrinfo();
if (Tboth) {
jam();
releaseSimpleRead(signal);
@@ -3264,7 +3262,7 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
/* ========================================================================= */
/* ------- RELEASE ALL ATTRINFO RECORDS IN AN OPERATION RECORD ------- */
/* ========================================================================= */
-void Dbtc::releaseAttrinfo(Signal* signal)
+void Dbtc::releaseAttrinfo()
{
UintR Tmp;
AttrbufRecordPtr Tattrbufptr;
@@ -3296,7 +3294,7 @@ void Dbtc::releaseAttrinfo(Signal* signal)
regApiPtr->cachePtr = RNIL;
return;
}//if
- systemErrorLab(signal);
+ systemErrorLab(0);
return;
}//Dbtc::releaseAttrinfo()
@@ -3306,7 +3304,7 @@ void Dbtc::releaseAttrinfo(Signal* signal)
void Dbtc::releaseSimpleRead(Signal* signal)
{
unlinkReadyTcCon(signal);
- releaseTcCon(signal);
+ releaseTcCon();
/**
* No LQHKEYCONF in Simple/Dirty read
@@ -3370,7 +3368,7 @@ void Dbtc::unlinkReadyTcCon(Signal* signal)
}//if
}//Dbtc::unlinkReadyTcCon()
-void Dbtc::releaseTcCon(Signal* signal)
+void Dbtc::releaseTcCon()
{
TcConnectRecord * const regTcPtr = tcConnectptr.p;
UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
@@ -3724,7 +3722,7 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
case CS_RECEIVING:
if (TnoOfOutStanding == 0) {
jam();
- sendtckeyconf(signal, 0);
+ sendtckeyconf(signal, 2);
return;
} else {
if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
@@ -3783,7 +3781,7 @@ void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
ptrAss(localHostptr, hostRecord);
UintR TcurrLen = localHostptr.p->noOfWordsTCKEYCONF;
UintR confInfo = 0;
- TcKeyConf::setCommitFlag(confInfo, TcommitFlag);
+ TcKeyConf::setCommitFlag(confInfo, TcommitFlag == 1);
TcKeyConf::setMarkerFlag(confInfo, Tmarker);
const UintR TpacketLen = 6 + TopWords;
regApiPtr->tckeyrec = 0;
@@ -3808,7 +3806,10 @@ void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
return; // No queued TcKeyConf
}//if
}//if
-
+ if(TcommitFlag){
+ jam();
+ regApiPtr->m_exec_flag = 0;
+ }
TcKeyConf::setNoOfOperations(confInfo, (TopWords >> 1));
if ((TpacketLen > 25) || !is_api){
TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
@@ -4521,6 +4522,8 @@ void Dbtc::copyApi(Signal* signal)
setApiConTimer(tmpApiConnectptr.i, 0, __LINE__);
regTmpApiPtr->apiConnectstate = CS_CONNECTED;
regTmpApiPtr->commitAckMarker = RNIL;
+ regTmpApiPtr->firstTcConnect = RNIL;
+ regTmpApiPtr->lastTcConnect = RNIL;
}//Dbtc::copyApi()
void Dbtc::unlinkApiConnect(Signal* signal)
@@ -4787,7 +4790,7 @@ void Dbtc::releaseTransResources(Signal* signal)
tcConnectptr.i = localTcConnectptr.i;
tcConnectptr.p = localTcConnectptr.p;
localTcConnectptr.i = rtrTcConnectptrIndex;
- releaseTcCon(signal);
+ releaseTcCon();
} while (localTcConnectptr.i != RNIL);
handleGcp(signal);
releaseFiredTriggerData(&apiConnectptr.p->theFiredTriggers);
@@ -4841,7 +4844,7 @@ void Dbtc::releaseApiConCopy(Signal* signal)
void Dbtc::releaseDirtyWrite(Signal* signal)
{
unlinkReadyTcCon(signal);
- releaseTcCon(signal);
+ releaseTcCon();
ApiConnectRecord * const regApiPtr = apiConnectptr.p;
if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
if (regApiPtr->firstTcConnect == RNIL) {
@@ -4924,7 +4927,9 @@ void Dbtc::execLQHKEYREF(Signal* signal)
// The operation executed an index trigger
const Uint32 opType = regTcPtr->operation;
- if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
+ if (errCode == ZALREADYEXIST)
+ errCode = terrorCode = ZNOTUNIQUE;
+ else if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
jam();
/**
* "Normal path"
@@ -4942,7 +4947,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
regApiPtr->lqhkeyconfrec++;
unlinkReadyTcCon(signal);
- releaseTcCon(signal);
+ releaseTcCon();
opPtr.p->triggerExecutionCount--;
if (opPtr.p->triggerExecutionCount == 0) {
@@ -4958,6 +4963,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
}
}
+ Uint32 marker = regTcPtr->commitAckMarker;
markOperationAborted(regApiPtr, regTcPtr);
if(regApiPtr->apiConnectstate == CS_ABORTING){
@@ -4977,7 +4983,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
return;
}//if
- if (regTcPtr->commitAckMarker != RNIL){
+ if (marker != RNIL){
/**
* This was an insert/update/delete/write which failed
* that contained the marker
@@ -4998,12 +5004,15 @@ void Dbtc::execLQHKEYREF(Signal* signal)
Uint32 indexOp = tcConnectptr.p->indexOp;
Uint32 clientData = regTcPtr->clientData;
unlinkReadyTcCon(signal); /* LINK TC CONNECT RECORD OUT OF */
- releaseTcCon(signal); /* RELEASE THE TC CONNECT RECORD */
+ releaseTcCon(); /* RELEASE THE TC CONNECT RECORD */
setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
if (isIndexOp) {
jam();
+ regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
tcKeyRef->connectPtr = indexOp;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
+ apiConnectptr.i = regTcPtr->apiConnect;
+ apiConnectptr.p = regApiPtr;
} else {
jam();
tcKeyRef->connectPtr = clientData;
@@ -5041,11 +5050,11 @@ void Dbtc::execLQHKEYREF(Signal* signal)
jam();
diverify010Lab(signal);
return;
- } else if (regApiPtr->tckeyrec > 0) {
+ } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
jam();
- sendtckeyconf(signal, 0);
+ sendtckeyconf(signal, 2);
return;
- }//if
+ }
}//if
return;
@@ -5140,8 +5149,8 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
// We will abort it instead.
/*******************************************************************/
regApiPtr->returnsignal = RS_NO_RETURN;
- abort010Lab(signal);
errorCode = ZTRANS_STATUS_ERROR;
+ abort010Lab(signal);
}//if
} else {
jam();
@@ -5167,8 +5176,8 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
// transaction. We will abort it instead.
/***********************************************************************/
regApiPtr->returnsignal = RS_NO_RETURN;
- abort010Lab(signal);
errorCode = ZPREPAREINPROGRESS;
+ abort010Lab(signal);
break;
case CS_START_COMMITTING:
@@ -5243,6 +5252,7 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
return;
}//if
+ apiConnectptr.p->m_exec_flag = 1;
switch (apiConnectptr.p->apiConnectstate) {
case CS_STARTED:
case CS_RECEIVING:
@@ -5278,8 +5288,9 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
signal->theData[1] = apiConnectptr.p->transid[0];
signal->theData[2] = apiConnectptr.p->transid[1];
signal->theData[3] = ZROLLBACKNOTALLOWED;
+ signal->theData[4] = apiConnectptr.p->apiConnectstate;
sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREF,
- signal, 4, JBB);
+ signal, 5, JBB);
break;
/* SEND A REFUSAL SIGNAL*/
case CS_ABORTING:
@@ -5700,7 +5711,10 @@ void Dbtc::abortErrorLab(Signal* signal)
return;
}
transP->returnsignal = RS_TCROLLBACKREP;
- transP->returncode = terrorCode;
+ if(transP->returncode == 0){
+ jam();
+ transP->returncode = terrorCode;
+ }
abort010Lab(signal);
}//Dbtc::abortErrorLab()
@@ -6027,8 +6041,12 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
/* THIS TRANSACTION HAVE EXPERIENCED A TIME-OUT AND WE NEED TO*/
/* FIND OUT WHAT WE NEED TO DO BASED ON THE STATE INFORMATION.*/
/*------------------------------------------------------------------*/
- DEBUG("Time-out in state = " << apiConnectptr.p->apiConnectstate
- << " apiConnectptr.i = " << apiConnectptr.i);
+ DEBUG("[ H'" << hex << apiConnectptr.p->transid[0]
+ << " H'" << apiConnectptr.p->transid[1] << "] " << dec
+ << "Time-out in state = " << apiConnectptr.p->apiConnectstate
+ << " apiConnectptr.i = " << apiConnectptr.i
+ << " - exec: " << apiConnectptr.p->m_exec_flag
+ << " - place: " << c_apiConTimer_line[apiConnectptr.i]);
switch (apiConnectptr.p->apiConnectstate) {
case CS_STARTED:
if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
@@ -6042,11 +6060,8 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
jam();
return;
}//if
- apiConnectptr.p->returnsignal = RS_NO_RETURN;
- } else {
- jam();
- apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
}
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
apiConnectptr.p->returncode = ZTIME_OUT_ERROR;
abort010Lab(signal);
return;
@@ -6125,11 +6140,14 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
tcConnectptr.i = apiConnectptr.p->firstTcConnect;
sendAbortedAfterTimeout(signal, 0);
break;
- case CS_START_SCAN:
+ case CS_START_SCAN:{
jam();
- apiConnectptr.p->returncode = ZSCANTIME_OUT_ERROR;
- handleScanStop(signal, 0);
+ ScanRecordPtr scanPtr;
+ scanPtr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
+ scanError(signal, scanPtr, ZSCANTIME_OUT_ERROR);
break;
+ }
case CS_WAIT_ABORT_CONF:
jam();
tcConnectptr.i = apiConnectptr.p->currentTcConnect;
@@ -6289,9 +6307,8 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
warningEvent(buf);
ndbout_c(buf);
ndbrequire(false);
- releaseAbortResources(signal);
- return;
- }//if
+ }
+ releaseAbortResources(signal);
return;
}//if
TloopCount++;
@@ -6302,6 +6319,7 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
// away the job buffer.
/*------------------------------------------------------------------*/
setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->counter++;
signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK;
signal->theData[1] = tcConnectptr.i;
signal->theData[2] = apiConnectptr.i;
@@ -6396,7 +6414,7 @@ void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
UintR texpiredTime[8];
UintR TloopCount = 0;
Uint32 TtcTimer = ctcTimer;
-
+
while ((TscanConPtr + 8) < cscanFragrecFileSize) {
jam();
timeOutPtr[0].i = TscanConPtr + 0;
@@ -6408,14 +6426,14 @@ void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
timeOutPtr[6].i = TscanConPtr + 6;
timeOutPtr[7].i = TscanConPtr + 7;
- ptrAss(timeOutPtr[0], scanFragmentRecord);
- ptrAss(timeOutPtr[1], scanFragmentRecord);
- ptrAss(timeOutPtr[2], scanFragmentRecord);
- ptrAss(timeOutPtr[3], scanFragmentRecord);
- ptrAss(timeOutPtr[4], scanFragmentRecord);
- ptrAss(timeOutPtr[5], scanFragmentRecord);
- ptrAss(timeOutPtr[6], scanFragmentRecord);
- ptrAss(timeOutPtr[7], scanFragmentRecord);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[1]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[2]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[3]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[4]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[5]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[6]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[7]);
tfragTimer[0] = timeOutPtr[0].p->scanFragTimer;
tfragTimer[1] = timeOutPtr[1].p->scanFragTimer;
@@ -6467,7 +6485,7 @@ void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
for ( ; TscanConPtr < cscanFragrecFileSize; TscanConPtr++){
jam();
timeOutPtr[0].i = TscanConPtr;
- ptrAss(timeOutPtr[0], scanFragmentRecord);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
if (timeOutPtr[0].p->scanFragTimer != 0) {
texpiredTime[0] = ctcTimer - timeOutPtr[0].p->scanFragTimer;
if (texpiredTime[0] > ctimeOutValue) {
@@ -6483,6 +6501,7 @@ void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
}//if
}//for
ctimeOutCheckFragActive = TOCS_FALSE;
+
return;
}//timeOutLoopStartFragLab()
@@ -6495,19 +6514,17 @@ void Dbtc::execSCAN_HBREP(Signal* signal)
jamEntry();
scanFragptr.i = signal->theData[0];
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
-
+ c_scan_frag_pool.getPtr(scanFragptr);
switch (scanFragptr.p->scanFragState){
case ScanFragRec::LQH_ACTIVE:
- case ScanFragRec::LQH_ACTIVE_CLOSE:
break;
-
default:
DEBUG("execSCAN_HBREP: scanFragState="<<scanFragptr.p->scanFragState);
systemErrorLab(signal);
break;
}
+ ScanRecordPtr scanptr;
scanptr.i = scanFragptr.p->scanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
@@ -6538,6 +6555,7 @@ void Dbtc::execSCAN_HBREP(Signal* signal)
updateBuddyTimer(apiConnectptr);
scanFragptr.p->startFragTimer(ctcTimer);
} else {
+ ndbassert(false);
DEBUG("SCAN_HBREP when scanFragTimer was turned off");
}
}//execSCAN_HBREP()
@@ -6548,62 +6566,50 @@ void Dbtc::execSCAN_HBREP(Signal* signal)
/*--------------------------------------------------------------------------*/
void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
{
- scanFragptr.i = TscanConPtr;
- ptrAss(scanFragptr, scanFragmentRecord);
- DEBUG("timeOutFoundFragLab: scanFragState = "<<scanFragptr.p->scanFragState);
+ ScanFragRecPtr ptr;
+ c_scan_frag_pool.getPtr(ptr, TscanConPtr);
+ DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState);
/*-------------------------------------------------------------------------*/
// The scan fragment has expired its timeout. Check its state to decide
// what to do.
/*-------------------------------------------------------------------------*/
- switch (scanFragptr.p->scanFragState) {
-
+ switch (ptr.p->scanFragState) {
case ScanFragRec::WAIT_GET_PRIMCONF:
jam();
- // Crash the system if we do not return from DIGETPRIMREQ in time.
- systemErrorLab(signal);
+ ndbrequire(false);
break;
-
- case ScanFragRec::LQH_ACTIVE:
+ case ScanFragRec::LQH_ACTIVE:{
jam();
+
/**
* The LQH expired it's timeout, try to close it
*/
- scanFragError(signal, ZSCAN_FRAG_LQH_ERROR);
- DEBUG(" LQH_ACTIVE - closing the fragment scan in node "
- <<scanFragptr.p->scanFragNodeId);
- break;
-
- case ScanFragRec::LQH_ACTIVE_CLOSE:{
- jam();
- /**
- * The close of LQH expired its time-out. This is not
- * acceptable behaviour from LQH and thus we will shoot
- * it down.
- */
- Uint32 nodeId = scanFragptr.p->scanFragNodeId;
- Uint32 cc = scanFragptr.p->m_connectCount;
- if(getNodeInfo(nodeId).m_connectCount == cc){
- const BlockReference errRef = calcNdbCntrBlockRef(nodeId);
- SystemError * const sysErr = (SystemError*)&signal->theData[0];
- sysErr->errorCode = SystemError::ScanfragTimeout;
- sysErr->errorRef = reference();
- sysErr->data1 = scanFragptr.i;
- sysErr->data2 = scanFragptr.p->scanRec;
- sendSignal(errRef, GSN_SYSTEM_ERROR, signal,
- SystemError::SignalLength, JBA);
- DEBUG(" node " << nodeId << " killed");
- } else {
- DEBUG(" node " << nodeId << " not killed as it has restarted");
+ Uint32 nodeId = refToNode(ptr.p->lqhBlockref);
+ Uint32 connectCount = getNodeInfo(nodeId).m_connectCount;
+ ScanRecordPtr scanptr;
+ scanptr.i = ptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ if(connectCount != ptr.p->m_connectCount){
+ jam();
+ /**
+ * The node has died
+ */
+ ptr.p->scanFragState = ScanFragRec::COMPLETED;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ run.remove(ptr);
+ comp.add(ptr);
+ ptr.p->stopFragTimer();
}
- scanFragptr.p->stopFragTimer();
+
+ scanError(signal, scanptr, ZSCAN_FRAG_LQH_ERROR);
break;
}
-
case ScanFragRec::DELIVERED:
jam();
- case ScanFragRec::RETURNING_FROM_DELIVERY:
- jam();
case ScanFragRec::IDLE:
jam();
case ScanFragRec::QUEUED_FOR_DELIVERY:
@@ -6725,7 +6731,8 @@ void Dbtc::execNODE_FAILREP(Signal* signal)
tcNodeFailptr.i = 0;
ptrAss(tcNodeFailptr, tcFailRecord);
- for (Uint32 tindex = 0; tindex < tnoOfNodes; tindex++) {
+ Uint32 tindex;
+ for (tindex = 0; tindex < tnoOfNodes; tindex++) {
jam();
hostptr.i = cdata[tindex];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
@@ -6842,8 +6849,7 @@ void Dbtc::execNODE_FAILREP(Signal* signal)
}//if
}//for
}//if
-
- for (Uint32 tindex = 0; tindex < tnoOfNodes; tindex++) {
+ for (tindex = 0; tindex < tnoOfNodes; tindex++) {
jam();
hostptr.i = cdata[tindex];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
@@ -6859,47 +6865,41 @@ void Dbtc::execNODE_FAILREP(Signal* signal)
}//Dbtc::execNODE_FAILREP()
void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
- Uint32 scanPtrI,
- Uint32 failedNodeId){
+ Uint32 scanPtrI,
+ Uint32 failedNodeId){
+ ScanRecordPtr scanptr;
for (scanptr.i = scanPtrI; scanptr.i < cscanrecFileSize; scanptr.i++) {
jam();
ptrAss(scanptr, scanRecord);
+ bool found = false;
if (scanptr.p->scanState != ScanRecord::IDLE){
- for (Uint32 i=0; i<16; i++) {
- jam();
- scanFragptr.i = scanptr.p->scanFragrec[i];
- if (scanFragptr.i != RNIL) {
+ jam();
+ ScanFragRecPtr ptr;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ for(run.first(ptr); !ptr.isNull(); ){
+ jam();
+ ScanFragRecPtr curr = ptr;
+ run.next(ptr);
+ if (curr.p->scanFragState == ScanFragRec::LQH_ACTIVE &&
+ refToNode(curr.p->lqhBlockref) == failedNodeId){
jam();
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- if (scanFragptr.p->scanFragNodeId == failedNodeId){
- switch (scanFragptr.p->scanFragState){
- case ScanFragRec::LQH_ACTIVE:
- case ScanFragRec::LQH_ACTIVE_CLOSE:
- jam();
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize,
- apiConnectRecord);
-
- // The connection to this LQH is closed
- scanFragptr.p->lqhBlockref = RNIL;
-
- DEBUG("checkScanActiveInFailedLqh: scanFragError");
- scanFragError(signal, ZSCAN_LQH_ERROR);
-
- break;
-
- default:
- /* empty */
- jam();
- break;
- }// switch
+
+ run.remove(curr);
+ comp.add(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ found = true;
+ }
+ }
+ }
+ if(found){
+ jam();
+ scanError(signal, scanptr, ZSCAN_LQH_ERROR);
+ }
- } //if
- } //if
- } //for
- } //if
-
// Send CONTINUEB to continue later
signal->theData[0] = TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH;
signal->theData[1] = scanptr.i + 1; // Check next scanptr
@@ -6909,6 +6909,15 @@ void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
}//for
}
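
The CONTINUEB self-signal at the end of checkScanActiveInFailedLqh spreads the sweep of the scan-record array over many signal executions instead of one long loop. A minimal analogue of that pattern, assuming a simple job queue in place of the block's signal queue:

#include <cstdint>
#include <functional>
#include <queue>

using Job = std::function<void()>;
static std::queue<Job> jobBoard;   // stand-in for the block's signal queue

void checkOneScanRecord(uint32_t index, uint32_t fileSize) {
  if (index >= fileSize) return;   // all scan records checked
  // ... inspect scan record `index`, fail its fragments if needed ...

  // Re-post ourselves for the next record instead of looping, the same way
  // CONTINUEB(ZCHECK_SCAN_ACTIVE_FAILED_LQH, scanptr.i + 1) is sent above.
  jobBoard.push([index, fileSize] { checkOneScanRecord(index + 1, fileSize); });
}
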
+void
+Dbtc::checkScanFragList(Signal* signal,
+ Uint32 failedNodeId,
+ ScanRecord * scanP,
+ ScanFragList::Head & head){
+
+ DEBUG("checkScanActiveInFailedLqh: scanFragError");
+}
+
void Dbtc::execTAKE_OVERTCCONF(Signal* signal)
{
jamEntry();
@@ -7336,25 +7345,23 @@ Dbtc::sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord * regApiPtr){
}
void
-Dbtc::sendTCKEY_FAILCONF(Signal* signal, const ApiConnectRecord * regApiPtr){
+Dbtc::sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord * regApiPtr){
jam();
TcKeyFailConf * const failConf = (TcKeyFailConf *)&signal->theData[0];
- if(regApiPtr->commitAckMarker == RNIL){
- jam();
- failConf->apiConnectPtr = regApiPtr->ndbapiConnect;
- } else {
- jam();
- failConf->apiConnectPtr = regApiPtr->ndbapiConnect | 1;
+ const Uint32 ref = regApiPtr->ndbapiBlockref;
+ const Uint32 marker = regApiPtr->commitAckMarker;
+ if(ref != 0){
+ failConf->apiConnectPtr = regApiPtr->ndbapiConnect | (marker != RNIL);
+ failConf->transId1 = regApiPtr->transid[0];
+ failConf->transId2 = regApiPtr->transid[1];
+
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEY_FAILCONF, signal, TcKeyFailConf::SignalLength, JBB);
}
- failConf->transId1 = regApiPtr->transid[0];
- failConf->transId2 = regApiPtr->transid[1];
-
- sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCKEY_FAILCONF, signal, TcKeyFailConf::SignalLength, JBB);
+ regApiPtr->commitAckMarker = RNIL;
}
-
/*------------------------------------------------------------*/
/* THIS PART HANDLES THE ABORT PHASE IN THE CASE OF A */
/* NODE FAILURE BEFORE THE COMMIT DECISION. */
@@ -8415,10 +8422,17 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
Uint32 currSavePointId = 0;
- Uint8 scanConcurrency = scanTabReq->getParallelism(reqinfo);
- Uint32 scanParallel;
- Uint32 noOprecPerFrag;
+ Uint32 scanConcurrency = scanTabReq->getParallelism(reqinfo);
+ Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(reqinfo);
+ Uint32 scanParallel = scanConcurrency;
Uint32 errCode;
+ ScanRecordPtr scanptr;
+
+ if(noOprecPerFrag == 0){
+ jam();
+ scanParallel = (scanConcurrency + 15) / 16;
+ noOprecPerFrag = (scanConcurrency >= 16 ? 16 : scanConcurrency & 15);
+ }
jamEntry();
apiConnectptr.i = scanTabReq->apiConnectPtr;
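
The new defaulting above (taken when the request carries no explicit batch size) makes parallelism ceil(scanConcurrency / 16) and caps the per-fragment batch at 16 operations. A small self-contained check of that arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  for (uint32_t scanConcurrency : {7u, 16u, 35u, 240u}) {
    uint32_t scanParallel   = (scanConcurrency + 15) / 16;
    uint32_t noOprecPerFrag = (scanConcurrency >= 16 ? 16 : scanConcurrency & 15);
    std::printf("concurrency=%3u -> parallel=%2u ops/frag=%2u\n",
                (unsigned)scanConcurrency, (unsigned)scanParallel,
                (unsigned)noOprecPerFrag);
  }
  // concurrency=  7 -> parallel= 1 ops/frag= 7
  // concurrency= 16 -> parallel= 1 ops/frag=16
  // concurrency= 35 -> parallel= 3 ops/frag=16
  // concurrency=240 -> parallel=15 ops/frag=16
  return 0;
}
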
@@ -8433,12 +8447,13 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
return;
}//if
ptrAss(apiConnectptr, apiConnectRecord);
+ ApiConnectRecord * transP = apiConnectptr.p;
- if (apiConnectptr.p->apiConnectstate != CS_CONNECTED) {
+ if (transP->apiConnectstate != CS_CONNECTED) {
jam();
// could be left over from TCKEYREQ rollback
- if (apiConnectptr.p->apiConnectstate == CS_ABORTING &&
- apiConnectptr.p->abortState == AS_IDLE) {
+ if (transP->apiConnectstate == CS_ABORTING &&
+ transP->abortState == AS_IDLE) {
jam();
} else {
jam();
@@ -8469,43 +8484,19 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
errCode = ZNO_CONCURRENCY_ERROR;
goto SCAN_TAB_error;
}//if
- if (scanConcurrency <= 16) {
- jam();
- noOprecPerFrag = scanConcurrency;
- } else {
- if (scanConcurrency <= 240) {
- jam();
- //If scanConcurrency > 16 it must be a multiple of 16
- if (((scanConcurrency >> 4) << 4) < scanConcurrency) {
- scanConcurrency = ((scanConcurrency >> 4) << 4) + 16;
- }//if
- } else {
- jam();
- errCode = ZTOO_HIGH_CONCURRENCY_ERROR;
- goto SCAN_TAB_error;
- }//if
- noOprecPerFrag = 16;
- }//if
-
- scanParallel = ((scanConcurrency - 1) >> 4) + 1;
+
/**********************************************************
* CALCULATE THE NUMBER OF SCAN_TABINFO SIGNALS THAT WILL
* ARRIVE TO DEFINE THIS SCAN. THIS ALSO DEFINES THE NUMBER
* OF PARALLEL SCANS AND IT ALSO DEFINES THE NUMBER OF SCAN
* OPERATION POINTER RECORDS TO ALLOCATE.
**********************************************************/
- if (cnoFreeScanOprec < scanParallel) {
- jam();
- errCode = ZNO_SCANREC_ERROR;
- goto SCAN_TAB_error;
- // WE DID NOT HAVE ENOUGH OF FREE SCAN OPERATION POINTER RECORDS.
- // THUS WE REFUSE THE SCAN OPERATION.
- }//if
if (cfirstfreeTcConnect == RNIL) {
jam();
errCode = ZNO_FREE_TC_CONNECTION;
goto SCAN_TAB_error;
}//if
+
if (cfirstfreeScanrec == RNIL) {
jam();
errCode = ZNO_SCANREC_ERROR;
@@ -8521,22 +8512,39 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
if ((transid1 == buddyApiPtr.p->transid[0]) &&
(transid2 == buddyApiPtr.p->transid[1])) {
jam();
+
+ if (buddyApiPtr.p->apiConnectstate == CS_ABORTING) {
+ // transaction has been aborted
+ jam();
+ errCode = buddyApiPtr.p->returncode;
+ goto SCAN_TAB_error;
+ }//if
currSavePointId = buddyApiPtr.p->currSavePointId;
buddyApiPtr.p->currSavePointId++;
}
}
seizeTcConnect(signal);
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+
seizeCacheRecord(signal);
- seizeScanrec(signal);
- initScanrec(signal, scanParallel, noOprecPerFrag);
- initScanTcrec(signal);
- initScanApirec(signal, buddyPtr, transid1, transid2);
- cnoFreeScanOprec = cnoFreeScanOprec - scanParallel;
+ scanptr = seizeScanrec(signal);
+
+ ndbrequire(transP->apiScanRec == RNIL);
+ ndbrequire(scanptr.p->scanApiRec == RNIL);
+
+ initScanrec(scanptr, scanTabReq, scanParallel, noOprecPerFrag);
+
+ //initScanApirec(signal, buddyPtr, transid1, transid2);
+ transP->apiScanRec = scanptr.i;
+ transP->returncode = 0;
+ transP->transid[0] = transid1;
+ transP->transid[1] = transid2;
+ transP->buddyPtr = buddyPtr;
// The scan is started
- apiConnectptr.p->apiConnectstate = CS_START_SCAN;
- apiConnectptr.p->currSavePointId = currSavePointId;
+ transP->apiConnectstate = CS_START_SCAN;
+ transP->currSavePointId = currSavePointId;
/**********************************************************
* We start the timer on scanRec to be able to discover a
@@ -8544,11 +8552,7 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
***********************************************************/
setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
updateBuddyTimer(apiConnectptr);
- if (scanptr.p->noScanTabInfo > 1) {
- jam();
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_TAB_INFO;
- return;
- }//if
+
/***********************************************************
* WE HAVE NOW RECEIVED ALL REFERENCES TO SCAN OBJECTS IN
* THE API. WE ARE NOW READY TO RECEIVE THE ATTRIBUTE INFO
@@ -8564,48 +8568,28 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
SCAN_TAB_error:
jam();
ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
- ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->apiConnectPtr = transP->ndbapiConnect;
ref->transId1 = transid1;
ref->transId2 = transid2;
ref->errorCode = errCode;
- sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
+ ref->closeNeeded = 0;
+ sendSignal(transP->ndbapiBlockref, GSN_SCAN_TABREF,
signal, ScanTabRef::SignalLength, JBB);
return;
}//Dbtc::execSCAN_TABREQ()
-void Dbtc::initScanTcrec(Signal* signal)
-{
- tcConnectptr.p->apiConnect = apiConnectptr.i;
-}//Dbtc::initScanTcrec()
void Dbtc::initScanApirec(Signal* signal,
Uint32 buddyPtr, UintR transid1, UintR transid2)
{
- ApiConnectRecord * apiPtr = apiConnectptr.p;
- apiPtr->apiScanRec = scanptr.i;
- apiPtr->returncode = 0;
- apiPtr->transid[0] = transid1;
- apiPtr->transid[1] = transid2;
- apiPtr->buddyPtr = buddyPtr;
-
}//Dbtc::initScanApirec()
-void Dbtc::initScanOprec(Signal* signal)
-{
- UintR tisoIndex;
-
- for (tisoIndex = 0; tisoIndex < 16; tisoIndex++) {
- scanOpptr.p->apiOpptr[tisoIndex] = cdata[tisoIndex];
- scanOpptr.p->scanOpLength[tisoIndex] = RNIL;
- }//for
-}//Dbtc::initScanOprec()
-
-void Dbtc::initScanrec(Signal* signal,
+void Dbtc::initScanrec(ScanRecordPtr scanptr,
+ const ScanTabReq * scanTabReq,
UintR scanParallel,
UintR noOprecPerFrag)
{
- const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0];
const UintR reqinfo = scanTabReq->requestInfo;
ndbrequire(scanParallel < 16);
@@ -8615,34 +8599,26 @@ void Dbtc::initScanrec(Signal* signal,
scanptr.p->scanTableref = tabptr.i;
scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
scanptr.p->scanParallel = scanParallel;
- scanptr.p->noScanOprec = scanParallel;
- scanptr.p->noScanTabInfo = scanParallel;
- scanptr.p->scanTabInfoReceived = 1;
- scanptr.p->scanProcessesCompleted = 0;
+ scanptr.p->noOprecPerFrag = noOprecPerFrag;
scanptr.p->scanLockMode = ScanTabReq::getLockMode(reqinfo);
scanptr.p->scanLockHold = ScanTabReq::getHoldLockFlag(reqinfo);
scanptr.p->readCommitted = ScanTabReq::getReadCommittedFlag(reqinfo);
scanptr.p->rangeScan = ScanTabReq::getRangeScanFlag(reqinfo);
scanptr.p->scanStoredProcId = scanTabReq->storedProcId;
- scanptr.p->scanReceivedOperations = 0;
- scanptr.p->noOprecPerFrag = noOprecPerFrag;
- scanptr.p->apiIsClosed = false;
- scanptr.p->scanCompletedStatus = ZFALSE;
- scanptr.p->scanState = ScanRecord::SCAN_NEXT_ORDERED;
- for (Uint32 i = 0; i < 16; i++) {
- if (i < scanParallel){
- jam();
- seizeScanOprec(signal);
- scanptr.p->scanOprec[i] = scanOpptr.i;
- } else {
- jam();
- scanptr.p->scanOprec[i] = RNIL;
- }
- scanptr.p->scanFragrec[i] = RNIL;
+ scanptr.p->scanState = ScanRecord::RUNNING;
+ scanptr.p->m_queued_count = 0;
+
+ ScanFragList list(c_scan_frag_pool,
+ scanptr.p->m_running_scan_frags);
+ for (Uint32 i = 0; i < scanParallel; i++) {
+ jam();
+ ScanFragRecPtr ptr;
+ ndbrequire(list.seize(ptr));
+ ptr.p->scanRec = scanptr.i;
+ ptr.p->scanFragId = 0;
+ ptr.p->scanFragConcurrency = noOprecPerFrag;
+ ptr.p->m_apiPtr = cdata[i];
}//for
- scanOpptr.i = scanptr.p->scanOprec[0];
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- initScanOprec(signal);
}//Dbtc::initScanrec()
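
initScanrec now seizes one fragment-scan record per parallel scan from the shared pool directly into the scan's running list, replacing the fixed 16-slot scanFragrec/scanOprec arrays. A simplified sketch of that shape, with std::list in place of the pool-backed ScanFragList and invented *Sketch types:

#include <cstdint>
#include <list>
#include <stdexcept>
#include <vector>

struct ScanFragSketch { uint32_t scanRec, fragId, concurrency, apiPtr; };

struct ScanSketch {
  std::list<ScanFragSketch> running;   // m_running_scan_frags
  std::list<ScanFragSketch> completed; // m_completed_scan_frags
};

void initScanSketch(ScanSketch& scan, uint32_t scanIdx,
                    uint32_t parallel, uint32_t opsPerFrag,
                    const std::vector<uint32_t>& apiPtrs) {
  if (parallel >= 16) throw std::runtime_error("parallelism limit");
  for (uint32_t i = 0; i < parallel; i++) {
    // "seize" a record straight into the running list; the real fragment id is
    // assigned later, when DIGETPRIMREQ is sent for each fragment.
    scan.running.push_back({scanIdx, 0u, opsPerFrag, apiPtrs.at(i)});
  }
}
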
void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode)
@@ -8652,68 +8628,18 @@ void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode)
ref->transId1 = apiConnectptr.p->transid[0];
ref->transId2 = apiConnectptr.p->transid[1];
ref->errorCode = errCode;
+ ref->closeNeeded = 0;
sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
signal, ScanTabRef::SignalLength, JBB);
}//Dbtc::scanTabRefLab()
-/******************************************************
- * execSCAN_TABINFO
- ******************************************************/
-void Dbtc::execSCAN_TABINFO(Signal* signal)
-{
- jamEntry();
- apiConnectptr.i = signal->theData[0];
- for(int i=0; i<16; i++)
- cdata[i] = signal->theData[i+1];
-
- if (apiConnectptr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal);
- return;
- }//if
- ptrAss(apiConnectptr, apiConnectRecord);
-
- if (apiConnectptr.p->apiConnectstate != CS_START_SCAN){
- jam();
- DEBUG("apiPtr(" << apiConnectptr.i << ") Dropping SCAN_TABINFO, wrong state: " << apiConnectptr.p->apiConnectstate);
- return;
- }
-
- scanptr.i = apiConnectptr.p->apiScanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- const Uint32 tscanOprec = scanptr.p->scanTabInfoReceived;
- scanptr.p->scanTabInfoReceived++;
- arrGuard(tscanOprec, 16);
- scanOpptr.i = scanptr.p->scanOprec[tscanOprec];
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- // Start timer and wait for response from API node.
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- updateBuddyTimer(apiConnectptr);
-
- initScanOprec(signal);
- // Start timer and wait for response from API node.
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- updateBuddyTimer(apiConnectptr);
-
- if (scanptr.p->scanTabInfoReceived == scanptr.p->noScanTabInfo) {
- jam();
- /******************************************************************
- * WE HAVE NOW RECEIVED ALL REFERENCES TO SCAN OBJECTS IN THE API.
- * WE ARE NOW READY TO RECEIVE THE ATTRIBUTE INFO IF ANY TO RECEIVE.
- ******************************************************************/
- scanptr.p->scanState = ScanRecord::WAIT_AI;
- return;
- }
- ndbrequire(scanptr.p->scanTabInfoReceived <= scanptr.p->noScanTabInfo);
-}//Dbtc::execSCAN_TABINFO()
-
/*---------------------------------------------------------------------------*/
/* */
/* RECEPTION OF ATTRINFO FOR SCAN TABLE REQUEST. */
/*---------------------------------------------------------------------------*/
void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen)
{
+ ScanRecordPtr scanptr;
scanptr.i = apiConnectptr.p->apiScanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
tcConnectptr.i = scanptr.p->scanTcrec;
@@ -8743,7 +8669,7 @@ void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen)
* THIS SCAN. WE ARE READY TO START THE ACTUAL
* EXECUTION OF THE SCAN QUERY
**************************************************/
- diFcountReqLab(signal);
+ diFcountReqLab(signal, scanptr);
return;
}//if
}//if
@@ -8751,21 +8677,21 @@ void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen)
scanAttrinfo_attrbuf_error:
jam();
- abortScanLab(signal, ZGET_ATTRBUF_ERROR);
+ abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
return;
scanAttrinfo_attrbuf2_error:
jam();
- abortScanLab(signal, ZGET_ATTRBUF_ERROR);
+ abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
return;
scanAttrinfo_len_error:
jam();
- abortScanLab(signal, ZLENGTH_ERROR);
+ abortScanLab(signal, scanptr, ZLENGTH_ERROR);
return;
}//Dbtc::scanAttrinfoLab()
-void Dbtc::diFcountReqLab(Signal* signal)
+void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr)
{
/**
* Check so that the table is not being dropped
@@ -8776,7 +8702,8 @@ void Dbtc::diFcountReqLab(Signal* signal)
if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
;
} else {
- abortScanLab(signal, tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
+ abortScanLab(signal, scanptr,
+ tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
return;
}
@@ -8808,18 +8735,19 @@ void Dbtc::execDI_FCOUNTCONF(Signal* signal)
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
apiConnectptr.i = tcConnectptr.p->apiConnect;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ScanRecordPtr scanptr;
scanptr.i = apiConnectptr.p->apiScanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
if (apiConnectptr.p->apiFailState == ZTRUE) {
jam();
- releaseScanResources(signal);
+ releaseScanResources(scanptr);
handleApiFailState(signal, apiConnectptr.i);
return;
}//if
if (tfragCount == 0) {
jam();
- abortScanLab(signal, ZNO_FRAGMENT_ERROR);
+ abortScanLab(signal, scanptr, ZNO_FRAGMENT_ERROR);
return;
}//if
@@ -8832,35 +8760,43 @@ void Dbtc::execDI_FCOUNTCONF(Signal* signal)
if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
;
} else {
- abortScanLab(signal, tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
+ abortScanLab(signal, scanptr,
+ tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
return;
}
- if (tfragCount < scanptr.p->scanParallel) {
+ if(scanptr.p->scanParallel > tfragCount){
jam();
- for (Uint32 i = tfragCount; i < scanptr.p->scanParallel; i++) {
- jam();
- arrGuard(i, 16);
- scanOpptr.i = scanptr.p->scanOprec[i];
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- releaseScanOprec(signal);
- scanptr.p->scanOprec[i] = RNIL;
- }//for
- scanptr.p->scanParallel = tfragCount;
- }//if
+ abortScanLab(signal, scanptr, ZTOO_HIGH_CONCURRENCY_ERROR);
+ return;
+ }
+
+ scanptr.p->scanParallel = tfragCount;
scanptr.p->scanNoFrag = tfragCount;
- for (UintR i = 0; i < scanptr.p->scanParallel; i++) {
- jam();
- // START EACH OF THE PARALLEL SCAN PROCESSES
- signal->theData[0] = scanptr.i;
- signal->theData[1] = i;
- signal->theData[2] = scanptr.p->noOprecPerFrag;
- sendSignal(cownref, GSN_SCAN_PROCREQ, signal, 3, JBB);
- }//for
- // We don't need the timer for checking API anymore, control goes to LQH.
+ scanptr.p->scanNextFragId = 0;
+ scanptr.p->scanState = ScanRecord::RUNNING;
+
setApiConTimer(apiConnectptr.i, 0, __LINE__);
- scanptr.p->scanNextFragId = scanptr.p->scanParallel;
- scanptr.p->scanState = ScanRecord::SCAN_NEXT_ORDERED;
+ updateBuddyTimer(apiConnectptr);
+
+ ScanFragRecPtr ptr;
+ ScanFragList list(c_scan_frag_pool,
+ scanptr.p->m_running_scan_frags);
+ for (list.first(ptr); !ptr.isNull(); list.next(ptr)){
+ jam();
+
+ ptr.p->lqhBlockref = 0;
+ ptr.p->startFragTimer(ctcTimer);
+ ptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ ptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+ ptr.p->startFragTimer(ctcTimer);
+
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = ptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }//for
}//Dbtc::execDI_FCOUNTCONF()
/******************************************************
@@ -8874,140 +8810,60 @@ void Dbtc::execDI_FCOUNTREF(Signal* signal)
const Uint32 errCode = signal->theData[1];
apiConnectptr.i = tcConnectptr.p->apiConnect;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ScanRecordPtr scanptr;
scanptr.i = apiConnectptr.p->apiScanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
if (apiConnectptr.p->apiFailState == ZTRUE) {
jam();
- releaseScanResources(signal);
+ releaseScanResources(scanptr);
handleApiFailState(signal, apiConnectptr.i);
return;
}//if
- abortScanLab(signal, errCode);
+ abortScanLab(signal, scanptr, errCode);
}//Dbtc::execDI_FCOUNTREF()
-void Dbtc::abortScanLab(Signal* signal, Uint32 errCode)
+void Dbtc::abortScanLab(Signal* signal, ScanRecordPtr scanptr, Uint32 errCode)
{
- releaseScanResources(signal);
scanTabRefLab(signal, errCode);
+ releaseScanResources(scanptr);
}//Dbtc::abortScanLab()
-void Dbtc::scanReleaseResourcesLab(Signal* signal)
-{
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- if (apiConnectptr.p->returncode != 0) {
- jam();
- ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
- ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- ref->transId1 = apiConnectptr.p->transid[0];
- ref->transId2 = apiConnectptr.p->transid[1];
- ref->errorCode = apiConnectptr.p->returncode;
- sendSignal(apiConnectptr.p->ndbapiBlockref,
- GSN_SCAN_TABREF, signal, ScanTabRef::SignalLength, JBB);
- } else {
- jam();
- sendScanTabConf(signal);
- }//if
- releaseScanResources(signal);
- if (apiConnectptr.p->apiFailState == ZTRUE) {
- jam();
- handleApiFailState(signal, apiConnectptr.i);
- return;
- }//if
-}//Dbtc::scanReleaseResourcesLab()
-
-void Dbtc::releaseScanResources(Signal* signal)
+void Dbtc::releaseScanResources(ScanRecordPtr scanPtr)
{
if (apiConnectptr.p->cachePtr != RNIL) {
cachePtr.i = apiConnectptr.p->cachePtr;
ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- releaseAttrinfo(signal);
+ releaseAttrinfo();
}//if
- cnoFreeScanOprec = cnoFreeScanOprec + scanptr.p->noScanOprec;
- scanptr.p->scanCompletedStatus = ZCLOSED;
- tcConnectptr.i = scanptr.p->scanTcrec;
+ tcConnectptr.i = scanPtr.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- releaseTcCon(signal);
- for (Uint32 i = 0; i < 16; i++) {
- jam();
- scanFragptr.i = scanptr.p->scanFragrec[i];
- scanptr.p->scanFragrec[i] = RNIL;
- if (scanFragptr.i != RNIL) {
- jam();
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- releaseScanFragrec(signal);
- }//if
- scanOpptr.i = scanptr.p->scanOprec[i];
- scanptr.p->scanOprec[i] = RNIL;
- if (scanOpptr.i != RNIL) {
- jam();
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- releaseScanOprec(signal);
- }//if
- }//for
- releaseScanrec(signal);
+ releaseTcCon();
+
+ ScanFragList x(c_scan_frag_pool,
+ scanPtr.p->m_completed_scan_frags);
+ x.release();
+ ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
+
+
+ ndbassert(scanPtr.p->scanApiRec == apiConnectptr.i);
+ ndbassert(apiConnectptr.p->apiScanRec == scanPtr.i);
+
+ // link into free list
+ scanPtr.p->nextScan = cfirstfreeScanrec;
+ scanPtr.p->scanState = ScanRecord::IDLE;
+ scanPtr.p->scanTcrec = RNIL;
+ scanPtr.p->scanApiRec = RNIL;
+ cfirstfreeScanrec = scanPtr.i;
+
apiConnectptr.p->apiScanRec = RNIL;
apiConnectptr.p->apiConnectstate = CS_CONNECTED;
setApiConTimer(apiConnectptr.i, 0, __LINE__);
}//Dbtc::releaseScanResources()
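
releaseScanResources now releases only the completed fragment list, requires the other three lists to be empty already, and pushes the scan record back onto the cfirstfreeScanrec free list. A simplified sketch of that free-list handling, with invented *Sketch types and UINT32_MAX standing in for RNIL:

#include <cstdint>
#include <vector>

struct ScanRecSketch {
  uint32_t nextScan = UINT32_MAX;  // RNIL stand-in
  bool idle = true;
  std::vector<uint32_t> running, queued, delivered, completed;
};

void releaseScanSketch(std::vector<ScanRecSketch>& records,
                       uint32_t& firstFree, uint32_t i) {
  ScanRecSketch& s = records.at(i);
  s.completed.clear();             // fragment records go back to the pool
  // Everything else must already have drained into "completed".
  if (!s.running.empty() || !s.queued.empty() || !s.delivered.empty())
    return;                        // would be ndbrequire() in Dbtc
  s.idle = true;
  s.nextScan = firstFree;          // push onto the singly linked free list
  firstFree = i;
}
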
-/******************************************************
- * execSCAN_PROCREQ
- ******************************************************/
-void Dbtc::execSCAN_PROCREQ(Signal* signal)
-{
- jamEntry();
- scanptr.i = signal->theData[0];
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
- const UintR tscanFragId = signal->theData[1];
- ndbrequire(tscanFragId < 16);
- const UintR tscanNoOprec = signal->theData[2];
-
- ndbrequire(cfirstfreeScanFragrec != RNIL);
- seizeScanFragrec(signal);
-
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
-
- scanptr.p->scanFragrec[tscanFragId] = scanFragptr.i;
- scanFragptr.p->scanRec = scanptr.i;
- scanFragptr.p->scanIndividual = tscanFragId * tscanNoOprec;
- scanFragptr.p->scanFragProcId = tscanFragId;
- scanFragptr.p->scanFragId = tscanFragId;
- scanFragptr.p->scanFragConcurrency = tscanNoOprec;
- scanFragptr.p->scanFragCompletedStatus = ZFALSE;
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
-
- {
- /**
- * Check table
- */
- TableRecordPtr tabPtr;
- tabPtr.i = scanptr.p->scanTableref;
- ptrAss(tabPtr, tableRecord);
- Uint32 schemaVersion = scanptr.p->scanSchemaVersion;
- if(tabPtr.p->checkTable(schemaVersion) == false){
- jam();
- scanFragError(signal, tabPtr.p->getErrorCode(schemaVersion));
- return;
- }
- }
-
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = scanFragptr.i;
- signal->theData[2] = scanptr.p->scanTableref;
- signal->theData[3] = tscanFragId;
- sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
- scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
- updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
-}//Dbtc::execSCAN_PROCREQ()
-
/****************************************************************
* execDIGETPRIMCONF
*
@@ -9020,15 +8876,15 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
jamEntry();
// tcConnectptr.i in theData[0] is not used
scanFragptr.i = signal->theData[1];
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
+ c_scan_frag_pool.getPtr(scanFragptr);
tnodeid = signal->theData[2];
arrGuard(tnodeid, MAX_NDB_NODES);
ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
scanFragptr.p->stopFragTimer();
- scanFragptr.p->lqhBlockref = RNIL;
-
+
+ ScanRecordPtr scanptr;
scanptr.i = scanFragptr.p->scanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
@@ -9042,7 +8898,12 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
Uint32 schemaVersion = scanptr.p->scanSchemaVersion;
if(tabPtr.p->checkTable(schemaVersion) == false){
jam();
- scanFragError(signal, tabPtr.p->getErrorCode(schemaVersion));
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ run.remove(scanFragptr);
+ comp.add(scanFragptr);
+ scanError(signal, scanptr, tabPtr.p->getErrorCode(schemaVersion));
return;
}
}
@@ -9057,18 +8918,24 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
case ScanRecord::CLOSING_SCAN:
jam();
updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
- sendScanProcConf(signal);
+ {
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ run.remove(scanFragptr);
+ comp.add(scanFragptr);
+ }
+ close_scan_req_send_conf(signal, scanptr);
return;
default:
jam();
/*empty*/;
break;
}//switch
- scanFragptr.p->scanFragNodeId = tnodeid;
- scanFragptr.p->lqhBlockref = calcLqhBlockRef(tnodeid);
+ Uint32 ref = calcLqhBlockRef(tnodeid);
+ scanFragptr.p->lqhBlockref = ref;
scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount;
- sendScanFragReq(signal);
+ sendScanFragReq(signal, scanptr.p, scanFragptr.p);
attrbufptr.i = cachePtr.p->firstAttrbuf;
while (attrbufptr.i != RNIL) {
jam();
@@ -9076,12 +8943,12 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
sendAttrinfo(signal,
scanFragptr.i,
attrbufptr.p,
- scanFragptr.p->lqhBlockref);
+ ref);
attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT];
}//while
scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- updateBuddyTimer(apiConnectptr);
scanFragptr.p->startFragTimer(ctcTimer);
+ updateBuddyTimer(apiConnectptr);
/*********************************************
* WE HAVE NOW STARTED A FRAGMENT SCAN. NOW
* WAIT FOR THE FIRST SCANNED RECORDS
@@ -9101,9 +8968,20 @@ void Dbtc::execDIGETPRIMREF(Signal* signal)
// tcConnectptr.i in theData[0] is not used.
scanFragptr.i = signal->theData[1];
const Uint32 errCode = signal->theData[2];
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
+ c_scan_frag_pool.getPtr(scanFragptr);
ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
- scanFragError(signal, errCode);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ run.remove(scanFragptr);
+ comp.add(scanFragptr);
+
+ scanError(signal, scanptr, errCode);
}//Dbtc::execDIGETPRIMREF()
/**
@@ -9120,8 +8998,9 @@ void Dbtc::execSCAN_FRAGREF(Signal* signal)
const Uint32 errCode = ref->errorCode;
scanFragptr.i = ref->senderData;
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
+ c_scan_frag_pool.getPtr(scanFragptr);
+ ScanRecordPtr scanptr;
scanptr.i = scanFragptr.p->scanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
@@ -9141,56 +9020,64 @@ void Dbtc::execSCAN_FRAGREF(Signal* signal)
* stop fragment timer and call scanFragError to start
* close of the other fragment scans
*/
- scanFragptr.p->lqhBlockref = RNIL;
- scanFragError(signal, errCode);
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+ {
+ scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ run.remove(scanFragptr);
+ comp.add(scanFragptr);
+ scanFragptr.p->stopFragTimer();
+ }
+ scanError(signal, scanptr, errCode);
}//Dbtc::execSCAN_FRAGREF()
/**
- * Dbtc::scanFragError
+ * Dbtc::scanError
*
* Called when an error occurs during
- * a scan of a fragment.
- * NOTE that one scan may consist of several fragment scans.
- *
*/
-void Dbtc::scanFragError(Signal* signal, Uint32 errorCode)
+void Dbtc::scanError(Signal* signal, ScanRecordPtr scanptr, Uint32 errorCode)
{
jam();
- scanptr.i = scanFragptr.p->scanRec;
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- DEBUG("scanFragError, errorCode = "<< errorCode
- << ", scanState = " << scanptr.p->scanState);
-
- scanFragptr.p->stopFragTimer();
+ ScanRecord* scanP = scanptr.p;
+
+ DEBUG("scanError, errorCode = "<< errorCode <<
+ ", scanState = " << scanptr.p->scanState);
- apiConnectptr.i = scanptr.p->scanApiRec;
+ apiConnectptr.i = scanP->scanApiRec;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- if (scanFragptr.p->lqhBlockref == RNIL){
- // Since the lqh is closed, this scan process should be reported
- // as completed immediately
- jam();
- updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
- sendScanProcConf(signal);
- }//if
+ ndbrequire(apiConnectptr.p->apiScanRec == scanptr.i);
- // If close of the scan is not already started
- if (scanptr.p->scanState != ScanRecord::CLOSING_SCAN) {
+ if(scanP->scanState == ScanRecord::CLOSING_SCAN){
jam();
- apiConnectptr.p->returncode = errorCode;
-
- /**
- * Only set apiIsClosed if API is waiting for an answer
- */
- if (scanptr.p->scanState == ScanRecord::SCAN_NEXT_ORDERED){
- jam();
- scanptr.p->apiIsClosed = true;
- }
- scanCompletedLab(signal);
+ close_scan_req_send_conf(signal, scanptr);
return;
- }//if
-}//Dbtc::scanFragError()
-
+ }
+
+ ndbrequire(scanP->scanState == ScanRecord::RUNNING);
+
+ /**
+ * Close scan wo/ having received an order to do so
+ */
+ close_scan_req(signal, scanptr, false);
+
+ const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
+ if(apiFail){
+ jam();
+ return;
+ }
+
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = apiConnectptr.p->transid[0];
+ ref->transId2 = apiConnectptr.p->transid[1];
+ ref->errorCode = errorCode;
+ ref->closeNeeded = 1;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+}//Dbtc::scanError()
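
scanError replaces scanFragError: if the scan is already in CLOSING_SCAN it only tries to complete the close, otherwise it starts an unrequested close and, unless the API has failed, reports the error with SCAN_TABREF carrying closeNeeded = 1 so the API knows it must still send the closing SCAN_NEXTREQ. A compact sketch of that decision flow, with invented names and printf in place of the signal send:

#include <cstdio>

enum class ScanState { Running, Closing };

struct ScanSketch { ScanState state; bool apiFailed; };

void scanErrorSketch(ScanSketch& scan, unsigned errorCode) {
  if (scan.state == ScanState::Closing) {
    // close already in progress; just see whether the close conf can go out
    return;
  }
  scan.state = ScanState::Closing;   // close_scan_req(..., req_received = false)
  if (scan.apiFailed) return;        // nobody left to tell
  std::printf("SCAN_TABREF errorCode=%u closeNeeded=1\n", errorCode);
}
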
/************************************************************
* execSCAN_FRAGCONF
@@ -9205,14 +9092,13 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
const Uint32 noCompletedOps = conf->completedOps;
- for(Uint32 i = 0; i<noCompletedOps; i++)
- cdata[i] = conf->opReturnDataLen[i];
scanFragptr.i = conf->senderData;
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
-
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ ScanRecordPtr scanptr;
scanptr.i = scanFragptr.p->scanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
-
+
apiConnectptr.i = scanptr.p->scanApiRec;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -9223,270 +9109,79 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
jam();
systemErrorLab(signal);
}//if
-
- scanFragptr.p->scanFragCompletedStatus = conf->fragmentCompleted;
- scanFragptr.p->stopFragTimer();
-
- switch (scanFragptr.p->scanFragCompletedStatus) {
- case ZFALSE:
- case ZTRUE:
- jam();
- /* empty */
- break;
-
- case ZCLOSED:
- /* The scan has finished this fragment. */
- jam();
- returnFromQueuedDeliveryLab(signal);
- return;
- break;
-
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
-
- // CHECK THE STATE OF THE DELIVERY PROCESS TO THE APPLICATION.
- switch (scanptr.p->scanState) {
- case ScanRecord::SCAN_NEXT_ORDERED:
+
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+
+ const Uint32 status = conf->fragmentCompleted;
+
+ if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
jam();
- /**
- * THE APPLICATION HAVE ISSUED A SCAN_NEXTREQ AND IS WAITING
- * FOR MORE OPERATIONS. SEND OPERATIONS DIRECTLY
- */
- if (noCompletedOps > 0) {
- jam();
- setScanReceived(signal, noCompletedOps);
- sendScanTabConf(signal);
- scanptr.p->scanState = ScanRecord::DELIVERED;
- scanFragptr.p->scanFragState = ScanFragRec::DELIVERED;
+ if(status == ZFALSE){
+ /**
+       * We have already started closing (a close was sent), so ignore this
+ */
return;
- }//if
- break;
-
- case ScanRecord::DELIVERED:
- case ScanRecord::QUEUED_DELIVERED:
- jam();
- /**
- * THE APPLICATION HAVE ALREADY RECEIVED A DELIVERY.
- * QUEUE THE RECEIVED SCAN OPERATIONS AND ISSUE THEM
- * WHEN THE APPLICATION ASKS FOR MORE.
- */
- if (noCompletedOps > 0) {
+ } else {
jam();
- setScanReceived(signal, noCompletedOps);
- scanptr.p->scanState = ScanRecord::QUEUED_DELIVERED;
- scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
- return;
- }//if
- break;
-
- case ScanRecord::CLOSING_SCAN:
- jam();
- /*************************************************
- * WE ARE CURRENTLY CLOSING THE SCAN.
- *
- * WE HAVE ALREADY ORDERED THE FRAGMENT TO CLOSE ITS
- * SCAN. THIS SIGNAL MUST HAVE BEEN SENT BEFORE THIS
- * CLOSE SIGNAL ARRIVED. SIMPLY IGNORE THIS SIGNAL.
- **************************************************/
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
+
+ run.remove(scanFragptr);
+ comp.add(scanFragptr);
+ scanFragptr.p->stopFragTimer();
+ scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
+ }
+ close_scan_req_send_conf(signal, scanptr);
return;
- break;
-
- default:
- jam();
- systemErrorLab(signal);
- break;
-
- }//switch
+ }
- /**
- * THERE WAS NO TUPLES LEFT TO REPORT IN THIS FRAGMENT. CLOSE SCAN
- * HAVE NOT BEEN ORDERED. WE CAN CONTINUE THE SCAN PROCESS IMMEDIATELY.
- * THE COMPLETED STATUS MUST BE TRUE SINCE IT IS NOT CLOSED. IF IT WAS
- * FALSE IT MUST HAVE BEEN MORE TUPLES TO SCAN AND AT LEAST ONE OF
- * THOSE SHOULD HAVE BEEN REPORTED.
- */
- if (scanFragptr.p->scanFragCompletedStatus == ZFALSE) {
- jam();
+ if(status == ZCLOSED && scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){
/**
- * THE SENDING NODE IS OUT OF ORDER WE WILL KILL IT BY SENDING SYSTEM
- * ERROR TO IT
+ * Start on next fragment
*/
- const BlockReference errRef =
- calcNdbCntrBlockRef(scanFragptr.p->scanFragNodeId);
- SystemError * const sysErr = (SystemError*)&signal->theData[0];
- sysErr->errorCode = SystemError::ScanfragStateError;
- sysErr->errorRef = reference();
- sendSignal(errRef, GSN_SYSTEM_ERROR, signal,
- SystemError::SignalLength, JBA);
- return;
- }//if
- returnFromQueuedDeliveryLab(signal);
-}//Dbtc::execSCAN_FRAGCONF()
-
-void Dbtc::returnFromQueuedDeliveryLab(Signal* signal)
-{
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-
- switch(scanFragptr.p->scanFragCompletedStatus) {
- case ZFALSE:
- {
- /*********************************************************************
- * WE HAVE SENT THE SCANNED OPERATION TO THE APPLICATION AND WE HAVE
- * RECEIVED THE ORDER TO CONTINUE SCANNING. THE CURRENT FRAGMENT STILL
- * CONTAINS MORE TUPLES TO SCAN.
- *********************************************************************/
- jam();
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
- ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
- nextReq->senderData = scanFragptr.i;
- nextReq->closeFlag = ZFALSE;
- nextReq->transId1 = apiConnectptr.p->transid[0];
- nextReq->transId2 = apiConnectptr.p->transid[1];
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
- }
- break;
-
- case ZTRUE:
- {
- /*********************************************************************
- * WE HAVE SENT THE SCANNED OPERATION TO THE APPLICATION AND WE HAVE
- * RECEIVED THE ORDER TO CONTINUE SCANNING. THE CURRENT FRAGMENT HAVE
- * BEEN COMPLETELY SCANNED AND WE ARE READY TO CLOSE IT.
- *********************************************************************/
- jam();
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE_CLOSE;
- ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
- nextReq->senderData = scanFragptr.i;
- nextReq->closeFlag = ZTRUE;
- nextReq->transId1 = apiConnectptr.p->transid[0];
- nextReq->transId2 = apiConnectptr.p->transid[1];
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
- }
- break;
-
- case ZCLOSED:
- {
- /********************************************************************
- * THE SCANNED FRAGMENT HAVE BEEN CLOSED. IF CLOSE SCAN HAVE BEEN
- * ORDERED THEN WE CAN REPORT THAT THIS SCAN PROCESS IS COMPLETED.
- * ALSO IF THERE ARE NO MORE FRAGMENTS TO SCAN WE CAN REPORT THAT
- * THE SCAN PROCESS IS COMPLETED.
- ********************************************************************/
- jam();
- scanFragptr.p->lqhBlockref = RNIL;
- if ((scanptr.p->scanState != ScanRecord::CLOSING_SCAN) &&
- (scanptr.p->scanNextFragId < scanptr.p->scanNoFrag)){
- jam();
- scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
- scanFragptr.p->scanFragId = scanptr.p->scanNextFragId;
- scanptr.p->scanNextFragId++;
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = scanFragptr.i;
- signal->theData[2] = scanptr.p->scanTableref;
- signal->theData[3] = scanFragptr.p->scanFragId;
- sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
- } else {
- jam();
- sendScanProcConf(signal);
- }//if
- }
- break;
-
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
-
- updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
-}//Dbtc::returnFromQueuedDeliveryLab()
-
-/**********************************************************
- * execSCAN_PROCCONF
- **********************************************************/
-void Dbtc::execSCAN_PROCCONF(Signal* signal)
-{
- jamEntry();
+ ndbrequire(noCompletedOps == 0);
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+ scanFragptr.p->startFragTimer(ctcTimer);
- scanptr.i = signal->theData[0];
- ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
- scanptr.p->scanProcessesCompleted++;
- ndbassert(scanptr.p->scanProcessesCompleted <= scanptr.p->scanParallel);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ return;
+ }
+
+ Uint32 chksum = 0;
+ Uint32 totalLen = 0;
+ for(Uint32 i = 0; i<noCompletedOps; i++){
+ Uint32 tmp = conf->opReturnDataLen[i];
+ chksum += (tmp << i);
+ totalLen += tmp;
+ }
+
+ {
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
+
+ run.remove(scanFragptr);
+ queued.add(scanFragptr);
+ scanptr.p->m_queued_count++;
+ }
- scanFragptr.i = signal->theData[1];
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
+ scanFragptr.p->m_ops = noCompletedOps;
+ scanFragptr.p->m_chksum = chksum;
+ scanFragptr.p->m_totalLen = totalLen;
+ scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
scanFragptr.p->stopFragTimer();
- scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
-
- if (scanptr.p->scanProcessesCompleted == scanptr.p->scanParallel) {
+
+ if(scanptr.p->m_queued_count > /** Min */ 0){
jam();
-
- // Check that all scan processes are in state COMPLETED
- for (Uint32 i = 0; i < 16; i++) {
- scanFragptr.i = scanptr.p->scanFragrec[i];
- if (scanFragptr.i != RNIL) {
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::COMPLETED);
- }
- }
-
- // ALL SCAN PROCESSES HAS COMPLETED
- scanptr.p->scanCompletedStatus = ZTRUE;
- switch (scanptr.p->scanState) {
-
- case ScanRecord::CLOSING_SCAN:
- jam();
- if (scanptr.p->apiIsClosed == true) {
- jam();
- /*
- * The API has either failed or ordered a close of this scan
- * it's resources should be released and a response sent
- */
- scanReleaseResourcesLab(signal);
- return;
- }//if
-
- /**
- * The close have been performed but the API is still alive and not
- * expecting a response, keep resources until API fails or it orders
- * a close
- */
- return;
- case ScanRecord::SCAN_NEXT_ORDERED:
- jam();
- /**
- * The scan is completed and api is waiting for a response.
- * Reslease resources and send a response.
- */
- scanReleaseResourcesLab(signal);
- return;
- case ScanRecord::DELIVERED:
- case ScanRecord::QUEUED_DELIVERED:
- jam();
- /**
- * All processes have reported completion, wait for a new request from
- * API and start close of the scan then.
- */
- scanptr.p->scanReceivedOperations = 0;
- scanptr.p->scanState = ScanRecord::CLOSING_SCAN;
- return;
- default:
- jam();
- systemErrorLab(signal);
- break;
- }//switch
+ sendScanTabConf(signal, scanptr.p);
}
-}//Dbtc::execSCAN_PROCCONF()
-
+}//Dbtc::execSCAN_FRAGCONF()
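
With the queued-delivery rework, execSCAN_FRAGCONF folds the per-operation result lengths into a position-weighted checksum and a total length before parking the fragment on the queued list. A small sketch of that bookkeeping; the chksum formula matches the loop above, while the surrounding types are invented:

#include <cstdint>
#include <vector>

struct BatchInfo { uint32_t ops, chksum, totalLen; };

BatchInfo summarize(const std::vector<uint32_t>& opReturnDataLen) {
  BatchInfo b{static_cast<uint32_t>(opReturnDataLen.size()), 0u, 0u};
  for (uint32_t i = 0; i < b.ops; i++) {
    b.chksum   += opReturnDataLen[i] << i;   // position-weighted, as in the diff
    b.totalLen += opReturnDataLen[i];
  }
  return b;
}
// e.g. lengths {3, 5, 2} -> ops=3, totalLen=10, chksum = 3 + (5<<1) + (2<<2) = 21
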
/****************************************************************************
* execSCAN_NEXTREQ
@@ -9522,6 +9217,7 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
ref->transId1 = transid1;
ref->transId2 = transid2;
ref->errorCode = ZSTATE_ERROR;
+ ref->closeNeeded = 0;
sendSignal(signal->senderBlockRef(), GSN_SCAN_TABREF,
signal, ScanTabRef::SignalLength, JBB);
DEBUG("Wrong transid");
@@ -9546,358 +9242,340 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
}
DEBUG("scanTabRefLab: ZSTATE_ERROR");
DEBUG(" apiConnectstate="<<apiConnectptr.p->apiConnectstate);
+ ndbrequire(false); //B2 indication of strange things going on
scanTabRefLab(signal, ZSTATE_ERROR);
return;
}//if
-
+
/*******************************************************
* START THE ACTUAL LOGIC OF SCAN_NEXTREQ.
********************************************************/
// Stop the timer that is used to check for timeout in the API
setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ ScanRecordPtr scanptr;
scanptr.i = apiConnectptr.p->apiScanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ScanRecord* scanP = scanptr.p;
- if (scanptr.p->apiIsClosed == true) {
- jam();
- /**
- * The close is already started. Api has failed or
- * has not responded in time so this signal is not allowed
- */
- DEBUG("execSCAN_NEXTREQ: apiIsClosed == true");
- DEBUG(" apiConnectstate="<<apiConnectptr.p->apiConnectstate);
- DEBUG(" scanState="<<scanptr.p->scanState);
- return;
- }//if
-
-
- if (scanptr.p->scanState == ScanRecord::CLOSING_SCAN) {
- jam();
- /*********************************************************************
- * WE HAVE STARTED A CLOSE OF THIS SCAN OPERATION. NOW WE CAN REPORT
- * THIS TO THE APPLICATION. BEFORE WE REPORT IT TO THE APPLICATION WE
- * MUST COMPLETE THE CLOSE FIRST.
- *********************************************************************/
- if (scanptr.p->scanCompletedStatus == ZTRUE) {
- jam();
- /*********************************************************************
- * THE SCAN IS ALREADY COMPLETED. WE ARE NOW READY TO COMPLETE THE SCAN
- * BY RELEASING ALL RESOURCES AND SENDING THE CONFIRMATION TO THE
- * APPLICATION.
- *********************************************************************/
- scanReleaseResourcesLab(signal);
- return;
- } else {
- jam();
- /*********************************************************************
- * THE CLOSE IS ONGOING BUT NOT YET COMPLETED. WE WILL SET THE STATE
- * TO INDICATE THAT THE APPLICATION IS WAITING FOR THE RESPONSE.
- *********************************************************************/
- scanptr.p->apiIsClosed = true;
- return;
- }//if
- }//if
+ const Uint32 len = signal->getLength() - 4;
if (stopScan == ZTRUE) {
jam();
/*********************************************************************
* APPLICATION IS CLOSING THE SCAN.
**********************************************************************/
- scanptr.p->apiIsClosed = true;
- scanCompletedLab(signal);
+ ndbrequire(len == 0);
+ close_scan_req(signal, scanptr, true);
return;
}//if
- /*********************************************************************
- * THOSE SCAN PROCESSES THAT WAS SENT IN PREVIOUS MESSAGE ARE
- * ACKNOWLEDGED BY THIS REQUEST FOR MORE SCANNED OPERATIONS. WE CAN
- * THUS RESTART THOSE SCAN PROCESSES.
- *********************************************************************/
- for (Uint32 i = 0; i < 16; i++) {
+ if (scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
jam();
- scanFragptr.i = scanptr.p->scanFragrec[i];
- if (scanFragptr.i != RNIL) {
- jam();
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- if (scanFragptr.p->scanFragState == ScanFragRec::DELIVERED) {
- jam();
- scanFragptr.p->scanFragState = ScanFragRec::RETURNING_FROM_DELIVERY;
- signal->theData[0] = TcContinueB::ZRETURN_FROM_QUEUED_DELIVERY;
- signal->theData[1] = scanptr.i;
- signal->theData[2] = scanFragptr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
- }
- }//if
- }//for
-
- switch (scanptr.p->scanState) {
- case ScanRecord::QUEUED_DELIVERED:
- /*********************************************************************
- * A NUMBER OF SCAN PROCESSES ARE READY TO DELIVER. DELIVER AND SET
- * STATE TO DELIVERED. ALSO CONTINUE PROCESS QUEUED SCAN PROCESSES.
- *********************************************************************/
- jam();
- sendScanTabConf(signal);
- scanptr.p->scanState = ScanRecord::DELIVERED;
- /*********************************************************************
- * UPDATE STATUS OF THE SCAN PROCESSES THAT WAS NOW SENT TO THE
- * APPLICATION TO DELIVERED. PREVIOUSLY THEY WERE QUEUED FOR DELIVERY.
- *********************************************************************/
- for (Uint32 i = 0; i < 16; i++) {
- jam();
- scanFragptr.i = scanptr.p->scanFragrec[i];
- if (scanFragptr.i != RNIL) {
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- if (scanFragptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY) {
- jam();
- scanFragptr.p->scanFragState = ScanFragRec::DELIVERED;
- }//if
- }//if
- }//for
- return;
- case ScanRecord::DELIVERED:
- jam();
- /*********************************************************************
- * WE HAVE NOT ANY QUEUED DELIVERIES. SET STATE TO INDICATE IT IS OK
- * TO SEND SCAN_TABCONF AS SOON AS ANY FRAGMENT IS READY TO DELIVER.
- *********************************************************************/
- scanptr.p->scanState = ScanRecord::SCAN_NEXT_ORDERED;
- return;
- case ScanRecord::SCAN_NEXT_ORDERED:
- jam();
- /* empty */
+ /**
+ * The scan is closing (typically due to error)
+ * but the API hasn't understood it yet
+ *
+ * Wait for API close request
+ */
return;
- default:
+ }
+
+  // Copy op ptrs so I don't overwrite them when sending...
+ memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len);
+
+ ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
+ nextReq->closeFlag = ZFALSE;
+ nextReq->transId1 = apiConnectptr.p->transid[0];
+ nextReq->transId2 = apiConnectptr.p->transid[1];
+
+ ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ for(Uint32 i = 0 ; i<len; i++){
jam();
- systemErrorLab(signal);
- return;
- }//switch
+ scanFragptr.i = signal->theData[i+25];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED);
+
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ scanFragptr.p->startFragTimer(ctcTimer);
+
+ scanFragptr.p->m_ops = 0;
+ nextReq->senderData = scanFragptr.i;
+ sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ delivered.remove(scanFragptr);
+ running.add(scanFragptr);
+ }//for
+
}//Dbtc::execSCAN_NEXTREQ()
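
execSCAN_NEXTREQ now receives the fragment records the API is acknowledging, moves each one from the delivered list back to running, and asks its LQH for the next batch. A simplified analogue, with std::vector in place of the pool-backed lists and an invented Frag type:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Frag { uint32_t id; bool delivered; };

void scanNextSketch(std::vector<Frag*>& delivered, std::vector<Frag*>& running,
                    const std::vector<uint32_t>& ackedIds) {
  for (uint32_t id : ackedIds) {
    auto it = std::find_if(delivered.begin(), delivered.end(),
                           [id](Frag* f) { return f->id == id; });
    if (it == delivered.end()) continue;   // would be ndbrequire() in Dbtc
    Frag* f = *it;
    delivered.erase(it);
    running.push_back(f);                  // back to LQH_ACTIVE
    f->delivered = false;
    // here a SCAN_NEXTREQ with closeFlag = ZFALSE is sent to f's LQH
  }
}
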
-void Dbtc::scanCompletedLab(Signal* signal) {
- scanptr.p->scanReceivedOperations = 0;
- scanptr.p->scanState = ScanRecord::CLOSING_SCAN;
+void
+Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
- // Iterate over all fragment scans and check if
- // they need to be closed in LQH
- for (Uint32 i = 0; i < 16; i++) {
- if (scanptr.p->scanFragrec[i] == RNIL) {
- jam();
- continue;
- }
- scanFragptr.i = scanptr.p->scanFragrec[i];
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
+ ScanRecord* scanP = scanPtr.p;
+ ndbrequire(scanPtr.p->scanState != ScanRecord::IDLE);
+ scanPtr.p->scanState = ScanRecord::CLOSING_SCAN;
+ scanPtr.p->m_close_scan_req = req_received;
+
+ /**
+ * Queue : Action
+ * ============= : =================
+ * completed : -
+ * running : close -> LQH
+ * delivered w/ : close -> LQH
+ * delivered wo/ : move to completed
+ * queued w/ : close -> LQH
+ * queued wo/ : move to completed
+ */
+
+ ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
+ nextReq->closeFlag = ZTRUE;
+ nextReq->transId1 = apiConnectptr.p->transid[0];
+ nextReq->transId2 = apiConnectptr.p->transid[1];
+
+ {
+ ScanFragRecPtr ptr;
+ ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
+ ScanFragList completed(c_scan_frag_pool, scanP->m_completed_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
- if (scanFragptr.p->lqhBlockref == RNIL){
- // The connection to this LQH has been closed
- jam();
- continue;
+ // Close running
+ for(running.first(ptr); !ptr.isNull(); ){
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ running.next(ptr);
+
+ if(curr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF){
+ jam();
+ continue;
+ }
+ ndbrequire(curr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+
+ curr.p->startFragTimer(ctcTimer);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
}
-
- if (scanFragptr.p->scanFragCompletedStatus == ZCLOSED){
- // The fragment scan is already completed
+
+ // Close delivered
+ for(delivered.first(ptr); !ptr.isNull(); ){
jam();
- continue;
- }
-
- if (scanFragptr.p->scanFragState == ScanFragRec::RETURNING_FROM_DELIVERY){
- // The scan process is soon to continue executing
- // Set scanFragCompletedStatus to ZTRUE so that LQH is properly closed
- // when this scan process "returns from delivery"
- jam();
- DEBUG("scanCompletedLab: setting scanFragCompletedStatus to ZTRUE");
- scanFragptr.p->scanFragCompletedStatus = ZTRUE;
- continue;
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ delivered.next(ptr);
+
+ ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED);
+ delivered.remove(curr);
+
+ if(curr.p->m_ops > 0){
+ jam();
+ running.add(curr);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ curr.p->startFragTimer(ctcTimer);
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+
+ } else {
+ jam();
+ completed.add(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }//for
+
+ /**
+ * All queued with data should be closed
+ */
+ for(queued.first(ptr); !ptr.isNull(); ){
+ jam();
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
+ queued.remove(curr);
+ scanP->m_queued_count--;
+
+ if(curr.p->m_ops > 0){
+ jam();
+ running.add(curr);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ curr.p->startFragTimer(ctcTimer);
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ } else {
+ jam();
+ completed.add(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
}
-
- apiConnectptr.i = scanptr.p->scanApiRec;
- ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ }
+ close_scan_req_send_conf(signal, scanPtr);
+}
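
The queue/action table in the comment above boils down to: anything still running needs an explicit close sent to LQH, parked fragments whose last batch still holds rows also need a close, and parked fragments with empty batches can be completed locally. A minimal sketch of that decision, with invented names:

#include <cstdint>

struct FragC { uint32_t ops; };   // rows still held by the last batch (m_ops)

enum class CloseAction { SendCloseToLqh, MoveToCompleted };

CloseAction closeActionFor(bool running, const FragC& f) {
  if (running) return CloseAction::SendCloseToLqh;       // running list
  return f.ops > 0 ? CloseAction::SendCloseToLqh         // delivered/queued w/ data
                   : CloseAction::MoveToCompleted;       // delivered/queued wo/ data
}
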
- ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
- nextReq->senderData = scanFragptr.i;
- nextReq->closeFlag = ZTRUE;
- nextReq->transId1 = apiConnectptr.p->transid[0];
- nextReq->transId2 = apiConnectptr.p->transid[1];
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
- updateBuddyTimer(apiConnectptr);
+void
+Dbtc::close_scan_req_send_conf(Signal* signal, ScanRecordPtr scanPtr){
- updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE_CLOSE;
+ jam();
- }//for
-}//Dbtc::scanCompletedLab()
+ ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
+ //ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
-void Dbtc::sendScanProcConf(Signal* signal){
- signal->theData[0] = scanptr.i;
- signal->theData[1] = scanFragptr.i;
- sendSignal(cownref, GSN_SCAN_PROCCONF, signal, 2, JBB);
-}
+#if 0
+ {
+ ScanFragList comp(c_scan_frag_pool, scanPtr.p->m_completed_scan_frags);
+ ScanFragRecPtr ptr;
+ for(comp.first(ptr); !ptr.isNull(); comp.next(ptr)){
+ ndbrequire(ptr.p->scanFragTimer == 0);
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::COMPLETED);
+ }
+ }
+#endif
+
+ if(!scanPtr.p->m_running_scan_frags.isEmpty()){
+ jam();
+ return;
+ }
-void Dbtc::releaseScanrec(Signal* signal) {
- scanptr.p->nextScan = cfirstfreeScanrec;
- scanptr.p->scanState = ScanRecord::IDLE;
- scanptr.p->scanTcrec = RNIL;
- cfirstfreeScanrec = scanptr.i;
-}//Dbtc::releaseScanrec()
-
-void Dbtc::releaseScanFragrec(Signal* signal) {
- scanFragptr.p->nextScanFrag = cfirstfreeScanFragrec;
- scanFragptr.p->scanFragState = ScanFragRec::IDLE;
- cfirstfreeScanFragrec = scanFragptr.i;
- scanFragptr.p->stopFragTimer();
-}//Dbtc::releaseScanFragrec()
+ const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
+
+ if(!scanPtr.p->m_close_scan_req){
+ jam();
+ /**
+     * The API hasn't ordered the close yet
+ */
+ return;
+ }
-void Dbtc::releaseScanOprec(Signal* signal) {
- scanOpptr.p->nextScanOp = cfirstfreeScanOprec;
- cfirstfreeScanOprec = scanOpptr.i;
-}//Dbtc::releaseScanOprec()
+ Uint32 ref = apiConnectptr.p->ndbapiBlockref;
+ if(!apiFail && ref){
+ jam();
+ ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
+ conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ conf->requestInfo = ScanTabConf::EndOfData;
+ conf->transId1 = apiConnectptr.p->transid[0];
+ conf->transId2 = apiConnectptr.p->transid[1];
+ sendSignal(ref, GSN_SCAN_TABCONF, signal, ScanTabConf::SignalLength, JBB);
+ }
+
+ releaseScanResources(scanPtr);
+
+ if(apiFail){
+ jam();
+ /**
+ * API has failed
+ */
+ handleApiFailState(signal, apiConnectptr.i);
+ }
+}
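
close_scan_req_send_conf only answers the API once every fragment has drained out of the running list, and only if a close was actually requested (m_close_scan_req); error-initiated closes wait for the API to ask. A compact sketch of that gating, with printf in place of the SCAN_TABCONF send:

#include <cstdio>

struct CloseState { bool runningEmpty, closeRequested, apiFailed; };

void maybeConfirmClose(const CloseState& s) {
  if (!s.runningEmpty) return;     // still waiting for LQH close replies
  if (!s.closeRequested) return;   // error-initiated close: wait for the API
  if (!s.apiFailed)
    std::printf("SCAN_TABCONF requestInfo=EndOfData\n");
  // resources are released here; on API failure the fail handling continues
}
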
-void Dbtc::seizeScanrec(Signal* signal) {
+Dbtc::ScanRecordPtr
+Dbtc::seizeScanrec(Signal* signal) {
+ ScanRecordPtr scanptr;
scanptr.i = cfirstfreeScanrec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
cfirstfreeScanrec = scanptr.p->nextScan;
scanptr.p->nextScan = RNIL;
ndbrequire(scanptr.p->scanState == ScanRecord::IDLE);
+ return scanptr;
}//Dbtc::seizeScanrec()
-void Dbtc::seizeScanFragrec(Signal* signal) {
- scanFragptr.i = cfirstfreeScanFragrec;
- ptrCheckGuard(scanFragptr, cscanFragrecFileSize, scanFragmentRecord);
- cfirstfreeScanFragrec = scanFragptr.p->nextScanFrag;
- scanFragptr.p->nextScanFrag = RNIL;
- ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::IDLE);
-}//Dbtc::seizeScanFragrec()
-
-void Dbtc::seizeScanOprec(Signal* signal) {
- scanOpptr.i = cfirstfreeScanOprec;
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- cfirstfreeScanOprec = scanOpptr.p->nextScanOp;
- scanOpptr.p->nextScanOp = RNIL;
-}//Dbtc::seizeScanOprec()
-
-
-void Dbtc::sendScanFragReq(Signal* signal) {
- arrGuard(scanFragptr.p->scanFragProcId, 16);
- scanOpptr.i = scanptr.p->scanOprec[scanFragptr.p->scanFragProcId];
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
-
+void Dbtc::sendScanFragReq(Signal* signal,
+ ScanRecord* scanP,
+ ScanFragRec* scanFragP){
Uint32 requestInfo = 0;
- ScanFragReq::setConcurrency(requestInfo, scanFragptr.p->scanFragConcurrency);
- ScanFragReq::setLockMode(requestInfo, scanptr.p->scanLockMode);
- ScanFragReq::setHoldLockFlag(requestInfo, scanptr.p->scanLockHold);
- if(scanptr.p->scanLockMode == 1){ // Not read -> keyinfo
+ ScanFragReq::setConcurrency(requestInfo, scanFragP->scanFragConcurrency);
+ ScanFragReq::setLockMode(requestInfo, scanP->scanLockMode);
+ ScanFragReq::setHoldLockFlag(requestInfo, scanP->scanLockHold);
+ if(scanP->scanLockMode == 1){ // Not read -> keyinfo
jam();
ScanFragReq::setKeyinfoFlag(requestInfo, 1);
}
- ScanFragReq::setReadCommittedFlag(requestInfo, scanptr.p->readCommitted);
- ScanFragReq::setRangeScanFlag(requestInfo, scanptr.p->rangeScan);
- ScanFragReq::setAttrLen(requestInfo, scanptr.p->scanAiLength);
+ ScanFragReq::setReadCommittedFlag(requestInfo, scanP->readCommitted);
+ ScanFragReq::setRangeScanFlag(requestInfo, scanP->rangeScan);
+ ScanFragReq::setAttrLen(requestInfo, scanP->scanAiLength);
ScanFragReq::setScanPrio(requestInfo, 1);
- apiConnectptr.i = scanptr.p->scanApiRec;
+ apiConnectptr.i = scanP->scanApiRec;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
ScanFragReq * const req = (ScanFragReq *)&signal->theData[0];
req->senderData = scanFragptr.i;
req->resultRef = apiConnectptr.p->ndbapiBlockref;
req->requestInfo = requestInfo;
req->savePointId = apiConnectptr.p->currSavePointId;
- req->tableId = scanptr.p->scanTableref;
- req->fragmentNo = scanFragptr.p->scanFragId;
- req->schemaVersion = scanptr.p->scanSchemaVersion;
+ req->tableId = scanP->scanTableref;
+ req->fragmentNo = scanFragP->scanFragId;
+ req->schemaVersion = scanP->scanSchemaVersion;
req->transId1 = apiConnectptr.p->transid[0];
req->transId2 = apiConnectptr.p->transid[1];
for(int i = 0; i<16; i++){
- req->clientOpPtr[i] = scanOpptr.p->apiOpptr[i];
+ req->clientOpPtr[i] = scanFragP->m_apiPtr;
}
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_FRAGREQ, signal, 25, JBB);
+ sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal, 25, JBB);
updateBuddyTimer(apiConnectptr);
- scanFragptr.p->startFragTimer(ctcTimer);
- scanFragptr.p->scanFragCompletedStatus = ZFALSE;
+ scanFragP->startFragTimer(ctcTimer);
}//Dbtc::sendScanFragReq()
-void Dbtc::sendScanTabConf(Signal* signal) {
+void Dbtc::sendScanTabConf(Signal* signal, ScanRecord * scanP) {
jam();
- /*******************************************************
- * Send SCAN_TABINFO with information about all
- * received operations
- *******************************************************/
- Int32 operationsToSend = scanptr.p->scanReceivedOperations;
- Uint32 sstOpIndex = 0;
-
- while (operationsToSend > 0){
+ Uint32* ops = signal->getDataPtrSend()+4;
+ Uint32 op_count = scanP->m_queued_count;
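+ // 4 fixed words plus 3 words per queued op; if this exceeds the 25-word
+ // signal, stage the op data after word 25 and send it as a long-signal section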
+ if(4 + 3 * op_count > 25){
jam();
-
- ScanTabInfo * info = (ScanTabInfo*)&signal->theData[0];
- info->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
-
- for (int i = 0; i < 16; i++){
- jam();
- arrGuard(sstOpIndex, 16);
- scanOpptr.i = scanptr.p->scanOprec[sstOpIndex];
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- info->operLenAndIdx[i] = scanOpptr.p->scanOpLength[i];
- operationsToSend--;
- scanOpptr.p->scanOpLength[i] = RNIL;
- }
- sstOpIndex++;
- sendSignal(apiConnectptr.p->ndbapiBlockref,
- GSN_SCAN_TABINFO, signal, ScanTabInfo::SignalLength, JBB);
+ ops += 21;
}
-
- /********************************************************
- * Send SCAN_TABCONF signaling that a result set have
- * been sent to the API
- *********************************************************/
- Uint32 requestInfo = 0;
- ScanTabConf::setOperations(requestInfo, scanptr.p->scanReceivedOperations);
- ScanTabConf::setScanStatus(requestInfo, scanptr.p->scanCompletedStatus);
-
+
ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
- conf->requestInfo = requestInfo;
+ conf->requestInfo = op_count;
conf->transId1 = apiConnectptr.p->transid[0];
conf->transId2 = apiConnectptr.p->transid[1];
- sendSignal(apiConnectptr.p->ndbapiBlockref,
- GSN_SCAN_TABCONF, signal, ScanTabConf::SignalLength, JBB);
-
- scanptr.p->scanReceivedOperations = 0;
- // Start the scanRec-timer again and wait for response from the API.
- setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
- updateBuddyTimer(apiConnectptr);
+ ScanFragRecPtr ptr;
+ ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
+ ScanFragList completed(c_scan_frag_pool, scanP->m_completed_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ for(queued.first(ptr); !ptr.isNull(); ){
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
+ * ops++ = curr.p->m_apiPtr;
+ * ops++ = curr.i;
+ * ops++ = (curr.p->m_totalLen << 5) + curr.p->m_ops;
+
+ queued.remove(curr);
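+ // frags that returned rows await the API ack (delivered); empty frags are done (completed)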
+ if(curr.p->m_ops > 0){
+ delivered.add(curr);
+ curr.p->scanFragState = ScanFragRec::DELIVERED;
+ curr.p->stopFragTimer();
+ } else {
+ (* --ops) = ScanTabConf::EndOfData; ops++;
+ completed.add(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }
+
+ if(4 + 3 * op_count > 25){
+ jam();
+ LinearSectionPtr ptr[3];
+ ptr[0].p = signal->getDataPtrSend()+25;
+ ptr[0].sz = 3 * op_count;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
+ ScanTabConf::SignalLength, JBB, ptr, 1);
+ } else {
+ jam();
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
+ ScanTabConf::SignalLength + 3 * op_count, JBB);
+ }
+ scanP->m_queued_count = 0;
}//Dbtc::sendScanTabConf()
-/*
- * Write index and length of all operations received into
- * scanOprec->scanOpLength buffer
- */
-void Dbtc::setScanReceived(Signal* signal, Uint32 noCompletedOps)
-{
- UintR tssrIndividual;
- UintR tssrOprecIndex;
- UintR tssrLengthPlusIndex;
- UintR tssrOpIndex;
-
- ndbrequire(noCompletedOps <= 16);
- tssrIndividual = scanFragptr.p->scanIndividual;
- for (Uint32 i = 0; i < noCompletedOps; i++) {
- jam();
- tssrOprecIndex = scanptr.p->scanReceivedOperations >> 4;
- arrGuard(tssrOprecIndex, 16);
- scanOpptr.i = scanptr.p->scanOprec[tssrOprecIndex];
- ptrCheckGuard(scanOpptr, cscanOprecFileSize, scanOperationRecord);
- tssrLengthPlusIndex = tssrIndividual << 24;
- tssrLengthPlusIndex += cdata[i];
- tssrOpIndex = scanptr.p->scanReceivedOperations & 15;
- scanOpptr.p->scanOpLength[tssrOpIndex] = tssrLengthPlusIndex;
- scanptr.p->scanReceivedOperations++;
- tssrIndividual++;
- }//for
-}//Dbtc::setScanReceived()
-
void Dbtc::gcpTcfinished(Signal* signal)
{
signal->theData[1] = tcheckGcpId;
@@ -9913,6 +9591,7 @@ void Dbtc::initApiConnect(Signal* signal)
ndbrequire(tiacTmp > 0);
guard4 = tiacTmp + 1;
for (cachePtr.i = 0; cachePtr.i < guard4; cachePtr.i++) {
+ refresh_watch_dog();
ptrAss(cachePtr, cacheRecord);
cachePtr.p->firstAttrbuf = RNIL;
cachePtr.p->lastAttrbuf = RNIL;
@@ -9927,6 +9606,7 @@ void Dbtc::initApiConnect(Signal* signal)
guard4 = tiacTmp - 1;
for (apiConnectptr.i = 0; apiConnectptr.i <= guard4; apiConnectptr.i++) {
+ refresh_watch_dog();
jam();
ptrAss(apiConnectptr, apiConnectRecord);
apiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
@@ -9952,6 +9632,7 @@ void Dbtc::initApiConnect(Signal* signal)
guard4 = (2 * tiacTmp) - 1;
for (apiConnectptr.i = tiacTmp; apiConnectptr.i <= guard4; apiConnectptr.i++)
{
+ refresh_watch_dog();
jam();
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
apiConnectptr.p->apiConnectstate = CS_RESTART;
@@ -9977,6 +9658,7 @@ void Dbtc::initApiConnect(Signal* signal)
guard4 = (3 * tiacTmp) - 1;
for (apiConnectptr.i = 2 * tiacTmp; apiConnectptr.i <= guard4;
apiConnectptr.i++) {
+ refresh_watch_dog();
jam();
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
setApiConTimer(apiConnectptr.i, 0, __LINE__);
@@ -10005,6 +9687,7 @@ void Dbtc::initattrbuf(Signal* signal)
{
ndbrequire(cattrbufFilesize > 0);
for (attrbufptr.i = 0; attrbufptr.i < cattrbufFilesize; attrbufptr.i++) {
+ refresh_watch_dog();
jam();
ptrAss(attrbufptr, attrbufRecord);
attrbufptr.p->attrbuf[ZINBUF_NEXT] = attrbufptr.i + 1; /* NEXT ATTRBUF */
@@ -10019,6 +9702,7 @@ void Dbtc::initdatabuf(Signal* signal)
{
ndbrequire(cdatabufFilesize > 0);
for (databufptr.i = 0; databufptr.i < cdatabufFilesize; databufptr.i++) {
+ refresh_watch_dog();
ptrAss(databufptr, databufRecord);
databufptr.p->nextDatabuf = databufptr.i + 1;
}//for
@@ -10143,11 +9827,15 @@ void Dbtc::initialiseRecordsLab(Signal* signal, UintR Tdata0,
/* ========================================================================= */
void Dbtc::initialiseScanrec(Signal* signal)
{
+ ScanRecordPtr scanptr;
ndbrequire(cscanrecFileSize > 0);
for (scanptr.i = 0; scanptr.i < cscanrecFileSize; scanptr.i++) {
+ refresh_watch_dog();
jam();
ptrAss(scanptr, scanRecord);
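+ // placement new runs the ScanRecord constructor so the scan-frag list heads start out empty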
+ new (scanptr.p) ScanRecord();
scanptr.p->scanState = ScanRecord::IDLE;
+ scanptr.p->scanApiRec = RNIL;
scanptr.p->nextScan = scanptr.i + 1;
}//for
scanptr.i = cscanrecFileSize - 1;
@@ -10158,34 +9846,10 @@ void Dbtc::initialiseScanrec(Signal* signal)
void Dbtc::initialiseScanFragrec(Signal* signal)
{
- ndbrequire(cscanFragrecFileSize > 0);
- for (scanFragptr.i = 0; scanFragptr.i < cscanFragrecFileSize;
- scanFragptr.i++) {
- jam();
- ptrAss(scanFragptr, scanFragmentRecord);
- scanFragptr.p->scanFragState = ScanFragRec::IDLE;
- scanFragptr.p->stopFragTimer();
- scanFragptr.p->nextScanFrag = scanFragptr.i + 1;
- }//for
- scanFragptr.i = cscanFragrecFileSize - 1;
- ptrAss(scanFragptr, scanFragmentRecord);
- scanFragptr.p->nextScanFrag = RNIL;
- cfirstfreeScanFragrec = 0;
}//Dbtc::initialiseScanFragrec()
void Dbtc::initialiseScanOprec(Signal* signal)
{
- ndbrequire(cscanOprecFileSize > 0);
- for (scanOpptr.i = 0; scanOpptr.i < cscanOprecFileSize; scanOpptr.i++) {
- jam();
- ptrAss(scanOpptr, scanOperationRecord);
- scanOpptr.p->nextScanOp = scanOpptr.i + 1;
- }//for
- scanOpptr.i = cscanOprecFileSize - 1;
- ptrAss(scanOpptr, scanOperationRecord);
- scanOpptr.p->nextScanOp = RNIL;
- cfirstfreeScanOprec = 0;
- cnoFreeScanOprec = cscanOprecFileSize;
}//Dbtc::initialiseScanOprec()
void Dbtc::initTable(Signal* signal)
@@ -10193,6 +9857,7 @@ void Dbtc::initTable(Signal* signal)
ndbrequire(ctabrecFilesize > 0);
for (tabptr.i = 0; tabptr.i < ctabrecFilesize; tabptr.i++) {
+ refresh_watch_dog();
ptrAss(tabptr, tableRecord);
tabptr.p->currentSchemaVersion = 0;
tabptr.p->storedTable = true;
@@ -10209,6 +9874,7 @@ void Dbtc::initialiseTcConnect(Signal* signal)
// Place half of tcConnectptr's in cfirstfreeTcConnectFail list
Uint32 titcTmp = ctcConnectFilesize / 2;
for (tcConnectptr.i = 0; tcConnectptr.i < titcTmp; tcConnectptr.i++) {
+ refresh_watch_dog();
jam();
ptrAss(tcConnectptr, tcConnectRecord);
tcConnectptr.p->tcConnectstate = OS_RESTART;
@@ -10224,6 +9890,7 @@ void Dbtc::initialiseTcConnect(Signal* signal)
// Place other half in cfirstfreeTcConnect list
for (tcConnectptr.i = titcTmp; tcConnectptr.i < ctcConnectFilesize;
tcConnectptr.i++) {
+ refresh_watch_dog();
jam();
ptrAss(tcConnectptr, tcConnectRecord);
tcConnectptr.p->tcConnectstate = OS_RESTART;
@@ -10310,8 +9977,8 @@ void Dbtc::releaseAbortResources(Signal* signal)
if (apiConnectptr.p->cachePtr != RNIL) {
cachePtr.i = apiConnectptr.p->cachePtr;
ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
- releaseAttrinfo(signal);
- releaseKeys(signal);
+ releaseAttrinfo();
+ releaseKeys();
}//if
tcConnectptr.i = apiConnectptr.p->firstTcConnect;
while (tcConnectptr.i != RNIL) {
@@ -10320,7 +9987,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
// Clear any markers that were set in CS_RECEIVING state
clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
rarTcConnectptr.i = tcConnectptr.p->nextTcConnect;
- releaseTcCon(signal);
+ releaseTcCon();
tcConnectptr.i = rarTcConnectptr.i;
}//while
apiConnectptr.p->firstTcConnect = RNIL;
@@ -10334,48 +10001,53 @@ void Dbtc::releaseAbortResources(Signal* signal)
apiConnectptr.p->apiConnectstate = CS_ABORTING;
apiConnectptr.p->abortState = AS_IDLE;
- bool ok = false;
- Uint32 blockRef = apiConnectptr.p->ndbapiBlockref;
- switch(apiConnectptr.p->returnsignal){
- case RS_TCROLLBACKCONF:
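+ // only reply to the API if it actually requested execution or has failed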
+ if(apiConnectptr.p->m_exec_flag || apiConnectptr.p->apiFailState == ZTRUE){
jam();
- ok = true;
- signal->theData[0] = apiConnectptr.p->ndbapiConnect;
- signal->theData[1] = apiConnectptr.p->transid[0];
- signal->theData[2] = apiConnectptr.p->transid[1];
- sendSignal(blockRef, GSN_TCROLLBACKCONF, signal, 3, JBB);
- break;
- case RS_TCROLLBACKREP:{
- jam();
- ok = true;
- TcRollbackRep * const tcRollbackRep =
- (TcRollbackRep *) signal->getDataPtr();
-
- tcRollbackRep->connectPtr = apiConnectptr.p->ndbapiConnect;
- tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
- tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
- tcRollbackRep->returnCode = apiConnectptr.p->returncode;
- sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
- TcRollbackRep::SignalLength, JBB);
- }
- break;
- case RS_NO_RETURN:
- jam();
- ok = true;
- break;
- case RS_TCKEYCONF:
- case RS_TCKEYREF:
- case RS_TC_COMMITCONF:
- break;
- }
- if(!ok){
- jam();
- ndbout_c("returnsignal = %d", apiConnectptr.p->returnsignal);
- sendSystemError(signal);
- }//if
+ bool ok = false;
+ Uint32 blockRef = apiConnectptr.p->ndbapiBlockref;
+ ReturnSignal ret = apiConnectptr.p->returnsignal;
+ apiConnectptr.p->returnsignal = RS_NO_RETURN;
+ apiConnectptr.p->m_exec_flag = 0;
+ switch(ret){
+ case RS_TCROLLBACKCONF:
+ jam();
+ ok = true;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(blockRef, GSN_TCROLLBACKCONF, signal, 3, JBB);
+ break;
+ case RS_TCROLLBACKREP:{
+ jam();
+ ok = true;
+ TcRollbackRep * const tcRollbackRep =
+ (TcRollbackRep *) signal->getDataPtr();
+
+ tcRollbackRep->connectPtr = apiConnectptr.p->ndbapiConnect;
+ tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
+ tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
+ tcRollbackRep->returnCode = apiConnectptr.p->returncode;
+ sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
+ TcRollbackRep::SignalLength, JBB);
+ }
+ break;
+ case RS_NO_RETURN:
+ jam();
+ ok = true;
+ break;
+ case RS_TCKEYCONF:
+ case RS_TC_COMMITCONF:
+ break;
+ }
+ if(!ok){
+ jam();
+ ndbout_c("returnsignal = %d", apiConnectptr.p->returnsignal);
+ sendSystemError(signal);
+ }//if
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- apiConnectptr.p->abortState = AS_IDLE;
+ }
+ setApiConTimer(apiConnectptr.i, 0,
+ 100000+c_apiConTimer_line[apiConnectptr.i]);
if (apiConnectptr.p->apiFailState == ZTRUE) {
jam();
handleApiFailState(signal, apiConnectptr.i);
@@ -10393,6 +10065,8 @@ void Dbtc::releaseApiCon(Signal* signal, UintR TapiConnectPtr)
cfirstfreeApiConnect = TlocalApiConnectptr.i;
setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__);
TlocalApiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
+ ndbassert(TlocalApiConnectptr.p->apiScanRec == RNIL);
+ TlocalApiConnectptr.p->ndbapiBlockref = 0;
}//Dbtc::releaseApiCon()
void Dbtc::releaseApiConnectFail(Signal* signal)
@@ -10411,7 +10085,7 @@ void Dbtc::releaseGcp(Signal* signal)
cfirstfreeGcp = gcpPtr.i;
}//Dbtc::releaseGcp()
-void Dbtc::releaseKeys(Signal* signal)
+void Dbtc::releaseKeys()
{
UintR Tmp;
databufptr.i = cachePtr.p->firstKeybuf;
@@ -10445,12 +10119,12 @@ void Dbtc::seizeApiConnect(Signal* signal)
apiConnectptr.p->nextApiConnect = RNIL;
setApiConTimer(apiConnectptr.i, 0, __LINE__);
apiConnectptr.p->apiConnectstate = CS_CONNECTED; /* STATE OF CONNECTION */
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
} else {
jam();
terrorCode = ZNO_FREE_API_CONNECTION;
}//if
- apiConnectptr.p->triggerPending = false;
- apiConnectptr.p->isIndexOp = false;
}//Dbtc::seizeApiConnect()
void Dbtc::seizeApiConnectFail(Signal* signal)
@@ -10690,20 +10364,15 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
ScanFragRecPtr sfp;
sfp.i = recordNo;
- ptrAss(sfp, scanFragmentRecord);
- infoEvent("Dbtc::ScanFragRec[%d]: state=%d, status=%d, "
- "fragid=%d, procid=%d, ",
+ c_scan_frag_pool.getPtr(sfp);
+ infoEvent("Dbtc::ScanFragRec[%d]: state=%d fragid=%d",
sfp.i,
sfp.p->scanFragState,
- sfp.p->scanFragCompletedStatus,
- sfp.p->scanFragId,
- sfp.p->scanFragProcId);
- infoEvent(" nodeid=%d, ind=%d, concurr=%d, timer=%d, next=%d",
- sfp.p->scanFragNodeId,
- sfp.p->scanIndividual,
+ sfp.p->scanFragId);
+ infoEvent(" nodeid=%d, concurr=%d, timer=%d",
+ refToNode(sfp.p->lqhBlockref),
sfp.p->scanFragConcurrency,
- sfp.p->scanFragTimer,
- sfp.p->nextScanFrag);
+ sfp.p->scanFragTimer);
}
// Dump all ScanRecords
@@ -10770,11 +10439,10 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
ScanRecordPtr sp;
sp.i = recordNo;
ptrAss(sp, scanRecord);
- infoEvent("Dbtc::ScanRecord[%d]: state=%d, scanOprec=%d, "
+ infoEvent("Dbtc::ScanRecord[%d]: state=%d"
"nextfrag=%d, nofrag=%d",
sp.i,
sp.p->scanState,
- sp.p->noScanOprec,
sp.p->scanNextFragId,
sp.p->scanNoFrag);
infoEvent(" ailen=%d, para=%d, receivedop=%d, noOprePperFrag=%d",
@@ -10782,17 +10450,11 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
sp.p->scanParallel,
sp.p->scanReceivedOperations,
sp.p->noOprecPerFrag);
- infoEvent(" schv=%d, tab=%d, sproc=%d, noTI=%d, norecTI=%d",
+ infoEvent(" schv=%d, tab=%d, sproc=%d",
sp.p->scanSchemaVersion,
sp.p->scanTableref,
- sp.p->scanStoredProcId,
- sp.p->noScanTabInfo,
- sp.p->scanTabInfoReceived);
- infoEvent(" apiclosed=%d, noProcCompl=%d, "
- "complStat=%d, lhold=%d, lmode=%d",
- sp.p->apiIsClosed,
- sp.p->scanProcessesCompleted,
- sp.p->scanCompletedStatus,
+ sp.p->scanStoredProcId);
+ infoEvent(" lhold=%d, lmode=%d",
sp.p->scanLockHold,
sp.p->scanLockMode);
infoEvent(" apiRec=%d, next=%d",
@@ -10800,13 +10462,20 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
if (sp.p->scanState != ScanRecord::IDLE){
// Request dump of ScanFragRec
- for (Uint32 i = 0; i < 16; i++){
- if (sp.p->scanFragrec[i] != RNIL){
- dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec;
- dumpState->args[1] = sp.p->scanFragrec[i];
- execDUMP_STATE_ORD(signal);
- }
- }
+ ScanFragRecPtr sfptr;
+#define DUMP_SFR(x){\
+ ScanFragList list(c_scan_frag_pool, x);\
+ for(list.first(sfptr); !sfptr.isNull(); list.next(sfptr)){\
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec; \
+ dumpState->args[1] = sfptr.i;\
+ execDUMP_STATE_ORD(signal);\
+ }}
+
+ DUMP_SFR(sp.p->m_running_scan_frags);
+ DUMP_SFR(sp.p->m_queued_scan_frags);
+ DUMP_SFR(sp.p->m_delivered_scan_frags);
+ DUMP_SFR(sp.p->m_completed_scan_frags);
+
// Request dump of ApiConnectRecord
dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
dumpState->args[1] = sp.p->scanApiRec;
@@ -10883,6 +10552,13 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
set_timeout_value(signal->theData[1]);
}
}
+
+ if (dumpState->args[0] == DumpStateOrd::TcSetApplTransactionTimeout){
+ jam();
+ if(signal->getLength() > 1){
+ set_appl_timeout_value(signal->theData[1]);
+ }
+ }
}//Dbtc::execDUMP_STATE_ORD()
void Dbtc::execSET_VAR_REQ(Signal* signal)
@@ -11405,11 +11081,16 @@ void Dbtc::sendTcIndxConf(Signal* signal, UintR TcommitFlag)
UintR TcurrLen = localHostptr.p->noOfWordsTCINDXCONF;
UintR confInfo = 0;
TcIndxConf::setNoOfOperations(confInfo, (TopWords >> 1));
- TcIndxConf::setCommitFlag(confInfo, TcommitFlag);
+ TcIndxConf::setCommitFlag(confInfo, TcommitFlag == 1);
TcIndxConf::setMarkerFlag(confInfo, Tmarker);
const UintR TpacketLen = 6 + TopWords;
regApiPtr->tcindxrec = 0;
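+ // clear the exec flag once the transaction commits or all outstanding LQHKEYREQs are confirmed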
+ if(TcommitFlag || (regApiPtr->lqhkeyreqrec == regApiPtr->lqhkeyconfrec)){
+ jam();
+ regApiPtr->m_exec_flag = 0;
+ }
+
if ((TpacketLen > 25) || !is_api){
TcIndxConf * const tcIndxConf = (TcIndxConf *)signal->getDataPtrSend();
@@ -11653,6 +11334,8 @@ void Dbtc::execTCKEYCONF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ apiConnectptr.p = regApiPtr;
+ apiConnectptr.i = TconnectIndex;
switch(indexOp->indexOpState) {
case(IOS_NOOP): {
jam();
@@ -11952,7 +11635,8 @@ void Dbtc::readIndexTable(Signal* signal,
Uint32 transId1 = indexOp->tcIndxReq->transId1;
Uint32 transId2 = indexOp->tcIndxReq->transId2;
- const Uint8 opType = TcKeyReq::getOperationType(tcKeyRequestInfo);
+ const Operation_t opType =
+ (Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
// Find index table
if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq->indexId)) == NULL) {
@@ -12494,34 +12178,33 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
// Calculate key length and renumber attribute id:s
AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues);
+ bool skipNull = false;
for(bool moreKeyAttrs = afterValues.first(iter); moreKeyAttrs; attrId++) {
jam();
AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+ // Filter out NULL-valued attributes
+ if (attrHeader->isNULL()) {
+ skipNull = true;
+ break;
+ }
attrHeader->setAttributeId(attrId);
keyLength += attrHeader->getDataSize();
hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
moreKeyAttrs = afterValues.next(iter, hops);
}
-
- // Filter out single NULL attributes
- if (attrId == 1) {
+ if (skipNull) {
jam();
- afterValues.first(iter);
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
- if (attrHeader->isNULL() && !afterValues.next(iter)) {
- jam();
- opRecord->triggerExecutionCount--;
- if (opRecord->triggerExecutionCount == 0) {
- /*
- We have completed current trigger execution
- Continue triggering operation
- */
- jam();
- continueTriggeringOp(signal, opRecord);
- }//if
- return;
+ opRecord->triggerExecutionCount--;
+ if (opRecord->triggerExecutionCount == 0) {
+ /*
+ We have completed current trigger execution
+ Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opRecord);
}//if
+ return;
}//if
// Calculate total length of primary key to be stored in index table
@@ -12849,36 +12532,36 @@ void Dbtc::deleteFromIndexTable(Signal* signal,
// Calculate key length and renumber attribute id:s
AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues);
+ bool skipNull = false;
for(bool moreKeyAttrs = beforeValues.first(iter);
(moreKeyAttrs);
attrId++) {
jam();
AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+ // Filter out NULL-valued attributes
+ if (attrHeader->isNULL()) {
+ skipNull = true;
+ break;
+ }
attrHeader->setAttributeId(attrId);
keyLength += attrHeader->getDataSize();
hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
moreKeyAttrs = beforeValues.next(iter, hops);
}
- // Filter out single NULL attributes
- if (attrId == 1) {
+ if (skipNull) {
jam();
- beforeValues.first(iter);
- AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
- if (attrHeader->isNULL() && !beforeValues.next(iter)) {
- jam();
- opRecord->triggerExecutionCount--;
- if (opRecord->triggerExecutionCount == 0) {
- /*
+ opRecord->triggerExecutionCount--;
+ if (opRecord->triggerExecutionCount == 0) {
+ /*
We have completed current trigger execution
Continue triggering operation
- */
- jam();
- continueTriggeringOp(signal, opRecord);
- }//if
- return;
+ */
+ jam();
+ continueTriggeringOp(signal, opRecord);
}//if
+ return;
}//if
TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 4781230a311..b792edf9333 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -950,9 +950,6 @@ typedef Ptr<TableDescriptor> TableDescriptorPtr;
struct HostBuffer {
bool inPackedList;
- Uint32 packetLenRC;
- Uint32 noOfPacketsRC;
- Uint32 packetBufferRC[29];
Uint32 packetLenTA;
Uint32 noOfPacketsTA;
Uint32 packetBufferTA[30];
@@ -1017,9 +1014,15 @@ public:
void tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData);
/*
- * TUX reads primary key for md5 summing and when returning keyinfo.
+ * TUX reads primary key without headers into an array of words. Used
+ * for md5 summing and when returning keyinfo.
*/
- void tuxReadKeys(); // under construction
+ void tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData);
+
+ /*
+ * TUX checks if tuple is visible to scan.
+ */
+ bool tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId);
private:
BLOCK_DEFINES(Dbtup);
@@ -1065,9 +1068,6 @@ private:
void execTUP_WRITELOG_REQ(Signal* signal);
// Ordered index related
- void execTUP_READ_ATTRS(Signal* signal);
- void execTUP_QUERY_TH(Signal* signal);
- void execTUP_STORE_TH(Signal* signal);
void execBUILDINDXREQ(Signal* signal);
void buildIndex(Signal* signal, Uint32 buildPtrI);
void buildIndexReply(Signal* signal, const BuildIndexRec* buildRec);
@@ -1662,11 +1662,7 @@ private:
//------------------------------------------------------------------
//------------------------------------------------------------------
- void bufferREADCONF(Signal* signal, BlockReference aRef, Uint32* buffer, Uint32 Tlen);
-
-//------------------------------------------------------------------
-//------------------------------------------------------------------
- void bufferTRANSID_AI(Signal* signal, BlockReference aRef, Uint32* buffer, Uint32 Tlen);
+ void bufferTRANSID_AI(Signal* signal, BlockReference aRef, Uint32 Tlen);
//------------------------------------------------------------------
// Trigger handling routines
@@ -2326,10 +2322,15 @@ private:
// Counters for num UNDO log records executed
Uint32 cSrUndoRecords[9];
+ STATIC_CONST(MAX_PARALLELL_TUP_SRREQ = 2);
+ Uint32 c_sr_free_page_0;
+
Uint32 c_errorInsert4000TableId;
void initGlobalTemporaryVars();
void reportMemoryUsage(Signal* signal, int incDec);
+
+
#ifdef VM_TRACE
struct Th {
Uint32 data[1];
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
index 90c6dbc6802..ea46ee94fdc 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
@@ -35,7 +35,6 @@ void Dbtup::execSEND_PACKED(Signal* signal)
hostId = cpackedList[i];
ndbrequire((hostId - 1) < (MAX_NODES - 1)); // Also check not zero
Uint32 TpacketTA = hostBuffer[hostId].noOfPacketsTA;
- Uint32 TpacketRC = hostBuffer[hostId].noOfPacketsRC;
if (TpacketTA != 0) {
ljam();
BlockReference TBref = numberToRef(API_PACKED, hostId);
@@ -47,91 +46,20 @@ void Dbtup::execSEND_PACKED(Signal* signal)
hostBuffer[hostId].noOfPacketsTA = 0;
hostBuffer[hostId].packetLenTA = 0;
}//if
- if (TpacketRC != 0) {
- ljam();
- BlockReference TBref = numberToRef(API_PACKED, hostId);
- Uint32 TpacketLen = hostBuffer[hostId].packetLenRC;
- MEMCOPY_NO_WORDS(&signal->theData[0],
- &hostBuffer[hostId].packetBufferRC[0],
- TpacketLen);
- sendSignal(TBref, GSN_READCONF, signal, TpacketLen, JBB);
- hostBuffer[hostId].noOfPacketsRC = 0;
- hostBuffer[hostId].packetLenRC = 0;
- }//if
hostBuffer[hostId].inPackedList = false;
}//for
cpackedListIndex = 0;
}//Dbtup::execSEND_PACKED()
-void Dbtup::bufferREADCONF(Signal* signal, BlockReference aRef,
- Uint32* buffer, Uint32 Tlen)
-{
- Uint32 hostId = refToNode(aRef);
- Uint32 Theader = ((refToBlock(aRef) << 16) + (Tlen-3));
-
- ndbrequire(hostId < MAX_NODES);
- Uint32 TpacketLen = hostBuffer[hostId].packetLenRC;
- Uint32 TnoOfPackets = hostBuffer[hostId].noOfPacketsRC;
- Uint32 sig0 = signal->theData[0];
- Uint32 sig1 = signal->theData[1];
- Uint32 sig2 = signal->theData[2];
- Uint32 sig3 = signal->theData[3];
-
- BlockReference TBref = numberToRef(API_PACKED, hostId);
-
- if ((Tlen + TpacketLen + 1) <= 25) {
-// ----------------------------------------------------------------
-// There is still space in the buffer. We will copy it into the
-// buffer.
-// ----------------------------------------------------------------
- ljam();
- updatePackedList(signal, hostId);
- } else if (TnoOfPackets == 1) {
-// ----------------------------------------------------------------
-// The buffer is full and there was only one packet buffered. We
-// will send this as a normal signal.
-// ----------------------------------------------------------------
- Uint32 TnewRef = numberToRef((hostBuffer[hostId].packetBufferRC[0] >> 16),
- hostId);
- MEMCOPY_NO_WORDS(&signal->theData[0],
- &hostBuffer[hostId].packetBufferRC[1],
- TpacketLen - 1);
- sendSignal(TnewRef, GSN_READCONF, signal, (TpacketLen - 1), JBB);
- TpacketLen = 0;
- TnoOfPackets = 0;
- } else {
-// ----------------------------------------------------------------
-// The buffer is full but at least two packets. Send those in
-// packed form.
-// ----------------------------------------------------------------
- MEMCOPY_NO_WORDS(&signal->theData[0],
- &hostBuffer[hostId].packetBufferRC[0],
- TpacketLen);
- sendSignal(TBref, GSN_READCONF, signal, TpacketLen, JBB);
- TpacketLen = 0;
- TnoOfPackets = 0;
- }//if
-// ----------------------------------------------------------------
-// Copy the signal into the buffer
-// ----------------------------------------------------------------
- hostBuffer[hostId].packetBufferRC[TpacketLen + 0] = Theader;
- hostBuffer[hostId].packetBufferRC[TpacketLen + 1] = sig0;
- hostBuffer[hostId].packetBufferRC[TpacketLen + 2] = sig1;
- hostBuffer[hostId].packetBufferRC[TpacketLen + 3] = sig2;
- hostBuffer[hostId].packetBufferRC[TpacketLen + 4] = sig3;
- hostBuffer[hostId].noOfPacketsRC = TnoOfPackets + 1;
- hostBuffer[hostId].packetLenRC = Tlen + TpacketLen + 1;
- MEMCOPY_NO_WORDS(&hostBuffer[hostId].packetBufferRC[TpacketLen + 5],
- buffer,
- Tlen - 4);
-}//Dbtup::bufferREADCONF()
-
void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
- Uint32* buffer, Uint32 Tlen)
+ Uint32 Tlen)
{
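+ // a 3-word TRANSID_AI is header only (no attribute data), so there is nothing to buffer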
+ if(Tlen == 3)
+ return;
+
Uint32 hostId = refToNode(aRef);
Uint32 Theader = ((refToBlock(aRef) << 16)+(Tlen-3));
-
+
ndbrequire(hostId < MAX_NODES);
Uint32 TpacketLen = hostBuffer[hostId].packetLenTA;
Uint32 TnoOfPackets = hostBuffer[hostId].noOfPacketsTA;
@@ -148,7 +76,7 @@ void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
// ----------------------------------------------------------------
ljam();
updatePackedList(signal, hostId);
- } else if (TnoOfPackets == 1) {
+ } else if (false && TnoOfPackets == 1) {
// ----------------------------------------------------------------
// The buffer is full and there was only one packet buffered. We
// will send this as a normal signal.
@@ -183,7 +111,7 @@ void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
hostBuffer[hostId].noOfPacketsTA = TnoOfPackets + 1;
hostBuffer[hostId].packetLenTA = Tlen + TpacketLen + 1;
MEMCOPY_NO_WORDS(&hostBuffer[hostId].packetBufferTA[TpacketLen + 4],
- buffer,
+ &signal->theData[25],
Tlen - 3);
}//Dbtup::bufferTRANSID_AI()
@@ -206,124 +134,122 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
const Operationrec * const regOperPtr)
{
const BlockReference recBlockref = regOperPtr->recBlockref;
- bool toOwnNode = refToNode(recBlockref) == getOwnNodeId();
- bool connectedToNode = getNodeInfo(refToNode(recBlockref)).m_connected;
- const Uint32 type = getNodeInfo(refToNode(recBlockref)).m_type;
+ const Uint32 block = refToBlock(recBlockref);
+ const Uint32 nodeId = refToNode(recBlockref);
+
+ bool connectedToNode = getNodeInfo(nodeId).m_connected;
+ const Uint32 type = getNodeInfo(nodeId).m_type;
bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
- if (ERROR_INSERTED(4006)){
+ if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){
// Use error insert to turn routing on
ljam();
connectedToNode = false;
}
- if (!toOwnNode && !connectedToNode){
- /**
- * If this node does not have a direct connection
- * to the receiving node we want to send the signals
- * routed via the node that controls this read
- */
- Uint32 routeBlockref = regOperPtr->coordinatorTC;
-
+ Uint32 sig0 = regOperPtr->tcOperationPtr;
+ Uint32 sig1 = regOperPtr->transid1;
+ Uint32 sig2 = regOperPtr->transid2;
+
+ TransIdAI * transIdAI = (TransIdAI *)signal->getDataPtrSend();
+ transIdAI->connectPtr = sig0;
+ transIdAI->transId[0] = sig1;
+ transIdAI->transId[1] = sig2;
+
+ if (connectedToNode){
/**
- * Fill in a TRANSID_AI signal, use last word to store
- * final destination and send it to route node
- * as signal TRANSID_AI_R (R as in Routed)
- */
- TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
- transIdAI->connectPtr = regOperPtr->tcOperationPtr;
- transIdAI->transId[0] = regOperPtr->transid1;
- transIdAI->transId[1] = regOperPtr->transid2;
+ * Own node -> execute direct
+ */
+ if(nodeId != getOwnNodeId()){
+ ljam();
+
+ /**
+ * Send long sig
+ */
+ if(ToutBufIndex >= 22 && is_api && !old_dest) {
+ ljam();
+ LinearSectionPtr ptr[3];
+ ptr[0].p = &signal->theData[25];
+ ptr[0].sz = ToutBufIndex;
+ sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3, JBB, ptr, 1);
+ return;
+ }
- Uint32 tot = ToutBufIndex;
- Uint32 sent = 0;
- Uint32 maxLen = TransIdAI::DataLength - 1;
- while (sent < tot) {
- ljam();
- Uint32 dataLen = (tot - sent > maxLen) ? maxLen : tot - sent;
- Uint32 sigLen = dataLen + TransIdAI::HeaderLength + 1;
- MEMCOPY_NO_WORDS(&transIdAI->attrData,
- &coutBuffer[sent],
- dataLen);
- // Set final destination in last word
- transIdAI->attrData[dataLen] = recBlockref;
-
- sendSignal(routeBlockref, GSN_TRANSID_AI_R,
- signal, sigLen, JBB);
- sent += dataLen;
+ /**
+ * short sig + api -> buffer
+ */
+#ifndef NDB_NO_DROPPED_SIGNAL
+ if (ToutBufIndex < 22 && is_api){
+ ljam();
+ bufferTRANSID_AI(signal, recBlockref, 3+ToutBufIndex);
+ return;
+ }//if
+#endif
+
+ /**
+ * rest -> old send sig
+ */
+ Uint32 * src = signal->theData+25;
+ if(ToutBufIndex >= 22){
+ do {
+ ljam();
+ MEMCOPY_NO_WORDS(&signal->theData[3], src, 22);
+ sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB);
+ ToutBufIndex -= 22;
+ src += 22;
+ } while(ToutBufIndex >= 22);
+ }
+ if(ToutBufIndex > 0){
+ ljam();
+ MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex);
+ sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3+ToutBufIndex, JBB);
+ }
+ return;
}
+ EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex);
+ ljamEntry();
return;
}
- Uint32 TbufIndex = 0;
- Uint32 sig0 = regOperPtr->tcOperationPtr;
- Uint32 sig1 = regOperPtr->transid1;
- Uint32 sig2 = regOperPtr->transid2;
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = sig2;
-
- while (ToutBufIndex > 21) {
- ljam();
- MEMCOPY_NO_WORDS(&signal->theData[3],
- &coutBuffer[TbufIndex],
- 22);
- TbufIndex += 22;
- ToutBufIndex -= 22;
- const BlockReference sendBref = regOperPtr->recBlockref;
- if (refToNode(sendBref) != getOwnNodeId()) {
- ljam();
- sendSignal(sendBref, GSN_TRANSID_AI, signal, 25, JBB);
- ljam();
- } else {
- ljam();
- EXECUTE_DIRECT(refToBlock(sendBref), GSN_TRANSID_AI, signal, 25);
- ljamEntry();
- }//if
- }//while
-
- Uint32 TsigNumber;
- Uint32 TsigLen;
- Uint32 TdataIndex;
- if ((regOperPtr->opSimple == ZTRUE) &&
- (regOperPtr->optype == ZREAD)) {
- /* DIRTY OPERATIONS ARE ALSO SIMPLE */
- ljam();
- Uint32 sig3 = regOperPtr->attroutbufLen;
- TdataIndex = 4;
- TsigLen = 4 + ToutBufIndex;
- TsigNumber = GSN_READCONF;
- signal->theData[3] = sig3;
- if ((TsigLen < 18) && is_api){
- bufferREADCONF(signal, regOperPtr->recBlockref,
- &coutBuffer[TbufIndex], TsigLen);
- return;
- }//if
- } else if (ToutBufIndex > 0) {
- ljam();
- TdataIndex = 3;
- TsigLen = 3 + ToutBufIndex;
- TsigNumber = GSN_TRANSID_AI;
- if ((TsigLen < 18) && is_api){
- ljam();
- bufferTRANSID_AI(signal, regOperPtr->recBlockref,
- &coutBuffer[TbufIndex], TsigLen);
- return;
- }//if
- } else {
+ /**
+ * If this node does not have a direct connection
+ * to the receiving node we want to send the signals
+ * routed via the node that controls this read
+ */
+ Uint32 routeBlockref = regOperPtr->coordinatorTC;
+
+ if(true){ // TODO: is_api && !old_dest
ljam();
+ transIdAI->attrData[0] = recBlockref;
+ LinearSectionPtr ptr[3];
+ ptr[0].p = &signal->theData[25];
+ ptr[0].sz = ToutBufIndex;
+ sendSignal(routeBlockref, GSN_TRANSID_AI_R, signal, 4, JBB, ptr, 1);
return;
- }//if
- MEMCOPY_NO_WORDS(&signal->theData[TdataIndex],
- &coutBuffer[TbufIndex],
- ToutBufIndex);
- const BlockReference sendBref = regOperPtr->recBlockref;
- if (refToNode(sendBref) != getOwnNodeId()) {
- ljam();
- sendSignal(sendBref, TsigNumber, signal, TsigLen, JBB);
- } else {
- EXECUTE_DIRECT(refToBlock(sendBref), GSN_TRANSID_AI, signal, TsigLen);
- ljamEntry();
- }//if
+ }
+
+ /**
+ * Fill in a TRANSID_AI signal, use last word to store
+ * final destination and send it to route node
+ * as signal TRANSID_AI_R (R as in Routed)
+ */
+ Uint32 tot = ToutBufIndex;
+ Uint32 sent = 0;
+ Uint32 maxLen = TransIdAI::DataLength - 1;
+ while (sent < tot) {
+ ljam();
+ Uint32 dataLen = (tot - sent > maxLen) ? maxLen : tot - sent;
+ Uint32 sigLen = dataLen + TransIdAI::HeaderLength + 1;
+ MEMCOPY_NO_WORDS(&transIdAI->attrData,
+ &signal->theData[25+sent],
+ dataLen);
+ // Set final destination in last word
+ transIdAI->attrData[dataLen] = recBlockref;
+
+ sendSignal(routeBlockref, GSN_TRANSID_AI_R,
+ signal, sigLen, JBB);
+ sent += dataLen;
+ }
}//Dbtup::sendReadAttrinfo()
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
index c38fde23404..768a61655b5 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
@@ -201,6 +201,10 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
ndbrequire(chunk.pageCount <= alloc);
if(chunk.pageCount != 0){
chunks.push_back(chunk);
+ if(chunk.pageCount != alloc) {
+ ndbout_c(" Tried to allocate %d - only allocated %d - free: %d",
+ alloc, chunk.pageCount, free);
+ }
} else {
ndbout_c(" Failed to alloc %d pages with %d pages free",
alloc, free);
@@ -212,6 +216,9 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
ptrCheckGuard(pagePtr, cnoOfPage, page);
pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON;
}
+
+ if(alloc == 1 && free > 0)
+ ndbrequire(chunk.pageCount == alloc);
}
break;
}
@@ -238,11 +245,12 @@ void Dbtup::execMEMCHECKREQ(Signal* signal)
ljamEntry();
BlockReference blockref = signal->theData[0];
- for (Uint32 i = 0; i < 25; i++) {
+ Uint32 i;
+ for (i = 0; i < 25; i++) {
ljam();
data[i] = 0;
}//for
- for (Uint32 i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++) {
regPagePtr.i = cfreepageList[i];
ljam();
while (regPagePtr.i != RNIL) {
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 07bad00acf1..0dc196d5f56 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -873,6 +873,7 @@ int Dbtup::handleReadReq(Signal* signal,
Page* pagePtr)
{
Uint32 Ttupheadoffset = regOperPtr->pageOffset;
+ const BlockReference sendBref = regOperPtr->recBlockref;
if (regTabPtr->checksumIndicator &&
(calculateChecksum(pagePtr, Ttupheadoffset,
regTabPtr->tupheadsize) != 0)) {
@@ -882,14 +883,29 @@ int Dbtup::handleReadReq(Signal* signal,
return -1;
}//if
+ Uint32 * dst = &signal->theData[25];
+ Uint32 dstLen = (sizeof(signal->theData) / 4) - 25;
+ const Uint32 node = refToNode(sendBref);
+ if(node != 0 && node != getOwnNodeId()) {
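+ // remote receiver: leave the read data staged at word 25 for sendReadAttrinfo()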
+ ;
+ } else {
+ jam();
+ /**
+ * execute direct
+ */
+ dst = &signal->theData[3];
+ dstLen = (sizeof(signal->theData) / 4) - 3;
+ }
+
if (regOperPtr->interpretedExec != 1) {
jam();
+
Uint32 TnoOfDataRead = readAttributes(pagePtr,
Ttupheadoffset,
&cinBuffer[0],
regOperPtr->attrinbufLen,
- &coutBuffer[0],
- (Uint32)ZATTR_BUFFER_SIZE);
+ dst,
+ dstLen);
if (TnoOfDataRead != (Uint32)-1) {
/* ------------------------------------------------------------------------- */
// We have read all data into coutBuffer. Now send it to the API.
@@ -1214,11 +1230,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
Uint32 RattrinbufLen = regOperPtr->attrinbufLen;
const BlockReference sendBref = regOperPtr->recBlockref;
- Uint32 * dst = &coutBuffer[0];
- Uint32 dstLen = sizeof(coutBuffer) / 4;
- Uint32 * tmp = &signal->theData[3];
- Uint32 tmpLen = (sizeof(signal->theData) / 4) - 3;
- bool executeDirect = false;
+ Uint32 * dst = &signal->theData[25];
+ Uint32 dstLen = (sizeof(signal->theData) / 4) - 25;
const Uint32 node = refToNode(sendBref);
if(node != 0 && node != getOwnNodeId()) {
;
@@ -1227,12 +1240,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
/**
* execute direct
*/
- executeDirect = true;
dst = &signal->theData[3];
dstLen = (sizeof(signal->theData) / 4) - 3;
-
- tmp = &coutBuffer[0];
- tmpLen = sizeof(coutBuffer) / 4;
}
RtotalLen = RinitReadLen;
@@ -1292,8 +1301,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
RexecRegionLen,
&cinBuffer[RsubPC],
RsubLen,
- tmp,
- tmpLen);
+ &coutBuffer[0],
+ sizeof(coutBuffer) / 4);
if (TnoDataRW != (Uint32)-1) {
RinstructionCounter += RexecRegionLen;
RlogSize = TnoDataRW;
@@ -1350,20 +1359,7 @@ int Dbtup::interpreterStartLab(Signal* signal,
}//if
regOperPtr->logSize = RlogSize;
regOperPtr->attroutbufLen = RattroutCounter;
- if(!executeDirect) {
- jam();
- sendReadAttrinfo(signal, RattroutCounter, regOperPtr);
- } else {
- jam();
- Uint32 sig0 = regOperPtr->tcOperationPtr;
- Uint32 sig1 = regOperPtr->transid1;
- Uint32 sig2 = regOperPtr->transid2;
- signal->theData[0] = sig0;
- signal->theData[1] = sig1;
- signal->theData[2] = sig2;
- EXECUTE_DIRECT(refToBlock(sendBref), GSN_TRANSID_AI, signal,
- 3 + RattroutCounter);
- }//if
+ sendReadAttrinfo(signal, RattroutCounter, regOperPtr);
if (RlogSize > 0) {
sendLogAttrinfo(signal, RlogSize, regOperPtr);
}//if
@@ -1445,7 +1441,10 @@ int Dbtup::interpreterNextLab(Signal* signal,
register Uint32 theRegister;
Uint32 TdataWritten = 0;
Uint32 RstackPtr = 0;
- Uint32 TregMemBuffer[32];
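+ // union with Uint64 to guarantee 8-byte alignment for the 64-bit register accesses below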
+ union {
+ Uint32 TregMemBuffer[32];
+ Uint64 Tdummy[16];
+ };
Uint32 TstackMemBuffer[32];
/* ---------------------------------------------------------------- */
@@ -1496,19 +1495,23 @@ int Dbtup::interpreterNextLab(Signal* signal,
// word read. Thus we set the register to be a 32 bit register.
/* ------------------------------------------------------------- */
TregMemBuffer[theRegister] = 0x50;
- TregMemBuffer[theRegister + 2] = 0;
+ * (Int64*)(TregMemBuffer+theRegister+2) = TregMemBuffer[theRegister+1];
} else if (TnoDataRW == 3) {
/* ------------------------------------------------------------- */
// Three words read means that we get the instruction plus two
// 32 words read. Thus we set the register to be a 64 bit register.
/* ------------------------------------------------------------- */
TregMemBuffer[theRegister] = 0x60;
+ TregMemBuffer[theRegister+3] = TregMemBuffer[theRegister+2];
+ TregMemBuffer[theRegister+2] = TregMemBuffer[theRegister+1];
} else if (TnoDataRW == 1) {
/* ------------------------------------------------------------- */
// One word read means that we must have read a NULL value. We set
// the register to indicate a NULL value.
/* ------------------------------------------------------------- */
TregMemBuffer[theRegister] = 0;
+ TregMemBuffer[theRegister + 2] = 0;
+ TregMemBuffer[theRegister + 3] = 0;
} else if (TnoDataRW == (Uint32)-1) {
jam();
tupkeyErrorLab(signal);
@@ -1550,8 +1553,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
AttributeHeader& ah = AttributeHeader::init(&TdataForUpdate[0],
TattrId, TattrNoOfWords);
- TdataForUpdate[1] = TregMemBuffer[theRegister + 1];
- TdataForUpdate[2] = TregMemBuffer[theRegister + 2];
+ TdataForUpdate[1] = TregMemBuffer[theRegister + 2];
+ TdataForUpdate[2] = TregMemBuffer[theRegister + 3];
Tlen = TattrNoOfWords + 1;
if (Toptype == ZUPDATE) {
if (TattrNoOfWords <= 2) {
@@ -1597,24 +1600,22 @@ int Dbtup::interpreterNextLab(Signal* signal,
case Interpreter::LOAD_CONST16:
jam();
TregMemBuffer[theRegister] = 0x50; /* 32 BIT UNSIGNED CONSTANT */
- TregMemBuffer[theRegister + 1] = theInstruction >> 16;
- TregMemBuffer[theRegister + 2] = 0;
+ * (Int64*)(TregMemBuffer+theRegister+2) = theInstruction >> 16;
break;
case Interpreter::LOAD_CONST32:
jam();
TregMemBuffer[theRegister] = 0x50; /* 32 BIT UNSIGNED CONSTANT */
- TregMemBuffer[theRegister + 1] = TcurrentProgram[TprogramCounter];
- TregMemBuffer[theRegister + 2] = 0;
+ * (Int64*)(TregMemBuffer+theRegister+2) = *
+ (TcurrentProgram+TprogramCounter);
TprogramCounter++;
break;
case Interpreter::LOAD_CONST64:
jam();
TregMemBuffer[theRegister] = 0x60; /* 64 BIT UNSIGNED CONSTANT */
- TregMemBuffer[theRegister + 1] = TcurrentProgram[TprogramCounter + 0];
- TregMemBuffer[theRegister + 2] = TcurrentProgram[TprogramCounter + 1];
- TprogramCounter += 2;
+ TregMemBuffer[theRegister + 2 ] = * (TcurrentProgram + TprogramCounter++);
+ TregMemBuffer[theRegister + 3 ] = * (TcurrentProgram + TprogramCounter++);
break;
case Interpreter::ADD_REG_REG:
@@ -1624,27 +1625,16 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TdestRegister = Interpreter::getReg3(theInstruction) << 2;
Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
- Uint32 Tany64bit = (((TleftType | TrightType) & 0x60) == 0x60);
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
if ((TleftType | TrightType) != 0) {
- Uint32 Tdest0 = Tleft0 + Tright0;
- Uint32 Tdest1 = 0;
- TregMemBuffer[TdestRegister + 1] = Tdest0;
- TregMemBuffer[TdestRegister] = 0x50;
- if (Tany64bit) {
- TregMemBuffer[TdestRegister] = 0x60;
- Tdest1 = Tleft1 + Tright1;
- if (Tdest0 < Tleft0) {
- Tdest1++;
- }
- }//if
- TregMemBuffer[TdestRegister + 2] = Tdest1;
+ Uint64 Tdest0 = Tleft0 + Tright0;
+ * (Int64*)(TregMemBuffer+TdestRegister+2) = Tdest0;
+ TregMemBuffer[TdestRegister] = 0x60;
} else {
return TUPKEY_abort(signal, 20);
}
@@ -1658,30 +1648,18 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TdestRegister = Interpreter::getReg3(theInstruction) << 2;
Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
-
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
- Uint32 Tany64bit = (((TleftType | TrightType) & 0x60) == 0x60);
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
if ((TleftType | TrightType) != 0) {
- Uint32 Tdest0 = Tleft0 - Tright0;
- Uint32 Tdest1 = 0;
- TregMemBuffer[TdestRegister + 1] = Tdest0;
- TregMemBuffer[TdestRegister] = 0x50;
- if (Tany64bit) {
- TregMemBuffer[TdestRegister] = 0x60;
- Tdest1 = Tleft1 - Tright1;
- if (Tdest0 > Tleft0) {
- Tdest1--;
- }//if
- }//if
- TregMemBuffer[TdestRegister + 2] = Tdest1;
+ Int64 Tdest0 = Tleft0 - Tright0;
+ * (Int64*)(TregMemBuffer+TdestRegister+2) = Tdest0;
+ TregMemBuffer[TdestRegister] = 0x60;
} else {
- return TUPKEY_abort(signal, 21);
- }//if
+ return TUPKEY_abort(signal, 20);
+ }
break;
}
@@ -1715,12 +1693,12 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
+ Uint32 Tleft0 = TregMemBuffer[theRegister + 2];
+ Uint32 Tleft1 = TregMemBuffer[theRegister + 3];
Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
+ Uint32 Tright0 = TregMemBuffer[TrightRegister + 2];
+ Uint32 Tright1 = TregMemBuffer[TrightRegister + 3];
if ((TrightType | TleftType) != 0) {
jam();
if ((Tleft0 == Tright0) && (Tleft1 == Tright1)) {
@@ -1737,12 +1715,12 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
+ Uint32 Tleft0 = TregMemBuffer[theRegister + 2];
+ Uint32 Tleft1 = TregMemBuffer[theRegister + 3];
Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
+ Uint32 Tright0 = TregMemBuffer[TrightRegister + 2];
+ Uint32 Tright1 = TregMemBuffer[TrightRegister + 3];
if ((TrightType | TleftType) != 0) {
jam();
if ((Tleft0 != Tright0) || (Tleft1 != Tright1)) {
@@ -1758,17 +1736,16 @@ int Dbtup::interpreterNextLab(Signal* signal,
{
Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
- Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
if ((TrightType | TleftType) != 0) {
jam();
- if ((Tleft0 < Tright0) || ((Tleft0 == Tright0) &&
- (Tleft1 < Tright1))) {
+ if (Tleft0 < Tright0) {
TprogramCounter = brancher(theInstruction, TprogramCounter);
}//if
} else {
@@ -1781,17 +1758,16 @@ int Dbtup::interpreterNextLab(Signal* signal,
{
Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
- Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
if ((TrightType | TleftType) != 0) {
jam();
- if ((Tleft0 < Tright0) || ((Tleft0 == Tright0) &&
- (Tleft1 <= Tright1))) {
+ if (Tleft0 <= Tright0) {
TprogramCounter = brancher(theInstruction, TprogramCounter);
}//if
} else {
@@ -1804,17 +1780,16 @@ int Dbtup::interpreterNextLab(Signal* signal,
{
Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
- Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
if ((TrightType | TleftType) != 0) {
jam();
- if ((Tleft0 > Tright0) || ((Tleft0 == Tright0) &&
- (Tleft1 > Tright1))) {
+ if (Tleft0 > Tright0){
TprogramCounter = brancher(theInstruction, TprogramCounter);
}//if
} else {
@@ -1827,17 +1802,16 @@ int Dbtup::interpreterNextLab(Signal* signal,
{
Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
Uint32 TleftType = TregMemBuffer[theRegister];
- Uint32 Tleft0 = TregMemBuffer[theRegister + 1];
- Uint32 Tleft1 = TregMemBuffer[theRegister + 2];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
- Uint32 TrightType = TregMemBuffer[TrightRegister];
- Uint32 Tright0 = TregMemBuffer[TrightRegister + 1];
- Uint32 Tright1 = TregMemBuffer[TrightRegister + 2];
if ((TrightType | TleftType) != 0) {
jam();
- if ((Tleft0 > Tright0) || ((Tleft0 == Tright0) &&
- (Tleft1 >= Tright1))) {
+ if (Tleft0 >= Tright0){
TprogramCounter = brancher(theInstruction, TprogramCounter);
}//if
} else {
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 095ea412701..f5c3e2b4128 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -132,9 +132,6 @@ Dbtup::Dbtup(const class Configuration & conf)
addRecSignal(GSN_TUP_WRITELOG_REQ, &Dbtup::execTUP_WRITELOG_REQ);
// Ordered index related
- addRecSignal(GSN_TUP_READ_ATTRS, &Dbtup::execTUP_READ_ATTRS);
- addRecSignal(GSN_TUP_QUERY_TH, &Dbtup::execTUP_QUERY_TH);
- addRecSignal(GSN_TUP_STORE_TH, &Dbtup::execTUP_STORE_TH);
addRecSignal(GSN_BUILDINDXREQ, &Dbtup::execBUILDINDXREQ);
initData();
@@ -701,7 +698,8 @@ void Dbtup::initRecords()
page = (Page*)allocRecord("Page",
sizeof(Page),
- cnoOfPage);
+ cnoOfPage,
+ false);
pageRange = (PageRange*)allocRecord("PageRange",
sizeof(PageRange),
@@ -891,6 +889,7 @@ void Dbtup::initializeAttrbufrec()
AttrbufrecPtr attrBufPtr;
for (attrBufPtr.i = 0;
attrBufPtr.i < cnoOfAttrbufrec; attrBufPtr.i++) {
+ refresh_watch_dog();
ptrAss(attrBufPtr, attrbufrec);
attrBufPtr.p->attrbuf[ZBUF_NEXT] = attrBufPtr.i + 1;
}//for
@@ -947,6 +946,7 @@ void Dbtup::initializeFragrecord()
{
FragrecordPtr regFragPtr;
for (regFragPtr.i = 0; regFragPtr.i < cnoOfFragrec; regFragPtr.i++) {
+ refresh_watch_dog();
ptrAss(regFragPtr, fragrecord);
regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
regFragPtr.p->checkpointVersion = RNIL;
@@ -966,9 +966,7 @@ void Dbtup::initializeHostBuffer()
for (hostId = 0; hostId < MAX_NODES; hostId++) {
hostBuffer[hostId].inPackedList = false;
hostBuffer[hostId].noOfPacketsTA = 0;
- hostBuffer[hostId].noOfPacketsRC = 0;
hostBuffer[hostId].packetLenTA = 0;
- hostBuffer[hostId].packetLenRC = 0;
}//for
}//Dbtup::initializeHostBuffer()
@@ -987,6 +985,7 @@ void Dbtup::initializeOperationrec()
{
OperationrecPtr regOpPtr;
for (regOpPtr.i = 0; regOpPtr.i < cnoOfOprec; regOpPtr.i++) {
+ refresh_watch_dog();
ptrAss(regOpPtr, operationrec);
regOpPtr.p->firstAttrinbufrec = RNIL;
regOpPtr.p->lastAttrinbufrec = RNIL;
@@ -1041,6 +1040,7 @@ void Dbtup::initializeTablerec()
TablerecPtr regTabPtr;
for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) {
ljam();
+ refresh_watch_dog();
ptrAss(regTabPtr, tablerec);
initTab(regTabPtr.p);
}//for
@@ -1104,6 +1104,7 @@ void Dbtup::initializeTabDescr()
cfreeTdList[i] = RNIL;
}//for
for (regTabDesPtr.i = 0; regTabDesPtr.i < cnoOfTabDescrRec; regTabDesPtr.i++) {
+ refresh_watch_dog();
ptrAss(regTabDesPtr, tableDescriptor);
regTabDesPtr.p->tabDescr = RNIL;
}//for
@@ -1116,6 +1117,7 @@ void Dbtup::initializeUndoPage()
for (undoPagep.i = 0;
undoPagep.i < cnoOfUndoPage;
undoPagep.i = undoPagep.i + ZUB_SEGMENT_SIZE) {
+ refresh_watch_dog();
ptrAss(undoPagep, undoPage);
undoPagep.p->undoPageWord[ZPAGE_NEXT_POS] = undoPagep.i +
ZUB_SEGMENT_SIZE;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
index f11de5238e2..e7a431f17de 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -22,7 +22,6 @@
#include <AttributeDescriptor.hpp>
#include "AttributeOffset.hpp"
#include <AttributeHeader.hpp>
-#include <signaldata/TupAccess.hpp>
#include <signaldata/TuxMaint.hpp>
#define ljam() { jamLine(28000 + __LINE__); }
@@ -152,10 +151,10 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
for (Uint32 i = 0; i < numAttrs; i++) {
AttributeHeader ah(attrIds[i]);
- Uint32 attrId = ah.getAttributeId();
- Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
- Uint32 desc1 = tableDescriptor[index].tabDescr;
- Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
+ const Uint32 attrId = ah.getAttributeId();
+ const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
+ const Uint32 desc1 = tableDescriptor[index].tabDescr;
+ const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
if (AttributeDescriptor::getNullable(desc1)) {
Uint32 offset = AttributeOffset::getNullFlagOffset(desc2);
ndbrequire(offset < tablePtr.p->tupNullWords);
@@ -171,275 +170,78 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
}
}
-void // under construction
-Dbtup::tuxReadKeys()
-{
-}
-
-// deprecated signal interfaces
-
void
-Dbtup::execTUP_READ_ATTRS(Signal* signal)
+Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData)
{
ljamEntry();
- TupReadAttrs* const sig = (TupReadAttrs*)signal->getDataPtrSend();
- TupReadAttrs reqCopy = *sig;
- TupReadAttrs* const req = &reqCopy;
- req->errorCode = 0;
- // get table
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
TablerecPtr tablePtr;
- tablePtr.i = req->tableId;
+ tablePtr.i = fragPtr.p->fragTableId;
ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
- // get fragment
- FragrecordPtr fragPtr;
- if (req->fragPtrI == RNIL) {
- ljam();
- getFragmentrec(fragPtr, req->fragId, tablePtr.p);
- ndbrequire(fragPtr.i != RNIL);
- req->fragPtrI = fragPtr.i;
- } else {
- fragPtr.i = req->fragPtrI;
- ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
- ndbrequire(req->fragId == fragPtr.p->fragmentId);
- }
- // get page
PagePtr pagePtr;
- if (req->pageId == RNIL) {
- ljam();
- Uint32 fragPageId = req->tupAddr >> MAX_TUPLES_BITS;
- Uint32 pageIndex = req->tupAddr & ((1 << MAX_TUPLES_BITS ) - 1);
- ndbrequire((pageIndex & 0x1) == 0);
- // data returned for original tuple
- req->pageId = getRealpid(fragPtr.p, fragPageId);
- req->pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
- }
- pagePtr.i = req->pageId;
+ pagePtr.i = pageId;
ptrCheckGuard(pagePtr, cnoOfPage, page);
- Uint32 pageOffset = req->pageOffset;
- // search for tuple version if not original
- if (! (req->requestInfo & TupReadAttrs::ReadKeys) &&
- pagePtr.p->pageWord[pageOffset + 1] != req->tupVersion) {
- ljam();
- OperationrecPtr opPtr;
- opPtr.i = pagePtr.p->pageWord[pageOffset];
- Uint32 loopGuard = 0;
- while (true) {
- ptrCheckGuard(opPtr, cnoOfOprec, operationrec);
- if (opPtr.p->realPageIdC != RNIL) {
- pagePtr.i = opPtr.p->realPageIdC;
- pageOffset = opPtr.p->pageOffsetC;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- if (pagePtr.p->pageWord[pageOffset + 1] == req->tupVersion) {
- ljam();
- break;
- }
- }
- ljam();
- // next means before in event order
- opPtr.i = opPtr.p->nextActiveOp;
- ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
+ const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
+ const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
+ const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
+ const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
+ Uint32 size = 0;
+ for (Uint32 i = 0; i < numAttrs; i++) {
+ AttributeHeader ah(attrIds[i]);
+ const Uint32 attrId = ah.getAttributeId();
+ const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
+ const Uint32 desc1 = tableDescriptor[index].tabDescr;
+ const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
+ ndbrequire(! AttributeDescriptor::getNullable(desc1));
+ const Uint32 attrSize = AttributeDescriptor::getSizeInWords(desc1);
+ const Uint32* attrData = tupleHeader + AttributeOffset::getOffset(desc2);
+ for (Uint32 j = 0; j < attrSize; j++) {
+ pkData[size + j] = attrData[j];
}
+ size += attrSize;
}
- // shared buffer
- Uint32* buffer = (Uint32*)sig + TupReadAttrs::SignalLength;
- // if request is for keys then we create input section
- if (req->requestInfo & TupReadAttrs::ReadKeys) {
- ljam();
- buffer[0] = tablePtr.p->noOfKeyAttr;
- const Uint32* keyArray = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
- MEMCOPY_NO_WORDS(&buffer[1], keyArray, tablePtr.p->noOfKeyAttr);
- }
- Uint32 inBufLen = buffer[0];
- Uint32* inBuffer = &buffer[1];
- Uint32* outBuffer = &buffer[1 + inBufLen];
- Uint32 maxRead = ZATTR_BUFFER_SIZE;
- // save globals
- TablerecPtr tabptr_old = tabptr;
- FragrecordPtr fragptr_old = fragptr;
- OperationrecPtr operPtr_old = operPtr;
- // new globals
- tabptr = tablePtr;
- fragptr = fragPtr;
- operPtr.i = RNIL; // XXX check later
- operPtr.p = NULL;
- int ret = readAttributes(pagePtr.p, pageOffset, inBuffer, inBufLen, outBuffer, maxRead);
- // restore globals
- tabptr = tabptr_old;
- fragptr = fragptr_old;
- operPtr = operPtr_old;
- // check error
- if ((Uint32)ret == (Uint32)-1) {
- ljam();
- req->errorCode = terrorCode;
- }
- // copy back
- *sig = *req;
+ *pkSize = size;
}
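The new tuxReadKeys walks the key attributes via the table descriptor, requires them to be non-nullable, and appends their raw words to pkData, returning the total word count through pkSize. A rough standalone sketch of that copy loop, with an invented descriptor struct in place of tabDescr/AttributeDescriptor/AttributeOffset:

#include <cstdint>
#include <cstdio>

struct KeyAttrDesc { uint32_t sizeInWords; uint32_t offsetInTuple; };

static uint32_t readTableKeys(const uint32_t* tupleHeader,
                              const KeyAttrDesc* desc, uint32_t numAttrs,
                              uint32_t* pkData)
{
  uint32_t size = 0;
  for (uint32_t i = 0; i < numAttrs; i++) {
    const uint32_t attrSize = desc[i].sizeInWords;
    const uint32_t* attrData = tupleHeader + desc[i].offsetInTuple;
    for (uint32_t j = 0; j < attrSize; j++)
      pkData[size + j] = attrData[j];   // copy raw words, no attribute headers
    size += attrSize;
  }
  return size;                          // number of words written
}

int main()
{
  const uint32_t tuple[8] = {7, 11, 13, 17, 19, 23, 29, 31};
  const KeyAttrDesc desc[2] = {{1, 2}, {2, 5}};
  uint32_t pk[4];
  const uint32_t n = readTableKeys(tuple, desc, 2, pk);
  std::printf("%u words: %u %u %u\n", n, pk[0], pk[1], pk[2]);
  return 0;
}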
-void
-Dbtup::execTUP_QUERY_TH(Signal* signal)
+bool
+Dbtup::tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId)
{
ljamEntry();
- Operationrec tempOp;
- TupQueryTh* const req = (TupQueryTh*)signal->getDataPtrSend();
- Uint32 tableId = req->tableId;
- Uint32 fragId = req->fragId;
- Uint32 tupAddr = req->tupAddr;
- Uint32 req_tupVersion = req->tupVersion;
- Uint32 transid1 = req->transId1;
- Uint32 transid2 = req->transId2;
- Uint32 savePointId = req->savePointId;
- Uint32 ret_result = 0;
- // get table
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
TablerecPtr tablePtr;
- tablePtr.i = tableId;
+ tablePtr.i = fragPtr.p->fragTableId;
ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
- // get fragment
- FragrecordPtr fragPtr;
- getFragmentrec(fragPtr, fragId, tablePtr.p);
- ndbrequire(fragPtr.i != RNIL);
// get page
PagePtr pagePtr;
Uint32 fragPageId = tupAddr >> MAX_TUPLES_BITS;
Uint32 pageIndex = tupAddr & ((1 << MAX_TUPLES_BITS ) - 1);
-
+ // use temp op rec
+ Operationrec tempOp;
tempOp.fragPageId = fragPageId;
tempOp.pageIndex = pageIndex;
- tempOp.transid1 = transid1;
- tempOp.transid2 = transid2;
+ tempOp.transid1 = transId1;
+ tempOp.transid2 = transId2;
tempOp.savePointId = savePointId;
tempOp.optype = ZREAD;
tempOp.dirtyOp = 1;
if (getPage(pagePtr, &tempOp, fragPtr.p, tablePtr.p)) {
/*
- We use the normal getPage which will return the tuple to be used
- for this transaction and savepoint id. If its tuple version equals
- the requested then we have a visible tuple otherwise not.
+ * We use the normal getPage which will return the tuple to be used
+ * for this transaction and savepoint id. If its tuple version
+ * equals the requested one, then we have a visible tuple; otherwise not.
*/
ljam();
Uint32 read_tupVersion = pagePtr.p->pageWord[tempOp.pageOffset + 1];
- if (read_tupVersion == req_tupVersion) {
+ if (read_tupVersion == tupVersion) {
ljam();
- ret_result = 1;
- }
- }
- req->returnCode = ret_result;
- return;
-}
-
-void
-Dbtup::execTUP_STORE_TH(Signal* signal)
-{
- ljamEntry();
- TupStoreTh* const sig = (TupStoreTh*)signal->getDataPtrSend();
- TupStoreTh reqCopy = *sig;
- TupStoreTh* const req = &reqCopy;
- req->errorCode = 0;
- ndbrequire(req->tupVersion == 0);
- // get table
- TablerecPtr tablePtr;
- tablePtr.i = req->tableId;
- ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
- // offset to attribute 0
- Uint32 attrDescIndex = tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
- Uint32 attrDataOffset = AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr);
- // get fragment
- FragrecordPtr fragPtr;
- if (req->fragPtrI == RNIL) {
- ljam();
- getFragmentrec(fragPtr, req->fragId, tablePtr.p);
- ndbrequire(fragPtr.i != RNIL);
- req->fragPtrI = fragPtr.i;
- } else {
- fragPtr.i = req->fragPtrI;
- ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
- ndbrequire(req->fragId == fragPtr.p->fragmentId);
- }
- // handle each case
- switch (req->opCode) {
- case TupStoreTh::OpRead:
- ljam();
- {
- PagePtr pagePtr;
- if (req->pageId == RNIL) {
- ljam();
- Uint32 fragPageId = req->tupAddr >> MAX_TUPLES_BITS;
- Uint32 pageIndex = req->tupAddr & ((1 << MAX_TUPLES_BITS ) - 1);
- ndbrequire((pageIndex & 0x1) == 0);
- req->pageId = getRealpid(fragPtr.p, fragPageId);
- req->pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
- }
- pagePtr.i = req->pageId;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- Uint32* data = &pagePtr.p->pageWord[req->pageOffset] + attrDataOffset;
- Uint32* buffer = (Uint32*)sig + TupStoreTh::SignalLength;
- ndbrequire(req->dataOffset + req->dataSize <= tablePtr.p->tupheadsize);
- memcpy(buffer + req->dataOffset, data + req->dataOffset, req->dataSize << 2);
- }
- break;
- case TupStoreTh::OpInsert:
- ljam();
- {
- PagePtr pagePtr;
- if (! allocTh(fragPtr.p, tablePtr.p, NORMAL_PAGE, signal, req->pageOffset, pagePtr)) {
- ljam();
- req->errorCode = terrorCode;
- break;
- }
- req->pageId = pagePtr.i;
- Uint32 fragPageId = pagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
- Uint32 pageIndex = ((req->pageOffset - ZPAGE_HEADER_SIZE) / tablePtr.p->tupheadsize) << 1;
- req->tupAddr = (fragPageId << MAX_TUPLES_BITS) | pageIndex;
- ndbrequire(req->dataOffset + req->dataSize <= tablePtr.p->tupheadsize);
- Uint32* data = &pagePtr.p->pageWord[req->pageOffset] + attrDataOffset;
- Uint32* buffer = (Uint32*)sig + TupStoreTh::SignalLength;
- memcpy(data + req->dataOffset, buffer + req->dataOffset, req->dataSize << 2);
+ return true;
}
- break;
- case TupStoreTh::OpUpdate:
- ljam();
- {
- PagePtr pagePtr;
- if (req->pageId == RNIL) {
- ljam();
- Uint32 fragPageId = req->tupAddr >> MAX_TUPLES_BITS;
- Uint32 pageIndex = req->tupAddr & ((1 << MAX_TUPLES_BITS ) - 1);
- ndbrequire((pageIndex & 0x1) == 0);
- req->pageId = getRealpid(fragPtr.p, fragPageId);
- req->pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
- }
- pagePtr.i = req->pageId;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- Uint32* data = &pagePtr.p->pageWord[req->pageOffset] + attrDataOffset;
- Uint32* buffer = (Uint32*)sig + TupStoreTh::SignalLength;
- ndbrequire(req->dataOffset + req->dataSize <= tablePtr.p->tupheadsize);
- memcpy(data + req->dataOffset, buffer + req->dataOffset, req->dataSize << 2);
- }
- break;
- case TupStoreTh::OpDelete:
- ljam();
- {
- PagePtr pagePtr;
- if (req->pageId == RNIL) {
- ljam();
- Uint32 fragPageId = req->tupAddr >> MAX_TUPLES_BITS;
- Uint32 pageIndex = req->tupAddr & ((1 << MAX_TUPLES_BITS ) - 1);
- ndbrequire((pageIndex & 0x1) == 0);
- req->pageId = getRealpid(fragPtr.p, fragPageId);
- req->pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
- }
- pagePtr.i = req->pageId;
- ptrCheckGuard(pagePtr, cnoOfPage, page);
- freeTh(fragPtr.p, tablePtr.p, signal, pagePtr.p, req->pageOffset);
- // null location
- req->tupAddr = (Uint32)-1;
- req->pageId = RNIL;
- req->pageOffset = 0;
- }
- break;
}
- // copy back
- *sig = *req;
+ return false;
}
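tuxQueryTh now answers a plain bool: is the tuple version recorded in the index entry the one this transaction would see at its savepoint? A highly simplified sketch of that idea, assuming a two-version model in place of the real getPage() lookup:

#include <cstdint>
#include <cstdio>

struct TupleVersions {
  uint32_t committedVersion;
  bool     hasUncommitted;
  uint32_t uncommittedVersion;
  uint32_t ownerTransId1, ownerTransId2;  // owner of the uncommitted change
};

static bool tuxQueryVisible(const TupleVersions& t, uint32_t entryVersion,
                            uint32_t transId1, uint32_t transId2)
{
  uint32_t visible = t.committedVersion;
  if (t.hasUncommitted && t.ownerTransId1 == transId1 && t.ownerTransId2 == transId2)
    visible = t.uncommittedVersion;       // a transaction sees its own writes
  return visible == entryVersion;         // entry is alive only on exact match
}

int main()
{
  TupleVersions t = {3, true, 4, 100, 200};
  std::printf("%d %d\n",
              tuxQueryVisible(t, 4, 100, 200),   // owner sees version 4
              tuxQueryVisible(t, 4, 111, 222));  // others still see version 3
  return 0;
}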
// ordered index build
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp b/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
index b74b2c00e3e..f8f2b9bdbd2 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
@@ -344,6 +344,8 @@ void Dbtup::lcpSaveDataPageLab(Signal* signal, Uint32 ciIndex)
if (ciPtr.p->lcpTabPtr == c_errorInsert4000TableId) {
// Delay writing of data pages during LCP
ndbout << "Delay writing of data pages during LCP" << endl;
+ signal->theData[0] = ZCONT_SAVE_DP;
+ signal->theData[1] = ciIndex;
sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 1000, 2);
return;
}//if
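The LCP hunk fixes the delayed retry: the CONTINUEB payload (continuation code and checkpoint-info index) must be written into the signal before sendSignalWithDelay(), or the delayed copy carries stale data. A small sketch of the pattern with stand-in signal types; the numeric value used for ZCONT_SAVE_DP below is hypothetical:

#include <cstdint>
#include <cstdio>

struct Signal { uint32_t theData[25]; };

static void sendSignalWithDelay(Signal* s, uint32_t delayMs, uint32_t length)
{
  // A real implementation would copy 'length' words and deliver them later.
  std::printf("delay=%ums words=%u data0=%u data1=%u\n",
              delayMs, length, s->theData[0], s->theData[1]);
}

static void delaySavePage(Signal* signal, uint32_t contCode, uint32_t ciIndex)
{
  signal->theData[0] = contCode;   // e.g. ZCONT_SAVE_DP
  signal->theData[1] = ciIndex;    // which checkpoint info record to resume
  sendSignalWithDelay(signal, 1000, 2);
}

int main()
{
  Signal s = {};
  delaySavePage(&s, 5 /* hypothetical ZCONT_SAVE_DP value */, 7);
  return 0;
}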
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
index 410cafee161..9722aa437c0 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
@@ -123,6 +123,7 @@ void Dbtup::initializePage()
PagePtr pagePtr;
for (pagePtr.i = 0; pagePtr.i < cnoOfPage; pagePtr.i++) {
ljam();
+ refresh_watch_dog();
ptrAss(pagePtr, page);
pagePtr.p->pageWord[ZPAGE_PHYSICAL_INDEX] = pagePtr.i;
pagePtr.p->pageWord[ZPAGE_NEXT_POS] = pagePtr.i + 1;
@@ -138,20 +139,29 @@ void Dbtup::initializePage()
pagePtr.i = 0;
ptrAss(pagePtr, page);
pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON;
+
+ for(size_t j = 0; j<MAX_PARALLELL_TUP_SRREQ; j++){
+ pagePtr.i = 1+j;
+ ptrAss(pagePtr, page);
+ pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON;
+ }
- returnCommonArea(1, cnoOfPage - 1);
- cnoOfAllocatedPages = 1;
+ Uint32 tmp = 1 + MAX_PARALLELL_TUP_SRREQ;
+ returnCommonArea(tmp, cnoOfPage - tmp);
+ cnoOfAllocatedPages = tmp; // Is updated by returnCommonArea
+ c_sr_free_page_0 = ~0;
}//Dbtup::initializePage()
void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
Uint32& noOfPagesAllocated,
Uint32& allocPageRef)
{
- if (noOfPagesToAllocate == 0) {
+ if (noOfPagesToAllocate == 0){
ljam();
noOfPagesAllocated = 0;
return;
}//if
+
Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1);
for (Uint32 i = firstListToCheck; i < 16; i++) {
ljam();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
index 580d764c96f..ed835dc057a 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
@@ -92,12 +92,25 @@ void Dbtup::rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr)
seizeDiskBufferSegmentRecord(dbsiPtr);
riPtr.p->sriDataBufferSegmentP = dbsiPtr.i;
- Uint32 retPageRef;
+ Uint32 retPageRef = RNIL;
Uint32 noAllocPages = 1;
Uint32 noOfPagesAllocated;
- allocConsPages(noAllocPages, noOfPagesAllocated, retPageRef);
- ndbrequire(noOfPagesAllocated == 1);
-
+ {
+ /**
+ * Use low pages for 0-pages during SR
+ * bitmask of free pages is kept in c_sr_free_page_0
+ */
+ Uint32 tmp = c_sr_free_page_0;
+ for(Uint32 i = 1; i<(1+MAX_PARALLELL_TUP_SRREQ); i++){
+ if(tmp & (1 << i)){
+ retPageRef = i;
+ c_sr_free_page_0 = tmp & (~(1 << i));
+ break;
+ }
+ }
+ ndbrequire(retPageRef != RNIL);
+ }
+
dbsiPtr.p->pdxDataPage[0] = retPageRef;
dbsiPtr.p->pdxNumDataPages = 1;
dbsiPtr.p->pdxFilePage = 0;
@@ -150,7 +163,10 @@ Dbtup::rfrInitRestartInfoLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr)
/* LETS REMOVE IT AND REUSE THE SEGMENT FOR REAL DATA PAGES */
/* REMOVE ONE PAGE ONLY, PAGEP IS ALREADY SET TO THE RESTART INFO PAGE */
/************************************************************************/
- returnCommonArea(pagePtr.i, 1);
+ {
+ ndbrequire(pagePtr.i > 0 && pagePtr.i <= MAX_PARALLELL_TUP_SRREQ);
+ c_sr_free_page_0 |= (1 << pagePtr.i);
+ }
Uint32 undoFileVersion = TzeroDataPage[ZSRI_UNDO_FILE_VER];
lliPtr.i = (undoFileVersion << 2) + (regTabPtr.i & 0x3);
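Pages 1..MAX_PARALLELL_TUP_SRREQ are now held back from the common area and handed out during system restart through single bits in c_sr_free_page_0, then returned by setting the bit again. A standalone sketch of that tiny bitmask pool; the constant's value and the RNIL sentinel value are assumptions for the sketch:

#include <cstdint>
#include <cassert>
#include <cstdio>

static const uint32_t MAX_PARALLELL_TUP_SRREQ = 4;   // value assumed for the sketch
static const uint32_t RNIL = 0xffffff00;              // "nothing" sentinel (value assumed)

struct SrPagePool {
  uint32_t c_sr_free_page_0 = ~0u;                    // all reserved pages start free

  uint32_t alloc() {
    for (uint32_t i = 1; i < 1 + MAX_PARALLELL_TUP_SRREQ; i++) {
      if (c_sr_free_page_0 & (1u << i)) {             // first free reserved page
        c_sr_free_page_0 &= ~(1u << i);
        return i;
      }
    }
    return RNIL;                                      // none free; caller would ndbrequire
  }

  void release(uint32_t pageId) {
    assert(pageId > 0 && pageId <= MAX_PARALLELL_TUP_SRREQ);
    c_sr_free_page_0 |= (1u << pageId);               // hand the page back to the mask
  }
};

int main()
{
  SrPagePool pool;
  uint32_t p = pool.alloc();
  std::printf("got reserved page %u\n", p);
  pool.release(p);
  return 0;
}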
@@ -494,16 +510,17 @@ void Dbtup::readExecUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr,
Uint32 dataPages[16];
ndbrequire(dbsiPtr.p->pdxFilePage > 0);
ndbrequire(dbsiPtr.p->pdxFilePage <= ZUB_SEGMENT_SIZE);
- for (Uint32 i = 0; i < dbsiPtr.p->pdxFilePage; i++) {
+ Uint32 i;
+ for (i = 0; i < dbsiPtr.p->pdxFilePage; i++) {
ljam();
dataPages[i] = dbsiPtr.p->pdxDataPage[i + ZUB_SEGMENT_SIZE];
}//for
- for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ for (i = 0; i < ZUB_SEGMENT_SIZE; i++) {
ljam();
dataPages[i + dbsiPtr.p->pdxFilePage] = dbsiPtr.p->pdxDataPage[i];
}//for
Uint32 limitLoop = ZUB_SEGMENT_SIZE + dbsiPtr.p->pdxFilePage;
- for (Uint32 i = 0; i < limitLoop; i++) {
+ for (i = 0; i < limitLoop; i++) {
ljam();
dbsiPtr.p->pdxDataPage[i] = dataPages[i];
}//for
@@ -977,7 +994,8 @@ void Dbtup::allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoP
seizeDiskBufferSegmentRecord(dbsiPtr);
dbsiPtr.p->pdxBuffertype = UNDO_RESTART_PAGES;
dbsiPtr.p->pdxUndoBufferSet[0] = undoPagePtr.i;
- for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ Uint32 i;
+ for (i = 0; i < ZUB_SEGMENT_SIZE; i++) {
dbsiPtr.p->pdxDataPage[i] = undoPagePtr.i + i;
}//for
@@ -994,7 +1012,7 @@ void Dbtup::allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoP
undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
dbsiPtr.p->pdxUndoBufferSet[1] = undoPagePtr.i;
// lliPtr.p->lliUndoPage = undoPagePtr.i;
- for (Uint32 i = ZUB_SEGMENT_SIZE; i < (2 * ZUB_SEGMENT_SIZE); i++) {
+ for (i = ZUB_SEGMENT_SIZE; i < (2 * ZUB_SEGMENT_SIZE); i++) {
dbsiPtr.p->pdxDataPage[i] = undoPagePtr.i + (i - ZUB_SEGMENT_SIZE);
}//for
return;
diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index 25e85ba9f5f..1a3c7f64ac3 100644
--- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -37,7 +37,6 @@
#include <signaldata/AlterIndx.hpp>
#include <signaldata/DropTab.hpp>
#include <signaldata/TuxMaint.hpp>
-#include <signaldata/TupAccess.hpp>
#include <signaldata/AccScan.hpp>
#include <signaldata/TuxBound.hpp>
#include <signaldata/NextScan.hpp>
@@ -77,10 +76,14 @@
#define jam() jamLine(60000 + __LINE__)
#define jamEntry() jamEntryLine(60000 + __LINE__)
#endif
-#ifdef DBTUX_CMP_CPP
+#ifdef DBTUX_SEARCH_CPP
#define jam() jamLine(70000 + __LINE__)
#define jamEntry() jamEntryLine(70000 + __LINE__)
#endif
+#ifdef DBTUX_CMP_CPP
+#define jam() jamLine(80000 + __LINE__)
+#define jamEntry() jamEntryLine(80000 + __LINE__)
+#endif
#ifdef DBTUX_DEBUG_CPP
#define jam() jamLine(90000 + __LINE__)
#define jamEntry() jamEntryLine(90000 + __LINE__)
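DbtuxSearch.cpp gets its own jam line base (70000) and DbtuxCmp.cpp moves to 80000, so a single jam value still decodes to a file plus a line. A trivial sketch of that encoding:

#include <cstdio>

static void jamLine(unsigned v) { std::printf("jam %u\n", v); }

// Per-file bases as in the patch; the decoder maps the ten-thousands range back to a file.
#define DBTUX_SEARCH_BASE 70000u
#define DBTUX_CMP_BASE    80000u

#define jamSearch() jamLine(DBTUX_SEARCH_BASE + __LINE__)
#define jamCmp()    jamLine(DBTUX_CMP_BASE + __LINE__)

int main()
{
  jamSearch();   // would decode to DbtuxSearch.cpp at this line
  jamCmp();      // would decode to DbtuxCmp.cpp at this line
  return 0;
}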
@@ -112,6 +115,7 @@ public:
static const unsigned DescPageSize = 256;
private:
static const unsigned MaxTreeNodeSize = MAX_TTREE_NODE_SIZE;
+ static const unsigned MaxPrefSize = MAX_TTREE_PREF_SIZE;
static const unsigned ScanBoundSegmentSize = 7;
static const unsigned MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN;
BLOCK_DEFINES(Dbtux);
@@ -206,19 +210,19 @@ private:
unsigned m_fragBit : 1; // which duplicated table fragment
TreeEnt();
// methods
+ bool eq(const TreeEnt ent) const;
int cmp(const TreeEnt ent) const;
};
static const unsigned TreeEntSize = sizeof(TreeEnt) >> 2;
static const TreeEnt NullTreeEnt;
/*
- * Tree node has 1) fixed part 2) actual table data for min and max
- * prefix 3) max and min entries 4) rest of entries 5) one extra entry
+ * Tree node has 1) fixed part 2) a prefix of index key data for min
+ * entry 3) max and min entries 4) rest of entries 5) one extra entry
* used as work space.
*
* struct TreeNode part 1, size 6 words
* min prefix part 2, size TreeHead::m_prefSize
- * max prefix part 2, size TreeHead::m_prefSize
* max entry part 3
* min entry part 3
* rest of entries part 4
@@ -265,14 +269,14 @@ private:
friend struct TreeHead;
struct TreeHead {
Uint8 m_nodeSize; // words in tree node
- Uint8 m_prefSize; // words in min/max prefix each
+ Uint8 m_prefSize; // words in min prefix
Uint8 m_minOccup; // min entries in internal node
Uint8 m_maxOccup; // max entries in node
TupLoc m_root; // root node
TreeHead();
// methods
unsigned getSize(AccSize acc) const;
- Data getPref(TreeNode* node, unsigned i) const;
+ Data getPref(TreeNode* node) const;
TreeEnt* getEntList(TreeNode* node) const;
};
@@ -442,6 +446,7 @@ private:
Uint32 m_descPage; // descriptor page
Uint16 m_descOff; // offset within the page
Uint16 m_numAttrs;
+ bool m_storeNullKey;
union {
Uint32 nextPool;
};
@@ -465,6 +470,7 @@ private:
Uint32 m_descPage; // copy from index level
Uint16 m_descOff;
Uint16 m_numAttrs;
+ bool m_storeNullKey;
TreeHead m_tree;
TupLoc m_freeLoc; // one node pre-allocated for insert
DLList<ScanOp> m_scanList; // current scans on this fragment
@@ -514,6 +520,8 @@ private:
NodeHandle(Frag& frag);
NodeHandle(const NodeHandle& node);
NodeHandle& operator=(const NodeHandle& node);
+ // check if unassigned
+ bool isNull();
// getters
TupLoc getLink(unsigned i);
unsigned getChilds(); // cannot spell
@@ -528,56 +536,13 @@ private:
void setBalance(int b);
void setNodeScan(Uint32 scanPtrI);
// access other parts of the node
- Data getPref(unsigned i);
+ Data getPref();
TreeEnt getEnt(unsigned pos);
TreeEnt getMinMax(unsigned i);
// for ndbrequire and ndbassert
void progError(int line, int cause, const char* file);
};
- // parameters for methods
-
- /*
- * Copy attribute data.
- */
- struct CopyPar {
- unsigned m_items; // number of attributes
- bool m_headers; // copy headers flag (default true)
- unsigned m_maxwords; // limit size (default no limit)
- // output
- unsigned m_numitems; // number of attributes fully copied
- unsigned m_numwords; // number of words copied
- CopyPar();
- };
-
- /*
- * Read index key attributes.
- */
- struct ReadPar;
- friend struct ReadPar;
- struct ReadPar {
- TreeEnt m_ent; // tuple to read
- unsigned m_first; // first index attribute
- unsigned m_count; // number of consecutive index attributes
- Data m_data; // set pointer if 0 else copy result to it
- unsigned m_size; // number of words (set in read keys only)
- ReadPar();
- };
-
- /*
- * Scan bound comparison.
- */
- struct BoundPar;
- friend struct BoundPar;
- struct BoundPar {
- ConstData m_data1; // full bound data
- ConstData m_data2; // full or prefix data
- unsigned m_count1; // number of bounds
- unsigned m_len2; // words in data2 buffer
- unsigned m_dir; // 0-lower bound 1-upper bound
- BoundPar();
- };
-
// methods
/*
@@ -589,7 +554,7 @@ private:
// utils
void setKeyAttrs(const Frag& frag);
void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData);
- void copyAttrs(Data dst, ConstData src, CopyPar& copyPar);
+ void readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData);
void copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
/*
@@ -607,8 +572,6 @@ private:
* DbtuxMaint.cpp
*/
void execTUX_MAINT_REQ(Signal* signal);
- void tupReadAttrs(Signal* signal, const Frag& frag, ReadPar& readPar);
- void tupReadKeys(Signal* signal, const Frag& frag, ReadPar& readPar);
/*
* DbtuxNode.cpp
@@ -618,7 +581,7 @@ private:
void selectNode(Signal* signal, NodeHandle& node, TupLoc loc, AccSize acc);
void insertNode(Signal* signal, NodeHandle& node, AccSize acc);
void deleteNode(Signal* signal, NodeHandle& node);
- void setNodePref(Signal* signal, NodeHandle& node, unsigned i);
+ void setNodePref(Signal* signal, NodeHandle& node);
// node operations
void nodePushUp(Signal* signal, NodeHandle& node, unsigned pos, const TreeEnt& ent);
void nodePopDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent);
@@ -633,7 +596,6 @@ private:
/*
* DbtuxTree.cpp
*/
- void treeSearch(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
void treeAdd(Signal* signal, Frag& frag, TreePos treePos, TreeEnt ent);
void treeRemove(Signal* signal, Frag& frag, TreePos treePos);
void treeRotateSingle(Signal* signal, Frag& frag, NodeHandle& node, unsigned i);
@@ -658,11 +620,19 @@ private:
void releaseScanOp(ScanOpPtr& scanPtr);
/*
+ * DbtuxSearch.cpp
+ */
+ void searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
+
+ /*
* DbtuxCmp.cpp
*/
- int cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, ConstData data2, unsigned maxlen2 = MaxAttrDataSize);
- int cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, TableData data2);
- int cmpScanBound(const Frag& frag, const BoundPar boundPar);
+ int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
+ int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey);
+ int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
+ int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey);
/*
* DbtuxDebug.cpp
@@ -675,6 +645,7 @@ private:
TupLoc m_parent; // expected parent address
int m_depth; // returned depth
unsigned m_occup; // returned occupancy
+ TreeEnt m_minmax[2]; // returned subtree min and max
bool m_ok; // returned status
PrintPar();
};
@@ -699,6 +670,8 @@ private:
DebugTree = 4, // log and check tree after each op
DebugScan = 8 // log scans
};
+ static const int DataFillByte = 0xa2;
+ static const int NodeFillByte = 0xa4;
#endif
// start up info
@@ -859,13 +832,18 @@ Dbtux::TreeEnt::TreeEnt() :
{
}
+inline bool
+Dbtux::TreeEnt::eq(const TreeEnt ent) const
+{
+ return
+ m_tupLoc == ent.m_tupLoc &&
+ m_tupVersion == ent.m_tupVersion &&
+ m_fragBit == ent.m_fragBit;
+}
+
inline int
Dbtux::TreeEnt::cmp(const TreeEnt ent) const
{
- if (m_fragBit < ent.m_fragBit)
- return -1;
- if (m_fragBit > ent.m_fragBit)
- return +1;
if (m_tupLoc.m_pageId < ent.m_tupLoc.m_pageId)
return -1;
if (m_tupLoc.m_pageId > ent.m_tupLoc.m_pageId)
@@ -878,6 +856,10 @@ Dbtux::TreeEnt::cmp(const TreeEnt ent) const
return -1;
if (m_tupVersion > ent.m_tupVersion)
return +1;
+ if (m_fragBit < ent.m_fragBit)
+ return -1;
+ if (m_fragBit > ent.m_fragBit)
+ return +1;
return 0;
}
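TreeEnt gains an exact-equality helper and its ordering changes: tuple location and version now come first and the fragment bit is only the final tie-breaker. A standalone sketch of the resulting comparison, with plain fields instead of the packed bitfields:

#include <cstdint>
#include <cstdio>

struct Ent {
  uint32_t pageId, pageOffset, tupVersion, fragBit;

  bool eq(const Ent& o) const {
    return pageId == o.pageId && pageOffset == o.pageOffset &&
           tupVersion == o.tupVersion && fragBit == o.fragBit;
  }

  int cmp(const Ent& o) const {
    if (pageId != o.pageId)         return pageId < o.pageId ? -1 : +1;
    if (pageOffset != o.pageOffset) return pageOffset < o.pageOffset ? -1 : +1;
    if (tupVersion != o.tupVersion) return tupVersion < o.tupVersion ? -1 : +1;
    if (fragBit != o.fragBit)       return fragBit < o.fragBit ? -1 : +1;
    return 0;
  }
};

int main()
{
  Ent a = {1, 8, 0, 0}, b = {1, 8, 0, 1};
  std::printf("cmp=%d eq=%d\n", a.cmp(b), a.eq(b));
  return 0;
}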
@@ -920,25 +902,25 @@ Dbtux::TreeHead::getSize(AccSize acc) const
case AccHead:
return NodeHeadSize;
case AccPref:
- return NodeHeadSize + 2 * m_prefSize + 2 * TreeEntSize;
+ return NodeHeadSize + m_prefSize + 2 * TreeEntSize;
case AccFull:
return m_nodeSize;
}
- REQUIRE(false, "invalid Dbtux::AccSize");
+ abort();
return 0;
}
inline Dbtux::Data
-Dbtux::TreeHead::getPref(TreeNode* node, unsigned i) const
+Dbtux::TreeHead::getPref(TreeNode* node) const
{
- Uint32* ptr = (Uint32*)node + NodeHeadSize + i * m_prefSize;
+ Uint32* ptr = (Uint32*)node + NodeHeadSize;
return ptr;
}
inline Dbtux::TreeEnt*
Dbtux::TreeHead::getEntList(TreeNode* node) const
{
- Uint32* ptr = (Uint32*)node + NodeHeadSize + 2 * m_prefSize;
+ Uint32* ptr = (Uint32*)node + NodeHeadSize + m_prefSize;
return (TreeEnt*)ptr;
}
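With only one (min-entry) prefix per node, the layout becomes head, prefix, entry list, and getPref()/getEntList() reduce to simple offsets into the flat node. A sketch of those offset computations with invented sizes:

#include <cstdint>
#include <cstdio>

struct Layout {
  uint32_t nodeHeadSize;   // words in the fixed part
  uint32_t prefSize;       // words in the single min-entry prefix

  uint32_t* getPref(uint32_t* node) const {
    return node + nodeHeadSize;                 // prefix follows the head
  }
  uint32_t* getEntList(uint32_t* node) const {
    return node + nodeHeadSize + prefSize;      // entries follow the one prefix
  }
};

int main()
{
  uint32_t node[32] = {};
  Layout l = {6, 4};
  std::printf("pref at +%ld, ents at +%ld\n",
              (long)(l.getPref(node) - node), (long)(l.getEntList(node) - node));
  return 0;
}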
@@ -1013,7 +995,8 @@ Dbtux::Index::Index() :
m_numFrags(0),
m_descPage(RNIL),
m_descOff(0),
- m_numAttrs(0)
+ m_numAttrs(0),
+ m_storeNullKey(false)
{
for (unsigned i = 0; i < MaxIndexFragments; i++) {
m_fragId[i] = ZNIL;
@@ -1032,6 +1015,7 @@ Dbtux::Frag::Frag(ArrayPool<ScanOp>& scanOpPool) :
m_descPage(RNIL),
m_descOff(0),
m_numAttrs(ZNIL),
+ m_storeNullKey(false),
m_tree(),
m_freeLoc(),
m_scanList(scanOpPool),
@@ -1087,6 +1071,12 @@ Dbtux::NodeHandle::operator=(const NodeHandle& node)
return *this;
}
+inline bool
+Dbtux::NodeHandle::isNull()
+{
+ return m_node == 0;
+}
+
inline Dbtux::TupLoc
Dbtux::NodeHandle::getLink(unsigned i)
{
@@ -1161,11 +1151,11 @@ Dbtux::NodeHandle::setNodeScan(Uint32 scanPtrI)
}
inline Dbtux::Data
-Dbtux::NodeHandle::getPref(unsigned i)
+Dbtux::NodeHandle::getPref()
{
TreeHead& tree = m_frag.m_tree;
- ndbrequire(m_acc >= AccPref && i <= 1);
- return tree.getPref(m_node, i);
+ ndbrequire(m_acc >= AccPref);
+ return tree.getPref(m_node);
}
inline Dbtux::TreeEnt
@@ -1193,36 +1183,6 @@ Dbtux::NodeHandle::getMinMax(unsigned i)
// parameters for methods
-inline
-Dbtux::CopyPar::CopyPar() :
- m_items(0),
- m_headers(true),
- m_maxwords(~0), // max unsigned
- // output
- m_numitems(0),
- m_numwords(0)
-{
-}
-
-inline
-Dbtux::ReadPar::ReadPar() :
- m_first(0),
- m_count(0),
- m_data(0),
- m_size(0)
-{
-}
-
-inline
-Dbtux::BoundPar::BoundPar() :
- m_data1(0),
- m_data2(0),
- m_count1(0),
- m_len2(0),
- m_dir(255)
-{
-}
-
#ifdef VM_TRACE
inline
Dbtux::PrintPar::PrintPar() :
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
index 7601a14a242..1b8755a1dc4 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
@@ -25,14 +25,14 @@
* prefix may be partial in which case CmpUnknown may be returned.
*/
int
-Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, ConstData data2, unsigned maxlen2)
+Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen)
{
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
// number of words of attribute data left
- unsigned len2 = maxlen2;
+ unsigned len2 = maxlen;
// skip to right position in search key
- data1 += start;
+ searchKey += start;
int ret = 0;
while (start < numAttrs) {
if (len2 < AttributeHeaderSize) {
@@ -41,20 +41,20 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, ConstDat
break;
}
len2 -= AttributeHeaderSize;
- if (*data1 != 0) {
- if (! data2.ah().isNULL()) {
+ if (*searchKey != 0) {
+ if (! entryData.ah().isNULL()) {
jam();
// current attribute
const DescAttr& descAttr = descEnt.m_descAttr[start];
const unsigned typeId = descAttr.m_typeId;
// full data size
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
- ndbrequire(size1 != 0 && size1 == data2.ah().getDataSize());
+ ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
const unsigned size2 = min(size1, len2);
len2 -= size2;
// compare
- const Uint32* const p1 = *data1;
- const Uint32* const p2 = &data2[AttributeHeaderSize];
+ const Uint32* const p1 = *searchKey;
+ const Uint32* const p2 = &entryData[AttributeHeaderSize];
ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size2);
if (ret != 0) {
jam();
@@ -62,20 +62,20 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, ConstDat
}
} else {
jam();
- // not NULL < NULL
- ret = -1;
+ // not NULL > NULL
+ ret = +1;
break;
}
} else {
- if (! data2.ah().isNULL()) {
+ if (! entryData.ah().isNULL()) {
jam();
- // NULL > not NULL
- ret = +1;
+ // NULL < not NULL
+ ret = -1;
break;
}
}
- data1 += 1;
- data2 += AttributeHeaderSize + data2.ah().getDataSize();
+ searchKey += 1;
+ entryData += AttributeHeaderSize + entryData.ah().getDataSize();
start++;
}
// XXX until data format errors are handled
@@ -89,17 +89,17 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, ConstDat
* Start position is updated as in previous routine.
*/
int
-Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, TableData data2)
+Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey)
{
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
// skip to right position
- data1 += start;
- data2 += start;
+ searchKey += start;
+ entryKey += start;
int ret = 0;
while (start < numAttrs) {
- if (*data1 != 0) {
- if (*data2 != 0) {
+ if (*searchKey != 0) {
+ if (*entryKey != 0) {
jam();
// current attribute
const DescAttr& descAttr = descEnt.m_descAttr[start];
@@ -107,8 +107,8 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, TableDat
// full data size
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
// compare
- const Uint32* const p1 = *data1;
- const Uint32* const p2 = *data2;
+ const Uint32* const p1 = *searchKey;
+ const Uint32* const p2 = *entryKey;
ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size1);
if (ret != 0) {
jam();
@@ -116,20 +116,20 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, TableDat
}
} else {
jam();
- // not NULL < NULL
- ret = -1;
+ // not NULL > NULL
+ ret = +1;
break;
}
} else {
- if (*data2 != 0) {
+ if (*entryKey != 0) {
jam();
- // NULL > not NULL
- ret = +1;
+ // NULL < not NULL
+ ret = -1;
break;
}
}
- data1 += 1;
- data2 += 1;
+ searchKey += 1;
+ entryKey += 1;
start++;
}
// XXX until data format errors are handled
@@ -137,94 +137,96 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData data1, TableDat
return ret;
}
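Both cmpSearchKey overloads flip the NULL ordering: a NULL attribute now collates before any non-NULL value. A minimal sketch of an attribute comparison under that rule; the word-wise compare stands in for NdbSqlUtil::cmp's type-aware comparison:

#include <cstdint>
#include <cstdio>

struct Attr { const uint32_t* data; uint32_t sizeInWords; };  // data == nullptr means NULL

static int cmpAttr(const Attr& a, const Attr& b)
{
  if (a.data == nullptr || b.data == nullptr) {
    if (a.data == b.data) return 0;        // both NULL compare equal
    return a.data == nullptr ? -1 : +1;    // NULL < not NULL
  }
  for (uint32_t i = 0; i < a.sizeInWords && i < b.sizeInWords; i++) {
    if (a.data[i] != b.data[i])
      return a.data[i] < b.data[i] ? -1 : +1;
  }
  return 0;
}

int main()
{
  const uint32_t x = 5, y = 9;
  Attr nul = {nullptr, 0}, five = {&x, 1}, nine = {&y, 1};
  std::printf("%d %d %d\n", cmpAttr(nul, five), cmpAttr(five, nine), cmpAttr(nine, five));
  return 0;
}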
-
/*
- * Scan bound vs tree entry.
+ * Scan bound vs node prefix.
*
* Compare lower or upper bound and index attribute data. The attribute
* data may be partial in which case CmpUnknown may be returned.
- * Returns -1 if the boundary is to the left of the compared key and +1 if
- * the boundary is to the right of the compared key.
+ * Returns -1 if the boundary is to the left of the compared key and +1
+ * if the boundary is to the right of the compared key.
*
- * To get this behaviour we treat equality a little bit special.
- * If the boundary is a lower bound then the boundary is to the left of all
- * equal keys and if it is an upper bound then the boundary is to the right
- * of all equal keys.
+ * To get this behaviour we treat equality a little bit special. If the
+ * boundary is a lower bound then the boundary is to the left of all
+ * equal keys and if it is an upper bound then the boundary is to the
+ * right of all equal keys.
*
* When searching for the first key we are using the lower bound to try
- * to find the first key that is to the right of the boundary.
- * Then we start scanning from this tuple (including the tuple itself)
- * until we find the first key which is to the right of the boundary. Then
- * we stop and do not include that key in the scan result.
+ * to find the first key that is to the right of the boundary. Then we
+ * start scanning from this tuple (including the tuple itself) until we
+ * find the first key which is to the right of the boundary. Then we
+ * stop and do not include that key in the scan result.
*/
int
-Dbtux::cmpScanBound(const Frag& frag, const BoundPar boundPar)
+Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen)
{
- unsigned type = 4;
- int ret = 0;
- /*
- No boundary means full scan, low boundary is to the right of all keys.
- Thus we should always return -1. For upper bound we are to the right of
- all keys, thus we should always return +1. We achieve this behaviour
- by initialising return value to 0 and set type to 4.
- */
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- ConstData data1 = boundPar.m_data1;
- ConstData data2 = boundPar.m_data2;
// direction 0-lower 1-upper
- const unsigned dir = boundPar.m_dir;
ndbrequire(dir <= 1);
// number of words of data left
- unsigned len2 = boundPar.m_len2;
- for (unsigned i = 0; i < boundPar.m_count1; i++) {
+ unsigned len2 = maxlen;
+ /*
+ * No boundary means full scan, low boundary is to the left of all
+ * keys. Thus we should always return -1. For upper bound we are to
+ * the right of all keys, thus we should always return +1. We achieve
+ * this behaviour by initializing type to 4.
+ */
+ unsigned type = 4;
+ while (boundCount != 0) {
if (len2 < AttributeHeaderSize) {
jam();
return NdbSqlUtil::CmpUnknown;
}
len2 -= AttributeHeaderSize;
// get and skip bound type
- type = data1[0];
- data1 += 1;
- ndbrequire(! data1.ah().isNULL());
- if (! data2.ah().isNULL()) {
- jam();
- // current attribute
- const unsigned index = data1.ah().getAttributeId();
- const DescAttr& descAttr = descEnt.m_descAttr[index];
- const unsigned typeId = descAttr.m_typeId;
- ndbrequire(data2.ah().getAttributeId() == descAttr.m_primaryAttrId);
- // full data size
- const unsigned size1 = data1.ah().getDataSize();
- ndbrequire(size1 != 0 && size1 == data2.ah().getDataSize());
- const unsigned size2 = min(size1, len2);
- len2 -= size2;
- // compare
- const Uint32* const p1 = &data1[AttributeHeaderSize];
- const Uint32* const p2 = &data2[AttributeHeaderSize];
- ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size2);
- if (ret != 0) {
+ type = boundInfo[0];
+ boundInfo += 1;
+ if (! boundInfo.ah().isNULL()) {
+ if (! entryData.ah().isNULL()) {
jam();
- return ret;
+ // current attribute
+ const unsigned index = boundInfo.ah().getAttributeId();
+ const DescAttr& descAttr = descEnt.m_descAttr[index];
+ const unsigned typeId = descAttr.m_typeId;
+ ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
+ // full data size
+ const unsigned size1 = boundInfo.ah().getDataSize();
+ ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
+ const unsigned size2 = min(size1, len2);
+ len2 -= size2;
+ // compare
+ const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
+ const Uint32* const p2 = &entryData[AttributeHeaderSize];
+ int ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size2);
+ // XXX until data format errors are handled
+ ndbrequire(ret != NdbSqlUtil::CmpError);
+ if (ret != 0) {
+ jam();
+ return ret;
+ }
+ } else {
+ jam();
+ // not NULL > NULL
+ return +1;
}
} else {
jam();
- /*
- NULL is bigger than any bound, thus the boundary is always to the
- left of NULL
- */
- return -1;
+ if (! entryData.ah().isNULL()) {
+ jam();
+ // NULL < not NULL
+ return -1;
+ }
}
- data1 += AttributeHeaderSize + data1.ah().getDataSize();
- data2 += AttributeHeaderSize + data2.ah().getDataSize();
+ boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize();
+ entryData += AttributeHeaderSize + entryData.ah().getDataSize();
+ boundCount -= 1;
}
- ndbassert(ret == 0);
if (dir == 0) {
jam();
/*
- Looking for the lower bound. If strict lower bound then the boundary is
- to the right of the compared key and otherwise (equal included in range)
- then the boundary is to the left of the key.
- */
+ * Looking for the lower bound. If strict lower bound then the
+ * boundary is to the right of the compared key and otherwise (equal
+ * included in range) then the boundary is to the left of the key.
+ */
if (type == 1) {
jam();
return +1;
@@ -233,10 +235,11 @@ Dbtux::cmpScanBound(const Frag& frag, const BoundPar boundPar)
} else {
jam();
/*
- Looking for the upper bound. If strict upper bound then the boundary is
- to the left of all equal keys and otherwise (equal included in the
- range) then the boundary is to the right of all equal keys.
- */
+ * Looking for the upper bound. If strict upper bound then the
+ * boundary is to the left of all equal keys and otherwise (equal
+ * included in the range) then the boundary is to the right of all
+ * equal keys.
+ */
if (type == 3) {
jam();
return -1;
@@ -245,3 +248,72 @@ Dbtux::cmpScanBound(const Frag& frag, const BoundPar boundPar)
}
}
+/*
+ * Scan bound vs tree entry.
+ */
+int
+Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey)
+{
+ const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
+ // direction 0-lower 1-upper
+ ndbrequire(dir <= 1);
+ // initialize type to equality
+ unsigned type = 4;
+ while (boundCount != 0) {
+ // get and skip bound type
+ type = boundInfo[0];
+ boundInfo += 1;
+ if (! boundInfo.ah().isNULL()) {
+ if (*entryKey != 0) {
+ jam();
+ // current attribute
+ const unsigned index = boundInfo.ah().getAttributeId();
+ const DescAttr& descAttr = descEnt.m_descAttr[index];
+ const unsigned typeId = descAttr.m_typeId;
+ // full data size
+ const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
+ // compare
+ const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
+ const Uint32* const p2 = *entryKey;
+ int ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size1);
+ // XXX until data format errors are handled
+ ndbrequire(ret != NdbSqlUtil::CmpError);
+ if (ret != 0) {
+ jam();
+ return ret;
+ }
+ } else {
+ jam();
+ // not NULL > NULL
+ return +1;
+ }
+ } else {
+ jam();
+ if (*entryKey != 0) {
+ jam();
+ // NULL < not NULL
+ return -1;
+ }
+ }
+ boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize();
+ entryKey += 1;
+ boundCount -= 1;
+ }
+ if (dir == 0) {
+ // lower bound
+ jam();
+ if (type == 1) {
+ jam();
+ return +1;
+ }
+ return -1;
+ } else {
+ // upper bound
+ jam();
+ if (type == 3) {
+ jam();
+ return -1;
+ }
+ return +1;
+ }
+}
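The rewritten cmpScanBound keeps the old placement rule once all bound attributes compare equal: a strict lower bound (type 1 here) sits to the right of the key, a non-strict one to the left, and symmetrically for upper bounds with type 3. A sketch of just that final decision, mirroring the patch's type convention:

#include <cstdio>

// dir: 0 = lower bound, 1 = upper bound; type: last bound type seen (4 = none)
static int boundSideOnEqual(unsigned dir, unsigned type)
{
  if (dir == 0)
    return (type == 1) ? +1 : -1;    // strict lower bound excludes equal keys
  return (type == 3) ? -1 : +1;      // strict upper bound excludes equal keys
}

int main()
{
  std::printf("lower strict:%d lower incl:%d upper strict:%d upper incl:%d\n",
              boundSideOnEqual(0, 1), boundSideOnEqual(0, 2),
              boundSideOnEqual(1, 3), boundSideOnEqual(1, 4));
  return 0;
}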
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
index c4931685305..11f4f12b7f6 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
@@ -137,16 +137,17 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
par.m_ok = false;
}
}
+ static const char* const sep = " *** ";
// check child-parent links
if (node.getLink(2) != par.m_parent) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "parent loc " << hex << node.getLink(2);
out << " should be " << hex << par.m_parent << endl;
}
if (node.getSide() != par.m_side) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "side " << dec << node.getSide();
out << " should be " << dec << par.m_side << endl;
}
@@ -154,26 +155,26 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
const int balance = -cpar[0].m_depth + cpar[1].m_depth;
if (node.getBalance() != balance) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "balance " << node.getBalance();
out << " should be " << balance << endl;
}
if (abs(node.getBalance()) > 1) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "balance " << node.getBalance() << " is invalid" << endl;
}
// check occupancy
- if (node.getOccup() > tree.m_maxOccup) {
+ if (node.getOccup() == 0 || node.getOccup() > tree.m_maxOccup) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "occupancy " << node.getOccup();
- out << " greater than max " << tree.m_maxOccup << endl;
+ out << " zero or greater than max " << tree.m_maxOccup << endl;
}
// check for occupancy of interior node
if (node.getChilds() == 2 && node.getOccup() < tree.m_minOccup) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "occupancy " << node.getOccup() << " of interior node";
out << " less than min " << tree.m_minOccup << endl;
}
@@ -183,13 +184,74 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
node.getLink(1 - i) == NullTupLoc &&
node.getOccup() + cpar[i].m_occup <= tree.m_maxOccup) {
par.m_ok = false;
- out << par.m_path << " *** ";
+ out << par.m_path << sep;
out << "missed merge with child " << i << endl;
}
}
+ // check inline prefix
+ { ConstData data1 = node.getPref();
+ Uint32 data2[MaxPrefSize];
+ memset(data2, DataFillByte, MaxPrefSize << 2);
+ readKeyAttrs(frag, node.getMinMax(0), 0, c_searchKey);
+ copyAttrs(frag, c_searchKey, data2, tree.m_prefSize);
+ for (unsigned n = 0; n < tree.m_prefSize; n++) {
+ if (data1[n] != data2[n]) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "inline prefix mismatch word " << n;
+ out << " value " << hex << data1[n];
+ out << " should be " << hex << data2[n] << endl;
+ break;
+ }
+ }
+ }
+ // check ordering within node
+ for (unsigned j = 1; j < node.getOccup(); j++) {
+ unsigned start = 0;
+ const TreeEnt ent1 = node.getEnt(j - 1);
+ const TreeEnt ent2 = node.getEnt(j);
+ if (j == 1) {
+ readKeyAttrs(frag, ent1, start, c_searchKey);
+ } else {
+ memcpy(c_searchKey, c_entryKey, frag.m_numAttrs << 2);
+ }
+ readKeyAttrs(frag, ent2, start, c_entryKey);
+ int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
+ if (ret == 0)
+ ret = ent1.cmp(ent2);
+ if (ret != -1) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << " disorder within node at pos " << j << endl;
+ }
+ }
+ // check ordering wrt subtrees
+ for (unsigned i = 0; i <= 1; i++) {
+ if (node.getLink(i) == NullTupLoc)
+ continue;
+ const TreeEnt ent1 = cpar[i].m_minmax[1 - i];
+ const TreeEnt ent2 = node.getMinMax(i);
+ unsigned start = 0;
+ readKeyAttrs(frag, ent1, start, c_searchKey);
+ readKeyAttrs(frag, ent2, start, c_entryKey);
+ int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
+ if (ret == 0)
+ ret = ent1.cmp(ent2);
+ if (ret != (i == 0 ? -1 : +1)) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << " disorder wrt subtree " << i << endl;
+ }
+ }
// return values
par.m_depth = 1 + max(cpar[0].m_depth, cpar[1].m_depth);
par.m_occup = node.getOccup();
+ for (unsigned i = 0; i <= 1; i++) {
+ if (node.getLink(i) == NullTupLoc)
+ par.m_minmax[i] = node.getMinMax(i);
+ else
+ par.m_minmax[i] = cpar[i].m_minmax[i];
+ }
}
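printNode now also verifies ordering inside a node: consecutive entries must be strictly increasing under the key comparison, with the entry itself as tie-breaker. A reduced sketch of that invariant check:

#include <cstdio>

struct E { int key; int tie; };

static int cmpEnt(const E& a, const E& b)
{
  if (a.key != b.key) return a.key < b.key ? -1 : +1;
  if (a.tie != b.tie) return a.tie < b.tie ? -1 : +1;
  return 0;
}

static bool nodeOrdered(const E* ents, unsigned occup)
{
  for (unsigned j = 1; j < occup; j++)
    if (cmpEnt(ents[j - 1], ents[j]) != -1)   // each entry must be strictly less than the next
      return false;
  return true;
}

int main()
{
  E ents[3] = {{1, 0}, {1, 1}, {2, 0}};
  std::printf("ordered=%d\n", nodeOrdered(ents, 3));
  return 0;
}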
NdbOut&
@@ -355,20 +417,19 @@ operator<<(NdbOut& out, const Dbtux::NodeHandle& node)
out << " [acc " << dec << node.m_acc << "]";
out << " [node " << *node.m_node << "]";
if (node.m_acc >= Dbtux::AccPref) {
- for (unsigned i = 0; i <= 1; i++) {
- out << " [pref " << dec << i;
- const Uint32* data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + i * tree.m_prefSize;
- for (unsigned j = 0; j < node.m_frag.m_tree.m_prefSize; j++)
- out << " " << hex << data[j];
- out << "]";
- }
+ const Uint32* data;
+ out << " [pref";
+ data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize;
+ for (unsigned j = 0; j < tree.m_prefSize; j++)
+ out << " " << hex << data[j];
+ out << "]";
out << " [entList";
unsigned numpos = node.m_node->m_occup;
if (node.m_acc < Dbtux::AccFull && numpos > 2) {
numpos = 2;
out << "(" << dec << numpos << ")";
}
- const Uint32* data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + 2 * tree.m_prefSize;
+ data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + tree.m_prefSize;
const Dbtux::TreeEnt* entList = (const Dbtux::TreeEnt*)data;
for (unsigned pos = 0; pos < numpos; pos++)
out << " " << entList[pos];
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index 93a5c78338c..f6f1610c8c1 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -26,8 +26,13 @@ Dbtux::Dbtux(const Configuration& conf) :
#ifdef VM_TRACE
debugFile(0),
debugOut(*new NullOutputStream()),
+ // until ndb_mgm supports dump
+#ifdef DBTUX_DEBUG_TREE
+ debugFlags(DebugTree),
+#else
debugFlags(0),
#endif
+#endif
c_internalStartPhase(0),
c_typeOfStart(NodeState::ST_ILLEGAL_TYPE),
c_dataBuffer(0)
@@ -187,6 +192,7 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
IndexPtr indexPtr;
while (1) {
jam();
+ refresh_watch_dog();
c_indexPool.seize(indexPtr);
if (indexPtr.i == RNIL) {
jam();
@@ -241,37 +247,14 @@ Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData key
}
void
-Dbtux::copyAttrs(Data dst, ConstData src, CopyPar& copyPar)
+Dbtux::readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData)
{
- CopyPar c = copyPar;
- c.m_numitems = 0;
- c.m_numwords = 0;
- while (c.m_numitems < c.m_items) {
- jam();
- if (c.m_headers) {
- unsigned i = 0;
- while (i < AttributeHeaderSize) {
- if (c.m_numwords >= c.m_maxwords) {
- copyPar = c;
- return;
- }
- dst[c.m_numwords++] = src[i++];
- }
- }
- unsigned size = src.ah().getDataSize();
- src += AttributeHeaderSize;
- unsigned i = 0;
- while (i < size) {
- if (c.m_numwords >= c.m_maxwords) {
- copyPar = c;
- return;
- }
- dst[c.m_numwords++] = src[i++];
- }
- src += size;
- c.m_numitems++;
- }
- copyPar = c;
+ const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
+ const TupLoc tupLoc = ent.m_tupLoc;
+ Uint32 size = 0;
+ c_tup->tuxReadKeys(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, &size, pkData);
+ ndbrequire(size != 0);
+ pkSize = size;
}
/*
@@ -314,6 +297,9 @@ Dbtux::copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2
keyAttrs += 1;
data1 += 1;
}
+#ifdef VM_TRACE
+ memset(data2, DataFillByte, len2 << 2);
+#endif
}
BLOCK_FUNCTIONS(Dbtux);
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
index fc72611a273..24b030bf8ec 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
@@ -82,8 +82,8 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
ent.m_fragBit = fragBit;
// read search key
readKeyAttrs(frag, ent, 0, c_searchKey);
- // check if all keys are null
- {
+ if (! frag.m_storeNullKey) {
+ // check if all keys are null
const unsigned numAttrs = frag.m_numAttrs;
bool allNull = true;
for (unsigned i = 0; i < numAttrs; i++) {
@@ -111,19 +111,18 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
debugOut << endl;
}
#endif
- // find position in tree
- TreePos treePos;
- treeSearch(signal, frag, c_searchKey, ent, treePos);
-#ifdef VM_TRACE
- if (debugFlags & DebugMaint) {
- debugOut << treePos << endl;
- }
-#endif
// do the operation
req->errorCode = 0;
+ TreePos treePos;
switch (opCode) {
case TuxMaintReq::OpAdd:
jam();
+ searchToAdd(signal, frag, c_searchKey, ent, treePos);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMaint) {
+ debugOut << treePos << endl;
+ }
+#endif
if (treePos.m_match) {
jam();
// there is no "Building" state so this will have to do
@@ -152,6 +151,12 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
break;
case TuxMaintReq::OpRemove:
jam();
+ searchToRemove(signal, frag, c_searchKey, ent, treePos);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMaint) {
+ debugOut << treePos << endl;
+ }
+#endif
if (! treePos.m_match) {
jam();
// there is no "Building" state so this will have to do
@@ -167,7 +172,6 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
ndbrequire(false);
break;
}
- // commit and release nodes
#ifdef VM_TRACE
if (debugFlags & DebugTree) {
printTree(signal, frag, debugOut);
@@ -176,89 +180,3 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
// copy back
*sig = *req;
}
-
-/*
- * Read index key attributes from TUP. If buffer is provided the data
- * is copied to it. Otherwise pointer is set to signal data.
- */
-void
-Dbtux::tupReadAttrs(Signal* signal, const Frag& frag, ReadPar& readPar)
-{
- // define the direct signal
- const TreeEnt ent = readPar.m_ent;
- TupReadAttrs* const req = (TupReadAttrs*)signal->getDataPtrSend();
- req->errorCode = RNIL;
- req->requestInfo = 0;
- req->tableId = frag.m_tableId;
- req->fragId = frag.m_fragId | (ent.m_fragBit << frag.m_fragOff);
- req->fragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
- req->tupAddr = (Uint32)-1;
- req->tupVersion = ent.m_tupVersion;
- req->pageId = ent.m_tupLoc.m_pageId;
- req->pageOffset = ent.m_tupLoc.m_pageOffset;
- req->bufferId = 0;
- // add count and list of attribute ids
- Data data = (Uint32*)req + TupReadAttrs::SignalLength;
- data[0] = readPar.m_count;
- data += 1;
- const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- for (Uint32 i = 0; i < readPar.m_count; i++) {
- jam();
- const DescAttr& descAttr = descEnt.m_descAttr[readPar.m_first + i];
- data.ah() = AttributeHeader(descAttr.m_primaryAttrId, 0);
- data += 1;
- }
- // execute
- EXECUTE_DIRECT(DBTUP, GSN_TUP_READ_ATTRS, signal, TupReadAttrs::SignalLength);
- jamEntry();
- ndbrequire(req->errorCode == 0);
- // data is at output
- if (readPar.m_data == 0) {
- readPar.m_data = data;
- } else {
- jam();
- CopyPar copyPar;
- copyPar.m_items = readPar.m_count;
- copyPar.m_headers = true;
- copyAttrs(readPar.m_data, data, copyPar);
- }
-}
-
-/*
- * Read primary keys. Copy the data without attribute headers into the
- * given buffer. Number of words is returned in ReadPar argument.
- */
-void
-Dbtux::tupReadKeys(Signal* signal, const Frag& frag, ReadPar& readPar)
-{
- // define the direct signal
- const TreeEnt ent = readPar.m_ent;
- TupReadAttrs* const req = (TupReadAttrs*)signal->getDataPtrSend();
- req->errorCode = RNIL;
- req->requestInfo = TupReadAttrs::ReadKeys;
- req->tableId = frag.m_tableId;
- req->fragId = frag.m_fragId | (ent.m_fragBit << frag.m_fragOff);
- req->fragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
- req->tupAddr = (Uint32)-1;
- req->tupVersion = RNIL; // not used
- req->pageId = ent.m_tupLoc.m_pageId;
- req->pageOffset = ent.m_tupLoc.m_pageOffset;
- req->bufferId = 0;
- // execute
- EXECUTE_DIRECT(DBTUP, GSN_TUP_READ_ATTRS, signal, TupReadAttrs::SignalLength);
- jamEntry();
- ndbrequire(req->errorCode == 0);
- // copy out in special format
- ConstData data = (Uint32*)req + TupReadAttrs::SignalLength;
- const Uint32 numKeys = data[0];
- data += 1 + numKeys;
- // copy out without headers
- ndbrequire(readPar.m_data != 0);
- CopyPar copyPar;
- copyPar.m_items = numKeys;
- copyPar.m_headers = false;
- copyAttrs(readPar.m_data, data, copyPar);
- // return counts
- readPar.m_count = numKeys;
- readPar.m_size = copyPar.m_numwords;
-}
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index 0612f191830..b30b555ccad 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -85,6 +85,7 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
fragPtr.p->m_fragOff = req->fragOff;
fragPtr.p->m_fragId = req->fragId;
fragPtr.p->m_numAttrs = req->noOfAttr;
+ fragPtr.p->m_storeNullKey = true; // not yet configurable
fragPtr.p->m_tupIndexFragPtrI = req->tupIndexFragPtrI;
fragPtr.p->m_tupTableFragPtrI[0] = req->tupTableFragPtrI[0];
fragPtr.p->m_tupTableFragPtrI[1] = req->tupTableFragPtrI[1];
@@ -111,6 +112,7 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
indexPtr.p->m_tableId = req->primaryTableId;
indexPtr.p->m_fragOff = req->fragOff;
indexPtr.p->m_numAttrs = req->noOfAttr;
+ indexPtr.p->m_storeNullKey = true; // not yet configurable
// allocate attribute descriptors
if (! allocDescEnt(indexPtr)) {
jam();
@@ -405,14 +407,15 @@ Dbtux::freeDescEnt(IndexPtr indexPtr)
index2.m_descPage == pagePtr.i &&
index2.m_descOff == off + size);
// move the entry (overlapping copy if size < size2)
- for (unsigned i = 0; i < size2; i++) {
+ unsigned i;
+ for (i = 0; i < size2; i++) {
jam();
data[off + i] = data[off + size + i];
}
off += size2;
// adjust page offset in index and all fragments
index2.m_descOff -= size;
- for (unsigned i = 0; i < index2.m_numFrags; i++) {
+ for (i = 0; i < index2.m_numFrags; i++) {
jam();
Frag& frag2 = *c_fragPool.getPtr(index2.m_fragPtrI[i]);
frag2.m_descOff -= size;
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
index c969e35dc82..a1bfa2179bb 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
@@ -85,10 +85,9 @@ Dbtux::insertNode(Signal* signal, NodeHandle& node, AccSize acc)
new (node.m_node) TreeNode();
#ifdef VM_TRACE
TreeHead& tree = frag.m_tree;
- memset(node.getPref(0), 0xa2, tree.m_prefSize << 2);
- memset(node.getPref(1), 0xa2, tree.m_prefSize << 2);
+ memset(node.getPref(), DataFillByte, tree.m_prefSize << 2);
TreeEnt* entList = tree.getEntList(node.m_node);
- memset(entList, 0xa4, (tree.m_maxOccup + 1) * (TreeEntSize << 2));
+ memset(entList, NodeFillByte, (tree.m_maxOccup + 1) * (TreeEntSize << 2));
#endif
}
@@ -116,12 +115,12 @@ Dbtux::deleteNode(Signal* signal, NodeHandle& node)
* attribute headers for now. XXX use null mask instead
*/
void
-Dbtux::setNodePref(Signal* signal, NodeHandle& node, unsigned i)
+Dbtux::setNodePref(Signal* signal, NodeHandle& node)
{
const Frag& frag = node.m_frag;
const TreeHead& tree = frag.m_tree;
- readKeyAttrs(frag, node.getMinMax(i), 0, c_entryKey);
- copyAttrs(frag, c_entryKey, node.getPref(i), tree.m_prefSize);
+ readKeyAttrs(frag, node.getMinMax(0), 0, c_entryKey);
+ copyAttrs(frag, c_entryKey, node.getPref(), tree.m_prefSize);
}
// node operations
@@ -173,11 +172,9 @@ Dbtux::nodePushUp(Signal* signal, NodeHandle& node, unsigned pos, const TreeEnt&
tmpList[pos] = ent;
entList[0] = entList[occup + 1];
node.setOccup(occup + 1);
- // fix prefixes
+ // fix prefix
if (occup == 0 || pos == 0)
- setNodePref(signal, node, 0);
- if (occup == 0 || pos == occup)
- setNodePref(signal, node, 1);
+ setNodePref(signal, node);
}
/*
@@ -248,11 +245,9 @@ Dbtux::nodePopDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
}
entList[0] = entList[occup - 1];
node.setOccup(occup - 1);
- // fix prefixes
+ // fix prefix
if (occup != 1 && pos == 0)
- setNodePref(signal, node, 0);
- if (occup != 1 && pos == occup - 1)
- setNodePref(signal, node, 1);
+ setNodePref(signal, node);
}
/*
@@ -325,11 +320,9 @@ Dbtux::nodePushDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent
tmpList[pos] = ent;
ent = oldMin;
entList[0] = entList[occup];
- // fix prefixes
+ // fix prefix
if (true)
- setNodePref(signal, node, 0);
- if (occup == 1 || pos == occup - 1)
- setNodePref(signal, node, 1);
+ setNodePref(signal, node);
}
/*
@@ -403,11 +396,9 @@ Dbtux::nodePopUp(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
}
tmpList[0] = newMin;
entList[0] = entList[occup];
- // fix prefixes
+ // fix prefix
if (true)
- setNodePref(signal, node, 0);
- if (occup == 1 || pos == occup - 1)
- setNodePref(signal, node, 1);
+ setNodePref(signal, node);
}
/*
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
index 703b0abb683..c4c33ff931f 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
@@ -137,7 +137,7 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal)
const Uint32* const data = (Uint32*)sig + TuxBoundInfo::SignalLength;
unsigned offset = 5;
// walk through entries
- while (offset + 2 < req->boundAiLength) {
+ while (offset + 2 <= req->boundAiLength) {
jam();
const unsigned type = data[offset];
if (type > 4) {
@@ -379,8 +379,8 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
scanNext(signal, scanPtr);
}
// for reading tuple key in Current or Locked state
- ReadPar keyPar;
- keyPar.m_data = 0; // indicates not yet done
+ Data pkData = c_dataBuffer;
+ unsigned pkSize = 0; // indicates not yet done
if (scan.m_state == ScanOp::Current) {
// found an entry to return
jam();
@@ -389,9 +389,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
jam();
const TreeEnt ent = scan.m_scanPos.m_ent;
// read tuple key
- keyPar.m_ent = ent;
- keyPar.m_data = c_dataBuffer;
- tupReadKeys(signal, frag, keyPar);
+ readTablePk(frag, ent, pkSize, pkData);
// get read lock or exclusive lock
AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
lockReq->returnCode = RNIL;
@@ -403,9 +401,9 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
lockReq->tableId = scan.m_tableId;
lockReq->fragId = frag.m_fragId | (ent.m_fragBit << frag.m_fragOff);
lockReq->fragPtrI = frag.m_accTableFragPtrI[ent.m_fragBit];
- const Uint32* const buf32 = static_cast<Uint32*>(keyPar.m_data);
+ const Uint32* const buf32 = static_cast<Uint32*>(pkData);
const Uint64* const buf64 = reinterpret_cast<const Uint64*>(buf32);
- lockReq->hashValue = md5_hash(buf64, keyPar.m_size);
+ lockReq->hashValue = md5_hash(buf64, pkSize);
lockReq->tupAddr = getTupAddr(frag, ent);
lockReq->transId1 = scan.m_transId1;
lockReq->transId2 = scan.m_transId2;
@@ -480,11 +478,9 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
const TreeEnt ent = scan.m_scanPos.m_ent;
if (scan.m_keyInfo) {
jam();
- if (keyPar.m_data == 0) {
+ if (pkSize == 0) {
jam();
- keyPar.m_ent = ent;
- keyPar.m_data = c_dataBuffer;
- tupReadKeys(signal, frag, keyPar);
+ readTablePk(frag, ent, pkSize, pkData);
}
}
// conf signal
@@ -510,10 +506,10 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
// add key info
if (scan.m_keyInfo) {
jam();
- conf->keyLength = keyPar.m_size;
+ conf->keyLength = pkSize;
// piggy-back first 4 words of key data
for (unsigned i = 0; i < 4; i++) {
- conf->key[i] = i < keyPar.m_size ? keyPar.m_data[i] : 0;
+ conf->key[i] = i < pkSize ? pkData[i] : 0;
}
signalLength = 11;
}
@@ -525,18 +521,18 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength);
}
// send rest of key data
- if (scan.m_keyInfo && keyPar.m_size > 4) {
+ if (scan.m_keyInfo && pkSize > 4) {
unsigned total = 4;
- while (total < keyPar.m_size) {
+ while (total < pkSize) {
jam();
- unsigned length = keyPar.m_size - total;
+ unsigned length = pkSize - total;
if (length > 20)
length = 20;
signal->theData[0] = scan.m_userPtr;
signal->theData[1] = 0;
signal->theData[2] = 0;
signal->theData[3] = length;
- memcpy(&signal->theData[4], &keyPar.m_data[total], length << 2);
+ memcpy(&signal->theData[4], &pkData[total], length << 2);
sendSignal(scan.m_userRef, GSN_ACC_SCAN_INFO24,
signal, 4 + length, JBB);
total += length;
@@ -606,6 +602,8 @@ Dbtux::execACCKEYCONF(Signal* signal)
// LQH has the ball
return;
}
+ // lose the lock
+ scan.m_accLockOp = RNIL;
// continue at ACC_ABORTCONF
}
@@ -648,6 +646,8 @@ Dbtux::execACCKEYREF(Signal* signal)
// LQH has the ball
return;
}
+ // lose the lock
+ scan.m_accLockOp = RNIL;
// continue at ACC_ABORTCONF
}
@@ -689,16 +689,9 @@ Dbtux::scanFirst(Signal* signal, ScanOpPtr scanPtr)
ScanOp& scan = *scanPtr.p;
Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
TreeHead& tree = frag.m_tree;
- if (tree.m_root == NullTupLoc) {
- // tree may have become empty
- jam();
- scan.m_state = ScanOp::Last;
- return;
- }
- TreePos pos;
- pos.m_loc = tree.m_root;
- NodeHandle node(frag);
- // unpack lower bound
+ // set up index keys for this operation
+ setKeyAttrs(frag);
+ // unpack lower bound into c_dataBuffer
const ScanBound& bound = *scan.m_bound[0];
ScanBoundIterator iter;
bound.first(iter);
@@ -707,103 +700,22 @@ Dbtux::scanFirst(Signal* signal, ScanOpPtr scanPtr)
c_dataBuffer[j] = *iter.data;
bound.next(iter);
}
- // comparison parameters
- BoundPar boundPar;
- boundPar.m_data1 = c_dataBuffer;
- boundPar.m_count1 = scan.m_boundCnt[0];
- boundPar.m_dir = 0;
-loop: {
+ // search for scan start position
+ TreePos treePos;
+ searchToScan(signal, frag, c_dataBuffer, scan.m_boundCnt[0], treePos);
+ if (treePos.m_loc == NullTupLoc) {
+ // empty tree
jam();
- selectNode(signal, node, pos.m_loc, AccPref);
- const unsigned occup = node.getOccup();
- ndbrequire(occup != 0);
- for (unsigned i = 0; i <= 1; i++) {
- jam();
- // compare prefix
- boundPar.m_data2 = node.getPref(i);
- boundPar.m_len2 = tree.m_prefSize;
- int ret = cmpScanBound(frag, boundPar);
- if (ret == NdbSqlUtil::CmpUnknown) {
- jam();
- // read full value
- ReadPar readPar;
- readPar.m_ent = node.getMinMax(i);
- readPar.m_first = 0;
- readPar.m_count = frag.m_numAttrs;
- readPar.m_data = 0; // leave in signal data
- tupReadAttrs(signal, frag, readPar);
- // compare full value
- boundPar.m_data2 = readPar.m_data;
- boundPar.m_len2 = ZNIL; // big
- ret = cmpScanBound(frag, boundPar);
- ndbrequire(ret != NdbSqlUtil::CmpUnknown);
- }
- if (i == 0 && ret < 0) {
- jam();
- const TupLoc loc = node.getLink(i);
- if (loc != NullTupLoc) {
- jam();
- // continue to left subtree
- pos.m_loc = loc;
- goto loop;
- }
- // start scanning this node
- pos.m_pos = 0;
- pos.m_match = false;
- pos.m_dir = 3;
- scan.m_scanPos = pos;
- scan.m_state = ScanOp::Next;
- linkScan(node, scanPtr);
- return;
- }
- if (i == 1 && ret > 0) {
- jam();
- const TupLoc loc = node.getLink(i);
- if (loc != NullTupLoc) {
- jam();
- // continue to right subtree
- pos.m_loc = loc;
- goto loop;
- }
- // start scanning upwards
- pos.m_dir = 1;
- scan.m_scanPos = pos;
- scan.m_state = ScanOp::Next;
- linkScan(node, scanPtr);
- return;
- }
- }
- // read rest of current node
- accessNode(signal, node, AccFull);
- // look for first entry
- ndbrequire(occup >= 2);
- for (unsigned j = 1; j < occup; j++) {
- jam();
- ReadPar readPar;
- readPar.m_ent = node.getEnt(j);
- readPar.m_first = 0;
- readPar.m_count = frag.m_numAttrs;
- readPar.m_data = 0; // leave in signal data
- tupReadAttrs(signal, frag, readPar);
- // compare
- boundPar.m_data2 = readPar.m_data;
- boundPar.m_len2 = ZNIL; // big
- int ret = cmpScanBound(frag, boundPar);
- ndbrequire(ret != NdbSqlUtil::CmpUnknown);
- if (ret < 0) {
- jam();
- // start scanning this node
- pos.m_pos = j;
- pos.m_match = false;
- pos.m_dir = 3;
- scan.m_scanPos = pos;
- scan.m_state = ScanOp::Next;
- linkScan(node, scanPtr);
- return;
- }
- }
- ndbrequire(false);
+ scan.m_state = ScanOp::Last;
+ return;
}
+ // set position and state
+ scan.m_scanPos = treePos;
+ scan.m_state = ScanOp::Next;
+ // link the scan to node found
+ NodeHandle node(frag);
+ selectNode(signal, node, treePos.m_loc, AccFull);
+ linkScan(node, scanPtr);
}
/*
@@ -830,7 +742,9 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
if (scan.m_state == ScanOp::Locked) {
jam();
// version of a tuple locked by us cannot disappear (assert only)
+#ifdef dbtux_wl_1942_is_done
ndbassert(false);
+#endif
AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
lockReq->returnCode = RNIL;
lockReq->requestInfo = AccLockReq::Unlock;
@@ -841,7 +755,9 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
scan.m_accLockOp = RNIL;
scan.m_state = ScanOp::Current;
}
- // unpack upper bound
+ // set up index keys for this operation
+ setKeyAttrs(frag);
+ // unpack upper bound into c_dataBuffer
const ScanBound& bound = *scan.m_bound[1];
ScanBoundIterator iter;
bound.first(iter);
@@ -850,11 +766,6 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
c_dataBuffer[j] = *iter.data;
bound.next(iter);
}
- // comparison parameters
- BoundPar boundPar;
- boundPar.m_data1 = c_dataBuffer;
- boundPar.m_count1 = scan.m_boundCnt[1];
- boundPar.m_dir = 1;
// use copy of position
TreePos pos = scan.m_scanPos;
// get and remember original node
@@ -912,17 +823,9 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
jam();
pos.m_ent = node.getEnt(pos.m_pos);
pos.m_dir = 3; // unchanged
- // XXX implement prefix optimization
- ReadPar readPar;
- readPar.m_ent = pos.m_ent;
- readPar.m_first = 0;
- readPar.m_count = frag.m_numAttrs;
- readPar.m_data = 0; // leave in signal data
- tupReadAttrs(signal, frag, readPar);
- // compare
- boundPar.m_data2 = readPar.m_data;
- boundPar.m_len2 = ZNIL; // big
- int ret = cmpScanBound(frag, boundPar);
+ // read and compare all attributes
+ readKeyAttrs(frag, pos.m_ent, 0, c_entryKey);
+ int ret = cmpScanBound(frag, 1, c_dataBuffer, scan.m_boundCnt[1], c_entryKey);
ndbrequire(ret != NdbSqlUtil::CmpUnknown);
if (ret < 0) {
jam();
@@ -994,35 +897,25 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
bool
Dbtux::scanVisible(Signal* signal, ScanOpPtr scanPtr, TreeEnt ent)
{
- TupQueryTh* const req = (TupQueryTh*)signal->getDataPtrSend();
const ScanOp& scan = *scanPtr.p;
const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
- /* Assign table, fragment, tuple address + version */
- Uint32 tableId = frag.m_tableId;
Uint32 fragBit = ent.m_fragBit;
+ Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[fragBit];
Uint32 fragId = frag.m_fragId | (fragBit << frag.m_fragOff);
Uint32 tupAddr = getTupAddr(frag, ent);
Uint32 tupVersion = ent.m_tupVersion;
- /* Check for same tuple twice in row */
+ // check for same tuple twice in row
if (scan.m_lastEnt.m_tupLoc == ent.m_tupLoc &&
scan.m_lastEnt.m_fragBit == fragBit) {
jam();
return false;
}
- req->tableId = tableId;
- req->fragId = fragId;
- req->tupAddr = tupAddr;
- req->tupVersion = tupVersion;
- /* Assign transaction info, trans id + savepoint id */
Uint32 transId1 = scan.m_transId1;
Uint32 transId2 = scan.m_transId2;
Uint32 savePointId = scan.m_savePointId;
- req->transId1 = transId1;
- req->transId2 = transId2;
- req->savePointId = savePointId;
- EXECUTE_DIRECT(DBTUP, GSN_TUP_QUERY_TH, signal, TupQueryTh::SignalLength);
+ bool ret = c_tup->tuxQueryTh(tableFragPtrI, tupAddr, tupVersion, transId1, transId2, savePointId);
jamEntry();
- return (bool)req->returnCode;
+ return ret;
}
/*
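A note on the key-data handling above (illustrative only; the helper below is hypothetical and not NDB code): the CONF signal piggy-backs the first 4 words of the primary key, and any remainder is shipped in follow-up ACC_SCAN_INFO24 signals of at most 20 words each, with word counts turned into byte counts via "length << 2". A minimal standalone sketch of that chunking arithmetic:

// Minimal sketch of the chunking used above (hypothetical helper, not NDB code).
// The first 4 key words are assumed already delivered; the rest is split into
// chunks of at most 20 words, and a word count becomes a byte count as count*4.
#include <cstring>
#include <vector>

typedef unsigned int Uint32;

std::vector< std::vector<Uint32> >
chunkRemainingKey(const Uint32* keyData, unsigned keyWords)
{
  std::vector< std::vector<Uint32> > chunks;
  unsigned total = 4;                     // words already piggy-backed on the CONF
  while (total < keyWords) {
    unsigned length = keyWords - total;
    if (length > 20)
      length = 20;                        // per-signal payload limit (words)
    std::vector<Uint32> chunk(length);
    memcpy(&chunk[0], &keyData[total], length << 2);   // words -> bytes
    chunks.push_back(chunk);
    total += length;
  }
  return chunks;
}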
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
new file mode 100644
index 00000000000..84048b308bc
--- /dev/null
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
@@ -0,0 +1,333 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_SEARCH_CPP
+#include "Dbtux.hpp"
+
+/*
+ * Search for entry to add.
+ *
+ * Similar to searchToRemove (see below).
+ *
+ * TODO optimize for initial equal attrs in node min/max
+ */
+void
+Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ const unsigned numAttrs = frag.m_numAttrs;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ if (currNode.m_loc == NullTupLoc) {
+ // empty tree
+ jam();
+ treePos.m_match = false;
+ return;
+ }
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ /*
+ * In order to not (yet) change old behaviour, a position between
+ * 2 nodes returns the one at the bottom of the tree.
+ */
+ NodeHandle bottomNode(frag);
+ while (true) {
+ jam();
+ selectNode(signal, currNode, currNode.m_loc, AccPref);
+ int ret;
+ // compare prefix
+ unsigned start = 0;
+ ret = cmpSearchKey(frag, start, searchKey, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare remaining attributes
+ ndbrequire(start < numAttrs);
+ readKeyAttrs(frag, currNode.getMinMax(0), start, c_entryKey);
+ ret = cmpSearchKey(frag, start, searchKey, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret == 0) {
+ jam();
+ // keys are equal, compare entry values
+ ret = searchEnt.cmp(currNode.getMinMax(0));
+ }
+ if (ret < 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b but remember the bottom node
+ bottomNode = currNode;
+ currNode = glbNode;
+ }
+ } else if (ret > 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ jam();
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_match = true;
+ return;
+ }
+ break;
+ }
+ // access rest of current node
+ accessNode(signal, currNode, AccFull);
+ for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ int ret;
+ // read and compare attributes
+ unsigned start = 0;
+ readKeyAttrs(frag, currNode.getEnt(j), start, c_entryKey);
+ ret = cmpSearchKey(frag, start, searchKey, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (ret == 0) {
+ jam();
+ // keys are equal, compare entry values
+ ret = searchEnt.cmp(currNode.getEnt(j));
+ }
+ if (ret <= 0) {
+ jam();
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = j;
+ treePos.m_match = (ret == 0);
+ return;
+ }
+ }
+ if (! bottomNode.isNull()) {
+ jam();
+ // backwards compatible for now
+ treePos.m_loc = bottomNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_match = false;
+ return;
+ }
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = currNode.getOccup();
+ treePos.m_match = false;
+}
+
+/*
+ * Search for entry to remove.
+ *
+ * Compares search key to each node min. A move to right subtree can
+ * overshoot target node. The last such node is saved. The final node
+ * is a half-leaf or leaf. If search key is less than final node min
+ * then the saved node is the g.l.b of the final node and we move back
+ * to it.
+ */
+void
+Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ const unsigned numAttrs = frag.m_numAttrs;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ if (currNode.m_loc == NullTupLoc) {
+ // empty tree
+ jam();
+ treePos.m_match = false;
+ return;
+ }
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ while (true) {
+ jam();
+ selectNode(signal, currNode, currNode.m_loc, AccPref);
+ int ret;
+ // compare prefix
+ unsigned start = 0;
+ ret = cmpSearchKey(frag, start, searchKey, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare remaining attributes
+ ndbrequire(start < numAttrs);
+ readKeyAttrs(frag, currNode.getMinMax(0), start, c_entryKey);
+ ret = cmpSearchKey(frag, start, searchKey, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret == 0) {
+ jam();
+ // keys are equal, compare entry values
+ ret = searchEnt.cmp(currNode.getMinMax(0));
+ }
+ if (ret < 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b
+ currNode = glbNode;
+ }
+ } else if (ret > 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ jam();
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_match = true;
+ return;
+ }
+ break;
+ }
+ // access rest of current node
+ accessNode(signal, currNode, AccFull);
+ // pos 0 was handled above
+ for (unsigned j = 1, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ // compare only the entry
+ if (searchEnt.eq(currNode.getEnt(j))) {
+ jam();
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = j;
+ treePos.m_match = true;
+ return;
+ }
+ }
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = currNode.getOccup();
+ treePos.m_match = false;
+}
+
+/*
+ * Search for scan start position.
+ *
+ * Similar to searchToAdd.
+ */
+void
+Dbtux::searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ if (currNode.m_loc == NullTupLoc) {
+ // empty tree
+ jam();
+ treePos.m_match = false;
+ return;
+ }
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ NodeHandle bottomNode(frag);
+ while (true) {
+ jam();
+ selectNode(signal, currNode, currNode.m_loc, AccPref);
+ int ret;
+ // compare prefix
+ ret = cmpScanBound(frag, 0, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare all attributes
+ readKeyAttrs(frag, currNode.getMinMax(0), 0, c_entryKey);
+ ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret < 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b but remember the bottom node
+ bottomNode = currNode;
+ currNode = glbNode;
+ } else {
+ // start scanning this node
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_match = false;
+ treePos.m_dir = 3;
+ return;
+ }
+ } else if (ret > 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ ndbassert(false);
+ }
+ break;
+ }
+ // access rest of current node
+ accessNode(signal, currNode, AccFull);
+ for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ int ret;
+ // read and compare attributes
+ readKeyAttrs(frag, currNode.getEnt(j), 0, c_entryKey);
+ ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (ret < 0) {
+ // start scanning from current entry
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = j;
+ treePos.m_match = false;
+ treePos.m_dir = 3;
+ return;
+ }
+ }
+ if (! bottomNode.isNull()) {
+ jam();
+ // start scanning the l.u.b
+ treePos.m_loc = bottomNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_match = false;
+ treePos.m_dir = 3;
+ return;
+ }
+ // start scanning upwards (pretend we came from right child)
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_dir = 1;
+}
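The three search routines in this new file share one descent pattern: compare the search key against the node prefix, fall back to reading the full entry key when the prefix is inconclusive, and, when moving into a right subtree, remember the node as the potential greatest lower bound (g.l.b.) so a later dead-end left turn can back up to it. A stripped-down sketch of that pattern on a plain binary search tree (hypothetical types, not NDB code) follows:

// Illustrative sketch only (hypothetical types, not NDB code): the same
// descent pattern on a plain binary search tree.  Moving right records the
// node as the potential greatest lower bound (g.l.b.); a dead-end left turn
// then falls back to that remembered node.
struct Node {
  int key;
  Node* link[2];        // link[0] = left child, link[1] = right child
};

// Returns the node with key equal to searchKey, else the node with the
// largest smaller key (the g.l.b.), else 0 if every key is larger.
const Node*
findGlb(const Node* root, int searchKey)
{
  const Node* curr = root;
  const Node* glb = 0;              // potential g.l.b. of the final node
  while (curr != 0) {
    if (searchKey < curr->key) {
      curr = curr->link[0];         // continue to left subtree
    } else if (searchKey > curr->key) {
      glb = curr;                   // save potential g.l.b.
      curr = curr->link[1];         // continue to right subtree
    } else {
      return curr;                  // exact match
    }
  }
  return glb;                       // 0 means no smaller key exists
}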
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index 7c3f5fa36b8..3baa62998db 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -18,112 +18,6 @@
#include "Dbtux.hpp"
/*
- * Search for entry.
- *
- * Search key is index attribute data and tree entry value. Start from
- * root node and compare the key to min/max of each node. Use linear
- * search on the final (bounding) node. Initial attributes which are
- * same in min/max need not be checked.
- */
-void
-Dbtux::treeSearch(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
-{
- const TreeHead& tree = frag.m_tree;
- const unsigned numAttrs = frag.m_numAttrs;
- treePos.m_loc = tree.m_root;
- if (treePos.m_loc == NullTupLoc) {
- // empty tree
- jam();
- treePos.m_pos = 0;
- treePos.m_match = false;
- return;
- }
- NodeHandle node(frag);
-loop: {
- jam();
- selectNode(signal, node, treePos.m_loc, AccPref);
- const unsigned occup = node.getOccup();
- ndbrequire(occup != 0);
- // number of equal initial attributes in bounding node
- unsigned start = ZNIL;
- for (unsigned i = 0; i <= 1; i++) {
- jam();
- unsigned start1 = 0;
- // compare prefix
- int ret = cmpSearchKey(frag, start1, searchKey, node.getPref(i), tree.m_prefSize);
- if (ret == NdbSqlUtil::CmpUnknown) {
- jam();
- // read and compare remaining attributes
- readKeyAttrs(frag, node.getMinMax(i), start1, c_entryKey);
- ret = cmpSearchKey(frag, start1, searchKey, c_entryKey);
- ndbrequire(ret != NdbSqlUtil::CmpUnknown);
- }
- if (start > start1)
- start = start1;
- if (ret == 0) {
- jam();
- // keys are equal, compare entry values
- ret = searchEnt.cmp(node.getMinMax(i));
- }
- if (i == 0 ? (ret < 0) : (ret > 0)) {
- jam();
- const TupLoc loc = node.getLink(i);
- if (loc != NullTupLoc) {
- jam();
- // continue to left/right subtree
- treePos.m_loc = loc;
- goto loop;
- }
- // position is immediately before/after this node
- treePos.m_pos = (i == 0 ? 0 : occup);
- treePos.m_match = false;
- return;
- }
- if (ret == 0) {
- jam();
- // position is at first/last entry
- treePos.m_pos = (i == 0 ? 0 : occup - 1);
- treePos.m_match = true;
- return;
- }
- }
- // access rest of the bounding node
- accessNode(signal, node, AccFull);
- // position is strictly within the node
- ndbrequire(occup >= 2);
- const unsigned numWithin = occup - 2;
- for (unsigned j = 1; j <= numWithin; j++) {
- jam();
- int ret = 0;
- if (start < numAttrs) {
- jam();
- // read and compare remaining attributes
- unsigned start1 = start;
- readKeyAttrs(frag, node.getEnt(j), start1, c_entryKey);
- ret = cmpSearchKey(frag, start1, searchKey, c_entryKey);
- ndbrequire(ret != NdbSqlUtil::CmpUnknown);
- }
- if (ret == 0) {
- jam();
- // keys are equal, compare entry values
- ret = searchEnt.cmp(node.getEnt(j));
- }
- if (ret <= 0) {
- jam();
- // position is before or at this entry
- treePos.m_pos = j;
- treePos.m_match = (ret == 0);
- return;
- }
- }
- // position is before last entry
- treePos.m_pos = occup - 1;
- treePos.m_match = false;
- return;
- }
-}
-
-/*
* Add entry.
*/
void
@@ -283,7 +177,8 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos)
nodePopDown(signal, node, pos, ent);
ndbrequire(node.getChilds() <= 1);
// handle half-leaf
- for (unsigned i = 0; i <= 1; i++) {
+ unsigned i;
+ for (i = 0; i <= 1; i++) {
jam();
TupLoc childLoc = node.getLink(i);
if (childLoc != NullTupLoc) {
@@ -297,7 +192,7 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos)
// get parent if any
TupLoc parentLoc = node.getLink(2);
NodeHandle parentNode(frag);
- unsigned i = node.getSide();
+ i = node.getSide();
// move all that fits into parent
if (parentLoc != NullTupLoc) {
jam();
diff --git a/ndb/src/kernel/blocks/dbtux/Makefile.am b/ndb/src/kernel/blocks/dbtux/Makefile.am
index 0b48ad5724f..7d012924522 100644
--- a/ndb/src/kernel/blocks/dbtux/Makefile.am
+++ b/ndb/src/kernel/blocks/dbtux/Makefile.am
@@ -7,6 +7,7 @@ libdbtux_a_SOURCES = \
DbtuxNode.cpp \
DbtuxTree.cpp \
DbtuxScan.cpp \
+ DbtuxSearch.cpp \
DbtuxCmp.cpp \
DbtuxDebug.cpp
diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/ndb/src/kernel/blocks/dbtux/Times.txt
index 16c4102249b..c4744a23c07 100644
--- a/ndb/src/kernel/blocks/dbtux/Times.txt
+++ b/ndb/src/kernel/blocks/dbtux/Times.txt
@@ -1,17 +1,32 @@
-index maintenance overhead
-==========================
+ordered index performance
+=========================
"mc02" 2x1700 MHz linux-2.4.9 gcc-2.96 -O3 one db-node
-case a: index on Unsigned
-testOIBasic -case u -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+case a: maintenance: index on Unsigned
+testOIBasic -case u -table 1 -index 2 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
-case b: index on Varchar(5) + Varchar(5) + Varchar(20) + Unsigned
-testOIBasic -case u -table 2 -index 4 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+case b: maintenance: index on Varchar(5) + Varchar(5) + Varchar(20) + Unsigned
+testOIBasic -case u -table 2 -index 5 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+case c: full scan: index on PK Unsigned
+testOIBasic -case v -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+
+case d: scan 1 tuple via EQ: index on PK Unsigned
+testOIBasic -case w -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -samples 10000 -subloop 1 -nologging -v2
+
+a, b
1 million rows, pk update without index, pk update with index
shows ms / 1000 rows for each and pct overhead
-the figures are based on single run on idle machine
+
+c
+1 million rows, index on PK, full table scan, full index scan
+shows ms / 1000 rows for each and index time overhead
+
+d
+1 million rows, index on PK, read table via each pk, scan index for each pk
+shows ms / 1000 rows for each and index time overhead
+samples 10% of all PKs (100,000 pk reads, 100,000 scans)
040616 mc02/a 40 ms 87 ms 114 pct
mc02/b 51 ms 128 ms 148 pct
@@ -49,4 +64,22 @@ optim 10 mc02/a 44 ms 65 ms 46 pct
optim 11 mc02/a 43 ms 63 ms 46 pct
mc02/b 52 ms 86 ms 63 pct
+optim 12 mc02/a 38 ms 55 ms 43 pct
+ mc02/b 47 ms 77 ms 63 pct
+ mc02/c 10 ms 14 ms 47 pct
+ mc02/d 176 ms 281 ms 59 pct
+
+optim 13 mc02/a 40 ms 57 ms 42 pct
+ mc02/b 47 ms 77 ms 61 pct
+ mc02/c 9 ms 13 ms 50 pct
+ mc02/d 170 ms 256 ms 50 pct
+
+after wl-1884 store all-NULL keys (the tests have pctnull=10 per column)
+[ what happened to PK read performance? ]
+
+optim 13 mc02/a 39 ms 59 ms 50 pct
+ mc02/b 47 ms 77 ms 61 pct
+ mc02/c 9 ms 12 ms 44 pct
+ mc02/d 246 ms 289 ms 17 pct
+
vim: set et:
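For reading the tables above, the overhead column appears to be (indexed time - plain time) / plain time per 1000 rows; as a worked example using the listed figures, optim 12 case a gives (55 - 38) / 38 = 45 pct against the listed 43 pct, the small gap presumably coming from percentages computed on unrounded timings. This reading is inferred from the columns, not stated in the original notes.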
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
index 92410e1a784..ecaead3ba5a 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
+++ b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
@@ -2581,3 +2581,5 @@ DbUtil::execUTIL_DESTORY_LOCK_REQ(Signal* signal){
sendSignal(req.senderRef, GSN_UTIL_DESTROY_LOCK_REF, signal,
UtilDestroyLockRef::SignalLength, JBB);
}
+
+template class ArrayPool<DbUtil::Page32>;
diff --git a/ndb/src/kernel/blocks/grep/Grep.cpp b/ndb/src/kernel/blocks/grep/Grep.cpp
index ee506ce922a..8b93ef9cd20 100644
--- a/ndb/src/kernel/blocks/grep/Grep.cpp
+++ b/ndb/src/kernel/blocks/grep/Grep.cpp
@@ -73,7 +73,7 @@ Grep::getNodeGroupMembers(Signal* signal) {
c_noNodesInGroup++;
}
}
- ndbrequire(c_noNodesInGroup >= 0); // at least 1 node in the nodegroup
+ ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup
#ifdef NODEFAIL_DEBUG
for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
@@ -253,7 +253,8 @@ Grep::execREAD_NODESCONF(Signal* signal)
/******************************
* Check which REP nodes exist
******************************/
- for (Uint32 i = 1; i < MAX_NODES; i++)
+ Uint32 i;
+ for (i = 1; i < MAX_NODES; i++)
{
jam();
#if 0
@@ -279,7 +280,7 @@ Grep::execREAD_NODESCONF(Signal* signal)
m_aliveNodes.clear();
Uint32 count = 0;
- for(Uint32 i = 0; i<MAX_NDB_NODES; i++)
+ for(i = 0; i<MAX_NDB_NODES; i++)
{
if (NodeBitmask::get(conf->allNodes, i))
{
diff --git a/ndb/src/kernel/blocks/grep/Grep.hpp b/ndb/src/kernel/blocks/grep/Grep.hpp
index ba8f5780522..eeabac36966 100644
--- a/ndb/src/kernel/blocks/grep/Grep.hpp
+++ b/ndb/src/kernel/blocks/grep/Grep.hpp
@@ -148,7 +148,7 @@ private:
*/
class Grep : public SimulatedBlock //GrepParticipant
{
- //BLOCK_DEFINES(Grep);
+ BLOCK_DEFINES(Grep);
public:
Grep(const Configuration & conf);
@@ -519,19 +519,6 @@ public:
typedef void (Grep::* ExecSignalLocal1) (Signal* signal);
typedef void (Grep::PSCoord::* ExecSignalLocal2) (Signal* signal);
typedef void (Grep::PSPart::* ExecSignalLocal4) (Signal* signal);
-
- void
- addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal1 f, bool force = false){
- addRecSignalImpl(gsn, (ExecFunction)f, force);
- }
- void
- addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal2 f, bool force = false){
- addRecSignalImpl(gsn, (ExecFunction)f, force);
- }
- void
- addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal4 f, bool force = false){
- addRecSignalImpl(gsn, (ExecFunction)f, force);
- }
};
diff --git a/ndb/src/kernel/blocks/grep/GrepInit.cpp b/ndb/src/kernel/blocks/grep/GrepInit.cpp
index 70bf6678754..cfb454a1f9b 100644
--- a/ndb/src/kernel/blocks/grep/GrepInit.cpp
+++ b/ndb/src/kernel/blocks/grep/GrepInit.cpp
@@ -132,7 +132,7 @@ Grep::~Grep()
{
}
-//BLOCK_FUNCTIONS(Grep);
+BLOCK_FUNCTIONS(Grep);
Grep::PSPart::PSPart(Grep * sb) :
BlockComponent(sb),
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index 06453155f33..ff4876b1506 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -554,11 +554,13 @@ Ndbcntr::execCNTR_START_REP(Signal* signal){
}
if(cmasterNodeId != getOwnNodeId()){
+ jam();
c_start.reset();
return;
}
if(c_start.m_waiting.isclear()){
+ jam();
c_start.reset();
return;
}
@@ -597,6 +599,7 @@ Ndbcntr::execCNTR_START_REQ(Signal * signal){
ndbrequire(false);
case NodeState::SL_STARTING:
case NodeState::SL_STARTED:
+ jam();
break;
case NodeState::SL_STOPPING_1:
@@ -616,9 +619,11 @@ Ndbcntr::execCNTR_START_REQ(Signal * signal){
c_start.m_waiting.set(nodeId);
switch(st){
case NodeState::ST_INITIAL_START:
+ jam();
c_start.m_withoutLog.set(nodeId);
break;
case NodeState::ST_SYSTEM_RESTART:
+ jam();
c_start.m_withLog.set(nodeId);
if(starting && lastGci > c_start.m_lastGci){
jam();
@@ -631,6 +636,7 @@ Ndbcntr::execCNTR_START_REQ(Signal * signal){
return;
}
if(starting){
+ jam();
Uint32 i = c_start.m_logNodesCount++;
c_start.m_logNodes[i].m_nodeId = nodeId;
c_start.m_logNodes[i].m_lastGci = req->lastGci;
@@ -652,11 +658,12 @@ Ndbcntr::execCNTR_START_REQ(Signal * signal){
}
if(starting){
+ jam();
trySystemRestart(signal);
} else {
+ jam();
startWaitingNodes(signal);
}
-
return;
}
@@ -670,6 +677,7 @@ Ndbcntr::startWaitingNodes(Signal * signal){
NodeState::StartType nrType = NodeState::ST_NODE_RESTART;
if(c_start.m_withoutLog.get(nodeId)){
+ jam();
nrType = NodeState::ST_INITIAL_NODE_RESTART;
}
@@ -706,6 +714,7 @@ Ndbcntr::startWaitingNodes(Signal * signal){
char buf[100];
if(!c_start.m_withLog.isclear()){
+ jam();
ndbout_c("Starting nodes w/ log: %s", c_start.m_withLog.getText(buf));
NodeReceiverGroup rg(NDBCNTR, c_start.m_withLog);
@@ -716,6 +725,7 @@ Ndbcntr::startWaitingNodes(Signal * signal){
}
if(!c_start.m_withoutLog.isclear()){
+ jam();
ndbout_c("Starting nodes wo/ log: %s", c_start.m_withoutLog.getText(buf));
NodeReceiverGroup rg(NDBCNTR, c_start.m_withoutLog);
conf->startType = NodeState::ST_INITIAL_NODE_RESTART;
@@ -777,6 +787,7 @@ Ndbcntr::trySystemRestart(Signal* signal){
jam();
return false;
}
+ jam();
srType = NodeState::ST_INITIAL_START;
c_start.m_starting = c_start.m_withoutLog; // Used for starting...
c_start.m_withoutLog.clear();
@@ -793,13 +804,11 @@ Ndbcntr::trySystemRestart(Signal* signal){
// If we lose with all nodes, then we're in trouble
ndbrequire(!allNodes);
return false;
- break;
case CheckNodeGroups::Partitioning:
jam();
bool allowPartition = (c_start.m_startPartitionedTimeout != (Uint64)~0);
if(allNodes){
- jam();
if(allowPartition){
jam();
break;
@@ -1043,8 +1052,10 @@ void Ndbcntr::ph5ALab(Signal* signal)
return;
case NodeState::ST_NODE_RESTART:
case NodeState::ST_INITIAL_NODE_RESTART:
+ jam();
break;
case NodeState::ST_ILLEGAL_TYPE:
+ jam();
break;
}
ndbrequire(false);
@@ -1602,6 +1613,7 @@ void Ndbcntr::startInsertTransactions(Signal* signal)
ckey = 1;
ctransidPhase = ZTRUE;
+ signal->theData[0] = 0;
signal->theData[1] = reference();
sendSignal(DBTC_REF, GSN_TCSEIZEREQ, signal, 2, JBB);
return;
@@ -1661,7 +1673,7 @@ void Ndbcntr::crSystab7Lab(Signal* signal)
tcKeyReq->requestInfo = reqInfo;
tcKeyReq->tableSchemaVersion = ZSYSTAB_VERSION;
tcKeyReq->transId1 = 0;
- tcKeyReq->transId2 = 0;
+ tcKeyReq->transId2 = ckey;
//-------------------------------------------------------------
// There is no optional part in this TCKEYREQ. There is one
@@ -1729,6 +1741,7 @@ void Ndbcntr::crSystab8Lab(Signal* signal)
}//if
signal->theData[0] = ctcConnectionP;
signal->theData[1] = reference();
+ signal->theData[2] = 0;
sendSignal(DBTC_REF, GSN_TCRELEASEREQ, signal, 2, JBB);
return;
}//Ndbcntr::crSystab8Lab()
@@ -2313,6 +2326,18 @@ void Ndbcntr::execWAIT_GCP_REF(Signal* signal){
void Ndbcntr::execWAIT_GCP_CONF(Signal* signal){
jamEntry();
+ ndbrequire(StopReq::getSystemStop(c_stopRec.stopReq.requestInfo));
+ NodeState newState(NodeState::SL_STOPPING_3, true);
+
+ /**
+ * Inform QMGR so that arbitrator won't kill us
+ */
+ NodeStateRep * rep = (NodeStateRep *)&signal->theData[0];
+ rep->nodeState = newState;
+ rep->nodeState.masterNodeId = cmasterNodeId;
+ rep->nodeState.setNodeGroup(c_nodeGroup);
+ EXECUTE_DIRECT(QMGR, GSN_NODE_STATE_REP, signal, NodeStateRep::SignalLength);
+
if(StopReq::getPerformRestart(c_stopRec.stopReq.requestInfo)){
jam();
StartOrd * startOrd = (StartOrd *)&signal->theData[0];
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index 7ba7d0d25c6..18e46d2d004 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -108,8 +108,8 @@ AsyncFile::AsyncFile() :
}
void
-AsyncFile::doStart(const char * filesystemPath) {
- theFileName.init(filesystemPath);
+AsyncFile::doStart(Uint32 nodeId, const char * filesystemPath) {
+ theFileName.init(nodeId, filesystemPath);
// Stacksize for filesystem threads
// An 8k stack should be enough
@@ -229,7 +229,7 @@ AsyncFile::run()
endReq();
return;
default:
- THREAD_REQUIRE(false, "Using default switch in AsyncFile::run");
+ abort();
break;
}//switch
theReportTo->writeChannel(request);
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
index caa03e52d0c..9a405bc1580 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
@@ -181,7 +181,7 @@ public:
void execute( Request* request );
- void doStart(const char * fspath);
+ void doStart(Uint32 nodeId, const char * fspath);
// its a thread so its always running
void run();
diff --git a/ndb/src/kernel/blocks/ndbfs/Filename.cpp b/ndb/src/kernel/blocks/ndbfs/Filename.cpp
index 494c9c74eb9..660fe6eee94 100644
--- a/ndb/src/kernel/blocks/ndbfs/Filename.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Filename.cpp
@@ -46,7 +46,7 @@ Filename::Filename() :
}
void
-Filename::init(const char * pFileSystemPath){
+Filename::init(Uint32 nodeid, const char * pFileSystemPath){
if (pFileSystemPath == NULL) {
ERROR_SET(fatal, AFS_ERROR_NOPATH, ""," Filename::init()");
return;
@@ -75,8 +75,15 @@ Filename::init(const char * pFileSystemPath){
DIR_SEPARATOR) != 0)
strcat(theBaseDirectory, DIR_SEPARATOR);
-}
+ snprintf(buf2, sizeof(buf2), "ndb_%u_fs%s", nodeid, DIR_SEPARATOR);
+ strcat(theBaseDirectory, buf2);
+#ifdef NDB_WIN32
+ CreateDirectory(theBaseDirectory, 0);
+#else
+ mkdir(theBaseDirectory, S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
+#endif
+}
Filename::~Filename(){
}
diff --git a/ndb/src/kernel/blocks/ndbfs/Filename.hpp b/ndb/src/kernel/blocks/ndbfs/Filename.hpp
index 29aba79c9dc..25c06092436 100644
--- a/ndb/src/kernel/blocks/ndbfs/Filename.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/Filename.hpp
@@ -68,7 +68,7 @@ public:
int levels() const;
const char* c_str() const;
- void init(const char * fileSystemPath);
+ void init(Uint32 nodeid, const char * fileSystemPath);
private:
int theLevelDepth;
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
index 435a6a6b208..03911d195ec 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
@@ -120,8 +120,7 @@ template <class T> void MemoryChannel<T>::writeChannel( T *t)
{
NdbMutex_Lock(theMutexPtr);
- REQUIRE(!full(theWriteIndex, theReadIndex), "Memory Channel Full");
- REQUIRE(theChannel != NULL, "Memory Channel Full");
+ if(full(theWriteIndex, theReadIndex) || theChannel == NULL) abort();
theChannel[theWriteIndex]= t;
++theWriteIndex;
NdbMutex_Unlock(theMutexPtr);
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index fe737fc584b..3b8cb20fe5c 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -559,7 +559,7 @@ Ndbfs::createAsyncFile(){
}
AsyncFile* file = new AsyncFile;
- file->doStart(theFileSystemPath);
+ file->doStart(getOwnNodeId(), theFileSystemPath);
// Put the file in list of all files
theFiles.push_back(file);
@@ -1010,3 +1010,7 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal)
BLOCK_FUNCTIONS(Ndbfs);
+template class Vector<AsyncFile*>;
+template class Vector<OpenFiles::OpenFileItem>;
+template class MemoryChannel<Request>;
+template class Pool<Request>;
diff --git a/ndb/src/kernel/blocks/ndbfs/Pool.hpp b/ndb/src/kernel/blocks/ndbfs/Pool.hpp
index a26fa730727..0410673af6f 100644
--- a/ndb/src/kernel/blocks/ndbfs/Pool.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/Pool.hpp
@@ -215,7 +215,6 @@ protected:
T** tList = theList;
int i;
theList = new T*[aSize+theCurrentSize];
- REQUIRE(theList != 0, "Allocate in Pool.hpp failed");
// allocate full list
for (i = 0; i < theTop; i++) {
theList[i] = tList[i];
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index f2d2edb615d..6017365a463 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -258,7 +258,6 @@ void Qmgr::execCONNECT_REP(Signal* signal)
{
const Uint32 nodeId = signal->theData[0];
c_connectedNodes.set(nodeId);
-
NodeRecPtr nodePtr;
nodePtr.i = getOwnNodeId();
ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
@@ -679,7 +678,6 @@ void Qmgr::execCM_REGREF(Signal* signal)
UintR TaddNodeno = signal->theData[1];
UintR TrefuseReason = signal->theData[2];
Uint32 candidate = signal->theData[3];
-
DEBUG_START3(signal, TrefuseReason);
if(candidate != cpresidentCandidate){
@@ -768,7 +766,6 @@ void Qmgr::execCM_REGREF(Signal* signal)
Uint64 now = NdbTick_CurrentMillisecond();
if((c_regReqReqRecv == cnoOfNodes) || now > c_stopElectionTime){
jam();
-
electionWon();
sendSttorryLab(signal);
@@ -1704,6 +1701,7 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
+
/**
* GREP also need the information that an API node
* (actually a REP node) has failed.
@@ -1978,8 +1976,10 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
apiRegConf->nodeState.dynamicId = -dynamicId;
}
}
+ apiRegConf->nodeState.m_connected_nodes.assign(c_connectedNodes);
+
sendSignal(ref, GSN_API_REGCONF, signal, ApiRegConf::SignalLength, JBB);
-
+
if ((getNodeState().startLevel == NodeState::SL_STARTED ||
getNodeState().getSingleUserMode())
&& apiNodePtr.p->phase == ZAPI_INACTIVE) {
@@ -2138,7 +2138,8 @@ void Qmgr::execPREP_FAILREQ(Signal* signal)
Uint16 TfailureNr = prepFail->failNo;
cnoPrepFailedNodes = prepFail->noOfNodes;
UintR arrayIndex = 0;
- for (Uint32 Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++) {
+ Uint32 Tindex;
+ for (Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++) {
if (NodeBitmask::get(prepFail->theNodes, Tindex)){
cprepFailedNodes[arrayIndex] = Tindex;
arrayIndex++;
@@ -2166,7 +2167,7 @@ void Qmgr::execPREP_FAILREQ(Signal* signal)
guard0 = cnoPrepFailedNodes - 1;
arrGuard(guard0, MAX_NDB_NODES);
- for (Uint32 Tindex = 0; Tindex <= guard0; Tindex++) {
+ for (Tindex = 0; Tindex <= guard0; Tindex++) {
jam();
failReport(signal,
cprepFailedNodes[Tindex],
@@ -2308,6 +2309,15 @@ void Qmgr::execPREP_FAILCONF(Signal* signal)
* Continues via sendCommitFailReq() if successful.
*/
arbitRec.failureNr = cfailureNr;
+ const NodeState & s = getNodeState();
+ if(s.startLevel == NodeState::SL_STOPPING_3 && s.stopping.systemShutdown){
+ jam();
+ /**
+ * We're performing a system shutdown,
+ * don't let arbitrator shut us down
+ */
+ return;
+ }
handleArbitCheck(signal);
return;
}//Qmgr::execPREP_FAILCONF()
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp
index 9718845de43..24e264291e7 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -267,6 +267,40 @@ Suma::execREAD_NODESCONF(Signal* signal){
sendSTTORRY(signal);
}
+#if 0
+void
+Suma::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES,
+ &cnoLogFiles));
+ ndbrequire(cnoLogFiles > 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &cfragrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
+ &ctcConnectrecFileSize));
+ clogFileFileSize = 4 * cnoLogFiles;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
+ cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_SCANS_PER_FRAG;
+
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ return;
+}//Dblqh::execSIZEALT_REP()
+#endif
+
void
Suma::sendSTTORRY(Signal* signal){
signal->theData[0] = 0;
@@ -581,34 +615,33 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
jamEntry();
Uint32 tCase = signal->theData[0];
- if(tCase < 8000 || tCase > 8004)
- return;
-
- SubscriptionPtr subPtr;
- c_subscriptions.getPtr(subPtr, g_subPtrI);
-
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
-
- if(tCase == 8000){
- syncPtr.p->startMeta(signal);
- }
-
- if(tCase == 8001){
- syncPtr.p->startScan(signal);
- }
-
- if(tCase == 8002){
- syncPtr.p->startTrigger(signal);
- }
+ if(tCase >= 8000 && tCase <= 8003){
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, g_subPtrI);
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+
+ if(tCase == 8000){
+ syncPtr.p->startMeta(signal);
+ }
+
+ if(tCase == 8001){
+ syncPtr.p->startScan(signal);
+ }
- if(tCase == 8003){
- subPtr.p->m_subscriptionType = SubCreateReq::SingleTableScan;
- LocalDataBuffer<15> attrs(c_dataBufferPool, syncPtr.p->m_attributeList);
- Uint32 tab = 0;
- Uint32 att[] = { 0, 1, 1 };
- syncPtr.p->m_tableList.append(&tab, 1);
- attrs.append(att, 3);
+ if(tCase == 8002){
+ syncPtr.p->startTrigger(signal);
+ }
+
+ if(tCase == 8003){
+ subPtr.p->m_subscriptionType = SubCreateReq::SingleTableScan;
+ LocalDataBuffer<15> attrs(c_dataBufferPool, syncPtr.p->m_attributeList);
+ Uint32 tab = 0;
+ Uint32 att[] = { 0, 1, 1 };
+ syncPtr.p->m_tableList.append(&tab, 1);
+ attrs.append(att, 3);
+ }
}
if(tCase == 8004){
@@ -1229,6 +1262,9 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
if(!tabPtr.isNull() &&
tabPtr.p->m_schemaVersion != tableDesc.TableVersion){
jam();
+
+ tabPtr.p->release(* this);
+
// oops wrong schema version in stored tabledesc
// we need to find all subscriptions with old table desc
// and all subscribers to this
@@ -3972,3 +4008,6 @@ Suma::execSUMA_HANDOVER_CONF(Signal* signal) {
}
}
}
+
+template void append(DataBuffer<11>&,SegmentedSectionPtr,SectionSegmentPool&);
+
diff --git a/ndb/src/kernel/blocks/trix/Trix.cpp b/ndb/src/kernel/blocks/trix/Trix.cpp
index f058433840c..4088d55c76d 100644
--- a/ndb/src/kernel/blocks/trix/Trix.cpp
+++ b/ndb/src/kernel/blocks/trix/Trix.cpp
@@ -814,8 +814,8 @@ void Trix::executeInsertTransaction(Signal* signal,
for(Uint32 i = 0; i < headerPtr.sz; i++) {
AttributeHeader* keyAttrHead = (AttributeHeader *) headerBuffer + i;
- // Filter out single NULL attributes
- if (keyAttrHead->isNULL() && (i == (Uint32)0) && (headerPtr.sz == (Uint32)2))
+ // Filter out NULL attributes
+ if (keyAttrHead->isNULL())
return;
if (i < subRec->noOfIndexColumns)
@@ -965,3 +965,5 @@ void Trix::checkParallelism(Signal* signal, SubscriptionRecord* subRec)
}
BLOCK_FUNCTIONS(Trix);
+
+template void append(DataBuffer<15>&,SegmentedSectionPtr,SectionSegmentPool&);
diff --git a/ndb/src/kernel/error/ErrorReporter.cpp b/ndb/src/kernel/error/ErrorReporter.cpp
index 20a9dd8a993..f1320c44e09 100644
--- a/ndb/src/kernel/error/ErrorReporter.cpp
+++ b/ndb/src/kernel/error/ErrorReporter.cpp
@@ -27,6 +27,8 @@
#include <NdbConfig.h>
#include <Configuration.hpp>
+#include <NdbAutoPtr.hpp>
+
#define MESSAGE_LENGTH 400
const char* errorType[] = {
@@ -66,23 +68,23 @@ ErrorReporter::formatTimeStampString(){
return (const char *)&theDateTimeString;
}
-void
-ErrorReporter::formatTraceFileName(char* theName, int maxLen){
+int
+ErrorReporter::get_trace_no(){
FILE *stream;
unsigned int traceFileNo;
- char fileNameBuf[255];
- char buf[255];
+
+ char *file_name= NdbConfig_NextTraceFileName(globalData.ownId);
+ NdbAutoPtr<char> tmp_aptr(file_name);
- NdbConfig_HomePath(fileNameBuf, 255);
- strncat(fileNameBuf, "NextTraceFileNo.log", 255);
/*
* Read last number from tracefile
*/
- stream = fopen(fileNameBuf, "r+");
+ stream = fopen(file_name, "r+");
if (stream == NULL){
traceFileNo = 1;
} else {
+ char buf[255];
fgets(buf, 255, stream);
const int scan = sscanf(buf, "%u", &traceFileNo);
if(scan != 1){
@@ -103,16 +105,13 @@ ErrorReporter::formatTraceFileName(char* theName, int maxLen){
/**
* Save new number to the file
*/
- stream = fopen(fileNameBuf, "w");
+ stream = fopen(file_name, "w");
if(stream != NULL){
fprintf(stream, "%u", traceFileNo);
fclose(stream);
}
- /**
- * Format trace file name
- */
- snprintf(theName, maxLen, "%sNDB_TraceFile_%u.trace",
- NdbConfig_HomePath(fileNameBuf, 255), traceFileNo);
+
+ return traceFileNo;
}
@@ -214,16 +213,22 @@ WriteMessage(ErrorCategory thrdType, int thrdMessageID,
unsigned offset;
unsigned long maxOffset; // Maximum size of file.
char theMessage[MESSAGE_LENGTH];
- char theTraceFileName[255];
- char theErrorFileName[255];
- ErrorReporter::formatTraceFileName(theTraceFileName, 255);
+
+ /**
+ * Format trace file name
+ */
+ int file_no= ErrorReporter::get_trace_no();
+ char *theTraceFileName= NdbConfig_TraceFileName(globalData.ownId, file_no);
+ NdbAutoPtr<char> tmp_aptr1(theTraceFileName);
// The first 69 bytes is info about the current offset
Uint32 noMsg = globalEmulatorData.theConfiguration->maxNoOfErrorLogs();
maxOffset = (69 + (noMsg * MESSAGE_LENGTH));
- NdbConfig_ErrorFileName(theErrorFileName, 255);
+ char *theErrorFileName= (char *)NdbConfig_ErrorFileName(globalData.ownId);
+ NdbAutoPtr<char> tmp_aptr2(theErrorFileName);
+
stream = fopen(theErrorFileName, "r+");
if (stream == NULL) { /* If the file could not be opened. */
diff --git a/ndb/src/kernel/error/ErrorReporter.hpp b/ndb/src/kernel/error/ErrorReporter.hpp
index b43b30f1873..2c79f242eea 100644
--- a/ndb/src/kernel/error/ErrorReporter.hpp
+++ b/ndb/src/kernel/error/ErrorReporter.hpp
@@ -23,35 +23,6 @@
#include "Error.hpp"
#include <Emulator.hpp>
-
-#ifdef ASSERT
-#undef ASSERT
-#endif
-
-#define REQUIRE(trueToContinue, message) \
- if ( (trueToContinue) ) { } else { \
- ErrorReporter::handleAssert(message, __FILE__, __LINE__); }
-
-#define THREAD_REQUIRE(trueToContinue, message) \
- if ( (trueToContinue) ) { } else { \
- ErrorReporter::handleThreadAssert(message, __FILE__, __LINE__); }
-
-#ifdef NDEBUG
-#define NDB_ASSERT(trueToContinue, message)
-#else
-#define NDB_ASSERT(trueToContinue, message) \
- if ( !(trueToContinue) ) { \
- ErrorReporter::handleAssert(message, __FILE__, __LINE__); }
-#endif
-
- // Description:
- // This macro is used to report programming errors.
- // Parameters:
- // trueToContinue IN An expression. If it evaluates to 0
- // execution is stopped.
- // message IN A message from the programmer
- // explaining what went wrong.
-
class ErrorReporter
{
public:
@@ -81,7 +52,7 @@ public:
const char* theNameOfTheTraceFile,
char* messptr);
- static void formatTraceFileName(char* theName, int maxLen);
+ static int get_trace_no();
static const char* formatTimeStampString();
diff --git a/ndb/src/kernel/Main.cpp b/ndb/src/kernel/main.cpp
index 7bd4e75ca18..4d3a0afe6ed 100644
--- a/ndb/src/kernel/Main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -20,7 +20,7 @@
#include "Configuration.hpp"
#include <TransporterRegistry.hpp>
-#include "SimBlockList.hpp"
+#include "vm/SimBlockList.hpp"
#include "ThreadConfig.hpp"
#include <SignalLoggerManager.hpp>
#include <NdbOut.hpp>
@@ -31,20 +31,19 @@
#include <LogLevel.hpp>
#include <EventLogger.hpp>
-#include <NodeState.hpp>
+
+#include <NdbAutoPtr.hpp>
#if defined NDB_SOLARIS // ok
#include <sys/processor.h> // For system information
#endif
-#if !defined NDB_SOFTOSE && !defined NDB_OSE
-#include <signal.h> // For process signals
-#endif
-
extern EventLogger g_eventLogger;
void catchsigs(bool ignore); // for process signal handling
-extern "C" void handler(int signo); // for process signal handling
+
+extern "C" void handler_shutdown(int signum); // for process signal handling
+extern "C" void handler_error(int signum); // for process signal handling
// Shows system information
void systemInfo(const Configuration & conf,
@@ -68,18 +67,16 @@ NDB_MAIN(ndb_kernel){
}
{ // Do configuration
- theConfig->setupConfiguration();
+ signal(SIGPIPE, SIG_IGN);
+ theConfig->fetch_configuration();
}
-
- // Get NDB_HOME path
- char homePath[255];
- NdbConfig_HomePath(homePath, 255);
-
+
if (theConfig->getDaemonMode()) {
// Become a daemon
- char lockfile[255], logfile[255];
- snprintf(lockfile, 255, "%snode%d.pid", homePath, globalData.ownId);
- snprintf(logfile, 255, "%snode%d.out", homePath, globalData.ownId);
+ char *lockfile= NdbConfig_PidFileName(globalData.ownId);
+ char *logfile= NdbConfig_StdoutFileName(globalData.ownId);
+ NdbAutoPtr<char> tmp_aptr1(lockfile), tmp_aptr2(logfile);
+
if (NdbDaemon_Make(lockfile, logfile, 0) == -1) {
ndbout << "Cannot become daemon: " << NdbDaemon_ErrorText << endl;
return 1;
@@ -132,24 +129,26 @@ NDB_MAIN(ndb_kernel){
exit(0);
}
g_eventLogger.info("Ndb has terminated (pid %d) restarting", child);
+ theConfig->fetch_configuration();
}
g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid());
+ theConfig->setupConfiguration();
systemInfo(* theConfig, * theConfig->m_logLevel);
-
+
// Load blocks
globalEmulatorData.theSimBlockList->load(* theConfig);
// Set thread concurrency for Solaris' light weight processes
int status;
status = NdbThread_SetConcurrencyLevel(30);
- NDB_ASSERT(status == 0, "Can't set appropriate concurrency level.");
+ assert(status == 0);
#ifdef VM_TRACE
// Create a signal logger
- char buf[255];
- strcpy(buf, homePath);
- FILE * signalLog = fopen(strncat(buf,"Signal.log", 255), "a");
+ char *buf= NdbConfig_SignalLogFileName(globalData.ownId);
+ NdbAutoPtr<char> tmp_aptr(buf);
+ FILE * signalLog = fopen(buf, "a");
globalSignalLoggers.setOwnNodeId(globalData.ownId);
globalSignalLoggers.setOutputStream(signalLog);
#endif
@@ -168,16 +167,38 @@ NDB_MAIN(ndb_kernel){
globalEmulatorData.theThreadConfig->doStart(NodeState::SL_STARTING);
break;
default:
- NDB_ASSERT(0, "Illegal state globalData.theRestartFlag");
+ assert("Illegal state globalData.theRestartFlag" == 0);
}
+ SocketServer socket_server;
+
globalTransporterRegistry.startSending();
globalTransporterRegistry.startReceiving();
+ if (!globalTransporterRegistry.start_service(socket_server)){
+ ndbout_c("globalTransporterRegistry.start_service() failed");
+ exit(-1);
+ }
+
+ if (!globalTransporterRegistry.start_clients()){
+ ndbout_c("globalTransporterRegistry.start_clients() failed");
+ exit(-1);
+ }
+
globalEmulatorData.theWatchDog->doStart();
+ socket_server.startServer();
+
+ // theConfig->closeConfiguration();
+
globalEmulatorData.theThreadConfig->ipControlLoop();
NdbShutdown(NST_Normal);
+
+ socket_server.stopServer();
+ socket_server.stopSessions();
+
+ globalTransporterRegistry.stop_clients();
+
return NRT_Default;
}
@@ -229,74 +250,91 @@ systemInfo(const Configuration & config, const LogLevel & logLevel){
}
+#define handler_register(signum, handler, ignore)\
+{\
+ if (ignore) {\
+ if(signum != SIGCHLD)\
+ signal(signum, SIG_IGN);\
+ } else\
+ signal(signum, handler);\
+}
+
void
catchsigs(bool ignore){
#if ! defined NDB_SOFTOSE && !defined NDB_OSE
-#if defined SIGRTMIN
- #define MAX_SIG_CATCH SIGRTMIN
-#elif defined NSIG
- #define MAX_SIG_CATCH NSIG
-#else
- #error "neither SIGRTMIN or NSIG is defined on this platform, please report bug at bugs.mysql.com"
+ static const int signals_shutdown[] = {
+#ifdef SIGBREAK
+ SIGBREAK,
#endif
-
- // Makes the main process catch process signals, eg installs a
- // handler named "handler". "handler" will then be called is instead
- // of the defualt process signal handler)
- if(ignore){
- for(int i = 1; i < MAX_SIG_CATCH; i++){
- if(i != SIGCHLD)
- signal(i, SIG_IGN);
- }
- } else {
- for(int i = 1; i < MAX_SIG_CATCH; i++){
- signal(i, handler);
- }
- }
+ SIGHUP,
+ SIGINT,
+#if defined SIGPWR
+ SIGPWR,
+#elif defined SIGINFO
+ SIGINFO,
#endif
-}
-
-extern "C"
-void
-handler(int sig){
- switch(sig){
- case SIGHUP: /* 1 - Hang up */
- case SIGINT: /* 2 - Interrupt */
- case SIGQUIT: /* 3 - Quit */
- case SIGTERM: /* 15 - Terminate */
-#ifdef SIGPWR
- case SIGPWR: /* 19 - Power fail */
+ SIGQUIT,
+ SIGTERM,
+#ifdef SIGTSTP
+ SIGTSTP,
+#endif
+ SIGTTIN,
+ SIGTTOU
+ };
+
+ static const int signals_error[] = {
+ SIGABRT,
+ SIGALRM,
+#ifdef SIGBUS
+ SIGBUS,
+#endif
+ SIGCHLD,
+ SIGFPE,
+ SIGILL,
+#ifdef SIGIO
+ SIGIO,
#endif
#ifdef SIGPOLL
- case SIGPOLL: /* 22 */
+ SIGPOLL,
#endif
- case SIGSTOP: /* 23 */
- case SIGTSTP: /* 24 */
- case SIGTTIN: /* 26 */
- case SIGTTOU: /* 27 */
- globalData.theRestartFlag = perform_stop;
- break;
-#ifdef SIGWINCH
- case SIGWINCH:
+ SIGSEGV,
+#ifdef SIGTRAP
+ SIGTRAP
#endif
- case SIGPIPE:
- /**
- * Can happen in TCP Transporter
- *
- * Just ignore
- */
- break;
- default:
- // restart the system
- char errorData[40];
- snprintf(errorData, 40, "Signal %d received", sig);
- ERROR_SET(fatal, 0, errorData, __FILE__);
- break;
- }
+ };
+#endif
+
+ static const int signals_ignore[] = {
+ SIGPIPE
+ };
+
+ size_t i;
+ for(i = 0; i < sizeof(signals_shutdown)/sizeof(signals_shutdown[0]); i++)
+ handler_register(signals_shutdown[i], handler_shutdown, ignore);
+ for(i = 0; i < sizeof(signals_error)/sizeof(signals_error[0]); i++)
+ handler_register(signals_error[i], handler_error, ignore);
+ for(i = 0; i < sizeof(signals_ignore)/sizeof(signals_ignore[0]); i++)
+ handler_register(signals_ignore[i], SIG_IGN, ignore);
+}
+
+extern "C"
+void
+handler_shutdown(int signum){
+ g_eventLogger.info("Received signal %d. Performing stop.", signum);
+ globalData.theRestartFlag = perform_stop;
+}
+
+extern "C"
+void
+handler_error(int signum){
+ g_eventLogger.info("Received signal %d. Running error handler.", signum);
+ // restart the system
+ char errorData[40];
+ snprintf(errorData, 40, "Signal %d received", signum);
+ ERROR_SET(fatal, 0, errorData, __FILE__);
}
-
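The rewritten signal handling above replaces a blanket loop over every signal number with explicit tables of shutdown, error, and ignore signals registered in small loops. A minimal standalone sketch of the same table-driven idea (hypothetical names, not the ndbd code):

// Illustrative sketch of table-driven signal registration (hypothetical, not ndbd code).
// Adding or removing a signal is then a one-line edit of the table.
#include <csignal>
#include <cstddef>

extern "C" void on_shutdown(int signum)
{
  // a real handler would only record the request and let the main loop stop
  (void)signum;
}

static const int shutdown_signals[] = { SIGINT, SIGTERM };

void install_shutdown_handlers()
{
  for (size_t i = 0; i < sizeof(shutdown_signals) / sizeof(shutdown_signals[0]); i++)
    signal(shutdown_signals[i], on_shutdown);
}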
diff --git a/ndb/src/kernel/vm/ArrayPool.hpp b/ndb/src/kernel/vm/ArrayPool.hpp
index 4fc6bb97f73..924ed51ee15 100644
--- a/ndb/src/kernel/vm/ArrayPool.hpp
+++ b/ndb/src/kernel/vm/ArrayPool.hpp
@@ -148,24 +148,6 @@ public:
void releaseList(Uint32 n, Uint32 first, Uint32 last);
//private:
- /**
- * Print
- * (Run operator NdbOut<< on every element)
- */
- void print(NdbOut & out){
- out << "FirstFree = " << firstFree << endl;
- for(Uint32 i = 0; i<size; i++){
-#ifdef ARRAY_GUARD
- if(BitmaskImpl::get(bitmaskSz, theAllocatedBitmask, i))
- out << "A ";
- else
- out << "F ";
-#endif
- out << i << ": " << theArray[i] << " ";
- }
- out << endl;
- }
-
#ifdef DEBUG
Uint32 getNoOfFree2() const {
Uint32 c2 = size;
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index c97ad951cf3..257b7a098e0 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -35,6 +35,7 @@
#include <ndb_limits.h>
#include "pc.hpp"
#include <LogLevel.hpp>
+#include <NdbSleep.h>
extern "C" {
void ndbSetOwnVersion();
@@ -49,7 +50,7 @@ Configuration::init(int argc, const char** argv){
/**
* Default values for arguments
*/
- int _start = 1;
+ int _no_start = 0;
int _initial = 0;
const char* _connect_str = NULL;
int _deamon = 0;
@@ -59,14 +60,18 @@ Configuration::init(int argc, const char** argv){
/**
* Arguments to NDB process
*/
- struct getargs args[] = {
- { "version", 'v', arg_flag, &_print_version, "Print version", "" },
- { "start", 's', arg_flag, &_start, "Start ndb immediately", "" },
- { "nostart", 'n', arg_negative_flag, &_start, "Don't start ndb immediately", "" },
- { "deamon", 'd', arg_flag, &_deamon, "Start ndb as deamon", "" },
- { "initial", 'i', arg_flag, &_initial, "Start ndb immediately", "" },
- { "connect-string", 'c', arg_string, &_connect_str, "\"nodeid=<id>;host=<hostname:port>\"\n", "constr" },
+ struct getargs args[] = {
+ { "version", 'v', arg_flag, &_print_version, "Print ndbd version", "" },
+ { "nostart", 'n', arg_flag, &_no_start,
+ "Don't start ndbd immediately. Ndbd will await command from ndb_mgmd", "" },
+ { "daemon", 'd', arg_flag, &_deamon, "Start ndbd as daemon", "" },
+ { "initial", 'i', arg_flag, &_initial,
+ "Perform initial start of ndbd, including cleaning the file system. Consult documentation before using this", "" },
+
+ { "connect-string", 'c', arg_string, &_connect_str,
+ "Set connect string for connecting to ndb_mgmd. <constr>=\"host=<hostname:port>[;nodeid=<id>]\". Overides specifying entries in NDB_CONNECTSTRING and config file",
+ "<constr>" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -80,7 +85,7 @@ Configuration::init(int argc, const char** argv){
}
#if 0
- ndbout << "start=" <<_start<< endl;
+ ndbout << "no_start=" <<_no_start<< endl;
ndbout << "initial=" <<_initial<< endl;
ndbout << "deamon=" <<_deamon<< endl;
ndbout << "connect_str="<<_connect_str<<endl;
@@ -96,27 +101,18 @@ Configuration::init(int argc, const char** argv){
}
// Check the start flag
- if (_start)
- globalData.theRestartFlag = perform_start;
- else
+ if (_no_start)
globalData.theRestartFlag = initial_state;
+ else
+ globalData.theRestartFlag = perform_start;
// Check the initial flag
if (_initial)
_initialStart = true;
// Check connectstring
- if (_connect_str){
-
- if(_connect_str[0] == '-' ||
- strstr(_connect_str, "host") == 0 ||
- strstr(_connect_str, "nodeid") == 0) {
- ndbout << "Illegal/empty connectString: " << _connect_str << endl;
- arg_printusage(args, num_args, argv[0], desc);
- return false;
- }
+ if (_connect_str)
_connectString = strdup(_connect_str);
- }
// Check deamon flag
if (_deamon)
@@ -138,6 +134,7 @@ Configuration::Configuration()
_fsPath = 0;
_initialStart = false;
_daemonMode = false;
+ m_config_retriever= 0;
}
Configuration::~Configuration(){
@@ -146,37 +143,97 @@ Configuration::~Configuration(){
if(_fsPath != NULL)
free(_fsPath);
+
+ if (m_config_retriever) {
+ delete m_config_retriever;
+ }
}
void
-Configuration::setupConfiguration(){
+Configuration::closeConfiguration(){
+ if (m_config_retriever) {
+ delete m_config_retriever;
+ }
+ m_config_retriever= 0;
+}
+
+void
+Configuration::fetch_configuration(){
/**
* Fetch configuration from management server
*/
- ConfigRetriever cr;
- cr.setConnectString(_connectString);
- stopOnError(true);
- ndb_mgm_configuration * p = cr.getConfig(NDB_VERSION, NODE_TYPE_DB);
+ if (m_config_retriever) {
+ delete m_config_retriever;
+ }
+
+ m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_DB);
+ m_config_retriever->setConnectString(_connectString ? _connectString : "");
+ if(m_config_retriever->init() == -1 ||
+ m_config_retriever->do_connect() == -1){
+
+ const char * s = m_config_retriever->getErrorString();
+ if(s == 0)
+ s = "No error given!";
+
+ /* Set stop on error to true otherwise NDB will
+ go into a restart loop...
+ */
+ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could not connect to ndb_mgmd", s);
+ }
+
+ ConfigRetriever &cr= *m_config_retriever;
+
+ if((globalData.ownId = cr.allocNodeId()) == 0){
+ for(Uint32 i = 0; i<3; i++){
+ NdbSleep_SecSleep(3);
+ if(globalData.ownId = cr.allocNodeId())
+ break;
+ }
+ }
+
+ if(globalData.ownId == 0){
+ ERROR_SET(fatal, ERR_INVALID_CONFIG,
+ "Unable to alloc node id", m_config_retriever->getErrorString());
+ }
+
+ ndb_mgm_configuration * p = cr.getConfig();
if(p == 0){
const char * s = cr.getErrorString();
if(s == 0)
s = "No error given!";
-
+
/* Set stop on error to true otherwise NDB will
go into an restart loop...
- */
-
+ */
+
ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could not fetch configuration"
"/invalid configuration", s);
}
+ if(m_clusterConfig)
+ free(m_clusterConfig);
+
+ m_clusterConfig = p;
+
+ ndb_mgm_configuration_iterator iter(* p, CFG_SECTION_NODE);
+ if (iter.find(CFG_NODE_ID, globalData.ownId)){
+ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched", "DB missing");
+ }
+
+ if(iter.get(CFG_DB_STOP_ON_ERROR, &_stopOnError)){
+ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched",
+ "StopOnError missing");
+ }
+}
- Uint32 nodeId = globalData.ownId = cr.getOwnNodeId();
+void
+Configuration::setupConfiguration(){
+ ndb_mgm_configuration * p = m_clusterConfig;
/**
* Configure transporters
*/
{
- int res = IPCConfig::configureTransporters(nodeId,
+ int res = IPCConfig::configureTransporters(globalData.ownId,
* p,
globalTransporterRegistry);
if(res <= 0){
@@ -238,11 +295,6 @@ Configuration::setupConfiguration(){
}
}
- if(iter.get(CFG_DB_STOP_ON_ERROR, &_stopOnError)){
- ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched",
- "StopOnError missing");
- }
-
if(iter.get(CFG_DB_STOP_ON_ERROR_INSERT, &m_restartOnErrorInsert)){
ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched",
"RestartOnErrorInsert missing");
@@ -259,7 +311,6 @@ Configuration::setupConfiguration(){
ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config);
- m_clusterConfig = p;
m_clusterConfigIter = ndb_mgm_create_configuration_iterator
(p, CFG_SECTION_NODE);
@@ -501,7 +552,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
// The remainder are allowed for use by the scan processes.
/*-----------------------------------------------------------------------*/
cfg.put(CFG_ACC_OP_RECS,
- noOfReplicas*((16 * noOfOperations) / 10 + 50) +
+ ((11 * noOfOperations) / 10 + 50) +
(noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
NODE_RECOVERY_SCAN_OP_RECORDS);
@@ -526,18 +577,9 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
*/
cfg.put(CFG_DICT_ATTRIBUTE,
noOfAttributes);
-
- cfg.put(CFG_DICT_CONNECT,
- noOfOperations + 32);
-
- cfg.put(CFG_DICT_FRAG_CONNECT,
- NO_OF_FRAG_PER_NODE * noOfDBNodes * noOfReplicas);
cfg.put(CFG_DICT_TABLE,
noOfTables);
-
- cfg.put(CFG_DICT_TC_CONNECT,
- 2* noOfOperations);
}
{
@@ -548,7 +590,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
2 * noOfTransactions);
cfg.put(CFG_DIH_CONNECT,
- noOfOperations + 46);
+ noOfOperations + noOfTransactions + 46);
cfg.put(CFG_DIH_FRAG_CONNECT,
NO_OF_FRAG_PER_NODE * noOfTables * noOfDBNodes);
@@ -578,18 +620,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
cfg.put(CFG_LQH_FRAG,
NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
- cfg.put(CFG_LQH_CONNECT,
- noOfReplicas*((11 * noOfOperations) / 10 + 50));
-
cfg.put(CFG_LQH_TABLE,
noOfTables);
cfg.put(CFG_LQH_TC_CONNECT,
- noOfReplicas*((16 * noOfOperations) / 10 + 50));
+ (11 * noOfOperations) / 10 + 50);
- cfg.put(CFG_LQH_REPLICAS,
- noOfReplicas);
-
cfg.put(CFG_LQH_SCAN,
noOfLocalScanRecords);
}
@@ -602,7 +638,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
3 * noOfTransactions);
cfg.put(CFG_TC_TC_CONNECT,
- noOfOperations + 16 + noOfTransactions);
+ (2 * noOfOperations) + 16 + noOfTransactions);
cfg.put(CFG_TC_TABLE,
noOfTables);
@@ -622,7 +658,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
cfg.put(CFG_TUP_OP_RECS,
- noOfReplicas*((16 * noOfOperations) / 10 + 50));
+ (11 * noOfOperations) / 10 + 50);
cfg.put(CFG_TUP_PAGE,
noOfDataPages);
diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp
index 1706ad05867..bd91f3fa74b 100644
--- a/ndb/src/kernel/vm/Configuration.hpp
+++ b/ndb/src/kernel/vm/Configuration.hpp
@@ -20,6 +20,8 @@
#include <mgmapi.h>
#include <ndb_types.h>
+class ConfigRetriever;
+
class Configuration {
public:
Configuration();
@@ -30,7 +32,9 @@ public:
*/
bool init(int argc, const char** argv);
+ void fetch_configuration();
void setupConfiguration();
+ void closeConfiguration();
bool lockPagesInMainMemory() const;
@@ -78,6 +82,8 @@ private:
ndb_mgm_configuration_iterator * m_clusterConfigIter;
ndb_mgm_configuration_iterator * m_ownConfigIterator;
+ ConfigRetriever *m_config_retriever;
+
/**
* arguments to NDB process
*/
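For reference, a hedged sketch of the three-step configuration lifecycle declared above (the actual ndbd startup code is not part of this diff; argc/argv come from main and error handling is abbreviated):

  Configuration theConfig;
  if (!theConfig.init(argc, argv))
    return 1;                        // bad command line arguments
  theConfig.fetch_configuration();   // connect to ndb_mgmd, allocate a node id, fetch the config
  theConfig.setupConfiguration();    // configure transporters and calculate pool sizes
  /* ... run the ndbd kernel ... */
  theConfig.closeConfiguration();    // drop the ConfigRetriever once it is no longer needed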
diff --git a/ndb/src/kernel/vm/DataBuffer.hpp b/ndb/src/kernel/vm/DataBuffer.hpp
index 7dc89aa638c..7f553898eb5 100644
--- a/ndb/src/kernel/vm/DataBuffer.hpp
+++ b/ndb/src/kernel/vm/DataBuffer.hpp
@@ -33,7 +33,7 @@ public:
Uint32 data[sz];
NdbOut& print(NdbOut& out){
out << "[DataBuffer<" << sz << ">::Segment this="
- << hex << (Uint32)this << dec << " nextPool= "
+ << this << dec << " nextPool= "
<< nextPool << " ]";
return out;
}
diff --git a/ndb/src/kernel/vm/Emulator.cpp b/ndb/src/kernel/vm/Emulator.cpp
index 07998794d01..75aea2bda7f 100644
--- a/ndb/src/kernel/vm/Emulator.cpp
+++ b/ndb/src/kernel/vm/Emulator.cpp
@@ -35,8 +35,6 @@
#include <NdbSleep.h>
#include <new>
-#include <signal.h> // For process signals
-
extern "C" {
extern void (* ndb_new_handler)();
}
@@ -202,7 +200,7 @@ NdbShutdown(NdbShutdownType type,
if(type != NST_Normal && type != NST_Restart){
ndbout << "Error handler shutdown completed - " << exitAbort << endl;
-#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
+#if ( defined VM_TRACE || defined ERROR_INSERT ) && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
signal(6, SIG_DFL);
abort();
#else
diff --git a/ndb/src/kernel/vm/MetaData.cpp b/ndb/src/kernel/vm/MetaData.cpp
index bcde6c63272..51afbf21503 100644
--- a/ndb/src/kernel/vm/MetaData.cpp
+++ b/ndb/src/kernel/vm/MetaData.cpp
@@ -47,7 +47,7 @@ MetaData::MetaData(SimulatedBlock* block) :
MetaData::~MetaData()
{
for (int i = false; i <= true; i++) {
- NDB_ASSERT(m_common.m_lock[i] >= m_lock[i], "invalid lock count");
+ assert(m_common.m_lock[i] >= m_lock[i]);
m_common.m_lock[i] -= m_lock[i];
m_lock[i] = 0;
}
diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp
index a6a8a6242cd..18b7f474ddc 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -104,6 +104,11 @@ SimulatedBlock::SimulatedBlock(BlockNumber blockNumber,
UpgradeStartup::installEXEC(this);
CLEAR_ERROR_INSERT_VALUE;
+
+#ifdef VM_TRACE
+ m_global_variables = new Ptr<void> * [1];
+ m_global_variables[0] = 0;
+#endif
}
SimulatedBlock::~SimulatedBlock()
@@ -112,6 +117,10 @@ SimulatedBlock::~SimulatedBlock()
#ifdef VM_TRACE_TIME
printTimes(stdout);
#endif
+
+#ifdef VM_TRACE
+ delete [] m_global_variables;
+#endif
}
void
@@ -136,12 +145,12 @@ SimulatedBlock::installSimulatedBlockFunctions(){
void
SimulatedBlock::addRecSignalImpl(GlobalSignalNumber gsn,
ExecFunction f, bool force){
- REQUIRE(gsn <= MAX_GSN, "Illegal signal added in block (GSN too high)");
- char probData[255];
- snprintf(probData, 255,
- "Signal (%d) already added in block",
- gsn);
- REQUIRE(force || theExecArray[gsn] == 0, probData);
+ if(gsn > MAX_GSN || (!force && theExecArray[gsn] != 0)){
+ char errorMsg[255];
+ snprintf(errorMsg, 255,
+ "Illeagal signal (%d %d)", gsn, MAX_GSN);
+ ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg);
+ }
theExecArray[gsn] = f;
}
@@ -636,12 +645,12 @@ SimulatedBlock::getBatSize(Uint16 blockNo){
}
void*
-SimulatedBlock::allocRecord(const char * type, size_t s, size_t n) const
+SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear)
{
- void* p = NULL;
+ void * p = NULL;
size_t size = n*s;
-
+ refresh_watch_dog();
if (size > 0){
#ifdef VM_TRACE_MEM
ndbout_c("%s::allocRecord(%s, %u, %u) = %u bytes",
@@ -656,43 +665,31 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n) const
char buf1[255];
char buf2[255];
snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s",
- getBlockName(number()), type);
- snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %u bytes", (Uint32)s, (Uint32)n, (Uint32)size);
+ getBlockName(number()), type);
+ snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %u bytes",
+ (Uint32)s, (Uint32)n, (Uint32)size);
ERROR_SET(fatal, ERR_MEMALLOC, buf1, buf2);
}
-
-
- // Set the allocated memory to zero
-#ifndef NDB_PURIFY
-#if defined NDB_OSE
- int pages = (size / 4096);
- if ((size % 4096)!=0)
- pages++;
-
- char* p2 =(char*) p;
- for (int i = 0; i < pages; i++){
- memset(p2, 0, 4096);
- p2 = p2 + 4096;
+
+ if(clear){
+ char * ptr = (char*)p;
+ const Uint32 chunk = 128 * 1024;
+ while(size > chunk){
+ refresh_watch_dog();
+ memset(ptr, 0, chunk);
+ ptr += chunk;
+ size -= chunk;
+ }
+ refresh_watch_dog();
+ memset(ptr, 0, size);
}
-#elif 1
- /**
- * This code should be enabled in order to find logical errors and not
- * initalised errors in the kernel.
- *
- * NOTE! It's not just "uninitialised errors" that are found by doing this
- * it will also find logical errors that have been hidden by all the zeros.
- */
-
- memset(p, 0xF1, size);
-#endif
-#endif
}
return p;
}
void
SimulatedBlock::deallocRecord(void ** ptr,
- const char * type, size_t s, size_t n) const {
+ const char * type, size_t s, size_t n){
(void)type;
(void)s;
(void)n;
@@ -704,6 +701,12 @@ SimulatedBlock::deallocRecord(void ** ptr,
}
void
+SimulatedBlock::refresh_watch_dog()
+{
+ globalData.incrementWatchDogCounter(1);
+}
+
+void
SimulatedBlock::progError(int line, int err_code, const char* extra) const {
jamLine(line);
@@ -1005,7 +1008,8 @@ SimulatedBlock::assembleFragments(Signal * signal){
/**
* FragInfo == 2 or 3
*/
- for(Uint32 i = 0; i<secs; i++){
+ Uint32 i;
+ for(i = 0; i<secs; i++){
Uint32 sectionNo = secNos[i];
ndbassert(sectionNo < 3);
Uint32 sectionPtrI = signal->m_sectionPtr[i].i;
@@ -1027,7 +1031,6 @@ SimulatedBlock::assembleFragments(Signal * signal){
/**
* fragInfo = 3
*/
- Uint32 i;
for(i = 0; i<3; i++){
Uint32 ptrI = fragPtr.p->m_sectionPtrI[i];
if(ptrI != RNIL){
@@ -1777,3 +1780,25 @@ SimulatedBlock::execUPGRADE(Signal* signal){
break;
}
}
+
+#ifdef VM_TRACE
+void
+SimulatedBlock::clear_global_variables(){
+ Ptr<void> ** tmp = m_global_variables;
+ while(* tmp != 0){
+ (* tmp)->i = RNIL;
+ (* tmp)->p = 0;
+ tmp++;
+ }
+}
+
+void
+SimulatedBlock::init_globals_list(void ** tmp, size_t cnt){
+ m_global_variables = new Ptr<void> * [cnt+1];
+ for(size_t i = 0; i<cnt; i++){
+ m_global_variables[i] = (Ptr<void>*)tmp[i];
+ }
+ m_global_variables[cnt] = 0;
+}
+
+#endif
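The VM_TRACE helpers above let a block register its cached Ptr<> members so that executeFunction() resets them to {RNIL, 0} before every signal. A minimal sketch, assuming a hypothetical block with Ptr<Record> members c_recPtr and c_opPtr, placed in that block's constructor:

  #ifdef VM_TRACE
    {
      void* tmp[] = { &c_recPtr, &c_opPtr };               // addresses of Ptr<> members
      init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));  // cleared before each signal
    }
  #endif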
diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/ndb/src/kernel/vm/SimulatedBlock.hpp
index 491d432625e..6d46e9cc377 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -96,7 +96,7 @@ protected:
* Handling of execFunctions
*/
typedef void (SimulatedBlock::* ExecFunction)(Signal* signal);
- void addRecSignalImpl(GlobalSignalNumber g, ExecFunction fun, bool f = false);
+ void addRecSignalImpl(GlobalSignalNumber g, ExecFunction fun, bool f =false);
void installSimulatedBlockFunctions();
ExecFunction theExecArray[MAX_GSN+1];
public:
@@ -304,7 +304,13 @@ protected:
BlockNumber number() const;
BlockReference reference() const;
NodeId getOwnNodeId() const;
-
+
+ /**
+ * Refresh Watch Dog in initialising code
+ *
+ */
+ void refresh_watch_dog();
+
/**
* Prog error
* This function should be called when this node should be shutdown
@@ -344,14 +350,14 @@ protected:
* Allocates memory for the datastructures where ndb keeps the data
*
*/
- void* allocRecord(const char * type, size_t s, size_t n) const ;
+ void* allocRecord(const char * type, size_t s, size_t n, bool clear = true);
/**
* Deallocate record
*
* NOTE: Also resets pointer
*/
- void deallocRecord(void **, const char * type, size_t s, size_t n) const ;
+ void deallocRecord(void **, const char * type, size_t s, size_t n);
/**
* General info event (sent to cluster log)
@@ -441,6 +447,12 @@ public:
} m_timeTrace[MAX_GSN+1];
Uint32 m_currentGsn;
#endif
+
+#ifdef VM_TRACE
+ Ptr<void> **m_global_variables;
+ void clear_global_variables();
+ void init_globals_list(void ** tmp, size_t cnt);
+#endif
};
inline
@@ -448,6 +460,9 @@ void
SimulatedBlock::executeFunction(GlobalSignalNumber gsn, Signal* signal){
ExecFunction f = theExecArray[gsn];
if(gsn <= MAX_GSN && f != 0){
+#ifdef VM_TRACE
+ clear_global_variables();
+#endif
(this->*f)(signal);
return;
}
@@ -458,11 +473,11 @@ SimulatedBlock::executeFunction(GlobalSignalNumber gsn, Signal* signal){
char errorMsg[255];
if (!(gsn <= MAX_GSN)) {
snprintf(errorMsg, 255, "Illegal signal received (GSN %d too high)", gsn);
- REQUIRE(false, errorMsg);
+ ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg);
}
if (!(theExecArray[gsn] != 0)) {
snprintf(errorMsg, 255, "Illegal signal received (GSN %d not added)", gsn);
- REQUIRE(false, errorMsg);
+ ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg);
}
ndbrequire(false);
}
@@ -673,6 +688,5 @@ BLOCK::addRecSignal(GlobalSignalNumber gsn, ExecSignalLocal f, bool force){ \
addRecSignalImpl(gsn, (ExecFunction)f, force);\
}
-
#endif
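The new refresh_watch_dog() hook and the clear flag on allocRecord() exist so that long-running initialisation does not trip the watchdog. A sketch of the intended pattern, with MyRec, noOfRecords and init() as placeholders; passing clear=false skips the built-in zero-fill, so the caller initialises the records itself and bumps the watchdog periodically:

  MyRec* recs = (MyRec*)allocRecord("MyRec", sizeof(MyRec), noOfRecords, false);
  for (size_t i = 0; i < noOfRecords; i++) {
    if ((i & 0xFFF) == 0)
      refresh_watch_dog();        // keep the watchdog counter moving
    recs[i].init();               // explicit initialisation instead of memset
  }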
diff --git a/ndb/src/kernel/vm/ThreadConfig.cpp b/ndb/src/kernel/vm/ThreadConfig.cpp
index d18b20a5bb5..4844bb9a477 100644
--- a/ndb/src/kernel/vm/ThreadConfig.cpp
+++ b/ndb/src/kernel/vm/ThreadConfig.cpp
@@ -147,8 +147,8 @@ void ThreadConfig::ipControlLoop()
// plus checking for any received messages.
//--------------------------------------------------------------------
if (i++ >= 20) {
+ globalTransporterRegistry.update_connections();
globalData.incrementWatchDogCounter(5);
- globalTransporterRegistry.checkConnections();
i = 0;
}//if
diff --git a/ndb/src/kernel/vm/TransporterCallback.cpp b/ndb/src/kernel/vm/TransporterCallback.cpp
index eb7d138895c..158de64c87f 100644
--- a/ndb/src/kernel/vm/TransporterCallback.cpp
+++ b/ndb/src/kernel/vm/TransporterCallback.cpp
@@ -434,5 +434,28 @@ reportDisconnect(void * callbackObj, NodeId nodeId, Uint32 errNo){
globalScheduler.execute(&signal, JBA, CMVMI, GSN_DISCONNECT_REP);
}
-
+void
+SignalLoggerManager::printSegmentedSection(FILE * output,
+ const SignalHeader & sh,
+ const SegmentedSectionPtr ptr[3],
+ unsigned i)
+{
+ fprintf(output, "SECTION %u type=segmented", i);
+ if (i >= 3) {
+ fprintf(output, " *** invalid ***\n");
+ return;
+ }
+ const Uint32 len = ptr[i].sz;
+ SectionSegment * ssp = ptr[i].p;
+ Uint32 pos = 0;
+ fprintf(output, " size=%u\n", (unsigned)len);
+ while (pos < len) {
+ if (pos > 0 && pos % SectionSegment::DataLength == 0) {
+ ssp = g_sectionSegmentPool.getPtr(ssp->m_nextSegment);
+ }
+ printDataWord(output, pos, ssp->theData[pos % SectionSegment::DataLength]);
+ }
+ if (len > 0)
+ putc('\n', output);
+}
diff --git a/ndb/src/mgmapi/Makefile.am b/ndb/src/mgmapi/Makefile.am
index e4fa1d449c6..0f0e1cea5d8 100644
--- a/ndb/src/mgmapi/Makefile.am
+++ b/ndb/src/mgmapi/Makefile.am
@@ -9,5 +9,7 @@ DEFS_LOC = -DNO_DEBUG_MESSAGES
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
+#ndbtest_PROGRAMS = ndb_test_mgmapi
+
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index bb4b6be8221..e78b0d41cf2 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -24,6 +24,7 @@
#include <NdbOut.hpp>
#include <SocketServer.hpp>
+#include <SocketClient.hpp>
#include <Parser.hpp>
#include <OutputStream.hpp>
#include <InputStream.hpp>
@@ -63,7 +64,7 @@
0, \
0, 0 }
-class ParserDummy : SocketServer::Session
+class ParserDummy : private SocketServer::Session
{
public:
ParserDummy(NDB_SOCKET_TYPE sock);
@@ -282,6 +283,7 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
while((name = iter.next()) != NULL) {
PropertiesType t;
Uint32 val_i;
+ Uint64 val_64;
BaseString val_s;
cmd_args->getTypeOf(name, &t);
@@ -290,11 +292,15 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
cmd_args->get(name, &val_i);
out.println("%s: %d", name, val_i);
break;
+ case PropertiesType_Uint64:
+ cmd_args->get(name, &val_64);
+ out.println("%s: %Ld", name, val_64);
+ break;
case PropertiesType_char:
cmd_args->get(name, val_s);
out.println("%s: %s", name, val_s.c_str());
break;
- default:
+ case PropertiesType_Properties:
/* Ignore */
break;
}
@@ -318,8 +324,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
/**
* Print some info about why the parser returns NULL
*/
-// ndbout << " status=" << ctx.m_status << ", curr="
-// << ctx.m_currentToken << endl;
+ //ndbout << " status=" << ctx.m_status << ", curr="
+ //<< ctx.m_currentToken << endl;
}
#ifdef MGMAPI_LOG
else {
@@ -362,30 +368,11 @@ ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv)
/**
* Do connect
*/
- const NDB_SOCKET_TYPE sockfd = socket(AF_INET, SOCK_STREAM, 0);
- if (sockfd == NDB_INVALID_SOCKET) {
- SET_ERROR(handle, NDB_MGM_ILLEGAL_SOCKET, "");
- return -1;
- }
-
- struct sockaddr_in servaddr;
- memset(&servaddr, 0, sizeof(servaddr));
- servaddr.sin_family = AF_INET;
- servaddr.sin_port = htons(handle->port);
- // Convert ip address presentation format to numeric format
- const int res1 = Ndb_getInAddr(&servaddr.sin_addr, handle->hostname);
- if (res1 != 0) {
- DEBUG("Ndb_getInAddr(...) == -1");
- setError(handle, EINVAL, __LINE__, "Invalid hostname/address");
- return -1;
- }
-
- const int res2 = connect(sockfd, (struct sockaddr*) &servaddr,
- sizeof(servaddr));
- if (res2 == -1) {
- NDB_CLOSE_SOCKET(sockfd);
- setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, "Unable to connect to %s",
- mgmsrv);
+ SocketClient s(handle->hostname, handle->port);
+ const NDB_SOCKET_TYPE sockfd = s.connect();
+ if (sockfd < 0) {
+ setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
+ "Unable to connect to %s", mgmsrv);
return -1;
}
@@ -491,11 +478,12 @@ extern "C"
const char *
ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status)
{
- for(int i = 0; i<no_of_status_values; i++)
+ int i;
+ for(i = 0; i<no_of_status_values; i++)
if(status_values[i].value == status)
return status_values[i].str;
- for(int i = 0; i<no_of_status_values; i++)
+ for(i = 0; i<no_of_status_values; i++)
if(status_values[i].value == NDB_MGM_NODE_STATUS_UNKNOWN)
return status_values[i].str;
@@ -562,7 +550,7 @@ ndb_mgm_get_status(NdbMgmHandle handle)
buf[strlen(buf)-1] = '\0';
if(strcmp("node status", buf) != 0) {
- SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, "");
+ SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, buf);
return NULL;
}
@@ -1450,11 +1438,7 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) {
const Properties *prop;
prop = ndb_mgm_call(handle, reply, "get config", &args);
-
- if(prop == NULL) {
- SET_ERROR(handle, EIO, "Unable to fetch config");
- return 0;
- }
+ CHECK_REPLY(prop, 0);
do {
const char * buf;
@@ -1515,7 +1499,8 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) {
ndbout_c("Failed to unpack buffer");
break;
}
-
+
+ delete prop;
return (ndb_mgm_configuration*)cvf.m_cfg;
} while(0);
@@ -1523,6 +1508,52 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) {
return 0;
}
+extern "C"
+int
+ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodeid, int nodetype)
+{
+
+ CHECK_HANDLE(handle, 0);
+ CHECK_CONNECTED(handle, 0);
+
+ Properties args;
+ args.put("version", version);
+ args.put("nodetype", nodetype);
+ args.put("nodeid", *pnodeid);
+ args.put("user", "mysqld");
+ args.put("password", "mysqld");
+ args.put("public key", "a public key");
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("get nodeid reply", NULL, ""),
+ MGM_ARG("nodeid", Int, Optional, "Error message"),
+ MGM_ARG("result", String, Mandatory, "Error message"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop= ndb_mgm_call(handle, reply, "get nodeid", &args);
+ CHECK_REPLY(prop, -1);
+
+ int res= -1;
+ do {
+ const char * buf;
+ if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
+ "Could not alloc node id: %s",buf);
+ break;
+ }
+ if(!prop->get("nodeid", pnodeid) != 0){
+ ndbout_c("ERROR Message: <nodeid Unspecified>\n");
+ break;
+ }
+ res= 0;
+ }while(0);
+
+ delete prop;
+ return res;
+}
+
/*****************************************************************************
* Global Replication
******************************************************************************/
@@ -1559,3 +1590,130 @@ ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request,
delete reply;
return 0;
}
+
+extern "C"
+int
+ndb_mgm_set_int_parameter(NdbMgmHandle handle,
+ int node,
+ int param,
+ unsigned value,
+ struct ndb_mgm_reply*){
+ CHECK_HANDLE(handle, 0);
+ CHECK_CONNECTED(handle, 0);
+
+ Properties args;
+ args.put("node: ", node);
+ args.put("param: ", param);
+ args.put("value: ", value);
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("set parameter reply", NULL, ""),
+ MGM_ARG("result", String, Mandatory, "Error message"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop= ndb_mgm_call(handle, reply, "set parameter", &args);
+ CHECK_REPLY(prop, -1);
+
+ int res= -1;
+ do {
+ const char * buf;
+ if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ ndbout_c("ERROR Message: %s\n", buf);
+ break;
+ }
+ res= 0;
+ } while(0);
+
+ delete prop;
+ return res;
+}
+
+extern "C"
+int
+ndb_mgm_set_int64_parameter(NdbMgmHandle handle,
+ int node,
+ int param,
+ unsigned long long value,
+ struct ndb_mgm_reply*){
+ CHECK_HANDLE(handle, 0);
+ CHECK_CONNECTED(handle, 0);
+
+ Properties args;
+ args.put("node: ", node);
+ args.put("param: ", param);
+ args.put("value: ", value);
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("set parameter reply", NULL, ""),
+ MGM_ARG("result", String, Mandatory, "Error message"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop= ndb_mgm_call(handle, reply, "set parameter", &args);
+
+ if(prop == NULL) {
+ SET_ERROR(handle, EIO, "Unable to set parameter");
+ return -1;
+ }
+
+ int res= -1;
+ do {
+ const char * buf;
+ if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ ndbout_c("ERROR Message: %s\n", buf);
+ break;
+ }
+ res= 0;
+ } while(0);
+
+ delete prop;
+ return res;
+}
+
+extern "C"
+int
+ndb_mgm_set_string_parameter(NdbMgmHandle handle,
+ int node,
+ int param,
+ const char * value,
+ struct ndb_mgm_reply*){
+ CHECK_HANDLE(handle, 0);
+ CHECK_CONNECTED(handle, 0);
+
+ Properties args;
+ args.put("node: ", node);
+ args.put("parameter: ", param);
+ args.put("value: ", value);
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("set parameter reply", NULL, ""),
+ MGM_ARG("result", String, Mandatory, "Error message"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop= ndb_mgm_call(handle, reply, "set parameter", &args);
+
+ if(prop == NULL) {
+ SET_ERROR(handle, EIO, "Unable to set parameter");
+ return -1;
+ }
+
+ int res= -1;
+ do {
+ const char * buf;
+ if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ ndbout_c("ERROR Message: %s\n", buf);
+ break;
+ }
+ res= 0;
+ } while(0);
+
+ delete prop;
+ return res;
+}
+
+template class Vector<const ParserRow<ParserDummy>*>;
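Client-side usage of the new ndb_mgm_alloc_nodeid() call, sketched with a placeholder connect address and abbreviated error handling (NODE_TYPE_API matches the check done in the ndb_mgmd "get nodeid" handler):

  NdbMgmHandle h = ndb_mgm_create_handle();
  if (ndb_mgm_connect(h, "localhost:2200") == 0) {
    unsigned nodeid = 0;          // 0 asks ndb_mgmd for any free id of this type
    if (ndb_mgm_alloc_nodeid(h, NDB_VERSION, &nodeid, NODE_TYPE_API) == 0)
      ndbout_c("allocated node id %u", nodeid);
  }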
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index cf9d885847a..141a0be0eff 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -634,7 +634,8 @@ CommandInterpreter::executeHelp(char* parameters)
void
CommandInterpreter::executeShow(char* parameters)
-{
+{
+ int i;
connect();
if (emptyString(parameters)) {
ndbout << "Cluster Configuration" << endl
@@ -648,16 +649,27 @@ CommandInterpreter::executeShow(char* parameters)
}
int
- ndb_nodes = 0,
- api_nodes = 0,
- mgm_nodes = 0;
-
- for(int i=0; i < state->no_of_nodes; i++) {
+ master_id= 0,
+ ndb_nodes= 0,
+ api_nodes= 0,
+ mgm_nodes= 0;
+
+ for(i=0; i < state->no_of_nodes; i++) {
+ if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB &&
+ state->node_states[i].version != 0){
+ master_id= state->node_states[i].dynamic_id;
+ break;
+ }
+ }
+
+ for(i=0; i < state->no_of_nodes; i++) {
switch(state->node_states[i].node_type) {
case NDB_MGM_NODE_TYPE_API:
api_nodes++;
break;
case NDB_MGM_NODE_TYPE_NDB:
+ if (state->node_states[i].dynamic_id < master_id)
+ master_id= state->node_states[i].dynamic_id;
ndb_nodes++;
break;
case NDB_MGM_NODE_TYPE_MGM:
@@ -673,15 +685,18 @@ CommandInterpreter::executeShow(char* parameters)
<< " NDB Node(s)"
<< endl;
- for(int i=0; i < state->no_of_nodes; i++) {
+ for(i=0; i < state->no_of_nodes; i++) {
if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB) {
ndbout << "DB node:\t" << state->node_states[i].node_id;
if(state->node_states[i].version != 0) {
ndbout << " (Version: "
<< getMajor(state->node_states[i].version) << "."
<< getMinor(state->node_states[i].version) << "."
- << getBuild(state->node_states[i].version) << ")" << endl;
-
+ << getBuild(state->node_states[i].version) << ","
+ << " Nodegroup: " << state->node_states[i].node_group;
+ if (state->node_states[i].dynamic_id == master_id)
+ ndbout << ", Master";
+ ndbout << ")" << endl;
} else
{
ndbout << " (not connected) " << endl;
@@ -691,13 +706,13 @@ CommandInterpreter::executeShow(char* parameters)
}
ndbout << endl;
- ndbout << api_nodes
- << " API Node(s)"
+ ndbout << mgm_nodes
+ << " MGM Node(s)"
<< endl;
- for(int i=0; i < state->no_of_nodes; i++) {
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_API) {
- ndbout << "API node:\t" << state->node_states[i].node_id;
+ for(i=0; i < state->no_of_nodes; i++) {
+ if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM) {
+ ndbout << "MGM node:\t" << state->node_states[i].node_id;
if(state->node_states[i].version != 0) {
ndbout << " (Version: "
<< getMajor(state->node_states[i].version) << "."
@@ -706,19 +721,19 @@ CommandInterpreter::executeShow(char* parameters)
} else
{
- ndbout << " (not connected) " << endl;
+ ndbout << " (no version information available) " << endl;
}
}
}
ndbout << endl;
-
- ndbout << mgm_nodes
- << " MGM Node(s)"
+
+ ndbout << api_nodes
+ << " API Node(s)"
<< endl;
- for(int i=0; i < state->no_of_nodes; i++) {
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM) {
- ndbout << "MGM node:\t" << state->node_states[i].node_id;
+ for(i=0; i < state->no_of_nodes; i++) {
+ if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_API) {
+ ndbout << "API node:\t" << state->node_states[i].node_id;
if(state->node_states[i].version != 0) {
ndbout << " (Version: "
<< getMajor(state->node_states[i].version) << "."
@@ -727,11 +742,12 @@ CommandInterpreter::executeShow(char* parameters)
} else
{
- ndbout << " (no version information available) " << endl;
+ ndbout << " (not connected) " << endl;
}
}
}
ndbout << endl;
+
// ndbout << helpTextShow;
return;
} else if (strcmp(parameters, "PROPERTIES") == 0 ||
@@ -759,6 +775,7 @@ CommandInterpreter::executeShow(char* parameters)
void
CommandInterpreter::executeClusterLog(char* parameters)
{
+ int i;
connect();
if (parameters != 0 && strlen(parameters) != 0) {
enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL;
@@ -846,10 +863,10 @@ CommandInterpreter::executeClusterLog(char* parameters)
ndbout << "Cluster logging is disabled." << endl;
- for(int i = 0; i<7;i++)
+ for(i = 0; i<7;i++)
printf("enabled[%d] = %d\n", i, enabled[i]);
ndbout << "Severities enabled: ";
- for(int i = 1; i < 7; i++) {
+ for(i = 1; i < 7; i++) {
if(enabled[i])
ndbout << names[i] << " ";
}
@@ -1298,14 +1315,15 @@ CommandInterpreter::executeLog(int processId,
return;
}
int len=0;
- for(Uint32 i=0; i<blocks.size(); i++) {
+ Uint32 i;
+ for(i=0; i<blocks.size(); i++) {
ndbout_c("blocks %s %d",blocks[i], strlen(blocks[i]));
len += strlen(blocks[i]);
}
len += blocks.size()*2;
char * blockNames = (char*)malloc(len);
- for(Uint32 i=0; i<blocks.size(); i++) {
+ for(i=0; i<blocks.size(); i++) {
strcat(blockNames, blocks[i]);
strcat(blockNames, "|");
}
@@ -1478,7 +1496,7 @@ CommandInterpreter::executeSet(int /*processId*/,
<< endl;
}
else {
- NDB_ASSERT(false, "");
+ assert(false);
}
}
else {
@@ -1497,7 +1515,7 @@ CommandInterpreter::executeSet(int /*processId*/,
}
else {
// The primary is not tried to write if the write of backup file fails
- NDB_ASSERT(false, "");
+ abort();
}
}
free(newpar);
@@ -2020,3 +2038,5 @@ CmdBackupCallback(const MgmtSrvr::BackupEvent & event){
ndbout << str << endl;
}
#endif
+
+template class Vector<char const*>;
diff --git a/ndb/src/mgmclient/CpcClient.cpp b/ndb/src/mgmclient/CpcClient.cpp
index 74fa1a828ed..0291573a704 100644
--- a/ndb/src/mgmclient/CpcClient.cpp
+++ b/ndb/src/mgmclient/CpcClient.cpp
@@ -478,9 +478,9 @@ SimpleCpcClient::connect() {
if (::connect(cpc_sock, (struct sockaddr*) &sa, sizeof(sa)) < 0)
return -1;
- cpc_in = new SocketInputStream(cpc_sock);
+ cpc_in = new SocketInputStream(cpc_sock, 60000);
cpc_out = new SocketOutputStream(cpc_sock);
-
+
return 0;
}
@@ -557,4 +557,6 @@ SimpleCpcClient::cpc_call(const char *cmd,
SimpleCpcClient::ParserDummy::ParserDummy(NDB_SOCKET_TYPE sock)
: SocketServer::Session(sock) {
}
-
+
+template class Vector<SimpleCpcClient::Process>;
+template class Vector<ParserRow<SimpleCpcClient::ParserDummy> const*>;
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index 2dcadf9369d..df6659df0b1 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -24,9 +24,7 @@
#include "CommandInterpreter.hpp"
-#include <signal.h>
-
-const char *progname = "mgmtclient";
+const char *progname = "ndb_mgm";
static CommandInterpreter* com;
@@ -47,14 +45,13 @@ handler(int sig){
int main(int argc, const char** argv){
int optind = 0;
- const char *_default_connectstring = "host=localhost:2200;nodeid=0";
const char *_host = 0;
int _port = 0;
int _help = 0;
int _try_reconnect = 0;
struct getargs args[] = {
- { "try-reconnect", 0, arg_integer, &_try_reconnect, "", "" },
+ { "try-reconnect", 't', arg_integer, &_try_reconnect, "Specify number of retries for connecting to ndb_mgmd, default infinite", "#" },
{ "usage", '?', arg_flag, &_help, "Print help", "" },
};
int num_args = sizeof(args) / sizeof(args[0]); /* Number of arguments */
@@ -76,9 +73,9 @@ int main(int argc, const char** argv){
_port = atoi(argv[1]);
}
} else {
- if(cfg.init(false, 0, 0, _default_connectstring) && cfg.items > 0 && cfg.ids[0]->type == MgmId_TCP){
- _host = cfg.ids[0]->data.tcp.remoteHost;
- _port = cfg.ids[0]->data.tcp.port;
+ if(cfg.init(0, 0) && cfg.ids.size() > 0 && cfg.ids[0].type == MgmId_TCP){
+ _host = cfg.ids[0].name.c_str();
+ _port = cfg.ids[0].port;
} else {
cfg.printError();
cfg.printUsage();
diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp
index 004fc463b70..316b6d5795e 100644
--- a/ndb/src/mgmsrv/CommandInterpreter.cpp
+++ b/ndb/src/mgmsrv/CommandInterpreter.cpp
@@ -378,7 +378,8 @@ void CommandInterpreter::executeHelp(char* parameters) {
(void)parameters; // Don't want compiler warning
if (emptyString(parameters)) {
- for (int i = 0; i<noOfHelpTexts; i++) {
+ unsigned i;
+ for (i = 0; i<noOfHelpTexts; i++) {
ndbout << helpTexts[i] << endl;
}
@@ -388,7 +389,7 @@ void CommandInterpreter::executeHelp(char* parameters) {
<< endl;
ndbout << "<category> = ";
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++){
+ for(i = 0; i<EventLogger::noOfEventCategoryNames; i++){
ndbout << EventLogger::eventCategoryNames[i].name;
if (i < EventLogger::noOfEventCategoryNames - 1) {
ndbout << " | ";
@@ -1195,11 +1196,12 @@ CommandInterpreter::jonas(int processId, const char* parameters, bool all) {
data[0] = 12;
data[1] = 13;
-
- for(Uint32 i = 0; i<70; i++)
+
+ unsigned i;
+ for(i = 0; i<70; i++)
sec0[i] = i;
- for(Uint32 i = 0; i<123; i++)
+ for(i = 0; i<123; i++)
sec1[i] = 70+i;
signal.set(0, CMVMI, GSN_TESTSIG, 3);
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 713433cb8e9..0936ec234cf 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -43,10 +43,12 @@
#include <DebuggerNames.hpp>
#include <ndb_version.h>
-#include "SocketServer.hpp"
+#include <SocketServer.hpp>
#include "NodeLogLevel.hpp"
#include <NdbConfig.h>
+#include <NdbAutoPtr.hpp>
+
#include <mgmapi.h>
#include <mgmapi_configuration.hpp>
#include <mgmapi_config_parameters.h>
@@ -170,7 +172,7 @@ MgmtSrvr::signalRecvThreadRun()
siglist.push_back(SigMatch(GSN_MGM_UNLOCK_CONFIG_REQ,
&MgmtSrvr::handle_MGM_UNLOCK_CONFIG_REQ));
- while(1) {
+ while(!_isStopThread) {
SigMatch *handler = NULL;
NdbApiSignal *signal = NULL;
if(m_signalRecvQueue.waitFor(siglist, handler, signal)) {
@@ -240,23 +242,20 @@ MgmtSrvr::startEventLog()
const char * tmp;
BaseString logdest;
- char clusterLog[MAXPATHLEN];
- NdbConfig_ClusterLogFileName(clusterLog, sizeof(clusterLog));
-
-
+ char *clusterLog= NdbConfig_ClusterLogFileName(_ownNodeId);
+ NdbAutoPtr<char> tmp_aptr(clusterLog);
+
if(ndb_mgm_get_string_parameter(iter, CFG_LOG_DESTINATION, &tmp) == 0){
logdest.assign(tmp);
}
ndb_mgm_destroy_iterator(iter);
- if(logdest.length()==0) {
+ if(logdest.length() == 0 || logdest == "") {
logdest.assfmt("FILE:filename=%s,maxsize=1000000,maxfiles=6",
clusterLog);
}
-
if(!g_EventLogger.addHandler(logdest)) {
- ndbout << "ERROR: cannot parse \"" << logdest << "\"" << endl;
- exit(1);
+ ndbout << "Warning: could not add log destination \"" << logdest.c_str() << "\"" << endl;
}
}
@@ -391,6 +390,99 @@ MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const
}
int
+MgmtSrvr::getPort() const {
+ const Properties *mgmProps;
+
+ ndb_mgm_configuration_iterator * iter =
+ ndb_mgm_create_configuration_iterator(_config->m_configValues,
+ CFG_SECTION_NODE);
+ if(iter == 0)
+ return 0;
+
+ if(ndb_mgm_find(iter, CFG_NODE_ID, getOwnNodeId()) != 0){
+ ndbout << "Could not retrieve configuration for Node "
+ << getOwnNodeId() << " in config file." << endl
+ << "Have you set correct NodeId for this node?" << endl;
+ ndb_mgm_destroy_iterator(iter);
+ return 0;
+ }
+
+ unsigned type;
+ if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0 ||
+ type != NODE_TYPE_MGM){
+ ndbout << "Local node id " << getOwnNodeId()
+ << " is not defined as management server" << endl
+ << "Have you set correct NodeId for this node?" << endl;
+ ndb_mgm_destroy_iterator(iter);
+ return 0;
+ }
+
+ Uint32 port = 0;
+ if(ndb_mgm_get_int_parameter(iter, CFG_MGM_PORT, &port) != 0){
+ ndbout << "Could not find PortNumber in the configuration file." << endl;
+ ndb_mgm_destroy_iterator(iter);
+ return 0;
+ }
+
+ ndb_mgm_destroy_iterator(iter);
+
+ /*****************
+ * Set Stat Port *
+ *****************/
+#if 0
+ if (!mgmProps->get("PortNumberStats", &tmp)){
+ ndbout << "Could not find PortNumberStats in the configuration file."
+ << endl;
+ return false;
+ }
+ glob.port_stats = tmp;
+#endif
+
+#if 0
+ const char * host;
+ if(ndb_mgm_get_string_parameter(iter, mgmProps->get("ExecuteOnComputer", host)){
+ ndbout << "Failed to find \"ExecuteOnComputer\" for my node" << endl;
+ ndbout << "Unable to verify own hostname" << endl;
+ return false;
+ }
+
+ const char * hostname;
+ {
+ const Properties * p;
+ char buf[255];
+ snprintf(buf, sizeof(buf), "Computer_%s", host.c_str());
+ if(!glob.cluster_config->get(buf, &p)){
+ ndbout << "Failed to find computer " << host << " in config" << endl;
+ ndbout << "Unable to verify own hostname" << endl;
+ return false;
+ }
+ if(!p->get("HostName", &hostname)){
+ ndbout << "Failed to find \"HostName\" for computer " << host
+ << " in config" << endl;
+ ndbout << "Unable to verify own hostname" << endl;
+ return false;
+ }
+ if(NdbHost_GetHostName(buf) != 0){
+ ndbout << "Unable to get own hostname" << endl;
+ ndbout << "Unable to verify own hostname" << endl;
+ return false;
+ }
+ }
+
+ const char * ip_address;
+ if(mgmProps->get("IpAddress", &ip_address)){
+ glob.use_specific_ip = true;
+ glob.interface_name = strdup(ip_address);
+ return true;
+ }
+
+ glob.interface_name = strdup(hostname);
+#endif
+
+ return port;
+}
+
+int
MgmtSrvr::getStatPort() const {
#if 0
const Properties *mgmProps;
@@ -417,9 +509,9 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
_ownReference(0),
theSignalIdleList(NULL),
theWaitState(WAIT_SUBSCRIBE_CONF),
- theConfCount(0) {
+ theConfCount(0),
+ m_allocated_resources(*this) {
- _ownNodeId = nodeId;
_config = NULL;
_isStatPortActive = false;
_isClusterLogStatActive = false;
@@ -427,8 +519,11 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
_isStopThread = false;
_logLevelThread = NULL;
_logLevelThreadSleep = 500;
+ m_signalRecvThread = NULL;
_startedNodeId = 0;
+ theFacade = 0;
+
m_newConfig = NULL;
m_configFilename = configFilename;
setCallback(CmdBackupCallback);
@@ -486,6 +581,14 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
_clusterLogLevelList = new NodeLogLevelList();
_props = NULL;
+
+ _ownNodeId= 0;
+ NodeId tmp= nodeId;
+ if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0)){
+ ndbout << "Unable to obtain requested nodeid " << nodeId;
+ exit(-1);
+ }
+ _ownNodeId = tmp;
}
@@ -510,8 +613,7 @@ MgmtSrvr::start()
return false;
}
theFacade = TransporterFacade::start_instance
- (_ownNodeId,
- (ndb_mgm_configuration*)_config->m_configValues);
+ (_ownNodeId,(ndb_mgm_configuration*)_config->m_configValues);
if(theFacade == 0) {
DEBUG("MgmtSrvr.cpp: theFacade is NULL.");
@@ -573,8 +675,7 @@ MgmtSrvr::~MgmtSrvr()
stopEventLog();
- NdbCondition_Destroy(theMgmtWaitForResponseCondPtr);
- NdbMutex_Destroy(m_configMutex);
+ NdbCondition_Destroy(theMgmtWaitForResponseCondPtr); NdbMutex_Destroy(m_configMutex);
if(m_newConfig != NULL)
free(m_newConfig);
@@ -593,6 +694,11 @@ MgmtSrvr::~MgmtSrvr()
NdbThread_WaitFor(_logLevelThread, &res);
NdbThread_Destroy(&_logLevelThread);
}
+
+ if (m_signalRecvThread != NULL) {
+ NdbThread_WaitFor(m_signalRecvThread, &res);
+ NdbThread_Destroy(&m_signalRecvThread);
+ }
}
//****************************************************************************
@@ -818,7 +924,7 @@ MgmtSrvr::restart(bool nostart, bool initalStart, bool abort,
return 0;
}
- TransporterFacade::instance()->lock_mutex();
+ theFacade->lock_mutex();
int waitTime = timeOut/m_stopRec.sentCount;
if (receiveOptimisedResponse(waitTime) != 0) {
m_stopRec.inUse = false;
@@ -985,16 +1091,15 @@ MgmtSrvr::version(int * stopCount, bool abort,
m_versionRec.callback = callback;
m_versionRec.inUse = true ;
-
- for(Uint32 i = 0; i<MAX_NODES; i++) {
+ Uint32 i;
+ for(i = 0; i<MAX_NODES; i++) {
if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM) {
m_versionRec.callback(i, NDB_VERSION, this,0);
}
}
- for(Uint32 i = 0; i<MAX_NODES; i++) {
+ for(i = 0; i<MAX_NODES; i++) {
if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) {
- node =
- TransporterFacade::instance()->theClusterMgr->getNodeInfo(i);
+ node = theFacade->theClusterMgr->getNodeInfo(i);
version = node.m_info.m_version;
if(theFacade->theClusterMgr->getNodeInfo(i).connected)
m_versionRec.callback(i, version, this,0);
@@ -1003,7 +1108,7 @@ MgmtSrvr::version(int * stopCount, bool abort,
}
}
- for(Uint32 i = 0; i<MAX_NODES; i++) {
+ for(i = 0; i<MAX_NODES; i++) {
if (getNodeType(i) == NDB_MGM_NODE_TYPE_API) {
return sendVersionReq(i);
}
@@ -1148,7 +1253,7 @@ MgmtSrvr::stop(int * stopCount, bool abort, StopCallback callback,
if(m_stopRec.sentCount > 0){
if(callback == 0){
- TransporterFacade::instance()->lock_mutex();
+ theFacade->lock_mutex();
receiveOptimisedResponse(timeOut / m_stopRec.sentCount);
} else {
return 0;
@@ -1178,7 +1283,7 @@ MgmtSrvr::enterSingleUser(int * stopCount, Uint32 singleUserNodeId,
for(Uint32 i = 0; i<MAX_NODES; i++) {
if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) {
- node = TransporterFacade::instance()->theClusterMgr->getNodeInfo(i);
+ node = theFacade->theClusterMgr->getNodeInfo(i);
if((node.m_state.startLevel != NodeState::SL_STARTED) &&
(node.m_state.startLevel != NodeState::SL_NOTHING)) {
return 5063;
@@ -1337,7 +1442,7 @@ MgmtSrvr::status(int processId,
}
const ClusterMgr::Node node =
- TransporterFacade::instance()->theClusterMgr->getNodeInfo(processId);
+ theFacade->theClusterMgr->getNodeInfo(processId);
if(!node.connected){
* _status = NDB_MGM_NODE_STATUS_NO_CONTACT;
@@ -1463,7 +1568,8 @@ MgmtSrvr::setEventReportingLevelImpl(int processId,
const SetLogLevelOrd & ll,
bool isResend)
{
- for(Uint32 i = 0; i<ll.noOfEntries; i++){
+ Uint32 i;
+ for(i = 0; i<ll.noOfEntries; i++){
// Save log level for the cluster log
if (!isResend) {
NodeLogLevel* n = NULL;
@@ -1494,7 +1600,7 @@ MgmtSrvr::setEventReportingLevelImpl(int processId,
EventSubscribeReq * dst =
CAST_PTR(EventSubscribeReq, signal->getDataPtrSend());
- for(Uint32 i = 0; i<ll.noOfEntries; i++){
+ for(i = 0; i<ll.noOfEntries; i++){
dst->theCategories[i] = ll.theCategories[i];
dst->theLevels[i] = ll.theLevels[i];
}
@@ -1523,7 +1629,8 @@ int
MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll,
bool isResend)
{
- for(Uint32 i = 0; i<ll.noOfEntries; i++){
+ Uint32 i;
+ for(i = 0; i<ll.noOfEntries; i++){
// Save log level for the cluster log
if (!isResend) {
NodeLogLevel* n = NULL;
@@ -1554,7 +1661,7 @@ MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll,
SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal->getDataPtrSend());
- for(Uint32 i = 0; i<ll.noOfEntries; i++){
+ for(i = 0; i<ll.noOfEntries; i++){
dst->theCategories[i] = ll.theCategories[i];
dst->theLevels[i] = ll.theLevels[i];
}
@@ -1698,7 +1805,7 @@ MgmtSrvr::setSignalLoggingMode(int processId, LogMode mode,
logSpec = TestOrd::InputOutputSignals;
break;
default:
- NDB_ASSERT(false, "Unexpected value, MgmtSrvr::setSignalLoggingMode");
+ assert("Unexpected value, MgmtSrvr::setSignalLoggingMode" == 0);
}
NdbApiSignal* signal = getSignal();
@@ -1896,6 +2003,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
int returnCode;
int gsn = signal->readSignalNumber();
+
switch (gsn) {
case GSN_API_VERSION_CONF: {
if (theWaitState == WAIT_VERSION) {
@@ -2000,8 +2108,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
req->senderData = 19;
req->backupDataLen = 0;
- int i = TransporterFacade::instance()->sendSignalUnCond(&aSignal,
- aNodeId);
+ int i = theFacade->sendSignalUnCond(&aSignal, aNodeId);
if(i == 0){
return;
}
@@ -2083,7 +2190,7 @@ MgmtSrvr::handleStopReply(NodeId nodeId, Uint32 errCode)
bool failure = true;
for(Uint32 i = 0; i<MAX_NODES; i++) {
if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) {
- node = TransporterFacade::instance()->theClusterMgr->getNodeInfo(i);
+ node = theFacade->theClusterMgr->getNodeInfo(i);
if((node.m_state.startLevel == NodeState::SL_NOTHING))
failure = true;
else
@@ -2187,6 +2294,89 @@ MgmtSrvr::getNodeType(NodeId nodeId) const
return nodeTypes[nodeId];
}
+#ifdef NDB_WIN32
+static NdbMutex & f_node_id_mutex = * NdbMutex_Create();
+#else
+static NdbMutex f_node_id_mutex = NDB_MUTEX_INITIALIZER;
+#endif
+
+bool
+MgmtSrvr::alloc_node_id(NodeId * nodeId,
+ enum ndb_mgm_node_type type,
+ struct sockaddr *client_addr,
+ SOCKET_SIZE_TYPE *client_addr_len)
+{
+ Guard g(&f_node_id_mutex);
+#if 0
+ ndbout << "MgmtSrvr::getFreeNodeId type=" << type
+ << " *nodeid=" << *nodeId << endl;
+#endif
+
+ NodeBitmask connected_nodes(m_reserved_nodes);
+ if (theFacade && theFacade->theClusterMgr) {
+ for(Uint32 i = 0; i < MAX_NODES; i++)
+ if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) {
+ const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i);
+ if (node.connected)
+ connected_nodes.bitOR(node.m_state.m_connected_nodes);
+ }
+ }
+
+ ndb_mgm_configuration_iterator iter(*(ndb_mgm_configuration *)_config->m_configValues,
+ CFG_SECTION_NODE);
+ for(iter.first(); iter.valid(); iter.next()) {
+ unsigned tmp= 0;
+ if(iter.get(CFG_NODE_ID, &tmp)) abort();
+ if (connected_nodes.get(tmp))
+ continue;
+ if (*nodeId && *nodeId != tmp)
+ continue;
+ unsigned type_c;
+ if(iter.get(CFG_TYPE_OF_SECTION, &type_c)) abort();
+ if(type_c != type)
+ continue;
+ const char *config_hostname = 0;
+ if(iter.get(CFG_NODE_HOST, &config_hostname)) abort();
+
+ if (config_hostname && config_hostname[0] != 0 && client_addr) {
+ // check hostname compatibility
+ struct in_addr config_addr;
+ const void *tmp= &(((sockaddr_in*)client_addr)->sin_addr);
+ if(Ndb_getInAddr(&config_addr, config_hostname) != 0
+ || memcmp(&config_addr, tmp, sizeof(config_addr)) != 0) {
+ struct in_addr tmp_addr;
+ if(Ndb_getInAddr(&tmp_addr, "localhost") != 0
+ || memcmp(&tmp_addr, tmp, sizeof(config_addr)) != 0) {
+ // not localhost
+#if 0
+ ndbout << "MgmtSrvr::getFreeNodeId compare failed for \"" << config_hostname
+ << "\" id=" << tmp << endl;
+#endif
+ continue;
+ }
+ // connecting through localhost
+ // check if config_hostname matches hostname
+ char my_hostname[256];
+ if (gethostname(my_hostname, sizeof(my_hostname)) != 0)
+ continue;
+ if(Ndb_getInAddr(&tmp_addr, my_hostname) != 0
+ || memcmp(&tmp_addr, &config_addr, sizeof(config_addr)) != 0) {
+ // no match
+ continue;
+ }
+ }
+ }
+ *nodeId= tmp;
+ m_reserved_nodes.set(tmp);
+#if 0
+ ndbout << "MgmtSrvr::getFreeNodeId found type=" << type
+ << " *nodeid=" << *nodeId << endl;
+#endif
+ return true;
+ }
+ return false;
+}
+
bool
MgmtSrvr::getNextNodeId(NodeId * nodeId, enum ndb_mgm_node_type type) const
{
@@ -2573,3 +2763,128 @@ MgmtSrvr::getPrimaryNode() const {
return 0;
#endif
}
+
+
+MgmtSrvr::Allocated_resources::Allocated_resources(MgmtSrvr &m)
+ : m_mgmsrv(m)
+{
+}
+
+MgmtSrvr::Allocated_resources::~Allocated_resources()
+{
+ Guard g(&f_node_id_mutex);
+ m_mgmsrv.m_reserved_nodes.bitANDC(m_reserved_nodes);
+}
+
+void
+MgmtSrvr::Allocated_resources::reserve_node(NodeId id)
+{
+ m_reserved_nodes.set(id);
+}
+
+int
+MgmtSrvr::setDbParameter(int node, int param, const char * value,
+ BaseString& msg){
+ /**
+ * Check parameter
+ */
+ ndb_mgm_configuration_iterator iter(* _config->m_configValues,
+ CFG_SECTION_NODE);
+ if(iter.first() != 0){
+ msg.assign("Unable to find node section (iter.first())");
+ return -1;
+ }
+
+ Uint32 type = NODE_TYPE_DB + 1;
+ if(node != 0){
+ if(iter.find(CFG_NODE_ID, node) != 0){
+ msg.assign("Unable to find node (iter.find())");
+ return -1;
+ }
+ if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){
+ msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))");
+ return -1;
+ }
+ } else {
+ do {
+ if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){
+ msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))");
+ return -1;
+ }
+ if(type == NODE_TYPE_DB)
+ break;
+ } while(iter.next() == 0);
+ }
+
+ if(type != NODE_TYPE_DB){
+ msg.assfmt("Invalid node type or no such node (%d %d)",
+ type, NODE_TYPE_DB);
+ return -1;
+ }
+
+ int p_type;
+ unsigned val_32;
+ unsigned long long val_64;
+ const char * val_char;
+ do {
+ p_type = 0;
+ if(iter.get(param, &val_32) == 0){
+ val_32 = atoi(value);
+ break;
+ }
+
+ p_type++;
+ if(iter.get(param, &val_64) == 0){
+ val_64 = strtoll(value, 0, 10);
+ break;
+ }
+ p_type++;
+ if(iter.get(param, &val_char) == 0){
+ val_char = value;
+ break;
+ }
+ msg.assign("Could not get parameter");
+ return -1;
+ } while(0);
+
+ bool res = false;
+ do {
+ int ret = iter.get(CFG_TYPE_OF_SECTION, &type);
+ assert(ret == 0);
+
+ if(type != NODE_TYPE_DB)
+ continue;
+
+ Uint32 node;
+ ret = iter.get(CFG_NODE_ID, &node);
+ assert(ret == 0);
+
+ ConfigValues::Iterator i2(_config->m_configValues->m_config,
+ iter.m_config);
+ switch(p_type){
+ case 0:
+ res = i2.set(param, val_32);
+ ndbout_c("Updateing node %d param: %d to %d", node, param, val_32);
+ break;
+ case 1:
+ res = i2.set(param, val_64);
+ ndbout_c("Updateing node %d param: %d to %Ld", node, param, val_32);
+ break;
+ case 2:
+ res = i2.set(param, val_char);
+ ndbout_c("Updateing node %d param: %d to %s", node, param, val_char);
+ break;
+ default:
+ abort();
+ }
+ assert(res);
+ } while(node == 0 && iter.next() == 0);
+
+ msg.assign("Success");
+ return 0;
+}
+
+template class Vector<SigMatch>;
+#if __SUNPRO_CC != 0x560
+template bool SignalQueue::waitFor<SigMatch>(Vector<SigMatch>&, SigMatch*&, NdbApiSignal*&, unsigned);
+#endif
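The per-session reservation pattern these changes assume, as a sketch; mgmsrv stands in for the MgmtSrvr instance, and client_addr/client_addr_len for the peer address (a sockaddr* and a SOCKET_SIZE_TYPE*) that a real MgmApiSession already has:

  {
    MgmtSrvr::Allocated_resources ar(mgmsrv);
    NodeId id = 0;                                   // 0 = any free id
    if (mgmsrv.alloc_node_id(&id, NDB_MGM_NODE_TYPE_API, client_addr, client_addr_len))
      ar.reserve_node(id);                           // remembered by this session
  } // ~Allocated_resources removes the session's ids from MgmtSrvr::m_reserved_nodes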
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 1d394a14857..b26eaeb4ab9 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -68,6 +68,22 @@ public:
virtual void println_statistics(const BaseString &s) = 0;
};
+ // some compilers need all of this
+ class Allocated_resources;
+ friend class Allocated_resources;
+ class Allocated_resources {
+ public:
+ Allocated_resources(class MgmtSrvr &m);
+ ~Allocated_resources();
+ // methods to reserve/allocate resources which
+ // will be freed when running destructor
+ void reserve_node(NodeId id);
+ bool is_reserved(NodeId nodeId) { return m_reserved_nodes.get(nodeId);}
+ private:
+ MgmtSrvr &m_mgmsrv;
+ NodeBitmask m_reserved_nodes;
+ };
+
/**
* Set a reference to the socket server.
*/
@@ -150,10 +166,12 @@ public:
enum LogMode {In, Out, InOut, Off};
/* Constructor */
+
MgmtSrvr(NodeId nodeId, /* Local nodeid */
const BaseString &config_filename, /* Where to save config */
const BaseString &ndb_config_filename, /* Ndb.cfg filename */
Config * config);
+ NodeId getOwnNodeId() const {return _ownNodeId;};
/**
* Read (initial) config file, create TransporterFacade,
@@ -448,6 +466,8 @@ public:
* @return false if none found
*/
bool getNextNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type) const ;
+ bool alloc_node_id(NodeId * _nodeId, enum ndb_mgm_node_type type,
+ struct sockaddr *client_addr, SOCKET_SIZE_TYPE *client_addr_len);
/**
*
@@ -492,8 +512,14 @@ public:
* @return statistic port number.
*/
int getStatPort() const;
+ /**
+ * Returns the port number.
+ * @return port number.
+ */
+ int getPort() const;
-
+ int setDbParameter(int node, int parameter, const char * value, BaseString&);
+
//**************************************************************************
private:
//**************************************************************************
@@ -530,13 +556,14 @@ private:
BaseString m_configFilename;
BaseString m_localNdbConfigFilename;
Uint32 m_nextConfigGenerationNumber;
+
+ NodeBitmask m_reserved_nodes;
+ Allocated_resources m_allocated_resources;
int _setVarReqResult; // The result of the SET_VAR_REQ response
Statistics _statistics; // handleSTATISTICS_CONF store the result here,
// and getStatistics reads it.
-
-
//**************************************************************************
// Specific signal handling methods
//**************************************************************************
diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
index 10316bd2851..44c2aadd1e2 100644
--- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
@@ -288,16 +288,15 @@ MgmtSrvr::readConfig() {
Config *
MgmtSrvr::fetchConfig() {
- ConfigRetriever cr;
+ ConfigRetriever cr(NDB_VERSION, NODE_TYPE_MGM);
cr.setLocalConfigFileName(m_localNdbConfigFilename.c_str());
- struct ndb_mgm_configuration * tmp = cr.getConfig(NDB_VERSION,
- NODE_TYPE_MGM);
+ struct ndb_mgm_configuration * tmp = cr.getConfig();
if(tmp != 0){
Config * conf = new Config();
conf->m_configValues = tmp;
return conf;
}
-
+
return 0;
}
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index 739eef90c52..7bf408583de 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <string.h>
+#include <ndb_global.h>
#include <ctype.h>
#include <uucode.h>
@@ -31,6 +31,8 @@
#include "Services.hpp"
+extern bool g_StopServer;
+
static const unsigned int MAX_READ_TIMEOUT = 1000 ;
static const unsigned int MAX_WRITE_TIMEOUT = 100 ;
@@ -121,6 +123,15 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("version", Int, Mandatory, "Configuration version number"),
MGM_ARG("node", Int, Optional, "Node ID"),
+ MGM_CMD("get nodeid", &MgmApiSession::get_nodeid, ""),
+ MGM_ARG("version", Int, Mandatory, "Configuration version number"),
+ MGM_ARG("nodetype", Int, Mandatory, "Node type"),
+ MGM_ARG("transporter", String, Optional, "Transporter type"),
+ MGM_ARG("nodeid", Int, Optional, "Node ID"),
+ MGM_ARG("user", String, Mandatory, "Password"),
+ MGM_ARG("password", String, Mandatory, "Password"),
+ MGM_ARG("public key", String, Mandatory, "Public key"),
+
MGM_CMD("get version", &MgmApiSession::getVersion, ""),
MGM_CMD("get status", &MgmApiSession::getStatus, ""),
@@ -216,6 +227,16 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("parameter", String, Mandatory, "Parameter"),
MGM_ARG("value", String, Mandatory, "Value"),
+ MGM_CMD("config lock", &MgmApiSession::configLock, ""),
+
+ MGM_CMD("config unlock", &MgmApiSession::configUnlock, ""),
+ MGM_ARG("commit", Int, Mandatory, "Commit changes"),
+
+ MGM_CMD("set parameter", &MgmApiSession::setParameter, ""),
+ MGM_ARG("node", String, Mandatory, "Node"),
+ MGM_ARG("parameter", String, Mandatory, "Parameter"),
+ MGM_ARG("value", String, Mandatory, "Value"),
+
MGM_END()
};
@@ -224,6 +245,19 @@ MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock)
m_input = new SocketInputStream(sock);
m_output = new SocketOutputStream(sock);
m_parser = new Parser_t(commands, *m_input, true, true, true);
+ m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv);
+}
+
+MgmApiSession::~MgmApiSession()
+{
+ if (m_input)
+ delete m_input;
+ if (m_output)
+ delete m_output;
+ if (m_parser)
+ delete m_parser;
+ if (m_allocated_resources)
+ delete m_allocated_resources;
}
void
@@ -333,6 +367,82 @@ backward(const char * base, const Properties* reply){
}
void
+MgmApiSession::get_nodeid(Parser_t::Context &,
+ const class Properties &args)
+{
+ const char *cmd= "get nodeid reply";
+ Uint32 version, nodeid= 0, nodetype= 0xff;
+ const char * transporter;
+ const char * user;
+ const char * password;
+ const char * public_key;
+
+ args.get("version", &version);
+ args.get("nodetype", &nodetype);
+ args.get("transporter", &transporter);
+ args.get("nodeid", &nodeid);
+ args.get("user", &user);
+ args.get("password", &password);
+ args.get("public key", &public_key);
+
+ bool compatible;
+ switch (nodetype) {
+ case NODE_TYPE_MGM:
+ case NODE_TYPE_API:
+ compatible = ndbCompatible_mgmt_api(NDB_VERSION, version);
+ break;
+ case NODE_TYPE_DB:
+ compatible = ndbCompatible_mgmt_ndb(NDB_VERSION, version);
+ break;
+ default:
+ m_output->println(cmd);
+ m_output->println("result: unknown nodetype %d", nodetype);
+ m_output->println("");
+ return;
+ }
+
+ struct sockaddr addr;
+ SOCKET_SIZE_TYPE addrlen= sizeof(addr);
+ int r = getpeername(m_socket, &addr, &addrlen);
+ if (r != 0 ) {
+ m_output->println(cmd);
+ m_output->println("result: getpeername(%d) failed, err= %d", m_socket, r);
+ m_output->println("");
+ return;
+ }
+
+ NodeId tmp= nodeid;
+ if(tmp == 0 || !m_allocated_resources->is_reserved(tmp)){
+ if (!m_mgmsrv.alloc_node_id(&tmp, (enum ndb_mgm_node_type)nodetype,
+ &addr, &addrlen)){
+ m_output->println(cmd);
+ m_output->println("result: no free nodeid %d for nodetype %d",
+ nodeid, nodetype);
+ m_output->println("");
+ return;
+ }
+ }
+
+#if 0
+ if (!compatible){
+ m_output->println(cmd);
+ m_output->println("result: incompatible version mgmt 0x%x and node 0x%x",
+ NDB_VERSION, version);
+ m_output->println("");
+ return;
+ }
+#endif
+
+ m_output->println(cmd);
+ m_output->println("nodeid: %u", tmp);
+ m_output->println("result: Ok");
+ m_output->println("");
+ m_allocated_resources->reserve_node(tmp);
+
+ return;
+}
+
+void
MgmApiSession::getConfig_common(Parser_t::Context &,
const class Properties &args,
bool compat) {
@@ -432,7 +542,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
m_output->println("Content-Transfer-Encoding: base64");
m_output->println("");
m_output->println(str.c_str());
- m_output->println("");
return;
}
@@ -905,10 +1014,27 @@ MgmApiSession::stop(Parser<MgmApiSession>::Context &,
nodes.push_back(atoi(p));
}
+ int stop_self= 0;
+
+ for(size_t i=0; i < nodes.size(); i++) {
+ if (nodes[i] == m_mgmsrv.getOwnNodeId()) {
+ stop_self= 1;
+ if (i != nodes.size()-1) {
+ m_output->println("stop reply");
+ m_output->println("result: server must be stopped last");
+ m_output->println("");
+ return;
+ }
+ }
+ }
+
int stopped = 0, result = 0;
for(size_t i=0; i < nodes.size(); i++)
- if((result = m_mgmsrv.stopNode(nodes[i], abort != 0)) == 0)
+ if (nodes[i] != m_mgmsrv.getOwnNodeId()) {
+ if((result = m_mgmsrv.stopNode(nodes[i], abort != 0)) == 0)
+ stopped++;
+ } else
stopped++;
m_output->println("stop reply");
@@ -918,6 +1044,9 @@ MgmApiSession::stop(Parser<MgmApiSession>::Context &,
m_output->println("result: Ok");
m_output->println("stopped: %d", stopped);
m_output->println("");
+
+ if (stop_self)
+ g_StopServer= true;
}
@@ -1119,7 +1248,8 @@ void
MgmStatService::println_statistics(const BaseString &line){
MutexVector<NDB_SOCKET_TYPE> copy(m_sockets.size());
m_sockets.lock();
- for(int i = m_sockets.size() - 1; i >= 0; i--){
+ int i;
+ for(i = m_sockets.size() - 1; i >= 0; i--){
if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){
copy.push_back(m_sockets[i]);
m_sockets.erase(i, false);
@@ -1127,7 +1257,7 @@ MgmStatService::println_statistics(const BaseString &line){
}
m_sockets.unlock();
- for(int i = copy.size() - 1; i >= 0; i--){
+ for(i = copy.size() - 1; i >= 0; i--){
NDB_CLOSE_SOCKET(copy[i]);
copy.erase(i);
}
@@ -1142,5 +1272,28 @@ MgmStatService::stopSessions(){
NDB_CLOSE_SOCKET(m_sockets[i]);
m_sockets.erase(i);
}
+}
+
+void
+MgmApiSession::setParameter(Parser_t::Context &,
+ Properties const &args) {
+ BaseString node, param, value;
+ args.get("node", node);
+ args.get("parameter", param);
+ args.get("value", value);
+ BaseString result;
+ int ret = m_mgmsrv.setDbParameter(atoi(node.c_str()),
+ atoi(param.c_str()),
+ value.c_str(),
+ result);
+
+ m_output->println("set parameter reply");
+ m_output->println("message: %s", result.c_str());
+ m_output->println("result: %d", ret);
+ m_output->println("");
}
+
+template class MutexVector<int>;
+template class Vector<ParserRow<MgmApiSession> const*>;
+template class Vector<unsigned short>;
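The get_nodeid handler added above speaks the mgmd line-based text protocol: the client sends a command block terminated by an empty line, and the server answers with "get nodeid reply", a "nodeid:" line and a "result:" line. A minimal, hypothetical illustration of that exchange follows; the helper names are invented for this sketch and the real mgmapi client code is not part of this patch.

    #include <cstdio>
    #include <sstream>
    #include <string>

    // Hypothetical sketch only: format a "get nodeid" request and pull the
    // assigned id out of the reply, mirroring the property names used by
    // MgmApiSession::get_nodeid() above.
    std::string build_get_nodeid_request(unsigned version, unsigned nodetype,
                                         unsigned nodeid) {
      std::ostringstream req;
      req << "get nodeid\n"
          << "version: "  << version  << "\n"
          << "nodetype: " << nodetype << "\n"
          << "nodeid: "   << nodeid   << "\n"
          << "\n";                        // empty line terminates the command
      return req.str();
    }

    // Scan the reply block for "nodeid: <n>"; returns 0 on success.
    int parse_get_nodeid_reply(const std::string& reply, unsigned& nodeid) {
      std::istringstream in(reply);
      std::string line;
      while (std::getline(in, line))
        if (sscanf(line.c_str(), "nodeid: %u", &nodeid) == 1)
          return 0;
      return -1;                          // no nodeid line: allocation failed
    }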
diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp
index 3690f1a5a93..9cf8b59be8f 100644
--- a/ndb/src/mgmsrv/Services.hpp
+++ b/ndb/src/mgmsrv/Services.hpp
@@ -36,6 +36,7 @@ private:
InputStream *m_input;
OutputStream *m_output;
Parser_t *m_parser;
+ MgmtSrvr::Allocated_resources *m_allocated_resources;
void getConfig_common(Parser_t::Context &ctx,
const class Properties &args,
@@ -43,6 +44,7 @@ private:
public:
MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock);
+ virtual ~MgmApiSession();
void runSession();
void getStatPort(Parser_t::Context &ctx, const class Properties &args);
@@ -51,6 +53,7 @@ public:
void getConfig_old(Parser_t::Context &ctx);
#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
+ void get_nodeid(Parser_t::Context &ctx, const class Properties &args);
void getVersion(Parser_t::Context &ctx, const class Properties &args);
void getStatus(Parser_t::Context &ctx, const class Properties &args);
void getInfoClusterLog(Parser_t::Context &ctx, const class Properties &args);
@@ -79,6 +82,8 @@ public:
void configUnlock(Parser_t::Context &ctx, const class Properties &args);
void configChange(Parser_t::Context &ctx, const class Properties &args);
+ void setParameter(Parser_t::Context &ctx, const class Properties &args);
+
void repCommand(Parser_t::Context &ctx, const class Properties &args);
};
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index d9eb0001c44..719226b51df 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -16,11 +16,9 @@
#include <ndb_global.h>
-#include <signal.h>
-
#include "MgmtSrvr.hpp"
#include "EventLogger.hpp"
-#include "Config.hpp"
+#include <Config.hpp>
#include "InitConfigFileParser.hpp"
#include <SocketServer.hpp>
#include "Services.hpp"
@@ -37,6 +35,8 @@
#include <mgmapi_config_parameters.h>
#include <getarg.h>
+#include <NdbAutoPtr.hpp>
+
#if defined NDB_OSE || defined NDB_SOFTOSE
#include <efs.h>
#else
@@ -88,7 +88,6 @@ static MgmGlobals glob;
******************************************************************************/
static bool readLocalConfig();
static bool readGlobalConfig();
-static bool setPortNo();
/**
* Global variables
@@ -100,16 +99,16 @@ extern int global_mgmt_server_check;
int _print_version = 0;
struct getargs args[] = {
- { "version", 0, arg_flag, &_print_version,
- "Print versions"},
- { NULL, 'c', arg_string, &glob.config_filename,
- "Running cluster configuration file", "filename" },
- { NULL, 'd', arg_flag, &glob.daemon,
- "Daemon mode" },
+ { "version", 'v', arg_flag, &_print_version,
+ "Print ndb_mgmd version"},
+ { "config-file", 'c', arg_string, &glob.config_filename,
+ "Specify cluster configuration file", "filename" },
+ { "daemon", 'd', arg_flag, &glob.daemon,
+ "Run ndb_mgmd in daemon mode" },
{ NULL, 'l', arg_string, &glob.local_config_filename,
- "Local configuration file (Ndb.cfg)",
+ "Specify configuration file connect string (will default use Ndb.cfg if available)",
"filename" },
- { NULL, 'n', arg_flag, &glob.non_interactive,
+ { "nodaemon", 'n', arg_flag, &glob.non_interactive,
"Don't run as daemon, but don't read from stdin", "non-interactive" }
};
@@ -146,7 +145,9 @@ NDB_MAIN(mgmsrv){
exit(1);
}
glob.socketServer = new SocketServer();
+
MgmApiService * mapi = new MgmApiService();
+
MgmStatService * mstat = new MgmStatService();
/****************************
@@ -157,16 +158,34 @@ NDB_MAIN(mgmsrv){
if (!readGlobalConfig())
goto error_end;
- if (!setPortNo())
+ glob.mgmObject = new MgmtSrvr(glob.localNodeId,
+ BaseString(glob.config_filename),
+ BaseString(glob.local_config_filename == 0 ?
+ "" : glob.local_config_filename),
+ glob.cluster_config);
+
+ glob.cluster_config = 0;
+ glob.localNodeId= glob.mgmObject->getOwnNodeId();
+
+ if (glob.localNodeId == 0) {
goto error_end;
-
+ }
+
+ glob.port= glob.mgmObject->getPort();
+
+ if (glob.port == 0)
+ goto error_end;
+
+ glob.interface_name = 0;
+ glob.use_specific_ip = false;
+
if(!glob.use_specific_ip){
if(!glob.socketServer->tryBind(glob.port, glob.interface_name)){
ndbout_c("Unable to setup port: %s:%d!\n"
"Please check if the port is already used,\n"
"(perhaps a mgmtsrvr is already running),\n"
"and if you are executing on the correct computer",
- glob.interface_name, glob.port);
+ (glob.interface_name ? glob.interface_name : "*"), glob.port);
goto error_end;
}
free(glob.interface_name);
@@ -190,31 +209,25 @@ NDB_MAIN(mgmsrv){
goto error_end;
}
- glob.mgmObject = new MgmtSrvr(glob.localNodeId,
- BaseString(glob.config_filename),
- BaseString(glob.local_config_filename == 0 ? "" : glob.local_config_filename),
- glob.cluster_config);
-
- glob.cluster_config = 0;
-
if(!glob.mgmObject->check_start()){
- ndbout_c("Unable to start management server.");
+ ndbout_c("Unable to check start management server.");
ndbout_c("Probably caused by illegal initial configuration file.");
goto error_end;
}
if (glob.daemon) {
// Become a daemon
- char homePath[255],lockfile[255], logfile[255];
- NdbConfig_HomePath(homePath, 255);
- snprintf(lockfile, 255, "%snode%d.pid", homePath, glob.localNodeId);
- snprintf(logfile, 255, "%snode%d.out", homePath, glob.localNodeId);
+ char *lockfile= NdbConfig_PidFileName(glob.localNodeId);
+ char *logfile= NdbConfig_StdoutFileName(glob.localNodeId);
+ NdbAutoPtr<char> tmp_aptr1(lockfile), tmp_aptr2(logfile);
+
if (NdbDaemon_Make(lockfile, logfile, 0) == -1) {
ndbout << "Cannot become daemon: " << NdbDaemon_ErrorText << endl;
return 1;
}
}
+ signal(SIGPIPE, SIG_IGN);
if(!glob.mgmObject->start()){
ndbout_c("Unable to start management server.");
ndbout_c("Probably caused by illegal initial configuration file.");
@@ -233,8 +246,8 @@ NDB_MAIN(mgmsrv){
ndbout_c(msg);
g_EventLogger.info(msg);
- snprintf(msg, 256, "Command port: %d, Statistics port: %d",
- glob.port, glob.port_stats);
+ snprintf(msg, 256, "Id: %d, Command port: %d, Statistics port: %d",
+ glob.localNodeId, glob.port, glob.port_stats);
ndbout_c(msg);
g_EventLogger.info(msg);
@@ -298,14 +311,11 @@ MgmGlobals::~MgmGlobals(){
static bool
readLocalConfig(){
// Read local config file
- ConfigRetriever cr;
- cr.setLocalConfigFileName(glob.local_config_filename);
- int nodeid = cr.init(true);
- if(nodeid == -1){
+ LocalConfig lc;
+ if(!lc.init(glob.local_config_filename))
return false;
- }
- glob.localNodeId = (NodeId)nodeid;
+ glob.localNodeId = lc._ownNodeId;
return true;
}
@@ -328,123 +338,7 @@ readGlobalConfig() {
InitConfigFileParser parser;
glob.cluster_config = parser.parseConfig(glob.config_filename);
if(glob.cluster_config == 0){
- /**
- * Try to get configuration from other MGM server
- * Note: Only new format
- */
- glob.cluster_config = new Config();
-
- ConfigRetriever cr;
- cr.setLocalConfigFileName(glob.local_config_filename);
- glob.cluster_config->m_configValues = cr.getConfig(NDB_VERSION,
- NODE_TYPE_MGM);
- if (glob.cluster_config->m_configValues == NULL)
- return false;
- }
- return true;
-}
-
-/**
- * @fn setPortNo
- * @param glob : Global variables
- * @return true if success, false otherwise.
- *
- * Port number:
- * 2. Use port number from global configuration file
- * 4. Use port number for statistics from global configuration file
- */
-static bool
-setPortNo(){
- const Properties *mgmProps;
-
- ndb_mgm_configuration_iterator * iter =
- ndb_mgm_create_configuration_iterator(glob.cluster_config->m_configValues,
- CFG_SECTION_NODE);
- if(iter == 0)
- return false;
-
- if(ndb_mgm_find(iter, CFG_NODE_ID, glob.localNodeId) != 0){
- ndbout << "Could not retrieve configuration for Node "
- << glob.localNodeId << " in config file." << endl
- << "Have you set correct NodeId for this node?" << endl;
- ndb_mgm_destroy_iterator(iter);
- return false;
- }
-
- unsigned type;
- if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0 ||
- type != NODE_TYPE_MGM){
- ndbout << "Local node id " << glob.localNodeId
- << " is not defined as management server" << endl
- << "Have you set correct NodeId for this node?" << endl;
- return false;
- }
-
- /************
- * Set Port *
- ************/
- Uint32 tmp = 0;
- if(ndb_mgm_get_int_parameter(iter, CFG_MGM_PORT, &tmp) != 0){
- ndbout << "Could not find PortNumber in the configuration file." << endl;
- return false;
- }
- glob.port = tmp;
-
- /*****************
- * Set Stat Port *
- *****************/
-#if 0
- if (!mgmProps->get("PortNumberStats", &tmp)){
- ndbout << "Could not find PortNumberStats in the configuration file."
- << endl;
return false;
}
- glob.port_stats = tmp;
-#endif
-
-#if 0
- const char * host;
- if(ndb_mgm_get_string_parameter(iter, mgmProps->get("ExecuteOnComputer", host)){
- ndbout << "Failed to find \"ExecuteOnComputer\" for my node" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
-
- const char * hostname;
- {
- const Properties * p;
- char buf[255];
- snprintf(buf, sizeof(buf), "Computer_%s", host.c_str());
- if(!glob.cluster_config->get(buf, &p)){
- ndbout << "Failed to find computer " << host << " in config" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- if(!p->get("HostName", &hostname)){
- ndbout << "Failed to find \"HostName\" for computer " << host
- << " in config" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- if(NdbHost_GetHostName(buf) != 0){
- ndbout << "Unable to get own hostname" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- }
-
- const char * ip_address;
- if(mgmProps->get("IpAddress", &ip_address)){
- glob.use_specific_ip = true;
- glob.interface_name = strdup(ip_address);
- return true;
- }
-
- glob.interface_name = strdup(hostname);
-#endif
-
- glob.interface_name = 0;
- glob.use_specific_ip = false;
-
return true;
}
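The daemon branch above replaces the fixed 255-byte path buffers with NdbConfig_PidFileName()/NdbConfig_StdoutFileName(), which hand back heap-allocated strings, and parks them in NdbAutoPtr<char> so they are released on every exit path. A minimal sketch of that RAII idea, assuming plain malloc/free ownership (this is not the actual NdbAutoPtr implementation):

    #include <cstdlib>

    // Illustrative stand-in for NdbAutoPtr<char>: frees a malloc'ed string
    // when the holder goes out of scope, whichever return path is taken.
    class AutoCStr {
      char* m_p;
    public:
      explicit AutoCStr(char* p) : m_p(p) {}
      ~AutoCStr() { free(m_p); }
    private:
      AutoCStr(const AutoCStr&);            // non-copyable (pre-C++11 style)
      AutoCStr& operator=(const AutoCStr&);
    };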
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index b26d550fe31..b9947fcf0e7 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -295,6 +295,7 @@ ClusterMgr::execAPI_REGREQ(const Uint32 * theData){
}
int global_mgmt_server_check = 0; // set to one in mgmtsrvr main;
+
void
ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
const ApiRegConf * const apiRegConf = (ApiRegConf *)&theData[0];
@@ -309,6 +310,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
Node & node = theNodes[nodeId];
assert(node.defined == true);
assert(node.connected == true);
+
if(node.m_info.m_version != apiRegConf->version){
node.m_info.m_version = apiRegConf->version;
if (global_mgmt_server_check == 1)
diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp
index f6f2106f2aa..5f620f77906 100644
--- a/ndb/src/ndbapi/DictCache.cpp
+++ b/ndb/src/ndbapi/DictCache.cpp
@@ -157,6 +157,7 @@ GlobalDictCache::put(const char * name, NdbTableImpl * tab)
void
GlobalDictCache::drop(NdbTableImpl * tab)
{
+ unsigned i;
const Uint32 len = strlen(tab->m_internalName.c_str());
Vector<TableVersion> * vers =
m_tableHash.getData(tab->m_internalName.c_str(), len);
@@ -173,7 +174,7 @@ GlobalDictCache::drop(NdbTableImpl * tab)
abort();
}
- for(unsigned i = 0; i < sz; i++){
+ for(i = 0; i < sz; i++){
TableVersion & ver = (* vers)[i];
if(ver.m_impl == tab){
if(ver.m_refCount == 0 || ver.m_status == RETREIVING ||
@@ -193,7 +194,7 @@ GlobalDictCache::drop(NdbTableImpl * tab)
}
}
- for(unsigned i = 0; i<sz; i++){
+ for(i = 0; i<sz; i++){
TableVersion & ver = (* vers)[i];
ndbout_c("%d: version: %d refCount: %d status: %d impl: %p",
i, ver.m_version, ver.m_refCount, ver.m_status, ver.m_impl);
@@ -204,6 +205,7 @@ GlobalDictCache::drop(NdbTableImpl * tab)
void
GlobalDictCache::release(NdbTableImpl * tab){
+ unsigned i;
const Uint32 len = strlen(tab->m_internalName.c_str());
Vector<TableVersion> * vers =
m_tableHash.getData(tab->m_internalName.c_str(), len);
@@ -220,7 +222,7 @@ GlobalDictCache::release(NdbTableImpl * tab){
abort();
}
- for(unsigned i = 0; i < sz; i++){
+ for(i = 0; i < sz; i++){
TableVersion & ver = (* vers)[i];
if(ver.m_impl == tab){
if(ver.m_refCount == 0 || ver.m_status == RETREIVING ||
@@ -235,7 +237,7 @@ GlobalDictCache::release(NdbTableImpl * tab){
}
}
- for(unsigned i = 0; i<sz; i++){
+ for(i = 0; i<sz; i++){
TableVersion & ver = (* vers)[i];
ndbout_c("%d: version: %d refCount: %d status: %d impl: %p",
i, ver.m_version, ver.m_refCount, ver.m_status, ver.m_impl);
@@ -244,3 +246,4 @@ GlobalDictCache::release(NdbTableImpl * tab){
abort();
}
+template class Vector<GlobalDictCache::TableVersion>;
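As in Services.cpp above, the file now ends with an explicit template instantiation so that the members of Vector<GlobalDictCache::TableVersion> are emitted in this one translation unit instead of being left to every includer. A small self-contained example of the construct, using an invented Buf type:

    // Explicit instantiation: the line after the definition forces the
    // compiler to emit every member of Buf<int> here, so other files can
    // link against them without seeing the template body.
    template <class T>
    struct Buf {
      T v[4];
      T sum() const { T s = T(); for (int i = 0; i < 4; i++) s += v[i]; return s; }
    };

    template struct Buf<int>;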
diff --git a/ndb/src/ndbapi/Makefile.am b/ndb/src/ndbapi/Makefile.am
index 2ec58ab6e85..14badb0c62f 100644
--- a/ndb/src/ndbapi/Makefile.am
+++ b/ndb/src/ndbapi/Makefile.am
@@ -23,9 +23,7 @@ libndbapi_la_SOURCES = \
NdbOperationDefine.cpp \
NdbOperationExec.cpp \
NdbResultSet.cpp \
- NdbCursorOperation.cpp \
- NdbScanReceiver.cpp NdbScanOperation.cpp \
- NdbScanFilter.cpp \
+ NdbScanOperation.cpp NdbScanFilter.cpp \
NdbIndexOperation.cpp \
NdbEventOperation.cpp \
NdbEventOperationImpl.cpp \
diff --git a/ndb/src/ndbapi/Makefile_old b/ndb/src/ndbapi/Makefile_old
index c2bb0189a7f..54de9ba96f4 100644
--- a/ndb/src/ndbapi/Makefile_old
+++ b/ndb/src/ndbapi/Makefile_old
@@ -34,31 +34,24 @@ SOURCES = \
Ndblist.cpp \
Ndbif.cpp \
Ndbinit.cpp \
- Ndberr.cpp \
- ndberror.c \
- NdbErrorOut.cpp \
- NdbConnection.cpp \
+ ndberror.c Ndberr.cpp NdbErrorOut.cpp \
+ NdbConnection.cpp \
NdbConnectionScan.cpp \
NdbOperation.cpp \
NdbOperationSearch.cpp \
- NdbOperationScan.cpp \
NdbOperationInt.cpp \
NdbOperationDefine.cpp \
NdbOperationExec.cpp \
- NdbScanReceiver.cpp \
NdbResultSet.cpp \
- NdbCursorOperation.cpp \
NdbScanOperation.cpp NdbScanFilter.cpp \
NdbIndexOperation.cpp \
NdbEventOperation.cpp \
NdbEventOperationImpl.cpp \
NdbApiSignal.cpp \
NdbRecAttr.cpp \
- NdbSchemaCon.cpp \
- NdbSchemaOp.cpp \
NdbUtil.cpp \
NdbReceiver.cpp \
- NdbDictionary.cpp NdbDictionaryImpl.cpp DictCache.cpp \
+ NdbDictionary.cpp NdbDictionaryImpl.cpp DictCache.cpp
NdbBlob.cpp
include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index fe7260c4693..f09a7481d2d 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -154,26 +154,22 @@ Ndb::NDB_connect(Uint32 tNode)
tNdbCon->Status(NdbConnection::Connecting); // Set status to connecting
Uint32 nodeSequence;
{ // send and receive signal
- tp->lock_mutex();
+ Guard guard(tp->theMutexPtr);
nodeSequence = tp->getNodeSequence(tNode);
bool node_is_alive = tp->get_node_alive(tNode);
if (node_is_alive) {
tReturnCode = tp->sendSignal(tSignal, tNode);
releaseSignal(tSignal);
- if (tReturnCode == -1) {
- tp->unlock_mutex();
- } else {
+ if (tReturnCode != -1) {
theWaiter.m_node = tNode;
theWaiter.m_state = WAIT_TC_SEIZE;
tReturnCode = receiveResponse();
}//if
} else {
releaseSignal(tSignal);
- tp->unlock_mutex();
tReturnCode = -1;
}//if
}
-
if ((tReturnCode == 0) && (tNdbCon->Status() == NdbConnection::Connected)) {
//************************************************
// Send and receive was successful
@@ -463,9 +459,9 @@ Ndb::closeTransaction(NdbConnection* aConnection)
CHECK_STATUS_MACRO_VOID;
tCon = theTransactionList;
-
+
if (aConnection == tCon) { // Remove the active connection object
- theTransactionList = tCon->next(); // from the transaction list.
+ theTransactionList = tCon->next(); // from the transaction list.
} else {
while (aConnection != tCon) {
if (tCon == NULL) {
@@ -473,44 +469,33 @@ Ndb::closeTransaction(NdbConnection* aConnection)
// closeTransaction called on non-existing transaction
//-----------------------------------------------------
- if(aConnection->theError.code == 4008){
- /**
- * When a SCAN timed-out, returning the NdbConnection leads
- * to reuse. And TC crashes when the API tries to reuse it to
- * something else...
- */
+ if(aConnection->theError.code == 4008){
+ /**
+ * When a SCAN timed-out, returning the NdbConnection leads
+ * to reuse. And TC crashes when the API tries to reuse it to
+ * something else...
+ */
#ifdef VM_TRACE
- printf("Scan timeout:ed NdbConnection-> not returning it-> memory leak\n");
+ printf("Scan timeout:ed NdbConnection-> "
+ "not returning it-> memory leak\n");
#endif
- return;
- }
+ return;
+ }
#ifdef VM_TRACE
- printf("Non-existing transaction into closeTransaction\n");
+ printf("Non-existing transaction into closeTransaction\n");
abort();
#endif
- return;
+ return;
}//if
tPreviousCon = tCon;
tCon = tCon->next();
}//while
tPreviousCon->next(tCon->next());
}//if
-
+
aConnection->release();
-
- if(aConnection->theError.code == 4008){
- /**
- * When a SCAN timed-out, returning the NdbConnection leads
- * to reuse. And TC crashes when the API tries to reuse it to
- * something else...
- */
-#ifdef VM_TRACE
- printf("Scan timeout:ed NdbConnection-> not returning it-> memory leak\n");
-#endif
- return;
- }
-
+
if(aConnection->theError.code == 4008){
/**
* Something timed-out, returning the NdbConnection leads
@@ -522,7 +507,7 @@ Ndb::closeTransaction(NdbConnection* aConnection)
#endif
return;
}
-
+
if (aConnection->theReleaseOnClose == false) {
/**
* Put it back in idle list for that node
@@ -729,9 +714,10 @@ Ndb::getNodeId()
}
/****************************************************************************
-Uint64 getTupleIdFromNdb( Uint32 aTableId );
+Uint64 getTupleIdFromNdb( Uint32 aTableId, Uint32 cacheSize );
Parameters: aTableId : The TableId.
+ cacheSize: Prefetch this many values
Remark: Returns a new TupleId to the application.
The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
@@ -750,8 +736,19 @@ Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
return tupleId;
}
+Uint64
+Ndb::getAutoIncrementValue(NdbDictionary::Table * aTable, Uint32 cacheSize)
+{
+ DEBUG_TRACE("getAutoIncrementValue");
+ if (aTable == 0)
+ return ~0;
+ const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
+ Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
+ return tupleId;
+}
+
Uint64
-Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize )
+Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize)
{
const NdbTableImpl* table = theDictionary->getTable(aTableName);
if (table == 0)
@@ -760,7 +757,7 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize )
}
Uint64
-Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize )
+Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
{
if ( theFirstTupleId[aTableId] != theLastTupleId[aTableId] )
{
@@ -773,31 +770,90 @@ Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize )
}
}
+Uint64
+Ndb::readAutoIncrementValue(const char* aTableName)
+{
+ DEBUG_TRACE("readtAutoIncrementValue");
+ const NdbTableImpl* table = theDictionary->getTable(aTableName);
+ if (table == 0)
+ return ~0;
+ Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
+ return tupleId;
+}
+
+Uint64
+Ndb::readAutoIncrementValue(NdbDictionary::Table * aTable)
+{
+ DEBUG_TRACE("readtAutoIncrementValue");
+ if (aTable == 0)
+ return ~0;
+ const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
+ Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
+ return tupleId;
+}
+
+Uint64
+Ndb::readTupleIdFromNdb(Uint32 aTableId)
+{
+ if ( theFirstTupleId[aTableId] == theLastTupleId[aTableId] )
+ // Cache is empty, check next in database
+ return opTupleIdOnNdb(aTableId, 0, 3);
+
+ return theFirstTupleId[aTableId] + 1;
+}
+
bool
-Ndb::setAutoIncrementValue(const char* aTableName, Uint64 val)
+Ndb::setAutoIncrementValue(const char* aTableName, Uint64 val, bool increase)
{
DEBUG_TRACE("setAutoIncrementValue " << val);
const NdbTableImpl* table = theDictionary->getTable(aTableName);
if (table == 0)
return false;
- return setTupleIdInNdb(table->m_tableId, val);
+ return setTupleIdInNdb(table->m_tableId, val, increase);
+}
+
+bool
+Ndb::setAutoIncrementValue(NdbDictionary::Table * aTable, Uint64 val, bool increase)
+{
+ DEBUG_TRACE("setAutoIncrementValue " << val);
+ if (aTable == 0)
+ return ~0;
+ const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
+ return setTupleIdInNdb(table->m_tableId, val, increase);
}
bool
-Ndb::setTupleIdInNdb(const char* aTableName, Uint64 val )
+Ndb::setTupleIdInNdb(const char* aTableName, Uint64 val, bool increase )
{
DEBUG_TRACE("setTupleIdInNdb");
const NdbTableImpl* table = theDictionary->getTable(aTableName);
if (table == 0)
return false;
- return setTupleIdInNdb(table->m_tableId, val);
+ return setTupleIdInNdb(table->m_tableId, val, increase);
}
bool
-Ndb::setTupleIdInNdb(Uint32 aTableId, Uint64 val )
+Ndb::setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase )
{
DEBUG_TRACE("setTupleIdInNdb");
- return (opTupleIdOnNdb(aTableId, val, 1) == val);
+ if (increase)
+ {
+ if (theFirstTupleId[aTableId] != theLastTupleId[aTableId])
+ {
+ // We have a cache sequence
+ if (val <= theFirstTupleId[aTableId]+1)
+ return false;
+ if (val <= theLastTupleId[aTableId])
+ {
+ theFirstTupleId[aTableId] = val - 1;
+ return true;
+ }
+ // else continue;
+ }
+ return (opTupleIdOnNdb(aTableId, val, 2) == val);
+ }
+ else
+ return (opTupleIdOnNdb(aTableId, val, 1) == val);
}
Uint64
@@ -809,7 +865,7 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
NdbOperation* tOperation;
Uint64 tValue;
NdbRecAttr* tRecAttrResult;
-
+ int result;
Uint64 ret;
CHECK_STATUS_MACRO_ZERO;
@@ -835,15 +891,7 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
case 0:
tOperation->interpretedUpdateTuple();
tOperation->equal("SYSKEY_0", aTableId );
- {
-#ifdef WORDS_BIGENDIAN
- Uint64 cacheSize64 = opValue; // XXX interpreter bug on Uint32
- tOperation->incValue("NEXTID", cacheSize64);
-#else
- Uint32 cacheSize32 = opValue; // XXX for little-endian
- tOperation->incValue("NEXTID", cacheSize32);
-#endif
- }
+ tOperation->incValue("NEXTID", opValue);
tRecAttrResult = tOperation->getValue("NEXTID");
if (tConnection->execute( Commit ) == -1 )
@@ -863,10 +911,40 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
if (tConnection->execute( Commit ) == -1 )
goto error_handler;
- theFirstTupleId[aTableId] = ~0;
- theLastTupleId[aTableId] = ~0;
+ theFirstTupleId[aTableId] = ~(Uint64)0;
+ theLastTupleId[aTableId] = ~(Uint64)0;
ret = opValue;
break;
+ case 2:
+ tOperation->interpretedUpdateTuple();
+ tOperation->equal("SYSKEY_0", aTableId );
+ tOperation->load_const_u64(1, opValue);
+ tOperation->read_attr("NEXTID", 2);
+ tOperation->branch_le(2, 1, 0);
+ tOperation->write_attr("NEXTID", 1);
+ tOperation->interpret_exit_ok();
+ tOperation->def_label(0);
+ tOperation->interpret_exit_nok(9999);
+
+ if ( (result = tConnection->execute( Commit )) == -1 )
+ goto error_handler;
+
+ if (result == 9999)
+ ret = ~(Uint64)0;
+ else
+ {
+ theFirstTupleId[aTableId] = theLastTupleId[aTableId] = opValue - 1;
+ ret = opValue;
+ }
+ break;
+ case 3:
+ tOperation->readTuple();
+ tOperation->equal("SYSKEY_0", aTableId );
+ tRecAttrResult = tOperation->getValue("NEXTID");
+ if (tConnection->execute( Commit ) == -1 )
+ goto error_handler;
+ ret = tRecAttrResult->u_64_value();
+ break;
default:
goto error_handler;
}
@@ -973,13 +1051,13 @@ Ndb::StartTransactionNodeSelectionData::init(Uint32 noOfNodes,
*/
{
fragment2PrimaryNodeMap = new Uint32[noOfFragments];
-
- for(Uint32 i = 0; i<noOfNodes; i++){
+ Uint32 i;
+ for(i = 0; i<noOfNodes; i++){
fragment2PrimaryNodeMap[i] = nodeIds[i];
}
// Sort them (bubble sort)
- for(Uint32 i = 0; i<noOfNodes-1; i++)
+ for(i = 0; i<noOfNodes-1; i++)
for(Uint32 j = i+1; j<noOfNodes; j++)
if(fragment2PrimaryNodeMap[i] > fragment2PrimaryNodeMap[j]){
Uint32 tmp = fragment2PrimaryNodeMap[i];
@@ -987,7 +1065,7 @@ Ndb::StartTransactionNodeSelectionData::init(Uint32 noOfNodes,
fragment2PrimaryNodeMap[j] = tmp;
}
- for(Uint32 i = 0; i<noOfNodes; i++){
+ for(i = 0; i<noOfNodes; i++){
fragment2PrimaryNodeMap[i+noOfNodes] = fragment2PrimaryNodeMap[i];
}
}
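The reworked setTupleIdInNdb() above only goes to SYSTAB_0 when the requested value lies outside the locally cached range [theFirstTupleId, theLastTupleId]; inside the range it simply moves the cursor. A standalone sketch of that decision, with invented names and a plain callback standing in for the interpreted SYSTAB_0 update (op code 2):

    #include <stdint.h>

    struct IdCache {
      uint64_t first;                     // last value handed out
      uint64_t last;                      // end of the prefetched range
    };

    // Sketch of the 'increase' path: refuse to move backwards, satisfy the
    // request from the cache when possible, otherwise push it to the table.
    bool bump_if_higher(IdCache& c, uint64_t val, bool (*push_to_db)(uint64_t)) {
      if (c.first != c.last) {            // a prefetched range is active
        if (val <= c.first + 1)
          return false;                   // would move the sequence backwards
        if (val <= c.last) {
          c.first = val - 1;              // stays inside the cached range
          return true;
        }
      }
      return push_to_db(val);             // outside the cache: update SYSTAB_0
    }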
diff --git a/ndb/src/ndbapi/NdbApiSignal.cpp b/ndb/src/ndbapi/NdbApiSignal.cpp
index a44937cd398..6f5e1e50d2c 100644
--- a/ndb/src/ndbapi/NdbApiSignal.cpp
+++ b/ndb/src/ndbapi/NdbApiSignal.cpp
@@ -15,18 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/******************************************************************************
-Name: NdbApiSignal.C
-Include:
-Link:
-Author: UABMNST Mona Natterkvist UAB/B/SD
-Date: 970829
-Version: 0.1
-Description: Interface between TIS and NDB
-Documentation:
-Adjust: 971114 UABMNST First version.
- 000705 QABANAB Update of Protocol2
-******************************************************************************/
#include "API.hpp"
#include "NdbApiSignal.hpp"
@@ -46,6 +34,7 @@ Adjust: 971114 UABMNST First version.
#include <signaldata/IndxKeyInfo.hpp>
#include <signaldata/IndxAttrInfo.hpp>
#include <signaldata/TcHbRep.hpp>
+#include <signaldata/ScanTab.hpp>
#include <NdbOut.hpp>
@@ -161,7 +150,7 @@ NdbApiSignal::setSignal(int aNdbSignalType)
theTrace = TestOrd::TraceAPI;
theReceiversBlockNumber = DBTC;
theVerId_signalNumber = GSN_TCROLLBACKREQ;
- theLength = 5;
+ theLength = 3;
}
break;
@@ -188,16 +177,7 @@ NdbApiSignal::setSignal(int aNdbSignalType)
theTrace = TestOrd::TraceAPI;
theReceiversBlockNumber = DBTC;
theVerId_signalNumber = GSN_SCAN_TABREQ;
- theLength = 25;
- }
- break;
-
- case GSN_SCAN_TABINFO:
- {
- theTrace = TestOrd::TraceAPI;
- theReceiversBlockNumber = DBTC;
- theVerId_signalNumber = GSN_SCAN_TABINFO;
- theLength = 17;
+ theLength = 9; // ScanTabReq::SignalLength;
}
break;
diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
index 8e067f770e8..7939f54d846 100644
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ b/ndb/src/ndbapi/NdbBlob.cpp
@@ -14,23 +14,25 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include "Ndb.hpp"
-#include "NdbDictionaryImpl.hpp"
-#include "NdbConnection.hpp"
-#include "NdbOperation.hpp"
-#include "NdbIndexOperation.hpp"
-#include "NdbRecAttr.hpp"
-#include "NdbBlob.hpp"
+#include <Ndb.hpp>
+#include <NdbDictionaryImpl.hpp>
+#include <NdbConnection.hpp>
+#include <NdbOperation.hpp>
+#include <NdbIndexOperation.hpp>
+#include <NdbRecAttr.hpp>
+#include <NdbBlob.hpp>
+#include <NdbScanOperation.hpp>
#ifdef NDB_BLOB_DEBUG
#define DBG(x) \
do { \
static const char* p = getenv("NDB_BLOB_DEBUG"); \
if (p == 0 || *p == 0 || *p == '0') break; \
- const char* cname = theColumn == NULL ? "BLOB" : theColumn->m_name.c_str(); \
- ndbout << cname << " " << __LINE__ << " " << x << " " << *this << endl; \
+ static char* prefix = "BLOB"; \
+ const char* cname = theColumn == NULL ? "-" : theColumn->m_name.c_str(); \
+ ndbout << prefix << " " << hex << (void*)this << " " << cname; \
+ ndbout << " " << dec << __LINE__ << " " << x << " " << *this << endl; \
} while (0)
-#define EXE() assert(theNdbCon->executeNoBlobs(NoCommit) == 0)
#else
#define DBG(x)
#endif
@@ -48,7 +50,7 @@ ndb_blob_debug(const Uint32* data, unsigned size)
/*
* Reading index table directly (as a table) is faster but there are
- * bugs or limitations. Keep the code but make possible to choose.
+ * bugs or limitations. Keep the code and make it possible to choose.
*/
static const bool g_ndb_blob_ok_to_read_index_table = false;
@@ -81,7 +83,7 @@ NdbBlob::getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnIm
{
assert(t != 0 && c != 0 && c->getBlobType());
memset(btname, 0, BlobTableNameSize);
- sprintf(btname, "NDB$BLOB_%d_%d_%d", (int)t->m_tableId, (int)t->m_version, (int)c->m_attrId);
+ sprintf(btname, "NDB$BLOB_%d_%d", (int)t->m_tableId, (int)c->m_attrId);
}
void
@@ -115,7 +117,7 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
case NdbDictionary::Column::Blob:
bc.setType(NdbDictionary::Column::Binary);
break;
- case NdbDictionary::Column::Clob:
+ case NdbDictionary::Column::Text:
bc.setType(NdbDictionary::Column::Char);
break;
default:
@@ -138,12 +140,12 @@ void
NdbBlob::init()
{
theState = Idle;
- theBlobTableName[0] = 0;
theNdb = NULL;
theNdbCon = NULL;
theNdbOp = NULL;
theTable = NULL;
theAccessTable = NULL;
+ theBlobTable = NULL;
theColumn = NULL;
theFillChar = 0;
theInlineSize = 0;
@@ -154,11 +156,13 @@ NdbBlob::init()
theSetFlag = false;
theSetBuf = NULL;
theGetSetBytes = 0;
+ thePendingBlobOps = 0;
+ theActiveHook = NULL;
+ theActiveHookArg = NULL;
theHead = NULL;
theInlineData = NULL;
theHeadInlineRecAttr = NULL;
theHeadInlineUpdateFlag = false;
- theNewPartFlag = false;
theNullFlag = -1;
theLength = 0;
thePos = 0;
@@ -269,7 +273,7 @@ NdbBlob::isScanOp()
inline Uint32
NdbBlob::getPartNumber(Uint64 pos)
{
- assert(pos >= theInlineSize);
+ assert(thePartSize != 0 && pos >= theInlineSize);
return (pos - theInlineSize) / thePartSize;
}
@@ -301,7 +305,7 @@ NdbBlob::getTableKeyValue(NdbOperation* anOp)
assert(c != NULL);
if (c->m_pk) {
unsigned len = c->m_attrSize * c->m_arraySize;
- if (anOp->getValue(c, (char*)&data[pos]) == NULL) {
+ if (anOp->getValue_impl(c, (char*)&data[pos]) == NULL) {
setErrorCode(anOp);
return -1;
}
@@ -321,10 +325,10 @@ int
NdbBlob::setTableKeyValue(NdbOperation* anOp)
{
const Uint32* data = (const Uint32*)theKeyBuf.data;
+ DBG("setTableKeyValue key=" << ndb_blob_debug(data, theTable->m_sizeOfKeysInWords));
+ const unsigned columns = theTable->m_columns.size();
unsigned pos = 0;
- const unsigned size = theTable->m_columns.size();
- DBG("setTableKeyValue key=" << ndb_blob_debug(data, size));
- for (unsigned i = 0; i < size; i++) {
+ for (unsigned i = 0; i < columns; i++) {
NdbColumnImpl* c = theTable->m_columns[i];
assert(c != NULL);
if (c->m_pk) {
@@ -344,10 +348,10 @@ int
NdbBlob::setAccessKeyValue(NdbOperation* anOp)
{
const Uint32* data = (const Uint32*)theAccessKeyBuf.data;
+ DBG("setAccessKeyValue key=" << ndb_blob_debug(data, theAccessTable->m_sizeOfKeysInWords));
+ const unsigned columns = theAccessTable->m_columns.size();
unsigned pos = 0;
- const unsigned size = theAccessTable->m_columns.size();
- DBG("setAccessKeyValue key=" << ndb_blob_debug(data, size));
- for (unsigned i = 0; i < size; i++) {
+ for (unsigned i = 0; i < columns; i++) {
NdbColumnImpl* c = theAccessTable->m_columns[i];
assert(c != NULL);
if (c->m_pk) {
@@ -382,7 +386,7 @@ int
NdbBlob::getHeadInlineValue(NdbOperation* anOp)
{
DBG("getHeadInlineValue");
- theHeadInlineRecAttr = anOp->getValue(theColumn, theHeadInlineBuf.data);
+ theHeadInlineRecAttr = anOp->getValue_impl(theColumn, theHeadInlineBuf.data);
if (theHeadInlineRecAttr == NULL) {
setErrorCode(anOp);
return -1;
@@ -478,11 +482,27 @@ NdbBlob::setValue(const void* data, Uint32 bytes)
return 0;
}
+// activation hook
+
+int
+NdbBlob::setActiveHook(ActiveHook activeHook, void* arg)
+{
+ DBG("setActiveHook hook=" << hex << (void*)activeHook << " arg=" << hex << arg);
+ if (theState != Prepared) {
+ setErrorCode(ErrState);
+ return -1;
+ }
+ theActiveHook = activeHook;
+ theActiveHookArg = arg;
+ return 0;
+}
+
// misc operations
int
NdbBlob::getNull(bool& isNull)
{
+ DBG("getNull");
if (theState == Prepared && theSetFlag) {
isNull = (theSetBuf == NULL);
return 0;
@@ -519,6 +539,7 @@ NdbBlob::setNull()
int
NdbBlob::getLength(Uint64& len)
{
+ DBG("getLength");
if (theState == Prepared && theSetFlag) {
len = theGetSetBytes;
return 0;
@@ -534,17 +555,17 @@ NdbBlob::getLength(Uint64& len)
int
NdbBlob::truncate(Uint64 length)
{
- DBG("truncate kength=" << length);
+ DBG("truncate length=" << length);
if (theNullFlag == -1) {
setErrorCode(ErrState);
return -1;
}
if (theLength > length) {
- if (length >= theInlineSize) {
- Uint32 part1 = getPartNumber(length);
+ if (length > theInlineSize) {
+ Uint32 part1 = getPartNumber(length - 1);
Uint32 part2 = getPartNumber(theLength - 1);
assert(part2 >= part1);
- if (deleteParts(part1, part2 - part1) == -1)
+ if (part2 > part1 && deleteParts(part1 + 1, part2 - part1) == -1)
return -1;
} else {
if (deleteParts(0, getPartCount()) == -1)
@@ -559,6 +580,7 @@ NdbBlob::truncate(Uint64 length)
int
NdbBlob::getPos(Uint64& pos)
{
+ DBG("getPos");
if (theNullFlag == -1) {
setErrorCode(ErrState);
return -1;
@@ -570,6 +592,7 @@ NdbBlob::getPos(Uint64& pos)
int
NdbBlob::setPos(Uint64 pos)
{
+ DBG("setPos pos=" << pos);
if (theNullFlag == -1) {
setErrorCode(ErrState);
return -1;
@@ -628,6 +651,10 @@ NdbBlob::readDataPrivate(Uint64 pos, char* buf, Uint32& bytes)
len -= n;
}
}
+ if (len > 0 && thePartSize == 0) {
+ setErrorCode(ErrSeek);
+ return -1;
+ }
if (len > 0) {
assert(pos >= theInlineSize);
Uint32 off = (pos - theInlineSize) % thePartSize;
@@ -637,11 +664,10 @@ NdbBlob::readDataPrivate(Uint64 pos, char* buf, Uint32& bytes)
Uint32 part = (pos - theInlineSize) / thePartSize;
if (readParts(thePartBuf.data, part, 1) == -1)
return -1;
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode(theNdbOp);
+ // need result now
+ DBG("execute pending part reads");
+ if (executePendingBlobReads() == -1)
return -1;
- }
Uint32 n = thePartSize - off;
if (n > len)
n = len;
@@ -672,11 +698,10 @@ NdbBlob::readDataPrivate(Uint64 pos, char* buf, Uint32& bytes)
Uint32 part = (pos - theInlineSize) / thePartSize;
if (readParts(thePartBuf.data, part, 1) == -1)
return -1;
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode(theNdbOp);
+ // need result now
+ DBG("execute pending part reads");
+ if (executePendingBlobReads() == -1)
return -1;
- }
memcpy(buf, thePartBuf.data, len);
Uint32 n = len;
pos += n;
@@ -735,29 +760,27 @@ NdbBlob::writeDataPrivate(Uint64 pos, const char* buf, Uint32 bytes)
len -= n;
}
}
+ if (len > 0 && thePartSize == 0) {
+ setErrorCode(ErrSeek);
+ return -1;
+ }
if (len > 0) {
assert(pos >= theInlineSize);
Uint32 off = (pos - theInlineSize) % thePartSize;
// partial first block
if (off != 0) {
DBG("partial first block pos=" << pos << " len=" << len);
- if (theNewPartFlag) {
- // must flush insert to guarantee read
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode(theNdbOp);
- return -1;
- }
- theNewPartFlag = false;
- }
+ // flush writes to guarantee correct read
+ DBG("execute pending part writes");
+ if (executePendingBlobWrites() == -1)
+ return -1;
Uint32 part = (pos - theInlineSize) / thePartSize;
if (readParts(thePartBuf.data, part, 1) == -1)
return -1;
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode(theNdbOp);
+ // need result now
+ DBG("execute pending part reafs");
+ if (executePendingBlobReads() == -1)
return -1;
- }
Uint32 n = thePartSize - off;
if (n > len) {
memset(thePartBuf.data + off + len, theFillChar, n - len);
@@ -798,22 +821,16 @@ NdbBlob::writeDataPrivate(Uint64 pos, const char* buf, Uint32 bytes)
assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
Uint32 part = (pos - theInlineSize) / thePartSize;
if (theLength > pos + len) {
- if (theNewPartFlag) {
- // must flush insert to guarantee read
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode(theNdbOp);
- return -1;
- }
- theNewPartFlag = false;
- }
+ // flush writes to guarantee correct read
+ DBG("execute pending part writes");
+ if (executePendingBlobWrites() == -1)
+ return -1;
if (readParts(thePartBuf.data, part, 1) == -1)
return -1;
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode(theNdbOp);
+ // need result now
+ DBG("execute pending part reads");
+ if (executePendingBlobReads() == -1)
return -1;
- }
memcpy(thePartBuf.data, buf, len);
if (updateParts(thePartBuf.data, part, 1) == -1)
return -1;
@@ -848,7 +865,7 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
DBG("readParts part=" << part << " count=" << count);
Uint32 n = 0;
while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName);
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
tOp->readTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
@@ -858,6 +875,8 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
}
buf += thePartSize;
n++;
+ thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
}
return 0;
}
@@ -868,7 +887,7 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
DBG("insertParts part=" << part << " count=" << count);
Uint32 n = 0;
while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName);
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
tOp->insertTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
@@ -878,7 +897,8 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
}
buf += thePartSize;
n++;
- theNewPartFlag = true;
+ thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
}
return 0;
}
@@ -889,7 +909,7 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count)
DBG("updateParts part=" << part << " count=" << count);
Uint32 n = 0;
while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName);
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
tOp->updateTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
@@ -899,7 +919,8 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count)
}
buf += thePartSize;
n++;
- theNewPartFlag = true;
+ thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
}
return 0;
}
@@ -910,7 +931,7 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count)
DBG("deleteParts part=" << part << " count=" << count);
Uint32 n = 0;
while (n < count) {
- NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTableName);
+ NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
tOp->deleteTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1) {
@@ -918,6 +939,52 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count)
return -1;
}
n++;
+ thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
+ theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
+ }
+ return 0;
+}
+
+// pending ops
+
+int
+NdbBlob::executePendingBlobReads()
+{
+ Uint8 flags = (1 << NdbOperation::ReadRequest);
+ if (thePendingBlobOps & flags) {
+ if (theNdbCon->executeNoBlobs(NoCommit) == -1)
+ return -1;
+ thePendingBlobOps = 0;
+ theNdbCon->thePendingBlobOps = 0;
+ }
+ return 0;
+}
+
+int
+NdbBlob::executePendingBlobWrites()
+{
+ Uint8 flags = 0xFF & ~(1 << NdbOperation::ReadRequest);
+ if (thePendingBlobOps & flags) {
+ if (theNdbCon->executeNoBlobs(NoCommit) == -1)
+ return -1;
+ thePendingBlobOps = 0;
+ theNdbCon->thePendingBlobOps = 0;
+ }
+ return 0;
+}
+
+// callbacks
+
+int
+NdbBlob::invokeActiveHook()
+{
+ DBG("invokeActiveHook");
+ assert(theState == Active && theActiveHook != NULL);
+ int ret = (*theActiveHook)(this, theActiveHookArg);
+ DBG("invokeActiveHook ret=" << ret);
+ if (ret != 0) {
+ // no error is set on blob level
+ return -1;
}
return 0;
}
@@ -947,7 +1014,7 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
partType = NdbDictionary::Column::Binary;
theFillChar = 0x0;
break;
- case NdbDictionary::Column::Clob:
+ case NdbDictionary::Column::Text:
partType = NdbDictionary::Column::Char;
theFillChar = 0x20;
break;
@@ -959,22 +1026,21 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
theInlineSize = theColumn->getInlineSize();
thePartSize = theColumn->getPartSize();
theStripeSize = theColumn->getStripeSize();
- // blob table sanity check
+ // sanity check
assert((NDB_BLOB_HEAD_SIZE << 2) == sizeof(Head));
assert(theColumn->m_attrSize * theColumn->m_arraySize == sizeof(Head) + theInlineSize);
- getBlobTableName(theBlobTableName, theTable, theColumn);
- const NdbDictionary::Table* bt;
- const NdbDictionary::Column* bc;
- if (theInlineSize >= (1 << 16) ||
- thePartSize == 0 ||
- thePartSize >= (1 << 16) ||
- theStripeSize == 0 ||
- (bt = theNdb->theDictionary->getTable(theBlobTableName)) == NULL ||
- (bc = bt->getColumn("DATA")) == NULL ||
- bc->getType() != partType ||
- bc->getLength() != (int)thePartSize) {
- setErrorCode(ErrTable);
- return -1;
+ if (thePartSize > 0) {
+ const NdbDictionary::Table* bt = NULL;
+ const NdbDictionary::Column* bc = NULL;
+ if (theStripeSize == 0 ||
+ (bt = theColumn->getBlobTable()) == NULL ||
+ (bc = bt->getColumn("DATA")) == NULL ||
+ bc->getType() != partType ||
+ bc->getLength() != (int)thePartSize) {
+ setErrorCode(ErrTable);
+ return -1;
+ }
+ theBlobTable = &NdbTableImpl::getImpl(*bt);
}
// buffers
theKeyBuf.alloc(theTable->m_sizeOfKeysInWords << 2);
@@ -1060,7 +1126,7 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
Uint32 bytes = theGetSetBytes - theInlineSize;
if (writeDataPrivate(pos, buf, bytes) == -1)
return -1;
- if (anExecType == Commit && theHeadInlineUpdateFlag) {
+ if (theHeadInlineUpdateFlag) {
// add an operation to update head+inline
NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
if (tOp == NULL ||
@@ -1128,6 +1194,10 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
batch = true;
}
}
+ if (theActiveHook != NULL) {
+ // need blob head for callback
+ batch = true;
+ }
DBG("preExecute out batch=" << batch);
return 0;
}
@@ -1144,8 +1214,11 @@ NdbBlob::postExecute(ExecType anExecType)
DBG("postExecute type=" << anExecType);
if (theState == Invalid)
return -1;
- if (theState == Active)
+ if (theState == Active) {
+ setState(anExecType == NoCommit ? Active : Closed);
+ DBG("postExecute skip");
return 0;
+ }
assert(theState == Prepared);
assert(isKeyOp());
if (isIndexOp()) {
@@ -1199,8 +1272,12 @@ NdbBlob::postExecute(ExecType anExecType)
if (deleteParts(0, getPartCount()) == -1)
return -1;
}
- theNewPartFlag = false;
setState(anExecType == NoCommit ? Active : Closed);
+ // activation callback
+ if (theActiveHook != NULL) {
+ if (invokeActiveHook() == -1)
+ return -1;
+ }
DBG("postExecute out");
return 0;
}
@@ -1250,7 +1327,7 @@ NdbBlob::atNextResult()
// get primary key
{ Uint32* data = (Uint32*)theKeyBuf.data;
unsigned size = theTable->m_sizeOfKeysInWords;
- if (theNdbOp->getKeyFromKEYINFO20(data, size) == -1) {
+ if (((NdbScanOperation*)theNdbOp)->getKeyFromKEYINFO20(data, size) == -1) {
setErrorCode(ErrUsage);
return -1;
}
@@ -1274,20 +1351,18 @@ NdbBlob::atNextResult()
Uint32 bytes = theGetSetBytes - theInlineSize;
if (readDataPrivate(pos, buf, bytes) == -1)
return -1;
- // must also execute them
- DBG("force execute");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1) {
- setErrorCode((NdbOperation*)0);
- return -1;
- }
}
}
setState(Active);
+ // activation callback
+ if (theActiveHook != NULL) {
+ if (invokeActiveHook() == -1)
+ return -1;
+ }
DBG("atNextResult out");
return 0;
}
-
// misc
const NdbDictionary::Column*
@@ -1303,6 +1378,9 @@ NdbBlob::setErrorCode(int anErrorCode, bool invalidFlag)
{
DBG("setErrorCode code=" << anErrorCode);
theError.code = anErrorCode;
+ // conditionally copy error to operation level
+ if (theNdbOp != NULL && theNdbOp->theError.code == 0)
+ theNdbOp->setErrorCode(theError.code);
if (invalidFlag)
setState(Invalid);
}
@@ -1335,11 +1413,34 @@ NdbBlob::setErrorCode(NdbConnection* aCon, bool invalidFlag)
setErrorCode(code, invalidFlag);
}
+// info about all blobs in this operation
+
+NdbBlob*
+NdbBlob::blobsFirstBlob()
+{
+ return theNdbOp->theBlobList;
+}
+
+NdbBlob*
+NdbBlob::blobsNextBlob()
+{
+ return theNext;
+}
+
+// debug
+
#ifdef VM_TRACE
+inline int
+NdbBlob::getOperationType() const
+{
+ return theNdbOp != NULL ? theNdbOp->theOperationType : -1;
+}
+
NdbOut&
operator<<(NdbOut& out, const NdbBlob& blob)
{
- ndbout << dec << "s=" << blob.theState;
+ ndbout << dec << "o=" << blob.getOperationType();
+ ndbout << dec << " s=" << blob.theState;
ndbout << dec << " n=" << blob.theNullFlag;;
ndbout << dec << " l=" << blob.theLength;
ndbout << dec << " p=" << blob.thePos;
diff --git a/ndb/src/ndbapi/NdbConnection.cpp b/ndb/src/ndbapi/NdbConnection.cpp
index ad415b8acbf..9a2995a957e 100644
--- a/ndb/src/ndbapi/NdbConnection.cpp
+++ b/ndb/src/ndbapi/NdbConnection.cpp
@@ -27,11 +27,12 @@ Description: Interface between TIS and NDB
Documentation:
Adjust: 971022 UABMNST First version.
*****************************************************************************/
-#include "NdbOut.hpp"
-#include "NdbConnection.hpp"
-#include "NdbOperation.hpp"
-#include "NdbScanOperation.hpp"
-#include "NdbIndexOperation.hpp"
+#include <NdbOut.hpp>
+#include <NdbConnection.hpp>
+#include <NdbOperation.hpp>
+#include <NdbScanOperation.hpp>
+#include <NdbIndexScanOperation.hpp>
+#include <NdbIndexOperation.hpp>
#include "NdbApiSignal.hpp"
#include "TransporterFacade.hpp"
#include "API.hpp"
@@ -80,18 +81,16 @@ NdbConnection::NdbConnection( Ndb* aNdb ) :
theTransactionIsStarted(false),
theDBnode(0),
theReleaseOnClose(false),
- // Cursor operations
+ // Scan operations
m_waitForReply(true),
- m_theFirstCursorOperation(NULL),
- m_theLastCursorOperation(NULL),
- m_firstExecutedCursorOp(NULL),
+ m_theFirstScanOperation(NULL),
+ m_theLastScanOperation(NULL),
+ m_firstExecutedScanOp(NULL),
// Scan operations
- theScanFinished(0),
- theCurrentScanRec(NULL),
- thePreviousScanRec(NULL),
theScanningOp(NULL),
theBuddyConPtr(0xFFFFFFFF),
- theBlobFlag(false)
+ theBlobFlag(false),
+ thePendingBlobOps(0)
{
theListState = NotInList;
theError.code = 0;
@@ -119,7 +118,6 @@ NdbConnection::init()
theListState = NotInList;
theInUseState = true;
theTransactionIsStarted = false;
- theScanFinished = 0;
theNext = NULL;
theFirstOpInList = NULL;
@@ -130,9 +128,6 @@ NdbConnection::init()
theFirstExecOpInList = NULL;
theLastExecOpInList = NULL;
- theCurrentScanRec = NULL;
- thePreviousScanRec = NULL;
-
theCompletedFirstOp = NULL;
theGlobalCheckpointId = 0;
@@ -148,14 +143,15 @@ NdbConnection::init()
theSimpleState = true;
theSendStatus = InitState;
theMagicNumber = 0x37412619;
- // Cursor operations
+ // Scan operations
m_waitForReply = true;
- m_theFirstCursorOperation = NULL;
- m_theLastCursorOperation = NULL;
- m_firstExecutedCursorOp = 0;
+ m_theFirstScanOperation = NULL;
+ m_theLastScanOperation = NULL;
+ m_firstExecutedScanOp = 0;
theBuddyConPtr = 0xFFFFFFFF;
//
theBlobFlag = false;
+ thePendingBlobOps = 0;
}//NdbConnection::init()
/*****************************************************************************
@@ -203,6 +199,23 @@ NdbConnection::setErrorCode(int anErrorCode)
theError.code = anErrorCode;
}//NdbConnection::setErrorCode()
+int
+NdbConnection::restart(){
+ if(theCompletionStatus == CompletedSuccess){
+ releaseCompletedOperations();
+ Uint64 tTransid = theNdb->theFirstTransId;
+ theTransactionId = tTransid;
+ if ((tTransid & 0xFFFFFFFF) == 0xFFFFFFFF) {
+ theNdb->theFirstTransId = (tTransid >> 32) << 32;
+ } else {
+ theNdb->theFirstTransId = tTransid + 1;
+ }
+ theCompletionStatus = NotCompleted;
+ return 0;
+ }
+ return -1;
+}
+
/*****************************************************************************
void handleExecuteCompletion(void);
@@ -258,26 +271,34 @@ NdbConnection::execute(ExecType aTypeOfExec,
if (! theBlobFlag)
return executeNoBlobs(aTypeOfExec, abortOption, forceSend);
- // execute prepared ops in batches, as requested by blobs
+ /*
+ * execute prepared ops in batches, as requested by blobs
+ * - blob error does not terminate execution
+ * - blob error sets error on operation
+ * - if error on operation skip blob calls
+ */
ExecType tExecType;
NdbOperation* tPrepOp;
+ int ret = 0;
do {
tExecType = aTypeOfExec;
tPrepOp = theFirstOpInList;
while (tPrepOp != NULL) {
- bool batch = false;
- NdbBlob* tBlob = tPrepOp->theBlobList;
- while (tBlob != NULL) {
- if (tBlob->preExecute(tExecType, batch) == -1)
- return -1;
- tBlob = tBlob->theNext;
- }
- if (batch) {
- // blob asked to execute all up to here now
- tExecType = NoCommit;
- break;
+ if (tPrepOp->theError.code == 0) {
+ bool batch = false;
+ NdbBlob* tBlob = tPrepOp->theBlobList;
+ while (tBlob != NULL) {
+ if (tBlob->preExecute(tExecType, batch) == -1)
+ ret = -1;
+ tBlob = tBlob->theNext;
+ }
+ if (batch) {
+ // blob asked to execute all up to here now
+ tExecType = NoCommit;
+ break;
+ }
}
tPrepOp = tPrepOp->next();
}
@@ -293,26 +314,30 @@ NdbConnection::execute(ExecType aTypeOfExec,
if (tExecType == Commit) {
NdbOperation* tOp = theCompletedFirstOp;
while (tOp != NULL) {
- NdbBlob* tBlob = tOp->theBlobList;
- while (tBlob != NULL) {
- if (tBlob->preCommit() == -1)
- return -1;
- tBlob = tBlob->theNext;
+ if (tOp->theError.code == 0) {
+ NdbBlob* tBlob = tOp->theBlobList;
+ while (tBlob != NULL) {
+ if (tBlob->preCommit() == -1)
+ ret = -1;
+ tBlob = tBlob->theNext;
+ }
}
tOp = tOp->next();
}
}
if (executeNoBlobs(tExecType, abortOption, forceSend) == -1)
- return -1;
+ ret = -1;
{
NdbOperation* tOp = theCompletedFirstOp;
while (tOp != NULL) {
- NdbBlob* tBlob = tOp->theBlobList;
- while (tBlob != NULL) {
- // may add new operations if batch
- if (tBlob->postExecute(tExecType) == -1)
- return -1;
- tBlob = tBlob->theNext;
+ if (tOp->theError.code == 0) {
+ NdbBlob* tBlob = tOp->theBlobList;
+ while (tBlob != NULL) {
+ // may add new operations if batch
+ if (tBlob->postExecute(tExecType) == -1)
+ ret = -1;
+ tBlob = tBlob->theNext;
+ }
}
tOp = tOp->next();
}
@@ -327,7 +352,7 @@ NdbConnection::execute(ExecType aTypeOfExec,
}
} while (theFirstOpInList != NULL || tExecType != aTypeOfExec);
- return 0;
+ return ret;
}
int
@@ -386,6 +411,7 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
break;
}
}
+ thePendingBlobOps = 0;
return 0;
}//NdbConnection::execute()
@@ -414,7 +440,7 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
* Reset error.code on execute
*/
theError.code = 0;
- NdbCursorOperation* tcOp = m_theFirstCursorOperation;
+ NdbScanOperation* tcOp = m_theFirstScanOperation;
if (tcOp != 0){
// Execute any cursor operations
while (tcOp != NULL) {
@@ -423,14 +449,14 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
if (tReturnCode == -1) {
return;
}//if
- tcOp = (NdbCursorOperation*)tcOp->next();
+ tcOp = (NdbScanOperation*)tcOp->next();
} // while
- m_theLastCursorOperation->next(m_firstExecutedCursorOp);
- m_firstExecutedCursorOp = m_theFirstCursorOperation;
+ m_theLastScanOperation->next(m_firstExecutedScanOp);
+ m_firstExecutedScanOp = m_theFirstScanOperation;
// Discard cursor operations, since these are also
// in the complete operations list we do not need
// to release them.
- m_theFirstCursorOperation = m_theLastCursorOperation = NULL;
+ m_theFirstScanOperation = m_theLastScanOperation = NULL;
}
bool tTransactionIsStarted = theTransactionIsStarted;
@@ -797,17 +823,14 @@ Remark: Release all operations.
******************************************************************************/
void
NdbConnection::release(){
- if (theTransactionIsStarted == true && theScanningOp != NULL )
- stopScan();
-
releaseOperations();
if ( (theTransactionIsStarted == true) &&
- ((theCommitStatus != Committed) &&
- (theCommitStatus != Aborted))) {
-/****************************************************************************
- * The user did not perform any rollback but simply closed the
- * transaction. We must rollback Ndb since Ndb have been contacted.
-******************************************************************************/
+ ((theCommitStatus != Committed) &&
+ (theCommitStatus != Aborted))) {
+ /************************************************************************
+ * The user did not perform any rollback but simply closed the
+ * transaction. We must roll back Ndb since Ndb has been contacted.
+ ************************************************************************/
execute(Rollback);
}//if
theMagicNumber = 0xFE11DC;
@@ -839,8 +862,8 @@ void
NdbConnection::releaseOperations()
{
// Release any open scans
- releaseCursorOperations(m_theFirstCursorOperation);
- releaseCursorOperations(m_firstExecutedCursorOp);
+ releaseScanOperations(m_theFirstScanOperation);
+ releaseScanOperations(m_firstExecutedScanOp);
releaseOps(theCompletedFirstOp);
releaseOps(theFirstOpInList);
@@ -852,9 +875,9 @@ NdbConnection::releaseOperations()
theLastOpInList = NULL;
theLastExecOpInList = NULL;
theScanningOp = NULL;
- m_theFirstCursorOperation = NULL;
- m_theLastCursorOperation = NULL;
- m_firstExecutedCursorOp = NULL;
+ m_theFirstScanOperation = NULL;
+ m_theLastScanOperation = NULL;
+ m_firstExecutedScanOp = NULL;
}//NdbConnection::releaseOperations()
void
@@ -865,24 +888,21 @@ NdbConnection::releaseCompletedOperations()
}//NdbConnection::releaseOperations()
/******************************************************************************
-void releaseCursorOperations();
+void releaseScanOperations();
Remark: Release all cursor operations.
(NdbScanOperation and NdbIndexOperation)
******************************************************************************/
void
-NdbConnection::releaseCursorOperations(NdbCursorOperation* cursorOp)
+NdbConnection::releaseScanOperations(NdbIndexScanOperation* cursorOp)
{
while(cursorOp != 0){
- NdbCursorOperation* next = (NdbCursorOperation*)cursorOp->next();
+ NdbIndexScanOperation* next = (NdbIndexScanOperation*)cursorOp->next();
cursorOp->release();
- if (cursorOp->cursorType() == NdbCursorOperation::ScanCursor)
- theNdb->releaseScanOperation((NdbScanOperation*)cursorOp);
- else
- theNdb->releaseOperation(cursorOp);
+ theNdb->releaseScanOperation(cursorOp);
cursorOp = next;
}
-}//NdbConnection::releaseCursorOperations()
+}//NdbConnection::releaseScanOperations()
/*****************************************************************************
NdbOperation* getNdbOperation(const char* aTableName);
@@ -916,45 +936,6 @@ NdbConnection::getNdbOperation(const char* aTableName)
}//NdbConnection::getNdbOperation()
/*****************************************************************************
-NdbOperation* getNdbOperation(const char* anIndexName, const char* aTableName);
-
-Return Value Return a pointer to a NdbOperation object if getNdbOperation
- was succesful.
- Return NULL : In all other case.
-Parameters: anIndexName : Name of the index to use.
- aTableName : Name of the database table.
-Remark: Get an operation from NdbOperation idlelist and get the
- NdbConnection object
- who was fetch by startTransaction pointing to this operation
- getOperation will set the theTableId in the NdbOperation object.
- synchronous
-******************************************************************************/
-NdbOperation*
-NdbConnection::getNdbOperation(const char* anIndexName, const char* aTableName)
-{
- if ((theError.code == 0) &&
- (theCommitStatus == Started)){
- NdbIndexImpl* index =
- theNdb->theDictionary->getIndex(anIndexName, aTableName);
- NdbTableImpl* table = theNdb->theDictionary->getTable(aTableName);
- NdbTableImpl* indexTable =
- theNdb->theDictionary->getIndexTable(index, table);
- if (indexTable != 0){
- return getNdbOperation(indexTable);
- } else {
- setErrorCode(theNdb->theDictionary->getNdbError().code);
- return NULL;
- }//if
- } else {
- if (theError.code == 0) {
- setOperationErrorCodeAbort(4114);
- }//if
-
- return NULL;
- }//if
-}//NdbConnection::getNdbOperation()
-
-/*****************************************************************************
NdbOperation* getNdbOperation(int aTableId);
Return Value Return a pointer to a NdbOperation object if getNdbOperation
@@ -1014,6 +995,14 @@ NdbConnection::getNdbOperation(NdbTableImpl * tab, NdbOperation* aNextOp)
return NULL;
}//NdbConnection::getNdbOperation()
+NdbOperation* NdbConnection::getNdbOperation(NdbDictionary::Table * table)
+{
+ if (table)
+ return getNdbOperation(& NdbTableImpl::getImpl(*table));
+ else
+ return NULL;
+}//NdbConnection::getNdbOperation()
+
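The new overload above lets callers pass a dictionary table handle instead of a table name. A minimal usage sketch, assuming the usual Ndb entry points and an illustrative table "t1" with columns "pk" and "val"; exact signatures should be checked against the headers in this tree:

  // hedged sketch, not part of the patch
  #include <NdbApi.hpp>

  Ndb ndb("TEST_DB");
  ndb.init();
  NdbDictionary::Dictionary* dict = ndb.getDictionary();
  // getTable() hands back a const pointer; the new overload takes a non-const one
  NdbDictionary::Table* tab = (NdbDictionary::Table*) dict->getTable("t1");
  NdbConnection* trans = ndb.startTransaction();
  NdbOperation* op = trans->getNdbOperation(tab);   // table-handle overload added here
  op->insertTuple();
  op->equal("pk", 1);
  op->setValue("val", 42);
  trans->execute(Commit);
  ndb.closeTransaction(trans);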
// NdbScanOperation
/*****************************************************************************
NdbScanOperation* getNdbScanOperation(const char* aTableName);
@@ -1033,7 +1022,7 @@ NdbConnection::getNdbScanOperation(const char* aTableName)
if (tab != 0){
return getNdbScanOperation(tab);
} else {
- setOperationErrorCodeAbort(theNdb->theError.code);
+ setOperationErrorCodeAbort(theNdb->theDictionary->m_error.code);
return NULL;
}//if
}
@@ -1053,17 +1042,29 @@ Remark: Get an operation from NdbScanOperation idlelist and get the NdbC
who was fetch by startTransaction pointing to this operation
getOperation will set the theTableId in the NdbOperation object.synchronous
******************************************************************************/
-NdbScanOperation*
-NdbConnection::getNdbScanOperation(const char* anIndexName, const char* aTableName)
+NdbIndexScanOperation*
+NdbConnection::getNdbIndexScanOperation(const char* anIndexName,
+ const char* aTableName)
+{
+ NdbIndexImpl* index =
+ theNdb->theDictionary->getIndex(anIndexName, aTableName);
+ NdbTableImpl* table = theNdb->theDictionary->getTable(aTableName);
+
+ return getNdbIndexScanOperation(index, table);
+}
+
+NdbIndexScanOperation*
+NdbConnection::getNdbIndexScanOperation(NdbIndexImpl* index,
+ NdbTableImpl* table)
{
if (theCommitStatus == Started){
- NdbIndexImpl* index =
- theNdb->theDictionary->getIndex(anIndexName, aTableName);
- NdbTableImpl* table = theNdb->theDictionary->getTable(aTableName);
- NdbTableImpl* indexTable =
- theNdb->theDictionary->getIndexTable(index, table);
+ const NdbTableImpl * indexTable = index->getIndexTable();
if (indexTable != 0){
- return getNdbScanOperation(indexTable);
+ NdbIndexScanOperation* tOp =
+ getNdbScanOperation((NdbTableImpl *) indexTable);
+ if(tOp) tOp->m_currentTable = table;
+ if(tOp) tOp->m_cursor_type = NdbScanOperation::IndexCursor;
+ return tOp;
} else {
setOperationErrorCodeAbort(theNdb->theError.code);
return NULL;
@@ -1072,7 +1073,18 @@ NdbConnection::getNdbScanOperation(const char* anIndexName, const char* aTableNa
setOperationErrorCodeAbort(4114);
return NULL;
-}//NdbConnection::getNdbScanOperation()
+}//NdbConnection::getNdbIndexScanOperation()
+
+NdbIndexScanOperation*
+NdbConnection::getNdbIndexScanOperation(NdbDictionary::Index * index,
+ NdbDictionary::Table * table)
+{
+ if (index && table)
+ return getNdbIndexScanOperation(& NdbIndexImpl::getImpl(*index),
+ & NdbTableImpl::getImpl(*table));
+ else
+ return NULL;
+}//NdbConnection::getNdbIndexScanOperation()
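getNdbIndexScanOperation() now resolves the index table through NdbIndexImpl::getIndexTable() and tags the returned operation as an IndexCursor. A hedged acquisition sketch for the dictionary-handle overload, reusing the ndb/dict/trans handles from the sketch above ("t1_idx" is a hypothetical ordered index); scan definition and iteration follow the flow sketched after the table-scan overload below:

  NdbDictionary::Index* idx = (NdbDictionary::Index*) dict->getIndex("t1_idx", "t1");
  NdbDictionary::Table* tab = (NdbDictionary::Table*) dict->getTable("t1");
  NdbIndexScanOperation* sop = trans->getNdbIndexScanOperation(idx, tab);
  if (sop == NULL) {
    // NULL when either handle is missing or the transaction is no longer in Started state
  }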
/*****************************************************************************
NdbScanOperation* getNdbScanOperation(int aTableId);
@@ -1084,21 +1096,21 @@ Remark: Get an operation from NdbScanOperation object idlelist and get t
object who was fetch by startTransaction pointing to this operation
getOperation will set the theTableId in the NdbOperation object, synchronous.
*****************************************************************************/
-NdbScanOperation*
+NdbIndexScanOperation*
NdbConnection::getNdbScanOperation(NdbTableImpl * tab)
{
- NdbScanOperation* tOp;
+ NdbIndexScanOperation* tOp;
tOp = theNdb->getScanOperation();
if (tOp == NULL)
goto getNdbOp_error1;
// Link scan operation into list of cursor operations
- if (m_theLastCursorOperation == NULL)
- m_theFirstCursorOperation = m_theLastCursorOperation = tOp;
+ if (m_theLastScanOperation == NULL)
+ m_theFirstScanOperation = m_theLastScanOperation = tOp;
else {
- m_theLastCursorOperation->next(tOp);
- m_theLastCursorOperation = tOp;
+ m_theLastScanOperation->next(tOp);
+ m_theLastScanOperation = tOp;
}
tOp->next(NULL);
if (tOp->init(tab, this) != -1) {
@@ -1113,6 +1125,14 @@ getNdbOp_error1:
return NULL;
}//NdbConnection::getNdbScanOperation()
+NdbScanOperation*
+NdbConnection::getNdbScanOperation(NdbDictionary::Table * table)
+{
+ if (table)
+ return getNdbScanOperation(& NdbTableImpl::getImpl(*table));
+ else
+ return NULL;
+}//NdbConnection::getNdbScanOperation()
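The table-handle overload for plain table scans mirrors the index-scan one above. A rough end-to-end sketch; readTuples(), NdbResultSet and nextResult() are from the scan API of this era and are assumptions here:

  NdbScanOperation* scan = trans->getNdbScanOperation(tab);
  NdbResultSet* rs = scan->readTuples();        // assumed scan-definition call
  NdbRecAttr* val = scan->getValue("val");      // "val" is an illustrative column
  trans->execute(NoCommit);
  while (rs->nextResult() == 0) {
    // one row per iteration; val->u_32_value() holds the fetched column
  }
  ndb.closeTransaction(trans);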
// IndexOperation
@@ -1207,6 +1227,18 @@ NdbConnection::getNdbIndexOperation(NdbIndexImpl * anIndex,
return NULL;
}//NdbConnection::getNdbIndexOperation()
+NdbIndexOperation*
+NdbConnection::getNdbIndexOperation(NdbDictionary::Index * index,
+ NdbDictionary::Table * table)
+{
+ if (index && table)
+ return getNdbIndexOperation(& NdbIndexImpl::getImpl(*index),
+ & NdbTableImpl::getImpl(*table));
+ else
+ return NULL;
+}//NdbConnection::getNdbIndexOperation()
+
+
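The unique-index counterpart takes the same pair of dictionary handles. A hedged sketch of a single-row read through a unique hash index ("t1_uk" and "uk_col" are hypothetical names, handles reused from the sketches above):

  NdbDictionary::Index* uk = (NdbDictionary::Index*) dict->getIndex("t1_uk", "t1");
  NdbIndexOperation* iop = trans->getNdbIndexOperation(uk, tab);
  iop->readTuple();
  iop->equal("uk_col", 7);
  NdbRecAttr* val = iop->getValue("val");
  trans->execute(Commit);
  // on success, val->u_32_value() holds the column read via the index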
/*******************************************************************************
int receiveDIHNDBTAMPER(NdbApiSignal* aSignal)
@@ -1323,12 +1355,16 @@ Remark:
int
NdbConnection::receiveTC_COMMITCONF(const TcCommitConf * commitConf)
{
- if(theStatus != Connected){
- return -1;
+ if(checkState_TransId(&commitConf->transId1)){
+ theCommitStatus = Committed;
+ theCompletionStatus = CompletedSuccess;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
}
- theCommitStatus = Committed;
- theCompletionStatus = CompletedSuccess;
- return 0;
+ return -1;
}//NdbConnection::receiveTC_COMMITCONF()
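Every receive* handler in this series is rewritten around the same guard: validate the signal's transaction id before touching connection state, and otherwise treat the signal as dropped (fatal only when NDB_NO_DROPPED_SIGNAL is defined). A schematic of the pattern with a hypothetical signal type; checkState_TransId() is the real member used throughout:

  int
  NdbConnection::receiveSOME_CONF(const SomeConf * conf)   // placeholder handler/struct
  {
    if (checkState_TransId(&conf->transId1)) {
      // signal belongs to this still-open transaction: update state
      theCommitStatus = Committed;
      theCompletionStatus = CompletedSuccess;
      return 0;
    } else {
  #ifdef NDB_NO_DROPPED_SIGNAL
      abort();   // debug builds refuse to drop signals silently
  #endif
    }
    return -1;   // stray or late signal is ignored
  }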
/******************************************************************************
@@ -1342,33 +1378,43 @@ Remark:
int
NdbConnection::receiveTC_COMMITREF(NdbApiSignal* aSignal)
{
- if(theStatus != Connected){
- return -1;
+ const TcCommitRef * ref = CAST_CONSTPTR(TcCommitRef, aSignal->getDataPtr());
+ if(checkState_TransId(&ref->transId1)){
+ setOperationErrorCodeAbort(ref->errorCode);
+ theCommitStatus = Aborted;
+ theCompletionStatus = CompletedFailure;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
}
- const TcCommitRef * const ref = CAST_CONSTPTR(TcCommitRef, aSignal->getDataPtr());
- setOperationErrorCodeAbort(ref->errorCode);
- theCommitStatus = Aborted;
- theCompletionStatus = CompletedFailure;
- return 0;
+
+ return -1;
}//NdbConnection::receiveTC_COMMITREF()
-/*******************************************************************************
+/******************************************************************************
int receiveTCROLLBACKCONF(NdbApiSignal* aSignal);
Return Value: Return 0 : receiveTCROLLBACKCONF was successful.
Return -1: In all other case.
Parameters: aSignal: The signal object pointer.
Remark:
-*******************************************************************************/
+******************************************************************************/
int
NdbConnection::receiveTCROLLBACKCONF(NdbApiSignal* aSignal)
{
- if(theStatus != Connected){
- return -1;
+ if(checkState_TransId(aSignal->getDataPtr() + 1)){
+ theCommitStatus = Aborted;
+ theCompletionStatus = CompletedSuccess;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
}
- theCommitStatus = Aborted;
- theCompletionStatus = CompletedSuccess;
- return 0;
+
+ return -1;
}//NdbConnection::receiveTCROLLBACKCONF()
/*******************************************************************************
@@ -1382,13 +1428,18 @@ Remark:
int
NdbConnection::receiveTCROLLBACKREF(NdbApiSignal* aSignal)
{
- if(theStatus != Connected){
- return -1;
+ if(checkState_TransId(aSignal->getDataPtr() + 1)){
+ setOperationErrorCodeAbort(aSignal->readData(4));
+ theCommitStatus = Aborted;
+ theCompletionStatus = CompletedFailure;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
}
- setOperationErrorCodeAbort(aSignal->readData(2));
- theCommitStatus = Aborted;
- theCompletionStatus = CompletedFailure;
- return 0;
+
+ return -1;
}//NdbConnection::receiveTCROLLBACKREF()
/*****************************************************************************
@@ -1403,36 +1454,31 @@ Remark: Handles the reception of the ROLLBACKREP signal.
int
NdbConnection::receiveTCROLLBACKREP( NdbApiSignal* aSignal)
{
- Uint64 tRecTransId, tCurrTransId;
- Uint32 tTmp1, tTmp2;
-
- if (theStatus != Connected) {
- return -1;
- }//if
-/*****************************************************************************
+ /****************************************************************************
Check that we are expecting signals from this transaction and that it doesn't
belong to a transaction already completed. Simply ignore messages from other
transactions.
-******************************************************************************/
- tTmp1 = aSignal->readData(2);
- tTmp2 = aSignal->readData(3);
- tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
- tCurrTransId = this->getTransactionId();
- if (tCurrTransId != tRecTransId) {
- return -1;
- }//if
- theError.code = aSignal->readData(4); // Override any previous errors
+ ****************************************************************************/
+ if(checkState_TransId(aSignal->getDataPtr() + 1)){
+ theError.code = aSignal->readData(4);// Override any previous errors
+
+ /**********************************************************************/
+ /* A serious error has occurred. This could be due to deadlock or */
+ /* lack of resources or simply a programming error in NDB. This */
+ /* transaction will be aborted. Actually it has already been */
+ /* and we only need to report completion and return with the */
+ /* error code to the application. */
+ /**********************************************************************/
+ theCompletionStatus = CompletedFailure;
+ theCommitStatus = Aborted;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
+ }
-/**********************************************************************/
-/* A serious error has occured. This could be due to deadlock or */
-/* lack of resources or simply a programming error in NDB. This */
-/* transaction will be aborted. Actually it has already been */
-/* and we only need to report completion and return with the */
-/* error code to the application. */
-/**********************************************************************/
- theCompletionStatus = CompletedFailure;
- theCommitStatus = Aborted;
- return 0;
+ return -1;
}//NdbConnection::receiveTCROLLBACKREP()
/*******************************************************************************
@@ -1446,49 +1492,50 @@ Remark:
int
NdbConnection::receiveTCKEYCONF(const TcKeyConf * keyConf, Uint32 aDataLength)
{
- Uint64 tRecTransId;
- NdbOperation* tOp;
- Uint32 tConditionFlag;
-
+ NdbReceiver* tOp;
const Uint32 tTemp = keyConf->confInfo;
- const Uint32 tTmp1 = keyConf->transId1;
- const Uint32 tTmp2 = keyConf->transId2;
-/******************************************************************************
+ /***************************************************************************
Check that we are expecting signals from this transaction and that it
doesn't belong to a transaction already completed. Simply ignore messages
from other transactions.
-******************************************************************************/
- tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
+ ***************************************************************************/
+ if(checkState_TransId(&keyConf->transId1)){
- const Uint32 tNoOfOperations = TcKeyConf::getNoOfOperations(tTemp);
- const Uint32 tCommitFlag = TcKeyConf::getCommitFlag(tTemp);
- tConditionFlag = (Uint32)(((aDataLength - 5) >> 1) - tNoOfOperations);
- tConditionFlag |= (Uint32)(tNoOfOperations > 10);
- tConditionFlag |= (Uint32)(tNoOfOperations <= 0);
- tConditionFlag |= (Uint32)(theTransactionId - tRecTransId);
- tConditionFlag |= (Uint32)(theStatus - Connected);
+ const Uint32 tNoOfOperations = TcKeyConf::getNoOfOperations(tTemp);
+ const Uint32 tCommitFlag = TcKeyConf::getCommitFlag(tTemp);
- if (tConditionFlag == 0) {
const Uint32* tPtr = (Uint32 *)&keyConf->operations[0];
+ Uint32 tNoComp = theNoOfOpCompleted;
for (Uint32 i = 0; i < tNoOfOperations ; i++) {
- tOp = theNdb->void2rec_op(theNdb->int2void(*tPtr));
+ tOp = theNdb->void2rec(theNdb->int2void(*tPtr));
tPtr++;
const Uint32 tAttrInfoLen = *tPtr;
tPtr++;
- if (tOp && tOp->checkMagicNumber() != -1) {
- tOp->TCOPCONF(tAttrInfoLen);
+ if (tOp && tOp->checkMagicNumber()) {
+ tNoComp += tOp->execTCOPCONF(tAttrInfoLen);
} else {
return -1;
}//if
}//for
- Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
+ theNoOfOpCompleted = tNoComp;
Uint32 tGCI = keyConf->gci;
if (tCommitFlag == 1) {
theCommitStatus = Committed;
theGlobalCheckpointId = tGCI;
} else if ((tNoComp >= tNoSent) &&
(theLastExecOpInList->theCommitIndicator == 1)){
+
+
+ if (m_abortOption == IgnoreError && theError.code != 0){
+ /**
+ * There's always a TCKEYCONF when using IgnoreError
+ */
+#ifdef VM_TRACE
+ ndbout_c("Not completing transaction 2");
+#endif
+ return -1;
+ }
/**********************************************************************/
// We sent the transaction with Commit flag set and received a CONF with
// no Commit flag set. This is clearly an anomaly.
@@ -1502,7 +1549,12 @@ from other transactions.
return 0; // No more operations to wait for
}//if
// Not completed the reception yet.
- }//if
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
+ }
+
return -1;
}//NdbConnection::receiveTCKEYCONF()
@@ -1518,50 +1570,50 @@ Remark: Handles the reception of the TCKEY_FAILCONF signal.
int
NdbConnection::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf)
{
- Uint64 tRecTransId, tCurrTransId;
- Uint32 tTmp1, tTmp2;
NdbOperation* tOp;
- if (theStatus != Connected) {
- return -1;
- }//if
/*
- Check that we are expecting signals from this transaction and that it
- doesn't belong to a transaction already completed. Simply ignore
- messages from other transactions.
+ Check that we are expecting signals from this transaction and that it
+ doesn't belong to a transaction already completed. Simply ignore
+ messages from other transactions.
*/
- tTmp1 = failConf->transId1;
- tTmp2 = failConf->transId2;
- tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
- tCurrTransId = this->getTransactionId();
- if (tCurrTransId != tRecTransId) {
- return -1;
- }//if
- /*
- A node failure of the TC node occured. The transaction has
- been committed.
- */
- theCommitStatus = Committed;
- tOp = theFirstExecOpInList;
- while (tOp != NULL) {
+ if(checkState_TransId(&failConf->transId1)){
/*
- Check if the transaction expected read values...
- If it did some of them might have gotten lost even if we succeeded
- in committing the transaction.
+ A node failure of the TC node occurred. The transaction has
+ been committed.
*/
- if (tOp->theAI_ElementLen != 0) {
- theCompletionStatus = CompletedFailure;
- setOperationErrorCodeAbort(4115);
- break;
- }//if
- if (tOp->theCurrentRecAttr != NULL) {
- theCompletionStatus = CompletedFailure;
- setOperationErrorCodeAbort(4115);
- break;
- }//if
- tOp = tOp->next();
- }//while
- theReleaseOnClose = true;
- return 0;
+ theCommitStatus = Committed;
+ tOp = theFirstExecOpInList;
+ while (tOp != NULL) {
+ /*
+ * Check if the transaction expected read values...
+ * If it did some of them might have gotten lost even if we succeeded
+ * in committing the transaction.
+ */
+ switch(tOp->theOperationType){
+ case NdbOperation::UpdateRequest:
+ case NdbOperation::InsertRequest:
+ case NdbOperation::DeleteRequest:
+ case NdbOperation::WriteRequest:
+ tOp = tOp->next();
+ break;
+ case NdbOperation::ReadRequest:
+ case NdbOperation::ReadExclusive:
+ case NdbOperation::OpenScanRequest:
+ case NdbOperation::OpenRangeScanRequest:
+ theCompletionStatus = CompletedFailure;
+ setOperationErrorCodeAbort(4115);
+ tOp = NULL;
+ break;
+ }//if
+ }//while
+ theReleaseOnClose = true;
+ return 0;
+ } else {
+#ifdef VM_TRACE
+ ndbout_c("Recevied TCKEY_FAILCONF wo/ operation");
+#endif
+ }
+ return -1;
}//NdbConnection::receiveTCKEY_FAILCONF()
/*************************************************************************
@@ -1576,111 +1628,94 @@ Remark: Handles the reception of the TCKEY_FAILREF signal.
int
NdbConnection::receiveTCKEY_FAILREF(NdbApiSignal* aSignal)
{
- Uint64 tRecTransId, tCurrTransId;
- Uint32 tTmp1, tTmp2;
-
- if (theStatus != Connected) {
- return -1;
- }//if
- /*
- Check that we are expecting signals from this transaction and
- that it doesn't belong to a transaction already
- completed. Simply ignore messages from other transactions.
- */
- tTmp1 = aSignal->readData(2);
- tTmp2 = aSignal->readData(3);
- tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
- tCurrTransId = this->getTransactionId();
- if (tCurrTransId != tRecTransId) {
- return -1;
- }//if
/*
- We received an indication of that this transaction was aborted due to a
- node failure.
+ Check that we are expecting signals from this transaction and
+ that it doesn't belong to a transaction already
+ completed. Simply ignore messages from other transactions.
*/
- if (theSendStatus == sendTC_ROLLBACK) {
+ if(checkState_TransId(aSignal->getDataPtr()+1)){
/*
- We were in the process of sending a rollback anyways. We will
- report it as a success.
+ We received an indication that this transaction was aborted due to a
+ node failure.
*/
- theCompletionStatus = CompletedSuccess;
+ if (theSendStatus == NdbConnection::sendTC_ROLLBACK) {
+ /*
+ We were in the process of sending a rollback anyways. We will
+ report it as a success.
+ */
+ theCompletionStatus = NdbConnection::CompletedSuccess;
+ } else {
+ theCompletionStatus = NdbConnection::CompletedFailure;
+ theError.code = 4031;
+ }//if
+ theReleaseOnClose = true;
+ theCommitStatus = NdbConnection::Aborted;
+ return 0;
} else {
- theCompletionStatus = CompletedFailure;
- theError.code = 4031;
- }//if
- theReleaseOnClose = true;
- theCommitStatus = Aborted;
- return 0;
+#ifdef VM_TRACE
+ ndbout_c("Recevied TCKEY_FAILREF wo/ operation");
+#endif
+ }
+ return -1;
}//NdbConnection::receiveTCKEY_FAILREF()
-/*******************************************************************************
+/******************************************************************************
int receiveTCINDXCONF(NdbApiSignal* aSignal, Uint32 long_short_ind);
Return Value: Return 0 : receiveTCINDXCONF was successful.
Return -1: In all other case.
Parameters: aSignal: The signal object pointer.
Remark:
-*******************************************************************************/
+******************************************************************************/
int
-NdbConnection::receiveTCINDXCONF(const TcIndxConf * indxConf, Uint32 aDataLength)
+NdbConnection::receiveTCINDXCONF(const TcIndxConf * indxConf,
+ Uint32 aDataLength)
{
- Uint64 tRecTransId;
- Uint32 tConditionFlag;
-
- const Uint32 tTemp = indxConf->confInfo;
- const Uint32 tTmp1 = indxConf->transId1;
- const Uint32 tTmp2 = indxConf->transId2;
-/******************************************************************************
-Check that we are expecting signals from this transaction and that it
-doesn't belong to a transaction already completed. Simply ignore messages
-from other transactions.
-******************************************************************************/
- tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
-
- const Uint32 tNoOfOperations = TcIndxConf::getNoOfOperations(tTemp);
- const Uint32 tCommitFlag = TcKeyConf::getCommitFlag(tTemp);
-
- tConditionFlag = (Uint32)(((aDataLength - 5) >> 1) - tNoOfOperations);
- tConditionFlag |= (Uint32)(tNoOfOperations > 10);
- tConditionFlag |= (Uint32)(tNoOfOperations <= 0);
- tConditionFlag |= (Uint32)(theTransactionId - tRecTransId);
- tConditionFlag |= (Uint32)(theStatus - Connected);
-
- if (tConditionFlag == 0) {
+ if(checkState_TransId(&indxConf->transId1)){
+ const Uint32 tTemp = indxConf->confInfo;
+ const Uint32 tNoOfOperations = TcIndxConf::getNoOfOperations(tTemp);
+ const Uint32 tCommitFlag = TcKeyConf::getCommitFlag(tTemp);
+
const Uint32* tPtr = (Uint32 *)&indxConf->operations[0];
+ Uint32 tNoComp = theNoOfOpCompleted;
for (Uint32 i = 0; i < tNoOfOperations ; i++) {
- NdbIndexOperation* tOp = theNdb->void2rec_iop(theNdb->int2void(*tPtr));
+ NdbReceiver* tOp = theNdb->void2rec(theNdb->int2void(*tPtr));
tPtr++;
const Uint32 tAttrInfoLen = *tPtr;
tPtr++;
- if (tOp && tOp->checkMagicNumber() != -1) {
- tOp->TCOPCONF(tAttrInfoLen);
+ if (tOp && tOp->checkMagicNumber()) {
+ tNoComp += tOp->execTCOPCONF(tAttrInfoLen);
} else {
return -1;
}//if
}//for
- Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
Uint32 tGCI = indxConf->gci;
+ theNoOfOpCompleted = tNoComp;
if (tCommitFlag == 1) {
theCommitStatus = Committed;
theGlobalCheckpointId = tGCI;
} else if ((tNoComp >= tNoSent) &&
(theLastExecOpInList->theCommitIndicator == 1)){
-/**********************************************************************/
-// We sent the transaction with Commit flag set and received a CONF with
-// no Commit flag set. This is clearly an anomaly.
-/**********************************************************************/
+ /**********************************************************************/
+ // We sent the transaction with Commit flag set and received a CONF with
+ // no Commit flag set. This is clearly an anomaly.
+ /**********************************************************************/
theError.code = 4011;
- theCompletionStatus = CompletedFailure;
- theCommitStatus = Aborted;
+ theCompletionStatus = NdbConnection::CompletedFailure;
+ theCommitStatus = NdbConnection::Aborted;
return 0;
}//if
if (tNoComp >= tNoSent) {
return 0; // No more operations to wait for
}//if
// Not completed the reception yet.
- }//if
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
+ }
+
return -1;
}//NdbConnection::receiveTCINDXCONF()
@@ -1696,36 +1731,26 @@ Remark: Handles the reception of the TCINDXREF signal.
int
NdbConnection::receiveTCINDXREF( NdbApiSignal* aSignal)
{
- Uint64 tRecTransId, tCurrTransId;
- Uint32 tTmp1, tTmp2;
-
- if (theStatus != Connected) {
- return -1;
- }//if
-/*****************************************************************************
-Check that we are expecting signals from this transaction and that it doesn't
-belong to a transaction already completed. Simply ignore messages from other
-transactions.
-******************************************************************************/
- tTmp1 = aSignal->readData(2);
- tTmp2 = aSignal->readData(3);
- tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
- tCurrTransId = this->getTransactionId();
- if (tCurrTransId != tRecTransId) {
- return -1;
- }//if
- theError.code = aSignal->readData(4); // Override any previous errors
+ if(checkState_TransId(aSignal->getDataPtr()+1)){
+ theError.code = aSignal->readData(4); // Override any previous errors
+
+ /**********************************************************************/
+ /* A serious error has occurred. This could be due to deadlock or */
+ /* lack of resources or simply a programming error in NDB. This */
+ /* transaction will be aborted. Actually it has already been */
+ /* and we only need to report completion and return with the */
+ /* error code to the application. */
+ /**********************************************************************/
+ theCompletionStatus = NdbConnection::CompletedFailure;
+ theCommitStatus = NdbConnection::Aborted;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
+ }
-/**********************************************************************/
-/* A serious error has occured. This could be due to deadlock or */
-/* lack of resources or simply a programming error in NDB. This */
-/* transaction will be aborted. Actually it has already been */
-/* and we only need to report completion and return with the */
-/* error code to the application. */
-/**********************************************************************/
- theCompletionStatus = CompletedFailure;
- theCommitStatus = Aborted;
- return 0;
+ return -1;
}//NdbConnection::receiveTCINDXREF()
/*******************************************************************************
@@ -1741,7 +1766,7 @@ NdbConnection::OpCompleteFailure()
{
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
- theCompletionStatus = CompletedFailure;
+ theCompletionStatus = NdbConnection::CompletedFailure;
tNoComp++;
theNoOfOpCompleted = tNoComp;
if (tNoComp == tNoSent) {
@@ -1752,8 +1777,18 @@ NdbConnection::OpCompleteFailure()
//operation is not really part of that transaction.
//------------------------------------------------------------------------
if (theSimpleState == 1) {
- theCommitStatus = Aborted;
+ theCommitStatus = NdbConnection::Aborted;
}//if
+ if (m_abortOption == IgnoreError){
+ /**
+ * There's always a TCKEYCONF when using IgnoreError
+ */
+#ifdef VM_TRACE
+ ndbout_c("Not completing transaction");
+#endif
+ return -1;
+ }
+
return 0; // Last operation received
} else if (tNoComp > tNoSent) {
setOperationErrorCodeAbort(4113); // Too many operations,
@@ -1780,7 +1815,7 @@ NdbConnection::OpCompleteSuccess()
theNoOfOpCompleted = tNoComp;
if (tNoComp == tNoSent) { // Last operation completed
if (theSimpleState == 1) {
- theCommitStatus = Committed;
+ theCommitStatus = NdbConnection::Committed;
}//if
return 0;
} else if (tNoComp < tNoSent) {
@@ -1788,7 +1823,7 @@ NdbConnection::OpCompleteSuccess()
} else {
setOperationErrorCodeAbort(4113); // Too many operations,
// stop waiting for more
- theCompletionStatus = CompletedFailure;
+ theCompletionStatus = NdbConnection::CompletedFailure;
return 0;
}//if
}//NdbConnection::OpCompleteSuccess()
@@ -1801,7 +1836,7 @@ Remark: Get global checkpoint identity of the transaction
int
NdbConnection::getGCI()
{
- if (theCommitStatus == Committed) {
+ if (theCommitStatus == NdbConnection::Committed) {
return theGlobalCheckpointId;
}//if
return 0;
diff --git a/ndb/src/ndbapi/NdbConnectionScan.cpp b/ndb/src/ndbapi/NdbConnectionScan.cpp
index 962acc0bdac..0c4490015ff 100644
--- a/ndb/src/ndbapi/NdbConnectionScan.cpp
+++ b/ndb/src/ndbapi/NdbConnectionScan.cpp
@@ -33,7 +33,6 @@
#include <NdbConnection.hpp>
#include <NdbOperation.hpp>
#include <NdbScanOperation.hpp>
-#include "NdbScanReceiver.hpp"
#include "NdbApiSignal.hpp"
#include "TransporterFacade.hpp"
#include "NdbUtil.hpp"
@@ -44,304 +43,6 @@
#include <NdbOut.hpp>
-// time out for next scan result (-1 is infinite)
-// XXX should change default only if non-trivial interpreted program is used
-#define WAITFOR_SCAN_TIMEOUT 120000
-
-
-/*****************************************************************************
- * int executeScan();
- *
- * 1. Check that the transaction is started and other important preconditions
- * 2. Tell the kernel to start scanning by sending one SCAN_TABREQ, if
- * parallelism is greater than 16 also send one SCAN_TABINFO for each
- * additional 16
- * Define which attributes to scan in ATTRINFO, this signal also holds the
- * interpreted program
- * 3. Wait for the answer of the SCAN_TABREQ. This is either a SCAN_TABCONF if
- * the scan was correctly defined and a SCAN_TABREF if the scan couldn't
- * be started.
- * 4. Check the result, if scan was not started return -1
- *
- ****************************************************************************/
-int
-NdbConnection::executeScan(){
- if (theTransactionIsStarted == true){ // Transaction already started.
- setErrorCode(4600);
- return -1;
- }
- if (theStatus != Connected) { // Lost connection
- setErrorCode(4601);
- return -1;
- }
- if (theScanningOp == NULL){
- setErrorCode(4602); // getNdbOperation must be called before executeScan
- return -1;
- }
- TransporterFacade* tp = TransporterFacade::instance();
- theNoOfOpCompleted = 0;
- theNoOfSCANTABCONFRecv = 0;
- tp->lock_mutex();
- if (tp->get_node_alive(theDBnode) &&
- (tp->getNodeSequence(theDBnode) == theNodeSequence)) {
- if (tp->check_send_size(theDBnode, get_send_size())) {
- theTransactionIsStarted = true;
- if (sendScanStart() == -1){
- tp->unlock_mutex();
- return -1;
- }//if
- theNdb->theWaiter.m_node = theDBnode;
- theNdb->theWaiter.m_state = WAIT_SCAN;
- int res = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
- if (res == 0) {
- return 0;
- } else {
- if (res == -1) {
- setErrorCode(4008);
- } else if (res == -2) {
- theTransactionIsStarted = false;
- theReleaseOnClose = true;
- setErrorCode(4028);
- } else {
- ndbout << "Impossible return from receiveResponse in executeScan";
- ndbout << endl;
- abort();
- }//if
- theCommitStatus = Aborted;
- return -1;
- }//if
- } else {
- TRACE_DEBUG("Start a scan with send buffer full attempted");
- setErrorCode(4022);
- theCommitStatus = Aborted;
- }//if
- } else {
- if (!(tp->get_node_stopping(theDBnode) &&
- (tp->getNodeSequence(theDBnode) == theNodeSequence))) {
- TRACE_DEBUG("The node is hard dead when attempting to start a scan");
- setErrorCode(4029);
- theReleaseOnClose = true;
- } else {
- TRACE_DEBUG("The node is stopping when attempting to start a scan");
- setErrorCode(4030);
- }//if
- theCommitStatus = Aborted;
- }//if
- tp->unlock_mutex();
- return -1;
-}
-
-/******************************************************************************
- * int nextScanResult();
- * Remark:
- * This method is used to distribute data received to the application.
- * Iterate through the list and search for operations that haven't
- * been distributed yet (status != Finished).
- * If there are no more operations/records still waiting to be exececuted
- * we have to send SCAN_NEXTREQ to fetch next set of records.
- *
- * TODO - This function should be able to return a value indicating if
- * there are any more records already fetched from memory or if it has to
- * ask the db for more. This would mean we could get better performance when
- * takeOver is used wince we can take over all ops already fetched, put them
- * in another trans and send them of to the db when there are no more records
- * already fetched. Maybe use a new argument to the function for this
-******************************************************************************/
-int
-NdbConnection::nextScanResult(bool fetchAllowed){
-
- if (theTransactionIsStarted != true){ // Transaction not started.
- setErrorCode(4601);
- return -1;
- }
- // Scan has finished ok but no operations recived = empty recordset.
- if(theScanFinished == true){
- return 1; // No more records
- }
- if (theStatus != Connected){// Lost connection
- setErrorCode(4601);
- return -1;
- }
- // Something went wrong, probably we got a SCAN_TABREF earlier.
- if (theCompletionStatus == CompletedFailure) {
- return -1;
- }
- if (theNoOfOpCompleted == theNoOfOpFetched) {
- // There are no more records cached in NdbApi
- if (fetchAllowed == true){
- // Get some more records from db
-
- if (fetchNextScanResult() == -1){
- return -1;
- }
- if (theScanFinished == true) { // The scan has finished.
- return 1; // 1 = No more records
- }
- if (theCompletionStatus == CompletedFailure) {
- return -1; // Something went wrong, probably we got a SCAN_TABREF.
- }
- } else {
- // There where no more cached records in NdbApi
- // and we where not allowed to go to db and ask for
- // more
- return 2;
- }
- }
-
- // It's not allowed to come here without any cached records
- if (theCurrentScanRec == NULL){
-#ifdef VM_TRACE
- ndbout << "nextScanResult("<<fetchAllowed<<")"<<endl
- << " theTransactionIsStarted = " << theTransactionIsStarted << endl
- << " theScanFinished = " << theScanFinished << endl
- << " theCommitStatus = " << theCommitStatus << endl
- << " theStatus = " << theStatus << endl
- << " theCompletionStatus = " << theCompletionStatus << endl
- << " theNoOfOpCompleted = " << theNoOfOpCompleted << endl
- << " theNoOfOpFetched = " << theNoOfOpFetched << endl
- << " theScanningOp = " << theScanningOp << endl
- << " theNoOfSCANTABCONFRecv = "<< theNoOfSCANTABCONFRecv << endl
- << " theNdb->theWaiter.m_node = " <<theNdb->theWaiter.m_node<<endl
- << " theNdb->theWaiter.m_state = " << theNdb->theWaiter.m_state << endl;
- abort();
-#endif
- return -1;
- }
-
- // Execute the saved signals for this operation.
- NdbScanReceiver* tScanRec = theCurrentScanRec;
- theScanningOp->theCurrRecAI_Len = 0;
- theScanningOp->theCurrentRecAttr = theScanningOp->theFirstRecAttr;
- if(tScanRec->executeSavedSignals() != 0)
- return -1;
- theNoOfOpCompleted++;
- // Remember for next iteration and takeOverScanOp
- thePreviousScanRec = tScanRec;
- theCurrentScanRec = tScanRec->next();
- return 0; // 0 = There are more rows to be fetched.
-}
-
-/******************************************************************************
- * int stopScan()
- * Remark: By sending SCAN_NEXTREQ with data word 2 set to TRUE we
- * abort the scan process.
- *****************************************************************************/
-int
-NdbConnection::stopScan()
-{
- if(theScanFinished == true){
- return 0;
- }
- if (theCompletionStatus == CompletedFailure){
- return 0;
- }
-
- if (theScanningOp == 0){
- return 0;
- }
-
- theNoOfOpCompleted = 0;
- theNoOfSCANTABCONFRecv = 0;
- theScanningOp->prepareNextScanResult();
- return sendScanNext(1);
-}
-
-
-/********************************************************************
- * int sendScanStart()
- *
- * Send the signals reuired to define and start the scan
- * 1. Send SCAN_TABREQ
- * 2. Send SCAN_TABINFO(if any, parallelism must be > 16)
- * 3. Send ATTRINFO signals
- *
- * Returns -1 if an error occurs otherwise 0.
- *
- ********************************************************************/
-int
-NdbConnection::sendScanStart(){
-
- /***** 0. Prepare signals ******************/
- // This might modify variables and signals
- if(theScanningOp->prepareSendScan(theTCConPtr,
- theTransactionId) == -1)
- return -1;
-
- /***** 1. Send SCAN_TABREQ **************/
- /***** 2. Send SCAN_TABINFO *************/
- /***** 3. Send ATTRINFO signals *********/
- if (theScanningOp->doSendScan(theDBnode) == -1)
- return -1;
- return 0;
-}
-
-
-int
-NdbConnection::fetchNextScanResult(){
- theNoOfOpCompleted = 0;
- theNoOfSCANTABCONFRecv = 0;
- theScanningOp->prepareNextScanResult();
- return sendScanNext(0);
-}
-
-
-
-/***********************************************************
- * int sendScanNext(int stopScanFlag)
- *
- * ************************************************************/
-int NdbConnection::sendScanNext(bool stopScanFlag){
- NdbApiSignal tSignal(theNdb->theMyRef);
- Uint32 tTransId1, tTransId2;
- tSignal.setSignal(GSN_SCAN_NEXTREQ);
- tSignal.setData(theTCConPtr, 1);
- // Set the stop flag in word 2(1 = stop)
- Uint32 tStopValue;
- tStopValue = stopScanFlag == true ? 1 : 0;
- tSignal.setData(tStopValue, 2);
- tTransId1 = (Uint32) theTransactionId;
- tTransId2 = (Uint32) (theTransactionId >> 32);
- tSignal.setData(tTransId1, 3);
- tSignal.setData(tTransId2, 4);
- tSignal.setLength(4);
- Uint32 conn_seq = theNodeSequence;
- int return_code = theNdb->sendRecSignal(theDBnode,
- WAIT_SCAN,
- &tSignal,
- conn_seq);
- if (return_code == 0) {
- return 0;
- } else if (return_code == -1) { // Time-out
- TRACE_DEBUG("Time-out when sending sendScanNext");
- setErrorCode(4024);
- theTransactionIsStarted = false;
- theReleaseOnClose = true;
- theCommitStatus = Aborted;
- } else if (return_code == -2) { // Node failed
- TRACE_DEBUG("Node failed when sendScanNext");
- setErrorCode(4027);
- theTransactionIsStarted = false;
- theReleaseOnClose = true;
- theCommitStatus = Aborted;
- } else if (return_code == -3) {
- TRACE_DEBUG("Send failed when sendScanNext");
- setErrorCode(4033);
- theTransactionIsStarted = false;
- theReleaseOnClose = true;
- theCommitStatus = Aborted;
- } else if (return_code == -4) {
- TRACE_DEBUG("Send buffer full when sendScanNext");
- setErrorCode(4032);
- } else if (return_code == -5) {
- TRACE_DEBUG("Node stopping when sendScanNext");
- setErrorCode(4034);
- } else {
- ndbout << "Impossible return from sendRecSignal" << endl;
- abort();
- }//if
- return -1;
-}
-
/***************************************************************************
* int receiveSCAN_TABREF(NdbApiSignal* aSignal)
@@ -352,39 +53,24 @@ int NdbConnection::sendScanNext(bool stopScanFlag){
****************************************************************************/
int
NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){
- const ScanTabRef * const scanTabRef = CAST_CONSTPTR(ScanTabRef, aSignal->getDataPtr());
- if (theStatus != Connected){
-#ifdef VM_TRACE
- ndbout << "SCAN_TABREF dropped, theStatus = " << theStatus << endl;
-#endif
- return -1;
- }
- if (aSignal->getLength() != ScanTabRef::SignalLength){
-#ifdef VM_TRACE
- ndbout << "SCAN_TABREF dropped, signal length " << aSignal->getLength() << endl;
-#endif
- return -1;
- }
- const Uint64 tCurrTransId = this->getTransactionId();
- const Uint64 tRecTransId = (Uint64)scanTabRef->transId1 +
- ((Uint64)scanTabRef->transId2 << 32);
- if ((tRecTransId - tCurrTransId) != (Uint64)0){
-#ifdef VM_TRACE
- ndbout << "SCAN_TABREF dropped, wrong transid" << endl;
+ const ScanTabRef * ref = CAST_CONSTPTR(ScanTabRef, aSignal->getDataPtr());
+
+ if(checkState_TransId(&ref->transId1)){
+ theScanningOp->theError.code = ref->errorCode;
+ if(!ref->closeNeeded){
+ theScanningOp->execCLOSE_SCAN_REP();
+ return 0;
+ }
+ assert(theScanningOp->m_sent_receivers_count);
+ theScanningOp->m_conf_receivers_count++;
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
#endif
- return -1;
}
-#if 0
- ndbout << "SCAN_TABREF, "
- <<"transid=("<<hex<<scanTabRef->transId1<<", "<<hex<<scanTabRef->transId2<<")"
- <<", err="<<dec<<scanTabRef->errorCode << endl;
-#endif
- setErrorCode(scanTabRef->errorCode);
- theCompletionStatus = CompletedFailure;
- theCommitStatus = Aborted; // Indicate that this "transaction" was aborted
- theTransactionIsStarted = false;
- theScanningOp->releaseSignals();
- return 0;
+
+ return -1;
}
/*****************************************************************************
@@ -401,173 +87,44 @@ NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){
*
*****************************************************************************/
int
-NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal)
+NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
+ const Uint32 * ops, Uint32 len)
{
- const ScanTabConf * const conf = CAST_CONSTPTR(ScanTabConf, aSignal->getDataPtr());
- if (theStatus != Connected){
-#ifdef VM_TRACE
- ndbout << "Dropping SCAN_TABCONF, theStatus = "<< theStatus << endl;
-#endif
- return -1;
- }
- if(aSignal->getLength() != ScanTabConf::SignalLength){
-#ifdef VM_TRACE
- ndbout << "Dropping SCAN_TABCONF, getLength = "<< aSignal->getLength() << endl;
-#endif
- return -1;
- }
- const Uint64 tCurrTransId = this->getTransactionId();
- const Uint64 tRecTransId =
- (Uint64)conf->transId1 + ((Uint64)conf->transId2 << 32);
- if ((tRecTransId - tCurrTransId) != (Uint64)0){
-#ifdef VM_TRACE
- ndbout << "Dropping SCAN_TABCONF, wrong transid" << endl;
-#endif
- return -1;
- }
-
- const Uint8 scanStatus =
- ScanTabConf::getScanStatus(conf->requestInfo);
-
- if (scanStatus != 0) {
- theCompletionStatus = CompletedSuccess;
- theCommitStatus = Committed;
- theScanFinished = true;
- return 0;
- }
-
- // There can only be one SCANTABCONF
- assert(theNoOfSCANTABCONFRecv == 0);
- theNoOfSCANTABCONFRecv++;
-
- // Save a copy of the signal
- NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal();
- if (tCopy == NULL){
- setErrorCode(4000);
- return 2; // theWaiter.m_state = NO_WAIT
- }
- tCopy->copyFrom(aSignal);
- tCopy->next(NULL);
- theScanningOp->theSCAN_TABCONF_Recv = tCopy;
-
- return checkNextScanResultComplete();
-
-}
-
-/*****************************************************************************
- * int receiveSCAN_TABINFO(NdbApiSignal* aSignal)
- *
- * Receive SCAN_TABINFO
- *
- *****************************************************************************/
-int
-NdbConnection::receiveSCAN_TABINFO(NdbApiSignal* aSignal)
-{
- if (theStatus != Connected){
- //ndbout << "SCAN_TABINFO dropped, theStatus = " << theStatus << endl;
- return -1;
- }
- if (aSignal->getLength() != ScanTabInfo::SignalLength){
- //ndbout << "SCAN_TABINFO dropped, length = " << aSignal->getLength() << endl;
- return -1;
- }
-
- NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal();
- if (tCopy == NULL){
- setErrorCode(4000);
- return 2; // theWaiter.m_state = NO_WAIT
- }
- tCopy->copyFrom(aSignal);
- tCopy->next(NULL);
-
- // Put the signal last in list
- if (theScanningOp->theFirstSCAN_TABINFO_Recv == NULL)
- theScanningOp->theFirstSCAN_TABINFO_Recv = tCopy;
- else
- theScanningOp->theLastSCAN_TABINFO_Recv->next(tCopy);
- theScanningOp->theLastSCAN_TABINFO_Recv = tCopy;
-
- return checkNextScanResultComplete();
-}
-
-/******************************************************************************
- * int checkNextScanResultComplete(NdbApiSignal* aSignal)
- *
- * Remark Traverses all the lists that are associated with
- * this resultset and checks if all signals are there.
- * If all required signal are received return 0
- *
- *
- *****************************************************************************/
-int
-NdbConnection::checkNextScanResultComplete(){
-
- if (theNoOfSCANTABCONFRecv != 1) {
- return -1;
- }
-
- Uint32 tNoOfOpFetched = 0;
- theCurrentScanRec = NULL;
- thePreviousScanRec = NULL;
-
- const ScanTabConf * const conf =
- CAST_CONSTPTR(ScanTabConf, theScanningOp->theSCAN_TABCONF_Recv->getDataPtr());
- const Uint32 numOperations = ScanTabConf::getOperations(conf->requestInfo);
- Uint32 sigIndex = 0;
- NdbApiSignal* tSignal = theScanningOp->theFirstSCAN_TABINFO_Recv;
- while(tSignal != NULL){
- const ScanTabInfo * const info = CAST_CONSTPTR(ScanTabInfo, tSignal->getDataPtr());
- // Loop through the operations for this SCAN_TABINFO
- // tOpAndLength is allowed to be zero, this means no
- // TRANSID_AI signals where sent for this record
- // I.e getValue was called 0 times when defining scan
-
- // The max number of operations in each signal is 16
- Uint32 numOpsInSig = numOperations - sigIndex*16;
- if (numOpsInSig > 16)
- numOpsInSig = 16;
- for(Uint32 i = 0; i < numOpsInSig; i++){
- const Uint32 tOpAndLength = info->operLenAndIdx[i];
- const Uint32 tOpIndex = ScanTabInfo::getIdx(tOpAndLength);
- const Uint32 tOpLen = ScanTabInfo::getLen(tOpAndLength);
-
- assert(tOpIndex < 256);
- NdbScanReceiver* tScanRec =
- theScanningOp->theScanReceiversArray[tOpIndex];
- assert(tScanRec != NULL);
- if(tScanRec->isCompleted(tOpLen))
- tScanRec->setCompleted();
- else{
- return -1; // At least one receiver was not ready
- }
-
- // Build list of scan receivers
- if (theCurrentScanRec == NULL) {
- theCurrentScanRec = tScanRec;
- thePreviousScanRec = tScanRec;
- } else {
- thePreviousScanRec->next(tScanRec);
- thePreviousScanRec = tScanRec;
+ const ScanTabConf * conf = CAST_CONSTPTR(ScanTabConf, aSignal->getDataPtr());
+ if(checkState_TransId(&conf->transId1)){
+
+ if (conf->requestInfo == ScanTabConf::EndOfData) {
+ theScanningOp->execCLOSE_SCAN_REP();
+ return 0;
+ }
+
+ for(Uint32 i = 0; i<len; i += 3){
+ Uint32 ptrI = * ops++;
+ Uint32 tcPtrI = * ops++;
+ Uint32 info = * ops++;
+ Uint32 opCount = ScanTabConf::getRows(info);
+ Uint32 totalLen = ScanTabConf::getLength(info);
+
+ void * tPtr = theNdb->int2void(ptrI);
+ assert(tPtr); // For now
+ NdbReceiver* tOp = theNdb->void2rec(tPtr);
+ if (tOp && tOp->checkMagicNumber()){
+ if(tOp->execSCANOPCONF(tcPtrI, totalLen, opCount)){
+ /**
+ *
+ */
+ theScanningOp->receiver_delivered(tOp);
+ } else if(info == ScanTabConf::EndOfData){
+ theScanningOp->receiver_completed(tOp);
+ }
}
- tNoOfOpFetched++;
}
- tSignal = tSignal->next();
- sigIndex++;
- }
-
- // Check number of operations fetched against value in SCANTAB_CONF
- if (tNoOfOpFetched != numOperations) {
- setErrorCode(4113);
- return 2; // theWaiter.m_state = NO_WAIT
+ return 0;
+ } else {
+#ifdef NDB_NO_DROPPED_SIGNAL
+ abort();
+#endif
}
- // All signals for this resultset recieved
- // release SCAN_TAB signals
- theNoOfSCANTABCONFRecv = 0;
- theScanningOp->releaseSignals();
-
- // We have received all operations with correct lengths.
- thePreviousScanRec = NULL;
- theNoOfOpFetched = tNoOfOpFetched;
- return 0;
+ return -1;
}
diff --git a/ndb/src/ndbapi/NdbCursorOperation.cpp b/ndb/src/ndbapi/NdbCursorOperation.cpp
index e4dd600c57f..a9f84c4c110 100644
--- a/ndb/src/ndbapi/NdbCursorOperation.cpp
+++ b/ndb/src/ndbapi/NdbCursorOperation.cpp
@@ -30,8 +30,6 @@
#include <NdbResultSet.hpp>
NdbCursorOperation::NdbCursorOperation(Ndb* aNdb) :
- NdbOperation(aNdb),
- m_resultSet(0)
{
}
@@ -48,10 +46,6 @@ void NdbCursorOperation::cursInit()
NdbResultSet* NdbCursorOperation::getResultSet()
{
- if (!m_resultSet)
- m_resultSet = new NdbResultSet(this);
-
- return m_resultSet;
}
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index 413ad0745db..4b30f41b51d 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -174,6 +174,14 @@ NdbDictionary::Column::getIndexOnlyStorage() const {
return m_impl.m_indexOnly;
}
+const NdbDictionary::Table *
+NdbDictionary::Column::getBlobTable() const {
+ NdbTableImpl * t = m_impl.m_blobTable;
+ if (t)
+ return t->m_facade;
+ return 0;
+}
+
void
NdbDictionary::Column::setAutoIncrement(bool val){
m_impl.m_autoIncrement = val;
@@ -806,73 +814,90 @@ NdbDictionary::Dictionary::getNdbError() const {
return m_impl.getNdbError();
}
-NdbOut& operator <<(NdbOut& ndbout, const NdbDictionary::Column::Type type)
+// printers
+
+NdbOut&
+operator<<(NdbOut& out, const NdbDictionary::Column& col)
{
- switch(type){
- case NdbDictionary::Column::Bigunsigned:
- ndbout << "Bigunsigned";
+ out << col.getName() << " ";
+ switch (col.getType()) {
+ case NdbDictionary::Column::Tinyint:
+ out << "Tinyint";
break;
- case NdbDictionary::Column::Unsigned:
- ndbout << "Unsigned";
+ case NdbDictionary::Column::Tinyunsigned:
+ out << "Tinyunsigned";
+ break;
+ case NdbDictionary::Column::Smallint:
+ out << "Smallint";
break;
case NdbDictionary::Column::Smallunsigned:
- ndbout << "Smallunsigned";
+ out << "Smallunsigned";
break;
- case NdbDictionary::Column::Tinyunsigned:
- ndbout << "Tinyunsigned";
+ case NdbDictionary::Column::Mediumint:
+ out << "Mediumint";
break;
- case NdbDictionary::Column::Bigint:
- ndbout << "Bigint";
+ case NdbDictionary::Column::Mediumunsigned:
+ out << "Mediumunsigned";
break;
case NdbDictionary::Column::Int:
- ndbout << "Int";
- break;
- case NdbDictionary::Column::Smallint:
- ndbout << "Smallint";
+ out << "Int";
break;
- case NdbDictionary::Column::Tinyint:
- ndbout << "Tinyint";
+ case NdbDictionary::Column::Unsigned:
+ out << "Unsigned";
break;
- case NdbDictionary::Column::Char:
- ndbout << "Char";
+ case NdbDictionary::Column::Bigint:
+ out << "Bigint";
break;
- case NdbDictionary::Column::Varchar:
- ndbout << "Varchar";
+ case NdbDictionary::Column::Bigunsigned:
+ out << "Bigunsigned";
break;
case NdbDictionary::Column::Float:
- ndbout << "Float";
+ out << "Float";
break;
case NdbDictionary::Column::Double:
- ndbout << "Double";
+ out << "Double";
break;
- case NdbDictionary::Column::Mediumint:
- ndbout << "Mediumint";
+ case NdbDictionary::Column::Decimal:
+ out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")";
break;
- case NdbDictionary::Column::Mediumunsigned:
- ndbout << "Mediumunsigend";
+ case NdbDictionary::Column::Char:
+ out << "Char(" << col.getLength() << ")";
+ break;
+ case NdbDictionary::Column::Varchar:
+ out << "Varchar(" << col.getLength() << ")";
break;
case NdbDictionary::Column::Binary:
- ndbout << "Binary";
+ out << "Binary(" << col.getLength() << ")";
break;
case NdbDictionary::Column::Varbinary:
- ndbout << "Varbinary";
+ out << "Varbinary(" << col.getLength() << ")";
break;
- case NdbDictionary::Column::Decimal:
- ndbout << "Decimal";
+ case NdbDictionary::Column::Datetime:
+ out << "Datetime";
break;
case NdbDictionary::Column::Timespec:
- ndbout << "Timespec";
+ out << "Timespec";
break;
case NdbDictionary::Column::Blob:
- ndbout << "Blob";
+ out << "Blob(" << col.getInlineSize() << "," << col.getPartSize()
+ << ";" << col.getStripeSize() << ")";
+ break;
+ case NdbDictionary::Column::Text:
+ out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
+ << ";" << col.getStripeSize() << ")";
break;
case NdbDictionary::Column::Undefined:
- ndbout << "Undefined";
+ out << "Undefined";
break;
default:
- ndbout << "Unknown type=" << (Uint32)type;
+ out << "Type" << (Uint32)col.getType();
break;
}
-
- return ndbout;
+ if (col.getPrimaryKey())
+ out << " PRIMARY KEY";
+ else if (! col.getNullable())
+ out << " NOT NULL";
+ else
+ out << " NULL";
+ return out;
}
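The rewritten printer emits one human-readable line per column, including length/precision decoration and a trailing PRIMARY KEY / NOT NULL / NULL marker. A small hedged sketch of dumping a table definition with it ("t1" is illustrative, dict as in the earlier sketches):

  const NdbDictionary::Table* tab = dict->getTable("t1");
  for (int i = 0; i < tab->getNoOfColumns(); i++)
    ndbout << *tab->getColumn(i) << endl;
  // typical output lines: "pk Unsigned PRIMARY KEY", "name Varchar(32) NULL"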
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 348f6db36e4..cb5e3b3c821 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -79,6 +79,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
m_attrSize = col.m_attrSize;
m_arraySize = col.m_arraySize;
m_keyInfoPos = col.m_keyInfoPos;
+ m_blobTable = col.m_blobTable;
// Do not copy m_facade !!
return *this;
@@ -104,6 +105,7 @@ NdbColumnImpl::init()
m_arraySize = 1,
m_autoIncrement = false;
m_autoIncrementInitialValue = 1;
+ m_blobTable = NULL;
}
NdbColumnImpl::~NdbColumnImpl()
@@ -181,7 +183,7 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
case NdbDictionary::Column::Timespec:
break;
case NdbDictionary::Column::Blob:
- case NdbDictionary::Column::Clob:
+ case NdbDictionary::Column::Text:
if (m_precision != col.m_precision ||
m_scale != col.m_scale ||
m_length != col.m_length) {
@@ -230,10 +232,6 @@ NdbColumnImpl::assign(const NdbColumnImpl& org)
NdbTableImpl::NdbTableImpl()
: NdbDictionary::Table(* this), m_facade(this)
{
- m_noOfKeys = 0;
- m_sizeOfKeysInWords = 0;
- m_noOfBlobs = 0;
- m_index = 0;
init();
}
@@ -267,6 +265,7 @@ NdbTableImpl::init(){
m_indexType = NdbDictionary::Index::Undefined;
m_noOfKeys = 0;
+ m_fragmentCount = 0;
m_sizeOfKeysInWords = 0;
m_noOfBlobs = 0;
}
@@ -287,11 +286,9 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const
if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){
return false;
}
-
if(m_fragmentType != obj.m_fragmentType){
return false;
}
-
if(m_columns.size() != obj.m_columns.size()){
return false;
}
@@ -330,6 +327,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_newExternalName.assign(org.m_newExternalName);
m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
m_fragmentType = org.m_fragmentType;
+ m_fragmentCount = org.m_fragmentCount;
for(unsigned i = 0; i<org.m_columns.size(); i++){
NdbColumnImpl * col = new NdbColumnImpl();
@@ -387,7 +385,8 @@ void
NdbTableImpl::buildColumnHash(){
const Uint32 size = m_columns.size();
- for(size_t i = 31; i >= 0; i--){
+ size_t i;
+ for(i = 31; i >= 0; i--){
if(((1 << i) & size) != 0){
m_columnHashMask = (1 << (i + 1)) - 1;
break;
@@ -396,7 +395,7 @@ NdbTableImpl::buildColumnHash(){
Vector<Uint32> hashValues;
Vector<Vector<Uint32> > chains; chains.fill(size, hashValues);
- for(size_t i = 0; i<size; i++){
+ for(i = 0; i<size; i++){
Uint32 hv = Hash(m_columns[i]->getName()) & 0xFFFE;
Uint32 bucket = hv & m_columnHashMask;
bucket = (bucket < size ? bucket : bucket - size);
@@ -410,7 +409,7 @@ NdbTableImpl::buildColumnHash(){
m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining
Uint32 pos = 0; // In overflow vector
- for(size_t i = 0; i<size; i++){
+ for(i = 0; i<size; i++){
Uint32 sz = chains[i].size();
if(sz == 1){
Uint32 col = chains[i][0];
@@ -495,6 +494,12 @@ NdbIndexImpl::getTable() const
return m_tableName.c_str();
}
+const NdbTableImpl *
+NdbIndexImpl::getIndexTable() const
+{
+ return m_table;
+}
+
/**
* NdbEventImpl
*/
@@ -840,6 +845,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
m_waiter.m_state = wst;
m_waiter.wait(theWait);
+ m_transporter->unlock_mutex();
// End of Protected area
if(m_waiter.m_state == NO_WAIT && m_error.code == 0){
@@ -1091,7 +1097,7 @@ columnTypeMapping[] = {
{ DictTabInfo::ExtDatetime, NdbDictionary::Column::Datetime },
{ DictTabInfo::ExtTimespec, NdbDictionary::Column::Timespec },
{ DictTabInfo::ExtBlob, NdbDictionary::Column::Blob },
- { DictTabInfo::ExtClob, NdbDictionary::Column::Clob },
+ { DictTabInfo::ExtText, NdbDictionary::Column::Text },
{ -1, -1 }
};
@@ -1132,6 +1138,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_kvalue = tableDesc.TableKValue;
impl->m_minLoadFactor = tableDesc.MinLoadFactor;
impl->m_maxLoadFactor = tableDesc.MaxLoadFactor;
+ impl->m_fragmentCount = tableDesc.FragmentCount;
impl->m_indexType = (NdbDictionary::Index::Type)
getApiConstant(tableDesc.TableType,
@@ -1147,7 +1154,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
Uint32 keyInfoPos = 0;
Uint32 keyCount = 0;
- Uint32 blobCount;
+ Uint32 blobCount = 0;
for(Uint32 i = 0; i < tableDesc.NoOfAttributes; i++) {
DictTabInfo::Attribute attrDesc; attrDesc.init();
@@ -1206,7 +1213,6 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
}
if (col->getBlobType())
blobCount++;
-
NdbColumnImpl * null = 0;
impl->m_columns.fill(attrDesc.AttributeId, null);
if(impl->m_columns[attrDesc.AttributeId] != 0){
@@ -1218,6 +1224,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
it.next();
}
impl->m_noOfKeys = keyCount;
+ impl->m_keyLenInWords = keyInfoPos;
impl->m_sizeOfKeysInWords = keyInfoPos;
impl->m_noOfBlobs = blobCount;
* ret = impl;
@@ -1254,13 +1261,34 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
{
for (unsigned i = 0; i < t.m_columns.size(); i++) {
NdbColumnImpl & c = *t.m_columns[i];
- if (! c.getBlobType())
+ if (! c.getBlobType() || c.getPartSize() == 0)
continue;
NdbTableImpl bt;
NdbBlob::getBlobTable(bt, &t, &c);
if (createTable(bt) != 0)
return -1;
+ // Save BLOB table handle
+ NdbTableImpl * cachedBlobTable = getTable(bt.m_externalName.c_str());
+ c.m_blobTable = cachedBlobTable;
}
+
+ return 0;
+}
+
+int
+NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
+{
+ for (unsigned i = 0; i < t.m_columns.size(); i++) {
+ NdbColumnImpl & c = *t.m_columns[i];
+ if (! c.getBlobType() || c.getPartSize() == 0)
+ continue;
+ char btname[NdbBlob::BlobTableNameSize];
+ NdbBlob::getBlobTableName(btname, &t, &c);
+ // Save BLOB table handle
+ NdbTableImpl * cachedBlobTable = getTable(btname);
+ c.m_blobTable = cachedBlobTable;
+ }
+
return 0;
}
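createBlobTables()/addBlobTables() now cache the hidden blob-parts table on the column (m_blobTable), which the NdbDictionary::Column::getBlobTable() accessor added earlier in this patch exposes. A hedged sketch of inspecting it for a hypothetical Blob column "doc":

  const NdbDictionary::Column* col = tab->getColumn("doc");
  if ((col->getType() == NdbDictionary::Column::Blob ||
       col->getType() == NdbDictionary::Column::Text) && col->getPartSize() > 0) {
    const NdbDictionary::Table* bt = col->getBlobTable();
    if (bt != 0)
      ndbout << "blob parts stored in " << bt->getName() << endl;
  }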
@@ -1315,6 +1343,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
NdbTableImpl & impl,
bool alter)
{
+ unsigned i;
if((unsigned)impl.getNoOfPrimaryKeys() > NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY){
m_error.code = 4317;
return -1;
@@ -1340,7 +1369,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
bool haveAutoIncrement = false;
Uint64 autoIncrementValue;
- for(unsigned i = 0; i<sz; i++){
+ for(i = 0; i<sz; i++){
const NdbColumnImpl * col = impl.m_columns[i];
if(col == 0)
continue;
@@ -1384,7 +1413,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
abort();
}
- for(unsigned i = 0; i<sz; i++){
+ for(i = 0; i<sz; i++){
const NdbColumnImpl * col = impl.m_columns[i];
if(col == 0)
continue;
@@ -1453,7 +1482,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
alterTable(&tSignal, ptr)
: createTable(&tSignal, ptr);
- if (haveAutoIncrement) {
+ if (!alter && haveAutoIncrement) {
// if (!ndb.setAutoIncrementValue(impl.m_internalName.c_str(), autoIncrementValue)) {
if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), autoIncrementValue)) {
m_error.code = 4336;
@@ -1623,7 +1652,7 @@ NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
{
for (unsigned i = 0; i < t.m_columns.size(); i++) {
NdbColumnImpl & c = *t.m_columns[i];
- if (! c.getBlobType())
+ if (! c.getBlobType() || c.getPartSize() == 0)
continue;
char btname[NdbBlob::BlobTableNameSize];
NdbBlob::getBlobTableName(btname, &t, &c);
@@ -1734,8 +1763,8 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
return 0;
}
- NdbTableImpl* primTab = getTable(tab->m_primaryTable.c_str());
- if(primTab == 0){
+ NdbTableImpl* prim = getTable(tab->m_primaryTable.c_str());
+ if(prim == 0){
m_error.code = 4243;
return 0;
}
@@ -1749,7 +1778,7 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
idx->m_indexId = tab->m_tableId;
idx->m_internalName.assign(internalName);
idx->m_externalName.assign(externalName);
- idx->m_tableName.assign(primTab->m_externalName);
+ idx->m_tableName.assign(prim->m_externalName);
idx->m_type = tab->m_indexType;
// skip last attribute (NDB$PK or NDB$TNODE)
for(unsigned i = 0; i+1<tab->m_columns.size(); i++){
@@ -1757,6 +1786,14 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
// Copy column definition
*col = *tab->m_columns[i];
idx->m_columns.push_back(col);
+ /**
+ * reverse map
+ */
+ int key_id = prim->getColumn(col->getName())->getColumnNo();
+ int fill = -1;
+ idx->m_key_ids.fill(key_id, fill);
+ idx->m_key_ids[key_id] = i;
+ col->m_keyInfoPos = key_id;
}
idx->m_table = tab;
@@ -1788,7 +1825,7 @@ NdbDictInterface::createIndex(Ndb & ndb,
{
//validate();
//aggregate();
-
+ unsigned i;
UtilBufferWriter w(m_buffer);
const size_t len = strlen(impl.m_externalName.c_str()) + 1;
if(len > MAX_TAB_NAME_SIZE) {
@@ -1828,7 +1865,7 @@ NdbDictInterface::createIndex(Ndb & ndb,
req->setOnline(true);
AttributeList attributeList;
attributeList.sz = impl.m_columns.size();
- for(unsigned i = 0; i<attributeList.sz; i++){
+ for(i = 0; i<attributeList.sz; i++){
const NdbColumnImpl* col =
table.getColumn(impl.m_columns[i]->m_name.c_str());
if(col == 0){
@@ -1842,18 +1879,11 @@ NdbDictInterface::createIndex(Ndb & ndb,
m_error.code = 4245;
return -1;
}
-
- if (it == DictTabInfo::UniqueHashIndex &&
- (col->m_nullable) && (attributeList.sz > 1)) {
- // We only support one NULL attribute
- m_error.code = 4246;
- return -1;
- }
attributeList.id[i] = col->m_attrId;
}
if (it == DictTabInfo::UniqueHashIndex) {
// Sort index attributes according to primary table (using insertion sort)
- for(unsigned i = 1; i < attributeList.sz; i++) {
+ for(i = 1; i < attributeList.sz; i++) {
unsigned int temp = attributeList.id[i];
unsigned int j = i;
while((j > 0) && (attributeList.id[j - 1] > temp)) {
@@ -1863,7 +1893,7 @@ NdbDictInterface::createIndex(Ndb & ndb,
attributeList.id[j] = temp;
}
// Check for illegal duplicate attributes
- for(unsigned i = 0; i<attributeList.sz; i++) {
+ for(i = 0; i<attributeList.sz; i++) {
if ((i != (attributeList.sz - 1)) &&
(attributeList.id[i] == attributeList.id[i+1])) {
m_error.code = 4258;
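
Sorting the attribute ids first turns the duplicate check into a single pass over adjacent elements. The same technique on a plain array, as a standalone sketch:

// Insertion-sort a small array of attribute ids, then reject duplicates.
static int check_unique(unsigned *id, unsigned sz)
{
  for (unsigned i = 1; i < sz; i++) {
    unsigned tmp = id[i];
    unsigned j = i;
    while (j > 0 && id[j - 1] > tmp) { id[j] = id[j - 1]; j--; }
    id[j] = tmp;
  }
  for (unsigned i = 0; i + 1 < sz; i++)
    if (id[i] == id[i + 1])
      return -1;                      // duplicate attribute
  return 0;
}
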
@@ -2043,6 +2073,7 @@ NdbDictInterface::execDROP_INDX_REF(NdbApiSignal * signal,
int
NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
{
+ int i;
NdbTableImpl* tab = getTable(evnt.getTable());
if(tab == 0){
@@ -2065,7 +2096,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
int attributeList_sz = evnt.m_attrIds.size();
- for (int i = 0; i < attributeList_sz; i++) {
+ for (i = 0; i < attributeList_sz; i++) {
NdbColumnImpl *col_impl = table.getColumn(evnt.m_attrIds[i]);
if (col_impl) {
evnt.m_facade->addColumn(*(col_impl->m_facade));
@@ -2086,7 +2117,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
int pk_count = 0;
evnt.m_attrListBitmask.clear();
- for(int i = 0; i<attributeList_sz; i++){
+ for(i = 0; i<attributeList_sz; i++){
const NdbColumnImpl* col =
table.getColumn(evnt.m_columns[i]->m_name.c_str());
if(col == 0){
@@ -2104,7 +2135,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
}
// Sort index attributes according to primary table (using insertion sort)
- for(int i = 1; i < attributeList_sz; i++) {
+ for(i = 1; i < attributeList_sz; i++) {
NdbColumnImpl* temp = evnt.m_columns[i];
unsigned int j = i;
while((j > 0) && (evnt.m_columns[j - 1]->m_attrId > temp->m_attrId)) {
@@ -2114,7 +2145,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
evnt.m_columns[j] = temp;
}
// Check for illegal duplicate attributes
- for(int i = 1; i<attributeList_sz; i++) {
+ for(i = 1; i<attributeList_sz; i++) {
if (evnt.m_columns[i-1]->m_attrId == evnt.m_columns[i]->m_attrId) {
m_error.code = 4258;
return -1;
@@ -2788,6 +2819,7 @@ NdbDictInterface::listObjects(NdbApiSignal* signal)
m_waiter.m_node = aNodeId;
m_waiter.m_state = WAIT_LIST_TABLES_CONF;
m_waiter.wait(WAITFOR_RESPONSE_TIMEOUT);
+ m_transporter->unlock_mutex();
// end protected
if (m_waiter.m_state == NO_WAIT && m_error.code == 0)
return 0;
@@ -2810,3 +2842,10 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal,
m_waiter.signal(NO_WAIT);
}
}
+
+template class Vector<int>;
+template class Vector<Uint32>;
+template class Vector<Vector<Uint32> >;
+template class Vector<NdbTableImpl*>;
+template class Vector<NdbColumnImpl*>;
+
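
The added "template class Vector<...>;" lines are explicit instantiation definitions: they force the compiler to emit Vector's member functions for exactly those element types in this translation unit, which is needed when the build suppresses implicit template instantiation (for example with gcc's -fno-implicit-templates). The pattern in isolation:

// Vector.hpp only declares the template; one .cpp file instantiates the
// combinations the library actually uses so their code is emitted once.
template <class T> class Vector { /* ... */ };
template class Vector<int>;           // explicit instantiation definition
template class Vector<unsigned>;
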
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index bf59838c198..9a890f02575 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -71,6 +71,7 @@ public:
bool m_autoIncrement;
Uint64 m_autoIncrementInitialValue;
BaseString m_defaultValue;
+ NdbTableImpl * m_blobTable;
/**
* Internal types and sizes, and aggregates
@@ -124,7 +125,9 @@ public:
int m_kvalue;
int m_minLoadFactor;
int m_maxLoadFactor;
-
+ int m_keyLenInWords;
+ int m_fragmentCount;
+
NdbDictionaryImpl * m_dictionary;
NdbIndexImpl * m_index;
NdbColumnImpl * getColumn(unsigned attrId);
@@ -168,12 +171,14 @@ public:
const char * getName() const;
void setTable(const char * table);
const char * getTable() const;
+ const NdbTableImpl * getIndexTable() const;
Uint32 m_indexId;
BaseString m_internalName;
BaseString m_externalName;
BaseString m_tableName;
Vector<NdbColumnImpl *> m_columns;
+ Vector<int> m_key_ids;
NdbDictionary::Index::Type m_type;
bool m_logging;
@@ -358,6 +363,7 @@ public:
int createTable(NdbTableImpl &t);
int createBlobTables(NdbTableImpl &);
+ int addBlobTables(NdbTableImpl &);
int alterTable(NdbTableImpl &t);
int dropTable(const char * name);
int dropTable(NdbTableImpl &);
@@ -438,7 +444,7 @@ inline
bool
NdbColumnImpl::getBlobType() const {
return (m_type == NdbDictionary::Column::Blob ||
- m_type == NdbDictionary::Column::Clob);
+ m_type == NdbDictionary::Column::Text);
}
inline
@@ -612,7 +618,6 @@ NdbDictionaryImpl::getTableImpl(const char * internalTableName)
if (ret == 0){
ret = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames());
-
m_globalHash->lock();
m_globalHash->put(internalTableName, ret);
m_globalHash->unlock();
@@ -625,6 +630,8 @@ NdbDictionaryImpl::getTableImpl(const char * internalTableName)
m_ndb.theFirstTupleId[ret->getTableId()] = ~0;
m_ndb.theLastTupleId[ret->getTableId()] = ~0;
+
+ addBlobTables(*ret);
return ret;
}
diff --git a/ndb/src/ndbapi/NdbEventOperation.cpp b/ndb/src/ndbapi/NdbEventOperation.cpp
index ebdebaffd61..506a6c8d86d 100644
--- a/ndb/src/ndbapi/NdbEventOperation.cpp
+++ b/ndb/src/ndbapi/NdbEventOperation.cpp
@@ -37,7 +37,7 @@
NdbEventOperation::NdbEventOperation(Ndb *theNdb,
const char* eventName,
- const int bufferLength)
+ int bufferLength)
: m_impl(* new NdbEventOperationImpl(*this,theNdb,
eventName,
bufferLength))
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index b73a58d97c4..f5e683b1c29 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -93,11 +93,12 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
NdbEventOperationImpl::~NdbEventOperationImpl()
{
+ int i;
if (sdata) NdbMem_Free(sdata);
- for (int i=0 ; i<3; i++) {
+ for (i=0 ; i<3; i++) {
if (ptr[i].p) NdbMem_Free(ptr[i].p);
}
- for (int i=0 ; i<2; i++) {
+ for (i=0 ; i<2; i++) {
NdbRecAttr *p = theFirstRecAttrs[i];
while (p) {
NdbRecAttr *p_next = p->next();
@@ -166,7 +167,7 @@ NdbEventOperationImpl::getValue(const NdbColumnImpl *tAttrInfo, char *aValue, in
}
//theErrorLine++;
- tRecAttr->setUNDEFINED();
+ tRecAttr->setNULL();
// We want to keep the list sorted to make data insertion easier later
if (theFirstRecAttr == NULL) {
@@ -387,7 +388,7 @@ NdbEventOperationImpl::next(int *pOverrun)
while (tAttrId > tRecAttrId) {
//printf("[%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
- tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr->setNULL();
tWorkingRecAttr = tWorkingRecAttr->next();
if (tWorkingRecAttr == NULL)
break;
@@ -399,19 +400,16 @@ NdbEventOperationImpl::next(int *pOverrun)
//printf("[%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
if (tAttrId == tRecAttrId) {
- tWorkingRecAttr->setNotNULL();
if (!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey())
hasSomeData++;
//printf("set!\n");
- Uint32 *theRef = (Uint32*)tWorkingRecAttr->aRef();
- Uint32 *theEndRef = theRef + tDataSz;
- while (theRef < theEndRef)
- *theRef++ = *aDataPtr++;
+ tWorkingRecAttr->receive_data(aDataPtr, tDataSz);
// move forward, data has already moved forward
aAttrPtr++;
+ aDataPtr += tDataSz;
tWorkingRecAttr = tWorkingRecAttr->next();
} else {
// move only attr forward
@@ -423,7 +421,7 @@ NdbEventOperationImpl::next(int *pOverrun)
while (tWorkingRecAttr != NULL) {
tRecAttrId = tWorkingRecAttr->attrId();
//printf("set undefined [%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
- tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr->setNULL();
tWorkingRecAttr = tWorkingRecAttr->next();
}
@@ -436,7 +434,7 @@ NdbEventOperationImpl::next(int *pOverrun)
tDataSz = AttributeHeader(*aDataPtr).getDataSize();
aDataPtr++;
while (tAttrId > tRecAttrId) {
- tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr->setNULL();
tWorkingRecAttr = tWorkingRecAttr->next();
if (tWorkingRecAttr == NULL)
break;
@@ -445,16 +443,11 @@ NdbEventOperationImpl::next(int *pOverrun)
if (tWorkingRecAttr == NULL)
break;
if (tAttrId == tRecAttrId) {
- tWorkingRecAttr->setNotNULL();
-
if (!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey())
hasSomeData++;
- Uint32 *theRef = (Uint32*)tWorkingRecAttr->aRef();
- Uint32 *theEndRef = theRef + tDataSz;
- while (theRef < theEndRef)
- *theRef++ = *aDataPtr++;
-
+ tWorkingRecAttr->receive_data(aDataPtr, tDataSz);
+ aDataPtr += tDataSz;
// move forward, data+attr has already moved forward
tWorkingRecAttr = tWorkingRecAttr->next();
} else {
@@ -463,7 +456,7 @@ NdbEventOperationImpl::next(int *pOverrun)
}
}
while (tWorkingRecAttr != NULL) {
- tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr->setNULL();
tWorkingRecAttr = tWorkingRecAttr->next();
}
@@ -1233,8 +1226,9 @@ NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h,
int aMillisecondNumber)
{
// check if there are anything in any of the buffers
+ int i;
int n = 0;
- for (int i = 0; i < h->m_nids; i++)
+ for (i = 0; i < h->m_nids; i++)
n += hasData(h->m_bufferIds[i]);
if (n) return n;
@@ -1243,7 +1237,9 @@ NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h,
return -1;
n = 0;
- for (int i = 0; i < h->m_nids; i++)
+ for (i = 0; i < h->m_nids; i++)
n += hasData(h->m_bufferIds[i]);
return n;
}
+
+template class Vector<NdbGlobalEventBufferHandle*>;
diff --git a/ndb/src/ndbapi/NdbImpl.hpp b/ndb/src/ndbapi/NdbImpl.hpp
index cd05335b337..1fb1969b589 100644
--- a/ndb/src/ndbapi/NdbImpl.hpp
+++ b/ndb/src/ndbapi/NdbImpl.hpp
@@ -35,6 +35,7 @@ public:
#include <NdbError.hpp>
#include <NdbCondition.h>
#include <NdbReceiver.hpp>
+#include <NdbOperation.hpp>
#include <NdbTick.h>
@@ -83,12 +84,13 @@ Ndb::void2rec_iop(void* val){
return (NdbIndexOperation*)(void2rec(val)->getOwner());
}
-inline
-NdbScanReceiver*
-Ndb::void2rec_srec(void* val){
- return (NdbScanReceiver*)(void2rec(val)->getOwner());
+inline
+NdbConnection *
+NdbReceiver::getTransaction(){
+ return ((NdbOperation*)m_owner)->theNdbCon;
}
+
inline
int
Ndb::checkInitState()
@@ -151,7 +153,6 @@ NdbWaiter::wait(int waitTime)
waitTime = maxTime - NdbTick_CurrentMillisecond();
}
}
- NdbMutex_Unlock((NdbMutex*)m_mutex);
}
inline
diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp
index 631c09e2e6d..7bea3b9f3d2 100644
--- a/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -14,18 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbIndexOperation.cpp
- * Include:
- * Link:
- * Author: UABMASD Martin Skld INN/V Alzato
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Secondary index support
- * Documentation:
- * Adjust: 2002-04-01 UABMASD First version.
- ****************************************************************************/
-
+#include <ndb_global.h>
#include <NdbIndexOperation.hpp>
#include <NdbResultSet.hpp>
#include <Ndb.hpp>
@@ -37,8 +26,6 @@
#include <signaldata/IndxKeyInfo.hpp>
#include <signaldata/IndxAttrInfo.hpp>
-#define CHECK_NULL(v) assert(v == NULL); v = NULL;
-
NdbIndexOperation::NdbIndexOperation(Ndb* aNdb) :
NdbOperation(aNdb),
m_theIndex(NULL),
@@ -675,10 +662,8 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
tSignal = tnextSignal;
} while (tSignal != NULL);
}//if
- NdbRecAttr* tRecAttrObject = theFirstRecAttr;
theStatus = WaitResponse;
- theCurrentRecAttr = tRecAttrObject;
-
+ theReceiver.prepareSend();
return 0;
}
diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp
index f67d4e60200..5d0d52a31d8 100644
--- a/ndb/src/ndbapi/NdbLinHash.hpp
+++ b/ndb/src/ndbapi/NdbLinHash.hpp
@@ -165,13 +165,14 @@ NdbLinHash<C>::createHashTable() {
max = SEGMENTSIZE - 1;
slack = SEGMENTSIZE * MAXLOADFCTR;
directory[0] = new Segment_t();
-
+ int i;
+
/* The first segment cleared before used */
- for(int i = 0; i < SEGMENTSIZE; i++ )
+ for(i = 0; i < SEGMENTSIZE; i++ )
directory[0]->elements[i] = 0;
/* clear the rest of the directory */
- for( int i = 1; i < DIRECTORYSIZE; i++)
+ for(i = 1; i < DIRECTORYSIZE; i++)
directory[i] = 0;
}
@@ -203,7 +204,8 @@ NdbLinHash<C>::insertKey( const char* str, Uint32 len, Uint32 lkey1, C* data )
* chain=chainp will copy the contents of HASH_T into chain
*/
NdbElement_t<C> * oldChain = 0;
- for(NdbElement_t<C> * chain = *chainp; chain != 0; chain = chain->next){
+ NdbElement_t<C> * chain;
+ for(chain = *chainp; chain != 0; chain = chain->next){
if(chain->len == len && !memcmp(chain->str, str, len))
return -1; /* Element already exists */
else
@@ -211,7 +213,7 @@ NdbLinHash<C>::insertKey( const char* str, Uint32 len, Uint32 lkey1, C* data )
}
/* New entry */
- NdbElement_t<C> * chain = new NdbElement_t<C>();
+ chain = new NdbElement_t<C>();
chain->len = len;
chain->hash = hash;
chain->localkey1 = lkey1;
diff --git a/ndb/src/ndbapi/NdbOperation.cpp b/ndb/src/ndbapi/NdbOperation.cpp
index e6031a58c5f..18a7d1d1c80 100644
--- a/ndb/src/ndbapi/NdbOperation.cpp
+++ b/ndb/src/ndbapi/NdbOperation.cpp
@@ -14,20 +14,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*****************************************************************************
- * Name: NdbOperation.C
- * Include:
- * Link:
- * Author: UABMNST Mona Natterkvist UAB/B/SD
- * Date: 970829
- * Version: 0.1
- * Description: Interface between TIS and NDB
- * Documentation:
- * Adjust: 971022 UABMNST First version.
- ****************************************************************************/
-#include "NdbConnection.hpp"
-#include "NdbOperation.hpp"
+#include <ndb_global.h>
+#include <NdbConnection.hpp>
+#include <NdbOperation.hpp>
#include "NdbApiSignal.hpp"
#include "NdbRecAttr.hpp"
#include "NdbUtil.hpp"
@@ -55,7 +44,6 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
//theTable(aTable),
theNdbCon(NULL),
theNext(NULL),
- theNextScanOp(NULL),
theTCREQ(NULL),
theFirstATTRINFO(NULL),
theCurrentATTRINFO(NULL),
@@ -63,8 +51,6 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
theAI_LenInCurrAI(0),
theFirstKEYINFO(NULL),
theLastKEYINFO(NULL),
- theFirstRecAttr(NULL),
- theCurrentRecAttr(NULL),
theFirstLabel(NULL),
theLastLabel(NULL),
@@ -77,10 +63,6 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
theNoOfLabels(0),
theNoOfSubroutines(0),
- theTotalRecAI_Len(0),
- theCurrRecAI_Len(0),
- theAI_ElementLen(0),
- theCurrElemPtr(NULL),
m_currentTable(NULL), //theTableId(0xFFFF),
m_accessTable(NULL), //theAccessTableId(0xFFFF),
//theSchemaVersion(0),
@@ -96,14 +78,6 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
m_tcReqGSN(GSN_TCKEYREQ),
m_keyInfoGSN(GSN_KEYINFO),
m_attrInfoGSN(GSN_ATTRINFO),
- theParallelism(0),
- theScanReceiversArray(NULL),
- theSCAN_TABREQ(NULL),
- theFirstSCAN_TABINFO_Send(NULL),
- theLastSCAN_TABINFO_Send(NULL),
- theFirstSCAN_TABINFO_Recv(NULL),
- theLastSCAN_TABINFO_Recv(NULL),
- theSCAN_TABCONF_Recv(NULL),
theBoundATTRINFO(NULL),
theBlobList(NULL)
{
@@ -167,7 +141,7 @@ NdbOperation::init(NdbTableImpl* tab, NdbConnection* myConnection){
theNdbCon = myConnection;
for (Uint32 i=0; i<NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; i++)
for (int j=0; j<3; j++)
- theTupleKeyDefined[i][j] = false;
+ theTupleKeyDefined[i][j] = 0;
theFirstATTRINFO = NULL;
theCurrentATTRINFO = NULL;
@@ -177,13 +151,11 @@ NdbOperation::init(NdbTableImpl* tab, NdbConnection* myConnection){
theTupKeyLen = 0;
theNoOfTupKeyDefined = 0;
- theTotalCurrAI_Len = 0;
- theAI_LenInCurrAI = 0;
- theTotalRecAI_Len = 0;
theDistrKeySize = 0;
theDistributionGroup = 0;
- theCurrRecAI_Len = 0;
- theAI_ElementLen = 0;
+
+ theTotalCurrAI_Len = 0;
+ theAI_LenInCurrAI = 0;
theStartIndicator = 0;
theCommitIndicator = 0;
theSimpleIndicator = 0;
@@ -193,9 +165,6 @@ NdbOperation::init(NdbTableImpl* tab, NdbConnection* myConnection){
theDistrGroupType = 0;
theDistrKeyIndicator = 0;
theScanInfo = 0;
- theFirstRecAttr = NULL;
- theCurrentRecAttr = NULL;
- theCurrElemPtr = NULL;
theTotalNrOfKeyWordInSignal = 8;
theMagicNumber = 0xABCDEF01;
theBoundATTRINFO = NULL;
@@ -215,6 +184,7 @@ NdbOperation::init(NdbTableImpl* tab, NdbConnection* myConnection){
tcKeyReq->scanInfo = 0;
theKEYINFOptr = &tcKeyReq->keyInfo[0];
theATTRINFOptr = &tcKeyReq->attrInfo[0];
+ theReceiver.init(NdbReceiver::NDB_OPERATION, this);
return 0;
}
@@ -229,8 +199,6 @@ NdbOperation::release()
{
NdbApiSignal* tSignal;
NdbApiSignal* tSaveSignal;
- NdbRecAttr* tRecAttr;
- NdbRecAttr* tSaveRecAttr;
NdbBranch* tBranch;
NdbBranch* tSaveBranch;
NdbLabel* tLabel;
@@ -265,15 +233,6 @@ NdbOperation::release()
}
theFirstKEYINFO = NULL;
theLastKEYINFO = NULL;
- tRecAttr = theFirstRecAttr;
- while (tRecAttr != NULL)
- {
- tSaveRecAttr = tRecAttr;
- tRecAttr = tRecAttr->next();
- theNdb->releaseRecAttr(tSaveRecAttr);
- }
- theFirstRecAttr = NULL;
- theCurrentRecAttr = NULL;
if (theInterpretIndicator == 1)
{
tBranch = theFirstBranch;
@@ -321,19 +280,19 @@ NdbOperation::release()
theNdb->releaseNdbBlob(tSaveBlob);
}
theBlobList = NULL;
- releaseScan();
+ theReceiver.release();
}
NdbRecAttr*
NdbOperation::getValue(const char* anAttrName, char* aValue)
{
- return getValue(m_currentTable->getColumn(anAttrName), aValue);
+ return getValue_impl(m_currentTable->getColumn(anAttrName), aValue);
}
NdbRecAttr*
NdbOperation::getValue(Uint32 anAttrId, char* aValue)
{
- return getValue(m_currentTable->getColumn(anAttrId), aValue);
+ return getValue_impl(m_currentTable->getColumn(anAttrId), aValue);
}
int
@@ -441,18 +400,6 @@ NdbOperation::write_attr(Uint32 anAttrId, Uint32 RegDest)
return write_attr(m_currentTable->getColumn(anAttrId), RegDest);
}
-int
-NdbOperation::setBound(const char* anAttrName, int type, const void* aValue, Uint32 len)
-{
- return setBound(m_accessTable->getColumn(anAttrName), type, aValue, len);
-}
-
-int
-NdbOperation::setBound(Uint32 anAttrId, int type, const void* aValue, Uint32 len)
-{
- return setBound(m_accessTable->getColumn(anAttrId), type, aValue, len);
-}
-
const char*
NdbOperation::getTableName() const
{
diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp
index 69a6602fe65..08ed6e84271 100644
--- a/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -34,6 +34,7 @@
#include "NdbUtil.hpp"
#include "NdbOut.hpp"
#include "NdbImpl.hpp"
+#include <NdbIndexScanOperation.hpp>
#include "NdbBlob.hpp"
#include <Interpreter.hpp>
@@ -262,30 +263,10 @@ NdbOperation::interpretedUpdateTuple()
theStatus = OperationDefined;
tNdbCon->theSimpleState = 0;
theOperationType = UpdateRequest;
- theInterpretIndicator = 1;
theAI_LenInCurrAI = 25;
theErrorLine = tErrorLine++;
- theTotalCurrAI_Len = 5;
- theSubroutineSize = 0;
- theInitialReadSize = 0;
- theInterpretedSize = 0;
- theFinalUpdateSize = 0;
- theFinalReadSize = 0;
-
- theFirstLabel = NULL;
- theLastLabel = NULL;
- theFirstBranch = NULL;
- theLastBranch = NULL;
-
- theFirstCall = NULL;
- theLastCall = NULL;
- theFirstSubroutine = NULL;
- theLastSubroutine = NULL;
-
- theNoOfLabels = 0;
- theNoOfSubroutines = 0;
-
+ initInterpreter();
return 0;
} else {
setErrorCode(4200);
@@ -305,30 +286,11 @@ NdbOperation::interpretedDeleteTuple()
theStatus = OperationDefined;
tNdbCon->theSimpleState = 0;
theOperationType = DeleteRequest;
- theInterpretIndicator = 1;
theErrorLine = tErrorLine++;
theAI_LenInCurrAI = 25;
- theTotalCurrAI_Len = 5;
- theSubroutineSize = 0;
- theInitialReadSize = 0;
- theInterpretedSize = 0;
- theFinalUpdateSize = 0;
- theFinalReadSize = 0;
-
- theFirstLabel = NULL;
- theLastLabel = NULL;
- theFirstBranch = NULL;
- theLastBranch = NULL;
-
- theFirstCall = NULL;
- theLastCall = NULL;
- theFirstSubroutine = NULL;
- theLastSubroutine = NULL;
-
- theNoOfLabels = 0;
- theNoOfSubroutines = 0;
+ initInterpreter();
return 0;
} else {
setErrorCode(4200);
@@ -348,14 +310,14 @@ NdbOperation::interpretedDeleteTuple()
* Remark: Define an attribute to retrieve in query.
*****************************************************************************/
NdbRecAttr*
-NdbOperation::getValue(const NdbColumnImpl* tAttrInfo, char* aValue)
+NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
{
NdbRecAttr* tRecAttr;
if ((tAttrInfo != NULL) &&
(!tAttrInfo->m_indexOnly) &&
(theStatus != Init)){
if (theStatus == SetBound) {
- saveBoundATTRINFO();
+ ((NdbIndexScanOperation*)this)->saveBoundATTRINFO();
theStatus = GetValue;
}
if (theStatus != GetValue) {
@@ -387,33 +349,15 @@ NdbOperation::getValue(const NdbColumnImpl* tAttrInfo, char* aValue)
// Insert Attribute Id into ATTRINFO part.
/************************************************************************
- * Get a Receive Attribute object and link it into the operation object.
- ************************************************************************/
- tRecAttr = theNdb->getRecAttr();
- if (tRecAttr != NULL) {
- if (theFirstRecAttr == NULL)
- theFirstRecAttr = tRecAttr;
- else
- theCurrentRecAttr->next(tRecAttr);
- theCurrentRecAttr = tRecAttr;
- tRecAttr->next(NULL);
-
- /**********************************************************************
- * Now set the attribute identity and the pointer to the data in
- * the RecAttr object
- * Also set attribute size, array size and attribute type
- ********************************************************************/
- if (tRecAttr->setup(tAttrInfo, aValue) == 0) {
- theErrorLine++;
- return tRecAttr;
- } else {
- setErrorCodeAbort(4000);
- return NULL;
- }
- } else {
+ * Get a Receive Attribute object and link it into the operation object.
+ ***********************************************************************/
+ if((tRecAttr = theReceiver.getValue(tAttrInfo, aValue)) != 0){
+ theErrorLine++;
+ return tRecAttr;
+ } else {
setErrorCodeAbort(4000);
return NULL;
- }//if getRecAttr failure
+ }
} else {
return NULL;
}//if insertATTRINFO failure
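
getValue() now only resolves the column and delegates the NdbRecAttr bookkeeping to theReceiver; the returned NdbRecAttr is still the caller's handle on the fetched value. A rough usage sketch, assuming the usual NDB API calling sequence (the column name is a placeholder and exact surrounding calls may differ in this tree):

// After defining a read operation on an open transaction:
NdbRecAttr *ra = op->getValue("some_column", NULL);  // NULL: NDB owns the buffer
if (ra == NULL) {
  // error, typically inspected through the operation's NdbError
}
// ... execute the transaction, then read the value ...
if (!ra->isNULL()) {
  Uint32 v = ra->u_32_value();  // typed accessor; see operator<< in NdbRecAttr.cpp below
  (void)v;
}
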
@@ -632,47 +576,6 @@ NdbOperation::getBlobHandle(NdbConnection* aCon, const NdbColumnImpl* tAttrInfo)
return tBlob;
}
-/*
- * Define bound on index column in range scan.
- */
-int
-NdbOperation::setBound(const NdbColumnImpl* tAttrInfo, int type, const void* aValue, Uint32 len)
-{
- if (theOperationType == OpenRangeScanRequest &&
- theStatus == SetBound &&
- (0 <= type && type <= 4) &&
- aValue != NULL &&
- len <= 8000) {
- // bound type
- insertATTRINFO(type);
- // attribute header
- Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
- if (len != sizeInBytes && (len != 0)) {
- setErrorCodeAbort(4209);
- return -1;
- }
- len = sizeInBytes;
- Uint32 tIndexAttrId = tAttrInfo->m_attrId;
- Uint32 sizeInWords = (len + 3) / 4;
- AttributeHeader ah(tIndexAttrId, sizeInWords);
- insertATTRINFO(ah.m_value);
- // attribute data
- if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0)
- insertATTRINFOloop((const Uint32*)aValue, sizeInWords);
- else {
- Uint32 temp[2000];
- memcpy(temp, aValue, len);
- while ((len & 0x3) != 0)
- ((char*)temp)[len++] = 0;
- insertATTRINFOloop(temp, sizeInWords);
- }
- return 0;
- } else {
- setErrorCodeAbort(4228); // XXX wrong code
- return -1;
- }
-}
-
/****************************************************************************
* int insertATTRINFO( Uint32 aData );
*
diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/ndb/src/ndbapi/NdbOperationExec.cpp
index d00c527550d..7ee76bf2f3e 100644
--- a/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -14,18 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/***************************************************************************
-Name: NdbOperationExec.C
-Include:
-Link:
-Author: UABRONM Mikael Ronstrm UAB/M/MT Jonas Kamf UAB/M/MT
-Date: 2001-10-16
-Version: 1.2
-Description:
-Documentation:
-***************************************************************************/
-
+#include <ndb_global.h>
#include <NdbOperation.hpp>
#include <NdbConnection.hpp>
#include "NdbApiSignal.hpp"
@@ -46,83 +35,6 @@ Documentation:
#include <NdbOut.hpp>
-/******************************************************************************
-int doSend()
-
-Return Value: Return >0 : send was successful, returns number of signals sent
- Return -1: In all other case.
-Parameters: aProcessorId: Receiving processor node
-Remark: Sends the ATTRINFO signal(s)
-******************************************************************************/
-int
-NdbOperation::doSendScan(int aProcessorId)
-{
- Uint32 tSignalCount = 0;
- NdbApiSignal* tSignal;
-
- if (theInterpretIndicator != 1 ||
- (theOperationType != OpenScanRequest &&
- theOperationType != OpenRangeScanRequest)) {
- setErrorCodeAbort(4005);
- return -1;
- }
-
- assert(theSCAN_TABREQ != NULL);
- tSignal = theSCAN_TABREQ;
- if (tSignal->setSignal(GSN_SCAN_TABREQ) == -1) {
- setErrorCode(4001);
- return -1;
- }
- // Update the "attribute info length in words" in SCAN_TABREQ before
- // sending it. This could not be done in openScan because
- // we created the ATTRINFO signals after the SCAN_TABREQ signal.
- ScanTabReq * const scanTabReq = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
- scanTabReq->attrLen = theTotalCurrAI_Len;
- if (theOperationType == OpenRangeScanRequest)
- scanTabReq->attrLen += theTotalBoundAI_Len;
- TransporterFacade *tp = TransporterFacade::instance();
- if (tp->sendSignal(tSignal, aProcessorId) == -1) {
- setErrorCode(4002);
- return -1;
- }
- tSignalCount++;
-
- tSignal = theFirstSCAN_TABINFO_Send;
- while (tSignal != NULL){
- if (tp->sendSignal(tSignal, aProcessorId)) {
- setErrorCode(4002);
- return -1;
- }
- tSignalCount++;
- tSignal = tSignal->next();
- }
-
- if (theOperationType == OpenRangeScanRequest) {
- // must have at least one signal since it contains attrLen for bounds
- assert(theBoundATTRINFO != NULL);
- tSignal = theBoundATTRINFO;
- while (tSignal != NULL) {
- if (tp->sendSignal(tSignal,aProcessorId) == -1){
- setErrorCode(4002);
- return -1;
- }
- tSignalCount++;
- tSignal = tSignal->next();
- }
- }
-
- tSignal = theFirstATTRINFO;
- while (tSignal != NULL) {
- if (tp->sendSignal(tSignal,aProcessorId) == -1){
- setErrorCode(4002);
- return -1;
- }
- tSignalCount++;
- tSignal = tSignal->next();
- }
- theStatus = WaitResponse;
- return tSignalCount;
-}//NdbOperation::doSendScan()
void
NdbOperation::setLastFlag(NdbApiSignal* signal, Uint32 lastFlag)
@@ -178,62 +90,6 @@ NdbOperation::doSend(int aNodeId, Uint32 lastFlag)
}//NdbOperation::doSend()
/***************************************************************************
-int prepareSendScan(Uint32 aTC_ConnectPtr,
- Uint64 aTransactionId)
-
-Return Value: Return 0 : preparation of send was successful.
- Return -1: In all other case.
-Parameters: aTC_ConnectPtr: the Connect pointer to TC.
- aTransactionId: the Transaction identity of the transaction.
-Remark: Puts the final data into ATTRINFO signal(s); after this
- we know how many signals to send and their sizes
-***************************************************************************/
-int NdbOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
- Uint64 aTransactionId){
-
- if (theInterpretIndicator != 1 ||
- (theOperationType != OpenScanRequest &&
- theOperationType != OpenRangeScanRequest)) {
- setErrorCodeAbort(4005);
- return -1;
- }
-
- if (theStatus == SetBound) {
- saveBoundATTRINFO();
- theStatus = GetValue;
- }
-
- theErrorLine = 0;
-
- // In prepareSendInterpreted we set the sizes (word 4-8) in the
- // first ATTRINFO signal.
- if (prepareSendInterpreted() == -1)
- return -1;
-
- const Uint32 transId1 = (Uint32) (aTransactionId & 0xFFFFFFFF);
- const Uint32 transId2 = (Uint32) (aTransactionId >> 32);
-
- if (theOperationType == OpenRangeScanRequest) {
- NdbApiSignal* tSignal = theBoundATTRINFO;
- do{
- tSignal->setData(aTC_ConnectPtr, 1);
- tSignal->setData(transId1, 2);
- tSignal->setData(transId2, 3);
- tSignal = tSignal->next();
- } while (tSignal != NULL);
- }
- theCurrentATTRINFO->setLength(theAI_LenInCurrAI);
- NdbApiSignal* tSignal = theFirstATTRINFO;
- do{
- tSignal->setData(aTC_ConnectPtr, 1);
- tSignal->setData(transId1, 2);
- tSignal->setData(transId2, 3);
- tSignal = tSignal->next();
- } while (tSignal != NULL);
- return 0;
-}
-
-/***************************************************************************
int prepareSend(Uint32 aTC_ConnectPtr,
Uint64 aTransactionId)
@@ -457,6 +313,7 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
theTCREQ->setLength(tcKeyReq->getAIInTcKeyReq(tReqInfo) +
tAttrInfoIndex + TcKeyReq::StaticLength);
+
tAIDataPtr[0] = Tdata1;
tAIDataPtr[1] = Tdata2;
tAIDataPtr[2] = Tdata3;
@@ -479,9 +336,8 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
tSignal = tnextSignal;
} while (tSignal != NULL);
}//if
- NdbRecAttr* tRecAttrObject = theFirstRecAttr;
theStatus = WaitResponse;
- theCurrentRecAttr = tRecAttrObject;
+ theReceiver.prepareSend();
return 0;
}//NdbOperation::prepareSend()
@@ -648,71 +504,10 @@ NdbOperation::prepareSendInterpreted()
theFirstATTRINFO->setData(tFinalReadSize, 7);
theFirstATTRINFO->setData(tSubroutineSize, 8);
}//if
+ theReceiver.prepareSend();
return 0;
}//NdbOperation::prepareSendInterpreted()
-/***************************************************************************
-int TCOPCONF(int anAttrInfoLen)
-
-Return Value: Return 0 : send was successful.
- Return -1: In all other case.
-Parameters: anAttrInfoLen: The length of the attribute information from TC.
-Remark: Handles the reception of the TC[KEY/INDX]CONF signal.
-***************************************************************************/
-void
-NdbOperation::TCOPCONF(Uint32 anAttrInfoLen)
-{
- Uint32 tCurrRecLen = theCurrRecAI_Len;
- if (theStatus == WaitResponse) {
- theTotalRecAI_Len = anAttrInfoLen;
- if (anAttrInfoLen == tCurrRecLen) {
- Uint32 tAI_ElemLen = theAI_ElementLen;
- NdbRecAttr* tCurrRecAttr = theCurrentRecAttr;
- theStatus = Finished;
-
- if ((tAI_ElemLen == 0) &&
- (tCurrRecAttr == NULL)) {
- NdbRecAttr* tRecAttr = theFirstRecAttr;
- while (tRecAttr != NULL) {
- if (tRecAttr->copyoutRequired()) // copy to application buffer
- tRecAttr->copyout();
- tRecAttr = tRecAttr->next();
- }
- theNdbCon->OpCompleteSuccess();
- return;
- } else if (tAI_ElemLen != 0) {
- setErrorCode(4213);
- theNdbCon->OpCompleteFailure();
- return;
- } else {
- setErrorCode(4214);
- theNdbCon->OpCompleteFailure();
- return;
- }//if
- } else if (anAttrInfoLen > tCurrRecLen) {
- return;
- } else {
- theStatus = Finished;
-
- if (theAI_ElementLen != 0) {
- setErrorCode(4213);
- theNdbCon->OpCompleteFailure();
- return;
- }//if
- if (theCurrentRecAttr != NULL) {
- setErrorCode(4214);
- theNdbCon->OpCompleteFailure();
- return;
- }//if
- theNdbCon->OpCompleteFailure();
- return;
- }//if
- } else {
- setErrorCode(4004);
- }//if
- return;
-}//NdbOperation::TCKEYOPCONF()
-
int
NdbOperation::checkState_TransId(NdbApiSignal* aSignal)
{
@@ -777,188 +572,13 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
}//NdbOperation::receiveTCKEYREF()
-/***************************************************************************
-int receiveREAD_CONF( NdbApiSignal* aSignal)
-
-Return Value: Return 0 : send was successful.
- Return -1: In all other case.
-Parameters: aSignal: the signal object that contains the READCONF signal from TUP.
-Remark: Handles the reception of the READCONF signal.
-***************************************************************************/
-int
-NdbOperation::receiveREAD_CONF(const Uint32* aDataPtr, Uint32 aDataLength)
-{
- Uint64 tRecTransId, tCurrTransId;
- Uint32 tCondFlag = (Uint32)(theStatus - WaitResponse);
- Uint32 tTotLen = aDataPtr[3];
-
- tRecTransId = (Uint64)aDataPtr[1] + ((Uint64)aDataPtr[2] << 32);
- tCurrTransId = theNdbCon->getTransactionId();
- tCondFlag |= (Uint32)((tRecTransId - tCurrTransId) != (Uint64)0);
- tCondFlag |= (Uint32)(aDataLength < 4);
-
- if (tCondFlag == 0) {
- theTotalRecAI_Len = tTotLen;
- int tRetValue = receiveREAD_AI((Uint32*)&aDataPtr[4], (aDataLength - 4));
- if (theStatus == Finished) {
- return tRetValue;
- } else {
- theStatus = Finished;
- return theNdbCon->OpCompleteFailure();
- }//if
- }//if
-#ifdef NDB_NO_DROPPED_SIGNAL
- abort();
-#endif
- return -1;
-}//NdbOperation::receiveREAD_CONF()
-
-/***************************************************************************
-int receiveTRANSID_AI( NdbApiSignal* aSignal)
-
-Return Value: Return 0 : send was successful.
- Return -1: In all other case.
-Parameters: aSignal: the signal object that contains the TRANSID_AI signal.
-Remark: Handles the reception of the TRANSID_AI signal.
-***************************************************************************/
-int
-NdbOperation::receiveTRANSID_AI(const Uint32* aDataPtr, Uint32 aDataLength)
-{
- Uint64 tRecTransId, tCurrTransId;
- Uint32 tCondFlag = (Uint32)(theStatus - WaitResponse);
-
- tRecTransId = (Uint64)aDataPtr[1] + ((Uint64)aDataPtr[2] << 32);
- tCurrTransId = theNdbCon->getTransactionId();
- tCondFlag |= (Uint32)((tRecTransId - tCurrTransId) != (Uint64)0);
- tCondFlag |= (Uint32)(aDataLength < 3);
-
- if (tCondFlag == 0) {
- return receiveREAD_AI((Uint32*)&aDataPtr[3], (aDataLength - 3));
- }//if
-#ifdef NDB_NO_DROPPED_SIGNAL
- abort();
-#endif
- return -1;
-}//NdbOperation::receiveTRANSID_AI()
-
-/***************************************************************************
-int receiveREAD_AI( NdbApiSignal* aSignal, int aLength, int aStartPos)
-
-Return Value: Return 0 : send was successful.
- Return -1: In all other case.
-Parameters: aSignal: the signal object that contains the LEN_ATTRINFO11 signal.
- aLength:
- aStartPos:
-Remark: Handles the reception of the LEN_ATTRINFO11 signal.
-***************************************************************************/
-int
-NdbOperation::receiveREAD_AI(Uint32* aDataPtr, Uint32 aLength)
-{
-
- register Uint32 tAI_ElementLen = theAI_ElementLen;
- register Uint32* tCurrElemPtr = theCurrElemPtr;
- if (theError.code == 0) {
- // If inconsistency error occurred we will still continue
- // receiving signals since we need to know whether commit
- // has occurred.
-
- register Uint32 tData;
- for (register Uint32 i = 0; i < aLength ; i++, aDataPtr++)
- {
- // Code to receive Attribute Information
- tData = *aDataPtr;
- if (tAI_ElementLen != 0) {
- tAI_ElementLen--;
- *tCurrElemPtr = tData;
- tCurrElemPtr++;
- continue;
- } else {
- // Waiting for a new attribute element
- NdbRecAttr* tWorkingRecAttr;
-
- tWorkingRecAttr = theCurrentRecAttr;
- AttributeHeader ah(tData);
- const Uint32 tAttrId = ah.getAttributeId();
- const Uint32 tAttrSize = ah.getDataSize();
- if ((tWorkingRecAttr != NULL) &&
- (tWorkingRecAttr->attrId() == tAttrId)) {
- ;
- } else {
- setErrorCode(4211);
- break;
- }//if
- theCurrentRecAttr = tWorkingRecAttr->next();
- NdbColumnImpl * col = m_currentTable->getColumn(tAttrId);
- if (ah.isNULL()) {
- // Return a Null value from the NDB to the attribute.
- if(col != 0 && col->m_nullable) {
- tWorkingRecAttr->setNULL();
- tAI_ElementLen = 0;
- } else {
- setErrorCode(4212);
- break;
- }//if
- } else {
- // Return a value from the NDB to the attribute.
- tWorkingRecAttr->setNotNULL();
- const Uint32 sizeInBytes = col->m_attrSize * col->m_arraySize;
- const Uint32 sizeInWords = (sizeInBytes + 3) / 4;
- tAI_ElementLen = tAttrSize;
- tCurrElemPtr = (Uint32*)tWorkingRecAttr->aRef();
- if (sizeInWords == tAttrSize){
- continue;
- } else {
- setErrorCode(4201);
- break;
- }//if
- }//if
- }//if
- }//for
- }//if
- Uint32 tCurrRecLen = theCurrRecAI_Len;
- Uint32 tTotRecLen = theTotalRecAI_Len;
- theAI_ElementLen = tAI_ElementLen;
- theCurrElemPtr = tCurrElemPtr;
- tCurrRecLen = tCurrRecLen + aLength;
- theCurrRecAI_Len = tCurrRecLen; // Update Current Received AI Length
- if (tTotRecLen == tCurrRecLen){ // Operation completed
- NdbRecAttr* tCurrRecAttr = theCurrentRecAttr;
- theStatus = Finished;
-
- NdbConnection* tNdbCon = theNdbCon;
- if ((tAI_ElementLen == 0) &&
- (tCurrRecAttr == NULL)) {
- NdbRecAttr* tRecAttr = theFirstRecAttr;
- while (tRecAttr != NULL) {
- if (tRecAttr->copyoutRequired()) // copy to application buffer
- tRecAttr->copyout();
- tRecAttr = tRecAttr->next();
- }
- return tNdbCon->OpCompleteSuccess();
- } else if (tAI_ElementLen != 0) {
- setErrorCode(4213);
- return tNdbCon->OpCompleteFailure();
- } else {
- setErrorCode(4214);
- return tNdbCon->OpCompleteFailure();
- }//if
- }
- else if ((tCurrRecLen > tTotRecLen) &&
- (tTotRecLen > 0)) { /* == 0 if TCKEYCONF not yet received */
- setErrorCode(4215);
- theStatus = Finished;
-
- return theNdbCon->OpCompleteFailure();
- }//if
- return -1; // Continue waiting for more signals of this operation
-}//NdbOperation::receiveREAD_AI()
void
NdbOperation::handleFailedAI_ElemLen()
{
- NdbRecAttr* tRecAttr = theFirstRecAttr;
+ NdbRecAttr* tRecAttr = theReceiver.theFirstRecAttr;
while (tRecAttr != NULL) {
- tRecAttr->setUNDEFINED();
+ tRecAttr->setNULL();
tRecAttr = tRecAttr->next();
}//while
}//NdbOperation::handleFailedAI_ElemLen()
diff --git a/ndb/src/ndbapi/NdbOperationInt.cpp b/ndb/src/ndbapi/NdbOperationInt.cpp
index e61fc5b05d7..3a7e0dda85e 100644
--- a/ndb/src/ndbapi/NdbOperationInt.cpp
+++ b/ndb/src/ndbapi/NdbOperationInt.cpp
@@ -33,6 +33,7 @@ Adjust: 991029 UABRONM First version.
#include "NdbRecAttr.hpp"
#include "NdbUtil.hpp"
#include "Interpreter.hpp"
+#include <NdbIndexScanOperation.hpp>
#ifdef VM_TRACE
#include <NdbEnv.h>
@@ -43,6 +44,31 @@ Adjust: 991029 UABRONM First version.
#define INT_DEBUG(x)
#endif
+void
+NdbOperation::initInterpreter(){
+ theFirstLabel = NULL;
+ theLastLabel = NULL;
+ theFirstBranch = NULL;
+ theLastBranch = NULL;
+
+ theFirstCall = NULL;
+ theLastCall = NULL;
+ theFirstSubroutine = NULL;
+ theLastSubroutine = NULL;
+
+ theNoOfLabels = 0;
+ theNoOfSubroutines = 0;
+
+ theSubroutineSize = 0;
+ theInitialReadSize = 0;
+ theInterpretedSize = 0;
+ theFinalUpdateSize = 0;
+ theFinalReadSize = 0;
+ theInterpretIndicator = 1;
+
+ theTotalCurrAI_Len = 5;
+}
+
int
NdbOperation::incCheck(const NdbColumnImpl* tNdbColumnImpl)
{
@@ -191,7 +217,7 @@ NdbOperation::initial_interpreterCheck()
{
if ((theInterpretIndicator == 1)) {
if (theStatus == SetBound) {
- saveBoundATTRINFO();
+ ((NdbIndexScanOperation*)this)->saveBoundATTRINFO();
theStatus = GetValue;
}
if (theStatus == ExecInterpretedValue) {
@@ -382,9 +408,7 @@ NdbOperation::incValue(const NdbColumnImpl* tNdbColumnImpl, Uint64 aValue)
// Load aValue into register 7
if (insertATTRINFO( Interpreter::LoadConst64(7)) == -1)
goto incValue_error1;
- if (insertATTRINFO((Uint32)(aValue >> 32)) == -1)
- goto incValue_error1;
- if (insertATTRINFO(Uint32(aValue & 0xFFFFFFFF)) == -1)
+ if (insertATTRINFOloop((Uint32*)&aValue, 2) == -1)
goto incValue_error1;
// Add register 6 and 7 and put result in register 7
if (insertATTRINFO( Interpreter::Add(7, 6, 7)) == -1)
@@ -425,9 +449,7 @@ NdbOperation::subValue(const NdbColumnImpl* tNdbColumnImpl, Uint64 aValue)
// Load aValue into register 7
if (insertATTRINFO( Interpreter::LoadConst64(7)) == -1)
goto subValue_error1;
- if (insertATTRINFO((Uint32)(aValue >> 32)) == -1)
- goto subValue_error1;
- if (insertATTRINFO(Uint32(aValue & 0xFFFFFFFF)) == -1)
+ if (insertATTRINFOloop((Uint32*)&aValue, 2) == -1)
goto subValue_error1;
// Subtract register 6 and 7 and put result in register 7
if (insertATTRINFO( Interpreter::Sub(7, 6, 7)) == -1)
@@ -664,8 +686,6 @@ int
NdbOperation::load_const_u64(Uint32 RegDest, Uint64 Constant)
{
INT_DEBUG(("load_const_u64 %u %llu", RegDest, Constant));
- Uint32 tTemp1;
- Uint32 tTemp2;
if (initial_interpreterCheck() == -1)
return -1;
if (RegDest >= 8)
@@ -673,15 +693,11 @@ NdbOperation::load_const_u64(Uint32 RegDest, Uint64 Constant)
setErrorCodeAbort(4229);
return -1;
}
- tTemp1 = (Uint32)(Constant & 0xFFFFFFFF);
- tTemp2 = (Uint32)(Constant >> 32);
-
+
// 64 bit value
if (insertATTRINFO( Interpreter::LoadConst64(RegDest)) == -1)
return -1;
- if (insertATTRINFO(tTemp1) == -1)
- return -1;
- if (insertATTRINFO(tTemp2) == -1)
+ if (insertATTRINFOloop((Uint32*)&Constant, 2) == -1)
return -1;
theErrorLine++;
return 0;
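
initInterpreter() centralises the state reset that interpretedUpdateTuple() and interpretedDeleteTuple() previously open-coded, and incValue()/load_const_u64() now push 64-bit constants through insertATTRINFOloop() instead of splitting the high and low words by hand. A rough sketch of how the interpreted-update path is typically driven from application code; the table and column names are placeholders and the exact public signatures may differ in this tree:

NdbOperation *op = trans->getNdbOperation("counters");   // hypothetical table
if (op == NULL || op->interpretedUpdateTuple() == -1) {
  // handle error
}
op->equal("id", 42);                 // primary key of the row to update
op->incValue("hits", (Uint64)1);     // builds the Interpreter program shown above
// executing the transaction then ships the ATTRINFO program to the data nodes
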
diff --git a/ndb/src/ndbapi/NdbOperationScan.cpp b/ndb/src/ndbapi/NdbOperationScan.cpp
index 299e6f2adea..283eb591bdb 100644
--- a/ndb/src/ndbapi/NdbOperationScan.cpp
+++ b/ndb/src/ndbapi/NdbOperationScan.cpp
@@ -14,590 +14,3 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include "NdbOperation.hpp"
-#include "NdbScanReceiver.hpp"
-
-#include <signaldata/TcKeyReq.hpp>
-#include <signaldata/ScanTab.hpp>
-#include <signaldata/ScanFrag.hpp>
-#include <signaldata/KeyInfo.hpp>
-
-
-/******************************************************************************
- * int openScanRead();
- *****************************************************************************/
-int
-NdbOperation::openScanRead(Uint32 aParallelism)
-{
- aParallelism = checkParallelism(aParallelism);
-
- if ((theNdbCon->theCommitStatus != NdbConnection::Started) &&
- (theStatus != Init) &&
- (aParallelism == 0)) {
- setErrorCode(4200);
- return -1;
- }
- return openScan(aParallelism, false, false, false);
-}
-
-/****************************************************************************
- * int openScanExclusive();
- ****************************************************************************/
-int
-NdbOperation::openScanExclusive(Uint32 aParallelism)
-{
- aParallelism = checkParallelism(aParallelism);
-
- if ((theNdbCon->theCommitStatus != NdbConnection::Started) &&
- (theStatus != Init) &&
- (aParallelism == 0)) {
- setErrorCode(4200);
- return -1;
- }
- return openScan(aParallelism, true, true, false);
-}
-
-/******************************************************************************
- * int openScanReadHoldLock();
- *****************************************************************************/
-int
-NdbOperation::openScanReadHoldLock(Uint32 aParallelism)
-{
- aParallelism = checkParallelism(aParallelism);
-
- if ((theNdbCon->theCommitStatus != NdbConnection::Started) &&
- (theStatus != Init) &&
- (aParallelism == 0)) {
- setErrorCode(4200);
- return -1;
- }
- return openScan(aParallelism, false, true, false);
-}
-
-/******************************************************************************
- * int openScanReadCommitted();
- *****************************************************************************/
-int
-NdbOperation::openScanReadCommitted(Uint32 aParallelism)
-{
- aParallelism = checkParallelism(aParallelism);
-
- if ((theNdbCon->theCommitStatus != NdbConnection::Started) &&
- (theStatus != Init) &&
- (aParallelism == 0)) {
- setErrorCode(4200);
- return -1;
- }
- return openScan(aParallelism, false, false, true);
-}
-
-/****************************************************************************
- * int checkParallelism();
- * Remark If the parallelism is set wrong the number of scan-operations
- * will not correspond to the number of TRANSID_AI signals returned
- * from NDB and the result will be a crash, therefore
- * we adjust it or return an error if the value is totally wrong.
- ****************************************************************************/
-int
-NdbOperation::checkParallelism(Uint32 aParallelism)
-{
- if (aParallelism == 0) {
- setErrorCodeAbort(4232);
- return 0;
- }
- if (aParallelism > 16) {
- if (aParallelism <= 240) {
-
- /**
- * If tscanConcurrency > 16 it must be a multiple of 16
- */
- if (((aParallelism >> 4) << 4) < aParallelism) {
- aParallelism = ((aParallelism >> 4) << 4) + 16;
- }//if
-
- /*---------------------------------------------------------------*/
- /* We cannot have a parallelism > 16 per node */
- /*---------------------------------------------------------------*/
- if ((aParallelism / theNdb->theNoOfDBnodes) > 16) {
- aParallelism = theNdb->theNoOfDBnodes * 16;
- }//if
-
- } else {
- setErrorCodeAbort(4232);
- aParallelism = 0;
- }//if
- }//if
- return aParallelism;
-}//NdbOperation::checkParallelism()
-
-/**********************************************************************
- * int openScan();
- *************************************************************************/
-int
-NdbOperation::openScan(Uint32 aParallelism,
- bool lockMode, bool lockHoldMode, bool readCommitted)
-{
- aParallelism = checkParallelism(aParallelism);
- if(aParallelism == 0){
- return 0;
- }
- NdbScanReceiver* tScanRec;
- // It is only possible to call openScan if
- // 1. this transaction doesn't already contain another scan operation
- // 2. this transaction doesn't already contain other operations
- // 3. theScanOp contains a NdbScanOperation
- if (theNdbCon->theScanningOp != NULL){
- setErrorCode(4605);
- return -1;
- }
-
- if ((theNdbCon->theFirstOpInList != this) ||
- (theNdbCon->theLastOpInList != this)) {
- setErrorCode(4603);
- return -1;
- }
- theNdbCon->theScanningOp = this;
-
- initScan();
- theParallelism = aParallelism;
-
- // If the scan is on ordered index then it is a range scan
- if (m_currentTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
- m_currentTable->m_indexType == NdbDictionary::Index::UniqueOrderedIndex) {
- assert(m_currentTable == m_accessTable);
- m_currentTable = theNdb->theDictionary->getTable(m_currentTable->m_primaryTable.c_str());
- assert(m_currentTable != NULL);
- // Modify operation state
- theStatus = SetBound;
- theOperationType = OpenRangeScanRequest;
- }
-
- theScanReceiversArray = new NdbScanReceiver* [aParallelism];
- if (theScanReceiversArray == NULL){
- setErrorCodeAbort(4000);
- return -1;
- }
-
- for (Uint32 i = 0; i < aParallelism; i ++) {
- tScanRec = theNdb->getNdbScanRec();
- if (tScanRec == NULL) {
- setErrorCodeAbort(4000);
- return -1;
- }//if
- tScanRec->init(this, lockMode);
- theScanReceiversArray[i] = tScanRec;
- }
-
- theSCAN_TABREQ = theNdb->getSignal();
- if (theSCAN_TABREQ == NULL) {
- setErrorCodeAbort(4000);
- return -1;
- }//if
- ScanTabReq * const scanTabReq = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
- scanTabReq->apiConnectPtr = theNdbCon->theTCConPtr;
- scanTabReq->tableId = m_accessTable->m_tableId;
- scanTabReq->tableSchemaVersion = m_accessTable->m_version;
- scanTabReq->storedProcId = 0xFFFF;
- scanTabReq->buddyConPtr = theNdbCon->theBuddyConPtr;
-
- Uint32 reqInfo = 0;
- ScanTabReq::setParallelism(reqInfo, aParallelism);
- ScanTabReq::setLockMode(reqInfo, lockMode);
- ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode);
- ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted);
- if (theOperationType == OpenRangeScanRequest)
- ScanTabReq::setRangeScanFlag(reqInfo, true);
- scanTabReq->requestInfo = reqInfo;
-
- Uint64 transId = theNdbCon->getTransactionId();
- scanTabReq->transId1 = (Uint32) transId;
- scanTabReq->transId2 = (Uint32) (transId >> 32);
-
- for (Uint32 i = 0; i < 16 && i < aParallelism ; i++) {
- scanTabReq->apiOperationPtr[i] = theScanReceiversArray[i]->ptr2int();
- }//for
-
- // Create one additional SCAN_TABINFO for each
- // 16 of parallelism
- NdbApiSignal* tSignal;
- Uint32 tParallelism = aParallelism;
- while (tParallelism > 16) {
- tSignal = theNdb->getSignal();
- if (tSignal == NULL) {
- setErrorCodeAbort(4000);
- return -1;
- }//if
- if (tSignal->setSignal(GSN_SCAN_TABINFO) == -1) {
- setErrorCode(4001);
- return -1;
- }
- tSignal->next(theFirstSCAN_TABINFO_Send);
- theFirstSCAN_TABINFO_Send = tSignal;
- tParallelism -= 16;
- }//while
-
- // Format all SCAN_TABINFO signals
- tParallelism = 16;
- tSignal = theFirstSCAN_TABINFO_Send;
- while (tSignal != NULL) {
- tSignal->setData(theNdbCon->theTCConPtr, 1);
- for (int i = 0; i < 16 ; i++) {
- tSignal->setData(theScanReceiversArray[i + tParallelism]->ptr2int(), i + 2);
- }//for
- tSignal = tSignal->next();
- tParallelism += 16;
- }//while
-
- getFirstATTRINFOScan();
- return 0;
-}//NdbScanOperation::openScan()
-
-/*****************************************************************************
- * int getFirstATTRINFOScan( U_int32 aData )
- *
- * Return Value: Return 0: Successful
- * Return -1: All other cases
- * Parameters: None: Only allocate the first signal.
- * Remark: When a scan is defined we need to use this method instead
- * of insertATTRINFO for the first signal.
- * This is because we need not to mess up the code in
- * insertATTRINFO with if statements since we are not
- * interested in the TCKEYREQ signal.
- *****************************************************************************/
-int
-NdbOperation::getFirstATTRINFOScan()
-{
- NdbApiSignal* tSignal;
-
- tSignal = theNdb->getSignal();
- if (tSignal == NULL){
- setErrorCodeAbort(4000);
- return -1;
- }
- tSignal->setSignal(m_attrInfoGSN);
- theAI_LenInCurrAI = 8;
- theATTRINFOptr = &tSignal->getDataPtrSend()[8];
- theFirstATTRINFO = tSignal;
- theCurrentATTRINFO = tSignal;
- theCurrentATTRINFO->next(NULL);
- return 0;
-}
-
-/*
- * After setBound() are done, move the accumulated ATTRINFO signals to
- * a separate list. Then continue with normal scan.
- */
-int
-NdbOperation::saveBoundATTRINFO()
-{
- theCurrentATTRINFO->setLength(theAI_LenInCurrAI);
- theBoundATTRINFO = theFirstATTRINFO;
- theTotalBoundAI_Len = theTotalCurrAI_Len;
- theTotalCurrAI_Len = 5;
- theBoundATTRINFO->setData(theTotalBoundAI_Len, 4);
- theBoundATTRINFO->setData(0, 5);
- theBoundATTRINFO->setData(0, 6);
- theBoundATTRINFO->setData(0, 7);
- theBoundATTRINFO->setData(0, 8);
- theStatus = GetValue;
- return getFirstATTRINFOScan();
-}
-
-/*****************************************************************************
- * void releaseScan()
- *
- * Return Value No return value.
- * Parameters: No parameters.
- * Remark: Release objects after scanning.
- *****************************************************************************/
-void
-NdbOperation::releaseScan()
-{
- NdbScanReceiver* tScanRec;
- TransporterFacade::instance()->lock_mutex();
- for (Uint32 i = 0; i < theParallelism && theScanReceiversArray != NULL; i++) {
- tScanRec = theScanReceiversArray[i];
- if (tScanRec != NULL) {
- tScanRec->release();
- tScanRec->next(NULL);
- }
- }
- TransporterFacade::instance()->unlock_mutex();
- releaseSignals();
-
- if (theScanReceiversArray != NULL) {
- for (Uint32 i = 0; i < theParallelism; i++) {
- NdbScanReceiver* tScanRec;
- tScanRec = theScanReceiversArray[i];
- if (tScanRec != NULL) {
- theNdb->releaseNdbScanRec(tScanRec);
- theScanReceiversArray[i] = NULL;
- }
- }
-
- delete [] theScanReceiversArray;
- }//if
- theScanReceiversArray = NULL;
-
- if (theSCAN_TABREQ != NULL){
- theNdb->releaseSignal(theSCAN_TABREQ);
- theSCAN_TABREQ = NULL;
- }
-}
-
-void NdbOperation::releaseSignals(){
- theNdb->releaseSignalsInList(&theFirstSCAN_TABINFO_Send);
- theFirstSCAN_TABINFO_Send = NULL;
- theLastSCAN_TABINFO_Send = NULL;
- // theNdb->releaseSignalsInList(&theFirstSCAN_TABINFO_Recv);
-
- while(theFirstSCAN_TABINFO_Recv != NULL){
- NdbApiSignal* tmp = theFirstSCAN_TABINFO_Recv;
- theFirstSCAN_TABINFO_Recv = tmp->next();
- delete tmp;
- }
- theFirstSCAN_TABINFO_Recv = NULL;
- theLastSCAN_TABINFO_Recv = NULL;
- if (theSCAN_TABCONF_Recv != NULL){
- // theNdb->releaseSignal(theSCAN_TABCONF_Recv);
- delete theSCAN_TABCONF_Recv;
- theSCAN_TABCONF_Recv = NULL;
- }
-}
-
-
-void NdbOperation::prepareNextScanResult(){
- NdbScanReceiver* tScanRec;
- for (Uint32 i = 0; i < theParallelism; i++) {
- tScanRec = theScanReceiversArray[i];
- assert(tScanRec != NULL);
- tScanRec->prepareNextScanResult();
- tScanRec->next(NULL);
- }
- releaseSignals();
-}
-
-/******************************************************************************
- * void initScan();
- *
- * Return Value: Return 0 : init was successful.
- * Return -1: In all other case.
- * Remark: Initiates operation record after allocation.
- *****************************************************************************/
-void
-NdbOperation::initScan()
-{
- theTotalRecAI_Len = 0;
- theCurrRecAI_Len = 0;
- theStatus = GetValue;
- theOperationType = OpenScanRequest;
- theCurrentRecAttr = theFirstRecAttr;
- theScanInfo = 0;
- theMagicNumber = 0xABCDEF01;
- theTotalCurrAI_Len = 5;
-
- theFirstLabel = NULL;
- theLastLabel = NULL;
- theFirstBranch = NULL;
- theLastBranch = NULL;
-
- theFirstCall = NULL;
- theLastCall = NULL;
- theFirstSubroutine = NULL;
- theLastSubroutine = NULL;
-
- theNoOfLabels = 0;
- theNoOfSubroutines = 0;
-
- theSubroutineSize = 0;
- theInitialReadSize = 0;
- theInterpretedSize = 0;
- theFinalUpdateSize = 0;
- theFinalReadSize = 0;
- theInterpretIndicator = 1;
-
-
- theFirstSCAN_TABINFO_Send = NULL;
- theLastSCAN_TABINFO_Send = NULL;
- theFirstSCAN_TABINFO_Recv = NULL;
- theLastSCAN_TABINFO_Recv = NULL;
- theSCAN_TABCONF_Recv = NULL;
-
- theScanReceiversArray = NULL;
-
- theTotalBoundAI_Len = 0;
- theBoundATTRINFO = NULL;
- return;
-}
-
-NdbOperation* NdbOperation::takeOverForDelete(NdbConnection* updateTrans){
- return takeOverScanOp(DeleteRequest, updateTrans);
-}
-
-NdbOperation* NdbOperation::takeOverForUpdate(NdbConnection* updateTrans){
- return takeOverScanOp(UpdateRequest, updateTrans);
-}
-/******************************************************************************
- * NdbOperation* takeOverScanOp(NdbConnection* updateTrans);
- *
- * Parameters: The update transactions NdbConnection pointer.
- * Return Value: A reference to the transferred operation object
- * or NULL if no success.
- * Remark: Take over the scanning transactions NdbOperation
- * object for a tuple to an update transaction,
- * which is the last operation read in nextScanResult()
- * (theNdbCon->thePreviousScanRec)
- *
- * FUTURE IMPLEMENTATION: (This note was moved from header file.)
- * In the future, it will even be possible to transfer
- * to a NdbConnection on another Ndb-object.
- * In this case the receiving NdbConnection-object must call
- * a method receiveOpFromScan to actually receive the information.
- * This means that the updating transactions can be placed
- * in separate threads and thus increasing the parallelism during
- * the scan process.
- *****************************************************************************/
-NdbOperation*
-NdbOperation::takeOverScanOp(OperationType opType, NdbConnection* updateTrans)
-{
- if (opType != UpdateRequest && opType != DeleteRequest) {
- setErrorCode(4604);
- return NULL;
- }
-
- const NdbScanReceiver* tScanRec = theNdbCon->thePreviousScanRec;
- if (tScanRec == NULL){
- // No operation read by nextScanResult
- setErrorCode(4609);
- return NULL;
- }
-
- if (tScanRec->theFirstKEYINFO20_Recv == NULL){
- // No KEYINFO20 received
- setErrorCode(4608);
- return NULL;
- }
-
- NdbOperation * newOp = updateTrans->getNdbOperation(m_currentTable);
- if (newOp == NULL){
- return NULL;
- }
-
- /**
- * Copy and calculate attributes from the scanned operation to the
- * new operation
- */
- const KeyInfo20 * const firstKeyInfo20 =
- CAST_CONSTPTR(KeyInfo20, tScanRec->theFirstKEYINFO20_Recv->getDataPtr());
- const Uint32 totalKeyLen = firstKeyInfo20->keyLen;
- newOp->theTupKeyLen = totalKeyLen;
-
- newOp->theOperationType = opType;
- if (opType == DeleteRequest) {
- newOp->theStatus = GetValue;
- } else {
- newOp->theStatus = SetValue;
- }
- const Uint32 tScanInfo = firstKeyInfo20->scanInfo_Node & 0xFFFF;
- const Uint32 tTakeOverNode = firstKeyInfo20->scanInfo_Node >> 16;
- {
- UintR scanInfo = 0;
- TcKeyReq::setTakeOverScanFlag(scanInfo, 1);
- TcKeyReq::setTakeOverScanNode(scanInfo, tTakeOverNode);
- TcKeyReq::setTakeOverScanInfo(scanInfo, tScanInfo);
- newOp->theScanInfo = scanInfo;
- }
-
- /**
- * Copy received KEYINFO20 signals into TCKEYREQ and KEYINFO signals
- * put them in list of the new op
- */
- TcKeyReq * const tcKeyReq =
- CAST_PTR(TcKeyReq, newOp->theTCREQ->getDataPtrSend());
-
- // Copy the first 8 words of key info from KEYINF20 into TCKEYREQ
- for (Uint32 i = 0; i < TcKeyReq::MaxKeyInfo; i++) {
- tcKeyReq->keyInfo[i] = firstKeyInfo20->keyData[i];
- }
- if (totalKeyLen > TcKeyReq::MaxKeyInfo) {
-
- Uint32 keyWordsCopied = TcKeyReq::MaxKeyInfo;
-
- // Create KEYINFO signals in newOp
- for (Uint32 i = keyWordsCopied; i < totalKeyLen; i += KeyInfo::DataLength){
- NdbApiSignal* tSignal = theNdb->getSignal();
- if (tSignal == NULL){
- setErrorCodeAbort(4000);
- return NULL;
- }
- if (tSignal->setSignal(GSN_KEYINFO) == -1){
- setErrorCodeAbort(4001);
- return NULL;
- }
- tSignal->next(newOp->theFirstKEYINFO);
- newOp->theFirstKEYINFO = tSignal;
- }
-
- // Init pointers to KEYINFO20 signal
- NdbApiSignal* currKeyInfo20 = tScanRec->theFirstKEYINFO20_Recv;
- const KeyInfo20 * keyInfo20 =
- CAST_CONSTPTR(KeyInfo20, currKeyInfo20->getDataPtr());
- Uint32 posInKeyInfo20 = keyWordsCopied;
-
- // Init pointers to KEYINFO signal
- NdbApiSignal* currKeyInfo = newOp->theFirstKEYINFO;
- KeyInfo * keyInfo = CAST_PTR(KeyInfo, currKeyInfo->getDataPtrSend());
- Uint32 posInKeyInfo = 0;
-
- // Copy from KEYINFO20 to KEYINFO
- while(keyWordsCopied < totalKeyLen){
- keyInfo->keyData[posInKeyInfo++] = keyInfo20->keyData[posInKeyInfo20++];
- keyWordsCopied++;
- if(keyWordsCopied >= totalKeyLen)
- break;
- if (posInKeyInfo20 >=
- (currKeyInfo20->getLength()-KeyInfo20::HeaderLength)){
- currKeyInfo20 = currKeyInfo20->next();
- keyInfo20 = CAST_CONSTPTR(KeyInfo20, currKeyInfo20->getDataPtr());
- posInKeyInfo20 = 0;
- }
- if (posInKeyInfo >= KeyInfo::DataLength){
- currKeyInfo = currKeyInfo->next();
- keyInfo = CAST_PTR(KeyInfo, currKeyInfo->getDataPtrSend());
- posInKeyInfo = 0;
- }
- }
- }
-
- // create blob handles automatically
- if (opType == DeleteRequest && m_currentTable->m_noOfBlobs != 0) {
- for (unsigned i = 0; i < m_currentTable->m_columns.size(); i++) {
- NdbColumnImpl* c = m_currentTable->m_columns[i];
- assert(c != 0);
- if (c->getBlobType()) {
- if (newOp->getBlobHandle(updateTrans, c) == NULL)
- return NULL;
- }
- }
- }
-
- return newOp;
-}
-
-int
-NdbOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
-{
- const NdbScanReceiver* tScanRec = theNdbCon->thePreviousScanRec;
- NdbApiSignal* tSignal = tScanRec->theFirstKEYINFO20_Recv;
- unsigned pos = 0;
- unsigned n = 0;
- while (pos < size) {
- if (n == 20) {
- tSignal = tSignal->next();
- n = 0;
- }
- const unsigned h = KeyInfo20::HeaderLength;
- data[pos++] = tSignal->getDataPtrSend()[h + n++];
- }
- return 0;
-}
diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/ndb/src/ndbapi/NdbRecAttr.cpp
index 0ed2ff4e796..2e753f13006 100644
--- a/ndb/src/ndbapi/NdbRecAttr.cpp
+++ b/ndb/src/ndbapi/NdbRecAttr.cpp
@@ -29,6 +29,7 @@ Adjust: 971206 UABRONM First version
#include <ndb_global.h>
#include <NdbOut.hpp>
#include <NdbRecAttr.hpp>
+#include <NdbBlob.hpp>
#include "NdbDictionaryImpl.hpp"
#include <NdbTCP.h>
@@ -60,6 +61,8 @@ NdbRecAttr::setup(const NdbColumnImpl* anAttrInfo, char* aValue)
theAttrSize = tAttrSize;
theArraySize = tArraySize;
theValue = aValue;
+ theNULLind = 0;
+ m_nullable = anAttrInfo->m_nullable;
// check alignment to signal data
// a future version could check alignment per data type as well
@@ -128,78 +131,117 @@ NdbRecAttr::clone() const {
return ret;
}
-NdbOut& operator<<(NdbOut& ndbout, const NdbRecAttr &r)
+bool
+NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){
+ const Uint32 n = (theAttrSize * theArraySize + 3) >> 2;
+ if(n == sz){
+ theNULLind = 0;
+ if(!copyoutRequired())
+ memcpy(theRef, data, 4 * sz);
+ else
+ memcpy(theValue, data, theAttrSize * theArraySize);
+ return true;
+ } else if(sz == 0){
+ setNULL();
+ return true;
+ }
+ return false;
+}
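
receive_data() accepts a value only when the received length equals the attribute's byte size rounded up to whole 32-bit words, and treats a length of zero words as the NULL indicator. A minimal stand-alone sketch of that check follows; the function and parameter names are illustrative, not part of the NDB API.

    #include <cstdint>

    // Sketch of the length check in NdbRecAttr::receive_data(): the expected
    // size is the attribute's byte size rounded up to 32-bit words, and a
    // received length of 0 words is the NULL indicator.
    static bool accept_attribute(uint32_t attrSizeBytes, uint32_t arraySize,
                                 uint32_t receivedWords)
    {
      const uint32_t expectedWords = (attrSizeBytes * arraySize + 3) >> 2;
      if (receivedWords == expectedWords)
        return true;   // normal value; the real code memcpy's it to the buffer
      if (receivedWords == 0)
        return true;   // NULL value; the real code calls setNULL()
      return false;    // length mismatch
    }
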
+
+NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
{
if (r.isNULL())
{
- ndbout << "[NULL]";
- return ndbout;
+ out << "[NULL]";
+ return out;
}
if (r.arraySize() > 1)
- ndbout << "[";
+ out << "[";
for (Uint32 j = 0; j < r.arraySize(); j++)
{
if (j > 0)
- ndbout << " ";
+ out << " ";
switch(r.getType())
{
case NdbDictionary::Column::Bigunsigned:
- ndbout << r.u_64_value();
+ out << r.u_64_value();
break;
case NdbDictionary::Column::Unsigned:
- ndbout << r.u_32_value();
+ out << r.u_32_value();
break;
case NdbDictionary::Column::Smallunsigned:
- ndbout << r.u_short_value();
+ out << r.u_short_value();
break;
case NdbDictionary::Column::Tinyunsigned:
- ndbout << (unsigned) r.u_char_value();
+ out << (unsigned) r.u_char_value();
break;
case NdbDictionary::Column::Bigint:
- ndbout << r.int64_value();
+ out << r.int64_value();
break;
case NdbDictionary::Column::Int:
- ndbout << r.int32_value();
+ out << r.int32_value();
break;
case NdbDictionary::Column::Smallint:
- ndbout << r.short_value();
+ out << r.short_value();
break;
case NdbDictionary::Column::Tinyint:
- ndbout << (int) r.char_value();
+ out << (int) r.char_value();
break;
case NdbDictionary::Column::Char:
- ndbout.print("%.*s", r.arraySize(), r.aRef());
+ out.print("%.*s", r.arraySize(), r.aRef());
j = r.arraySize();
break;
case NdbDictionary::Column::Varchar:
{
short len = ntohs(r.u_short_value());
- ndbout.print("%.*s", len, r.aRef()+2);
+ out.print("%.*s", len, r.aRef()+2);
}
j = r.arraySize();
break;
case NdbDictionary::Column::Float:
- ndbout << r.float_value();
+ out << r.float_value();
break;
case NdbDictionary::Column::Double:
- ndbout << r.double_value();
+ out << r.double_value();
break;
+ case NdbDictionary::Column::Blob:
+ {
+ const NdbBlob::Head* h = (const NdbBlob::Head*)r.aRef();
+ out << h->length << ":";
+ const unsigned char* p = (const unsigned char*)(h + 1);
+ unsigned n = r.arraySize() - sizeof(*h);
+ for (unsigned k = 0; k < n && k < h->length; k++)
+ out.print("%02X", (int)p[k]);
+ j = r.arraySize();
+ }
+ break;
+ case NdbDictionary::Column::Text:
+ {
+ const NdbBlob::Head* h = (const NdbBlob::Head*)r.aRef();
+ out << h->length << ":";
+ const unsigned char* p = (const unsigned char*)(h + 1);
+ unsigned n = r.arraySize() - sizeof(*h);
+ for (unsigned k = 0; k < n && k < h->length; k++)
+ out.print("%c", (int)p[k]);
+ j = r.arraySize();
+ }
+ break;
default: /* no print functions for the rest, just print type */
- ndbout << r.getType();
+ out << r.getType();
j = r.arraySize();
if (j > 1)
- ndbout << " %u times" << j;
+ out << " " << j << " times";
break;
}
}
if (r.arraySize() > 1)
{
- ndbout << "]";
+ out << "]";
}
- return ndbout;
+ return out;
}
diff --git a/ndb/src/ndbapi/NdbReceiver.cpp b/ndb/src/ndbapi/NdbReceiver.cpp
index 4c461698a4a..bdb5e6c7e78 100644
--- a/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/ndb/src/ndbapi/NdbReceiver.cpp
@@ -16,6 +16,10 @@
#include "NdbImpl.hpp"
#include <NdbReceiver.hpp>
+#include "NdbDictionaryImpl.hpp"
+#include <NdbRecAttr.hpp>
+#include <AttributeHeader.hpp>
+#include <NdbConnection.hpp>
NdbReceiver::NdbReceiver(Ndb *aNdb) :
theMagicNumber(0),
@@ -24,8 +28,19 @@ NdbReceiver::NdbReceiver(Ndb *aNdb) :
m_type(NDB_UNINITIALIZED),
m_owner(0)
{
+ theCurrentRecAttr = theFirstRecAttr = 0;
+ m_defined_rows = 0;
+ m_rows = new NdbRecAttr*[0];
}
+NdbReceiver::~NdbReceiver()
+{
+ if (m_id != NdbObjectIdMap::InvalidId) {
+ m_ndb->theNdbObjectIdMap->unmap(m_id, this);
+ }
+ delete[] m_rows;
+}
+
void
NdbReceiver::init(ReceiverType type, void* owner)
{
@@ -36,11 +51,175 @@ NdbReceiver::init(ReceiverType type, void* owner)
if (m_ndb)
m_id = m_ndb->theNdbObjectIdMap->map(this);
}
+
+ theFirstRecAttr = NULL;
+ theCurrentRecAttr = NULL;
+}
+
+void
+NdbReceiver::release(){
+ NdbRecAttr* tRecAttr = theFirstRecAttr;
+ while (tRecAttr != NULL)
+ {
+ NdbRecAttr* tSaveRecAttr = tRecAttr;
+ tRecAttr = tRecAttr->next();
+ m_ndb->releaseRecAttr(tSaveRecAttr);
+ }
+ theFirstRecAttr = NULL;
+ theCurrentRecAttr = NULL;
}
-NdbReceiver::~NdbReceiver()
+NdbRecAttr *
+NdbReceiver::getValue(const NdbColumnImpl* tAttrInfo, char * user_dst_ptr){
+ NdbRecAttr* tRecAttr = m_ndb->getRecAttr();
+ if(tRecAttr && !tRecAttr->setup(tAttrInfo, user_dst_ptr)){
+ if (theFirstRecAttr == NULL)
+ theFirstRecAttr = tRecAttr;
+ else
+ theCurrentRecAttr->next(tRecAttr);
+ theCurrentRecAttr = tRecAttr;
+ tRecAttr->next(NULL);
+ return tRecAttr;
+ }
+ if(tRecAttr){
+ m_ndb->releaseRecAttr(tRecAttr);
+ }
+ return 0;
+}
+
+#define KEY_ATTR_ID (~0)
+
+void
+NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
+ if(rows > m_defined_rows){
+ delete[] m_rows;
+ m_defined_rows = rows;
+ m_rows = new NdbRecAttr*[rows + 1];
+ }
+ m_rows[rows] = 0;
+
+ NdbColumnImpl key;
+ if(key_size){
+ key.m_attrId = KEY_ATTR_ID;
+ key.m_arraySize = key_size+1;
+ key.m_attrSize = 4;
+ key.m_nullable = true; // So that receive works w.r.t KEYINFO20
+ }
+ m_key_info = key_size;
+
+ for(Uint32 i = 0; i<rows; i++){
+ NdbRecAttr * prev = theCurrentRecAttr;
+ assert(prev == 0 || i > 0);
+
+ // Put key-recAttr first on each row
+ if(key_size && !getValue(&key, (char*)0)){
+ abort();
+ return ; // -1
+ }
+
+ NdbRecAttr* tRecAttr = org->theFirstRecAttr;
+ while(tRecAttr != 0){
+ if(getValue(&NdbColumnImpl::getImpl(*tRecAttr->m_column), (char*)0) != 0)
+ tRecAttr = tRecAttr->next();
+ else
+ break;
+ }
+
+ if(tRecAttr){
+ abort();
+ return ;// -1;
+ }
+
+ // Store first recAttr for each row in m_rows[i]
+ if(prev){
+ m_rows[i] = prev->next();
+ } else {
+ m_rows[i] = theFirstRecAttr;
+ }
+ }
+
+ prepareSend();
+ return ; //0;
+}
+
+void
+NdbReceiver::copyout(NdbReceiver & dstRec){
+ NdbRecAttr* src = m_rows[m_current_row++];
+ NdbRecAttr* dst = dstRec.theFirstRecAttr;
+ Uint32 tmp = m_key_info;
+ if(tmp > 0){
+ src = src->next();
+ }
+
+ while(dst){
+ Uint32 len = ((src->theAttrSize * src->theArraySize)+3)/4;
+ dst->receive_data((Uint32*)src->aRef(), src->isNULL() ? 0 : len);
+ src = src->next();
+ dst = dst->next();
+ }
+}
+
+int
+NdbReceiver::execTRANSID_AI(const Uint32* aDataPtr, Uint32 aLength)
{
- if (m_id != NdbObjectIdMap::InvalidId) {
- m_ndb->theNdbObjectIdMap->unmap(m_id, this);
+ bool ok = true;
+ NdbRecAttr* currRecAttr = theCurrentRecAttr;
+
+ for (Uint32 used = 0; used < aLength ; used++){
+ AttributeHeader ah(* aDataPtr++);
+ const Uint32 tAttrId = ah.getAttributeId();
+ const Uint32 tAttrSize = ah.getDataSize();
+
+ /**
+ * Set all results to NULL if not found...
+ */
+ while(currRecAttr && currRecAttr->attrId() != tAttrId){
+ ok &= currRecAttr->setNULL();
+ currRecAttr = currRecAttr->next();
+ }
+
+ if(ok && currRecAttr && currRecAttr->receive_data(aDataPtr, tAttrSize)){
+ used += tAttrSize;
+ aDataPtr += tAttrSize;
+ currRecAttr = currRecAttr->next();
+ } else {
+ ndbout_c("%p: ok: %d tAttrId: %d currRecAttr: %p",
+ this,ok, tAttrId, currRecAttr);
+ currRecAttr = theCurrentRecAttr;
+ while(currRecAttr != 0){
+ ndbout_c("%d ", currRecAttr->attrId());
+ currRecAttr = currRecAttr->next();
+ }
+ abort();
+ return -1;
+ }
}
+
+ theCurrentRecAttr = currRecAttr;
+
+ /**
+ * Update m_received_result_length
+ */
+ Uint32 tmp = m_received_result_length + aLength;
+ m_received_result_length = tmp;
+
+ return (tmp == m_expected_result_length ? 1 : 0);
+}
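
execTRANSID_AI() consumes a stream of AttributeHeader-prefixed values: one header word carrying the attribute id and the value length in words, followed by that many data words (zero words meaning NULL). A hedged sketch of walking such a stream with the same accessors used above; the callback is a made-up placeholder, not part of the NDB API.

    #include <AttributeHeader.hpp>

    // Sketch: iterate a TRANSID_AI word stream. handle_value() is a
    // hypothetical callback.
    static void walk_transid_ai(const Uint32* aDataPtr, Uint32 aLength,
                                void (*handle_value)(Uint32 attrId,
                                                     const Uint32* data,
                                                     Uint32 words))
    {
      Uint32 used = 0;
      while (used < aLength) {
        AttributeHeader ah(*aDataPtr++);         // header word
        const Uint32 attrId = ah.getAttributeId();
        const Uint32 words  = ah.getDataSize();  // value length in words
        handle_value(attrId, aDataPtr, words);   // words == 0 means NULL
        aDataPtr += words;
        used += 1 + words;
      }
    }
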
+
+int
+NdbReceiver::execKEYINFO20(Uint32 info, const Uint32* aDataPtr, Uint32 aLength)
+{
+ NdbRecAttr* currRecAttr = m_rows[m_current_row++];
+ assert(currRecAttr->attrId() == KEY_ATTR_ID);
+ currRecAttr->receive_data(aDataPtr, aLength + 1);
+
+ /**
+ * Save scanInfo at the end of the keyinfo
+ */
+ ((Uint32*)currRecAttr->aRef())[aLength] = info;
+
+ Uint32 tmp = m_received_result_length + aLength;
+ m_received_result_length = tmp;
+
+ return (tmp == m_expected_result_length ? 1 : 0);
}
diff --git a/ndb/src/ndbapi/NdbResultSet.cpp b/ndb/src/ndbapi/NdbResultSet.cpp
index 65ed43f60d8..f270584d227 100644
--- a/ndb/src/ndbapi/NdbResultSet.cpp
+++ b/ndb/src/ndbapi/NdbResultSet.cpp
@@ -29,8 +29,9 @@
#include <Ndb.hpp>
#include <NdbConnection.hpp>
#include <NdbResultSet.hpp>
+#include <NdbBlob.hpp>
-NdbResultSet::NdbResultSet(NdbCursorOperation *owner)
+NdbResultSet::NdbResultSet(NdbScanOperation *owner)
: m_operation(owner)
{
}
@@ -45,7 +46,25 @@ void NdbResultSet::init()
int NdbResultSet::nextResult(bool fetchAllowed)
{
- return m_operation->nextResult(fetchAllowed);
+ int res;
+ if ((res = m_operation->nextResult(fetchAllowed)) == 0) {
+ // handle blobs
+ NdbBlob* tBlob = m_operation->theBlobList;
+ while (tBlob != 0) {
+ if (tBlob->atNextResult() == -1)
+ return -1;
+ tBlob = tBlob->theNext;
+ }
+ /*
+ * Flush blob part ops on behalf of user because
+ * - nextResult is analogous to execute(NoCommit)
+ * - user is likely to want blob value before next execute
+ */
+ if (m_operation->m_transConnection->executePendingBlobOps() == -1)
+ return -1;
+ return 0;
+ }
+ return res;
}
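
With the blob flushing folded into nextResult(), a plain scan loop stays simple. The following is an illustrative sketch only: the table and column names ("MYTAB", "a") are made up, error handling is abbreviated, and it assumes an already connected Ndb object.

    // Sketch of a read-only table scan through NdbResultSet::nextResult().
    NdbConnection* trans = ndb->startTransaction();
    NdbScanOperation* scan = trans->getNdbScanOperation("MYTAB");   // made-up table
    NdbResultSet* rs = scan->readTuples(NdbScanOperation::LM_Read, 0, 0);
    NdbRecAttr* col = scan->getValue("a");                          // made-up column
    if (trans->execute(NoCommit) == 0) {
      while (rs->nextResult(true) == 0)       // 0 = a row is available
        ndbout << *col << endl;
    }
    ndb->closeTransaction(trans);
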
void NdbResultSet::close()
@@ -55,52 +74,30 @@ void NdbResultSet::close()
NdbOperation*
NdbResultSet::updateTuple(){
- if(m_operation->cursorType() != NdbCursorOperation::ScanCursor){
- m_operation->setErrorCode(4003);
- return 0;
- }
-
- NdbScanOperation * op = (NdbScanOperation*)(m_operation);
- return op->takeOverScanOp(NdbOperation::UpdateRequest,
- op->m_transConnection);
+ return updateTuple(m_operation->m_transConnection);
}
NdbOperation*
NdbResultSet::updateTuple(NdbConnection* takeOverTrans){
- if(m_operation->cursorType() != NdbCursorOperation::ScanCursor){
- m_operation->setErrorCode(4003);
- return 0;
- }
-
return m_operation->takeOverScanOp(NdbOperation::UpdateRequest,
takeOverTrans);
}
int
NdbResultSet::deleteTuple(){
- if(m_operation->cursorType() != NdbCursorOperation::ScanCursor){
- m_operation->setErrorCode(4003);
- return 0;
- }
-
- NdbScanOperation * op = (NdbScanOperation*)(m_operation);
- void * res = op->takeOverScanOp(NdbOperation::DeleteRequest,
- op->m_transConnection);
- if(res == 0)
- return -1;
- return 0;
+ return deleteTuple(m_operation->m_transConnection);
}
int
NdbResultSet::deleteTuple(NdbConnection * takeOverTrans){
- if(m_operation->cursorType() != NdbCursorOperation::ScanCursor){
- m_operation->setErrorCode(4003);
- return 0;
- }
-
void * res = m_operation->takeOverScanOp(NdbOperation::DeleteRequest,
takeOverTrans);
if(res == 0)
return -1;
return 0;
}
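
Both convenience variants above simply reuse the scan's own transaction for the take-over. A hedged sketch of scan-for-delete built on them; names are illustrative and error paths are omitted.

    // Sketch: exclusive scan that deletes every scanned row via take-over.
    NdbConnection* trans = ndb->startTransaction();
    NdbScanOperation* scan = trans->getNdbScanOperation("MYTAB");    // made-up table
    NdbResultSet* rs = scan->readTuples(NdbScanOperation::LM_Exclusive, 0, 0);
    trans->execute(NoCommit);
    while (rs->nextResult(true) == 0) {
      if (rs->deleteTuple() == -1)       // take over the current row
        break;
      trans->execute(NoCommit);          // send the take-over operation
    }
    trans->execute(Commit);
    ndb->closeTransaction(trans);
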
+
+int
+NdbResultSet::restart(){
+ return m_operation->restart();
+}
diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp
index 9542b226d7d..3813ab139de 100644
--- a/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -337,7 +337,6 @@ static const tab2 table2[] = {
const int tab_sz = sizeof(table)/sizeof(table[0]);
const int tab2_sz = sizeof(table2)/sizeof(table2[0]);
-template<typename T>
int
matchType(const NdbDictionary::Column * col){
return 1;
@@ -382,7 +381,7 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
return -1;
}
- if(!matchType<T>(col)){
+ if(!matchType(col)){
/**
* Code not reached
*/
@@ -777,3 +776,10 @@ main(void){
return 0;
}
#endif
+
+template class Vector<NdbScanFilterImpl::State>;
+#if __SUNPRO_CC != 0x560
+template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint32);
+template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint64);
+#endif
+
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index cc090ac0364..0aa40f968bb 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -14,49 +14,62 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbScanOperation.cpp
- * Include:
- * Link:
- * Author: UABMASD Martin Sköld INN/V Alzato
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Table scan support
- * Documentation:
- * Adjust: 2002-04-01 UABMASD First version.
- ****************************************************************************/
-
#include <ndb_global.h>
#include <Ndb.hpp>
#include <NdbScanOperation.hpp>
+#include <NdbIndexScanOperation.hpp>
#include <NdbConnection.hpp>
#include <NdbResultSet.hpp>
#include "NdbApiSignal.hpp"
#include <NdbOut.hpp>
#include "NdbDictionaryImpl.hpp"
-#include "NdbBlob.hpp"
+
+#include <NdbRecAttr.hpp>
+#include <NdbReceiver.hpp>
+
+#include <stdlib.h>
+#include <NdbSqlUtil.hpp>
+
+#include <signaldata/ScanTab.hpp>
+#include <signaldata/KeyInfo.hpp>
+#include <signaldata/TcKeyReq.hpp>
NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
- NdbCursorOperation(aNdb),
- m_transConnection(NULL),
- m_autoExecute(false),
- m_updateOp(false),
- m_deleteOp(false),
- m_setValueList(new SetValueRecList())
+ NdbOperation(aNdb),
+ m_resultSet(0),
+ m_transConnection(NULL)
{
+ theParallelism = 0;
+ m_allocated_receivers = 0;
+ m_prepared_receivers = 0;
+ m_api_receivers = 0;
+ m_conf_receivers = 0;
+ m_sent_receivers = 0;
+ m_receivers = 0;
+ m_array = new Uint32[1]; // skip if on delete in fix_receivers
}
NdbScanOperation::~NdbScanOperation()
{
- if (m_setValueList) delete m_setValueList;
+ for(Uint32 i = 0; i<m_allocated_receivers; i++){
+ theNdb->releaseNdbScanRec(m_receivers[i]);
+ }
+ delete[] m_array;
+ if (m_resultSet)
+ delete m_resultSet;
}
-NdbCursorOperation::CursorType
-NdbScanOperation::cursorType()
+NdbResultSet*
+NdbScanOperation::getResultSet()
{
- return NdbCursorOperation::ScanCursor;
+ if (!m_resultSet)
+ m_resultSet = new NdbResultSet(this);
+
+ return m_resultSet;
}
+
+
void
NdbScanOperation::setErrorCode(int aErrorCode){
NdbConnection* tmp = theNdbCon;
@@ -91,288 +104,543 @@ NdbScanOperation::init(NdbTableImpl* tab, NdbConnection* myConnection)
setErrorCodeAbort(theNdb->getNdbError().code);
return -1;
}
- aScanConnection->theFirstOpInList = this;
- aScanConnection->theLastOpInList = this;
- NdbCursorOperation::cursInit();
+
// NOTE! The hupped trans becomes the owner of the operation
- return NdbOperation::init(tab, aScanConnection);
+ if(NdbOperation::init(tab, aScanConnection) != 0){
+ return -1;
+ }
+
+ initInterpreter();
+
+ theStatus = GetValue;
+ theOperationType = OpenScanRequest;
+
+ theTotalBoundAI_Len = 0;
+ theBoundATTRINFO = NULL;
+
+ return 0;
}
-NdbResultSet* NdbScanOperation::readTuples(Uint32 parallell,
- NdbCursorOperation::LockMode lm)
+NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
+ Uint32 batch,
+ Uint32 parallel)
{
- int res = 0;
+ m_ordered = 0;
+
+ Uint32 fragCount = m_currentTable->m_fragmentCount;
+
+ if (batch + parallel == 0) {
+ batch = 16;
+ parallel= fragCount;
+ } else {
+ if (batch == 0 && parallel > 0) { // Backward
+ batch = (parallel >= 16 ? 16 : parallel);
+ parallel = (parallel + 15) / 16;
+ }
+ if (parallel > fragCount || parallel == 0)
+ parallel = fragCount;
+ }
+
+ // It is only possible to call openScan if
+ // 1. this transaction doesn't already contain another scan operation
+ // 2. this transaction doesn't already contain other operations
+ // 3. theScanOp contains a NdbScanOperation
+ if (theNdbCon->theScanningOp != NULL){
+ setErrorCode(4605);
+ return 0;
+ }
+
+ theNdbCon->theScanningOp = this;
+
+ bool lockExcl, lockHoldMode, readCommitted;
switch(lm){
- case NdbCursorOperation::LM_Read:
- parallell = (parallell == 0 ? 240 : parallell);
- res = openScan(parallell, false, true, false);
+ case NdbScanOperation::LM_Read:
+ lockExcl = false;
+ lockHoldMode = true;
+ readCommitted = false;
break;
- case NdbCursorOperation::LM_Exclusive:
- parallell = (parallell == 0 ? 1 : parallell);
- res = openScan(parallell, true, true, false);
+ case NdbScanOperation::LM_Exclusive:
+ lockExcl = true;
+ lockHoldMode = true;
+ readCommitted = false;
break;
- case NdbCursorOperation::LM_Dirty:
- parallell = (parallell == 0 ? 240 : parallell);
- res = openScan(parallell, false, false, true);
+ case NdbScanOperation::LM_CommittedRead:
+ lockExcl = false;
+ lockHoldMode = false;
+ readCommitted = true;
break;
default:
- res = -1;
setErrorCode(4003);
+ return 0;
}
- if(res == -1){
- return NULL;
- }
- theNdbCon->theFirstOpInList = 0;
- theNdbCon->theLastOpInList = 0;
- return getResultSet();
-}
+ m_keyInfo = lockExcl;
-int NdbScanOperation::updateTuples(Uint32 parallelism)
-{
- if (openScanExclusive(parallelism) == -1) {
- return -1;
+ bool range = false;
+ if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
+ m_accessTable->m_indexType == NdbDictionary::Index::UniqueOrderedIndex){
+ if (m_currentTable == m_accessTable){
+ // Old way of scanning indexes, should not be allowed
+ m_currentTable = theNdb->theDictionary->
+ getTable(m_currentTable->m_primaryTable.c_str());
+ assert(m_currentTable != NULL);
+ }
+ assert (m_currentTable != m_accessTable);
+ // Modify operation state
+ theStatus = SetBound;
+ theOperationType = OpenRangeScanRequest;
+ range = true;
}
- theNdbCon->theFirstOpInList = 0;
- theNdbCon->theLastOpInList = 0;
-
- m_updateOp = true;
-
- return 0;
-}
+
+ theParallelism = parallel;
+ theBatchSize = batch;
-int NdbScanOperation::deleteTuples(Uint32 parallelism)
-{
- if (openScanExclusive(parallelism) == -1) {
- return -1;
+ if(fix_receivers(parallel) == -1){
+ setErrorCodeAbort(4000);
+ return 0;
}
- theNdbCon->theFirstOpInList = 0;
- theNdbCon->theLastOpInList = 0;
-
- m_deleteOp = true;
-
- return 0;
-}
-
-int NdbScanOperation::setValue(const char* anAttrName, const char* aValue, Uint32 len)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
- m_setValueList->add(anAttrName, aValue, len);
- return 0;
-}
-
-int NdbScanOperation::setValue(const char* anAttrName, Int32 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
-
- m_setValueList->add(anAttrName, aValue);
- return 0;
-}
-
-int NdbScanOperation::setValue(const char* anAttrName, Uint32 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
-
- m_setValueList->add(anAttrName, aValue);
- return 0;
-}
-
-int NdbScanOperation::setValue(const char* anAttrName, Uint64 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
-
- m_setValueList->add(anAttrName, aValue);
- return 0;
-}
-
-int NdbScanOperation::setValue(const char* anAttrName, Int64 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
-
- m_setValueList->add(anAttrName, aValue);
- return 0;
-}
-
-int NdbScanOperation::setValue(const char* anAttrName, float aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
+ theSCAN_TABREQ = theNdb->getSignal();
+ if (theSCAN_TABREQ == NULL) {
+ setErrorCodeAbort(4000);
+ return 0;
+ }//if
+
+ ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+ req->apiConnectPtr = theNdbCon->theTCConPtr;
+ req->tableId = m_accessTable->m_tableId;
+ req->tableSchemaVersion = m_accessTable->m_version;
+ req->storedProcId = 0xFFFF;
+ req->buddyConPtr = theNdbCon->theBuddyConPtr;
+
+ Uint32 reqInfo = 0;
+ ScanTabReq::setParallelism(reqInfo, parallel);
+ ScanTabReq::setScanBatch(reqInfo, batch);
+ ScanTabReq::setLockMode(reqInfo, lockExcl);
+ ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode);
+ ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted);
+ ScanTabReq::setRangeScanFlag(reqInfo, range);
+ req->requestInfo = reqInfo;
- m_setValueList->add(anAttrName, aValue);
- return 0;
-}
+ Uint64 transId = theNdbCon->getTransactionId();
+ req->transId1 = (Uint32) transId;
+ req->transId2 = (Uint32) (transId >> 32);
-int NdbScanOperation::setValue(const char* anAttrName, double aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrName) == NULL)
- return -1;
+ getFirstATTRINFOScan();
- m_setValueList->add(anAttrName, aValue);
- return 0;
+ return getResultSet();
}
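
The arithmetic at the top of readTuples() maps the old single parallelism argument onto the new (batch, parallel) pair and clamps parallelism to the table's fragment count. A stand-alone sketch of that derivation, using the same numbers as the code above:

    // Sketch of the defaulting done in readTuples(): batch and parallel are
    // in/out parameters, fragCount is the table's fragment count.
    static void derive_scan_params(Uint32& batch, Uint32& parallel,
                                   Uint32 fragCount)
    {
      if (batch + parallel == 0) {            // nothing specified: defaults
        batch = 16;
        parallel = fragCount;
      } else {
        if (batch == 0 && parallel > 0) {     // old-style single argument
          batch = (parallel >= 16 ? 16 : parallel);
          parallel = (parallel + 15) / 16;
        }
        if (parallel > fragCount || parallel == 0)
          parallel = fragCount;               // clamp to what exists
      }
    }
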
-
-int NdbScanOperation::setValue(Uint32 anAttrId, const char* aValue, Uint32 len)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
-
- m_setValueList->add(anAttrId, aValue, len);
+int
+NdbScanOperation::fix_receivers(Uint32 parallel){
+ assert(parallel > 0);
+ if(parallel > m_allocated_receivers){
+ const Uint32 sz = parallel * (4*sizeof(char*)+sizeof(Uint32));
+
+ Uint32 * tmp = new Uint32[(sz+3)/4];
+ // Save old receivers
+ memcpy(tmp+parallel, m_receivers, m_allocated_receivers*sizeof(char*));
+ delete[] m_array;
+ m_array = tmp;
+
+ m_prepared_receivers = tmp;
+ m_receivers = (NdbReceiver**)(tmp + parallel);
+ m_api_receivers = m_receivers + parallel;
+ m_conf_receivers = m_api_receivers + parallel;
+ m_sent_receivers = m_conf_receivers + parallel;
+
+ // Only get/init "new" receivers
+ NdbReceiver* tScanRec;
+ for (Uint32 i = m_allocated_receivers; i < parallel; i ++) {
+ tScanRec = theNdb->getNdbScanRec();
+ if (tScanRec == NULL) {
+ setErrorCodeAbort(4000);
+ return -1;
+ }//if
+ m_receivers[i] = tScanRec;
+ tScanRec->init(NdbReceiver::NDB_SCANRECEIVER, this);
+ }
+ m_allocated_receivers = parallel;
+ }
+
+ for(Uint32 i = 0; i<parallel; i++){
+ m_receivers[i]->m_list_index = i;
+ m_prepared_receivers[i] = m_receivers[i]->getId();
+ m_sent_receivers[i] = m_receivers[i];
+ m_conf_receivers[i] = 0;
+ m_api_receivers[i] = 0;
+ }
+
+ m_api_receivers_count = 0;
+ m_current_api_receiver = 0;
+ m_sent_receivers_count = parallel;
+ m_conf_receivers_count = 0;
return 0;
}
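
fix_receivers() makes a single allocation and carves it into five consecutive regions: one Uint32 receiver id per parallel scan followed by four NdbReceiver* arrays (all receivers, API-owned, confirmed, sent). A sketch of that partitioning, kept separate from the real member variables:

    // Sketch: partition the single block allocated by fix_receivers().
    class NdbReceiver;   // only the pointer type is needed here

    struct ScanReceiverArrays {
      Uint32*       prepared;   // receiver ids handed to TC
      NdbReceiver** receivers;  // all allocated receivers
      NdbReceiver** api;        // receivers with rows ready for the API
      NdbReceiver** conf;       // confirmed but not yet consumed
      NdbReceiver** sent;       // still outstanding in the kernel
    };

    static ScanReceiverArrays carve(Uint32* block, Uint32 parallel)
    {
      ScanReceiverArrays a;
      a.prepared  = block;
      a.receivers = reinterpret_cast<NdbReceiver**>(block + parallel);
      a.api       = a.receivers + parallel;
      a.conf      = a.api + parallel;
      a.sent      = a.conf + parallel;
      return a;
    }
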
-int NdbScanOperation::setValue(Uint32 anAttrId, Int32 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
-
- m_setValueList->add(anAttrId, aValue);
- return 0;
+/**
+ * Move receiver from the sent array to the confirmed array
+ */
+void
+NdbScanOperation::receiver_delivered(NdbReceiver* tRec){
+ if(theError.code == 0){
+ Uint32 idx = tRec->m_list_index;
+ Uint32 last = m_sent_receivers_count - 1;
+ if(idx != last){
+ NdbReceiver * move = m_sent_receivers[last];
+ m_sent_receivers[idx] = move;
+ move->m_list_index = idx;
+ }
+ m_sent_receivers_count = last;
+
+ last = m_conf_receivers_count;
+ m_conf_receivers[last] = tRec;
+ m_conf_receivers_count = last + 1;
+ tRec->m_list_index = last;
+ tRec->m_current_row = 0;
+ }
}
-int NdbScanOperation::setValue(Uint32 anAttrId, Uint32 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
-
- m_setValueList->add(anAttrId, aValue);
- return 0;
+/**
+ * Remove receiver as it's completed
+ */
+void
+NdbScanOperation::receiver_completed(NdbReceiver* tRec){
+ if(theError.code == 0){
+ Uint32 idx = tRec->m_list_index;
+ Uint32 last = m_sent_receivers_count - 1;
+ if(idx != last){
+ NdbReceiver * move = m_sent_receivers[last];
+ m_sent_receivers[idx] = move;
+ move->m_list_index = idx;
+ }
+ m_sent_receivers_count = last;
+ }
}
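
Both helpers above take a receiver out of the sent array with the swap-with-last idiom, which keeps the array dense at the cost of ordering. A generic sketch of that idiom (the real code additionally updates the moved receiver's m_list_index):

    // Sketch: unordered removal by moving the last element into the hole.
    template <typename T>
    static void swap_remove(T* arr, Uint32& count, Uint32 idx)
    {
      const Uint32 last = count - 1;
      if (idx != last)
        arr[idx] = arr[last];   // overwrite the removed slot
      count = last;             // array stays dense; order is not preserved
    }
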
-int NdbScanOperation::setValue(Uint32 anAttrId, Uint64 aValue)
+/*****************************************************************************
+ * int getFirstATTRINFOScan()
+ *
+ * Return Value: Return 0: Successful
+ * Return -1: All other cases
+ * Parameters: None: Only allocate the first signal.
+ * Remark: When a scan is defined we need to use this method instead
+ * of insertATTRINFO for the first signal.
+ * This is because we do not want to clutter the code in
+ * insertATTRINFO with if statements, since we are not
+ * interested in the TCKEYREQ signal here.
+ *****************************************************************************/
+int
+NdbScanOperation::getFirstATTRINFOScan()
{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
-
- m_setValueList->add(anAttrId, aValue);
- return 0;
-}
+ NdbApiSignal* tSignal;
-int NdbScanOperation::setValue(Uint32 anAttrId, Int64 aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
+ tSignal = theNdb->getSignal();
+ if (tSignal == NULL){
+ setErrorCodeAbort(4000);
+ return -1;
+ }
+ tSignal->setSignal(m_attrInfoGSN);
+ theAI_LenInCurrAI = 8;
+ theATTRINFOptr = &tSignal->getDataPtrSend()[8];
+ theFirstATTRINFO = tSignal;
+ theCurrentATTRINFO = tSignal;
+ theCurrentATTRINFO->next(NULL);
- m_setValueList->add(anAttrId, aValue);
return 0;
}
-int NdbScanOperation::setValue(Uint32 anAttrId, float aValue)
-{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
+/**
+ * Constants for theTupleKeyDefined[][0]
+ */
+#define SETBOUND_EQ 1
+#define FAKE_PTR 2
+#define API_PTR 3
- m_setValueList->add(anAttrId, aValue);
- return 0;
-}
-int NdbScanOperation::setValue(Uint32 anAttrId, double aValue)
+/*
+ * After setBound() are done, move the accumulated ATTRINFO signals to
+ * a separate list. Then continue with normal scan.
+ */
+int
+NdbIndexScanOperation::saveBoundATTRINFO()
{
- // Check if attribute exist
- if (m_currentTable->getColumn(anAttrId) == NULL)
- return -1;
-
- m_setValueList->add(anAttrId, aValue);
- return 0;
+ theCurrentATTRINFO->setLength(theAI_LenInCurrAI);
+ theBoundATTRINFO = theFirstATTRINFO;
+ theTotalBoundAI_Len = theTotalCurrAI_Len;
+ theTotalCurrAI_Len = 5;
+ theBoundATTRINFO->setData(theTotalBoundAI_Len, 4);
+ theBoundATTRINFO->setData(0, 5);
+ theBoundATTRINFO->setData(0, 6);
+ theBoundATTRINFO->setData(0, 7);
+ theBoundATTRINFO->setData(0, 8);
+ theStatus = GetValue;
+
+ int res = getFirstATTRINFOScan();
+
+ /**
+ * Define a getValue for each key column (if ordered),
+ * except the ones with an EQ bound
+ */
+ if(!res && m_ordered){
+
+ /**
+ * If setBound EQ
+ */
+ Uint32 i = 0;
+ while(theTupleKeyDefined[i][0] == SETBOUND_EQ)
+ i++;
+
+
+ Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
+ m_sort_columns = cnt - i;
+ for(; i<cnt; i++){
+ NdbColumnImpl* key = m_accessTable->m_index->m_columns[i];
+ NdbColumnImpl* col = m_currentTable->getColumn(key->m_keyInfoPos);
+ NdbRecAttr* tmp = NdbScanOperation::getValue_impl(col, (char*)-1);
+ UintPtr newVal = UintPtr(tmp);
+ theTupleKeyDefined[i][0] = FAKE_PTR;
+ theTupleKeyDefined[i][1] = (newVal & 0xFFFFFFFF);
+#if (SIZEOF_CHARP == 8)
+ theTupleKeyDefined[i][2] = (newVal >> 32);
+#endif
+ }
+ }
+ return res;
}
-NdbBlob*
-NdbScanOperation::getBlobHandle(const char* anAttrName)
-{
- return NdbOperation::getBlobHandle(m_transConnection, m_currentTable->getColumn(anAttrName));
-}
+#define WAITFOR_SCAN_TIMEOUT 120000
-NdbBlob*
-NdbScanOperation::getBlobHandle(Uint32 anAttrId)
-{
- return NdbOperation::getBlobHandle(m_transConnection, m_currentTable->getColumn(anAttrId));
+int
+NdbScanOperation::executeCursor(int nodeId){
+ NdbConnection * tCon = theNdbCon;
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+ Uint32 seq = tCon->theNodeSequence;
+ if (tp->get_node_alive(nodeId) &&
+ (tp->getNodeSequence(nodeId) == seq)) {
+
+ if(prepareSendScan(tCon->theTCConPtr, tCon->theTransactionId) == -1)
+ return -1;
+
+ tCon->theMagicNumber = 0x37412619;
+
+ if (doSendScan(nodeId) == -1)
+ return -1;
+
+ return 0;
+ } else {
+ if (!(tp->get_node_stopping(nodeId) &&
+ (tp->getNodeSequence(nodeId) == seq))){
+ TRACE_DEBUG("The node is hard dead when attempting to start a scan");
+ setErrorCode(4029);
+ tCon->theReleaseOnClose = true;
+ abort();
+ } else {
+ TRACE_DEBUG("The node is stopping when attempting to start a scan");
+ setErrorCode(4030);
+ }//if
+ tCon->theCommitStatus = NdbConnection::Aborted;
+ }//if
+ return -1;
}
-// Private methods
+#define DEBUG_NEXT_RESULT 0
-int NdbScanOperation::executeCursor(int ProcessorId)
+int NdbScanOperation::nextResult(bool fetchAllowed)
{
- int result = theNdbCon->executeScan();
- // If the scan started ok and we are updating or deleting
- // iterate over all tuples
- if ((m_updateOp) || (m_deleteOp)) {
- NdbOperation* newOp;
-
- while ((result != -1) && (nextResult() == 0)) {
- if (m_updateOp) {
- newOp = takeOverScanOp(UpdateRequest, m_transConnection);
- // Pass setValues from scan operation to new operation
- m_setValueList->iterate(SetValueRecList::callSetValueFn, *newOp);
- // No need to call updateTuple since scan was taken over for update
- // it should be the same with delete - MASV
- // newOp->updateTuple();
+ if(m_ordered)
+ return ((NdbIndexScanOperation*)this)->next_result_ordered(fetchAllowed);
+
+ /**
+ * Check current receiver
+ */
+ int retVal = 2;
+ Uint32 idx = m_current_api_receiver;
+ Uint32 last = m_api_receivers_count;
+
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("nextResult(%d) idx=%d last=%d", fetchAllowed, idx, last);
+
+ /**
+ * Check next buckets
+ */
+ for(; idx < last; idx++){
+ NdbReceiver* tRec = m_api_receivers[idx];
+ if(tRec->nextResult()){
+ tRec->copyout(theReceiver);
+ retVal = 0;
+ break;
+ }
+ }
+
+ /**
+ * We have advanced at least one bucket
+ */
+ if(!fetchAllowed || !retVal){
+ m_current_api_receiver = idx;
+ if(DEBUG_NEXT_RESULT) ndbout_c("return %d", retVal);
+ return retVal;
+ }
+
+ Uint32 nodeId = theNdbCon->theDBnode;
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+ Uint32 seq = theNdbCon->theNodeSequence;
+ if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false) == 0){
+
+ idx = m_current_api_receiver;
+ last = m_api_receivers_count;
+
+ do {
+ if(theError.code){
+ setErrorCode(theError.code);
+ if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
+ return -1;
}
- else if (m_deleteOp) {
- newOp = takeOverScanOp(DeleteRequest, m_transConnection);
- // newOp->deleteTuple();
+
+ Uint32 cnt = m_conf_receivers_count;
+ Uint32 sent = m_sent_receivers_count;
+
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("idx=%d last=%d cnt=%d sent=%d", idx, last, cnt, sent);
+
+ if(cnt > 0){
+ /**
+ * Just move completed receivers
+ */
+ memcpy(m_api_receivers+last, m_conf_receivers, cnt * sizeof(char*));
+ last += cnt;
+ m_conf_receivers_count = 0;
+ } else if(retVal == 2 && sent > 0){
+ /**
+ * No completed...
+ */
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) {
+ continue;
+ } else {
+ idx = last;
+ retVal = -2; //return_code;
+ }
+ } else if(retVal == 2){
+ /**
+ * No completed & no sent -> EndOfData
+ */
+ if(send_next_scan(0, true) == 0){ // Close scan
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) {
+ theError.code = -1; // make sure user gets error if he tries again
+ if(DEBUG_NEXT_RESULT) ndbout_c("return 1");
+ return 1;
+ }
+ retVal = -1; //return_code;
+ } else {
+ retVal = -3;
+ }
+ idx = last;
}
-#if 0
- // takeOverScanOp will take over the lock that scan acquired
- // the lock is released when nextScanResult is called
- // That means that the "takeover" has to be sent to the kernel
- // before nextScanresult is called - MASV
- if (m_autoExecute){
- m_transConnection->execute(NoCommit);
+
+ if(retVal == 0)
+ break;
+
+ for(; idx < last; idx++){
+ NdbReceiver* tRec = m_api_receivers[idx];
+ if(tRec->nextResult()){
+ tRec->copyout(theReceiver);
+ retVal = 0;
+ break;
+ }
}
-#else
- m_transConnection->execute(NoCommit);
-#endif
- }
- closeScan();
+ } while(retVal == 2);
+ } else {
+ retVal = -3;
}
-
- return result;
+
+ m_api_receivers_count = last;
+ m_current_api_receiver = idx;
+
+ switch(retVal){
+ case 0:
+ case 1:
+ case 2:
+ if(DEBUG_NEXT_RESULT) ndbout_c("return %d", retVal);
+ return retVal;
+ case -1:
+ setErrorCode(4008); // Timeout
+ break;
+ case -2:
+ setErrorCode(4028); // Node fail
+ break;
+ case -3: // send_next_scan -> return fail (set error-code self)
+ break;
+ }
+
+ theNdbCon->theTransactionIsStarted = false;
+ theNdbCon->theReleaseOnClose = true;
+ if(DEBUG_NEXT_RESULT) ndbout_c("return -1", retVal);
+ return -1;
}
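
nextResult() returns 0 when a row is available, 1 at end of data, 2 when no locally buffered row is left and fetchAllowed was false, and -1 on error. That contract allows the usual two-level loop, sketched below with a made-up per-row function; rows are drained from local buffers before a blocking fetch is requested.

    // Sketch: drain buffered rows with nextResult(false), then allow a
    // fetch with nextResult(true).  rs is an NdbResultSet from readTuples();
    // process_row() is a hypothetical placeholder.
    int r;
    while ((r = rs->nextResult(true)) == 0) {        // fetch allowed
      do {
        process_row();                               // use the current row
      } while ((r = rs->nextResult(false)) == 0);    // buffered rows only
      if (r == -1)
        break;                                       // error
      // r == 2: buffers are empty, loop around and fetch again
    }
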
-int NdbScanOperation::nextResult(bool fetchAllowed)
-{
- int result = theNdbCon->nextScanResult(fetchAllowed);
- if (result == -1){
- // Move the error code from hupped transaction
- // to the real trans
- const NdbError err = theNdbCon->getNdbError();
- m_transConnection->setOperationErrorCode(err.code);
- }
- if (result == 0) {
- // handle blobs
- NdbBlob* tBlob = theBlobList;
- while (tBlob != NULL) {
- if (tBlob->atNextResult() == -1)
- return -1;
- tBlob = tBlob->theNext;
+int
+NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag){
+ if(cnt > 0 || stopScanFlag){
+ NdbApiSignal tSignal(theNdb->theMyRef);
+ tSignal.setSignal(GSN_SCAN_NEXTREQ);
+
+ Uint32* theData = tSignal.getDataPtrSend();
+ theData[0] = theNdbCon->theTCConPtr;
+ theData[1] = stopScanFlag == true ? 1 : 0;
+ Uint64 transId = theNdbCon->theTransactionId;
+ theData[2] = transId;
+ theData[3] = (Uint32) (transId >> 32);
+
+ /**
+ * Prepare ops
+ */
+ Uint32 last = m_sent_receivers_count;
+ Uint32 * prep_array = (cnt > 21 ? m_prepared_receivers : theData + 4);
+ for(Uint32 i = 0; i<cnt; i++){
+ NdbReceiver * tRec = m_api_receivers[i];
+ m_sent_receivers[last+i] = tRec;
+ tRec->m_list_index = last+i;
+ prep_array[i] = tRec->m_tcPtrI;
+ tRec->prepareSend();
}
+ memcpy(&m_api_receivers[0], &m_api_receivers[cnt], cnt * sizeof(char*));
+
+ Uint32 nodeId = theNdbCon->theDBnode;
+ TransporterFacade * tp = TransporterFacade::instance();
+ int ret;
+ if(cnt > 21){
+ tSignal.setLength(4);
+ LinearSectionPtr ptr[3];
+ ptr[0].p = prep_array;
+ ptr[0].sz = cnt;
+ ret = tp->sendFragmentedSignal(&tSignal, nodeId, ptr, 1);
+ } else {
+ tSignal.setLength(4+cnt);
+ ret = tp->sendSignal(&tSignal, nodeId);
+ }
+
+ m_sent_receivers_count = last + cnt + stopScanFlag;
+ m_api_receivers_count -= cnt;
+ m_current_api_receiver = 0;
+
+ return ret;
}
- return result;
+ return 0;
}
int
NdbScanOperation::prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId)
{
printf("NdbScanOperation::prepareSend\n");
+ abort();
return 0;
}
@@ -385,300 +653,834 @@ NdbScanOperation::doSend(int ProcessorId)
void NdbScanOperation::closeScan()
{
- if(theNdbCon){
- if (theNdbCon->stopScan() == -1)
- theError = theNdbCon->getNdbError();
- theNdb->closeTransaction(theNdbCon);
- theNdbCon = 0;
- }
+ if(m_transConnection) do {
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("closeScan() theError.code = %d "
+ "m_api_receivers_count = %d "
+ "m_conf_receivers_count = %d "
+ "m_sent_receivers_count = %d",
+ theError.code,
+ m_api_receivers_count,
+ m_conf_receivers_count,
+ m_sent_receivers_count);
+
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+
+ Uint32 seq = theNdbCon->theNodeSequence;
+ Uint32 nodeId = theNdbCon->theDBnode;
+
+ if(seq != tp->getNodeSequence(nodeId)){
+ theNdbCon->theReleaseOnClose = true;
+ break;
+ }
+
+ while(theError.code == 0 && m_sent_receivers_count){
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ switch(return_code){
+ case 0:
+ break;
+ case -1:
+ setErrorCode(4008);
+ case -2:
+ m_api_receivers_count = 0;
+ m_conf_receivers_count = 0;
+ m_sent_receivers_count = 0;
+ theNdbCon->theReleaseOnClose = true;
+ }
+ }
+
+ if(m_api_receivers_count+m_conf_receivers_count){
+ // Send close scan
+ send_next_scan(0, true); // Close scan
+ }
+
+ /**
+ * wait for close scan conf
+ */
+ while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count){
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ switch(return_code){
+ case 0:
+ break;
+ case -1:
+ setErrorCode(4008);
+ case -2:
+ m_api_receivers_count = 0;
+ m_conf_receivers_count = 0;
+ m_sent_receivers_count = 0;
+ }
+ }
+ } while(0);
+
+ theNdbCon->theScanningOp = 0;
+ theNdb->closeTransaction(theNdbCon);
+
+ theNdbCon = 0;
m_transConnection = NULL;
}
-void NdbScanOperation::release(){
- closeScan();
- NdbCursorOperation::release();
+void
+NdbScanOperation::execCLOSE_SCAN_REP(){
+ m_api_receivers_count = 0;
+ m_conf_receivers_count = 0;
+ m_sent_receivers_count = 0;
}
-void SetValueRecList::add(const char* anAttrName, const char* aValue, Uint32 len)
+void NdbScanOperation::release()
{
- SetValueRec* newSetValueRec = new SetValueRec();
-
- newSetValueRec->stype = SetValueRec::SET_STRING_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->stringStruct.aStringValue = (char *) malloc(len);
- strlcpy(newSetValueRec->stringStruct.aStringValue, aValue, len);
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ if(theNdbCon != 0 || m_transConnection != 0){
+ closeScan();
+ }
+ for(Uint32 i = 0; i<m_allocated_receivers; i++){
+ m_receivers[i]->release();
}
}
-void SetValueRecList::add(const char* anAttrName, Int32 aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+/***************************************************************************
+int prepareSendScan(Uint32 aTC_ConnectPtr,
+ Uint64 aTransactionId)
+
+Return Value: Return 0 : preparation of send was successful.
+ Return -1: In all other cases.
+Parameters: aTC_ConnectPtr: the Connect pointer to TC.
+ aTransactionId: the Transaction identity of the transaction.
+Remark: Puts the final data into the ATTRINFO signal(s); after this
+ we know how many signals to send and their sizes
+***************************************************************************/
+int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
+ Uint64 aTransactionId){
+
+ if (theInterpretIndicator != 1 ||
+ (theOperationType != OpenScanRequest &&
+ theOperationType != OpenRangeScanRequest)) {
+ setErrorCodeAbort(4005);
+ return -1;
+ }
- newSetValueRec->stype = SetValueRec::SET_INT32_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->anInt32Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ if (theStatus == SetBound) {
+ ((NdbIndexScanOperation*)this)->saveBoundATTRINFO();
+ theStatus = GetValue;
}
-}
-void SetValueRecList::add(const char* anAttrName, Uint32 aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+ theErrorLine = 0;
- newSetValueRec->stype = SetValueRec::SET_UINT32_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->anUint32Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ // In prepareSendInterpreted we set the sizes (words 4-8) in the
+ // first ATTRINFO signal.
+ if (prepareSendInterpreted() == -1)
+ return -1;
+
+ if(m_ordered){
+ ((NdbIndexScanOperation*)this)->fix_get_values();
+ }
+
+ const Uint32 transId1 = (Uint32) (aTransactionId & 0xFFFFFFFF);
+ const Uint32 transId2 = (Uint32) (aTransactionId >> 32);
+
+ if (theOperationType == OpenRangeScanRequest) {
+ NdbApiSignal* tSignal = theBoundATTRINFO;
+ do{
+ tSignal->setData(aTC_ConnectPtr, 1);
+ tSignal->setData(transId1, 2);
+ tSignal->setData(transId2, 3);
+ tSignal = tSignal->next();
+ } while (tSignal != NULL);
+ }
+ theCurrentATTRINFO->setLength(theAI_LenInCurrAI);
+ NdbApiSignal* tSignal = theFirstATTRINFO;
+ do{
+ tSignal->setData(aTC_ConnectPtr, 1);
+ tSignal->setData(transId1, 2);
+ tSignal->setData(transId2, 3);
+ tSignal = tSignal->next();
+ } while (tSignal != NULL);
+
+ /**
+ * Prepare all receivers
+ */
+ theReceiver.prepareSend();
+ bool keyInfo = m_keyInfo;
+ Uint32 key_size = keyInfo ? m_currentTable->m_keyLenInWords : 0;
+ for(Uint32 i = 0; i<theParallelism; i++){
+ m_receivers[i]->do_get_value(&theReceiver, theBatchSize, key_size);
}
+ return 0;
}
-void SetValueRecList::add(const char* anAttrName, Int64 aValue)
+/******************************************************************************
+int doSendScan(int aProcessorId)
+
+Return Value: Return >0 : send was successful, returns number of signals sent
+ Return -1: In all other cases.
+Parameters: aProcessorId: Receiving processor node
+Remark: Sends the ATTRINFO signal(s)
+******************************************************************************/
+int
+NdbScanOperation::doSendScan(int aProcessorId)
{
- SetValueRec* newSetValueRec = new SetValueRec();
+ Uint32 tSignalCount = 0;
+ NdbApiSignal* tSignal;
+
+ if (theInterpretIndicator != 1 ||
+ (theOperationType != OpenScanRequest &&
+ theOperationType != OpenRangeScanRequest)) {
+ setErrorCodeAbort(4005);
+ return -1;
+ }
+
+ assert(theSCAN_TABREQ != NULL);
+ tSignal = theSCAN_TABREQ;
+ if (tSignal->setSignal(GSN_SCAN_TABREQ) == -1) {
+ setErrorCode(4001);
+ return -1;
+ }
+ // Update the "attribute info length in words" in SCAN_TABREQ before
+ // sending it. This could not be done in openScan because
+ // we created the ATTRINFO signals after the SCAN_TABREQ signal.
+ ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
+ req->attrLen = theTotalCurrAI_Len;
+ if (theOperationType == OpenRangeScanRequest)
+ req->attrLen += theTotalBoundAI_Len;
+ TransporterFacade *tp = TransporterFacade::instance();
+ if(theParallelism > 16){
+ LinearSectionPtr ptr[3];
+ ptr[0].p = m_prepared_receivers;
+ ptr[0].sz = theParallelism;
+ if (tp->sendFragmentedSignal(tSignal, aProcessorId, ptr, 1) == -1) {
+ setErrorCode(4002);
+ return -1;
+ }
+ } else {
+ tSignal->setLength(9+theParallelism);
+ memcpy(tSignal->getDataPtrSend()+9, m_prepared_receivers, 4*theParallelism);
+ if (tp->sendSignal(tSignal, aProcessorId) == -1) {
+ setErrorCode(4002);
+ return -1;
+ }
+ }
- newSetValueRec->stype = SetValueRec::SET_INT64_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->anInt64Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ if (theOperationType == OpenRangeScanRequest) {
+ // must have at least one signal since it contains attrLen for bounds
+ assert(theBoundATTRINFO != NULL);
+ tSignal = theBoundATTRINFO;
+ while (tSignal != NULL) {
+ if (tp->sendSignal(tSignal,aProcessorId) == -1){
+ setErrorCode(4002);
+ return -1;
+ }
+ tSignalCount++;
+ tSignal = tSignal->next();
+ }
+ }
+
+ tSignal = theFirstATTRINFO;
+ while (tSignal != NULL) {
+ if (tp->sendSignal(tSignal,aProcessorId) == -1){
+ setErrorCode(4002);
+ return -1;
+ }
+ tSignalCount++;
+ tSignal = tSignal->next();
+ }
+ theStatus = WaitResponse;
+ return tSignalCount;
+}//NdbOperation::doSendScan()
+
+/******************************************************************************
+ * NdbOperation* takeOverScanOp(NdbConnection* updateTrans);
+ *
+ * Parameters: The update transaction's NdbConnection pointer.
+ * Return Value: A reference to the transferred operation object
+ * or NULL if no success.
+ * Remark: Take over the scanning transaction's NdbOperation
+ * object for a tuple to an update transaction,
+ * which is the last operation read in nextScanResult()
+ * (theNdbCon->thePreviousScanRec)
+ *
+ * FUTURE IMPLEMENTATION: (This note was moved from header file.)
+ * In the future, it will even be possible to transfer
+ * to an NdbConnection on another Ndb-object.
+ * In this case the receiving NdbConnection-object must call
+ * a method receiveOpFromScan to actually receive the information.
+ * This means that the updating transactions can be placed
+ * in separate threads, thus increasing the parallelism during
+ * the scan process.
+ *****************************************************************************/
+int
+NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
+{
+ Uint32 idx = m_current_api_receiver;
+ Uint32 last = m_api_receivers_count;
+
+ Uint32 row;
+ NdbReceiver * tRec;
+ NdbRecAttr * tRecAttr;
+ if(idx < last && (tRec = m_api_receivers[idx])
+ && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
+ && (tRecAttr = tRec->m_rows[row-1])){
+
+ const Uint32 * src = (Uint32*)tRecAttr->aRef();
+ memcpy(data, src, 4*size);
+ return 0;
}
+ return -1;
}
-void SetValueRecList::add(const char* anAttrName, Uint64 aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+NdbOperation*
+NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
+
+ Uint32 idx = m_current_api_receiver;
+ Uint32 last = m_api_receivers_count;
+
+ Uint32 row;
+ NdbReceiver * tRec;
+ NdbRecAttr * tRecAttr;
+ if(idx < last && (tRec = m_api_receivers[idx])
+ && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
+ && (tRecAttr = tRec->m_rows[row-1])){
+
+ NdbOperation * newOp = pTrans->getNdbOperation(m_currentTable);
+ if (newOp == NULL){
+ return NULL;
+ }
+
+ const Uint32 len = (tRecAttr->attrSize() * tRecAttr->arraySize() + 3)/4-1;
+
+ newOp->theTupKeyLen = len;
+ newOp->theOperationType = opType;
+ if (opType == DeleteRequest) {
+ newOp->theStatus = GetValue;
+ } else {
+ newOp->theStatus = SetValue;
+ }
+
+ const Uint32 * src = (Uint32*)tRecAttr->aRef();
+ const Uint32 tScanInfo = src[len] & 0xFFFF;
+ const Uint32 tTakeOverNode = src[len] >> 16;
+ {
+ UintR scanInfo = 0;
+ TcKeyReq::setTakeOverScanFlag(scanInfo, 1);
+ TcKeyReq::setTakeOverScanNode(scanInfo, tTakeOverNode);
+ TcKeyReq::setTakeOverScanInfo(scanInfo, tScanInfo);
+ newOp->theScanInfo = scanInfo;
+ }
- newSetValueRec->stype = SetValueRec::SET_UINT64_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->anUint64Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ // Copy the first 8 words of key info from KEYINFO20 into TCKEYREQ
+ TcKeyReq * tcKeyReq = CAST_PTR(TcKeyReq,newOp->theTCREQ->getDataPtrSend());
+ Uint32 i = 0;
+ for (i = 0; i < TcKeyReq::MaxKeyInfo && i < len; i++) {
+ tcKeyReq->keyInfo[i] = * src++;
+ }
+
+ if(i < len){
+ NdbApiSignal* tSignal = theNdb->getSignal();
+ newOp->theFirstKEYINFO = tSignal;
+
+ Uint32 left = len - i;
+ while(tSignal && left > KeyInfo::DataLength){
+ tSignal->setSignal(GSN_KEYINFO);
+ KeyInfo * keyInfo = CAST_PTR(KeyInfo, tSignal->getDataPtrSend());
+ memcpy(keyInfo->keyData, src, 4 * KeyInfo::DataLength);
+ src += KeyInfo::DataLength;
+ left -= KeyInfo::DataLength;
+
+ tSignal->next(theNdb->getSignal());
+ tSignal = tSignal->next();
+ }
+
+ if(tSignal && left > 0){
+ tSignal->setSignal(GSN_KEYINFO);
+ KeyInfo * keyInfo = CAST_PTR(KeyInfo, tSignal->getDataPtrSend());
+ memcpy(keyInfo->keyData, src, 4 * left);
+ }
+ }
+ // create blob handles automatically
+ if (opType == DeleteRequest && m_currentTable->m_noOfBlobs != 0) {
+ for (unsigned i = 0; i < m_currentTable->m_columns.size(); i++) {
+ NdbColumnImpl* c = m_currentTable->m_columns[i];
+ assert(c != 0);
+ if (c->getBlobType()) {
+ if (newOp->getBlobHandle(pTrans, c) == NULL)
+ return NULL;
+ }
+ }
+ }
+
+ return newOp;
}
+ return 0;
}
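
The take-over copies the key recovered from KEYINFO20 the same way an ordinary keyed operation would send it: the first TcKeyReq::MaxKeyInfo words travel inside the TCKEYREQ signal, and the remainder is packed into KEYINFO signals holding KeyInfo::DataLength words each. A small sketch that only counts the KEYINFO signals needed for a key of a given length:

    #include <signaldata/TcKeyReq.hpp>
    #include <signaldata/KeyInfo.hpp>

    // Sketch: number of KEYINFO signals needed for a key of keyWords words,
    // after the first TcKeyReq::MaxKeyInfo words have gone into TCKEYREQ.
    static Uint32 keyinfo_signals_needed(Uint32 keyWords)
    {
      if (keyWords <= TcKeyReq::MaxKeyInfo)
        return 0;
      const Uint32 rest = keyWords - TcKeyReq::MaxKeyInfo;
      return (rest + KeyInfo::DataLength - 1) / KeyInfo::DataLength;
    }
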
-void SetValueRecList::add(const char* anAttrName, float aValue)
+NdbBlob*
+NdbScanOperation::getBlobHandle(const char* anAttrName)
{
- SetValueRec* newSetValueRec = new SetValueRec();
+ return NdbOperation::getBlobHandle(m_transConnection,
+ m_currentTable->getColumn(anAttrName));
+}
- newSetValueRec->stype = SetValueRec::SET_FLOAT_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->aFloatValue = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
- }
+NdbBlob*
+NdbScanOperation::getBlobHandle(Uint32 anAttrId)
+{
+ return NdbOperation::getBlobHandle(m_transConnection,
+ m_currentTable->getColumn(anAttrId));
}
-void SetValueRecList::add(const char* anAttrName, double aValue)
+NdbIndexScanOperation::NdbIndexScanOperation(Ndb* aNdb)
+ : NdbScanOperation(aNdb)
{
- SetValueRec* newSetValueRec = new SetValueRec();
+}
- newSetValueRec->stype = SetValueRec::SET_DOUBLE_ATTR1;
- newSetValueRec->anAttrName = strdup(anAttrName);
- newSetValueRec->aDoubleValue = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
- }
+NdbIndexScanOperation::~NdbIndexScanOperation(){
}
-void SetValueRecList::add(Uint32 anAttrId, const char* aValue, Uint32 len)
+int
+NdbIndexScanOperation::setBound(const char* anAttrName, int type, const void* aValue, Uint32 len)
{
- SetValueRec* newSetValueRec = new SetValueRec();
-
- newSetValueRec->stype = SetValueRec::SET_STRING_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->stringStruct.aStringValue = (char *) malloc(len);
- strlcpy(newSetValueRec->stringStruct.aStringValue, aValue, len);
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
- }
+ return setBound(m_accessTable->getColumn(anAttrName), type, aValue, len);
}
-void SetValueRecList::add(Uint32 anAttrId, Int32 aValue)
+int
+NdbIndexScanOperation::setBound(Uint32 anAttrId, int type, const void* aValue, Uint32 len)
{
- SetValueRec* newSetValueRec = new SetValueRec();
+ return setBound(m_accessTable->getColumn(anAttrId), type, aValue, len);
+}
- newSetValueRec->stype = SetValueRec::SET_INT32_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->anInt32Value = aValue;
- last->next = newSetValueRec;
- last = newSetValueRec;
+int
+NdbIndexScanOperation::equal_impl(const NdbColumnImpl* anAttrObject,
+ const char* aValue,
+ Uint32 len){
+ return setBound(anAttrObject, BoundEQ, aValue, len);
}
-void SetValueRecList::add(Uint32 anAttrId, Uint32 aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+NdbRecAttr*
+NdbIndexScanOperation::getValue_impl(const NdbColumnImpl* attrInfo,
+ char* aValue){
+ if(!m_ordered){
+ return NdbScanOperation::getValue_impl(attrInfo, aValue);
+ }
+
+ if (theStatus == SetBound) {
+ saveBoundATTRINFO();
+ theStatus = GetValue;
+ }
- newSetValueRec->stype = SetValueRec::SET_UINT32_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->anUint32Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ int id = attrInfo->m_attrId; // In "real" table
+ assert(m_accessTable->m_index);
+ int sz = (int)m_accessTable->m_index->m_key_ids.size();
+ if(id >= sz || (id = m_accessTable->m_index->m_key_ids[id]) == -1){
+ return NdbScanOperation::getValue_impl(attrInfo, aValue);
+ }
+
+ assert(id < NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY);
+ Uint32 marker = theTupleKeyDefined[id][0];
+
+ if(marker == SETBOUND_EQ){
+ return NdbScanOperation::getValue_impl(attrInfo, aValue);
+ } else if(marker == API_PTR){
+ return NdbScanOperation::getValue_impl(attrInfo, aValue);
}
+
+ assert(marker == FAKE_PTR);
+
+ UintPtr oldVal;
+ oldVal = theTupleKeyDefined[id][1];
+#if (SIZEOF_CHARP == 8)
+ oldVal = oldVal | (((UintPtr)theTupleKeyDefined[id][2]) << 32);
+#endif
+ theTupleKeyDefined[id][0] = API_PTR;
+
+ NdbRecAttr* tmp = (NdbRecAttr*)oldVal;
+ tmp->setup(attrInfo, aValue);
+
+ return tmp;
}
-void SetValueRecList::add(Uint32 anAttrId, Int64 aValue)
+#include <AttributeHeader.hpp>
+/*
+ * Define bound on index column in range scan.
+ */
+int
+NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
+ int type, const void* aValue, Uint32 len)
{
- SetValueRec* newSetValueRec = new SetValueRec();
+ if (theOperationType == OpenRangeScanRequest &&
+ theStatus == SetBound &&
+ (0 <= type && type <= 4) &&
+ len <= 8000) {
+ // bound type
+
+ insertATTRINFO(type);
+ // attribute header
+ Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ if (len != sizeInBytes && (len != 0)) {
+ setErrorCodeAbort(4209);
+ return -1;
+ }
+ len = aValue != NULL ? sizeInBytes : 0;
+ Uint32 tIndexAttrId = tAttrInfo->m_attrId;
+ Uint32 sizeInWords = (len + 3) / 4;
+ AttributeHeader ah(tIndexAttrId, sizeInWords);
+ insertATTRINFO(ah.m_value);
+ if (len != 0) {
+ // attribute data
+ if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0)
+ insertATTRINFOloop((const Uint32*)aValue, sizeInWords);
+ else {
+ Uint32 temp[2000];
+ memcpy(temp, aValue, len);
+ while ((len & 0x3) != 0)
+ ((char*)temp)[len++] = 0;
+ insertATTRINFOloop(temp, sizeInWords);
+ }
+ }
- newSetValueRec->stype = SetValueRec::SET_INT64_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->anInt64Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ /**
+ * Bookkeeping for ordered (sorted) scans: remember equality bounds
+ */
+
+ /**
+ * The primary key columns of an ordered index are defined at the beginning,
+ * so it's safe to use [tIndexAttrId]
+ * (instead of looping as in NdbOperation::equal_impl)
+ */
+ if(type == BoundEQ && !theTupleKeyDefined[tIndexAttrId][0]){
+ theNoOfTupKeyDefined++;
+ theTupleKeyDefined[tIndexAttrId][0] = SETBOUND_EQ;
+ }
+
+ return 0;
+ } else {
+ setErrorCodeAbort(4228); // XXX wrong code
+ return -1;
}
}
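
setBound() is only accepted while the range scan is still in the SetBound state, i.e. before any getValue() call, and the value length must match the column size. A hedged usage sketch for an ordered index scan follows; the index, table and column names are made up, getNdbIndexScanOperation() and the BoundLE/BoundGE constants are assumed from the surrounding API (only BoundEQ appears in this patch), and the bound-type naming follows the NDB convention where the type describes the bound relative to the column.

    // Sketch: bounded, ordered scan over an index.  Names are illustrative.
    NdbConnection* trans = ndb->startTransaction();
    NdbIndexScanOperation* scan =
      trans->getNdbIndexScanOperation("MYINDEX", "MYTAB");
    NdbResultSet* rs =
      scan->readTuples(NdbScanOperation::LM_Read, 0, 0, true /* ordered */);
    Uint32 lo = 10, hi = 20;
    scan->setBound("a", NdbIndexScanOperation::BoundLE, &lo, sizeof(lo)); // a >= 10
    scan->setBound("a", NdbIndexScanOperation::BoundGE, &hi, sizeof(hi)); // a <= 20
    NdbRecAttr* col = scan->getValue("a");
    trans->execute(NoCommit);
    while (rs->nextResult(true) == 0)
      ndbout << *col << endl;
    ndb->closeTransaction(trans);
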
-void SetValueRecList::add(Uint32 anAttrId, Uint64 aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+NdbResultSet*
+NdbIndexScanOperation::readTuples(LockMode lm,
+ Uint32 batch,
+ Uint32 parallel,
+ bool order_by){
+ NdbResultSet * rs = NdbScanOperation::readTuples(lm, batch, 0);
+ if(rs && order_by){
+ m_ordered = 1;
+ m_sort_columns = m_accessTable->getNoOfColumns() - 1; // -1 for NDB$NODE
+ m_current_api_receiver = m_sent_receivers_count;
+ m_api_receivers_count = m_sent_receivers_count;
+ }
+ return rs;
+}
- newSetValueRec->stype = SetValueRec::SET_UINT64_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->anUint64Value = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+void
+NdbIndexScanOperation::fix_get_values(){
+ /**
+ * Loop through all getValues and set buffer pointer to "API" pointer
+ */
+ NdbRecAttr * curr = theReceiver.theFirstRecAttr;
+ Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
+ assert(cnt < NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY);
+
+ NdbIndexImpl * idx = m_accessTable->m_index;
+ NdbTableImpl * tab = m_currentTable;
+ for(Uint32 i = 0; i<cnt; i++){
+ Uint32 val = theTupleKeyDefined[i][0];
+ switch(val){
+ case FAKE_PTR:
+ curr->setup(curr->m_column, 0);
+ case API_PTR:
+ curr = curr->next();
+ break;
+ case SETBOUND_EQ:
+ break;
+#ifdef VM_TRACE
+ default:
+ abort();
+#endif
+ }
}
}
-void SetValueRecList::add(Uint32 anAttrId, float aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+int
+NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols,
+ const NdbReceiver* t1,
+ const NdbReceiver* t2){
+
+ NdbRecAttr * r1 = t1->m_rows[t1->m_current_row];
+ NdbRecAttr * r2 = t2->m_rows[t2->m_current_row];
- newSetValueRec->stype = SetValueRec::SET_FLOAT_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->aFloatValue = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ r1 = (skip ? r1->next() : r1);
+ r2 = (skip ? r2->next() : r2);
+
+ while(cols > 0){
+ Uint32 * d1 = (Uint32*)r1->aRef();
+ Uint32 * d2 = (Uint32*)r2->aRef();
+ unsigned r1_null = r1->isNULL();
+ if((r1_null ^ (unsigned)r2->isNULL())){
+ return (r1_null ? -1 : 1);
+ }
+ Uint32 type = NdbColumnImpl::getImpl(* r1->m_column).m_extType;
+ Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4;
+ if(!r1_null){
+ char r = NdbSqlUtil::cmp(type, d1, d2, size, size);
+ if(r){
+ assert(r != NdbSqlUtil::CmpUnknown);
+ assert(r != NdbSqlUtil::CmpError);
+ return r;
+ }
+ }
+ cols--;
+ r1 = r1->next();
+ r2 = r2->next();
}
+ return 0;
}
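
compare() above orders rows NULL-first and otherwise compares the sort columns one by one via NdbSqlUtil::cmp. A standalone sketch of that shape, with columns reduced to plain ints (purely illustrative):

    // Compare two rows column by column; NULL sorts before any value.
    // Returns <0, 0 or >0, mirroring the shape of compare() above.
    struct Col { bool isNull; int value; };

    static int compare_rows(const Col* a, const Col* b, unsigned cols)
    {
      for (unsigned i = 0; i < cols; i++) {
        if (a[i].isNull != b[i].isNull)
          return a[i].isNull ? -1 : 1;      // exactly one side is NULL: NULL first
        if (!a[i].isNull && a[i].value != b[i].value)
          return a[i].value < b[i].value ? -1 : 1;
      }
      return 0;                             // equal on all sort columns
    }
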
-void SetValueRecList::add(Uint32 anAttrId, double aValue)
-{
- SetValueRec* newSetValueRec = new SetValueRec();
+int
+NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){
+
+ Uint32 u_idx = 0, u_last = 0;
+ Uint32 s_idx = m_current_api_receiver; // first sorted
+ Uint32 s_last = theParallelism; // last sorted
- newSetValueRec->stype = SetValueRec::SET_DOUBLE_ATTR2;
- newSetValueRec->anAttrId = anAttrId;
- newSetValueRec->aDoubleValue = aValue;
- if (!last)
- first = last = newSetValueRec;
- else {
- last->next = newSetValueRec;
- last = newSetValueRec;
+ NdbReceiver** arr = m_api_receivers;
+ NdbReceiver* tRec = arr[s_idx];
+
+ if(DEBUG_NEXT_RESULT) ndbout_c("nextOrderedResult(%d) nextResult: %d",
+ fetchAllowed,
+ (s_idx < s_last ? tRec->nextResult() : 0));
+
+ if(DEBUG_NEXT_RESULT) ndbout_c("u=[%d %d] s=[%d %d]",
+ u_idx, u_last,
+ s_idx, s_last);
+
+ bool fetchNeeded = (s_idx == s_last) || !tRec->nextResult();
+
+ if(fetchNeeded){
+ if(fetchAllowed){
+ if(DEBUG_NEXT_RESULT) ndbout_c("performing fetch...");
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+ Uint32 seq = theNdbCon->theNodeSequence;
+ Uint32 nodeId = theNdbCon->theDBnode;
+ if(seq == tp->getNodeSequence(nodeId) && !send_next_scan_ordered(s_idx)){
+ Uint32 tmp = m_sent_receivers_count;
+ s_idx = m_current_api_receiver;
+ while(m_sent_receivers_count > 0 && !theError.code){
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) {
+ continue;
+ }
+ if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
+ return -1;
+ }
+
+ u_idx = 0;
+ u_last = m_conf_receivers_count;
+ m_conf_receivers_count = 0;
+ memcpy(arr, m_conf_receivers, u_last * sizeof(char*));
+
+ if(DEBUG_NEXT_RESULT) ndbout_c("sent: %d recv: %d", tmp, u_last);
+ if(theError.code){
+ setErrorCode(theError.code);
+ if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
+ return -1;
+ }
+ }
+ } else {
+ if(DEBUG_NEXT_RESULT) ndbout_c("return 2");
+ return 2;
+ }
+ } else {
+ u_idx = s_idx;
+ u_last = s_idx + 1;
+ s_idx++;
+ }
+
+ if(DEBUG_NEXT_RESULT) ndbout_c("u=[%d %d] s=[%d %d]",
+ u_idx, u_last,
+ s_idx, s_last);
+
+
+ Uint32 cols = m_sort_columns;
+ Uint32 skip = m_keyInfo;
+ while(u_idx < u_last){
+ u_last--;
+ tRec = arr[u_last];
+
+    // Linear probe to find the insertion place (TODO: binary search)
+ Uint32 place = s_idx;
+ for(; place < s_last; place++){
+ if(compare(skip, cols, tRec, arr[place]) <= 0){
+ break;
+ }
+ }
+
+ if(place != s_idx){
+ if(DEBUG_NEXT_RESULT)
+ ndbout_c("memmove(%d, %d, %d)", s_idx-1, s_idx, (place - s_idx));
+ memmove(arr+s_idx-1, arr+s_idx, sizeof(char*)*(place - s_idx));
+ }
+
+ if(DEBUG_NEXT_RESULT) ndbout_c("putting %d @ %d", u_last, place - 1);
+ m_api_receivers[place-1] = tRec;
+ s_idx--;
}
-}
-void
-SetValueRecList::callSetValueFn(SetValueRec& aSetValueRec, NdbOperation& oper)
-{
- switch(aSetValueRec.stype) {
- case(SetValueRec::SET_STRING_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.stringStruct.aStringValue, aSetValueRec.stringStruct.len);
- break;
- case(SetValueRec::SET_INT32_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.anInt32Value);
- break;
- case(SetValueRec::SET_UINT32_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.anUint32Value);
- break;
- case(SetValueRec::SET_INT64_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.anInt64Value);
- break;
- case(SetValueRec::SET_UINT64_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.anUint64Value);
- break;
- case(SetValueRec::SET_FLOAT_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.aFloatValue);
- break;
- case(SetValueRec::SET_DOUBLE_ATTR1):
- oper.setValue(aSetValueRec.anAttrName, aSetValueRec.aDoubleValue);
- break;
- case(SetValueRec::SET_STRING_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.stringStruct.aStringValue, aSetValueRec.stringStruct.len);
- break;
- case(SetValueRec::SET_INT32_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.anInt32Value);
- break;
- case(SetValueRec::SET_UINT32_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.anUint32Value);
- break;
- case(SetValueRec::SET_INT64_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.anInt64Value);
- break;
- case(SetValueRec::SET_UINT64_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.anUint64Value);
- break;
- case(SetValueRec::SET_FLOAT_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.aFloatValue);
- break;
- case(SetValueRec::SET_DOUBLE_ATTR2):
- oper.setValue(aSetValueRec.anAttrId, aSetValueRec.aDoubleValue);
- break;
+ if(DEBUG_NEXT_RESULT) ndbout_c("u=[%d %d] s=[%d %d]",
+ u_idx, u_last,
+ s_idx, s_last);
+
+ m_current_api_receiver = s_idx;
+
+ if(DEBUG_NEXT_RESULT)
+ for(Uint32 i = s_idx; i<s_last; i++)
+ ndbout_c("%p", arr[i]);
+
+ tRec = m_api_receivers[s_idx];
+ if(s_idx < s_last && tRec->nextResult()){
+ tRec->copyout(theReceiver);
+ if(DEBUG_NEXT_RESULT) ndbout_c("return 0");
+ return 0;
}
-}
-SetValueRec::~SetValueRec()
-{
- if ((stype == SET_STRING_ATTR1) ||
- (stype == SET_INT32_ATTR1) ||
- (stype == SET_UINT32_ATTR1) ||
- (stype == SET_INT64_ATTR1) ||
- (stype == SET_UINT64_ATTR1) ||
- (stype == SET_FLOAT_ATTR1) ||
- (stype == SET_DOUBLE_ATTR1))
- free(anAttrName);
-
- if ((stype == SET_STRING_ATTR1) ||
- (stype == SET_STRING_ATTR2))
- free(stringStruct.aStringValue);
- if (next) delete next;
- next = 0;
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+ Uint32 seq = theNdbCon->theNodeSequence;
+ Uint32 nodeId = theNdbCon->theDBnode;
+ if(seq == tp->getNodeSequence(nodeId) &&
+ send_next_scan(0, true) == 0 &&
+ theError.code == 0){
+ if(DEBUG_NEXT_RESULT) ndbout_c("return 1");
+ return 1;
+ }
+ setErrorCode(theError.code);
+ if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
+ return -1;
}
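
The trickiest step in next_result_ordered() is putting a refilled receiver back into the sorted tail of m_api_receivers: probe for the first receiver that is not smaller, shift the smaller ones down one slot, and drop the new one just below the probe point. A standalone sketch with ints in place of NdbReceiver pointers (illustrative only):

    #include <cstring>
    #include <cassert>

    // arr[s_idx..s_last) is sorted ascending and slot s_idx-1 is free.
    // Insert 'v' so the run stays sorted, shifting smaller elements down one
    // slot exactly as the memmove in next_result_ordered() does.
    static unsigned insert_sorted(int* arr, unsigned s_idx, unsigned s_last, int v)
    {
      assert(s_idx > 0);
      unsigned place = s_idx;
      while (place < s_last && arr[place] < v)   // linear probe, as in the code above
        place++;
      if (place != s_idx)
        std::memmove(arr + s_idx - 1, arr + s_idx, sizeof(int) * (place - s_idx));
      arr[place - 1] = v;
      return s_idx - 1;                          // new start of the sorted run
    }
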
int
-NdbScanOperation::equal_impl(const NdbColumnImpl* anAttrObject,
- const char* aValue,
- Uint32 len){
- return setBound(anAttrObject, BoundEQ, aValue, len);
+NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx){
+ if(idx == theParallelism)
+ return 0;
+
+ NdbApiSignal tSignal(theNdb->theMyRef);
+ tSignal.setSignal(GSN_SCAN_NEXTREQ);
+
+ Uint32* theData = tSignal.getDataPtrSend();
+ theData[0] = theNdbCon->theTCConPtr;
+ theData[1] = 0;
+ Uint64 transId = theNdbCon->theTransactionId;
+ theData[2] = transId;
+ theData[3] = (Uint32) (transId >> 32);
+
+ /**
+ * Prepare ops
+ */
+ Uint32 last = m_sent_receivers_count;
+ Uint32 * prep_array = theData + 4;
+
+ NdbReceiver * tRec = m_api_receivers[idx];
+ m_sent_receivers[last] = tRec;
+ tRec->m_list_index = last;
+ prep_array[0] = tRec->m_tcPtrI;
+ tRec->prepareSend();
+
+ m_sent_receivers_count = last + 1;
+ m_current_api_receiver = idx + 1;
+
+ Uint32 nodeId = theNdbCon->theDBnode;
+ TransporterFacade * tp = TransporterFacade::instance();
+ tSignal.setLength(4+1);
+ return tp->sendSignal(&tSignal, nodeId);
}
+int
+NdbScanOperation::restart(){
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+
+ Uint32 seq = theNdbCon->theNodeSequence;
+ Uint32 nodeId = theNdbCon->theDBnode;
+
+ if(seq != tp->getNodeSequence(nodeId)){
+ theNdbCon->theReleaseOnClose = true;
+ return -1;
+ }
+
+ while(m_sent_receivers_count){
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ switch(return_code){
+ case 0:
+ break;
+ case -1:
+ setErrorCode(4008);
+ case -2:
+ m_api_receivers_count = 0;
+ m_conf_receivers_count = 0;
+ m_sent_receivers_count = 0;
+ return -1;
+ }
+ }
+
+ if(m_api_receivers_count+m_conf_receivers_count){
+ // Send close scan
+ if(send_next_scan(0, true) == -1) // Close scan
+ return -1;
+ }
+
+ /**
+ * wait for close scan conf
+ */
+ while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count){
+ theNdb->theWaiter.m_node = nodeId;
+ theNdb->theWaiter.m_state = WAIT_SCAN;
+ int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
+ switch(return_code){
+ case 0:
+ break;
+ case -1:
+ setErrorCode(4008);
+ case -2:
+ m_api_receivers_count = 0;
+ m_conf_receivers_count = 0;
+ m_sent_receivers_count = 0;
+ return -1;
+ }
+ }
+ /**
+ * Reset receivers
+ */
+ const Uint32 parallell = theParallelism;
+
+ for(Uint32 i = 0; i<parallell; i++){
+ m_receivers[i]->m_list_index = i;
+ m_prepared_receivers[i] = m_receivers[i]->getId();
+ m_sent_receivers[i] = m_receivers[i];
+ m_conf_receivers[i] = 0;
+ m_api_receivers[i] = 0;
+ m_receivers[i]->prepareSend();
+ }
+
+ m_api_receivers_count = 0;
+ m_current_api_receiver = 0;
+ m_sent_receivers_count = parallell;
+ m_conf_receivers_count = 0;
+
+ if(m_ordered){
+ m_current_api_receiver = parallell;
+ m_api_receivers_count = parallell;
+ }
+
+ if (doSendScan(nodeId) == -1)
+ return -1;
+
+ return 0;
+}
diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp
index a05eb4c54c3..7ad37401b9a 100644
--- a/ndb/src/ndbapi/Ndbif.cpp
+++ b/ndb/src/ndbapi/Ndbif.cpp
@@ -17,11 +17,9 @@
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
-//#include "NdbSchemaOp.hpp"
-//#include "NdbSchemaCon.hpp"
#include "NdbOperation.hpp"
#include "NdbIndexOperation.hpp"
-#include "NdbScanReceiver.hpp"
+#include "NdbScanOperation.hpp"
#include "NdbConnection.hpp"
#include "NdbRecAttr.hpp"
#include "NdbReceiver.hpp"
@@ -34,6 +32,9 @@
#include <signaldata/CreateIndx.hpp>
#include <signaldata/DropIndx.hpp>
#include <signaldata/TcIndx.hpp>
+#include <signaldata/TransIdAI.hpp>
+#include <signaldata/ScanFrag.hpp>
+#include <signaldata/ScanTab.hpp>
#include <ndb_limits.h>
#include <NdbOut.hpp>
@@ -41,12 +42,13 @@
/******************************************************************************
- * int init( int aMaxNoOfTransactions );
+ * int init( int aNrOfCon, int aNrOfOp );
*
* Return Value: Return 0 : init was successful.
* Return -1: In all other case.
- * Parameters: aMaxNoOfTransactions : Max number of simultaneous transations
- * Remark: Create pointers and idle list Synchronous.
+ * Parameters: aNrOfCon : Number of connections offered to the application.
+ * aNrOfOp : Number of operations offered to the application.
+ * Remark: Create pointers and idle list Synchronous.
****************************************************************************/
int
Ndb::init(int aMaxNoOfTransactions)
@@ -75,7 +77,7 @@ Ndb::init(int aMaxNoOfTransactions)
executeMessage,
statusMessage);
-
+
if ( tBlockNo == -1 ) {
theError.code = 4105;
theFacade->unlock_mutex();
@@ -95,6 +97,7 @@ Ndb::init(int aMaxNoOfTransactions)
}
theFirstTransId = ((Uint64)theNdbBlockNumber << 52)+((Uint64)theNode << 40);
+ theFirstTransId += theFacade->m_open_count;
theFacade->unlock_mutex();
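
For reference, the starting transaction id built above appears to be composed of the API block number in the top bits, the node id below it, and now the facade's open counter in the low bits, so that several Ndb objects opened in one process start from different ids. A small sketch with the shifts taken from the code above (the bit widths are inferred, not documented here):

    #include <cstdint>

    // Inferred composition of the initial transaction id (shifts from the code above).
    static std::uint64_t first_trans_id(std::uint32_t blockNo, std::uint32_t nodeId,
                                        std::uint32_t openCount)
    {
      std::uint64_t id = (static_cast<std::uint64_t>(blockNo) << 52)
                       + (static_cast<std::uint64_t>(nodeId)  << 40);
      return id + openCount;   // per-process offset so several Ndb objects differ
    }
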
@@ -253,8 +256,7 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
NdbConnection* localCon = theSentTransactionsArray[i];
if (localCon->getConnectedNodeId() == aNodeId ) {
const NdbConnection::SendStatusType sendStatus = localCon->theSendStatus;
- if (sendStatus == NdbConnection::sendTC_OP ||
- sendStatus == NdbConnection::sendTC_COMMIT) {
+ if (sendStatus == NdbConnection::sendTC_OP || sendStatus == NdbConnection::sendTC_COMMIT) {
/*
A transaction was interrupted in the prepare phase by a node
failure. Since the transaction was not found in the phase
@@ -300,26 +302,28 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
NdbOperation* tOp;
NdbIndexOperation* tIndexOp;
NdbConnection* tCon;
- int tReturnCode;
+ int tReturnCode = -1;
const Uint32* tDataPtr = aSignal->getDataPtr();
const Uint32 tWaitState = theWaiter.m_state;
const Uint32 tSignalNumber = aSignal->readSignalNumber();
const Uint32 tFirstData = *tDataPtr;
+ const Uint32 tLen = aSignal->getLength();
+ void * tFirstDataPtr;
/*
- In order to support 64 bit processes in the application we need to use
- id's rather than a direct pointer to the object used. It is also a good
- idea that one cannot corrupt the application code by sending a corrupt
- memory pointer.
-
- All signals received by the API requires the first data word to be such
- an id to the receiving object.
+ In order to support 64 bit processes in the application we need to use
+ id's rather than a direct pointer to the object used. It is also a good
+ idea that one cannot corrupt the application code by sending a corrupt
+ memory pointer.
+
+ All signals received by the API requires the first data word to be such
+ an id to the receiving object.
*/
-
+
switch (tSignalNumber){
case GSN_TCKEYCONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
const TcKeyConf * const keyConf = (TcKeyConf *)tDataPtr;
@@ -328,7 +332,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
(tCon->theSendStatus == NdbConnection::sendTC_OP)) {
- tReturnCode = tCon->receiveTCKEYCONF(keyConf, aSignal->getLength());
+ tReturnCode = tCon->receiveTCKEYCONF(keyConf, tLen);
if (tReturnCode != -1) {
completedTransaction(tCon);
}//if
@@ -346,111 +350,71 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
return;
}
- case GSN_READCONF:
- {
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- tOp = void2rec_op(tFirstDataPtr);
- if (tOp->checkMagicNumber() == 0) {
- tCon = tOp->theNdbCon;
- if (tCon != NULL) {
- if (tCon->theSendStatus == NdbConnection::sendTC_OP) {
- tReturnCode = tOp->receiveREAD_CONF(tDataPtr,
- aSignal->getLength());
- if (tReturnCode != -1) {
- completedTransaction(tCon);
- }//if
- }//if
- }//if
- }//if
+ case GSN_TRANSID_AI:{
+ tFirstDataPtr = int2void(tFirstData);
+ NdbReceiver* tRec;
+ if (tFirstDataPtr && (tRec = void2rec(tFirstDataPtr)) &&
+ tRec->checkMagicNumber() && (tCon = tRec->getTransaction()) &&
+ tCon->checkState_TransId(((const TransIdAI*)tDataPtr)->transId)){
+ Uint32 com;
+ if(aSignal->m_noOfSections > 0){
+ com = tRec->execTRANSID_AI(ptr[0].p, ptr[0].sz);
+ } else {
+ com = tRec->execTRANSID_AI(tDataPtr + TransIdAI::HeaderLength,
+ tLen - TransIdAI::HeaderLength);
+ }
+
+ if(com == 1){
+ switch(tRec->getType()){
+ case NdbReceiver::NDB_OPERATION:
+ case NdbReceiver::NDB_INDEX_OPERATION:
+ if(tCon->OpCompleteSuccess() != -1){
+ completedTransaction(tCon);
+ return;
+ }
+ break;
+ case NdbReceiver::NDB_SCANRECEIVER:
+ tCon->theScanningOp->receiver_delivered(tRec);
+ theWaiter.m_state = (tWaitState == WAIT_SCAN ? NO_WAIT : tWaitState);
+ break;
+ default:
+ goto InvalidSignal;
+ }
+ }
+ break;
+ } else {
+ /**
+       * This is ok, as the transaction may have been aborted before TRANSID_AI
+       * arrives (if TUP is on a different node than TC)
+ */
return;
}
- case GSN_TRANSID_AI:
+ }
+ case GSN_TCKEY_FAILCONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- // ndbout << "*** GSN_TRANSID_AI ***" << endl;
- NdbReceiver* tRec = void2rec(tFirstDataPtr);
- if (tRec->getType() == NdbReceiver::NDB_OPERATION){
- // tOp = (NdbOperation*)tRec->getOwner();
+ tFirstDataPtr = int2void(tFirstData);
+ const TcKeyFailConf * failConf = (TcKeyFailConf *)tDataPtr;
+ const BlockReference aTCRef = aSignal->theSendersBlockRef;
+ if (tFirstDataPtr != 0){
tOp = void2rec_op(tFirstDataPtr);
- // ndbout << "NDB_OPERATION" << endl;
- if (tOp->checkMagicNumber() == 0) {
- tCon = tOp->theNdbCon;
- if (tCon != NULL) {
- if (tCon->theSendStatus == NdbConnection::sendTC_OP) {
- tReturnCode = tOp->receiveTRANSID_AI(tDataPtr,
- aSignal->getLength());
- if (tReturnCode != -1) {
- completedTransaction(tCon);
- break;
- }
- }
- }
- }
- } else if (tRec->getType() == NdbReceiver::NDB_INDEX_OPERATION){
- // tOp = (NdbIndexOperation*)tRec->getOwner();
- tOp = void2rec_iop(tFirstDataPtr);
- // ndbout << "NDB_INDEX_OPERATION" << endl;
- if (tOp->checkMagicNumber() == 0) {
+
+ if (tOp->checkMagicNumber(false) == 0) {
tCon = tOp->theNdbCon;
if (tCon != NULL) {
- if (tCon->theSendStatus == NdbConnection::sendTC_OP) {
- tReturnCode = tOp->receiveTRANSID_AI(tDataPtr,
- aSignal->getLength());
+ if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
+ (tCon->theSendStatus == NdbConnection::sendTC_COMMIT)) {
+ tReturnCode = tCon->receiveTCKEY_FAILCONF(failConf);
if (tReturnCode != -1) {
completedTransaction(tCon);
- break;
- }
- }
- }
- }
- } else if (tRec->getType() == NdbReceiver::NDB_SCANRECEIVER) {
- // NdbScanReceiver* tScanRec = (NdbScanReceiver*)tRec->getOwner();
- // NdbScanReceiver* tScanRec =
- // (NdbScanReceiver*)(void2rec(tFirstDataPtr)->getOwner());
- NdbScanReceiver* tScanRec = void2rec_srec(tFirstDataPtr);
- // ndbout << "NDB_SCANRECEIVER" << endl;
- if(tScanRec->checkMagicNumber() == 0){
- tReturnCode = tScanRec->receiveTRANSID_AI_SCAN(aSignal);
- if (tReturnCode != -1) {
- theWaiter.m_state = NO_WAIT;
- break;
+ }//if
+ }//if
}
}
} else {
-#ifdef NDB_NO_DROPPED_SIGNAL
- abort();
+#ifdef VM_TRACE
+    ndbout_c("Received TCKEY_FAILCONF wo/ operation");
#endif
- goto InvalidSignal;
}
- return;
- }
- case GSN_TCKEY_FAILCONF:
- {
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- const TcKeyFailConf * const failConf = (TcKeyFailConf *)tDataPtr;
- const BlockReference aTCRef = aSignal->theSendersBlockRef;
-
- tOp = void2rec_op(tFirstDataPtr);
-
- if (tOp->checkMagicNumber() == 0) {
- tCon = tOp->theNdbCon;
- if (tCon != NULL) {
- if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
- (tCon->theSendStatus == NdbConnection::sendTC_COMMIT)) {
- tReturnCode = tCon->receiveTCKEY_FAILCONF(failConf);
- if (tReturnCode != -1) {
- completedTransaction(tCon);
- }//if
- }//if
- }//if
- }//if
-
if(tFirstData & 1){
NdbConnection::sendTC_COMMIT_ACK(theCommitAckSignal,
failConf->transId1,
@@ -461,28 +425,32 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}
case GSN_TCKEY_FAILREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- tOp = void2rec_op(tFirstDataPtr);
- if (tOp->checkMagicNumber() == 0) {
- tCon = tOp->theNdbCon;
- if (tCon != NULL) {
- if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
- (tCon->theSendStatus == NdbConnection::sendTC_ROLLBACK)) {
- tReturnCode = tCon->receiveTCKEY_FAILREF(aSignal);
- if (tReturnCode != -1) {
- completedTransaction(tCon);
- return;
- }//if
- }//if
- }//if
- }//if
- return;
+ tFirstDataPtr = int2void(tFirstData);
+ if(tFirstDataPtr != 0){
+ tOp = void2rec_op(tFirstDataPtr);
+ if (tOp->checkMagicNumber() == 0) {
+ tCon = tOp->theNdbCon;
+ if (tCon != NULL) {
+ if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
+ (tCon->theSendStatus == NdbConnection::sendTC_ROLLBACK)) {
+ tReturnCode = tCon->receiveTCKEY_FAILREF(aSignal);
+ if (tReturnCode != -1) {
+ completedTransaction(tCon);
+ return;
+ }//if
+ }//if
+ }//if
+ }//if
+ } else {
+#ifdef VM_TRACE
+    ndbout_c("Received TCKEY_FAILREF wo/ operation");
+#endif
+ }
+ break;
}
case GSN_TCKEYREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
tOp = void2rec_op(tFirstDataPtr);
@@ -493,8 +461,9 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tReturnCode = tOp->receiveTCKEYREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
+ return;
}//if
- return;
+ break;
}//if
}//if
} //if
@@ -503,7 +472,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}
case GSN_TC_COMMITCONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
const TcCommitConf * const commitConf = (TcCommitConf *)tDataPtr;
@@ -531,7 +500,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
case GSN_TC_COMMITREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
tCon = void2con(tFirstDataPtr);
@@ -540,14 +509,13 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tReturnCode = tCon->receiveTC_COMMITREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
- return;
}//if
}//if
return;
}
case GSN_TCROLLBACKCONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
tCon = void2con(tFirstDataPtr);
@@ -562,7 +530,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}
case GSN_TCROLLBACKREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
tCon = void2con(tFirstDataPtr);
@@ -571,14 +539,13 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tReturnCode = tCon->receiveTCROLLBACKREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
- return;
}//if
}//if
return;
}
case GSN_TCROLLBACKREP:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
tCon = void2con(tFirstDataPtr);
@@ -592,27 +559,27 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}
case GSN_TCSEIZECONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
if (tWaitState != WAIT_TC_SEIZE) {
- return;
+ goto InvalidSignal;
}//if
tCon = void2con(tFirstDataPtr);
if (tCon->checkMagicNumber() != 0) {
- return;
+ goto InvalidSignal;
}//if
tReturnCode = tCon->receiveTCSEIZECONF(aSignal);
if (tReturnCode != -1) {
theWaiter.m_state = NO_WAIT;
} else {
- return;
+ goto InvalidSignal;
}//if
break;
}
case GSN_TCSEIZEREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
if (tWaitState != WAIT_TC_SEIZE) {
@@ -632,7 +599,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}
case GSN_TCRELEASECONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
if (tWaitState != WAIT_TC_RELEASE) {
@@ -650,7 +617,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}
case GSN_TCRELEASEREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
if (tWaitState != WAIT_TC_RELEASE) {
@@ -704,84 +671,100 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
case GSN_DIHNDBTAMPER:
{
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- if (tWaitState != WAIT_NDB_TAMPER)
- return;
- tCon = void2con(tFirstDataPtr);
- if (tCon->checkMagicNumber() != 0)
- return;
- tReturnCode = tCon->receiveDIHNDBTAMPER(aSignal);
- if (tReturnCode != -1)
- theWaiter.m_state = NO_WAIT;
- break;
+ tFirstDataPtr = int2void(tFirstData);
+ if (tFirstDataPtr == 0) goto InvalidSignal;
+
+ if (tWaitState != WAIT_NDB_TAMPER)
+ return;
+ tCon = void2con(tFirstDataPtr);
+ if (tCon->checkMagicNumber() != 0)
+ return;
+ tReturnCode = tCon->receiveDIHNDBTAMPER(aSignal);
+ if (tReturnCode != -1)
+ theWaiter.m_state = NO_WAIT;
+ break;
}
case GSN_SCAN_TABCONF:
{
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- //ndbout << "*** GSN_SCAN_TABCONF *** " << endl;
- if (tWaitState != WAIT_SCAN){
- return;
- }
- tCon = void2con(tFirstDataPtr);
- if (tCon->checkMagicNumber() != 0)
- return;
- tReturnCode = tCon->receiveSCAN_TABCONF(aSignal);
- if (tReturnCode != -1)
- theWaiter.m_state = NO_WAIT;
- break;
+ tFirstDataPtr = int2void(tFirstData);
+ assert(tFirstDataPtr);
+ assert(void2con(tFirstDataPtr));
+ assert(void2con(tFirstDataPtr)->checkMagicNumber() == 0);
+ if(tFirstDataPtr &&
+ (tCon = void2con(tFirstDataPtr)) && (tCon->checkMagicNumber() == 0)){
+
+ if(aSignal->m_noOfSections > 0){
+ tReturnCode = tCon->receiveSCAN_TABCONF(aSignal,
+ ptr[0].p, ptr[0].sz);
+ } else {
+ tReturnCode =
+ tCon->receiveSCAN_TABCONF(aSignal,
+ tDataPtr + ScanTabConf::SignalLength,
+ tLen - ScanTabConf::SignalLength);
+ }
+ if (tReturnCode != -1 && tWaitState == WAIT_SCAN)
+ theWaiter.m_state = NO_WAIT;
+ break;
+ } else {
+ goto InvalidSignal;
+ }
}
case GSN_SCAN_TABREF:
{
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- if (tWaitState == WAIT_SCAN){
+ tFirstDataPtr = int2void(tFirstData);
+ if (tFirstDataPtr == 0) goto InvalidSignal;
+
tCon = void2con(tFirstDataPtr);
+
+ assert(tFirstDataPtr != 0 &&
+ void2con(tFirstDataPtr)->checkMagicNumber() == 0);
+
if (tCon->checkMagicNumber() == 0){
tReturnCode = tCon->receiveSCAN_TABREF(aSignal);
- if (tReturnCode != -1){
+ if (tReturnCode != -1 && tWaitState == WAIT_SCAN){
theWaiter.m_state = NO_WAIT;
}
break;
}
- }
- goto InvalidSignal;
- }
- case GSN_SCAN_TABINFO:
- {
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- //ndbout << "*** GSN_SCAN_TABINFO ***" << endl;
- if (tWaitState != WAIT_SCAN)
- return;
- tCon = void2con(tFirstDataPtr);
- if (tCon->checkMagicNumber() != 0)
- return;
- tReturnCode = tCon->receiveSCAN_TABINFO(aSignal);
- if (tReturnCode != -1)
- theWaiter.m_state = NO_WAIT;
- break;
+ goto InvalidSignal;
}
case GSN_KEYINFO20: {
- void* tFirstDataPtr = int2void(tFirstData);
- if (tFirstDataPtr == 0) goto InvalidSignal;
-
- //ndbout << "*** GSN_KEYINFO20 ***" << endl;
- NdbScanReceiver* tScanRec = void2rec_srec(tFirstDataPtr);
- if (tScanRec->checkMagicNumber() != 0)
+ tFirstDataPtr = int2void(tFirstData);
+ NdbReceiver* tRec;
+ if (tFirstDataPtr && (tRec = void2rec(tFirstDataPtr)) &&
+ tRec->checkMagicNumber() && (tCon = tRec->getTransaction()) &&
+ tCon->checkState_TransId(&((const KeyInfo20*)tDataPtr)->transId1)){
+
+ Uint32 len = ((const KeyInfo20*)tDataPtr)->keyLen;
+ Uint32 info = ((const KeyInfo20*)tDataPtr)->scanInfo_Node;
+ int com = -1;
+ if(aSignal->m_noOfSections > 0 && len == ptr[0].sz){
+ com = tRec->execKEYINFO20(info, ptr[0].p, len);
+ } else if(len == tLen - KeyInfo20::HeaderLength){
+ com = tRec->execKEYINFO20(info, tDataPtr+KeyInfo20::HeaderLength, len);
+ }
+
+ switch(com){
+ case 1:
+ tCon->theScanningOp->receiver_delivered(tRec);
+ theWaiter.m_state = (tWaitState == WAIT_SCAN ? NO_WAIT : tWaitState);
+ break;
+ case 0:
+ break;
+ case -1:
+ goto InvalidSignal;
+ }
+ break;
+ } else {
+ /**
+       * This is ok, as the transaction may have been aborted before KEYINFO20
+       * arrives (if TUP is on a different node than TC)
+ */
return;
- tReturnCode = tScanRec->receiveKEYINFO20(aSignal);
- if (tReturnCode != -1)
- theWaiter.m_state = NO_WAIT;
- break;
+ }
}
case GSN_TCINDXCONF:{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
const TcIndxConf * const indxConf = (TcIndxConf *)tDataPtr;
@@ -789,7 +772,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
(tCon->theSendStatus == NdbConnection::sendTC_OP)) {
- tReturnCode = tCon->receiveTCINDXCONF(indxConf, aSignal->getLength());
+ tReturnCode = tCon->receiveTCINDXCONF(indxConf, tLen);
if (tReturnCode != -1) {
completedTransaction(tCon);
}//if
@@ -801,10 +784,10 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
indxConf->transId2,
aTCRef);
}
- break;
+ return;
}
case GSN_TCINDXREF:{
- void* tFirstDataPtr = int2void(tFirstData);
+ tFirstDataPtr = int2void(tFirstData);
if (tFirstDataPtr == 0) goto InvalidSignal;
tIndexOp = void2rec_iop(tFirstDataPtr);
@@ -865,8 +848,7 @@ Ndb::completedTransaction(NdbConnection* aCon)
Uint32 tTransArrayIndex = aCon->theTransArrayIndex;
Uint32 tNoSentTransactions = theNoOfSentTransactions;
Uint32 tNoCompletedTransactions = theNoOfCompletedTransactions;
- if ((tNoSentTransactions > 0) &&
- (aCon->theListState == NdbConnection::InSendList) &&
+ if ((tNoSentTransactions > 0) && (aCon->theListState == NdbConnection::InSendList) &&
(tTransArrayIndex < tNoSentTransactions)) {
NdbConnection* tMoveCon = theSentTransactionsArray[tNoSentTransactions - 1];
@@ -895,8 +877,8 @@ Ndb::completedTransaction(NdbConnection* aCon)
ndbout << endl << flush;
#ifdef VM_TRACE
printState("completedTransaction abort");
-#endif
abort();
+#endif
}//if
}//Ndb::completedTransaction()
@@ -965,6 +947,10 @@ Ndb::check_send_timeout()
WAITFOR_RESPONSE_TIMEOUT) {
#ifdef VM_TRACE
a_con->printState();
+ Uint32 t1 = a_con->theTransactionId;
+ Uint32 t2 = a_con->theTransactionId >> 32;
+ ndbout_c("[%.8x %.8x]", t1, t2);
+ abort();
#endif
a_con->setOperationErrorCodeAbort(4012);
a_con->theCommitStatus = NdbConnection::Aborted;
@@ -1255,8 +1241,7 @@ Return: 0 - Response received
******************************************************************************/
int
-Ndb::receiveResponse(int waitTime)
-{
+Ndb::receiveResponse(int waitTime){
int tResultCode;
TransporterFacade::instance()->checkForceSend(theNdbBlockNumber);
@@ -1310,10 +1295,10 @@ Ndb::sendRecSignal(Uint16 node_id,
if (return_code != -1) {
theWaiter.m_node = node_id;
theWaiter.m_state = aWaitState;
- return receiveResponse();
- // End of protected area
- }//if
- return_code = -3;
+ return_code = receiveResponse();
+ } else {
+ return_code = -3;
+ }
} else {
return_code = -4;
}//if
@@ -1335,8 +1320,8 @@ void
NdbConnection::sendTC_COMMIT_ACK(NdbApiSignal * aSignal,
Uint32 transId1, Uint32 transId2,
Uint32 aTCRef){
-#if 0
- ndbout_c("Sending TC_COMMIT_ACK(0x%x, 0x%x) to -> %d",
+#ifdef MARKER_TRACE
+ ndbout_c("Sending TC_COMMIT_ACK(0x%.8x, 0x%.8x) to -> %d",
transId1,
transId2,
refToNode(aTCRef));
diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp
index f451ba885d4..be168ddffbe 100644
--- a/ndb/src/ndbapi/Ndbinit.cpp
+++ b/ndb/src/ndbapi/Ndbinit.cpp
@@ -43,7 +43,7 @@ static int theNoOfNdbObjects = 0;
static char *ndbConnectString = 0;
-#ifdef NDB_WIN32
+#if defined NDB_WIN32 || defined SCO
static NdbMutex & createNdbMutex = * NdbMutex_Create();
#else
static NdbMutex createNdbMutex = NDB_MUTEX_INITIALIZER;
@@ -111,12 +111,13 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) :
theCurrentConnectCounter = 1;
theCurrentConnectIndex = 0;
- for (int i = 0; i < MAX_NDB_NODES ; i++) {
+ int i;
+ for (i = 0; i < MAX_NDB_NODES ; i++) {
theConnectionArray[i] = NULL;
the_release_ind[i] = 0;
theDBnodes[i] = 0;
}//forg
- for (int i = 0; i < 2048 ; i++) {
+ for (i = 0; i < 2048 ; i++) {
theFirstTupleId[i] = 0;
theLastTupleId[i] = 0;
}//for
diff --git a/ndb/src/ndbapi/Ndblist.cpp b/ndb/src/ndbapi/Ndblist.cpp
index e557fdc0a5f..1e1cb5e4b40 100644
--- a/ndb/src/ndbapi/Ndblist.cpp
+++ b/ndb/src/ndbapi/Ndblist.cpp
@@ -15,16 +15,13 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <NdbOut.hpp>
-#include "Ndb.hpp"
-//#include "NdbSchemaOp.hpp"
-//#include "NdbSchemaCon.hpp"
-#include "NdbOperation.hpp"
-#include "NdbScanOperation.hpp"
-#include "NdbIndexOperation.hpp"
-#include "NdbConnection.hpp"
+#include <Ndb.hpp>
+#include <NdbOperation.hpp>
+#include <NdbIndexOperation.hpp>
+#include <NdbIndexScanOperation.hpp>
+#include <NdbConnection.hpp>
#include "NdbApiSignal.hpp"
-#include "NdbRecAttr.hpp"
-#include "NdbScanReceiver.hpp"
+#include <NdbRecAttr.hpp>
#include "NdbUtil.hpp"
#include "API.hpp"
#include "NdbBlob.hpp"
@@ -264,13 +261,13 @@ Ndb::getNdbLabel()
* Remark: Get a NdbScanReceiver from theScanRecList and return the
* object .
****************************************************************************/
-NdbScanReceiver*
+NdbReceiver*
Ndb::getNdbScanRec()
{
- NdbScanReceiver* tNdbScanRec;
+ NdbReceiver* tNdbScanRec;
if ( theScanList == NULL )
{
- tNdbScanRec = new NdbScanReceiver(this);
+ tNdbScanRec = new NdbReceiver(this);
if (tNdbScanRec == NULL)
{
return NULL;
@@ -345,17 +342,17 @@ Return Value: Return theOpList : if the getScanOperation was succesful.
Return NULL : In all other case.
Remark: Get an operation from theScanOpIdleList and return the object .
***************************************************************************/
-NdbScanOperation*
+NdbIndexScanOperation*
Ndb::getScanOperation()
{
- NdbScanOperation* tOp = theScanOpIdleList;
+ NdbIndexScanOperation* tOp = theScanOpIdleList;
if (tOp != NULL ) {
- NdbScanOperation* tOpNext = (NdbScanOperation*) tOp->next();
+ NdbIndexScanOperation* tOpNext = (NdbIndexScanOperation*)tOp->next();
tOp->next(NULL);
theScanOpIdleList = tOpNext;
return tOp;
} else {
- tOp = new NdbScanOperation(this);
+ tOp = new NdbIndexScanOperation(this);
if (tOp != NULL)
tOp->next(NULL);
}
@@ -509,7 +506,7 @@ Parameters: aNdbScanRec: The NdbScanReceiver object.
Remark: Add a NdbScanReceiver object into the Scan idlelist.
***************************************************************************/
void
-Ndb::releaseNdbScanRec(NdbScanReceiver* aNdbScanRec)
+Ndb::releaseNdbScanRec(NdbReceiver* aNdbScanRec)
{
aNdbScanRec->next(theScanList);
theScanList = aNdbScanRec;
@@ -558,12 +555,12 @@ Parameters: aScanOperation : The released NdbScanOperation object.
Remark: Add a NdbScanOperation object into the signal idlelist.
***************************************************************************/
void
-Ndb::releaseScanOperation(NdbScanOperation* aScanOperation)
+Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation)
{
aScanOperation->next(theScanOpIdleList);
aScanOperation->theNdbCon = NULL;
aScanOperation->theMagicNumber = 0xFE11D2;
- theScanOpIdleList = (NdbScanOperation*)aScanOperation;
+ theScanOpIdleList = aScanOperation;
}
/***************************************************************************
@@ -592,7 +589,7 @@ Ndb::releaseSignal(NdbApiSignal* aSignal)
#if defined VM_TRACE
// Check that signal is not null
assert(aSignal != NULL);
-
+#if 0
// Check that signal is not already in list
NdbApiSignal* tmp = theSignalIdleList;
while (tmp != NULL){
@@ -600,6 +597,7 @@ Ndb::releaseSignal(NdbApiSignal* aSignal)
tmp = tmp->next();
}
#endif
+#endif
creleaseSignals++;
aSignal->next(theSignalIdleList);
theSignalIdleList = aSignal;
@@ -645,7 +643,7 @@ void
Ndb::freeScanOperation()
{
NdbScanOperation* tOp = theScanOpIdleList;
- theScanOpIdleList = (NdbScanOperation *) theScanOpIdleList->next();
+ theScanOpIdleList = (NdbIndexScanOperation *) theScanOpIdleList->next();
delete tOp;
}
@@ -696,7 +694,7 @@ Remark: Always release the first item in the free list
void
Ndb::freeNdbScanRec()
{
- NdbScanReceiver* tNdbScanRec = theScanList;
+ NdbReceiver* tNdbScanRec = theScanList;
theScanList = theScanList->next();
delete tNdbScanRec;
}
diff --git a/ndb/src/ndbapi/ObjectMap.hpp b/ndb/src/ndbapi/ObjectMap.hpp
index 4abb54b5081..f67774bb413 100644
--- a/ndb/src/ndbapi/ObjectMap.hpp
+++ b/ndb/src/ndbapi/ObjectMap.hpp
@@ -93,26 +93,28 @@ inline
void *
NdbObjectIdMap::unmap(Uint32 id, void *object){
- int i = id>>2;
+ Uint32 i = id>>2;
// lock();
-
- void * obj = m_map[i].m_obj;
- if (object == obj) {
- m_map[i].m_next = m_firstFree;
- m_firstFree = i;
- } else {
- ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj);
- return 0;
- }
-
- // unlock();
-
+ if(i < m_size){
+ void * obj = m_map[i].m_obj;
+ if (object == obj) {
+ m_map[i].m_next = m_firstFree;
+ m_firstFree = i;
+ } else {
+ ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj);
+ return 0;
+ }
+
+ // unlock();
+
#ifdef DEBUG_OBJECTMAP
- ndbout_c("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj);
+ ndbout_c("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj);
#endif
-
- return obj;
+
+ return obj;
+ }
+ return 0;
}
inline void *
@@ -120,7 +122,11 @@ NdbObjectIdMap::getObject(Uint32 id){
#ifdef DEBUG_OBJECTMAP
ndbout_c("NdbObjectIdMap::getObject(%u) obj=0x%x", id, m_map[id>>2].m_obj);
#endif
- return m_map[id>>2].m_obj;
+ id >>= 2;
+ if(id < m_size){
+ return m_map[id].m_obj;
+ }
+ return 0;
}
inline void
@@ -129,7 +135,6 @@ NdbObjectIdMap::expand(Uint32 incSize){
MapEntry * tmp = (MapEntry*)malloc(newSize * sizeof(MapEntry));
memcpy(tmp, m_map, m_size * sizeof(MapEntry));
- free(m_map);
m_map = tmp;
for(Uint32 i = m_size; i<newSize; i++){
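
The changes above add range checks to unmap() and getObject(). A compact, self-contained sketch of the same id-to-object map idea (slot index carried in the id, a free list threaded through unused slots, every lookup bounds-checked); this is an illustration, not the real NdbObjectIdMap:

    // Illustrative only: fixed-capacity id->object map with a free list and
    // bounds-checked lookups, mirroring the checks added to NdbObjectIdMap above.
    struct TinyIdMap {
      static const unsigned Size = 16;
      struct Entry { void* obj; unsigned next; };
      Entry    slots[Size];
      unsigned firstFree;

      TinyIdMap() : firstFree(0) {
        for (unsigned i = 0; i < Size; i++) { slots[i].obj = 0; slots[i].next = i + 1; }
        slots[Size - 1].next = ~0u;              // end of free list
      }
      unsigned map(void* obj) {
        if (firstFree == ~0u) return ~0u;        // full; the real map would expand()
        unsigned i = firstFree;
        firstFree = slots[i].next;
        slots[i].obj = obj;
        return i << 2;                           // the id carries the slot index
      }
      void* unmap(unsigned id, void* obj) {
        unsigned i = id >> 2;
        if (i >= Size || slots[i].obj != obj) return 0;  // bounds + identity check
        slots[i].next = firstFree;
        firstFree = i;
        return obj;
      }
      void* getObject(unsigned id) const {
        unsigned i = id >> 2;
        return i < Size ? slots[i].obj : 0;      // bounds-checked, as in the patch
      }
    };
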
diff --git a/ndb/src/ndbapi/ScanOperation.txt b/ndb/src/ndbapi/ScanOperation.txt
index 7197cf66f7e..27e4e8c1755 100644
--- a/ndb/src/ndbapi/ScanOperation.txt
+++ b/ndb/src/ndbapi/ScanOperation.txt
@@ -8,3 +8,49 @@ theNdbCon -> z
z) NdbConnection (scan)
theScanningOp -> y
theFirstOpInList -> y (until after openScan)
+
+# SU
+
+ScanOpLen: includes KeyInfo
+New protocol
+
+# -- Impl.
+
+1) Scan uses one NdbReceiver per "parallelism"
+2) Each NdbReceiver can handle up to "batch size" rows
+3) API sends one "pointer" per parallelism (prev. was one per row)
+4) API handles each receiver independently.
+   It can "nextResult" one, receive one and close one
+5) When a receiver has been "nextResult"-ed, the API can fetch from it again
+6) After doing "openScan"-req, no wait is performed
+ (only possible to block on nextResult(true) or closeScan)
+
+7) Instead of "ack"-ing each row with length,
+* Each row is sent in one long signal (unless too short)
+* Each NdbReceiver is ack-ed with #rows and sum(#length)
+* KeyInfo20 is one signal and included in sum(#length)
+
+8) The API receives the data into NdbRecAttr objects
+   (prev. it copied signals using new/delete)
+9) KeyInfo20 is also received into an NdbRecAttr object
+10)
+
+# -- Close of scan
+
+1) Each NdbReceiver gets a signal when it is complete
+ (0 rows is ack-ed)
+2) The API then "closes" this receiver
+3) The API can at any time close the scan for other reason(s)
+   (example dying)
+4) This is signalled via a NEXT_SCANREQ (close = 1)
+5) TC responds with a SCAN_TABCONF (close = 1)
+
+
+# -- Sorted
+
+1) The sorted scan is transparent to TC
+   It's an API-only impl.
+2) The API makes the following adjustments:
+* Scan all fragments simultaneously (max parallelism)
+* Never return a row to the API while an NdbReceiver is "outstanding"
+* Sort Receivers (only top row as they already are sorted within)
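
To make the notes above concrete, a rough, API-independent sketch of the receiver bookkeeping they describe: one receiver per fragment holding up to one batch of rows, drained independently, and for sorted scans the smallest top row among the receivers is handed out only once none of them is outstanding. All names below are invented for illustration:

    #include <vector>
    #include <cstddef>

    struct Row { int key; /* ... column data ... */ };

    struct Receiver {                       // one per fragment ("parallelism")
      std::vector<Row> batch;               // up to "batch size" rows, sorted within
      std::size_t next;
      Receiver() : next(0) {}
      bool hasRow() const { return next < batch.size(); }
      const Row& top() const { return batch[next]; }
      void pop() { next++; }                // once drained, the API re-arms the receiver
    };

    // Sorted scan: hand out the smallest top row across receivers; if every
    // receiver is drained, the caller must fetch the next batches first.
    const Row* next_sorted_row(std::vector<Receiver>& recs)
    {
      Receiver* best = 0;
      for (std::size_t i = 0; i < recs.size(); i++)
        if (recs[i].hasRow() && (best == 0 || recs[i].top().key < best->top().key))
          best = &recs[i];
      if (best == 0) return 0;              // nothing buffered: need another fetch round
      const Row* row = &best->top();
      best->pop();
      return row;
    }
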
diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp
index e725144a8f8..7ec9a6a55a3 100644
--- a/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/ndb/src/ndbapi/TransporterFacade.cpp
@@ -34,11 +34,8 @@
#include <ndb_version.h>
#include <SignalLoggerManager.hpp>
-#if !defined NDB_OSE && !defined NDB_SOFTOSE
-#include <signal.h>
-#endif
-
//#define REPORT_TRANSPORTER
+//#define API_TRACE
#if defined DEBUG_TRANSPORTER
#define TRP_DEBUG(t) ndbout << __FILE__ << ":" << __LINE__ << ":" << t << endl;
@@ -47,7 +44,7 @@
#endif
TransporterFacade* TransporterFacade::theFacadeInstance = NULL;
-
+ConfigRetriever *TransporterFacade::s_config_retriever= 0;
/*****************************************************************************
@@ -160,14 +157,12 @@ setSignalLog(){
}
return false;
}
+#ifdef TRACE_APIREGREQ
+#define TRACE_GSN(gsn) true
+#else
+#define TRACE_GSN(gsn) (gsn != GSN_API_REGREQ && gsn != GSN_API_REGCONF)
+#endif
#endif
-
-// These symbols are needed, but not used in the API
-int g_sectionSegmentPool;
-struct ErrorReporter {
- void handleAssert(const char*, const char*, int);
-};
-void ErrorReporter::handleAssert(const char* message, const char* file, int line) {}
/**
* The execute function : Handle received signal
@@ -182,9 +177,7 @@ execute(void * callbackObj, SignalHeader * const header,
Uint32 tRecBlockNo = header->theReceiversBlockNumber;
#ifdef API_TRACE
- if(setSignalLog()){
- // header->theVerId_signalNumber != GSN_API_REGREQ &&
- // header->theVerId_signalNumber != GSN_API_REGCONF){
+ if(setSignalLog() && TRACE_GSN(header->theVerId_signalNumber)){
signalLogger.executeSignal(* header,
prio,
theData,
@@ -314,6 +307,14 @@ execute(void * callbackObj, SignalHeader * const header,
}
}
+// These symbols are needed, but not used in the API
+void
+SignalLoggerManager::printSegmentedSection(FILE *, const SignalHeader &,
+ const SegmentedSectionPtr ptr[3],
+ unsigned i){
+ abort();
+}
+
void
copy(Uint32 * & insertPtr,
class SectionSegmentPool & thePool, const SegmentedSectionPtr & _ptr){
@@ -332,29 +333,48 @@ atexit_stop_instance(){
*
* Which is protected by a mutex
*/
+
+
TransporterFacade*
TransporterFacade::start_instance(const char * connectString){
// TransporterFacade used from API get config from mgmt srvr
- ConfigRetriever configRetriever;
- configRetriever.setConnectString(connectString);
- ndb_mgm_configuration * props = configRetriever.getConfig(NDB_VERSION,
- NODE_TYPE_API);
- if (props == 0) {
- ndbout << "Configuration error: ";
- const char* erString = configRetriever.getErrorString();
- if (erString == 0) {
- erString = "No error specified!";
+ s_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_API);
+
+ s_config_retriever->setConnectString(connectString);
+ const char* error = 0;
+ do {
+ if(s_config_retriever->init() == -1)
+ break;
+
+ if(s_config_retriever->do_connect() == -1)
+ break;
+
+ Uint32 nodeId = s_config_retriever->allocNodeId();
+ for(Uint32 i = 0; nodeId == 0 && i<5; i++){
+ NdbSleep_SecSleep(3);
+ nodeId = s_config_retriever->allocNodeId();
}
- ndbout << erString << endl;
- return 0;
- }
- const int nodeId = configRetriever.getOwnNodeId();
-
- TransporterFacade * tf = start_instance(nodeId, props);
+ if(nodeId == 0)
+ break;
+
+ ndb_mgm_configuration * props = s_config_retriever->getConfig();
+ if(props == 0)
+ break;
+
+ TransporterFacade * tf = start_instance(nodeId, props);
+
+ free(props);
+ return tf;
+ } while(0);
- free(props);
- return tf;
+ ndbout << "Configuration error: ";
+ const char* erString = s_config_retriever->getErrorString();
+ if (erString == 0) {
+ erString = "No error specified!";
+ }
+ ndbout << erString << endl;
+ return 0;
}
TransporterFacade*
@@ -389,6 +409,14 @@ TransporterFacade::start_instance(int nodeId,
return tf;
}
+void
+TransporterFacade::close_configuration(){
+ if (s_config_retriever) {
+ delete s_config_retriever;
+ s_config_retriever= 0;
+ }
+}
+
/**
* Note that this function need no locking since its
* only called from the destructor of Ndb (the NdbObject)
@@ -397,6 +425,9 @@ TransporterFacade::start_instance(int nodeId,
*/
void
TransporterFacade::stop_instance(){
+
+ close_configuration();
+
if(theFacadeInstance == NULL){
/**
* We are called from atexit function
@@ -440,7 +471,21 @@ runSendRequest_C(void * me)
void TransporterFacade::threadMainSend(void)
{
+ SocketServer socket_server;
+
theTransporterRegistry->startSending();
+ if (!theTransporterRegistry->start_service(socket_server)){
+ ndbout_c("Unable to start theTransporterRegistry->start_service");
+ exit(0);
+ }
+
+ if (!theTransporterRegistry->start_clients()){
+ ndbout_c("Unable to start theTransporterRegistry->start_clients");
+ exit(0);
+ }
+
+ socket_server.startServer();
+
while(!theStopReceive) {
NdbSleep_MilliSleep(10);
NdbMutex_Lock(theMutexPtr);
@@ -451,6 +496,11 @@ void TransporterFacade::threadMainSend(void)
NdbMutex_Unlock(theMutexPtr);
}
theTransporterRegistry->stopSending();
+
+ socket_server.stopServer();
+ socket_server.stopSessions();
+
+ theTransporterRegistry->stop_clients();
}
extern "C"
@@ -466,7 +516,7 @@ void TransporterFacade::threadMainReceive(void)
{
theTransporterRegistry->startReceiving();
NdbMutex_Lock(theMutexPtr);
- theTransporterRegistry->checkConnections();
+ theTransporterRegistry->update_connections();
NdbMutex_Unlock(theMutexPtr);
while(!theStopReceive) {
for(int i = 0; i<10; i++){
@@ -478,7 +528,7 @@ void TransporterFacade::threadMainReceive(void)
}
}
NdbMutex_Lock(theMutexPtr);
- theTransporterRegistry->checkConnections();
+ theTransporterRegistry->update_connections();
NdbMutex_Unlock(theMutexPtr);
}//while
theTransporterRegistry->stopReceiving();
@@ -498,6 +548,7 @@ TransporterFacade::TransporterFacade() :
theClusterMgr = NULL;
theArbitMgr = NULL;
theStartNodeId = 1;
+ m_open_count = 0;
}
bool
@@ -652,6 +703,7 @@ TransporterFacade::open(void* objRef,
ExecuteFunction fun,
NodeStatusFunction statusFun)
{
+ m_open_count++;
return m_threads.open(objRef, fun, statusFun);
}
@@ -732,8 +784,7 @@ TransporterFacade::checkForceSend(Uint32 block_number) {
/******************************************************************************
* SEND SIGNAL METHODS
- ******************************************************************************/
-
+ *****************************************************************************/
int
TransporterFacade::sendSignal(NdbApiSignal * aSignal, NodeId aNode){
Uint32* tDataPtr = aSignal->getDataPtrSend();
@@ -741,9 +792,7 @@ TransporterFacade::sendSignal(NdbApiSignal * aSignal, NodeId aNode){
Uint32 TBno = aSignal->theReceiversBlockNumber;
if(getIsNodeSendable(aNode) == true){
#ifdef API_TRACE
- if(setSignalLog()){
- // aSignal->theVerId_signalNumber != GSN_API_REGREQ &&
- // aSignal->theVerId_signalNumber != GSN_API_REGCONF){
+ if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
Uint32 tmp = aSignal->theSendersBlockRef;
aSignal->theSendersBlockRef = numberToRef(tmp, theOwnId);
LinearSectionPtr ptr[3];
@@ -777,9 +826,7 @@ TransporterFacade::sendSignal(NdbApiSignal * aSignal, NodeId aNode){
int
TransporterFacade::sendSignalUnCond(NdbApiSignal * aSignal, NodeId aNode){
#ifdef API_TRACE
- if(setSignalLog()){
- //aSignal->theVerId_signalNumber != GSN_API_REGREQ &&
- //aSignal->theVerId_signalNumber != GSN_API_REGCONF
+ if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
Uint32 tmp = aSignal->theSendersBlockRef;
aSignal->theSendersBlockRef = numberToRef(tmp, theOwnId);
LinearSectionPtr ptr[3];
@@ -809,7 +856,7 @@ TransporterFacade::sendFragmentedSignal(NdbApiSignal* aSignal, NodeId aNode,
aSignal->m_noOfSections = secs;
if(getIsNodeSendable(aNode) == true){
#ifdef API_TRACE
- if(setSignalLog()){
+ if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
Uint32 tmp = aSignal->theSendersBlockRef;
aSignal->theSendersBlockRef = numberToRef(tmp, theOwnId);
signalLogger.sendSignal(* aSignal,
@@ -845,7 +892,7 @@ TransporterFacade::sendFragmentedSignalUnCond(NdbApiSignal* aSignal,
aSignal->m_noOfSections = secs;
#ifdef API_TRACE
- if(setSignalLog()){
+ if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
Uint32 tmp = aSignal->theSendersBlockRef;
aSignal->theSendersBlockRef = numberToRef(tmp, theOwnId);
signalLogger.sendSignal(* aSignal,
@@ -875,13 +922,13 @@ TransporterFacade::sendFragmentedSignalUnCond(NdbApiSignal* aSignal,
void
TransporterFacade::doConnect(int aNodeId){
theTransporterRegistry->setIOState(aNodeId, NoHalt);
- theTransporterRegistry->setPerformState(aNodeId, PerformConnect);
+ theTransporterRegistry->do_connect(aNodeId);
}
void
TransporterFacade::doDisconnect(int aNodeId)
{
- theTransporterRegistry->setPerformState(aNodeId, PerformDisconnect);
+ theTransporterRegistry->do_disconnect(aNodeId);
}
void
@@ -906,7 +953,7 @@ TransporterFacade::ownId() const
bool
TransporterFacade::isConnected(NodeId aNodeId){
- return theTransporterRegistry->performState(aNodeId) == PerformIO;
+ return theTransporterRegistry->is_connected(aNodeId);
}
NodeId
@@ -992,3 +1039,6 @@ TransporterFacade::ThreadData::close(int number){
m_statusFunction[number] = 0;
return 0;
}
+
+template class Vector<NodeStatusFunction>;
+template class Vector<TransporterFacade::ThreadData::Object_Execute>;
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp
index 4b76cbe864a..14da4b11aa1 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/ndb/src/ndbapi/TransporterFacade.hpp
@@ -29,6 +29,7 @@ class ClusterMgr;
class ArbitMgr;
class IPCConfig;
struct ndb_mgm_configuration;
+class ConfigRetriever;
class Ndb;
class NdbApiSignal;
@@ -56,6 +57,7 @@ public:
static TransporterFacade* instance();
static TransporterFacade* start_instance(int, const ndb_mgm_configuration*);
static TransporterFacade* start_instance(const char *connectString);
+ static void close_configuration();
static void stop_instance();
/**
@@ -110,7 +112,6 @@ public:
// Close this block number
int close_local(BlockNumber blockNumber);
- void setState(Uint32 aNodeId, PerformState aState);
private:
/**
@@ -160,7 +161,9 @@ private:
/**
* Block number handling
*/
+public:
static const unsigned MAX_NO_THREADS = 4711;
+private:
struct ThreadData {
static const Uint32 ACTIVE = (1 << 16) | 1;
@@ -208,6 +211,8 @@ private:
}
} m_threads;
+ Uint32 m_open_count;
+
/**
* execute function
*/
@@ -219,6 +224,7 @@ public:
NdbMutex* theMutexPtr;
private:
static TransporterFacade* theFacadeInstance;
+ static ConfigRetriever *s_config_retriever;
public:
GlobalDictCache m_globalDictCache;
@@ -312,6 +318,7 @@ TransporterFacade::getIsNodeSendable(NodeId n) const {
"%d of node: %d",
node.m_info.m_type, n);
abort();
+ return false; // to remove compiler warning
}
}
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 760322d669d..66a89326a66 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -91,6 +91,9 @@ ErrorBundle ErrorCodes[] = {
{ 4029, NR, "Node failure caused abort of transaction" },
{ 4031, NR, "Node failure caused abort of transaction" },
{ 4033, NR, "Send to NDB failed" },
+ { 4115, NR,
+ "Transaction was committed but all read information was not "
+ "received due to node crash" },
/**
* Node shutdown
@@ -114,9 +117,6 @@ ErrorBundle ErrorCodes[] = {
"Time-out, most likely caused by simple read or cluster failure" },
{ 4024, UR,
"Time-out, most likely caused by simple read or cluster failure" },
- { 4115, UR,
- "Transaction was committed but all read information was not "
- "received due to node crash" },
/**
* TemporaryResourceError
@@ -404,7 +404,6 @@ ErrorBundle ErrorCodes[] = {
{ 4243, AE, "Index not found" },
{ 4244, AE, "Index or table with given name already exists" },
{ 4245, AE, "Index attribute must be defined as stored, i.e. the StorageAttributeType must be defined as NormalStorageAttribute"},
- { 4246, AE, "Combined index attributes are not allowed to be NULL attributes" },
{ 4247, AE, "Illegal index/trigger create/drop/alter request" },
{ 4248, AE, "Trigger/index name invalid" },
{ 4249, AE, "Invalid table" },
diff --git a/ndb/test/include/HugoOperations.hpp b/ndb/test/include/HugoOperations.hpp
index 7295b72b18f..6bd8f7204b2 100644
--- a/ndb/test/include/HugoOperations.hpp
+++ b/ndb/test/include/HugoOperations.hpp
@@ -57,10 +57,6 @@ public:
int pkDeleteRecord(Ndb*,
int recordNo,
int numRecords = 1);
-
- int scanReadRecords(Ndb* pNdb,
- Uint32 parallelism = 240, ScanLock lock = SL_Read);
- int executeScanRead(Ndb*);
int execute_Commit(Ndb*,
AbortOption ao = AbortOnError);
@@ -93,7 +89,11 @@ public:
int recordNo,
int numRecords = 1,
int updatesValue = 0);
-
+
+ int scanReadRecords(Ndb*, NdbScanOperation::LockMode =
+ NdbScanOperation::LM_CommittedRead,
+ int numRecords = 1);
+
protected:
void allocRows(int rows);
void deallocRows();
@@ -102,48 +102,12 @@ protected:
HugoCalculator calc;
Vector<BaseString> savedRecords;
+
+ struct RsPair { NdbResultSet* m_result_set; int records; };
+ Vector<RsPair> m_result_sets;
+ Vector<RsPair> m_executed_result_sets;
private:
NdbConnection* pTrans;
-
- struct ScanTmp {
- ScanTmp() {
- pTrans = 0;
- m_tmpRow = 0;
- m_delete = true;
- m_op = DONE;
- }
- ScanTmp(NdbConnection* a, NDBT_ResultRow* b){
- pTrans = a;
- m_tmpRow = b;
- m_delete = true;
- m_op = DONE;
- }
- ScanTmp(const ScanTmp& org){
- * this = org;
- }
- ScanTmp& operator=(const ScanTmp& org){
- pTrans = org.pTrans;
- m_tmpRow = org.m_tmpRow;
- m_delete = org.m_delete;
- m_op = org.m_op;
- return * this;
- }
-
- ~ScanTmp() {
- if(m_delete && pTrans)
- pTrans->close();
- if(m_delete && m_tmpRow)
- delete m_tmpRow;
- }
-
- NdbConnection * pTrans;
- NDBT_ResultRow * m_tmpRow;
- bool m_delete;
- enum { DONE, READ, UPDATE, DELETE } m_op;
- };
- Vector<ScanTmp> m_scans;
- int run(ScanTmp & tmp);
-
};
#endif
diff --git a/ndb/test/include/HugoTransactions.hpp b/ndb/test/include/HugoTransactions.hpp
index 5ff1fef16bc..280d9490f15 100644
--- a/ndb/test/include/HugoTransactions.hpp
+++ b/ndb/test/include/HugoTransactions.hpp
@@ -34,16 +34,17 @@ public:
int records,
int batch = 512,
bool allowConstraintViolation = true,
- int doSleep = 0);
+ int doSleep = 0,
+ bool oneTrans = false);
int scanReadRecords(Ndb*,
int records,
int abort = 0,
- int parallelism = 1,
+ int parallelism = 0,
bool committed = false);
int scanReadCommittedRecords(Ndb*,
int records,
int abort = 0,
- int parallelism = 1);
+ int parallelism = 0);
int pkReadRecords(Ndb*,
int records,
int batchsize = 1,
@@ -52,20 +53,20 @@ public:
int scanUpdateRecords(Ndb*,
int records,
int abort = 0,
- int parallelism = 1);
+ int parallelism = 0);
int scanUpdateRecords1(Ndb*,
int records,
int abort = 0,
- int parallelism = 1);
+ int parallelism = 0);
int scanUpdateRecords2(Ndb*,
int records,
int abort = 0,
- int parallelism = 1);
+ int parallelism = 0);
int scanUpdateRecords3(Ndb*,
int records,
int abort = 0,
- int parallelism = 1);
+ int parallelism = 0);
int pkUpdateRecords(Ndb*,
int records,
@@ -100,24 +101,6 @@ public:
int batchsize = 1);
protected:
- int takeOverAndUpdateRecord(Ndb*,
- NdbOperation*);
-#if 0
- int setValueForAttr(NdbOperation*,
- int attrId,
- int rowId,
- int updateId);
-public:
- int equalForAttr(NdbOperation*,
- int attrId,
- int rowId);
-#endif
-
- int addRowToUpdate(Ndb* pNdb,
- NdbConnection* pUpdTrans,
- NdbOperation* pOrgOp);
-
-
NDBT_ResultRow row;
int m_defaultScanUpdateMethod;
};
diff --git a/ndb/test/include/NDBT_Table.hpp b/ndb/test/include/NDBT_Table.hpp
index eee76773106..59db3ed1092 100644
--- a/ndb/test/include/NDBT_Table.hpp
+++ b/ndb/test/include/NDBT_Table.hpp
@@ -23,10 +23,9 @@
#include <NdbOut.hpp>
class NDBT_Attribute : public NdbDictionary::Column {
- friend class NdbOut& operator <<(class NdbOut&, const NDBT_Attribute &);
public:
NDBT_Attribute(const char* _name,
- Column::Type _type,
+ NdbDictionary::Column::Type _type,
int _length = 1,
bool _pk = false,
bool _nullable = false):
@@ -51,13 +50,12 @@ public:
NDBT_Table(const char* name,
int noOfAttributes,
- const NdbDictionary::Column attributes[],
- bool stored = true)
+ const NdbDictionary::Column attributes[])
: NdbDictionary::Table(name)
{
assert(name != 0);
- setStoredTable(stored);
+ //setStoredTable(stored);
for(int i = 0; i<noOfAttributes; i++)
addColumn(attributes[i]);
}
diff --git a/ndb/test/include/NDBT_Tables.hpp b/ndb/test/include/NDBT_Tables.hpp
index 1da9818ee70..aa78f7d4e2c 100644
--- a/ndb/test/include/NDBT_Tables.hpp
+++ b/ndb/test/include/NDBT_Tables.hpp
@@ -26,7 +26,8 @@
class NDBT_Tables {
public:
- static int createTable(Ndb* pNdb, const char* _name, bool _temp = false);
+ static int createTable(Ndb* pNdb, const char* _name, bool _temp = false,
+ bool existsOK = false);
static int createAllTables(Ndb* pNdb, bool _temp, bool existsOK = false);
static int createAllTables(Ndb* pNdb);
diff --git a/ndb/test/include/NDBT_Test.hpp b/ndb/test/include/NDBT_Test.hpp
index 7a5d14689bc..6a968c491ae 100644
--- a/ndb/test/include/NDBT_Test.hpp
+++ b/ndb/test/include/NDBT_Test.hpp
@@ -63,6 +63,8 @@ public:
bool getPropertyWait(const char*, Uint32);
const char* getPropertyWait(const char*, const char* );
+ void decProperty(const char *);
+
// Communicate with other tests
void stopTest();
bool isTestStopped();
@@ -110,6 +112,7 @@ public:
NDBT_Step(NDBT_TestCase* ptest,
const char* pname,
NDBT_TESTFUNC* pfunc);
+ virtual ~NDBT_Step() {}
int execute(NDBT_Context*);
virtual int setUp() = 0;
virtual void tearDown() = 0;
@@ -132,8 +135,9 @@ public:
NDBT_NdbApiStep(NDBT_TestCase* ptest,
const char* pname,
NDBT_TESTFUNC* pfunc);
- int setUp();
- void tearDown();
+ virtual ~NDBT_NdbApiStep() {}
+ virtual int setUp();
+ virtual void tearDown();
Ndb* getNdb();
protected:
@@ -145,6 +149,7 @@ public:
NDBT_ParallelStep(NDBT_TestCase* ptest,
const char* pname,
NDBT_TESTFUNC* pfunc);
+ virtual ~NDBT_ParallelStep() {}
};
class NDBT_Verifier : public NDBT_NdbApiStep {
@@ -152,6 +157,7 @@ public:
NDBT_Verifier(NDBT_TestCase* ptest,
const char* name,
NDBT_TESTFUNC* func);
+ virtual ~NDBT_Verifier() {}
};
class NDBT_Initializer : public NDBT_NdbApiStep {
@@ -159,6 +165,7 @@ public:
NDBT_Initializer(NDBT_TestCase* ptest,
const char* name,
NDBT_TESTFUNC* func);
+ virtual ~NDBT_Initializer() {}
};
class NDBT_Finalizer : public NDBT_NdbApiStep {
@@ -166,6 +173,7 @@ public:
NDBT_Finalizer(NDBT_TestCase* ptest,
const char* name,
NDBT_TESTFUNC* func);
+ virtual ~NDBT_Finalizer() {}
};
@@ -174,7 +182,8 @@ public:
NDBT_TestCase(NDBT_TestSuite* psuite,
const char* name,
const char* comment);
- virtual ~NDBT_TestCase(){};
+ virtual ~NDBT_TestCase(){}
+
// This is the default executor of a test case
// When a test case is executed it will need to be supplied with a number of
// different parameters and settings, these are passed to the test in the
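Note on the NDBT_Test.hpp hunks above: they add virtual destructors to the step, verifier, initializer and finalizer classes. A minimal standalone illustration of why that matters when objects are deleted through a base-class pointer (plain C++, nothing NDB-specific; the class names below are invented):

    #include <cstdio>

    struct Step {                       // stand-in for NDBT_Step and friends
      virtual int setUp() = 0;
      virtual ~Step() {}                // without this, delete-through-base is undefined behaviour
    };

    struct ApiStep : Step {
      int setUp() { return 0; }
      ~ApiStep() { std::printf("ApiStep cleaned up\n"); }
    };

    int main() {
      Step* s = new ApiStep();
      s->setUp();
      delete s;                         // runs ~ApiStep() only because ~Step() is virtual
      return 0;
    }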
diff --git a/ndb/test/include/NdbRestarter.hpp b/ndb/test/include/NdbRestarter.hpp
index b4c29a87eff..114726f6a2b 100644
--- a/ndb/test/include/NdbRestarter.hpp
+++ b/ndb/test/include/NdbRestarter.hpp
@@ -19,6 +19,7 @@
#include <mgmapi.h>
#include <Vector.hpp>
+#include <BaseString.hpp>
class NdbRestarter {
public:
@@ -85,8 +86,8 @@ protected:
Vector<ndb_mgm_node_state> apiNodes;
bool connected;
- const char* addr;
- const char* host;
+ BaseString addr;
+ BaseString host;
int port;
NdbMgmHandle handle;
ndb_mgm_configuration * m_config;
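The NdbRestarter hunk above turns the addr/host members from borrowed const char* pointers into BaseString objects. A sketch of the ownership difference, using std::string as a stand-in for BaseString (assumption: BaseString is an owning string class; this is illustration, not NDB code):

    #include <string>

    struct BorrowingRestarter { const char* host; };   // only borrows the caller's buffer
    struct OwningRestarter    { std::string host; };   // keeps its own copy, like BaseString

    void configure(OwningRestarter& r) {
      std::string tmp("ndb_mgmd.example.com:1186");    // hypothetical connect string
      r.host = tmp;                                    // copied, so r.host stays valid
    }                                                  // tmp destroyed here, no dangling pointer

With the raw-pointer version, host stayed valid only as long as whoever parsed the connect string kept its buffer alive.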
diff --git a/ndb/test/include/UtilTransactions.hpp b/ndb/test/include/UtilTransactions.hpp
index b16ab74455e..1298028d591 100644
--- a/ndb/test/include/UtilTransactions.hpp
+++ b/ndb/test/include/UtilTransactions.hpp
@@ -34,24 +34,24 @@ public:
int clearTable(Ndb*,
int records = 0,
- int parallelism = 240);
+ int parallelism = 0);
// Delete all records from the table using a scan
int clearTable1(Ndb*,
int records = 0,
- int parallelism = 16);
+ int parallelism = 0);
// Delete all records from the table using a scan
// Using batching
int clearTable2(Ndb*,
int records = 0,
- int parallelism = 240);
+ int parallelism = 0);
int clearTable3(Ndb*,
int records = 0,
- int parallelism = 240);
+ int parallelism = 0);
int selectCount(Ndb*,
- int parallelism = 16,
+ int parallelism = 0,
int* count_rows = NULL,
ScanLock lock = SL_Read,
NdbConnection* pTrans = NULL);
@@ -64,7 +64,7 @@ public:
ReadCallBackFn* fn = NULL);
int verifyIndex(Ndb*,
const char* indexName,
- int parallelism = 240,
+ int parallelism = 0,
bool transactional = false);
int copyTableData(Ndb*,
@@ -88,7 +88,7 @@ private:
int verifyUniqueIndex(Ndb*,
const char* indexName,
- int parallelism = 240,
+ int parallelism = 0,
bool transactional = false);
int scanAndCompareUniqueIndex(Ndb* pNdb,
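The UtilTransactions defaults above drop the hard-coded parallelism values (16/240) in favour of 0. Read together with the readTuples() calls later in this patch, 0 appears to mean "let the NDB API/kernel pick a suitable value"; that interpretation, and the comment wording below, are assumptions rather than something stated in the patch:

    // Hypothetical call site: batch and parallelism left at 0 so the
    // implementation chooses values instead of the old constants.
    NdbResultSet* rs = pOp->readTuples(NdbScanOperation::LM_Read,
                                       0 /* batch */,
                                       0 /* parallelism */);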
diff --git a/ndb/test/ndbapi/Makefile.am b/ndb/test/ndbapi/Makefile.am
index 7b3648bdc45..51bc11f8a25 100644
--- a/ndb/test/ndbapi/Makefile.am
+++ b/ndb/test/ndbapi/Makefile.am
@@ -9,7 +9,6 @@ create_tab \
flexAsynch \
flexBench \
flexHammer \
-flexScan \
flexTT \
testBackup \
testBasic \
@@ -26,9 +25,11 @@ testOperations \
testRestartGci \
testScan \
testScanInterpreter \
+testScanPerf \
testSystemRestart \
testTimeout \
testTransactions \
+testDeadlock \
test_event
#flexTimedAsynch
@@ -41,7 +42,6 @@ drop_all_tabs_SOURCES = drop_all_tabs.cpp
flexAsynch_SOURCES = flexAsynch.cpp
flexBench_SOURCES = flexBench.cpp
flexHammer_SOURCES = flexHammer.cpp
-flexScan_SOURCES = flexScan.cpp
flexTT_SOURCES = flexTT.cpp
#flexTimedAsynch_SOURCES = flexTimedAsynch.cpp
#flex_bench_mysql_SOURCES = flex_bench_mysql.cpp
@@ -60,9 +60,11 @@ testOperations_SOURCES = testOperations.cpp
testRestartGci_SOURCES = testRestartGci.cpp
testScan_SOURCES = testScan.cpp ScanFunctions.hpp
testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp
+testScanPerf_SOURCES = testScanPerf.cpp
testSystemRestart_SOURCES = testSystemRestart.cpp
testTimeout_SOURCES = testTimeout.cpp
testTransactions_SOURCES = testTransactions.cpp
+testDeadlock_SOURCES = testDeadlock.cpp
test_event_SOURCES = test_event.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel
@@ -79,3 +81,4 @@ testBackup_LDADD = $(LDADD) bank/libbank.a
# Don't update the files from bitkeeper
%::SCCS/s.%
+
diff --git a/ndb/test/ndbapi/Makefile_old b/ndb/test/ndbapi/Makefile_old
index 34761e1eb9c..c3198096ec0 100644
--- a/ndb/test/ndbapi/Makefile_old
+++ b/ndb/test/ndbapi/Makefile_old
@@ -4,12 +4,11 @@ include .defs.mk
ifeq ($(NDB_OS), OSE)
DIRS = basic flexBench flexAsynch
else
-DIRS = lmc-bench bank ronja
+DIRS = lmc-bench ronja
BIN_DIRS = \
flexAsynch \
flexBench \
flexHammer \
- flexScan \
flexTT \
create_tab \
create_all_tabs \
@@ -32,7 +31,6 @@ BIN_DIRS = \
testDataBuffers \
testDict \
acid \
- interpreterInTup \
telco \
indexTest \
test_event \
diff --git a/ndb/test/ndbapi/ScanFunctions.hpp b/ndb/test/ndbapi/ScanFunctions.hpp
index 36d01909861..2ff4b751c33 100644
--- a/ndb/test/ndbapi/ScanFunctions.hpp
+++ b/ndb/test/ndbapi/ScanFunctions.hpp
@@ -79,8 +79,9 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
const int retryMax = 100;
int sleepTime = 10;
int check;
- NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbConnection *pTrans = 0;
+ NdbScanOperation *pOp = 0;
+ NdbResultSet *rs = 0;
while (true){
if (retryAttempt >= retryMax){
@@ -103,117 +104,81 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
}
// Execute the scan without defining a scan operation
- if(action != ExecuteScanWithOutOpenScan){
-
- if (action == OnlyOneOpBeforeOpenScan){
- // There can only be one operation defined when calling openScan
- NdbOperation* pOp3;
- pOp3 = pTrans->getNdbOperation(tab.getName());
- if (pOp3 == NULL) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- }
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
+ pOp = pTrans->getNdbScanOperation(tab.getName());
+ if (pOp == NULL) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+
+ rs = pOp->readTuples(exclusive ?
+ NdbScanOperation::LM_Exclusive :
+ NdbScanOperation::LM_Read);
+
+ if( rs == 0 ) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+
+ if (action == OnlyOpenScanOnce){
+ // Call openScan one more time when it's already defined
+ NdbResultSet* rs2 = pOp->readTuples(NdbScanOperation::LM_Read);
+ if( rs2 == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
- if (exclusive == true)
- check = pOp->openScanExclusive(parallelism);
- else
- check = pOp->openScanRead(parallelism);
+ }
+
+ if (action==EqualAfterOpenScan){
+ check = pOp->equal(tab.getColumn(0)->getName(), 10);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
- }
-
-
- if (action == OnlyOneScanPerTrans){
- // There can only be one operation in a scan transaction
- NdbOperation* pOp4;
- pOp4 = pTrans->getNdbOperation(tab.getName());
- if (pOp4 == NULL) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- }
-
- if (action == OnlyOpenScanOnce){
- // Call openScan one more time when it's already defined
- check = pOp->openScanRead(parallelism);
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- }
-
- if (action == OnlyOneOpInScanTrans){
- // Try to add another op to this scanTransaction
- NdbOperation* pOp2;
- pOp2 = pTrans->getNdbOperation(tab.getName());
- if (pOp2 == NULL) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- }
-
-
- if (action==EqualAfterOpenScan){
- check = pOp->equal(tab.getColumn(0)->getName(), 10);
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- }
-
- check = pOp->interpret_exit_ok();
- if( check == -1 ) {
+ }
+ }
+
+ check = pOp->interpret_exit_ok();
+ if( check == -1 ) {
+ ERR(pTrans->getNdbError());
+ pNdb->closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
+ for(int a = 0; a<tab.getNoOfColumns(); a++){
+ if(pOp->getValue(tab.getColumn(a)->getName()) == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
- for(int a = 0; a<tab.getNoOfColumns(); a++){
- if(pOp->getValue(tab.getColumn(a)->getName()) == NULL) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- }
- }
- check = pTrans->executeScan();
+ }
+
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
-
+
int abortCount = records / 10;
bool abortTrans = (action==CloseWithoutStop);
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
-
+ eof = rs->nextResult();
+
while(eof == 0){
rows++;
-
+
if (abortCount == rows && abortTrans == true){
g_info << "Scan is aborted after "<<abortCount<<" rows" << endl;
if (action != CloseWithoutStop){
// Test that we can closeTrans without stopScan
- check = pTrans->stopScan();
+ rs->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -236,7 +201,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
}
}
- eof = pTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
@@ -246,7 +211,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
// Be cruel, call nextScanResult after error
for(int i=0; i<10; i++){
- eof =pTrans->nextScanResult();
+ eof = rs->nextResult();
if(eof == 0){
g_err << "nextScanResult returned eof = " << eof << endl
<< " That is an error when there are no more records" << endl;
@@ -276,7 +241,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
if (action == NextScanWhenNoMore){
g_info << "Calling nextScanresult when there are no more records" << endl;
for(int i=0; i<10; i++){
- eof =pTrans->nextScanResult();
+ eof = rs->nextResult();
if(eof == 0){
g_err << "nextScanResult returned eof = " << eof << endl
<< " That is an error when there are no more records" << endl;
@@ -285,7 +250,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
}
}
- if(action ==CheckInactivityBeforeClose){
+ if(action == CheckInactivityBeforeClose){
// Sleep for a long time before calling close
g_info << "NdbSleep_SecSleep(5) before close transaction" << endl;
NdbSleep_SecSleep(5);
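The ScanFunctions.hpp conversion above is representative of the mechanical change applied across the test suite: openScanRead()/openScanExclusive(), executeScan(), nextScanResult() and stopScan() give way to getNdbScanOperation(), readTuples() returning an NdbResultSet, execute(NoCommit), nextResult() and NdbResultSet::close(). A condensed sketch of the new-style read loop, built only from calls visible in the hunks; the function, header name and error-handling shape are illustrative:

    #include <NdbApi.hpp>

    int scanAllRows(Ndb* pNdb, const NdbDictionary::Table& tab)
    {
      NdbConnection* pTrans = pNdb->startTransaction();
      if (pTrans == 0) return -1;

      NdbScanOperation* pOp = pTrans->getNdbScanOperation(tab.getName());
      if (pOp == 0) { pNdb->closeTransaction(pTrans); return -1; }

      // The lock mode argument replaces the old openScanRead()/openScanExclusive() choice
      NdbResultSet* rs = pOp->readTuples(NdbScanOperation::LM_Read);
      if (rs == 0) { pNdb->closeTransaction(pTrans); return -1; }

      if (pOp->interpret_exit_ok() == -1) { pNdb->closeTransaction(pTrans); return -1; }

      for (int a = 0; a < tab.getNoOfColumns(); a++)
        if (pOp->getValue(tab.getColumn(a)->getName()) == 0)
          { pNdb->closeTransaction(pTrans); return -1; }

      // The scan is started with a normal execute(NoCommit), not executeScan()
      if (pTrans->execute(NoCommit) == -1) { pNdb->closeTransaction(pTrans); return -1; }

      int rows = 0, eof;
      while ((eof = rs->nextResult()) == 0)   // replaces nextScanResult()
        rows++;

      pNdb->closeTransaction(pTrans);
      return (eof == -1) ? -1 : rows;         // -1 means the scan itself failed
    }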
diff --git a/ndb/test/ndbapi/ScanInterpretTest.hpp b/ndb/test/ndbapi/ScanInterpretTest.hpp
index 3862de34111..e8a0d4b6dca 100644
--- a/ndb/test/ndbapi/ScanInterpretTest.hpp
+++ b/ndb/test/ndbapi/ScanInterpretTest.hpp
@@ -197,7 +197,7 @@ ScanInterpretTest::scanRead(Ndb* pNdb,
int retryMax = 100;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
while (true){
@@ -220,16 +220,17 @@ ScanInterpretTest::scanRead(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
- check = pOp->openScanRead(parallelism);
- //check = pOp->openScanExclusive(parallelism);
- if( check == -1 ) {
+
+ NdbResultSet * rs = pOp->readTuples(NdbScanOperation::LM_Read,
+ 0, parallelism);
+
+ if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -250,7 +251,7 @@ ScanInterpretTest::scanRead(Ndb* pNdb,
return NDBT_FAILED;
}
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -261,32 +262,22 @@ ScanInterpretTest::scanRead(Ndb* pNdb,
int rows = 0;
NdbConnection* pInsTrans;
- while((eof = pTrans->nextScanResult(true)) == 0){
- pInsTrans = pNdb->startTransaction();
- if (pInsTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
- ERR(err);
- return NDBT_FAILED;
- }
+ while((eof = rs->nextResult(true)) == 0){
do {
rows++;
- if (addRowToInsert(pNdb, pInsTrans) != 0){
+ if (addRowToInsert(pNdb, pTrans) != 0){
pNdb->closeTransaction(pTrans);
- pNdb->closeTransaction(pInsTrans);
return NDBT_FAILED;
}
- } while((eof = pTrans->nextScanResult(false)) == 0);
+ } while((eof = rs->nextResult(false)) == 0);
- check = pInsTrans->execute(Commit);
+ check = pTrans->execute(Commit);
if( check == -1 ) {
- const NdbError err = pInsTrans->getNdbError();
+ const NdbError err = pTrans->getNdbError();
ERR(err);
- pNdb->closeTransaction(pInsTrans);
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pInsTrans);
-
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
@@ -322,7 +313,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
const int retryMax = 100;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
while (true){
@@ -346,7 +337,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
}
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
if (pOp->getValue("KOL2") == 0){
ERR(pNdb->getNdbError());
return NDBT_FAILED;
@@ -357,9 +348,10 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
- check = pOp->openScanRead(parallelism);
- if( check == -1 ) {
+
+ NdbResultSet * rs = pOp->readTuples(NdbScanOperation::LM_Read,
+ 0, parallelism);
+ if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -382,7 +374,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
return NDBT_FAILED;
}
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -400,7 +392,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
NdbConnection* pExistTrans;
NdbConnection* pNoExistTrans;
- while((eof = pTrans->nextScanResult(true)) == 0){
+ while((eof = rs->nextResult(true)) == 0){
pExistTrans = pNdb->startTransaction();
if (pExistTrans == NULL) {
const NdbError err = pNdb->getNdbError();
@@ -432,7 +424,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
return NDBT_FAILED;
}
}
- } while((eof = pTrans->nextScanResult(false)) == 0);
+ } while((eof = rs->nextResult(false)) == 0);
// Execute the transaction containing reads of
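The scanRead() hunk above also drops the separate insert transaction and batches operations on the scan's own transaction. A condensed skeleton of that idiom; the meaning of nextResult()'s bool argument (true may fetch a new batch from the data nodes, false only steps through rows already cached in the API) is my reading of the hunk, not taken from documentation:

    int eof;
    while ((eof = rs->nextResult(true)) == 0) {        // outer loop: fetch a batch
      do {
        /* define one operation per cached row on the scan's own transaction */
      } while ((eof = rs->nextResult(false)) == 0);    // inner loop: cached rows only
      if (pTrans->execute(Commit) == -1)               // flush the batch of operations
        return NDBT_FAILED;
    }
    if (eof == -1)
      return NDBT_FAILED;                              // the scan itself failed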
diff --git a/ndb/test/ndbapi/bank/Bank.cpp b/ndb/test/ndbapi/bank/Bank.cpp
index 14883205693..7a2c5b057a1 100644
--- a/ndb/test/ndbapi/bank/Bank.cpp
+++ b/ndb/test/ndbapi/bank/Bank.cpp
@@ -670,15 +670,15 @@ int Bank::findLastGL(Uint64 &lastTime){
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("GL");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("GL");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuples();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -698,7 +698,7 @@ int Bank::findLastGL(Uint64 &lastTime){
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -707,7 +707,7 @@ int Bank::findLastGL(Uint64 &lastTime){
int eof;
int rows = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
lastTime = 0;
while(eof == 0){
@@ -717,7 +717,7 @@ int Bank::findLastGL(Uint64 &lastTime){
if (t > lastTime)
lastTime = t;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1002,15 +1002,15 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("TRANSACTION");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("TRANSACTION");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanExclusive(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuplesExclusive();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1051,7 +1051,7 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1061,7 +1061,7 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
int eof;
int rows = 0;
int rowsFound = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
rows++;
@@ -1085,7 +1085,7 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
}
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
if ((rows % 100) == 0){
// "refresh" ownner transaction every 100th row
@@ -1162,15 +1162,15 @@ int Bank::performValidateGL(Uint64 glTime){
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("GL");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("GL");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuples();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1238,7 +1238,7 @@ int Bank::performValidateGL(Uint64 glTime){
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1249,7 +1249,7 @@ int Bank::performValidateGL(Uint64 glTime){
int rows = 0;
int countGlRecords = 0;
int result = NDBT_OK;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
rows++;
@@ -1336,7 +1336,7 @@ int Bank::performValidateGL(Uint64 glTime){
}
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1426,15 +1426,15 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("GL");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("GL");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuples();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1468,7 +1468,7 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1477,7 +1477,7 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
int eof;
int rows = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
oldest = 0;
while(eof == 0){
@@ -1491,7 +1491,7 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
if (t > oldest)
oldest = t;
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1518,15 +1518,15 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("GL");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("GL");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuples();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1560,7 +1560,7 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1569,7 +1569,7 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
int eof;
int rows = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
oldest = (Uint64)-1;
found = false;
@@ -1586,7 +1586,7 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
accountTypeId = a;
}
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1615,15 +1615,15 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("TRANSACTION");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("TRANSACTION");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuples();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1657,7 +1657,7 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1667,7 +1667,7 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
int eof;
int rows = 0;
int found = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
rows++;
@@ -1683,7 +1683,7 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
<< " ti = " << ti << endl;
found++;
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1859,15 +1859,15 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("TRANSACTION");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("TRANSACTION");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanExclusive(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuplesExclusive();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1894,7 +1894,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1904,7 +1904,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
int eof;
int rows = 0;
int rowsFound = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
rows++;
@@ -1914,8 +1914,8 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
if (a == accountType && t == glTime){
rowsFound++;
// One record found
- NdbOperation* pDelOp = pOp->takeOverForDelete(pTrans);
- if (pDelOp == NULL){
+ check = rs->deleteTuple(pTrans);
+ if (check == -1){
ERR(m_ndb.getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1929,7 +1929,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
return NDBT_FAILED;
}
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -2348,15 +2348,15 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("ACCOUNT");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("ACCOUNT");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanExclusive(64);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuplesExclusive();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -2376,7 +2376,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -2391,7 +2391,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
}
int eof;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
Uint32 b = balanceRec->u_32_value();
@@ -2403,7 +2403,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
// << ", sum="<< sumAccounts << endl;
// Take over the operation so that the lock is kept in db
- NdbOperation* pLockOp = pOp->takeOverForUpdate(pTrans);
+ NdbOperation* pLockOp = rs->updateTuple(pTrans);
if (pLockOp == NULL){
ERR(m_ndb.getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -2429,7 +2429,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
return NDBT_FAILED;
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
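Bank.cpp above switches from NdbOperation::takeOverForDelete()/takeOverForUpdate() to taking over the row under the scan cursor through the result set. A hypothetical helper showing both calls from those hunks; the column name "BALANCE", the setValue() usage and the function itself are invented for illustration:

    #include <NdbApi.hpp>

    // Take over the current scan row into another transaction, either
    // deleting it or updating one column.  Returns -1 on error.
    static int takeOverCurrentRow(NdbResultSet* rs, NdbConnection* pTakeOverTrans,
                                  bool doDelete, Uint32 newBalance)
    {
      if (doDelete)
        return rs->deleteTuple(pTakeOverTrans);            // delete row under cursor

      NdbOperation* pUpdOp = rs->updateTuple(pTakeOverTrans);  // keep the lock, update later
      if (pUpdOp == NULL)
        return -1;
      return pUpdOp->setValue("BALANCE", newBalance);
    }

The taken-over operations are then sent with the take-over transaction's execute(), while the scan keeps running on its own transaction, which matches how getSumAccounts() and findTransactionsToPurge() use it above.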
diff --git a/ndb/test/ndbapi/bank/BankLoad.cpp b/ndb/test/ndbapi/bank/BankLoad.cpp
index 76261b664a6..bbaac27735b 100644
--- a/ndb/test/ndbapi/bank/BankLoad.cpp
+++ b/ndb/test/ndbapi/bank/BankLoad.cpp
@@ -335,15 +335,15 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
return NDBT_FAILED;
}
- NdbOperation* pOp = pScanTrans->getNdbOperation("ACCOUNT");
+ NdbScanOperation* pOp = pScanTrans->getNdbScanOperation("ACCOUNT");
if (pOp == NULL) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(64);
- if( check == -1 ) {
+ NdbResultSet* rs = pOp->readTuples();
+ if( rs == 0 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -370,7 +370,7 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
return NDBT_FAILED;
}
- check = pScanTrans->executeScan();
+ check = pScanTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -379,7 +379,7 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
int eof;
int rows = 0;
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
rows++;
@@ -391,7 +391,7 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
balance += b;
}
- eof = pScanTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
diff --git a/ndb/test/ndbapi/create_tab.cpp b/ndb/test/ndbapi/create_tab.cpp
index 8bb1e7a9572..c2e3b7f64ea 100644
--- a/ndb/test/ndbapi/create_tab.cpp
+++ b/ndb/test/ndbapi/create_tab.cpp
@@ -63,9 +63,10 @@ int main(int argc, const char** argv){
/**
* Print instead of creating
*/
- if(argv[optind] != NULL){
- for(int i = optind; i<argc; i++)
+ if(optind < argc){
+ for(int i = optind; i<argc; i++){
NDBT_Tables::print(argv[i]);
+ }
} else {
NDBT_Tables::printAll();
}
diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp
index 396ac06c87a..9192ec21b93 100644
--- a/ndb/test/ndbapi/flexAsynch.cpp
+++ b/ndb/test/ndbapi/flexAsynch.cpp
@@ -146,7 +146,7 @@ tellThreads(StartType what)
NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
{
ThreadNdb* pThreadData;
- int tLoops=0;
+ int tLoops=0, i;
int returnValue = NDBT_OK;
flexAsynchErrorData = new ErrorData;
@@ -256,7 +256,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
PRINT_TIMER("insert", noOfTransacts, tNoOfOpsPerTrans);
if (0 < failed) {
- int i = retry_opt ;
+ i = retry_opt ;
int ci = 1 ;
while (0 < failed && 0 < i){
ndbout << failed << " of the transactions returned errors!"
@@ -293,7 +293,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
PRINT_TIMER("read", noOfTransacts, tNoOfOpsPerTrans);
if (0 < failed) {
- int i = retry_opt ;
+ i = retry_opt ;
int cr = 1;
while (0 < failed && 0 < i){
ndbout << failed << " of the transactions returned errors!"<<endl ;
@@ -330,7 +330,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
PRINT_TIMER("update", noOfTransacts, tNoOfOpsPerTrans) ;
if (0 < failed) {
- int i = retry_opt ;
+ i = retry_opt ;
int cu = 1 ;
while (0 < failed && 0 < i){
ndbout << failed << " of the transactions returned errors!"<<endl ;
@@ -366,7 +366,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
PRINT_TIMER("read", noOfTransacts, tNoOfOpsPerTrans);
if (0 < failed) {
- int i = retry_opt ;
+ i = retry_opt ;
int cr2 = 1 ;
while (0 < failed && 0 < i){
ndbout << failed << " of the transactions returned errors!"<<endl ;
@@ -403,7 +403,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
PRINT_TIMER("delete", noOfTransacts, tNoOfOpsPerTrans);
if (0 < failed) {
- int i = retry_opt ;
+ i = retry_opt ;
int cd = 1 ;
while (0 < failed && 0 < i){
ndbout << failed << " of the transactions returned errors!"<< endl ;
@@ -438,7 +438,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
execute(stStop);
void * tmp;
- for(int i = 0; i<tNoOfThreads; i++){
+ for(i = 0; i<tNoOfThreads; i++){
NdbThread_WaitFor(threadLife[i], &tmp);
NdbThread_Destroy(&threadLife[i]);
}
diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp
index 809d11086bf..38c8f6e280f 100644
--- a/ndb/test/ndbapi/flexBench.cpp
+++ b/ndb/test/ndbapi/flexBench.cpp
@@ -282,7 +282,7 @@ tellThreads(ThreadData* pt, StartType what)
NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
{
ThreadData* pThreadsData;
- int tLoops = 0;
+ int tLoops = 0, i;
int returnValue = NDBT_OK;
if (readArguments(argc, argv) != 0){
@@ -355,7 +355,7 @@ NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
****************************************************************/
resetThreads(pThreadsData);
- for (unsigned int i = 0; i < tNoOfThreads; i++){
+ for (i = 0; i < tNoOfThreads; i++){
pThreadsData[i].threadNo = i;
pThreadsData[i].threadLife = NdbThread_Create(flexBenchThread,
(void**)&pThreadsData[i],
@@ -531,7 +531,7 @@ NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
waitForThreads(pThreadsData);
void * tmp;
- for(Uint32 i = 0; i<tNoOfThreads; i++){
+ for(i = 0; i<tNoOfThreads; i++){
NdbThread_WaitFor(pThreadsData[i].threadLife, &tmp);
NdbThread_Destroy(&pThreadsData[i].threadLife);
}
@@ -540,7 +540,7 @@ NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
if (useLongKeys == true) {
// Only free these areas if they have been allocated
// Otherwise cores will happen
- for (Uint32 i = 0; i < tNoOfLongPK; i++)
+ for (i = 0; i < tNoOfLongPK; i++)
free(longKeyAttrName[i]);
free(longKeyAttrName);
} // if
@@ -629,9 +629,10 @@ static void* flexBenchThread(void* pArg)
if(useLongKeys){
// Allocate and populate the longkey array.
longKeyAttrValue = (unsigned ***) malloc(sizeof(unsigned**) * tNoOfOperations );
- for (Uint32 n = 0; n < tNoOfOperations; n++)
+ Uint32 n;
+ for (n = 0; n < tNoOfOperations; n++)
longKeyAttrValue[n] = (unsigned **) malloc(sizeof(unsigned*) * tNoOfLongPK );
- for (Uint32 n = 0; n < tNoOfOperations; n++){
+ for (n = 0; n < tNoOfOperations; n++){
for (Uint32 i = 0; i < tNoOfLongPK ; i++) {
longKeyAttrValue[n][i] = (unsigned *) malloc(sizeof(unsigned) * tSizeOfLongPK);
memset(longKeyAttrValue[n][i], 0, sizeof(unsigned) * tSizeOfLongPK);
@@ -1064,13 +1065,14 @@ static void sleepBeforeStartingTest(int seconds){
static int
createTables(Ndb* pMyNdb){
- for (Uint32 i = 0; i < tNoOfAttributes; i++){
+ int i;
+ for (i = 0; i < tNoOfAttributes; i++){
snprintf(attrName[i], MAXSTRLEN, "COL%d", i);
}
// Note! Uses only uppercase letters in table names
// so that we can look at the tables with SQL
- for (Uint32 i = 0; i < tNoOfTables; i++){
+ for (i = 0; i < tNoOfTables; i++){
if (theStdTableNameFlag == 0){
snprintf(tableName[i], MAXSTRLEN, "TAB%d_%d", i,
(int)(NdbTick_CurrentMillisecond() / 1000));
@@ -1079,7 +1081,7 @@ createTables(Ndb* pMyNdb){
}
}
- for(unsigned i = 0; i < tNoOfTables; i++){
+ for(i = 0; i < tNoOfTables; i++){
ndbout << "Creating " << tableName[i] << "... ";
NdbDictionary::Table tmpTable(tableName[i]);
diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp
index a82875de5c2..c0ff31d1677 100644
--- a/ndb/test/ndbapi/flexTT.cpp
+++ b/ndb/test/ndbapi/flexTT.cpp
@@ -173,7 +173,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
{
ThreadNdb* pThreadData;
int returnValue = NDBT_OK;
-
+ int i;
flexTTErrorData = new ErrorData;
flexTTErrorData->resetErrorCounters();
@@ -250,7 +250,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
* Create NDB objects. *
****************************************************************/
resetThreads();
- for (int i = 0; i < tNoOfThreads ; i++) {
+ for (i = 0; i < tNoOfThreads ; i++) {
pThreadData[i].threadNo = i;
threadLife[i] = NdbThread_Create(threadLoop,
(void**)&pThreadData[i],
@@ -301,7 +301,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
execute(stStop);
void * tmp;
- for(int i = 0; i<tNoOfThreads; i++){
+ for(i = 0; i<tNoOfThreads; i++){
NdbThread_WaitFor(threadLife[i], &tmp);
NdbThread_Destroy(&threadLife[i]);
}
diff --git a/ndb/test/ndbapi/old_dirs/testBackup/Makefile b/ndb/test/ndbapi/old_dirs/testBackup/Makefile
index ce0e404803c..abf47dcfb2d 100644
--- a/ndb/test/ndbapi/old_dirs/testBackup/Makefile
+++ b/ndb/test/ndbapi/old_dirs/testBackup/Makefile
@@ -3,7 +3,6 @@ include .defs.mk
TYPE = ndbapitest
BIN_TARGET = testBackup
-BIN_TARGET_LIBS += bank
SOURCES = testBackup.cpp
include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/test/ndbapi/old_dirs/testGrep/Makefile b/ndb/test/ndbapi/old_dirs/testGrep/Makefile
index 34fdd7113d0..6bad3d56a00 100644
--- a/ndb/test/ndbapi/old_dirs/testGrep/Makefile
+++ b/ndb/test/ndbapi/old_dirs/testGrep/Makefile
@@ -3,7 +3,6 @@ include .defs.mk
TYPE = ndbapitest
DIRS = verify
BIN_TARGET = testGrep
-BIN_TARGET_LIBS += bank
SOURCES = testGrep.cpp
include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile b/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile
index 4e6182de6b2..256e3c98f36 100644
--- a/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile
+++ b/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile
@@ -3,7 +3,6 @@ include .defs.mk
TYPE = ndbapitest
BIN_TARGET = testGrepVerify
-BIN_TARGET_LIBS += bank
SOURCES = testGrepVerify.cpp
CFLAGS_testGrepVerify.cpp += -I$(call fixpath,$(NDB_TOP)/include/kernel) -I$(call fixpath,$(NDB_TOP)/include/mgmcommon)
diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp
index 129eced54b0..d328a7db292 100644
--- a/ndb/test/ndbapi/testBackup.cpp
+++ b/ndb/test/ndbapi/testBackup.cpp
@@ -205,6 +205,11 @@ int runClearTable(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int runDropTable(NDBT_Context* ctx, NDBT_Step* step){
+ GETNDB(step)->getDictionary()->dropTable(ctx->getTab()->getName());
+ return NDBT_OK;
+}
+
#include "bank/Bank.hpp"
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
@@ -408,7 +413,7 @@ TESTCASE("BackupOne",
INITIALIZER(runRestoreOne);
VERIFIER(runVerifyOne);
FINALIZER(runClearTable);
-
+ FINALIZER(runDropTable);
}
TESTCASE("BackupBank",
"Test that backup and restore works during transaction load\n"
@@ -428,7 +433,6 @@ TESTCASE("BackupBank",
STEP(runBackupBank);
VERIFIER(runRestoreBankAndVerify);
// FINALIZER(runDropBank);
-
}
TESTCASE("NFMaster",
"Test that backup behaves during node failiure\n"){
diff --git a/ndb/test/ndbapi/testBasic.cpp b/ndb/test/ndbapi/testBasic.cpp
index 64dfe492c2c..26622f9b066 100644
--- a/ndb/test/ndbapi/testBasic.cpp
+++ b/ndb/test/ndbapi/testBasic.cpp
@@ -29,9 +29,18 @@
* delete should be visible to same transaction
*
*/
+int runLoadTable2(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int records = ctx->getNumRecords();
+ HugoTransactions hugoTrans(*ctx->getTab());
+ if (hugoTrans.loadTable(GETNDB(step), records, 512, false, 0, true) != 0){
+ return NDBT_FAILED;
+ }
+ return NDBT_OK;
+}
-int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
-
+int runLoadTable(NDBT_Context* ctx, NDBT_Step* step)
+{
int records = ctx->getNumRecords();
HugoTransactions hugoTrans(*ctx->getTab());
if (hugoTrans.loadTable(GETNDB(step), records) != 0){
@@ -255,7 +264,7 @@ static
int
readOneNoCommit(Ndb* pNdb, NdbConnection* pTrans,
const NdbDictionary::Table* tab,NDBT_ResultRow * row){
-
+ int a;
NdbOperation * pOp = pTrans->getNdbOperation(tab->getName());
if (pOp == NULL){
ERR(pTrans->getNdbError());
@@ -271,7 +280,7 @@ readOneNoCommit(Ndb* pNdb, NdbConnection* pTrans,
}
// Define primary keys
- for(int a = 0; a<tab->getNoOfColumns(); a++){
+ for(a = 0; a<tab->getNoOfColumns(); a++){
if (tab->getColumn(a)->getPrimaryKey() == true){
if(tmp.equalForAttr(pOp, a, 0) != 0){
ERR(pTrans->getNdbError());
@@ -281,7 +290,7 @@ readOneNoCommit(Ndb* pNdb, NdbConnection* pTrans,
}
// Define attributes to read
- for(int a = 0; a<tab->getNoOfColumns(); a++){
+ for(a = 0; a<tab->getNoOfColumns(); a++){
if((row->attributeStore(a) =
pOp->getValue(tab->getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -630,35 +639,35 @@ int runNoCommitRollback630(NDBT_Context* ctx, NDBT_Step* step){
int runNoCommitAndClose(NDBT_Context* ctx, NDBT_Step* step){
- int result = NDBT_OK;
+ int i, result = NDBT_OK;
HugoOperations hugoOps(*ctx->getTab());
Ndb* pNdb = GETNDB(step);
do{
// Read
CHECK(hugoOps.startTransaction(pNdb) == 0);
- for (int i = 0; i < 10; i++)
+ for (i = 0; i < 10; i++)
CHECK(hugoOps.pkReadRecord(pNdb, i, true) == 0);
CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
CHECK(hugoOps.closeTransaction(pNdb) == 0);
// Update
CHECK(hugoOps.startTransaction(pNdb) == 0);
- for (int i = 0; i < 10; i++)
+ for (i = 0; i < 10; i++)
CHECK(hugoOps.pkUpdateRecord(pNdb, i) == 0);
CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
CHECK(hugoOps.closeTransaction(pNdb) == 0);
// Delete
CHECK(hugoOps.startTransaction(pNdb) == 0);
- for (int i = 0; i < 10; i++)
+ for (i = 0; i < 10; i++)
CHECK(hugoOps.pkDeleteRecord(pNdb, i) == 0);
CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
CHECK(hugoOps.closeTransaction(pNdb) == 0);
// Try to insert, record should already exist
CHECK(hugoOps.startTransaction(pNdb) == 0);
- for (int i = 0; i < 10; i++)
+ for (i = 0; i < 10; i++)
CHECK(hugoOps.pkInsertRecord(pNdb, i) == 0);
CHECK(hugoOps.execute_Commit(pNdb) == 630);
CHECK(hugoOps.closeTransaction(pNdb) == 0);
@@ -772,14 +781,14 @@ int runCheckRollbackDeleteMultiple(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoOps.closeTransaction(pNdb) == 0);
Uint32 updatesValue = 0;
-
+ Uint32 j;
for(Uint32 i = 0; i<1; i++){
// Read record 5 - 10
CHECK(hugoOps.startTransaction(pNdb) == 0);
CHECK(hugoOps.pkReadRecord(pNdb, 5, true, 10) == 0);
CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
- for(Uint32 j = 0; j<10; j++){
+ for(j = 0; j<10; j++){
// Update record 5 - 10
updatesValue++;
CHECK(hugoOps.pkUpdateRecord(pNdb, 5, 10, updatesValue) == 0);
@@ -790,7 +799,7 @@ int runCheckRollbackDeleteMultiple(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoOps.verifyUpdatesValue(updatesValue) == 0);
}
- for(Uint32 j = 0; j<10; j++){
+ for(j = 0; j<10; j++){
// Delete record 5 - 10 times
CHECK(hugoOps.pkDeleteRecord(pNdb, 5, 10) == 0);
CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
@@ -953,6 +962,7 @@ int runMassiveRollback(NDBT_Context* ctx, NDBT_Step* step){
const Uint32 OPS_TOTAL = 4096;
for(int row = 0; row < records; row++){
+ int res;
CHECK(hugoOps.startTransaction(pNdb) == 0);
for(int i = 0; i<OPS_TOTAL; i += OPS_PER_TRANS){
for(int j = 0; j<OPS_PER_TRANS; j++){
@@ -963,7 +973,12 @@ int runMassiveRollback(NDBT_Context* ctx, NDBT_Step* step){
if(result != NDBT_OK){
break;
}
- CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+ res = hugoOps.execute_NoCommit(pNdb);
+ if(res != 0){
+ NdbError err = pNdb->getNdbError(res);
+ CHECK(err.classification == NdbError::TimeoutExpired);
+ break;
+ }
}
if(result != NDBT_OK){
break;
@@ -1255,6 +1270,11 @@ TESTCASE("MassiveRollback2",
INITIALIZER(runMassiveRollback2);
FINALIZER(runClearTable2);
}
+TESTCASE("MassiveTransaction",
+ "Test very large insert transaction"){
+ INITIALIZER(runLoadTable2);
+ FINALIZER(runClearTable2);
+}
NDBT_TESTSUITE_END(testBasic);
int main(int argc, const char** argv){
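The MassiveRollback change in testBasic.cpp above stops treating a timeout on the oversized no-commit batch as a hard failure. A small helper expressing that check, using the same calls as the hunk; the umbrella header name and the helper itself are assumptions for the sake of a self-contained sketch:

    #include <NdbApi.hpp>

    // Returns true when a failed execute() may be ignored by the test, i.e. the
    // 4096-operation transaction merely ran into a timeout.
    static bool timeoutIsAcceptable(Ndb* pNdb, int res)
    {
      if (res == 0)
        return true;                                 // nothing failed
      NdbError err = pNdb->getNdbError(res);         // map the return code to an NdbError
      return err.classification == NdbError::TimeoutExpired;
    }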
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp
index 001ec83630a..64881ca39ab 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/ndb/test/ndbapi/testBlobs.cpp
@@ -38,6 +38,7 @@ struct Bcol {
};
struct Opt {
+ unsigned m_batch;
bool m_core;
bool m_dbg;
bool m_dbgall;
@@ -46,7 +47,8 @@ struct Opt {
unsigned m_parts;
unsigned m_rows;
unsigned m_seed;
- char m_skip[255];
+ const char* m_skip;
+ const char* m_style;
// metadata
const char* m_tname;
const char* m_x1name; // hash index
@@ -60,6 +62,7 @@ struct Opt {
int m_bug;
int (*m_bugtest)();
Opt() :
+ m_batch(7),
m_core(false),
m_dbg(false),
m_dbgall(false),
@@ -68,6 +71,8 @@ struct Opt {
m_parts(10),
m_rows(100),
m_seed(0),
+ m_skip(""),
+ m_style("012"),
// metadata
m_tname("TBLOB1"),
m_x1name("TBLOB1X1"),
@@ -80,7 +85,6 @@ struct Opt {
// bugs
m_bug(0),
m_bugtest(0) {
- memset(m_skip, false, sizeof(m_skip));
}
};
@@ -92,6 +96,7 @@ printusage()
Opt d;
ndbout
<< "usage: testBlobs options [default/max]" << endl
+ << " -batch N number of pk ops in batch [" << d.m_batch << "]" << endl
<< " -core dump core on error" << endl
<< " -dbg print debug" << endl
<< " -dbgall print also NDB API debug (if compiled in)" << endl
@@ -101,7 +106,8 @@ printusage()
<< " -parts N max parts in blob value [" << d.m_parts << "]" << endl
<< " -rows N number of rows [" << d.m_rows << "]" << endl
<< " -seed N random seed 0=loop number [" << d.m_seed << "]" << endl
- << " -skip xxx skip these tests (see list)" << endl
+ << " -skip xxx skip these tests (see list) [" << d.m_skip << endl
+ << " -style xxx access styles to test (see list) [" << d.m_style << "]" << endl
<< "metadata" << endl
<< " -pk2len N length of PK2 [" << d.m_pk2len << "/" << g_max_pk2len <<"]" << endl
<< " -oneblob only 1 blob attribute [default 2]" << endl
@@ -111,8 +117,10 @@ printusage()
<< " s table scans" << endl
<< " r ordered index scans" << endl
<< " u update blob value" << endl
- << " v getValue / setValue" << endl
- << " w readData / writeData" << endl
+ << "access styles for -style" << endl
+ << " 0 getValue / setValue" << endl
+ << " 1 setActiveHook" << endl
+ << " 2 readData / writeData" << endl
<< "bug tests (no blob test)" << endl
<< " -bug 4088 ndb api hang with mixed ops on index table" << endl
<< " -bug 2222 delete + write gives 626" << endl
@@ -122,11 +130,16 @@ printusage()
static Opt g_opt;
-static char&
-skip(unsigned x)
+static bool
+skipcase(int x)
{
- assert(x < sizeof(g_opt.m_skip));
- return g_opt.m_skip[x];
+ return strchr(g_opt.m_skip, x) != 0;
+}
+
+static bool
+skipstyle(int x)
+{
+ return strchr(g_opt.m_style, '0' + x) == 0;
}
static Ndb* g_ndb = 0;
@@ -138,11 +151,12 @@ static NdbScanOperation* g_ops = 0;
static NdbBlob* g_bh1 = 0;
static NdbBlob* g_bh2 = 0;
static bool g_printerror = true;
+static unsigned g_loop = 0;
static void
printerror(int line, const char* msg)
{
- ndbout << "line " << line << ": " << msg << " failed" << endl;
+ ndbout << "line " << line << " FAIL " << msg << endl;
if (! g_printerror) {
return;
}
@@ -205,6 +219,7 @@ static int
createTable()
{
NdbDictionary::Table tab(g_opt.m_tname);
+ tab.setLogging(false);
// col PK1 - Uint32
{ NdbDictionary::Column col("PK1");
col.setType(NdbDictionary::Column::Unsigned);
@@ -228,11 +243,11 @@ createTable()
col.setPrimaryKey(true);
tab.addColumn(col);
}
- // col BL2 - Clob nullable
+ // col BL2 - Text nullable
if (! g_opt.m_oneblob)
{ NdbDictionary::Column col("BL2");
const Bcol& b = g_opt.m_blob2;
- col.setType(NdbDictionary::Column::Clob);
+ col.setType(NdbDictionary::Column::Text);
col.setNullable(true);
col.setInlineSize(b.m_inline);
col.setPartSize(b.m_partsize);
@@ -245,6 +260,7 @@ createTable()
if (g_opt.m_pk2len != 0)
{ NdbDictionary::Index idx(g_opt.m_x1name);
idx.setType(NdbDictionary::Index::UniqueHashIndex);
+ idx.setLogging(false);
idx.setTable(g_opt.m_tname);
idx.addColumnName("PK2");
CHK(g_dic->createIndex(idx) == 0);
@@ -281,7 +297,7 @@ struct Bval {
m_buf = new char [m_buflen];
trash();
}
- void copy(const Bval& v) {
+ void copyfrom(const Bval& v) {
m_len = v.m_len;
delete [] m_val;
if (v.m_val == 0)
@@ -313,10 +329,10 @@ struct Tup {
m_blob1.alloc(g_opt.m_blob1.m_inline + g_opt.m_blob1.m_partsize * g_opt.m_parts);
m_blob2.alloc(g_opt.m_blob2.m_inline + g_opt.m_blob2.m_partsize * g_opt.m_parts);
}
- void copy(const Tup& tup) {
+ void copyfrom(const Tup& tup) {
assert(m_pk1 == tup.m_pk1);
- m_blob1.copy(tup.m_blob1);
- m_blob2.copy(tup.m_blob2);
+ m_blob1.copyfrom(tup.m_blob1);
+ m_blob2.copyfrom(tup.m_blob2);
}
private:
Tup(const Tup&);
@@ -358,6 +374,14 @@ calcBval(const Bcol& b, Bval& v, bool keepsize)
}
static void
+calcBval(Tup& tup, bool keepsize)
+{
+ calcBval(g_opt.m_blob1, tup.m_blob1, keepsize);
+ if (! g_opt.m_oneblob)
+ calcBval(g_opt.m_blob2, tup.m_blob2, keepsize);
+}
+
+static void
calcTups(bool keepsize)
{
for (unsigned k = 0; k < g_opt.m_rows; k++) {
@@ -371,31 +395,59 @@ calcTups(bool keepsize)
tup.m_pk2[i] = 'a' + i % 26;
}
}
- calcBval(g_opt.m_blob1, tup.m_blob1, keepsize);
- if (! g_opt.m_oneblob)
- calcBval(g_opt.m_blob2, tup.m_blob2, keepsize);
+ calcBval(tup, keepsize);
}
}
// blob handle ops
static int
+getBlobHandles(NdbOperation* opr)
+{
+ CHK((g_bh1 = opr->getBlobHandle("BL1")) != 0);
+ if (! g_opt.m_oneblob)
+ CHK((g_bh2 = opr->getBlobHandle("BL2")) != 0);
+ return 0;
+}
+
+static int
+getBlobHandles(NdbIndexOperation* opx)
+{
+ CHK((g_bh1 = opx->getBlobHandle("BL1")) != 0);
+ if (! g_opt.m_oneblob)
+ CHK((g_bh2 = opx->getBlobHandle("BL2")) != 0);
+ return 0;
+}
+
+static int
+getBlobHandles(NdbScanOperation* ops)
+{
+ CHK((g_bh1 = ops->getBlobHandle("BL1")) != 0);
+ if (! g_opt.m_oneblob)
+ CHK((g_bh2 = ops->getBlobHandle("BL2")) != 0);
+ return 0;
+}
+
+static int
getBlobLength(NdbBlob* h, unsigned& len)
{
Uint64 len2 = (unsigned)-1;
CHK(h->getLength(len2) == 0);
len = (unsigned)len2;
assert(len == len2);
+ DBG("getBlobLength " << h->getColumn()->getName() << " len=" << len);
return 0;
}
+// setValue / getValue
+
static int
setBlobValue(NdbBlob* h, const Bval& v)
{
bool null = (v.m_val == 0);
bool isNull;
unsigned len;
- DBG("set " << h->getColumn()->getName() << " len=" << v.m_len << " null=" << null);
+ DBG("setValue " << h->getColumn()->getName() << " len=" << v.m_len << " null=" << null);
if (null) {
CHK(h->setNull() == 0);
isNull = false;
@@ -410,10 +462,19 @@ setBlobValue(NdbBlob* h, const Bval& v)
}
static int
+setBlobValue(const Tup& tup)
+{
+ CHK(setBlobValue(g_bh1, tup.m_blob1) == 0);
+ if (! g_opt.m_oneblob)
+ CHK(setBlobValue(g_bh2, tup.m_blob2) == 0);
+ return 0;
+}
+
+static int
getBlobValue(NdbBlob* h, const Bval& v)
{
bool null = (v.m_val == 0);
- DBG("get " << h->getColumn()->getName() << " len=" << v.m_len << " null=" << null);
+ DBG("getValue " << h->getColumn()->getName() << " buflen=" << v.m_buflen);
CHK(h->getValue(v.m_buf, v.m_buflen) == 0);
return 0;
}
@@ -456,6 +517,8 @@ verifyBlobValue(const Tup& tup)
return 0;
}
+// readData / writeData
+
static int
writeBlobData(NdbBlob* h, const Bval& v)
{
@@ -469,6 +532,7 @@ writeBlobData(NdbBlob* h, const Bval& v)
CHK(h->getNull(isNull) == 0 && isNull == true);
CHK(getBlobLength(h, len) == 0 && len == 0);
} else {
+ CHK(h->truncate(v.m_len) == 0);
unsigned n = 0;
do {
unsigned m = g_opt.m_full ? v.m_len : urandom(v.m_len + 1);
@@ -487,6 +551,15 @@ writeBlobData(NdbBlob* h, const Bval& v)
}
static int
+writeBlobData(const Tup& tup)
+{
+ CHK(writeBlobData(g_bh1, tup.m_blob1) == 0);
+ if (! g_opt.m_oneblob)
+ CHK(writeBlobData(g_bh2, tup.m_blob2) == 0);
+ return 0;
+}
+
+static int
readBlobData(NdbBlob* h, const Bval& v)
{
bool null = (v.m_val == 0);
@@ -531,6 +604,71 @@ readBlobData(const Tup& tup)
return 0;
}
+// hooks
+
+static NdbBlob::ActiveHook blobWriteHook;
+
+static int
+blobWriteHook(NdbBlob* h, void* arg)
+{
+ DBG("blobWriteHook");
+ Bval& v = *(Bval*)arg;
+ CHK(writeBlobData(h, v) == 0);
+ return 0;
+}
+
+static int
+setBlobWriteHook(NdbBlob* h, Bval& v)
+{
+ DBG("setBlobWriteHook");
+ CHK(h->setActiveHook(blobWriteHook, &v) == 0);
+ return 0;
+}
+
+static int
+setBlobWriteHook(Tup& tup)
+{
+ CHK(setBlobWriteHook(g_bh1, tup.m_blob1) == 0);
+ if (! g_opt.m_oneblob)
+ CHK(setBlobWriteHook(g_bh2, tup.m_blob2) == 0);
+ return 0;
+}
+
+static NdbBlob::ActiveHook blobReadHook;
+
+// no PK yet to identify tuple so just read the value
+static int
+blobReadHook(NdbBlob* h, void* arg)
+{
+ DBG("blobReadHook");
+ Bval& v = *(Bval*)arg;
+ unsigned len;
+ CHK(getBlobLength(h, len) == 0);
+ v.alloc(len);
+ Uint32 maxlen = 0xffffffff;
+ CHK(h->readData(v.m_buf, maxlen) == 0);
+ DBG("read " << maxlen << " bytes");
+ CHK(len == maxlen);
+ return 0;
+}
+
+static int
+setBlobReadHook(NdbBlob* h, Bval& v)
+{
+ DBG("setBlobReadHook");
+ CHK(h->setActiveHook(blobReadHook, &v) == 0);
+ return 0;
+}
+
+static int
+setBlobReadHook(Tup& tup)
+{
+ CHK(setBlobReadHook(g_bh1, tup.m_blob1) == 0);
+ if (! g_opt.m_oneblob)
+ CHK(setBlobReadHook(g_bh2, tup.m_blob2) == 0);
+ return 0;
+}
+
// verify blob data
static int
@@ -540,7 +678,11 @@ verifyHeadInline(const Bcol& c, const Bval& v, NdbRecAttr* ra)
CHK(ra->isNULL() == 1);
} else {
CHK(ra->isNULL() == 0);
- CHK(ra->u_64_value() == v.m_len);
+ const NdbBlob::Head* head = (const NdbBlob::Head*)ra->aRef();
+ CHK(head->length == v.m_len);
+ const char* data = (const char*)(head + 1);
+ for (unsigned i = 0; i < head->length && i < c.m_inline; i++)
+ CHK(data[i] == v.m_val[i]);
}
return 0;
}
@@ -548,7 +690,7 @@ verifyHeadInline(const Bcol& c, const Bval& v, NdbRecAttr* ra)
static int
verifyHeadInline(const Tup& tup)
{
- DBG("verifyHeadInline pk1=" << tup.m_pk1);
+ DBG("verifyHeadInline pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
CHK(g_opr->readTuple() == 0);
@@ -580,17 +722,18 @@ verifyHeadInline(const Tup& tup)
static int
verifyBlobTable(const Bcol& b, const Bval& v, Uint32 pk1, bool exists)
{
- DBG("verify " << b.m_btname << " pk1=" << pk1);
+ DBG("verify " << b.m_btname << " pk1=" << hex << pk1);
NdbRecAttr* ra_pk;
NdbRecAttr* ra_part;
NdbRecAttr* ra_data;
+ NdbResultSet* rs;
CHK((g_con = g_ndb->startTransaction()) != 0);
- CHK((g_opr = g_con->getNdbOperation(b.m_btname)) != 0);
- CHK(g_opr->openScanRead() == 0);
- CHK((ra_pk = g_opr->getValue("PK")) != 0);
- CHK((ra_part = g_opr->getValue("PART")) != 0);
- CHK((ra_data = g_opr->getValue("DATA")) != 0);
- CHK(g_con->executeScan() == 0);
+ CHK((g_ops = g_con->getNdbScanOperation(b.m_btname)) != 0);
+ CHK((rs = g_ops->readTuples()) != 0);
+ CHK((ra_pk = g_ops->getValue("PK")) != 0);
+ CHK((ra_part = g_ops->getValue("PART")) != 0);
+ CHK((ra_data = g_ops->getValue("DATA")) != 0);
+ CHK(g_con->execute(NoCommit) == 0);
unsigned partcount;
if (! exists || v.m_len <= b.m_inline)
partcount = 0;
@@ -600,7 +743,7 @@ verifyBlobTable(const Bcol& b, const Bval& v, Uint32 pk1, bool exists)
memset(seen, 0, partcount);
while (1) {
int ret;
- CHK((ret = g_con->nextScanResult()) == 0 || ret == 1);
+ CHK((ret = rs->nextResult()) == 0 || ret == 1);
if (ret == 1)
break;
if (pk1 != ra_pk->u_32_value())
@@ -620,7 +763,7 @@ verifyBlobTable(const Bcol& b, const Bval& v, Uint32 pk1, bool exists)
for (unsigned i = 0; i < partcount; i++)
CHK(seen[i] == 1);
g_ndb->closeTransaction(g_con);
- g_opr = 0;
+ g_ops = 0;
g_con = 0;
return 0;
}
@@ -639,7 +782,7 @@ verifyBlob()
{
for (unsigned k = 0; k < g_opt.m_rows; k++) {
const Tup& tup = g_tups[k];
- DBG("verifyBlob pk1=" << tup.m_pk1);
+ DBG("verifyBlob pk1=" << hex << tup.m_pk1);
CHK(verifyHeadInline(tup) == 0);
CHK(verifyBlobTable(tup) == 0);
}
@@ -648,105 +791,120 @@ verifyBlob()
// operations
+static const char* stylename[3] = {
+ "style=getValue/setValue",
+ "style=setActiveHook",
+ "style=readData/writeData"
+};
+
+// pk ops
+
static int
-insertPk(bool rw)
+insertPk(int style)
{
- DBG("--- insertPk ---");
+ DBG("--- insertPk " << stylename[style] << " ---");
+ unsigned n = 0;
+ CHK((g_con = g_ndb->startTransaction()) != 0);
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
- DBG("insertPk pk1=" << tup.m_pk1);
- CHK((g_con = g_ndb->startTransaction()) != 0);
+ DBG("insertPk pk1=" << hex << tup.m_pk1);
CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
CHK(g_opr->insertTuple() == 0);
CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
if (g_opt.m_pk2len != 0)
CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
- CHK((g_bh1 = g_opr->getBlobHandle("BL1")) != 0);
- if (! g_opt.m_oneblob)
- CHK((g_bh2 = g_opr->getBlobHandle("BL2")) != 0);
- if (! rw) {
- CHK(setBlobValue(g_bh1, tup.m_blob1) == 0);
- if (! g_opt.m_oneblob)
- CHK(setBlobValue(g_bh2, tup.m_blob2) == 0);
+ CHK(getBlobHandles(g_opr) == 0);
+ if (style == 0) {
+ CHK(setBlobValue(tup) == 0);
+ } else if (style == 1) {
+ // non-nullable must be set
+ CHK(g_bh1->setValue("", 0) == 0);
+ CHK(setBlobWriteHook(tup) == 0);
} else {
// non-nullable must be set
CHK(g_bh1->setValue("", 0) == 0);
CHK(g_con->execute(NoCommit) == 0);
- CHK(writeBlobData(g_bh1, tup.m_blob1) == 0);
- if (! g_opt.m_oneblob)
- CHK(writeBlobData(g_bh2, tup.m_blob2) == 0);
+ CHK(writeBlobData(tup) == 0);
+ }
+ // just another trap
+ if (urandom(10) == 0)
+ CHK(g_con->execute(NoCommit) == 0);
+ if (++n == g_opt.m_batch) {
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ n = 0;
}
- CHK(g_con->execute(Commit) == 0);
- g_ndb->closeTransaction(g_con);
g_opr = 0;
- g_con = 0;
tup.m_exists = true;
}
+ if (n != 0) {
+ CHK(g_con->execute(Commit) == 0);
+ n = 0;
+ }
+ g_ndb->closeTransaction(g_con);
+ g_con = 0;
return 0;
}
static int
-updatePk(bool rw)
+readPk(int style)
{
- DBG("--- updatePk ---");
+ DBG("--- readPk " << stylename[style] << " ---");
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
- DBG("updatePk pk1=" << tup.m_pk1);
+ DBG("readPk pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
- CHK(g_opr->updateTuple() == 0);
+ CHK(g_opr->readTuple() == 0);
CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
if (g_opt.m_pk2len != 0)
CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
- CHK((g_bh1 = g_opr->getBlobHandle("BL1")) != 0);
- if (! g_opt.m_oneblob)
- CHK((g_bh2 = g_opr->getBlobHandle("BL2")) != 0);
- if (! rw) {
- CHK(setBlobValue(g_bh1, tup.m_blob1) == 0);
- if (! g_opt.m_oneblob)
- CHK(setBlobValue(g_bh2, tup.m_blob2) == 0);
+ CHK(getBlobHandles(g_opr) == 0);
+ if (style == 0) {
+ CHK(getBlobValue(tup) == 0);
+ } else if (style == 1) {
+ CHK(setBlobReadHook(tup) == 0);
} else {
CHK(g_con->execute(NoCommit) == 0);
- CHK(writeBlobData(g_bh1, tup.m_blob1) == 0);
- if (! g_opt.m_oneblob)
- CHK(writeBlobData(g_bh2, tup.m_blob2) == 0);
+ CHK(readBlobData(tup) == 0);
}
CHK(g_con->execute(Commit) == 0);
+ if (style == 0 || style == 1) {
+ CHK(verifyBlobValue(tup) == 0);
+ }
g_ndb->closeTransaction(g_con);
g_opr = 0;
g_con = 0;
- tup.m_exists = true;
}
return 0;
}
static int
-updateIdx(bool rw)
+updatePk(int style)
{
- DBG("--- updateIdx ---");
+ DBG("--- updatePk " << stylename[style] << " ---");
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
- DBG("updateIdx pk1=" << tup.m_pk1);
+ DBG("updatePk pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
- CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
- CHK(g_opx->updateTuple() == 0);
- CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
- CHK((g_bh1 = g_opx->getBlobHandle("BL1")) != 0);
- if (! g_opt.m_oneblob)
- CHK((g_bh2 = g_opx->getBlobHandle("BL2")) != 0);
- if (! rw) {
- CHK(setBlobValue(g_bh1, tup.m_blob1) == 0);
- if (! g_opt.m_oneblob)
- CHK(setBlobValue(g_bh2, tup.m_blob2) == 0);
+ CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHK(g_opr->updateTuple() == 0);
+ CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
+ CHK(getBlobHandles(g_opr) == 0);
+ if (style == 0) {
+ CHK(setBlobValue(tup) == 0);
+ } else if (style == 1) {
+ CHK(setBlobWriteHook(tup) == 0);
} else {
CHK(g_con->execute(NoCommit) == 0);
- CHK(writeBlobData(g_bh1, tup.m_blob1) == 0);
- if (! g_opt.m_oneblob)
- CHK(writeBlobData(g_bh2, tup.m_blob2) == 0);
+ CHK(writeBlobData(tup) == 0);
}
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);
- g_opx = 0;
+ g_opr = 0;
g_con = 0;
tup.m_exists = true;
}
@@ -754,74 +912,115 @@ updateIdx(bool rw)
}
static int
-readPk(bool rw)
+deletePk()
{
- DBG("--- readPk ---");
+ DBG("--- deletePk ---");
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
- DBG("readPk pk1=" << tup.m_pk1);
+ DBG("deletePk pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
- CHK(g_opr->readTuple() == 0);
+ CHK(g_opr->deleteTuple() == 0);
CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
if (g_opt.m_pk2len != 0)
CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
- CHK((g_bh1 = g_opr->getBlobHandle("BL1")) != 0);
- if (! g_opt.m_oneblob)
- CHK((g_bh2 = g_opr->getBlobHandle("BL2")) != 0);
- if (! rw) {
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+ g_opr = 0;
+ g_con = 0;
+ tup.m_exists = false;
+ }
+ return 0;
+}
+
+// hash index ops
+
+static int
+readIdx(int style)
+{
+ DBG("--- readIdx " << stylename[style] << " ---");
+ for (unsigned k = 0; k < g_opt.m_rows; k++) {
+ Tup& tup = g_tups[k];
+ DBG("readIdx pk1=" << hex << tup.m_pk1);
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
+ CHK(g_opx->readTuple() == 0);
+ CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
+ CHK(getBlobHandles(g_opx) == 0);
+ if (style == 0) {
CHK(getBlobValue(tup) == 0);
+ } else if (style == 1) {
+ CHK(setBlobReadHook(tup) == 0);
} else {
CHK(g_con->execute(NoCommit) == 0);
CHK(readBlobData(tup) == 0);
}
CHK(g_con->execute(Commit) == 0);
- if (! rw) {
+ if (style == 0 || style == 1) {
CHK(verifyBlobValue(tup) == 0);
}
g_ndb->closeTransaction(g_con);
- g_opr = 0;
+ g_opx = 0;
g_con = 0;
}
return 0;
}
static int
-readIdx(bool rw)
+updateIdx(int style)
{
- DBG("--- readIdx ---");
+ DBG("--- updateIdx " << stylename[style] << " ---");
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
- DBG("readIdx pk1=" << tup.m_pk1);
+ DBG("updateIdx pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
- CHK(g_opx->readTuple() == 0);
+ CHK(g_opx->updateTuple() == 0);
CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
- CHK((g_bh1 = g_opx->getBlobHandle("BL1")) != 0);
- if (! g_opt.m_oneblob)
- CHK((g_bh2 = g_opx->getBlobHandle("BL2")) != 0);
- if (! rw) {
- CHK(getBlobValue(tup) == 0);
+ CHK(getBlobHandles(g_opx) == 0);
+ if (style == 0) {
+ CHK(setBlobValue(tup) == 0);
+ } else if (style == 1) {
+ CHK(setBlobWriteHook(tup) == 0);
} else {
CHK(g_con->execute(NoCommit) == 0);
- CHK(readBlobData(tup) == 0);
+ CHK(writeBlobData(tup) == 0);
}
CHK(g_con->execute(Commit) == 0);
- if (! rw) {
- CHK(verifyBlobValue(tup) == 0);
- }
g_ndb->closeTransaction(g_con);
g_opx = 0;
g_con = 0;
+ tup.m_exists = true;
+ }
+ return 0;
+}
+
+static int
+deleteIdx()
+{
+ DBG("--- deleteIdx ---");
+ for (unsigned k = 0; k < g_opt.m_rows; k++) {
+ Tup& tup = g_tups[k];
+ DBG("deleteIdx pk1=" << hex << tup.m_pk1);
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
+ CHK(g_opx->deleteTuple() == 0);
+ CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+ g_opx = 0;
+ g_con = 0;
+ tup.m_exists = false;
}
return 0;
}
+// scan ops table and index
+
static int
-readScan(bool rw, bool idx)
+readScan(int style, bool idx)
{
- const char* func = ! idx ? "scan read table" : "scan read index";
- DBG("--- " << func << " ---");
+ DBG("--- " << "readScan" << (idx ? "Idx" : "") << " " << stylename[style] << " ---");
Tup tup;
tup.alloc(); // allocate buffers
NdbResultSet* rs;
@@ -829,17 +1028,17 @@ readScan(bool rw, bool idx)
if (! idx) {
CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
} else {
- CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
+ CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
}
- CHK((rs = g_ops->readTuples(240, NdbScanOperation::LM_Exclusive)) != 0);
+ CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Exclusive)) != 0);
CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
if (g_opt.m_pk2len != 0)
CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
- CHK((g_bh1 = g_ops->getBlobHandle("BL1")) != 0);
- if (! g_opt.m_oneblob)
- CHK((g_bh2 = g_ops->getBlobHandle("BL2")) != 0);
- if (! rw) {
+ CHK(getBlobHandles(g_ops) == 0);
+ if (style == 0) {
CHK(getBlobValue(tup) == 0);
+ } else if (style == 1) {
+ CHK(setBlobReadHook(tup) == 0);
}
CHK(g_con->execute(NoCommit) == 0);
unsigned rows = 0;
@@ -850,11 +1049,14 @@ readScan(bool rw, bool idx)
CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
- DBG(func << " pk1=" << tup.m_pk1);
+ DBG("readScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
CHK(k < g_opt.m_rows && g_tups[k].m_exists);
- tup.copy(g_tups[k]);
- if (! rw) {
+ tup.copyfrom(g_tups[k]);
+ if (style == 0) {
+ CHK(verifyBlobValue(tup) == 0);
+ } else if (style == 1) {
+ // execute ops generated by callbacks, if any
CHK(verifyBlobValue(tup) == 0);
} else {
CHK(readBlobData(tup) == 0);
@@ -869,61 +1071,72 @@ readScan(bool rw, bool idx)
}
static int
-deletePk()
+updateScan(int style, bool idx)
{
- DBG("--- deletePk ---");
- for (unsigned k = 0; k < g_opt.m_rows; k++) {
- Tup& tup = g_tups[k];
- DBG("deletePk pk1=" << tup.m_pk1);
- CHK((g_con = g_ndb->startTransaction()) != 0);
- CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
- CHK(g_opr->deleteTuple() == 0);
- CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
- if (g_opt.m_pk2len != 0)
- CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
- CHK(g_con->execute(Commit) == 0);
- g_ndb->closeTransaction(g_con);
- g_opr = 0;
- g_con = 0;
- tup.m_exists = false;
+ DBG("--- " << "updateScan" << (idx ? "Idx" : "") << " " << stylename[style] << " ---");
+ Tup tup;
+ tup.alloc(); // allocate buffers
+ NdbResultSet* rs;
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ if (! idx) {
+ CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
+ } else {
+ CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
}
- return 0;
-}
-
-static int
-deleteIdx()
-{
- DBG("--- deleteIdx ---");
- for (unsigned k = 0; k < g_opt.m_rows; k++) {
- Tup& tup = g_tups[k];
- DBG("deleteIdx pk1=" << tup.m_pk1);
- CHK((g_con = g_ndb->startTransaction()) != 0);
- CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
- CHK(g_opx->deleteTuple() == 0);
- CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
- CHK(g_con->execute(Commit) == 0);
- g_ndb->closeTransaction(g_con);
- g_opx = 0;
- g_con = 0;
- tup.m_exists = false;
+ CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Exclusive)) != 0);
+ CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
+ CHK(g_con->execute(NoCommit) == 0);
+ unsigned rows = 0;
+ while (1) {
+ int ret;
+ tup.m_pk1 = (Uint32)-1;
+ memset(tup.m_pk2, 'x', g_opt.m_pk2len);
+ CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ DBG("updateScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
+ Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
+ CHK(k < g_opt.m_rows && g_tups[k].m_exists);
+ // calculate new blob values
+ calcBval(g_tups[k], false);
+ tup.copyfrom(g_tups[k]);
+ CHK((g_opr = rs->updateTuple()) != 0);
+ CHK(getBlobHandles(g_opr) == 0);
+ if (style == 0) {
+ CHK(setBlobValue(tup) == 0);
+ } else if (style == 1) {
+ CHK(setBlobWriteHook(tup) == 0);
+ } else {
+ CHK(g_con->execute(NoCommit) == 0);
+ CHK(writeBlobData(tup) == 0);
+ }
+ CHK(g_con->execute(NoCommit) == 0);
+ g_opr = 0;
+ rows++;
}
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+ g_con = 0;
+ g_ops = 0;
+ CHK(g_opt.m_rows == rows);
return 0;
}
static int
deleteScan(bool idx)
{
- const char* func = ! idx ? "scan delete table" : "scan delete index";
- DBG("--- " << func << " ---");
+ DBG("--- " << "deleteScan" << (idx ? "Idx" : "") << " ---");
Tup tup;
NdbResultSet* rs;
CHK((g_con = g_ndb->startTransaction()) != 0);
if (! idx) {
CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
} else {
- CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
+ CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
}
- CHK((rs = g_ops->readTuples(240, NdbScanOperation::LM_Exclusive)) != 0);
+ CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Exclusive)) != 0);
CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
if (g_opt.m_pk2len != 0)
CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
@@ -936,7 +1149,7 @@ deleteScan(bool idx)
CHK((ret = rs->nextResult()) == 0 || ret == 1);
if (ret == 1)
break;
- DBG(func << " pk1=" << tup.m_pk1);
+ DBG("deleteScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
CHK(rs->deleteTuple() == 0);
CHK(g_con->execute(NoCommit) == 0);
Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
@@ -947,7 +1160,6 @@ deleteScan(bool idx)
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);
g_con = 0;
- g_opr = 0;
g_ops = 0;
CHK(g_opt.m_rows == rows);
return 0;
@@ -958,6 +1170,7 @@ deleteScan(bool idx)
static int
testmain()
{
+ int style;
g_ndb = new Ndb("TEST_DB");
CHK(g_ndb->init() == 0);
CHK(g_ndb->waitUntilReady() == 0);
@@ -980,69 +1193,75 @@ testmain()
}
if (g_opt.m_seed != 0)
srandom(g_opt.m_seed);
- for (unsigned loop = 0; g_opt.m_loop == 0 || loop < g_opt.m_loop; loop++) {
- DBG("=== loop " << loop << " ===");
+ for (g_loop = 0; g_opt.m_loop == 0 || g_loop < g_opt.m_loop; g_loop++) {
+ DBG("=== loop " << g_loop << " ===");
if (g_opt.m_seed == 0)
- srandom(loop);
- bool llim = skip('v') ? true : false;
- bool ulim = skip('w') ? false : true;
+ srandom(g_loop);
// pk
- for (int rw = llim; rw <= ulim; rw++) {
- if (skip('k'))
+ for (style = 0; style <= 2; style++) {
+ if (skipcase('k') || skipstyle(style))
continue;
- DBG("--- pk ops " << (! rw ? "get/set" : "read/write") << " ---");
+ DBG("--- pk ops " << stylename[style] << " ---");
calcTups(false);
- CHK(insertPk(rw) == 0);
+ CHK(insertPk(style) == 0);
CHK(verifyBlob() == 0);
- CHK(readPk(rw) == 0);
- if (! skip('u')) {
- calcTups(rw);
- CHK(updatePk(rw) == 0);
+ CHK(readPk(style) == 0);
+ if (! skipcase('u')) {
+ calcTups(style);
+ CHK(updatePk(style) == 0);
CHK(verifyBlob() == 0);
}
- CHK(readPk(rw) == 0);
+ CHK(readPk(style) == 0);
CHK(deletePk() == 0);
CHK(verifyBlob() == 0);
}
// hash index
- for (int rw = llim; rw <= ulim; rw++) {
- if (skip('i'))
+ for (style = 0; style <= 2; style++) {
+ if (skipcase('i') || skipstyle(style))
continue;
- DBG("--- idx ops " << (! rw ? "get/set" : "read/write") << " ---");
+ DBG("--- idx ops " << stylename[style] << " ---");
calcTups(false);
- CHK(insertPk(rw) == 0);
+ CHK(insertPk(style) == 0);
CHK(verifyBlob() == 0);
- CHK(readIdx(rw) == 0);
- calcTups(rw);
- if (! skip('u')) {
- CHK(updateIdx(rw) == 0);
+ CHK(readIdx(style) == 0);
+ calcTups(style);
+ if (! skipcase('u')) {
+ CHK(updateIdx(style) == 0);
CHK(verifyBlob() == 0);
- CHK(readIdx(rw) == 0);
+ CHK(readIdx(style) == 0);
}
CHK(deleteIdx() == 0);
CHK(verifyBlob() == 0);
}
// scan table
- for (int rw = llim; rw <= ulim; rw++) {
- if (skip('s'))
+ for (style = 0; style <= 2; style++) {
+ if (skipcase('s') || skipstyle(style))
continue;
- DBG("--- table scan " << (! rw ? "get/set" : "read/write") << " ---");
+ DBG("--- table scan " << stylename[style] << " ---");
calcTups(false);
- CHK(insertPk(rw) == 0);
+ CHK(insertPk(style) == 0);
CHK(verifyBlob() == 0);
- CHK(readScan(rw, false) == 0);
+ CHK(readScan(style, false) == 0);
+ if (! skipcase('u')) {
+ CHK(updateScan(style, false) == 0);
+ CHK(verifyBlob() == 0);
+ }
CHK(deleteScan(false) == 0);
CHK(verifyBlob() == 0);
}
// scan index
- for (int rw = llim; rw <= ulim; rw++) {
- if (skip('r'))
+ for (style = 0; style <= 2; style++) {
+ if (skipcase('r') || skipstyle(style))
continue;
- DBG("--- index scan " << (! rw ? "get/set" : "read/write") << " ---");
+ DBG("--- index scan " << stylename[style] << " ---");
calcTups(false);
- CHK(insertPk(rw) == 0);
+ CHK(insertPk(style) == 0);
CHK(verifyBlob() == 0);
- CHK(readScan(rw, true) == 0);
+ CHK(readScan(style, true) == 0);
+ if (! skipcase('u')) {
+ CHK(updateScan(style, true) == 0);
+ CHK(verifyBlob() == 0);
+ }
CHK(deleteScan(true) == 0);
CHK(verifyBlob() == 0);
}
@@ -1056,6 +1275,7 @@ testmain()
static int
bugtest_4088()
{
+ unsigned i;
DBG("bug test 4088 - ndb api hang with mixed ops on index table");
// insert rows
calcTups(false);
@@ -1067,7 +1287,7 @@ bugtest_4088()
// read table pk via index as a table
const unsigned pkcnt = 2;
Tup pktup[pkcnt];
- for (unsigned i = 0; i < pkcnt; i++) {
+ for (i = 0; i < pkcnt; i++) {
char name[20];
// XXX guess table id
sprintf(name, "%d/%s", 4, g_opt.m_x1name);
@@ -1086,7 +1306,7 @@ bugtest_4088()
// BUG 4088: gets 1 tckeyconf, 1 tcindxconf, then hangs
CHK(g_con->execute(Commit) == 0);
// verify
- for (unsigned i = 0; i < pkcnt; i++) {
+ for (i = 0; i < pkcnt; i++) {
CHK(pktup[i].m_pk1 == tup.m_pk1);
CHK(memcmp(pktup[i].m_pk2, tup.m_pk2, g_opt.m_pk2len) == 0);
}
@@ -1120,6 +1340,12 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
{
while (++argv, --argc > 0) {
const char* arg = argv[0];
+ if (strcmp(arg, "-batch") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_batch = atoi(argv[0]);
+ continue;
+ }
+ }
if (strcmp(arg, "-core") == 0) {
g_opt.m_core = true;
continue;
@@ -1131,7 +1357,7 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
if (strcmp(arg, "-dbgall") == 0) {
g_opt.m_dbg = true;
g_opt.m_dbgall = true;
- putenv("NDB_BLOB_DEBUG=1");
+ putenv(strdup("NDB_BLOB_DEBUG=1"));
continue;
}
if (strcmp(arg, "-full") == 0) {
@@ -1164,9 +1390,13 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
}
if (strcmp(arg, "-skip") == 0) {
if (++argv, --argc > 0) {
- for (const char* p = argv[0]; *p != 0; p++) {
- skip(*p) = true;
- }
+ g_opt.m_skip = strdup(argv[0]);
+ continue;
+ }
+ }
+ if (strcmp(arg, "-style") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_style = strdup(argv[0]);
continue;
}
}
@@ -1174,10 +1404,6 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
if (strcmp(arg, "-pk2len") == 0) {
if (++argv, --argc > 0) {
g_opt.m_pk2len = atoi(argv[0]);
- if (g_opt.m_pk2len == 0) {
- skip('i') = true;
- skip('r') = true;
- }
if (g_opt.m_pk2len <= g_max_pk2len)
continue;
}
@@ -1204,7 +1430,15 @@ NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
printusage();
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
+ if (g_opt.m_pk2len == 0) {
+ char b[100];
+ strcpy(b, g_opt.m_skip);
+ strcat(b, "i");
+ strcat(b, "r");
+ g_opt.m_skip = strdup(b);
+ }
if (testmain() == -1) {
+ ndbout << "line " << __LINE__ << " FAIL loop=" << g_loop << endl;
return NDBT_ProgramExit(NDBT_FAILED);
}
return NDBT_ProgramExit(NDBT_OK);
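
The style parameter threaded through the reworked testBlobs cases selects one of three blob access paths. A minimal sketch of the dispatch, using only the helper names visible in the hunks above (getBlobHandles, getBlobValue, setBlobReadHook, readBlobData, verifyBlobValue are the test's own wrappers; the exact NdbBlob calls behind them are not shown here and are assumed):

    // Sketch only: per-style blob read dispatch as used by readPk/readIdx/readScan.
    // style 0 = buffered get, style 1 = active read hook, style 2 = explicit readData
    // after an extra execute(NoCommit) round trip.
    CHK(getBlobHandles(g_opr) == 0);
    if (style == 0) {
      CHK(getBlobValue(tup) == 0);        // value copied into tup's buffer on execute
    } else if (style == 1) {
      CHK(setBlobReadHook(tup) == 0);     // hook invoked per blob during execute
    } else {
      CHK(g_con->execute(NoCommit) == 0); // make the blob handle readable first
      CHK(readBlobData(tup) == 0);        // explicit read into tup's buffer
    }
    CHK(g_con->execute(Commit) == 0);
    if (style == 0 || style == 1)
      CHK(verifyBlobValue(tup) == 0);
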
diff --git a/ndb/test/ndbapi/testDataBuffers.cpp b/ndb/test/ndbapi/testDataBuffers.cpp
index 75773040113..2e29dbb0d7b 100644
--- a/ndb/test/ndbapi/testDataBuffers.cpp
+++ b/ndb/test/ndbapi/testDataBuffers.cpp
@@ -84,6 +84,8 @@ static NdbSchemaCon* tcon = 0;
static NdbSchemaOp* top = 0;
static NdbConnection* con = 0;
static NdbOperation* op = 0;
+static NdbScanOperation* sop = 0;
+static NdbResultSet* rs = 0;
static int
ndberror(char const* fmt, ...)
@@ -438,9 +440,9 @@ testcase(int flag)
int newkey = 0;
if ((con = ndb->startTransaction()) == 0)
return ndberror("startTransaction key=%d", key);
- if ((op = con->getNdbOperation(tab)) == 0)
+ if ((op = sop = con->getNdbScanOperation(tab)) == 0)
return ndberror("getNdbOperation key=%d", key);
- if (op->openScanRead(1) < 0)
+ if ((rs = sop->readTuples(1)) == 0)
return ndberror("openScanRead key=%d", key);
{
col& c = ccol[0];
@@ -481,10 +483,10 @@ testcase(int flag)
}
}
}
- if (con->executeScan() < 0)
+ if (con->execute(NoCommit) < 0)
return ndberror("executeScan key=%d", key);
int ret, cnt = 0;
- while ((ret = con->nextScanResult()) == 0) {
+ while ((ret = rs->nextResult()) == 0) {
if (key != newkey)
return ndberror("unexpected key=%d newkey=%d", key, newkey);
for (int i = 1; i < attrcnt; i++) {
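
Several tests in this change move from the old openScanRead/executeScan/nextScanResult calls to the NdbScanOperation/NdbResultSet flow. A minimal read-scan sketch of that flow, assuming an initialized Ndb* ndb and a table "T" with an Unsigned column "A" (table and column names are illustrative; error checks omitted):

    NdbConnection* con = ndb->startTransaction();
    NdbScanOperation* sop = con->getNdbScanOperation("T");
    NdbResultSet* rs = sop->readTuples(NdbScanOperation::LM_Exclusive);
    Uint32 a;
    sop->getValue("A", (char*)&a);       // bind output buffer for column A
    con->execute(NoCommit);              // starts the scan
    int ret;
    while ((ret = rs->nextResult(true)) == 0) {
      // one row fetched; its "A" value is now in a
    }
    // ret == 1 is the normal end of scan, -1 is an error (see con->getNdbError())
    ndb->closeTransaction(con);
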
diff --git a/ndb/test/ndbapi/testDeadlock.cpp b/ndb/test/ndbapi/testDeadlock.cpp
new file mode 100644
index 00000000000..f51b3cea1e5
--- /dev/null
+++ b/ndb/test/ndbapi/testDeadlock.cpp
@@ -0,0 +1,514 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <NdbMain.h>
+#include <NdbApi.hpp>
+#include <NdbOut.hpp>
+#include <NdbMutex.h>
+#include <NdbCondition.h>
+#include <NdbThread.h>
+#include <NdbTest.hpp>
+
+struct Opt {
+ bool m_dbg;
+ const char* m_scan;
+ const char* m_tname;
+ const char* m_xname;
+ Opt() :
+ m_dbg(true),
+ m_scan("tx"),
+ m_tname("T"),
+ m_xname("X")
+ {}
+};
+
+static void
+printusage()
+{
+ Opt d;
+ ndbout
+ << "usage: testDeadlock" << endl
+ << "-scan tx scan table, index [" << d.m_scan << "]" << endl
+ ;
+}
+
+static Opt g_opt;
+
+static NdbMutex ndbout_mutex = NDB_MUTEX_INITIALIZER;
+
+#define DBG(x) \
+ do { \
+ if (! g_opt.m_dbg) break; \
+ NdbMutex_Lock(&ndbout_mutex); \
+ ndbout << "line " << __LINE__ << " " << x << endl; \
+ NdbMutex_Unlock(&ndbout_mutex); \
+ } while (0)
+
+#define CHK(x) \
+ do { \
+ if (x) break; \
+ ndbout << "line " << __LINE__ << ": " << #x << " failed" << endl; \
+ return -1; \
+ } while (0)
+
+#define CHN(p, x) \
+ do { \
+ if (x) break; \
+ ndbout << "line " << __LINE__ << ": " << #x << " failed" << endl; \
+ ndbout << (p)->getNdbError() << endl; \
+ return -1; \
+ } while (0)
+
+// threads
+
+typedef int (*Runstep)(struct Thr& thr);
+
+struct Thr {
+ enum State { Wait, Start, Stop, Stopped, Exit };
+ State m_state;
+ int m_no;
+ Runstep m_runstep;
+ int m_ret;
+ NdbMutex* m_mutex;
+ NdbCondition* m_cond;
+ NdbThread* m_thread;
+ void* m_status;
+ Ndb* m_ndb;
+ NdbConnection* m_con;
+ NdbScanOperation* m_scanop;
+ NdbIndexScanOperation* m_indexscanop;
+ NdbResultSet* m_rs;
+ //
+ Thr(int no);
+ ~Thr();
+ int run();
+ void start(Runstep runstep);
+ void stop();
+ void stopped();
+ void lock() { NdbMutex_Lock(m_mutex); }
+ void unlock() { NdbMutex_Unlock(m_mutex); }
+ void wait() { NdbCondition_Wait(m_cond, m_mutex); }
+ void signal() { NdbCondition_Signal(m_cond); }
+ void exit();
+ void join() { NdbThread_WaitFor(m_thread, &m_status); }
+};
+
+static NdbOut&
+operator<<(NdbOut& out, const Thr& thr) {
+ out << "thr " << thr.m_no;
+ return out;
+}
+
+extern "C" { static void* runthread(void* arg); }
+
+Thr::Thr(int no)
+{
+ m_state = Wait;
+ m_no = no;
+ m_runstep = 0;
+ m_ret = 0;
+ m_mutex = NdbMutex_Create();
+ m_cond = NdbCondition_Create();
+ assert(m_mutex != 0 && m_cond != 0);
+ const unsigned stacksize = 256 * 1024;
+ const NDB_THREAD_PRIO prio = NDB_THREAD_PRIO_LOW;
+ m_thread = NdbThread_Create(runthread, (void**)this, stacksize, "me", prio);
+ if (m_thread == 0) {
+ DBG("create thread failed: errno=" << errno);
+ m_ret = -1;
+ }
+ m_status = 0;
+ m_ndb = 0;
+ m_con = 0;
+ m_scanop = 0;
+ m_indexscanop = 0;
+ m_rs = 0;
+}
+
+Thr::~Thr()
+{
+ if (m_thread != 0)
+ NdbThread_Destroy(&m_thread);
+ if (m_cond != 0)
+ NdbCondition_Destroy(m_cond);
+ if (m_mutex != 0)
+ NdbMutex_Destroy(m_mutex);
+}
+
+static void*
+runthread(void* arg) {
+ Thr& thr = *(Thr*)arg;
+ thr.run();
+ return 0;
+}
+
+int
+Thr::run()
+{
+ DBG(*this << " run");
+ while (true) {
+ lock();
+ while (m_state != Start && m_state != Exit) {
+ wait();
+ }
+ if (m_state == Exit) {
+ DBG(*this << " exit");
+ unlock();
+ break;
+ }
+ m_ret = (*m_runstep)(*this);
+ m_state = Stopped;
+ signal();
+ unlock();
+ if (m_ret != 0) {
+ DBG(*this << " error exit");
+ break;
+ }
+ }
+ delete m_ndb;
+ m_ndb = 0;
+ return 0;
+}
+
+void
+Thr::start(Runstep runstep)
+{
+ lock();
+ m_state = Start;
+ m_runstep = runstep;
+ signal();
+ unlock();
+}
+
+void
+Thr::stopped()
+{
+ lock();
+ while (m_state != Stopped) {
+ wait();
+ }
+ m_state = Wait;
+ unlock();
+}
+
+void
+Thr::exit()
+{
+ lock();
+ m_state = Exit;
+ signal();
+ unlock();
+}
+
+// general
+
+static int
+runstep_connect(Thr& thr)
+{
+ Ndb* ndb = thr.m_ndb = new Ndb("TEST_DB");
+ CHN(ndb, ndb->init() == 0);
+ CHN(ndb, ndb->waitUntilReady() == 0);
+ DBG(thr << " connected");
+ return 0;
+}
+
+static int
+runstep_starttx(Thr& thr)
+{
+ Ndb* ndb = thr.m_ndb;
+ assert(ndb != 0);
+ CHN(ndb, (thr.m_con = ndb->startTransaction()) != 0);
+ DBG("thr " << thr.m_no << " tx started");
+ return 0;
+}
+
+/*
+ * WL1822 flush locks
+ *
+ * Table T with 3 tuples X, Y, Z.
+ * Two transactions (* = lock wait).
+ *
+ * - tx1 reads and locks Z
+ * - tx2 scans X, Y, *Z
+ * - tx2 returns X, Y before lock wait on Z
+ * - tx1 reads and locks *X
+ * - api asks for next tx2 result
+ * - LQH unlocks X via ACC or TUX [*]
+ * - tx1 gets lock on X
+ * - tx1 returns X to api
+ * - api commits tx1
+ * - tx2 gets lock on Z
+ * - tx2 returns Z to api
+ *
+ * The point is deadlock is avoided due to [*].
+ * The test is for 1 db node and 1 fragment table.
+ */
+
+static char wl1822_scantx = 0;
+
+static const Uint32 wl1822_valA[3] = { 0, 1, 2 };
+static const Uint32 wl1822_valB[3] = { 3, 4, 5 };
+
+static Uint32 wl1822_bufA = ~0;
+static Uint32 wl1822_bufB = ~0;
+
+// map scan row to key (A) and reverse
+static unsigned wl1822_r2k[3] = { 0, 0, 0 };
+static unsigned wl1822_k2r[3] = { 0, 0, 0 };
+
+static int
+wl1822_createtable(Thr& thr)
+{
+ Ndb* ndb = thr.m_ndb;
+ assert(ndb != 0);
+ NdbDictionary::Dictionary* dic = ndb->getDictionary();
+ // drop T
+ if (dic->getTable(g_opt.m_tname) != 0)
+ CHN(dic, dic->dropTable(g_opt.m_tname) == 0);
+ // create T
+ NdbDictionary::Table tab(g_opt.m_tname);
+ tab.setFragmentType(NdbDictionary::Object::FragAllSmall);
+ { NdbDictionary::Column col("A");
+ col.setType(NdbDictionary::Column::Unsigned);
+ col.setPrimaryKey(true);
+ tab.addColumn(col);
+ }
+ { NdbDictionary::Column col("B");
+ col.setType(NdbDictionary::Column::Unsigned);
+ col.setPrimaryKey(false);
+ tab.addColumn(col);
+ }
+ CHN(dic, dic->createTable(tab) == 0);
+ // create X
+ NdbDictionary::Index ind(g_opt.m_xname);
+ ind.setTable(g_opt.m_tname);
+ ind.setType(NdbDictionary::Index::OrderedIndex);
+ ind.setLogging(false);
+ ind.addColumn("B");
+ CHN(dic, dic->createIndex(ind) == 0);
+ DBG("created " << g_opt.m_tname << ", " << g_opt.m_xname);
+ return 0;
+}
+
+static int
+wl1822_insertrows(Thr& thr)
+{
+ // insert X, Y, Z
+ Ndb* ndb = thr.m_ndb;
+ assert(ndb != 0);
+ NdbConnection* con;
+ NdbOperation* op;
+ for (unsigned k = 0; k < 3; k++) {
+ CHN(ndb, (con = ndb->startTransaction()) != 0);
+ CHN(con, (op = con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHN(op, op->insertTuple() == 0);
+ CHN(op, op->equal("A", (char*)&wl1822_valA[k]) == 0);
+ CHN(op, op->setValue("B", (char*)&wl1822_valB[k]) == 0);
+ CHN(con, con->execute(Commit) == 0);
+ ndb->closeTransaction(con);
+ }
+ DBG("inserted X, Y, Z");
+ return 0;
+}
+
+static int
+wl1822_getscanorder(Thr& thr)
+{
+ // cheat, table order happens to be key order in my test
+ wl1822_r2k[0] = 0;
+ wl1822_r2k[1] = 1;
+ wl1822_r2k[2] = 2;
+ wl1822_k2r[0] = 0;
+ wl1822_k2r[1] = 1;
+ wl1822_k2r[2] = 2;
+ DBG("scan order determined");
+ return 0;
+}
+
+static int
+wl1822_tx1_readZ(Thr& thr)
+{
+ // tx1 read Z with exclusive lock
+ NdbConnection* con = thr.m_con;
+ assert(con != 0);
+ NdbOperation* op;
+ CHN(con, (op = con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHN(op, op->readTupleExclusive() == 0);
+ CHN(op, op->equal("A", wl1822_valA[wl1822_r2k[2]]) == 0);
+ wl1822_bufB = ~0;
+ CHN(op, op->getValue("B", (char*)&wl1822_bufB) != 0);
+ CHN(con, con->execute(NoCommit) == 0);
+ CHK(wl1822_bufB == wl1822_valB[wl1822_r2k[2]]);
+ DBG("tx1 locked Z");
+ return 0;
+}
+
+static int
+wl1822_tx2_scanXY(Thr& thr)
+{
+ // tx2 scan X, Y with exclusive lock
+ NdbConnection* con = thr.m_con;
+ assert(con != 0);
+ NdbScanOperation* scanop;
+ NdbIndexScanOperation* indexscanop;
+ NdbResultSet* rs;
+ if (wl1822_scantx == 't') {
+ CHN(con, (scanop = thr.m_scanop = con->getNdbScanOperation(g_opt.m_tname)) != 0);
+ DBG("tx2 scan exclusive " << g_opt.m_tname);
+ }
+ if (wl1822_scantx == 'x') {
+ CHN(con, (scanop = thr.m_scanop = indexscanop = thr.m_indexscanop = con->getNdbIndexScanOperation(g_opt.m_xname, g_opt.m_tname)) != 0);
+ DBG("tx2 scan exclusive " << g_opt.m_xname);
+ }
+ CHN(scanop, (rs = thr.m_rs = scanop->readTuplesExclusive(16)) != 0);
+ CHN(scanop, scanop->getValue("A", (char*)&wl1822_bufA) != 0);
+ CHN(scanop, scanop->getValue("B", (char*)&wl1822_bufB) != 0);
+ CHN(con, con->execute(NoCommit) == 0);
+ unsigned row = 0;
+ while (row < 2) {
+ DBG("before row " << row);
+ int ret;
+ wl1822_bufA = wl1822_bufB = ~0;
+ CHN(con, (ret = rs->nextResult(true)) == 0);
+ DBG("got row " << row << " a=" << wl1822_bufA << " b=" << wl1822_bufB);
+ CHK(wl1822_bufA == wl1822_valA[wl1822_r2k[row]]);
+ CHK(wl1822_bufB == wl1822_valB[wl1822_r2k[row]]);
+ row++;
+ }
+ return 0;
+}
+
+static int
+wl1822_tx1_readX_commit(Thr& thr)
+{
+ // tx1 read X with exclusive lock and commit
+ NdbConnection* con = thr.m_con;
+ assert(con != 0);
+ NdbOperation* op;
+ CHN(con, (op = con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHN(op, op->readTupleExclusive() == 0);
+ CHN(op, op->equal("A", wl1822_valA[wl1822_r2k[2]]) == 0);
+ wl1822_bufB = ~0;
+ CHN(op, op->getValue("B", (char*)&wl1822_bufB) != 0);
+ CHN(con, con->execute(NoCommit) == 0);
+ CHK(wl1822_bufB == wl1822_valB[wl1822_r2k[2]]);
+ DBG("tx1 locked X");
+ CHN(con, con->execute(Commit) == 0);
+ DBG("tx1 commit");
+ return 0;
+}
+
+static int
+wl1822_tx2_scanZ_close(Thr& thr)
+{
+ // tx2 scan Z with exclusive lock and close scan
+ Ndb* ndb = thr.m_ndb;
+ NdbConnection* con = thr.m_con;
+ NdbScanOperation* scanop = thr.m_scanop;
+ NdbResultSet* rs = thr.m_rs;
+ assert(ndb != 0 && con != 0 && scanop != 0 && rs != 0);
+ unsigned row = 2;
+ while (true) {
+ DBG("before row " << row);
+ int ret;
+ wl1822_bufA = wl1822_bufB = ~0;
+ CHN(con, (ret = rs->nextResult(true)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ DBG("got row " << row << " a=" << wl1822_bufA << " b=" << wl1822_bufB);
+ CHK(wl1822_bufA == wl1822_valA[wl1822_r2k[row]]);
+ CHK(wl1822_bufB == wl1822_valB[wl1822_r2k[row]]);
+ row++;
+ }
+ ndb->closeTransaction(con);
+ CHK(row == 3);
+ return 0;
+}
+
+// threads are synced between each step
+static Runstep wl1822_step[][2] = {
+ { runstep_connect, runstep_connect },
+ { wl1822_createtable, 0 },
+ { wl1822_insertrows, 0 },
+ { wl1822_getscanorder, 0 },
+ { runstep_starttx, runstep_starttx },
+ { wl1822_tx1_readZ, 0 },
+ { 0, wl1822_tx2_scanXY },
+ { wl1822_tx1_readX_commit, wl1822_tx2_scanZ_close }
+};
+const unsigned wl1822_stepcount = sizeof(wl1822_step)/sizeof(wl1822_step[0]);
+
+static int
+wl1822_main(char scantx)
+{
+ wl1822_scantx = scantx;
+ static const unsigned thrcount = 2;
+ // create threads for tx1 and tx2
+ Thr* thrlist[2];
+ for (int n = 0; n < thrcount; n++) {
+ Thr& thr = *(thrlist[n] = new Thr(1 + n));
+ CHK(thr.m_ret == 0);
+ }
+ // run the steps
+ for (unsigned i = 0; i < wl1822_stepcount; i++) {
+ DBG("step " << i << " start");
+ for (int n = 0; n < thrcount; n++) {
+ Thr& thr = *thrlist[n];
+ Runstep runstep = wl1822_step[i][n];
+ if (runstep != 0)
+ thr.start(runstep);
+ }
+ for (int n = 0; n < thrcount; n++) {
+ Thr& thr = *thrlist[n];
+ Runstep runstep = wl1822_step[i][n];
+ if (runstep != 0)
+ thr.stopped();
+ }
+ }
+ // delete threads
+ for (int n = 0; n < thrcount; n++) {
+ Thr& thr = *thrlist[n];
+ thr.exit();
+ thr.join();
+ delete &thr;
+ }
+ return 0;
+}
+
+NDB_COMMAND(testOdbcDriver, "testDeadlock", "testDeadlock", "testDeadlock", 65535)
+{
+ while (++argv, --argc > 0) {
+ const char* arg = argv[0];
+ if (strcmp(arg, "-scan") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_scan = strdup(argv[0]);
+ continue;
+ }
+ }
+ printusage();
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+ }
+ if (
+ strchr(g_opt.m_scan, 't') != 0 && wl1822_main('t') == -1 ||
+ strchr(g_opt.m_scan, 'x') != 0 && wl1822_main('x') == -1
+ ) {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ return NDBT_ProgramExit(NDBT_OK);
+}
+
+// vim: set sw=2 et:
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index 1451c942362..e7597c26960 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -537,6 +537,7 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
}
const NdbDictionary::Table* pTab = ctx->getTab();
+ pNdb->getDictionary()->dropTable(pTab->getName());
NdbDictionary::Table newTab(* pTab);
// Set fragment type for table
diff --git a/ndb/test/ndbapi/testGrep.cpp b/ndb/test/ndbapi/testGrep.cpp
index 4b870f6f9a9..0bf84cb4ec8 100644
--- a/ndb/test/ndbapi/testGrep.cpp
+++ b/ndb/test/ndbapi/testGrep.cpp
@@ -254,8 +254,7 @@ int runClearTable(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
-
-#include "../bank/Bank.hpp"
+#include "bank/Bank.hpp"
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
Bank bank;
@@ -444,6 +443,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
return result;
}
*/
+
NDBT_TESTSUITE(testGrep);
TESTCASE("GrepBasic",
"Test that Global Replication works on one table \n"
@@ -473,8 +473,6 @@ TESTCASE("GrepNodeRestart",
}
-
-
TESTCASE("GrepBank",
"Test that grep and restore works during transaction load\n"
" by backing up the bank"
@@ -495,6 +493,7 @@ TESTCASE("GrepBank",
// FINALIZER(runDropBank);
}
+
TESTCASE("NFMaster",
"Test that grep behaves during node failiure\n"){
INITIALIZER(setMaster);
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index 47db0b3cff7..6ebbfd8b680 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -26,7 +26,7 @@
#define CHECK(b) if (!(b)) { \
g_err << "ERR: "<< step->getName() \
<< " failed on line " << __LINE__ << endl; \
- result = NDBT_FAILED; \
+ result = NDBT_FAILED; break;\
}
@@ -381,6 +381,27 @@ runVerifyIndex(NDBT_Context* ctx, NDBT_Step* step){
}
int
+sync_down(NDBT_Context* ctx){
+ Uint32 threads = ctx->getProperty("PauseThreads", (unsigned)0);
+ if(threads){
+ ctx->decProperty("PauseThreads");
+ }
+ return 0;
+}
+
+int
+sync_up_and_wait(NDBT_Context* ctx){
+ Uint32 threads = ctx->getProperty("Threads", (unsigned)0);
+ ndbout_c("Setting PauseThreads to %d", threads);
+ ctx->setProperty("PauseThreads", threads);
+ ctx->getPropertyWait("PauseThreads", (unsigned)0);
+ if(threads){
+ ndbout_c("wait completed");
+ }
+ return 0;
+}
+
+int
runTransactions1(NDBT_Context* ctx, NDBT_Step* step){
// Verify that data in index match
// table data
@@ -394,10 +415,17 @@ runTransactions1(NDBT_Context* ctx, NDBT_Step* step){
g_err << "Updated table failed" << endl;
return NDBT_FAILED;
}
+
+ sync_down(ctx);
+ if(ctx->isTestStopped())
+ break;
+
if (hugoTrans.scanUpdateRecords(pNdb, rows, batchSize) != 0){
g_err << "Updated table failed" << endl;
return NDBT_FAILED;
}
+
+ sync_down(ctx);
}
return NDBT_OK;
}
@@ -418,7 +446,7 @@ runTransactions2(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
#endif
-
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
#if 1
@@ -427,6 +455,7 @@ runTransactions2(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
#endif
+ sync_down(ctx);
}
return NDBT_OK;
}
@@ -447,6 +476,7 @@ runTransactions3(NDBT_Context* ctx, NDBT_Step* step){
g_err << "Load table failed" << endl;
return NDBT_FAILED;
}
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
@@ -454,7 +484,8 @@ runTransactions3(NDBT_Context* ctx, NDBT_Step* step){
g_err << "Updated table failed" << endl;
return NDBT_FAILED;
}
-
+
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
@@ -463,6 +494,7 @@ runTransactions3(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
@@ -471,6 +503,7 @@ runTransactions3(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
@@ -479,6 +512,7 @@ runTransactions3(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
@@ -486,12 +520,15 @@ runTransactions3(NDBT_Context* ctx, NDBT_Step* step){
g_err << "Clear table failed" << endl;
return NDBT_FAILED;
}
+
+ sync_down(ctx);
if(ctx->isTestStopped())
break;
-
+
int count = -1;
if(utilTrans.selectCount(pNdb, 64, &count) != 0 || count != 0)
return NDBT_FAILED;
+ sync_down(ctx);
}
return NDBT_OK;
}
@@ -510,6 +547,7 @@ int runRestarts(NDBT_Context* ctx, NDBT_Step* step){
result = NDBT_FAILED;
break;
}
+ sync_up_and_wait(ctx);
i++;
}
ctx->stopTest();
@@ -1130,7 +1168,7 @@ runUniqueNullTransactions(NDBT_Context* ctx, NDBT_Step* step){
if(!pTrans) goto done;
sOp = pTrans->getNdbScanOperation(pTab->getName());
if(!sOp) goto done;
- rs = sOp->readTuples(240, NdbScanOperation::LM_Exclusive);
+ rs = sOp->readTuples(NdbScanOperation::LM_Exclusive);
if(!rs) goto done;
if(pTrans->execute(NoCommit) == -1) goto done;
while((eof = rs->nextResult(true)) == 0){
@@ -1259,6 +1297,7 @@ TESTCASE("CreateLoadDrop_O",
TESTCASE("NFNR1",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ //TC_PROPERTY("Threads", 2);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(runLoadTable);
@@ -1492,4 +1531,4 @@ int main(int argc, const char** argv){
return testIndex.execute(argc, argv);
}
-
+template class Vector<Attrib*>;
diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp
index 2e08ebbed4e..5b171d42578 100644
--- a/ndb/test/ndbapi/testNdbApi.cpp
+++ b/ndb/test/ndbapi/testNdbApi.cpp
@@ -1010,4 +1010,5 @@ int main(int argc, const char** argv){
return testNdbApi.execute(argc, argv);
}
-
+template class Vector<Ndb*>;
+template class Vector<NdbConnection*>;
diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp
index fd591f04c69..89b38c78e71 100644
--- a/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/ndb/test/ndbapi/testNodeRestart.cpp
@@ -287,8 +287,6 @@ TESTCASE("Terror",
STEP(runPkUpdateUntilStopped);
STEP(runScanReadUntilStopped);
STEP(runScanUpdateUntilStopped);
- STEP(runInsertUntilStopped);
- STEP(runClearTableUntilStopped);
FINALIZER(runClearTable);
}
TESTCASE("FullDb",
diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp
index 0ca8ce79e2e..c58dd8538e9 100644
--- a/ndb/test/ndbapi/testOIBasic.cpp
+++ b/ndb/test/ndbapi/testOIBasic.cpp
@@ -33,14 +33,18 @@
struct Opt {
// common options
+ unsigned m_batch;
const char* m_case;
bool m_core;
bool m_dups;
NdbDictionary::Object::FragmentType m_fragtype;
+ unsigned m_idxloop;
const char* m_index;
unsigned m_loop;
bool m_nologging;
+ bool m_msglock;
unsigned m_rows;
+ unsigned m_samples;
unsigned m_scanrd;
unsigned m_scanex;
unsigned m_seed;
@@ -49,17 +53,21 @@ struct Opt {
unsigned m_threads;
unsigned m_v;
Opt() :
+ m_batch(32),
m_case(0),
m_core(false),
m_dups(false),
m_fragtype(NdbDictionary::Object::FragUndefined),
+ m_idxloop(4),
m_index(0),
m_loop(1),
m_nologging(false),
+ m_msglock(true),
m_rows(1000),
+ m_samples(0),
m_scanrd(240),
m_scanex(240),
- m_seed(1),
+ m_seed(0),
m_subloop(4),
m_table(0),
m_threads(4),
@@ -78,17 +86,19 @@ printhelp()
Opt d;
ndbout
<< "usage: testOIbasic [options]" << endl
+ << " -batch N pk operations in batch [" << d.m_batch << "]" << endl
<< " -case abc only given test cases (letters a-z)" << endl
<< " -core core dump on error [" << d.m_core << "]" << endl
<< " -dups allow duplicate tuples from index scan [" << d.m_dups << "]" << endl
<< " -fragtype T fragment type single/small/medium/large" << endl
<< " -index xyz only given index numbers (digits 1-9)" << endl
- << " -loop N loop count full suite forever=0 [" << d.m_loop << "]" << endl
+ << " -loop N loop count full suite 0=forever [" << d.m_loop << "]" << endl
<< " -nologging create tables in no-logging mode" << endl
<< " -rows N rows per thread [" << d.m_rows << "]" << endl
+ << " -samples N samples for some timings (0=all) [" << d.m_samples << "]" << endl
<< " -scanrd N scan read parallelism [" << d.m_scanrd << "]" << endl
<< " -scanex N scan exclusive parallelism [" << d.m_scanex << "]" << endl
- << " -seed N srandom seed [" << d.m_seed << "]" << endl
+ << " -seed N srandom seed 0=loop number[" << d.m_seed << "]" << endl
<< " -subloop N subtest loop count [" << d.m_subloop << "]" << endl
<< " -table xyz only given table numbers (digits 1-9)" << endl
<< " -threads N number of threads [" << d.m_threads << "]" << endl
@@ -99,6 +109,12 @@ printhelp()
printtables();
}
+// not yet configurable
+static const bool g_store_null_key = true;
+
+// compare NULL like normal value (NULL < not NULL, NULL == NULL)
+static const bool g_compare_null = true;
+
// log and error macros
static NdbMutex ndbout_mutex = NDB_MUTEX_INITIALIZER;
@@ -124,9 +140,9 @@ getthrstr()
#define LLN(n, s) \
do { \
if ((n) > g_opt.m_v) break; \
- NdbMutex_Lock(&ndbout_mutex); \
+ if (g_opt.m_msglock) NdbMutex_Lock(&ndbout_mutex); \
ndbout << getthrstr() << s << endl; \
- NdbMutex_Unlock(&ndbout_mutex); \
+ if (g_opt.m_msglock) NdbMutex_Unlock(&ndbout_mutex); \
} while(0)
#define LL0(s) LLN(0, s)
@@ -139,11 +155,10 @@ getthrstr()
// following check a condition and return -1 on failure
#undef CHK // simple check
-#undef CHKTRY // execute action (try-catch) on failure
-#undef CHKMSG // print extra message on failure
+#undef CHKTRY // check with action on fail
#undef CHKCON // print NDB API errors on failure
-#define CHK(x) CHKTRY(x, ;)
+#define CHK(x) CHKTRY(x, ;)
#define CHKTRY(x, act) \
do { \
@@ -154,14 +169,6 @@ getthrstr()
return -1; \
} while (0)
-#define CHKMSG(x, msg) \
- do { \
- if (x) break; \
- LL0("line " << __LINE__ << ": " << #x << " failed: " << msg); \
- if (g_opt.m_core) abort(); \
- return -1; \
- } while (0)
-
#define CHKCON(x, con) \
do { \
if (x) break; \
@@ -177,6 +184,7 @@ class Thr;
class Con;
class Tab;
class Set;
+class Tmr;
struct Par : public Opt {
unsigned m_no;
@@ -186,14 +194,17 @@ struct Par : public Opt {
const Tab& tab() const { assert(m_tab != 0); return *m_tab; }
Set* m_set;
Set& set() const { assert(m_set != 0); return *m_set; }
+ Tmr* m_tmr;
+ Tmr& tmr() const { assert(m_tmr != 0); return *m_tmr; }
unsigned m_totrows;
- unsigned m_batch;
// value calculation
unsigned m_pctnull;
unsigned m_range;
unsigned m_pctrange;
// do verify after read
bool m_verify;
+ // deadlock possible
+ bool m_deadlock;
// timer location
Par(const Opt& opt) :
Opt(opt),
@@ -201,12 +212,13 @@ struct Par : public Opt {
m_con(0),
m_tab(0),
m_set(0),
+ m_tmr(0),
m_totrows(m_threads * m_rows),
- m_batch(32),
m_pctnull(10),
m_range(m_rows),
m_pctrange(0),
- m_verify(false) {
+ m_verify(false),
+ m_deadlock(false) {
}
};
@@ -241,19 +253,20 @@ struct Tmr {
void on();
void off(unsigned cnt = 0);
const char* time();
+ const char* pct(const Tmr& t1);
const char* over(const Tmr& t1);
NDB_TICKS m_on;
unsigned m_ms;
unsigned m_cnt;
char m_time[100];
- char m_over[100];
+ char m_text[100];
Tmr() { clr(); }
};
void
Tmr::clr()
{
- m_on = m_ms = m_cnt = m_time[0] = m_over[0] = 0;
+ m_on = m_ms = m_cnt = m_time[0] = m_text[0] = 0;
}
void
@@ -285,14 +298,63 @@ Tmr::time()
}
const char*
+Tmr::pct(const Tmr& t1)
+{
+ if (0 < t1.m_ms) {
+ sprintf(m_text, "%u pct", (100 * m_ms) / t1.m_ms);
+ } else {
+ sprintf(m_text, "[cannot measure]");
+ }
+ return m_text;
+}
+
+const char*
Tmr::over(const Tmr& t1)
{
- if (0 < t1.m_ms && t1.m_ms < m_ms) {
- sprintf(m_over, "%u pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms);
+ if (0 < t1.m_ms) {
+ if (t1.m_ms <= m_ms)
+ sprintf(m_text, "%u pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms);
+ else
+ sprintf(m_text, "-%u pct", (100 * (t1.m_ms - m_ms)) / t1.m_ms);
} else {
- sprintf(m_over, "[cannot measure]");
+ sprintf(m_text, "[cannot measure]");
}
- return m_over;
+ return m_text;
+}
+
+// list of ints
+
+struct Lst {
+ Lst();
+ unsigned m_arr[1000];
+ unsigned m_cnt;
+ void push(unsigned i);
+ unsigned cnt() const;
+ void reset();
+};
+
+Lst::Lst() :
+ m_cnt(0)
+{
+}
+
+void
+Lst::push(unsigned i)
+{
+ assert(m_cnt < sizeof(m_arr)/sizeof(m_arr[0]));
+ m_arr[m_cnt++] = i;
+}
+
+unsigned
+Lst::cnt() const
+{
+ return m_cnt;
+}
+
+void
+Lst::reset()
+{
+ m_cnt = 0;
}
// tables and indexes
@@ -409,7 +471,7 @@ operator<<(NdbOut& out, const Tab& tab)
return out;
}
-// tt1 + tt1x1 tt1x2 tt1x3 tt1x4
+// tt1 + tt1x1 tt1x2 tt1x3 tt1x4 tt1x5
static const Col
tt1col[] = {
@@ -422,24 +484,29 @@ tt1col[] = {
static const ICol
tt1x1col[] = {
- { 0, tt1col[1] }
+ { 0, tt1col[0] }
};
static const ICol
tt1x2col[] = {
+ { 0, tt1col[1] }
+};
+
+static const ICol
+tt1x3col[] = {
{ 0, tt1col[1] },
{ 1, tt1col[2] }
};
static const ICol
-tt1x3col[] = {
+tt1x4col[] = {
{ 0, tt1col[3] },
{ 1, tt1col[2] },
{ 2, tt1col[1] }
};
static const ICol
-tt1x4col[] = {
+tt1x5col[] = {
{ 0, tt1col[1] },
{ 1, tt1col[4] },
{ 2, tt1col[2] },
@@ -453,17 +520,22 @@ tt1x1 = {
static const ITab
tt1x2 = {
- "TT1X2", 2, tt1x2col
+ "TT1X2", 1, tt1x2col
};
static const ITab
tt1x3 = {
- "TT1X3", 3, tt1x3col
+ "TT1X3", 2, tt1x3col
};
static const ITab
tt1x4 = {
- "TT1X4", 4, tt1x4col
+ "TT1X4", 3, tt1x4col
+};
+
+static const ITab
+tt1x5 = {
+ "TT1X5", 4, tt1x5col
};
static const ITab
@@ -471,15 +543,16 @@ tt1itab[] = {
tt1x1,
tt1x2,
tt1x3,
- tt1x4
+ tt1x4,
+ tt1x5
};
static const Tab
tt1 = {
- "TT1", 5, tt1col, 4, tt1itab
+ "TT1", 5, tt1col, 5, tt1itab
};
-// tt2 + tt2x1 tt2x2 tt2x3 tt2x4
+// tt2 + tt2x1 tt2x2 tt2x3 tt2x4 tt2x5
static const Col
tt2col[] = {
@@ -492,24 +565,29 @@ tt2col[] = {
static const ICol
tt2x1col[] = {
+ { 0, tt2col[0] }
+};
+
+static const ICol
+tt2x2col[] = {
{ 0, tt2col[1] },
{ 1, tt2col[2] }
};
static const ICol
-tt2x2col[] = {
+tt2x3col[] = {
{ 0, tt2col[2] },
{ 1, tt2col[1] }
};
static const ICol
-tt2x3col[] = {
+tt2x4col[] = {
{ 0, tt2col[3] },
{ 1, tt2col[4] }
};
static const ICol
-tt2x4col[] = {
+tt2x5col[] = {
{ 0, tt2col[4] },
{ 1, tt2col[3] },
{ 2, tt2col[2] },
@@ -518,7 +596,7 @@ tt2x4col[] = {
static const ITab
tt2x1 = {
- "TT2X1", 2, tt2x1col
+ "TT2X1", 1, tt2x1col
};
static const ITab
@@ -533,7 +611,12 @@ tt2x3 = {
static const ITab
tt2x4 = {
- "TT2X4", 4, tt2x4col
+ "TT2X4", 2, tt2x4col
+};
+
+static const ITab
+tt2x5 = {
+ "TT2X5", 4, tt2x5col
};
static const ITab
@@ -541,12 +624,13 @@ tt2itab[] = {
tt2x1,
tt2x2,
tt2x3,
- tt2x4
+ tt2x4,
+ tt2x5
};
static const Tab
tt2 = {
- "TT2", 5, tt2col, 4, tt2itab
+ "TT2", 5, tt2col, 5, tt2itab
};
// all tables
@@ -567,40 +651,42 @@ struct Con {
NdbDictionary::Dictionary* m_dic;
NdbConnection* m_tx;
NdbOperation* m_op;
- NdbConnection* m_scantx;
- NdbOperation* m_scanop;
+ NdbScanOperation* m_scanop;
+ NdbIndexScanOperation* m_indexscanop;
+ NdbResultSet* m_resultset;
enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive };
ScanMode m_scanmode;
enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther };
ErrType m_errtype;
Con() :
m_ndb(0), m_dic(0), m_tx(0), m_op(0),
- m_scantx(0), m_scanop(0), m_scanmode(ScanNo), m_errtype(ErrNone) {}
+ m_scanop(0), m_indexscanop(0), m_resultset(0), m_scanmode(ScanNo), m_errtype(ErrNone) {}
+ ~Con() {
+ if (m_tx != 0)
+ closeTransaction();
+ }
int connect();
+ void connect(const Con& con);
void disconnect();
int startTransaction();
- int startBuddyTransaction(const Con& con);
int getNdbOperation(const Tab& tab);
- int getNdbOperation(const ITab& itab, const Tab& tab);
+ int getNdbScanOperation(const Tab& tab);
+ int getNdbScanOperation(const ITab& itab, const Tab& tab);
int equal(int num, const char* addr);
int getValue(int num, NdbRecAttr*& rec);
int setValue(int num, const char* addr);
int setBound(int num, int type, const void* value);
int execute(ExecType t);
+ int execute(ExecType t, bool& deadlock);
int openScanRead(unsigned parallelism);
int openScanExclusive(unsigned parallelism);
int executeScan();
- int nextScanResult();
- int takeOverForUpdate(Con& scan);
- int takeOverForDelete(Con& scan);
+ int nextScanResult(bool fetchAllowed);
+ int nextScanResult(bool fetchAllowed, bool& deadlock);
+ int updateScanTuple(Con& con2);
+ int deleteScanTuple(Con& con2);
void closeTransaction();
void printerror(NdbOut& out);
- // flush dict cache
- int bugger() {
- //disconnect();
- //CHK(connect() == 0);
- return 0;
- }
};
int
@@ -610,12 +696,18 @@ Con::connect()
m_ndb = new Ndb("TEST_DB");
CHKCON(m_ndb->init() == 0, *this);
CHKCON(m_ndb->waitUntilReady(30) == 0, *this);
- m_dic = m_ndb->getDictionary();
m_tx = 0, m_op = 0;
return 0;
}
void
+Con::connect(const Con& con)
+{
+ assert(m_ndb == 0);
+ m_ndb = con.m_ndb;
+}
+
+void
Con::disconnect()
{
delete m_ndb;
@@ -625,31 +717,34 @@ Con::disconnect()
int
Con::startTransaction()
{
- assert(m_ndb != 0 && m_tx == 0);
+ assert(m_ndb != 0);
+ if (m_tx != 0)
+ closeTransaction();
CHKCON((m_tx = m_ndb->startTransaction()) != 0, *this);
return 0;
}
int
-Con::startBuddyTransaction(const Con& con)
+Con::getNdbOperation(const Tab& tab)
{
- assert(m_ndb != 0 && m_tx == 0 && con.m_ndb == m_ndb && con.m_tx != 0);
- CHKCON((m_tx = m_ndb->hupp(con.m_tx)) != 0, *this);
+ assert(m_tx != 0);
+ CHKCON((m_op = m_tx->getNdbOperation(tab.m_name)) != 0, *this);
return 0;
}
int
-Con::getNdbOperation(const Tab& tab)
+Con::getNdbScanOperation(const Tab& tab)
{
assert(m_tx != 0);
- CHKCON((m_op = m_tx->getNdbOperation(tab.m_name)) != 0, *this);
+ CHKCON((m_op = m_scanop = m_tx->getNdbScanOperation(tab.m_name)) != 0, *this);
return 0;
}
int
-Con::getNdbOperation(const ITab& itab, const Tab& tab)
+Con::getNdbScanOperation(const ITab& itab, const Tab& tab)
{
- CHKCON((m_op = m_tx->getNdbOperation(itab.m_name, tab.m_name)) != 0, *this);
+ assert(m_tx != 0);
+ CHKCON((m_op = m_scanop = m_indexscanop = m_tx->getNdbIndexScanOperation(itab.m_name, tab.m_name)) != 0, *this);
return 0;
}
@@ -681,7 +776,7 @@ int
Con::setBound(int num, int type, const void* value)
{
assert(m_tx != 0 && m_op != 0);
- CHKCON(m_op->setBound(num, type, value) == 0, *this);
+ CHKCON(m_indexscanop->setBound(num, type, value) == 0, *this);
return 0;
}
@@ -694,10 +789,26 @@ Con::execute(ExecType t)
}
int
+Con::execute(ExecType t, bool& deadlock)
+{
+ int ret = execute(t);
+ if (ret != 0) {
+ if (deadlock && m_errtype == ErrDeadlock) {
+ LL3("caught deadlock");
+ ret = 0;
+ }
+ } else {
+ deadlock = false;
+ }
+ CHK(ret == 0);
+ return 0;
+}
+
+int
Con::openScanRead(unsigned parallelism)
{
assert(m_tx != 0 && m_op != 0);
- CHKCON(m_op->openScanRead(parallelism) == 0, *this);
+ CHKCON((m_resultset = m_scanop->readTuples(parallelism)) != 0, *this);
return 0;
}
@@ -705,39 +816,56 @@ int
Con::openScanExclusive(unsigned parallelism)
{
assert(m_tx != 0 && m_op != 0);
- CHKCON(m_op->openScanExclusive(parallelism) == 0, *this);
+ CHKCON((m_resultset = m_scanop->readTuplesExclusive(parallelism)) != 0, *this);
return 0;
}
int
Con::executeScan()
{
- CHKCON(m_tx->executeScan() == 0, *this);
+ CHKCON(m_tx->execute(NoCommit) == 0, *this);
return 0;
}
int
-Con::nextScanResult()
+Con::nextScanResult(bool fetchAllowed)
{
int ret;
- CHKCON((ret = m_tx->nextScanResult()) != -1, *this);
- assert(ret == 0 || ret == 1);
+ assert(m_resultset != 0);
+ CHKCON((ret = m_resultset->nextResult(fetchAllowed)) != -1, *this);
+ assert(ret == 0 || ret == 1 || (! fetchAllowed && ret == 2));
+ return ret;
+}
+
+int
+Con::nextScanResult(bool fetchAllowed, bool& deadlock)
+{
+ int ret = nextScanResult(fetchAllowed);
+ if (ret == -1) {
+ if (deadlock && m_errtype == ErrDeadlock) {
+ LL3("caught deadlock");
+ ret = 0;
+ }
+ } else {
+ deadlock = false;
+ }
+ CHK(ret == 0 || ret == 1 || (! fetchAllowed && ret == 2));
return ret;
}
int
-Con::takeOverForUpdate(Con& scan)
+Con::updateScanTuple(Con& con2)
{
- assert(m_tx != 0 && scan.m_op != 0);
- CHKCON((m_op = scan.m_op->takeOverForUpdate(m_tx)) != 0, scan);
+ assert(con2.m_tx != 0);
+ CHKCON((con2.m_op = m_resultset->updateTuple(con2.m_tx)) != 0, *this);
return 0;
}
int
-Con::takeOverForDelete(Con& scan)
+Con::deleteScanTuple(Con& con2)
{
- assert(m_tx != 0 && scan.m_op != 0);
- CHKCON((m_op = scan.m_op->takeOverForUpdate(m_tx)) != 0, scan);
+ assert(con2.m_tx != 0);
+ CHKCON(m_resultset->deleteTuple(con2.m_tx) == 0, *this);
return 0;
}
@@ -765,7 +893,7 @@ Con::printerror(NdbOut& out)
if (m_tx) {
if ((code = m_tx->getNdbError().code) != 0) {
LL0(++any << " con: error " << m_tx->getNdbError());
- if (code == 266 || code == 274 || code == 296 || code == 297)
+ if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499)
m_errtype = ErrDeadlock;
}
if (m_op && m_op->getNdbError().code != 0) {
@@ -785,7 +913,7 @@ invalidateindex(Par par, const ITab& itab)
{
Con& con = par.con();
const Tab& tab = par.tab();
- con.m_dic->invalidateIndex(itab.m_name, tab.m_name);
+ con.m_ndb->getDictionary()->invalidateIndex(itab.m_name, tab.m_name);
return 0;
}
@@ -809,7 +937,7 @@ invalidatetable(Par par)
Con& con = par.con();
const Tab& tab = par.tab();
invalidateindex(par);
- con.m_dic->invalidateTable(tab.m_name);
+ con.m_ndb->getDictionary()->invalidateTable(tab.m_name);
return 0;
}
@@ -818,6 +946,7 @@ droptable(Par par)
{
Con& con = par.con();
const Tab& tab = par.tab();
+ con.m_dic = con.m_ndb->getDictionary();
if (con.m_dic->getTable(tab.m_name) == 0) {
// how to check for error
LL4("no table " << tab.m_name);
@@ -825,6 +954,7 @@ droptable(Par par)
LL3("drop table " << tab.m_name);
CHKCON(con.m_dic->dropTable(tab.m_name) == 0, con);
}
+ con.m_dic = 0;
return 0;
}
@@ -832,7 +962,6 @@ static int
createtable(Par par)
{
Con& con = par.con();
- CHK(con.bugger() == 0);
const Tab& tab = par.tab();
LL3("create table " << tab.m_name);
LL4(tab);
@@ -852,7 +981,9 @@ createtable(Par par)
c.setNullable(col.m_nullable);
t.addColumn(c);
}
+ con.m_dic = con.m_ndb->getDictionary();
CHKCON(con.m_dic->createTable(t) == 0, con);
+ con.m_dic = 0;
return 0;
}
@@ -861,6 +992,7 @@ dropindex(Par par, const ITab& itab)
{
Con& con = par.con();
const Tab& tab = par.tab();
+ con.m_dic = con.m_ndb->getDictionary();
if (con.m_dic->getIndex(itab.m_name, tab.m_name) == 0) {
// how to check for error
LL4("no index " << itab.m_name);
@@ -868,6 +1000,7 @@ dropindex(Par par, const ITab& itab)
LL3("drop index " << itab.m_name);
CHKCON(con.m_dic->dropIndex(itab.m_name, tab.m_name) == 0, con);
}
+ con.m_dic = 0;
return 0;
}
@@ -888,7 +1021,6 @@ static int
createindex(Par par, const ITab& itab)
{
Con& con = par.con();
- CHK(con.bugger() == 0);
const Tab& tab = par.tab();
LL3("create index " << itab.m_name);
LL4(itab);
@@ -900,7 +1032,9 @@ createindex(Par par, const ITab& itab)
const Col& col = itab.m_icol[k].m_col;
x.addColumnName(col.m_name);
}
+ con.m_dic = con.m_ndb->getDictionary();
CHKCON(con.m_dic->createIndex(x) == 0, con);
+ con.m_dic = 0;
return 0;
}
@@ -1115,9 +1249,9 @@ Val::cmp(const Val& val2) const
assert(col.m_type == col2.m_type && col.m_length == col2.m_length);
if (m_null || val2.m_null) {
if (! m_null)
- return -1;
- if (! val2.m_null)
return +1;
+ if (! val2.m_null)
+ return -1;
return 0;
}
// verify data formats
@@ -1175,6 +1309,8 @@ struct Row {
const Tab& m_tab;
Val** m_val;
bool m_exist;
+ enum Op { NoOp = 0, ReadOp, InsOp, UpdOp, DelOp };
+ Op m_pending;
Row(const Tab& tab);
~Row();
void copy(const Row& row2);
@@ -1199,6 +1335,7 @@ Row::Row(const Tab& tab) :
m_val[k] = new Val(col);
}
m_exist = false;
+ m_pending = NoOp;
}
Row::~Row()
@@ -1236,7 +1373,7 @@ int
Row::verify(const Row& row2) const
{
const Tab& tab = m_tab;
- assert(&tab == &row2.m_tab);
+ assert(&tab == &row2.m_tab && m_exist && row2.m_exist);
for (unsigned k = 0; k < tab.m_cols; k++) {
const Val& val = *m_val[k];
const Val& val2 = *row2.m_val[k];
@@ -1257,7 +1394,7 @@ Row::insrow(Par par)
const Val& val = *m_val[k];
CHK(val.setval(par) == 0);
}
- m_exist = true;
+ m_pending = InsOp;
return 0;
}
@@ -1273,6 +1410,7 @@ Row::updrow(Par par)
const Val& val = *m_val[k];
CHK(val.setval(par) == 0);
}
+ m_pending = UpdOp;
return 0;
}
@@ -1290,7 +1428,7 @@ Row::delrow(Par par)
if (col.m_pk)
CHK(val.setval(par) == 0);
}
- m_exist = false;
+ m_pending = DelOp;
return 0;
}
@@ -1307,7 +1445,6 @@ Row::selrow(Par par)
if (col.m_pk)
CHK(val.setval(par) == 0);
}
- m_exist = false;
return 0;
}
@@ -1322,6 +1459,7 @@ Row::setrow(Par par)
if (! col.m_pk)
CHK(val.setval(par) == 0);
}
+ m_pending = UpdOp;
return 0;
}
@@ -1349,6 +1487,10 @@ operator<<(NdbOut& out, const Row& row)
out << " ";
out << *row.m_val[i];
}
+ out << " [exist=" << row.m_exist;
+ if (row.m_pending)
+ out << " pending=" << row.m_pending;
+ out << "]";
return out;
}
@@ -1357,15 +1499,19 @@ operator<<(NdbOut& out, const Row& row)
struct Set {
const Tab& m_tab;
unsigned m_rows;
- unsigned m_count;
Row** m_row;
Row** m_saverow;
Row* m_keyrow;
NdbRecAttr** m_rec;
Set(const Tab& tab, unsigned rows);
~Set();
+ void reset();
+ unsigned count() const;
// row methods
bool exist(unsigned i) const;
+ Row::Op pending(unsigned i) const;
+ void notpending(unsigned i);
+ void notpending(const Lst& lst);
void calc(Par par, unsigned i);
int insrow(Par par, unsigned i);
int updrow(Par par, unsigned i);
@@ -1380,7 +1526,7 @@ struct Set {
void savepoint();
void commit();
void rollback();
- // locking (not perfect since ops may complete in different order)
+ // protect structure
NdbMutex* m_mutex;
void lock() {
NdbMutex_Lock(m_mutex);
@@ -1396,9 +1542,9 @@ Set::Set(const Tab& tab, unsigned rows) :
m_tab(tab)
{
m_rows = rows;
- m_count = 0;
m_row = new Row* [m_rows];
for (unsigned i = 0; i < m_rows; i++) {
+ // allocate on need to save space
m_row[i] = 0;
}
m_saverow = 0;
@@ -1425,11 +1571,47 @@ Set::~Set()
NdbMutex_Destroy(m_mutex);
}
+void
+Set::reset()
+{
+ for (unsigned i = 0; i < m_rows; i++) {
+ if (m_row[i] != 0) {
+ Row& row = *m_row[i];
+ row.m_exist = false;
+ }
+ }
+}
+
+unsigned
+Set::count() const
+{
+ unsigned count = 0;
+ for (unsigned i = 0; i < m_rows; i++) {
+ if (m_row[i] != 0) {
+ Row& row = *m_row[i];
+ if (row.m_exist)
+ count++;
+ }
+ }
+ return count;
+}
+
bool
Set::exist(unsigned i) const
{
assert(i < m_rows);
- return m_row[i] != 0 && m_row[i]->m_exist;
+ if (m_row[i] == 0) // not allocated => not exist
+ return false;
+ return m_row[i]->m_exist;
+}
+
+Row::Op
+Set::pending(unsigned i) const
+{
+ assert(i < m_rows);
+ if (m_row[i] == 0) // not allocated => not pending
+ return Row::NoOp;
+ return m_row[i]->m_pending;
}
void
@@ -1448,9 +1630,9 @@ Set::calc(Par par, unsigned i)
int
Set::insrow(Par par, unsigned i)
{
- assert(m_row[i] != 0 && m_count < m_rows);
- CHK(m_row[i]->insrow(par) == 0);
- m_count++;
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ CHK(row.insrow(par) == 0);
return 0;
}
@@ -1458,16 +1640,17 @@ int
Set::updrow(Par par, unsigned i)
{
assert(m_row[i] != 0);
- CHK(m_row[i]->updrow(par) == 0);
+ Row& row = *m_row[i];
+ CHK(row.updrow(par) == 0);
return 0;
}
int
Set::delrow(Par par, unsigned i)
{
- assert(m_row[i] != 0 && m_count != 0);
- CHK(m_row[i]->delrow(par) == 0);
- m_count--;
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ CHK(row.delrow(par) == 0);
return 0;
}
@@ -1507,7 +1690,7 @@ Set::getkey(Par par, unsigned* i)
assert(m_rec[0] != 0);
const char* aRef0 = m_rec[0]->aRef();
Uint32 key = *(const Uint32*)aRef0;
- CHKMSG(key < m_rows, "key=" << key << " rows=" << m_rows);
+ CHK(key < m_rows);
*i = key;
return 0;
}
@@ -1532,19 +1715,37 @@ Set::putval(unsigned i, bool force)
val.copy(aRef);
val.m_null = false;
}
- if (! row.m_exist) {
+ if (! row.m_exist)
row.m_exist = true;
- m_count++;
- }
return 0;
}
+void
+Set::notpending(unsigned i)
+{
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ if (row.m_pending == Row::InsOp)
+ row.m_exist = true;
+ if (row.m_pending == Row::DelOp)
+ row.m_exist = false;
+ row.m_pending = Row::NoOp;
+}
+
+void
+Set::notpending(const Lst& lst)
+{
+ for (unsigned j = 0; j < lst.m_cnt; j++) {
+ unsigned i = lst.m_arr[j];
+ notpending(i);
+ }
+}
+
int
Set::verify(const Set& set2) const
{
const Tab& tab = m_tab;
assert(&tab == &set2.m_tab && m_rows == set2.m_rows);
- CHKMSG(m_count == set2.m_count, "set=" << m_count << " set2=" << set2.m_count);
for (unsigned i = 0; i < m_rows; i++) {
CHK(exist(i) == set2.exist(i));
if (! exist(i))
@@ -1618,8 +1819,8 @@ int
BVal::setbnd(Par par) const
{
Con& con = par.con();
- const char* addr = (const char*)dataaddr();
- assert(! m_null);
+ assert(g_compare_null || ! m_null);
+ const char* addr = ! m_null ? (const char*)dataaddr() : 0;
const ICol& icol = m_icol;
CHK(con.setBound(icol.m_num, m_type, addr) == 0);
return 0;
@@ -1647,7 +1848,10 @@ struct BSet {
unsigned m_bvals;
BVal** m_bval;
BSet(const Tab& tab, const ITab& itab, unsigned rows);
+ ~BSet();
+ void reset();
void calc(Par par);
+ void calcpk(Par par, unsigned i);
int setbnd(Par par) const;
void filter(const Set& set, Set& set2) const;
};
@@ -1659,12 +1863,31 @@ BSet::BSet(const Tab& tab, const ITab& itab, unsigned rows) :
m_bvals(0)
{
m_bval = new BVal* [m_alloc];
+ for (unsigned i = 0; i < m_alloc; i++) {
+ m_bval[i] = 0;
+ }
+}
+
+BSet::~BSet()
+{
+ delete [] m_bval;
+}
+
+void
+BSet::reset()
+{
+ while (m_bvals > 0) {
+ unsigned i = --m_bvals;
+ delete m_bval[i];
+ m_bval[i] = 0;
+ }
}
void
BSet::calc(Par par)
{
const ITab& itab = m_itab;
+ reset();
for (unsigned k = 0; k < itab.m_icols; k++) {
const ICol& icol = itab.m_icol[k];
const Col& col = icol.m_col;
@@ -1686,7 +1909,8 @@ BSet::calc(Par par)
if (k + 1 < itab.m_icols)
bval.m_type = 4;
// value generation parameters
- par.m_pctnull = 0;
+ if (! g_compare_null)
+ par.m_pctnull = 0;
par.m_pctrange = 50; // bit higher
do {
bval.calc(par, 0);
@@ -1705,6 +1929,23 @@ BSet::calc(Par par)
}
}
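+// Build equality bounds on the primary key columns for row i, so a scan on
+// the ordered PK index returns exactly that row (used by the timing tests).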
+void
+BSet::calcpk(Par par, unsigned i)
+{
+ const ITab& itab = m_itab;
+ reset();
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ const ICol& icol = itab.m_icol[k];
+ const Col& col = icol.m_col;
+ assert(col.m_pk);
+ assert(m_bvals < m_alloc);
+ BVal& bval = *new BVal(icol);
+ m_bval[m_bvals++] = &bval;
+ bval.m_type = 4;
+ bval.calc(par, i);
+ }
+}
+
int
BSet::setbnd(Par par) const
{
@@ -1721,23 +1962,25 @@ BSet::filter(const Set& set, Set& set2) const
const Tab& tab = m_tab;
const ITab& itab = m_itab;
assert(&tab == &set2.m_tab && set.m_rows == set2.m_rows);
- assert(set2.m_count == 0);
+ assert(set2.count() == 0);
for (unsigned i = 0; i < set.m_rows; i++) {
if (! set.exist(i))
continue;
const Row& row = *set.m_row[i];
- bool ok1 = false;
- for (unsigned k = 0; k < itab.m_icols; k++) {
- const ICol& icol = itab.m_icol[k];
- const Col& col = icol.m_col;
- const Val& val = *row.m_val[col.m_num];
- if (! val.m_null) {
- ok1 = true;
- break;
+ if (! g_store_null_key) {
+ bool ok1 = false;
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ const ICol& icol = itab.m_icol[k];
+ const Col& col = icol.m_col;
+ const Val& val = *row.m_val[col.m_num];
+ if (! val.m_null) {
+ ok1 = true;
+ break;
+ }
}
+ if (! ok1)
+ continue;
}
- if (! ok1)
- continue;
bool ok2 = true;
for (unsigned j = 0; j < m_bvals; j++) {
const BVal& bval = *m_bval[j];
@@ -1769,7 +2012,6 @@ BSet::filter(const Set& set, Set& set2) const
assert(! row2.m_exist);
row2.copy(row);
row2.m_exist = true;
- set2.m_count++;
}
}
@@ -1794,28 +2036,46 @@ pkinsert(Par par)
Set& set = par.set();
LL3("pkinsert");
CHK(con.startTransaction() == 0);
- unsigned n = 0;
+ Lst lst;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned i = thrrow(par, j);
set.lock();
- if (set.exist(i)) {
+ if (set.exist(i) || set.pending(i)) {
set.unlock();
continue;
}
set.calc(par, i);
- LL4("pkinsert " << i << ": " << *set.m_row[i]);
- CHKTRY(set.insrow(par, i) == 0, set.unlock());
+ CHK(set.insrow(par, i) == 0);
set.unlock();
- if (++n == par.m_batch) {
- CHK(con.execute(Commit) == 0);
+ LL4("pkinsert " << i << ": " << *set.m_row[i]);
+ lst.push(i);
+ if (lst.cnt() == par.m_batch) {
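+ // batch is full: commit it and clear the pending flags; a deadlock ends this insert round early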
+ bool deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock) == 0);
con.closeTransaction();
+ if (deadlock) {
+ LL1("pkinsert: stop on deadlock");
+ return 0;
+ }
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ lst.reset();
CHK(con.startTransaction() == 0);
- n = 0;
}
}
- if (n != 0) {
- CHK(con.execute(Commit) == 0);
- n = 0;
+ if (lst.cnt() != 0) {
+ bool deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock) == 0);
+ con.closeTransaction();
+ if (deadlock) {
+ LL1("pkinsert: stop on deadlock");
+ return 0;
+ }
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ return 0;
}
con.closeTransaction();
return 0;
@@ -1828,28 +2088,45 @@ pkupdate(Par par)
Set& set = par.set();
LL3("pkupdate");
CHK(con.startTransaction() == 0);
- unsigned n = 0;
+ Lst lst;
+ bool deadlock = false;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned i = thrrow(par, j);
set.lock();
- if (! set.exist(i)) {
+ if (! set.exist(i) || set.pending(i)) {
set.unlock();
continue;
}
set.calc(par, i);
- LL4("pkupdate " << i << ": " << *set.m_row[i]);
- CHKTRY(set.updrow(par, i) == 0, set.unlock());
+ CHK(set.updrow(par, i) == 0);
set.unlock();
- if (++n == par.m_batch) {
- CHK(con.execute(Commit) == 0);
+ LL4("pkupdate " << i << ": " << *set.m_row[i]);
+ lst.push(i);
+ if (lst.cnt() == par.m_batch) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock) == 0);
+ if (deadlock) {
+ LL1("pkupdate: stop on deadlock");
+ break;
+ }
con.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ lst.reset();
CHK(con.startTransaction() == 0);
- n = 0;
}
}
- if (n != 0) {
- CHK(con.execute(Commit) == 0);
- n = 0;
+ if (! deadlock && lst.cnt() != 0) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock) == 0);
+ if (deadlock) {
+ LL1("pkupdate: stop on deadlock");
+ } else {
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ }
}
con.closeTransaction();
return 0;
@@ -1862,27 +2139,44 @@ pkdelete(Par par)
Set& set = par.set();
LL3("pkdelete");
CHK(con.startTransaction() == 0);
- unsigned n = 0;
+ Lst lst;
+ bool deadlock = false;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned i = thrrow(par, j);
set.lock();
- if (! set.exist(i)) {
+ if (! set.exist(i) || set.pending(i)) {
set.unlock();
continue;
}
- LL4("pkdelete " << i << ": " << *set.m_row[i]);
- CHKTRY(set.delrow(par, i) == 0, set.unlock());
+ CHK(set.delrow(par, i) == 0);
set.unlock();
- if (++n == par.m_batch) {
- CHK(con.execute(Commit) == 0);
+ LL4("pkdelete " << i << ": " << *set.m_row[i]);
+ lst.push(i);
+ if (lst.cnt() == par.m_batch) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock) == 0);
+ if (deadlock) {
+ LL1("pkdelete: stop on deadlock");
+ break;
+ }
con.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ lst.reset();
CHK(con.startTransaction() == 0);
- n = 0;
}
}
- if (n != 0) {
- CHK(con.execute(Commit) == 0);
- n = 0;
+ if (! deadlock && lst.cnt() != 0) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock) == 0);
+ if (deadlock) {
+ LL1("pkdelete: stop on deadlock");
+ } else {
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ }
}
con.closeTransaction();
return 0;
@@ -1893,21 +2187,25 @@ pkread(Par par)
{
Con& con = par.con();
const Tab& tab = par.tab();
- const Set& set = par.set();
+ Set& set = par.set();
LL3((par.m_verify ? "pkverify " : "pkread ") << tab.m_name);
// expected
const Set& set1 = set;
Set set2(tab, set.m_rows);
for (unsigned i = 0; i < set.m_rows; i++) {
- if (! set.exist(i))
+ set.lock();
+ if (! set.exist(i) || set.pending(i)) {
+ set.unlock();
continue;
+ }
+ set.unlock();
CHK(con.startTransaction() == 0);
CHK(set2.selrow(par, i) == 0);
CHK(con.execute(Commit) == 0);
unsigned i2 = (unsigned)-1;
CHK(set2.getkey(par, &i2) == 0 && i == i2);
CHK(set2.putval(i, false) == 0);
- LL4("row " << set2.m_count << ": " << *set2.m_row[i]);
+ LL4("row " << set2.count() << ": " << *set2.m_row[i]);
con.closeTransaction();
}
if (par.m_verify)
@@ -1915,6 +2213,32 @@ pkread(Par par)
return 0;
}
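+// Timed primary key reads: fetch only the first column of `count` random rows,
+// one transaction per read (deliberately not batched).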
+static int
+pkreadfast(Par par, unsigned count)
+{
+ Con& con = par.con();
+ const Tab& tab = par.tab();
+ const Set& set = par.set();
+ LL3("pkfast " << tab.m_name);
+ Row keyrow(tab);
+ // not batched on purpose
+ for (unsigned j = 0; j < count; j++) {
+ unsigned i = urandom(set.m_rows);
+ assert(set.exist(i));
+ CHK(con.startTransaction() == 0);
+ // define key
+ keyrow.calc(par, i);
+ CHK(keyrow.selrow(par) == 0);
+ NdbRecAttr* rec;
+ CHK(con.getValue((Uint32)0, rec) == 0);
+ CHK(con.executeScan() == 0);
+ // get 1st column
+ CHK(con.execute(Commit) == 0);
+ con.closeTransaction();
+ }
+ return 0;
+}
+
// scan read
static int
@@ -1928,19 +2252,19 @@ scanreadtable(Par par)
LL3((par.m_verify ? "scanverify " : "scanread ") << tab.m_name);
Set set2(tab, set.m_rows);
CHK(con.startTransaction() == 0);
- CHK(con.getNdbOperation(tab) == 0);
+ CHK(con.getNdbScanOperation(tab) == 0);
CHK(con.openScanRead(par.m_scanrd) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
while (1) {
int ret;
- CHK((ret = con.nextScanResult()) == 0 || ret == 1);
+ CHK((ret = con.nextScanResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
unsigned i = (unsigned)-1;
CHK(set2.getkey(par, &i) == 0);
CHK(set2.putval(i, false) == 0);
- LL4("row " << set2.m_count << ": " << *set2.m_row[i]);
+ LL4("row " << set2.count() << ": " << *set2.m_row[i]);
}
con.closeTransaction();
if (par.m_verify)
@@ -1949,6 +2273,33 @@ scanreadtable(Par par)
}
static int
+scanreadtablefast(Par par, unsigned countcheck)
+{
+ Con& con = par.con();
+ const Tab& tab = par.tab();
+ const Set& set = par.set();
+ LL3("scanfast " << tab.m_name);
+ CHK(con.startTransaction() == 0);
+ CHK(con.getNdbScanOperation(tab) == 0);
+ CHK(con.openScanRead(par.m_scanrd) == 0);
+ // get 1st column
+ NdbRecAttr* rec;
+ CHK(con.getValue((Uint32)0, rec) == 0);
+ CHK(con.executeScan() == 0);
+ unsigned count = 0;
+ while (1) {
+ int ret;
+ CHK((ret = con.nextScanResult(true)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ count++;
+ }
+ con.closeTransaction();
+ CHK(count == countcheck);
+ return 0;
+}
+
+static int
scanreadindex(Par par, const ITab& itab, const BSet& bset)
{
Con& con = par.con();
@@ -1961,21 +2312,21 @@ scanreadindex(Par par, const ITab& itab, const BSet& bset)
LL4(bset);
Set set2(tab, set.m_rows);
CHK(con.startTransaction() == 0);
- CHK(con.getNdbOperation(itab, tab) == 0);
+ CHK(con.getNdbScanOperation(itab, tab) == 0);
CHK(con.openScanRead(par.m_scanrd) == 0);
CHK(bset.setbnd(par) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
while (1) {
int ret;
- CHK((ret = con.nextScanResult()) == 0 || ret == 1);
+ CHK((ret = con.nextScanResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
unsigned i = (unsigned)-1;
CHK(set2.getkey(par, &i) == 0);
LL4("key " << i);
CHK(set2.putval(i, par.m_dups) == 0);
- LL4("row " << set2.m_count << ": " << *set2.m_row[i]);
+ LL4("row " << set2.count() << ": " << *set2.m_row[i]);
}
con.closeTransaction();
if (par.m_verify)
@@ -1984,10 +2335,39 @@ scanreadindex(Par par, const ITab& itab, const BSet& bset)
}
static int
+scanreadindexfast(Par par, const ITab& itab, const BSet& bset, unsigned countcheck)
+{
+ Con& con = par.con();
+ const Tab& tab = par.tab();
+ const Set& set = par.set();
+ LL3("scanfast " << itab.m_name << " bounds=" << bset.m_bvals);
+ LL4(bset);
+ CHK(con.startTransaction() == 0);
+ CHK(con.getNdbScanOperation(itab, tab) == 0);
+ CHK(con.openScanRead(par.m_scanrd) == 0);
+ CHK(bset.setbnd(par) == 0);
+ // get 1st column
+ NdbRecAttr* rec;
+ CHK(con.getValue((Uint32)0, rec) == 0);
+ CHK(con.executeScan() == 0);
+ unsigned count = 0;
+ while (1) {
+ int ret;
+ CHK((ret = con.nextScanResult(true)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ count++;
+ }
+ con.closeTransaction();
+ CHK(count == countcheck);
+ return 0;
+}
+
+static int
scanreadindex(Par par, const ITab& itab)
{
const Tab& tab = par.tab();
- for (unsigned i = 0; i < par.m_subloop; i++) {
+ for (unsigned i = 0; i < par.m_idxloop; i++) {
BSet bset(tab, itab, par.m_rows);
bset.calc(par);
CHK(scanreadindex(par, itab, bset) == 0);
@@ -2017,6 +2397,60 @@ scanreadall(Par par)
return 0;
}
+// timing scans
+
+static int
+timescantable(Par par)
+{
+ par.tmr().on();
+ CHK(scanreadtablefast(par, par.m_totrows) == 0);
+ par.tmr().off(par.set().m_rows);
+ return 0;
+}
+
+static int
+timescanpkindex(Par par)
+{
+ const Tab& tab = par.tab();
+ const ITab& itab = tab.m_itab[0]; // 1st index is on PK
+ BSet bset(tab, itab, par.m_rows);
+ par.tmr().on();
+ CHK(scanreadindexfast(par, itab, bset, par.m_totrows) == 0);
+ par.tmr().off(par.set().m_rows);
+ return 0;
+}
+
+static int
+timepkreadtable(Par par)
+{
+ par.tmr().on();
+ unsigned count = par.m_samples;
+ if (count == 0)
+ count = par.m_totrows;
+ CHK(pkreadfast(par, count) == 0);
+ par.tmr().off(count);
+ return 0;
+}
+
+static int
+timepkreadindex(Par par)
+{
+ const Tab& tab = par.tab();
+ const ITab& itab = tab.m_itab[0]; // 1st index is on PK
+ BSet bset(tab, itab, par.m_rows);
+ unsigned count = par.m_samples;
+ if (count == 0)
+ count = par.m_totrows;
+ par.tmr().on();
+ for (unsigned j = 0; j < count; j++) {
+ unsigned i = urandom(par.m_totrows);
+ bset.calcpk(par, i);
+ CHK(scanreadindexfast(par, itab, bset, 1) == 0);
+ }
+ par.tmr().off(count);
+ return 0;
+}
+
// scan update
static int
@@ -2028,36 +2462,70 @@ scanupdatetable(Par par)
LL3("scan update " << tab.m_name);
Set set2(tab, set.m_rows);
CHK(con.startTransaction() == 0);
- CHK(con.getNdbOperation(tab) == 0);
+ CHK(con.getNdbScanOperation(tab) == 0);
CHK(con.openScanExclusive(par.m_scanex) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
unsigned count = 0;
// updating trans
Con con2;
- con2.m_ndb = con.m_ndb;
- CHK(con2.startBuddyTransaction(con) == 0);
+ con2.connect(con);
+ CHK(con2.startTransaction() == 0);
+ Lst lst;
+ bool deadlock = false;
while (1) {
int ret;
- CHK((ret = con.nextScanResult()) == 0 || ret == 1);
+ deadlock = par.m_deadlock;
+ CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ if (deadlock) {
+ LL1("scanupdatetable: stop on deadlock");
+ break;
+ }
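+ // take over each scanned tuple into the separate updating transaction con2
+ // and commit it in batches of par.m_batch rows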
+ do {
+ unsigned i = (unsigned)-1;
+ CHK(set2.getkey(par, &i) == 0);
+ const Row& row = *set.m_row[i];
+ set.lock();
+ if (! set.exist(i) || set.pending(i)) {
+ LL4("scan update " << tab.m_name << ": skip: " << row);
+ } else {
+ CHKTRY(set2.putval(i, false) == 0, set.unlock());
+ CHKTRY(con.updateScanTuple(con2) == 0, set.unlock());
+ Par par2 = par;
+ par2.m_con = &con2;
+ set.calc(par, i);
+ CHKTRY(set.setrow(par2, i) == 0, set.unlock());
+ LL4("scan update " << tab.m_name << ": " << row);
+ lst.push(i);
+ }
+ set.unlock();
+ if (lst.cnt() == par.m_batch) {
+ CHK(con2.execute(Commit) == 0);
+ con2.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ count += lst.cnt();
+ lst.reset();
+ CHK(con2.startTransaction() == 0);
+ }
+ CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
+ if (ret == 2 && lst.cnt() != 0) {
+ CHK(con2.execute(Commit) == 0);
+ con2.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ count += lst.cnt();
+ lst.reset();
+ CHK(con2.startTransaction() == 0);
+ }
+ } while (ret == 0);
if (ret == 1)
break;
- unsigned i = (unsigned)-1;
- CHK(set2.getkey(par, &i) == 0);
- LL4("key " << i);
- CHK(set2.putval(i, false) == 0);
- CHK(con2.takeOverForUpdate(con) == 0);
- Par par2 = par;
- par2.m_con = &con2;
- set.lock();
- set.calc(par, i);
- LL4("scan update " << tab.m_name << ": " << *set.m_row[i]);
- CHKTRY(set.setrow(par2, i) == 0, set.unlock());
- set.unlock();
- CHK(con2.execute(NoCommit) == 0);
- count++;
}
- CHK(con2.execute(Commit) == 0);
con2.closeTransaction();
LL3("scan update " << tab.m_name << " rows updated=" << count);
con.closeTransaction();
@@ -2073,7 +2541,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
LL3("scan update " << itab.m_name);
Set set2(tab, set.m_rows);
CHK(con.startTransaction() == 0);
- CHK(con.getNdbOperation(itab, tab) == 0);
+ CHK(con.getNdbScanOperation(itab, tab) == 0);
CHK(con.openScanExclusive(par.m_scanex) == 0);
CHK(bset.setbnd(par) == 0);
set2.getval(par);
@@ -2081,32 +2549,61 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
unsigned count = 0;
// updating trans
Con con2;
- con2.m_ndb = con.m_ndb;
- CHK(con2.startBuddyTransaction(con) == 0);
+ con2.connect(con);
+ CHK(con2.startTransaction() == 0);
+ Lst lst;
+ bool deadlock = false;
while (1) {
int ret;
- CHK((ret = con.nextScanResult()) == 0 || ret == 1);
+ deadlock = par.m_deadlock;
+ CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1);
if (ret == 1)
break;
- unsigned i = (unsigned)-1;
- CHK(set2.getkey(par, &i) == 0);
- LL4("key " << i);
- CHK(set2.putval(i, par.m_dups) == 0);
- // avoid deadlock for now
- //if (! isthrrow(par, i))
- //continue;
- CHK(con2.takeOverForUpdate(con) == 0);
- Par par2 = par;
- par2.m_con = &con2;
- set.lock();
- set.calc(par, i);
- LL4("scan update " << itab.m_name << ": " << *set.m_row[i]);
- CHKTRY(set.setrow(par2, i) == 0, set.unlock());
- set.unlock();
- CHK(con2.execute(NoCommit) == 0);
- count++;
+ if (deadlock) {
+ LL1("scanupdateindex: stop on deadlock");
+ break;
+ }
+ do {
+ unsigned i = (unsigned)-1;
+ CHK(set2.getkey(par, &i) == 0);
+ const Row& row = *set.m_row[i];
+ set.lock();
+ if (! set.exist(i) || set.pending(i)) {
+ LL4("scan update " << itab.m_name << ": skip: " << row);
+ } else {
+ CHKTRY(set2.putval(i, par.m_dups) == 0, set.unlock());
+ CHKTRY(con.updateScanTuple(con2) == 0, set.unlock());
+ Par par2 = par;
+ par2.m_con = &con2;
+ set.calc(par, i);
+ CHKTRY(set.setrow(par2, i) == 0, set.unlock());
+ LL4("scan update " << itab.m_name << ": " << row);
+ lst.push(i);
+ }
+ set.unlock();
+ if (lst.cnt() == par.m_batch) {
+ CHK(con2.execute(Commit) == 0);
+ con2.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ count += lst.cnt();
+ lst.reset();
+ CHK(con2.startTransaction() == 0);
+ }
+ CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
+ if (ret == 2 && lst.cnt() != 0) {
+ CHK(con2.execute(Commit) == 0);
+ con2.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ count += lst.cnt();
+ lst.reset();
+ CHK(con2.startTransaction() == 0);
+ }
+ } while (ret == 0);
}
- CHK(con2.execute(Commit) == 0);
con2.closeTransaction();
LL3("scan update " << itab.m_name << " rows updated=" << count);
con.closeTransaction();
@@ -2117,7 +2614,7 @@ static int
scanupdateindex(Par par, const ITab& itab)
{
const Tab& tab = par.tab();
- for (unsigned i = 0; i < par.m_subloop; i++) {
+ for (unsigned i = 0; i < par.m_idxloop; i++) {
BSet bset(tab, itab, par.m_rows);
bset.calc(par);
CHK(scanupdateindex(par, itab, bset) == 0);
@@ -2148,41 +2645,15 @@ scanupdateall(Par par)
// medium level routines
-static bool
-ignoreverifyerror(Par par)
-{
- Con& con = par.con();
- bool b = par.m_threads > 1;
- if (b) {
- LL1("ignore verify error");
- if (con.m_tx != 0)
- con.closeTransaction();
- return true;
- }
- return b;
-}
-
static int
readverify(Par par)
{
par.m_verify = true;
- CHK(pkread(par) == 0 || ignoreverifyerror(par));
- CHK(scanreadall(par) == 0 || ignoreverifyerror(par));
+ CHK(pkread(par) == 0);
+ CHK(scanreadall(par) == 0);
return 0;
}
-static bool
-ignoredeadlock(Par par)
-{
- Con& con = par.con();
- if (con.m_errtype == Con::ErrDeadlock) {
- LL1("ignore deadlock");
- con.closeTransaction();
- return true;
- }
- return false;
-}
-
static int
pkupdatescanread(Par par)
{
@@ -2204,15 +2675,16 @@ static int
mixedoperations(Par par)
{
par.m_dups = true;
+ par.m_deadlock = true;
unsigned sel = urandom(10);
if (sel < 2) {
- CHK(pkdelete(par) == 0 || ignoredeadlock(par));
+ CHK(pkdelete(par) == 0);
} else if (sel < 4) {
- CHK(pkupdate(par) == 0 || ignoredeadlock(par));
+ CHK(pkupdate(par) == 0);
} else if (sel < 6) {
- CHK(scanupdatetable(par) == 0 || ignoredeadlock(par));
+ CHK(scanupdatetable(par) == 0);
} else {
- CHK(scanupdateindex(par) == 0 || ignoredeadlock(par));
+ CHK(scanupdateindex(par) == 0);
}
return 0;
}
@@ -2346,7 +2818,6 @@ Thr::run()
break;
}
LL4("start");
- CHK(con.bugger() == 0);
assert(m_state == Start);
m_ret = (*m_func)(m_par);
m_state = Stopped;
@@ -2426,6 +2897,7 @@ runstep(Par par, const char* fname, TFunc func, unsigned mode)
Thr& thr = *g_thrlist[n];
thr.m_par.m_tab = par.m_tab;
thr.m_par.m_set = par.m_set;
+ thr.m_par.m_tmr = par.m_tmr;
thr.m_func = func;
thr.start();
}
@@ -2476,13 +2948,13 @@ tpkops(Par par)
RUNSTEP(par, pkinsert, MT);
RUNSTEP(par, createindex, ST);
RUNSTEP(par, invalidateindex, MT);
- RUNSTEP(par, readverify, MT);
+ RUNSTEP(par, readverify, ST);
for (unsigned i = 0; i < par.m_subloop; i++) {
RUNSTEP(par, pkupdatescanread, MT);
- RUNSTEP(par, readverify, MT);
+ RUNSTEP(par, readverify, ST);
}
RUNSTEP(par, pkdelete, MT);
- RUNSTEP(par, readverify, MT);
+ RUNSTEP(par, readverify, ST);
return 0;
}
@@ -2495,10 +2967,10 @@ tmixedops(Par par)
RUNSTEP(par, pkinsert, MT);
RUNSTEP(par, createindex, ST);
RUNSTEP(par, invalidateindex, MT);
- RUNSTEP(par, readverify, MT);
+ RUNSTEP(par, readverify, ST);
for (unsigned i = 0; i < par.m_subloop; i++) {
RUNSTEP(par, mixedoperations, MT);
- RUNSTEP(par, readverify, MT);
+ RUNSTEP(par, readverify, ST);
}
return 0;
}
@@ -2513,7 +2985,7 @@ tbusybuild(Par par)
for (unsigned i = 0; i < par.m_subloop; i++) {
RUNSTEP(par, pkupdateindexbuild, MT);
RUNSTEP(par, invalidateindex, MT);
- RUNSTEP(par, readverify, MT);
+ RUNSTEP(par, readverify, ST);
RUNSTEP(par, dropindex, ST);
}
return 0;
@@ -2564,6 +3036,50 @@ ttimemaint(Par par)
}
static int
+ttimescan(Par par)
+{
+ Tmr t1, t2;
+ RUNSTEP(par, droptable, ST);
+ RUNSTEP(par, createtable, ST);
+ RUNSTEP(par, invalidatetable, MT);
+ for (unsigned i = 0; i < par.m_subloop; i++) {
+ RUNSTEP(par, pkinsert, MT);
+ RUNSTEP(par, createindex, ST);
+ par.m_tmr = &t1;
+ RUNSTEP(par, timescantable, ST);
+ par.m_tmr = &t2;
+ RUNSTEP(par, timescanpkindex, ST);
+ RUNSTEP(par, dropindex, ST);
+ }
+ LL1("full scan table - " << t1.time());
+ LL1("full scan PK index - " << t2.time());
+ LL1("overhead - " << t2.over(t1));
+ return 0;
+}
+
+static int
+ttimepkread(Par par)
+{
+ Tmr t1, t2;
+ RUNSTEP(par, droptable, ST);
+ RUNSTEP(par, createtable, ST);
+ RUNSTEP(par, invalidatetable, MT);
+ for (unsigned i = 0; i < par.m_subloop; i++) {
+ RUNSTEP(par, pkinsert, MT);
+ RUNSTEP(par, createindex, ST);
+ par.m_tmr = &t1;
+ RUNSTEP(par, timepkreadtable, ST);
+ par.m_tmr = &t2;
+ RUNSTEP(par, timepkreadindex, ST);
+ RUNSTEP(par, dropindex, ST);
+ }
+ LL1("pk read table - " << t1.time());
+ LL1("pk read PK index - " << t2.time());
+ LL1("overhead - " << t2.over(t1));
+ return 0;
+}
+
+static int
tdrop(Par par)
{
RUNSTEP(par, droptable, ST);
@@ -2589,6 +3105,8 @@ tcaselist[] = {
TCase("d", tbusybuild, "pk operations and index build"),
TCase("t", ttimebuild, "time index build"),
TCase("u", ttimemaint, "time index maintenance"),
+ TCase("v", ttimescan, "time full scan table vs index on pk"),
+ TCase("w", ttimepkread, "time pk read table vs index on pk"),
TCase("z", tdrop, "drop test tables")
};
@@ -2608,7 +3126,7 @@ printcases()
static void
printtables()
{
- ndbout << "tables and indexes:" << endl;
+ ndbout << "tables and indexes (X1 is on table PK):" << endl;
for (unsigned j = 0; j < tabcount; j++) {
const Tab& tab = tablist[j];
ndbout << " " << tab.m_name;
@@ -2624,7 +3142,8 @@ static int
runtest(Par par)
{
LL1("start");
- srandom(par.m_seed);
+ if (par.m_seed != 0)
+ srandom(par.m_seed);
Con con;
CHK(con.connect() == 0);
par.m_con = &con;
@@ -2639,6 +3158,8 @@ runtest(Par par)
}
for (unsigned l = 0; par.m_loop == 0 || l < par.m_loop; l++) {
LL1("loop " << l);
+ if (par.m_seed == 0)
+ srandom(l);
for (unsigned i = 0; i < tcasecount; i++) {
const TCase& tcase = tcaselist[i];
if (par.m_case != 0 && strchr(par.m_case, tcase.m_name[0]) == 0)
@@ -2649,8 +3170,8 @@ runtest(Par par)
continue;
const Tab& tab = tablist[j];
par.m_tab = &tab;
- Set set(tab, par.m_totrows);
- par.m_set = &set;
+ delete par.m_set;
+ par.m_set = new Set(tab, par.m_totrows);
LL1("table " << tab.m_name);
CHK(tcase.m_func(par) == 0);
}
@@ -2680,6 +3201,12 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
ndbout << "testOIBasic: unknown argument " << arg;
goto usage;
}
+ if (strcmp(arg, "-batch") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_batch = atoi(argv[0]);
+ continue;
+ }
+ }
if (strcmp(arg, "-case") == 0) {
if (++argv, --argc > 0) {
g_opt.m_case = strdup(argv[0]);
@@ -2736,6 +3263,12 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
continue;
}
}
+ if (strcmp(arg, "-samples") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_samples = atoi(argv[0]);
+ continue;
+ }
+ }
if (strcmp(arg, "-scanrd") == 0) {
if (++argv, --argc > 0) {
g_opt.m_scanrd = atoi(argv[0]);
diff --git a/ndb/test/ndbapi/testRestartGci.cpp b/ndb/test/ndbapi/testRestartGci.cpp
index e3dd1f8e2ce..54d38654ff2 100644
--- a/ndb/test/ndbapi/testRestartGci.cpp
+++ b/ndb/test/ndbapi/testRestartGci.cpp
@@ -216,3 +216,5 @@ NDBT_TESTSUITE_END(testRestartGci);
int main(int argc, const char** argv){
return testRestartGci.execute(argc, argv);
}
+
+template class Vector<SavedRecord>;
diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp
index bc3be0b7dc9..3d8b37df0ca 100644
--- a/ndb/test/ndbapi/testScan.cpp
+++ b/ndb/test/ndbapi/testScan.cpp
@@ -65,7 +65,7 @@ int runDropAllTablesExceptTestTable(NDBT_Context* ctx, NDBT_Step* step){
}
int res = GETNDB(step)->getDictionary()->dropTable(tab->getName());
- if(res != -1){
+ if(res == -1){
return NDBT_FAILED;
}
}
@@ -776,108 +776,19 @@ int runOnlyOpenScanOnce(NDBT_Context* ctx, NDBT_Step* step){
}
int runOnlyOneOpInScanTrans(NDBT_Context* ctx, NDBT_Step* step){
- const NdbDictionary::Table* pTab = ctx->getTab();
- int records = ctx->getNumRecords();
- int numFailed = 0;
-
- ScanFunctions scanF(*pTab);
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 6,
- ScanFunctions::OnlyOneOpInScanTrans,
- false) == 0){
- numFailed++;
- }
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 6,
- ScanFunctions::OnlyOneOpInScanTrans,
- true) == 0){
- numFailed++;
- }
-
-
- if(numFailed > 0)
- return NDBT_FAILED;
- else
- return NDBT_OK;
-
+ return NDBT_OK;
}
int runExecuteScanWithoutOpenScan(NDBT_Context* ctx, NDBT_Step* step){
- const NdbDictionary::Table* pTab = ctx->getTab();
- int records = ctx->getNumRecords();
- int numFailed = 0;
- ScanFunctions scanF(*pTab);
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 1,
- ScanFunctions::ExecuteScanWithOutOpenScan,
- false) == 0){
- numFailed++;
- }
-
- if(numFailed > 0)
- return NDBT_FAILED;
- else
- return NDBT_OK;
+ return NDBT_OK;
}
-
-
int runOnlyOneOpBeforeOpenScan(NDBT_Context* ctx, NDBT_Step* step){
- const NdbDictionary::Table* pTab = ctx->getTab();
- int records = ctx->getNumRecords();
- int numFailed = 0;
-
- ScanFunctions scanF(*pTab);
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 6,
- ScanFunctions::OnlyOneOpBeforeOpenScan,
- false) == 0){
- numFailed++;
- }
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 6,
- ScanFunctions::OnlyOneOpBeforeOpenScan,
- true) == 0){
- numFailed++;
- }
-
- if(numFailed > 0)
- return NDBT_FAILED;
- else
return NDBT_OK;
-
}
-int runOnlyOneScanPerTrans(NDBT_Context* ctx, NDBT_Step* step){
- const NdbDictionary::Table* pTab = ctx->getTab();
- int records = ctx->getNumRecords();
- int numFailed = 0;
-
- ScanFunctions scanF(*pTab);
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 6,
- ScanFunctions::OnlyOneScanPerTrans,
- false) == 0){
- numFailed++;
- }
- if (scanF.scanReadFunctions(GETNDB(step),
- records,
- 6,
- ScanFunctions::OnlyOneScanPerTrans,
- true) == 0){
- numFailed++;
- }
-
- if(numFailed > 0)
- return NDBT_FAILED;
- else
- return NDBT_OK;
+int runOnlyOneScanPerTrans(NDBT_Context* ctx, NDBT_Step* step){
+ return NDBT_OK;
}
int runNoCloseTransaction(NDBT_Context* ctx, NDBT_Step* step){
@@ -970,6 +881,93 @@ int runCheckInactivityBeforeClose(NDBT_Context* ctx, NDBT_Step* step){
}
+int runScanRestart(NDBT_Context* ctx, NDBT_Step* step){
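+ // Scan and verify rows up to a randomly chosen position, restart the scan
+ // with NdbResultSet::restart() and verify the full table can be read again.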
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ Ndb * pNdb = GETNDB(step);
+ const NdbDictionary::Table* pTab = ctx->getTab();
+
+ HugoCalculator calc(* pTab);
+ NDBT_ResultRow tmpRow(* pTab);
+
+ int i = 0;
+ while (i<loops && !ctx->isTestStopped()) {
+ g_info << i++ << ": ";
+ const int record = (rand() % records);
+ g_info << " row=" << record;
+
+ NdbConnection* pCon = pNdb->startTransaction();
+ NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
+ if (pOp == NULL) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ NdbResultSet* rs = pOp->readTuples();
+ if( rs == 0 ) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ int check = pOp->interpret_exit_ok();
+ if( check == -1 ) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ // Define attributes to read
+ for(int a = 0; a<pTab->getNoOfColumns(); a++){
+ if((tmpRow.attributeStore(a) =
+ pOp->getValue(pTab->getColumn(a)->getName())) == 0) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+ }
+
+ check = pCon->execute(NoCommit);
+ if( check == -1 ) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ int res;
+ int row = 0;
+ while(row < record && (res = rs->nextResult()) == 0) {
+ if(calc.verifyRowValues(&tmpRow) != 0){
+ abort();
+ return NDBT_FAILED;
+ }
+ row++;
+ }
+ if(row != record){
+ ERR(pCon->getNdbError());
+ abort();
+ return NDBT_FAILED;
+ }
+ g_info << " restarting" << endl;
+ if((res = rs->restart()) != 0){
+ ERR(pCon->getNdbError());
+ abort();
+ return NDBT_FAILED;
+ }
+
+ row = 0;
+ while((res = rs->nextResult()) == 0) {
+ if(calc.verifyRowValues(&tmpRow) != 0){
+ abort();
+ return NDBT_FAILED;
+ }
+ row++;
+ }
+ if(res != 1 || row != records){
+ ERR(pCon->getNdbError());
+ abort();
+ return NDBT_FAILED;
+ }
+ pCon->close();
+ }
+ return NDBT_OK;
+}
NDBT_TESTSUITE(testScan);
@@ -1393,6 +1391,12 @@ TESTCASE("ScanReadWhileNodeIsDown",
STEP(runStopAndStartNode);
FINALIZER(runClearTable);
}
+TESTCASE("ScanRestart",
+ "Verify restart functionallity"){
+ INITIALIZER(runLoadTable);
+ STEP(runScanRestart);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testScan);
int main(int argc, const char** argv){
@@ -1400,3 +1404,4 @@ int main(int argc, const char** argv){
return testScan.execute(argc, argv);
}
+template class Vector<Attrib*>;
diff --git a/ndb/test/ndbapi/testScanPerf/testScanPerf.cpp b/ndb/test/ndbapi/testScanPerf.cpp
index 61af1ffb989..61af1ffb989 100644
--- a/ndb/test/ndbapi/testScanPerf/testScanPerf.cpp
+++ b/ndb/test/ndbapi/testScanPerf.cpp
diff --git a/ndb/test/ndbapi/testScanPerf/Makefile b/ndb/test/ndbapi/testScanPerf/Makefile
deleted file mode 100644
index fdf5980b385..00000000000
--- a/ndb/test/ndbapi/testScanPerf/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-include .defs.mk
-
-TYPE = ndbapitest
-
-BIN_TARGET = testScanPerf
-
-SOURCES = testScanPerf.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/test/ndbapi/testTimeout.cpp b/ndb/test/ndbapi/testTimeout.cpp
index 8a7866880b3..62e69125073 100644
--- a/ndb/test/ndbapi/testTimeout.cpp
+++ b/ndb/test/ndbapi/testTimeout.cpp
@@ -20,6 +20,44 @@
#include <UtilTransactions.hpp>
#include <random.h>
#include <NdbConfig.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+
+#define TIMEOUT 3000
+
+Uint32 g_org_timeout = 3000;
+
+int
+setTransactionTimeout(NDBT_Context* ctx, NDBT_Step* step){
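+ // Remember the configured TransactionInactiveTimeout and force a known short
+ // value (TIMEOUT ms) on all DB nodes via a dump state order.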
+ NdbRestarter restarter;
+
+ NdbConfig conf(GETNDB(step)->getNodeId()+1);
+ unsigned int nodeId = conf.getMasterNodeId();
+ if (!conf.getProperty(nodeId,
+ NODE_TYPE_DB,
+ CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
+ &g_org_timeout)){
+ return NDBT_FAILED;
+ }
+
+ int val[] = { DumpStateOrd::TcSetApplTransactionTimeout, TIMEOUT };
+ if(restarter.dumpStateAllNodes(val, 2) != 0){
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int
+resetTransactionTimeout(NDBT_Context* ctx, NDBT_Step* step){
+ NdbRestarter restarter;
+
+ int val[] = { DumpStateOrd::TcSetApplTransactionTimeout, g_org_timeout };
+ if(restarter.dumpStateAllNodes(val, 2) != 0){
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
@@ -55,16 +93,10 @@ int runTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){
NdbConfig conf(GETNDB(step)->getNodeId()+1);
unsigned int nodeId = conf.getMasterNodeId();
int stepNo = step->getStepNo();
- Uint32 timeoutVal;
- if (!conf.getProperty(nodeId,
- NODE_TYPE_DB,
- CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
- &timeoutVal)){
- return NDBT_FAILED;
- }
- int minSleep = (int)(timeoutVal * 1.5);
- int maxSleep = timeoutVal * 2;
- ndbout << "TransactionInactiveTimeout="<<timeoutVal
+
+ int minSleep = (int)(TIMEOUT * 1.5);
+ int maxSleep = TIMEOUT * 2;
+ ndbout << "TransactionInactiveTimeout="<< TIMEOUT
<< ", minSleep="<<minSleep
<< ", maxSleep="<<maxSleep<<endl;
@@ -89,7 +121,125 @@ int runTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){
} while(false);
hugoOps.closeTransaction(pNdb);
+ }
+
+ return result;
+}
+
+int runTimeoutTrans2(NDBT_Context* ctx, NDBT_Step* step){
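+ // Perform two operations (chosen from the Op1/Op2 test properties) separated
+ // by a sleep longer than the timeout; the final commit is then expected to
+ // fail with NDB error 266 whenever the first operation took a lock.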
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int stepNo = step->getStepNo();
+ int mul1 = ctx->getProperty("Op1", (Uint32)0);
+ int mul2 = ctx->getProperty("Op2", (Uint32)0);
+ int records = ctx->getNumRecords();
+
+ int minSleep = (int)(TIMEOUT * 1.5);
+ int maxSleep = TIMEOUT * 2;
+ HugoOperations hugoOps(*ctx->getTab());
+ Ndb* pNdb = GETNDB(step);
+
+ for (int l = 0; l<loops && !ctx->isTestStopped() && result == NDBT_OK; l++){
+
+ int op1 = 0 + (l + stepNo) * mul1;
+ int op2 = 0 + (l + stepNo) * mul2;
+
+ op1 = (op1 % 5);
+ op2 = (op2 % 5);
+
+ ndbout << stepNo << ": TransactionInactiveTimeout="<< TIMEOUT
+ << ", minSleep="<<minSleep
+ << ", maxSleep="<<maxSleep
+ << ", op1=" << op1
+ << ", op2=" << op2 << endl;;
+
+ do{
+ // Commit transaction
+ CHECK(hugoOps.startTransaction(pNdb) == 0);
+
+ switch(op1){
+ case 0:
+ break;
+ case 1:
+ if(hugoOps.pkReadRecord(pNdb, stepNo, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ case 2:
+ if(hugoOps.pkUpdateRecord(pNdb, stepNo, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ case 3:
+ if(hugoOps.pkDeleteRecord(pNdb, stepNo, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ case 4:
+ if(hugoOps.pkInsertRecord(pNdb, stepNo+records+l, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ }
+
+ if(result != NDBT_OK)
+ break;
+
+ int res = hugoOps.execute_NoCommit(pNdb);
+ if(res != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+
+ int sleep = minSleep + myRandom48(maxSleep-minSleep);
+ ndbout << stepNo << ": Sleeping for "<< sleep << " milliseconds" << endl;
+ NdbSleep_MilliSleep(sleep);
+
+ switch(op2){
+ case 0:
+ break;
+ case 1:
+ if(hugoOps.pkReadRecord(pNdb, stepNo, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ case 2:
+ if(hugoOps.pkUpdateRecord(pNdb, stepNo, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ case 3:
+ if(hugoOps.pkDeleteRecord(pNdb, stepNo, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ case 4:
+ if(hugoOps.pkInsertRecord(pNdb, stepNo+2*records+l, true) != 0){
+ g_err << stepNo << ": Fail" << __LINE__ << endl;
+ result = NDBT_FAILED; break;
+ }
+ break;
+ }
+
+ // Expect that the transaction has timed out
+ res = hugoOps.execute_Commit(pNdb);
+ if(op1 != 0 && res != 266){
+ g_err << stepNo << ": Fail: " << res << "!= 237, op1="
+ << op1 << ", op2=" << op2 << endl;
+ result = NDBT_FAILED; break;
+ }
+
+ } while(false);
+
+ hugoOps.closeTransaction(pNdb);
}
return result;
@@ -98,18 +248,10 @@ int runTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){
int runDontTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
int loops = ctx->getNumLoops();
- NdbConfig conf(GETNDB(step)->getNodeId()+1);
- unsigned int nodeId = conf.getMasterNodeId();
int stepNo = step->getStepNo();
- Uint32 timeoutVal;
- if (!conf.getProperty(nodeId,
- NODE_TYPE_DB,
- CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
- &timeoutVal)){
- return NDBT_FAILED;
- }
- int maxSleep = (int)(timeoutVal * 0.5);
- ndbout << "TransactionInactiveTimeout="<<timeoutVal
+
+ int maxSleep = (int)(TIMEOUT * 0.5);
+ ndbout << "TransactionInactiveTimeout="<< TIMEOUT
<< ", maxSleep="<<maxSleep<<endl;
@@ -134,11 +276,8 @@ int runDontTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){
} while(false);
hugoOps.closeTransaction(pNdb);
-
-
}
-
return result;
}
@@ -146,18 +285,9 @@ int runBuddyTransNoTimeout(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
- NdbConfig conf(GETNDB(step)->getNodeId()+1);
- unsigned int nodeId = conf.getMasterNodeId();
int stepNo = step->getStepNo();
- Uint32 timeoutVal;
- if (!conf.getProperty(nodeId,
- NODE_TYPE_DB,
- CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
- &timeoutVal)){
- return NDBT_FAILED;
- }
- int maxSleep = (int)(timeoutVal * 0.3);
- ndbout << "TransactionInactiveTimeout="<<timeoutVal
+ int maxSleep = (int)(TIMEOUT * 0.3);
+ ndbout << "TransactionInactiveTimeout="<< TIMEOUT
<< ", maxSleep="<<maxSleep<<endl;
HugoOperations hugoOps(*ctx->getTab());
@@ -172,11 +302,11 @@ int runBuddyTransNoTimeout(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoOps.pkInsertRecord(pNdb, recordNo, true) == 0);
CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
- for (int i = 0; i < 10; i++){
+ for (int i = 0; i < 3; i++){
// Perform buddy scan reads
- CHECK(hugoOps.scanReadRecords(pNdb) == 0);
- CHECK(hugoOps.executeScanRead(pNdb) == 0);
-
+ CHECK((hugoOps.scanReadRecords(pNdb)) == 0);
+ CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+
int sleep = myRandom48(maxSleep);
ndbout << "Sleeping for " << sleep << " milliseconds" << endl;
NdbSleep_MilliSleep(sleep);
@@ -188,10 +318,7 @@ int runBuddyTransNoTimeout(NDBT_Context* ctx, NDBT_Step* step){
} while(false);
hugoOps.closeTransaction(pNdb);
-
-
}
-
return result;
}
@@ -202,7 +329,9 @@ TESTCASE("DontTimeoutTransaction",
"if we sleep during the transaction. Use a sleep "\
"value which is smaller than TransactionInactiveTimeout"){
INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
STEPS(runDontTimeoutTrans, 1);
+ FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
TESTCASE("DontTimeoutTransaction5",
@@ -211,7 +340,9 @@ TESTCASE("DontTimeoutTransaction5",
"value which is smaller than TransactionInactiveTimeout" \
"Five simultaneous threads"){
INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
STEPS(runDontTimeoutTrans, 5);
+ FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
TESTCASE("TimeoutTransaction",
@@ -219,7 +350,9 @@ TESTCASE("TimeoutTransaction",
"if we sleep during the transaction. Use a sleep "\
"value which is larger than TransactionInactiveTimeout"){
INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
STEPS(runTimeoutTrans, 1);
+ FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
TESTCASE("TimeoutTransaction5",
@@ -228,7 +361,21 @@ TESTCASE("TimeoutTransaction5",
"value which is larger than TransactionInactiveTimeout" \
"Five simultaneous threads"){
INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
STEPS(runTimeoutTrans, 5);
+ FINALIZER(resetTransactionTimeout);
+ FINALIZER(runClearTable);
+}
+TESTCASE("TimeoutRandTransaction",
+ "Test that the transaction does timeout "\
+ "if we sleep during the transaction. Use a sleep "\
+ "value which is larger than TransactionInactiveTimeout"){
+ INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
+ TC_PROPERTY("Op1", 7);
+ TC_PROPERTY("Op2", 11);
+ STEPS(runTimeoutTrans2, 5);
+ FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
TESTCASE("BuddyTransNoTimeout",
@@ -238,7 +385,9 @@ TESTCASE("BuddyTransNoTimeout",
"The total sleep time is longer than TransactionInactiveTimeout" \
"Commit the first transaction, it should not have timed out."){
INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
STEPS(runBuddyTransNoTimeout, 1);
+ FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
TESTCASE("BuddyTransNoTimeout5",
@@ -249,7 +398,9 @@ TESTCASE("BuddyTransNoTimeout5",
"Commit the first transaction, it should not have timed out." \
"Five simultaneous threads"){
INITIALIZER(runLoadTable);
+ INITIALIZER(setTransactionTimeout);
STEPS(runBuddyTransNoTimeout, 5);
+ FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
NDBT_TESTSUITE_END(testTimeout);
diff --git a/ndb/test/ndbapi/testTransactions.cpp b/ndb/test/ndbapi/testTransactions.cpp
index 9ce928f8736..67a2df24390 100644
--- a/ndb/test/ndbapi/testTransactions.cpp
+++ b/ndb/test/ndbapi/testTransactions.cpp
@@ -102,7 +102,7 @@ OperationTestCase matrix[] = {
{ "ScanExInsert", true, "SCAN-EX",1, "INSERT", 266, X, 0, 1 },
{ "ScanExUpdate", true, "SCAN-EX",1, "UPDATE", 266, 2, 0, 1 },
{ "ScanExDelete", true, "SCAN-EX",1, "DELETE", 266, X, 0, 1 },
- { "ScanExScan", true, "SCAN-EX",1, "SCAN", 274, X, 0, 1 },
+ { "ScanExScan", true, "SCAN-EX",1, "SCAN", 0, 1, 0, 1 },
{ "ScanExScanHl", true, "SCAN-EX",1, "SCAN-HL", 274, X, 0, 1 },
{ "ScanExScanEx", true, "SCAN-EX",1, "SCAN-EX", 274, X, 0, 1 },
#if 0
@@ -117,8 +117,8 @@ OperationTestCase matrix[] = {
{ "ReadExInsert", true, "READ-EX",1, "INSERT", 266, X, 0, 1 },
{ "ReadExUpdate", true, "READ-EX",1, "UPDATE", 266, X, 0, 1 },
{ "ReadExDelete", true, "READ-EX",1, "DELETE", 266, X, 0, 1 },
- { "ReadExScan", true, "READ-EX",1, "SCAN", 274, 1, 0, 1 },
- { "ReadExScanHl", true, "READ-EX",1, "SCAN-HL", 274, 1, 0, 1 },
+ { "ReadExScan", true, "READ-EX",1, "SCAN", 0, 1, 0, 1 },
+ { "ReadExScanHl", true, "READ-EX",1, "SCAN-HL", 274, X, 0, 1 },
{ "ReadExScanEx", true, "READ-EX",1, "SCAN-EX", 274, X, 0, 1 },
#if 0
{ "ReadExScanUp", true, "READ-EX",1, "SCAN-UP", 266, X, 0, 1 },
@@ -132,7 +132,7 @@ OperationTestCase matrix[] = {
{ "InsertInsert", false, "INSERT", 1, "INSERT", 266, X, 0, 1 },
{ "InsertUpdate", false, "INSERT", 1, "UPDATE", 266, X, 0, 1 },
{ "InsertDelete", false, "INSERT", 1, "DELETE", 266, X, 0, 1 },
- { "InsertScan", false, "INSERT", 1, "SCAN", 274, X, 0, 1 },
+ { "InsertScan", false, "INSERT", 1, "SCAN", 626, X, 0, 1 },
{ "InsertScanHl", false, "INSERT", 1, "SCAN-HL", 274, X, 0, 1 },
{ "InsertScanEx", false, "INSERT", 1, "SCAN-EX", 274, X, 0, 1 },
#if 0
@@ -147,7 +147,7 @@ OperationTestCase matrix[] = {
{ "UpdateInsert", true, "UPDATE", 2, "INSERT", 266, X, 0, 2 },
{ "UpdateUpdate", true, "UPDATE", 2, "UPDATE", 266, X, 0, 2 },
{ "UpdateDelete", true, "UPDATE", 2, "DELETE", 266, X, 0, 2 },
- { "UpdateScan", true, "UPDATE", 2, "SCAN", 274, X, 0, 2 },
+ { "UpdateScan", true, "UPDATE", 2, "SCAN", 0, 1, 0, 2 },
{ "UpdateScanHl", true, "UPDATE", 2, "SCAN-HL", 274, X, 0, 2 },
{ "UpdateScanEx", true, "UPDATE", 2, "SCAN-EX", 274, X, 0, 2 },
#if 0
@@ -162,7 +162,7 @@ OperationTestCase matrix[] = {
{ "DeleteInsert", true, "DELETE", X, "INSERT", 266, X, 626, X },
{ "DeleteUpdate", true, "DELETE", X, "UPDATE", 266, X, 626, X },
{ "DeleteDelete", true, "DELETE", X, "DELETE", 266, X, 626, X },
- { "DeleteScan", true, "DELETE", X, "SCAN", 274, X, 626, X },
+ { "DeleteScan", true, "DELETE", X, "SCAN", 0, 1, 626, X },
{ "DeleteScanHl", true, "DELETE", X, "SCAN-HL", 274, X, 626, X },
{ "DeleteScanEx", true, "DELETE", X, "SCAN-EX", 274, X, 626, X },
#if 0
@@ -206,9 +206,9 @@ runOp(HugoOperations & hugoOps,
} else if(strcmp(op, "SCAN") == 0){
C2(hugoOps.scanReadRecords(pNdb) == 0);
} else if(strcmp(op, "SCAN-HL") == 0){
- C2(hugoOps.scanReadRecords(pNdb, 240, HugoOperations::SL_ReadHold) == 0);
+ C2(hugoOps.scanReadRecords(pNdb, NdbScanOperation::LM_Read)== 0);
} else if(strcmp(op, "SCAN-EX") == 0){
- C2(hugoOps.scanReadRecords(pNdb, 240, HugoOperations::SL_Exclusive) == 0);
+ C2(hugoOps.scanReadRecords(pNdb, NdbScanOperation::LM_Exclusive)== 0);
} else {
g_err << __FILE__ << " - " << __LINE__
<< ": Unknown operation" << op << endl;
diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am
index 3dd9632ce4b..04be35325db 100644
--- a/ndb/test/run-test/Makefile.am
+++ b/ndb/test/run-test/Makefile.am
@@ -1,18 +1,18 @@
-ndbtest_PROGRAMS = atrt
+testdir=$(prefix)/mysql-test/ndb
-atrt_SOURCES = main.cpp
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_util.mk.am
+include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
-ndbtest_SCRIPTS = atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
+test_PROGRAMS = atrt
+test_DATA=daily-basic-tests.txt daily-devel-tests.txt
+test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
-EXTRA_DIST = $(ndbtest_SCRIPTS)
+atrt_SOURCES = main.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmclient
LDADD_LOC = $(top_builddir)/ndb/src/mgmclient/CpcClient.o $(top_builddir)/ndb/src/libndbclient.la
-include $(top_srcdir)/ndb/config/common.mk.am
-include $(top_srcdir)/ndb/config/type_util.mk.am
-include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
-
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
new file mode 100644
index 00000000000..631378cb636
--- /dev/null
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -0,0 +1,1008 @@
+# BASIC FUNCTIONALITY
+max-time: 500
+cmd: testBasic
+args: -n PkRead
+
+max-time: 500
+cmd: testBasic
+args: -n PkUpdate
+
+max-time: 500
+cmd: testBasic
+args: -n PkDelete
+
+max-time: 500
+cmd: testBasic
+args: -n PkInsert
+
+max-time: 600
+cmd: testBasic
+args: -n UpdateAndRead
+
+max-time: 500
+cmd: testBasic
+args: -n PkReadAndLocker T6
+
+max-time: 500
+cmd: testBasic
+args: -n PkReadAndLocker2 T6
+
+max-time: 500
+cmd: testBasic
+args: -n PkReadUpdateAndLocker T6
+
+max-time: 500
+cmd: testBasic
+args: -n ReadWithLocksAndInserts T6
+
+max-time: 500
+cmd: testBasic
+args: -n PkInsertTwice T1 T6 T10
+
+max-time: 1500
+cmd: testBasic
+args: -n Fill T13
+
+max-time: 1500
+cmd: testBasic
+args: -n Fill T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitSleep T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommit626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitAndClose T6
+
+max-time: 500
+cmd: testBasic
+args: -n Commit626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitTry626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitAsMuch626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommit626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitRollback626 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n Commit630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitTry630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitAsMuch630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommit630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitRollback630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitAndClose T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n RollbackUpdate T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n RollbackDeleteMultiple T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n ImplicitRollbackDelete T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitDelete T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n RollbackNothing T1 T6
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkInsertAsynch
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkReadAsynch
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkUpdateAsynch
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkDeleteAsynch
+
+max-time: 500
+cmd: testBasic
+args: -n MassiveRollback T1 T6 T13
+
+max-time: 500
+cmd: testBasic
+args: -n MassiveRollback2 T1 T6 T13
+
+#-m 500 1: testBasic -n ReadConsistency T6
+max-time: 500
+cmd: testTimeout
+args: -n DontTimeoutTransaction T1
+
+max-time: 500
+cmd: testTimeout
+args: -n DontTimeoutTransaction5 T1
+
+max-time: 500
+cmd: testTimeout
+args: -n TimeoutTransaction T1
+
+max-time: 500
+cmd: testTimeout
+args: -n TimeoutTransaction5 T1
+
+max-time: 500
+cmd: testTimeout
+args: -n BuddyTransNoTimeout T1
+
+max-time: 500
+cmd: testTimeout
+args: -n BuddyTransNoTimeout5 T1
+
+max-time: 500
+cmd: testTimeout
+args: -n TimeoutRandTransaction T1
+
+# SCAN TESTS
+#
+max-time: 500
+cmd: testScan
+args: -n ScanRead16
+
+max-time: 500
+cmd: testScan
+args: -n ScanRead240
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadCommitted240
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdate
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdate2 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanDelete
+
+max-time: 500
+cmd: testScan
+args: -n ScanDelete2 T10
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdateAndScanRead T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAndLocker T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAndPkRead T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanRead488 -l 10 T6
+
+max-time: 600
+cmd: testScan
+args: -n ScanRead40 -l 100 T2
+
+max-time: 1800
+cmd: testScan
+args: -n ScanRead100 -l 100 T1
+
+max-time: 600
+cmd: testScan
+args: -n ScanRead40 -l 100 T1
+
+max-time: 1800
+cmd: testScan
+args: -n ScanRead40RandomTable -l 100 T1
+
+max-time: 3600
+cmd: testScan
+args: -n ScanRead40RandomTable -l 1000 T2
+
+max-time: 500
+cmd: testScan
+args: -n ScanWithLocksAndInserts T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAbort T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAbort15 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAbort240 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdateAbort16 T6
+
+max-time: 3600
+cmd: testScan
+args: -n ScanReadRestart T1 T6 T13
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdateRestart T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckGetValue T6
+
+max-time: 500
+cmd: testScan
+args: -n CloseWithoutStop T6
+
+max-time: 500
+cmd: testScan
+args: -n NextScanWhenNoMore T6
+
+max-time: 500
+cmd: testScan
+args: -n ExecuteScanWithoutOpenScan T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOpenScanOnce T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOneOpInScanTrans T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOneOpBeforeOpenScan T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOneScanPerTrans T6
+
+max-time: 500
+cmd: testScan
+args: -n NoCloseTransaction T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckInactivityTimeOut T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckInactivityBeforeClose T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckAfterTerror T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5021 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReaderror5022 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5023 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5024 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5025 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5030 T1
+
+max-time: 500
+cmd: testScan
+args: -n InsertDelete T1 T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckAfterTerror T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadWhileNodeIsDown T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanRestart T1
+
+# OLD FLEX
+max-time: 500
+cmd: flexBench
+args: -c 25 -t 10
+
+max-time: 500
+cmd: flexHammer
+args: -r 5 -t 32
+
+#
+# DICT TESTS
+max-time: 1500
+cmd: testDict
+args: -n CreateAndDrop
+
+max-time: 1500
+cmd: testDict
+args: -n CreateAndDropWithData
+
+max-time: 1500
+cmd: testDict
+args: -n CreateAndDropDuring T6 T10
+
+max-time: 1500
+cmd: testDict
+args: -n CreateInvalidTables
+
+max-time: 1500
+cmd: testDict
+args: -n CreateTableWhenDbIsFull T6
+
+max-time: 1500
+cmd: testDict
+args: -n CreateMaxTables T6
+
+max-time: 500
+cmd: testDict
+args: -n FragmentTypeSingle T1
+
+max-time: 1500
+cmd: testDict
+args: -n FragmentTypeAll T1 T6 T7 T8
+
+max-time: 1500
+cmd: testDict
+args: -n FragmentTypeAllLarge T1 T6 T7 T8
+
+max-time: 1500
+cmd: testDict
+args: -n TemporaryTables T1 T6 T7 T8
+
+#
+# TEST NDBAPI
+#
+max-time: 500
+cmd: testDataBuffers
+args:
+
+# Testsuite: testNdbApi
+# Number of tests: 5
+max-time: 500
+cmd: testNdbApi
+args: -n MaxNdb T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxTransactions T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxOperations T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxGetValue T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxEqual
+
+max-time: 500
+cmd: testNdbApi
+args: -n DeleteNdb T1 T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n WaitUntilReady T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n GetOperationNoTab T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbErrorOperation T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n MissingOperation T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n GetValueInUpdate T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n UpdateWithoutKeys T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n UpdateWithoutValues T6
+
+#max-time: 500
+#cmd: testInterpreter
+#args: T1
+#
+max-time: 1500
+cmd: testOperations
+args: -n ReadRead
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadReadEx
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadInsert
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadUpdate
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadDelete
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadRead
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadReadEx
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadInsert
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadUpdate
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadDelete
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExRead
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExReadEx
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExInsert
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExUpdate
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExDelete
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertRead
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertReadEx
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertInsert
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertUpdate
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertDelete
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateRead
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateReadEx
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateInsert
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateUpdate
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateDelete
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteRead
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteReadEx
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteInsert
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteUpdate
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteDelete
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadSimpleRead
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadDirtyRead
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadSimpleRead
+
+max-time: 1500
+cmd: testOperations
+args: -n FReadDirtyRead
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExSimpleRead
+
+max-time: 1500
+cmd: testOperations
+args: -n ReadExDirtyRead
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertSimpleRead
+
+max-time: 1500
+cmd: testOperations
+args: -n InsertDirtyRead
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateSimpleRead
+
+max-time: 1500
+cmd: testOperations
+args: -n UpdateDirtyRead
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteSimpleRead
+
+max-time: 1500
+cmd: testOperations
+args: -n DeleteDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanHlScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExReadEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExSimpleRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExDirtyRead
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExInsert
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExUpdate
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExDelete
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n ScanExScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n ReadExScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n InsertScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n UpdateScanEx
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteScan
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteScanHl
+
+max-time: 1500
+cmd: testTransactions
+args: -n DeleteScanEx
+
+max-time: 1500
+cmd: testRestartGci
+args: T6
+
+max-time: 600
+cmd: testBlobs
+args:
+
+max-time: 2500
+cmd: testOIBasic
+args:
+
+#
+#
+# SYSTEM RESTARTS
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T7
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T8
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR2 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR2 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR2 T7
+
diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt
new file mode 100644
index 00000000000..15fa4db4abc
--- /dev/null
+++ b/ndb/test/run-test/daily-devel-tests.txt
@@ -0,0 +1,204 @@
+#
+# INDEX
+#
+max-time: 1500
+cmd: testIndex
+args: -n CreateAll T1 T6 T13
+
+#-m 7200 1: testIndex -n InsertDeleteGentle T7
+max-time: 3600
+cmd: testIndex
+args: -n InsertDelete T1 T10
+
+#-m 3600 1: testIndex -n CreateLoadDropGentle T7
+max-time: 3600
+cmd: testIndex
+args: -n CreateLoadDrop T1 T10
+
+#
+# BACKUP
+#
+max-time: 600
+cmd: testBackup
+args: -n BackupOne T1 T6 T3 I3
+
+#max-time: 600
+#cmd: testBackup
+#args: -n BackupBank T6
+#
+#
+# MGMAPI AND MGSRV
+#
+max-time: 1800
+cmd: testMgm
+args: -n SingleUserMode T1
+
+#
+#
+# SYSTEM RESTARTS
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T7
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T8
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR3 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR4 T6
+
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_FULLDB T1
+
+#
+# NODE RESTARTS
+#
+max-time: 2500
+cmd: testNodeRestart
+args: -n NoLoad T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n PkRead T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -l 1 -n PkReadPkUpdate
+
+max-time: 2500
+cmd: testNodeRestart
+args: -l 1 -n ReadUpdateScan
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n Terror T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FullDb T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNode T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNodeError T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNodeInitial T6 T13
+
+max-time: 3600
+cmd: testNodeRestart
+args: -l 1 -n RestartNFDuringNR T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartMasterNodeError T6 T8 T13
+
+max-time: 3600
+cmd: testNodeRestart
+args: -n RestartNodeDuringLCP T6
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n TwoNodeFailure T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n TwoMasterNodeFailure T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FiftyPercentFail T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodes T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodesAbort T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodesError9999 T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FiftyPercentStopAndWait T6 T8 T13
+
+#max-time: 500
+#cmd: testNodeRestart
+#args: -n StopOnError T1
+#
+#
+max-time: 2500
+cmd: testIndex
+args: -n NFNR1 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR2 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR3 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n BuildDuring T6
+
+max-time: 2500
+cmd: testIndex
+args: -l 2 -n SR1 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR1_O T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR2_O T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR3_O T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n BuildDuring_O T6
+
+max-time: 2500
+cmd: testIndex
+args: -l 2 -n SR1_O T6 T13
+
+max-time: 500
+cmd: testIndex
+args: -n MixedTransaction T1
+
+max-time: 2500
+cmd: testDict
+args: -n NF1 T1 T6 T13
+
+max-time: 2500
+cmd: test_event
+args: -n BasicEventOperation T1 T6
+
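The test list files added above (including daily-devel-tests.txt) use a simple record format: each case is a block of max-time:, cmd: and args: lines, blocks are separated by a blank line, and lines starting with '#' are comments. The stand-alone sketch below parses that layout; it is an illustration of the format only, not the actual atrt harness parser, which may differ.

    #include <cstdlib>
    #include <fstream>
    #include <string>
    #include <vector>

    struct TestCase {              // one max-time/cmd/args block
      int maxTime;
      std::string cmd;
      std::string args;
      TestCase() : maxTime(0) {}
    };

    // Parse a daily-*-tests.txt style list: '#' starts a comment line,
    // a blank line terminates the current test case block.
    std::vector<TestCase> parseTestList(const char* path) {
      std::vector<TestCase> cases;
      std::ifstream in(path);
      TestCase cur;
      bool open = false;
      std::string line;
      while (std::getline(in, line)) {
        if (!line.empty() && line[0] == '#')
          continue;                                   // comment
        if (line.empty()) {                           // end of block
          if (open) { cases.push_back(cur); cur = TestCase(); open = false; }
          continue;
        }
        std::string::size_type colon = line.find(':');
        if (colon == std::string::npos)
          continue;                                   // not a "key: value" line
        std::string key = line.substr(0, colon);
        std::string val = line.substr(colon + 1);
        while (!val.empty() && val[0] == ' ')
          val.erase(0, 1);
        if (key == "max-time")      cur.maxTime = std::atoi(val.c_str());
        else if (key == "cmd")      cur.cmd = val;
        else if (key == "args")     cur.args = val;
        open = true;
      }
      if (open) cases.push_back(cur);                 // last block without trailing blank line
      return cases;
    }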
diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp
index 9e318b0219e..6f1899fdbe2 100644
--- a/ndb/test/run-test/main.cpp
+++ b/ndb/test/run-test/main.cpp
@@ -106,13 +106,6 @@ main(int argc, const char ** argv){
if(!setup_hosts(g_config))
goto end;
- if(!start_processes(g_config, atrt_process::NDB_MGM))
- goto end;
-
- if(!connect_ndb_mgm(g_config)){
- goto end;
- }
-
/**
* Main loop
*/
@@ -122,25 +115,32 @@ main(int argc, const char ** argv){
*/
if(restart){
g_logger.info("(Re)starting ndb processes");
+ if(!stop_processes(g_config, atrt_process::NDB_MGM))
+ goto end;
+
if(!stop_processes(g_config, atrt_process::NDB_DB))
goto end;
- if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NO_CONTACT))
+ if(!start_processes(g_config, atrt_process::NDB_MGM))
goto end;
+ if(!connect_ndb_mgm(g_config)){
+ goto end;
+ }
+
if(!start_processes(g_config, atrt_process::NDB_DB))
goto end;
-
+
if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
goto end;
-
+
for(Uint32 i = 0; i<3; i++)
if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
goto started;
-
+
goto end;
-
-started:
+
+ started:
g_logger.info("Ndb start completed");
}
@@ -211,7 +211,7 @@ started:
(result == 0 ? "OK" : "FAILED"), result);
if(g_report_file != 0){
- fprintf(g_report_file, "%s %s ; %d ; %d ; %d\n",
+ fprintf(g_report_file, "%s %s ; %d ; %d ; %ld\n",
test_case.m_command.c_str(),
test_case.m_args.c_str(),
test_no, result, elapsed);
@@ -447,7 +447,6 @@ setup_config(atrt_config& config){
proc.m_proc.m_owner = "atrt";
proc.m_proc.m_group = "group";
proc.m_proc.m_cwd.assign(dir).append("/run/");
- proc.m_proc.m_env.assfmt("LD_LIBRARY_PATH=%s/lib/mysql", dir.c_str());
proc.m_proc.m_stdout = "log.out";
proc.m_proc.m_stderr = "2>&1";
proc.m_proc.m_runas = proc.m_host->m_user;
@@ -460,7 +459,7 @@ setup_config(atrt_config& config){
proc.m_proc.m_path.assign(dir).append("/libexec/ndb_mgmd");
proc.m_proc.m_args = "-n -c initconfig.txt";
proc.m_proc.m_cwd.appfmt("%d.ndb_mgmd", index);
- connect_string.appfmt(";host=%s:%d",
+ connect_string.appfmt("host=%s:%d;",
proc.m_hostname.c_str(), proc.m_ndb_mgm_port);
} else if(split1[0] == "ndb"){
proc.m_type = atrt_process::NDB_DB;
@@ -502,10 +501,10 @@ setup_config(atrt_config& config){
// Setup connect string
for(size_t i = 0; i<config.m_processes.size(); i++){
- config.m_processes[i].m_proc.m_env.appfmt(" NDB_CONNECTSTRING=nodeid=%d%s",
- i+1, connect_string.c_str());
+ config.m_processes[i].m_proc.m_env.assfmt("NDB_CONNECTSTRING=%s",
+ connect_string.c_str());
}
-
+
end:
fclose(f);
return result;
@@ -615,11 +614,22 @@ wait_ndb(atrt_config& config, int goal){
/**
   * 1) retrieve current state
*/
- state = ndb_mgm_get_status(handle);
- if(state == 0){
- g_logger.critical("Unable to poll db state");
- return false;
- }
+ state = 0;
+ do {
+ state = ndb_mgm_get_status(handle);
+ if(state == 0){
+ const int err = ndb_mgm_get_latest_error(handle);
+ g_logger.error("Unable to poll db state: %d %s %s",
+ ndb_mgm_get_latest_error(handle),
+ ndb_mgm_get_latest_error_msg(handle),
+ ndb_mgm_get_latest_error_desc(handle));
+ if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
+ g_logger.error("Reconnected...");
+ continue;
+ }
+ return false;
+ }
+ } while(state == 0);
NdbAutoPtr<void> tmp(state);
min2 = goal;
@@ -791,6 +801,10 @@ update_status(atrt_config& config, int){
proc.m_proc.m_id,
proc.m_hostname.c_str(),
proc.m_proc.m_path.c_str());
+ for(size_t j = 0; j<h_procs.size(); j++){
+ g_logger.error("found: %d %s", h_procs[j].m_id,
+ h_procs[j].m_path.c_str());
+ }
return false;
}
}
@@ -924,9 +938,11 @@ gather_result(atrt_config& config, int * result){
BaseString tmp = g_gather_progname;
for(size_t i = 0; i<config.m_processes.size(); i++){
atrt_process & proc = config.m_processes[i];
- tmp.appfmt(" %s:%s",
- proc.m_hostname.c_str(),
- proc.m_proc.m_cwd.c_str());
+ if(proc.m_proc.m_path != ""){
+ tmp.appfmt(" %s:%s",
+ proc.m_hostname.c_str(),
+ proc.m_proc.m_cwd.c_str());
+ }
}
const int r1 = system(tmp.c_str());
@@ -970,3 +986,7 @@ setup_hosts(atrt_config& config){
}
return true;
}
+
+template class Vector<Vector<SimpleCpcClient::Process> >;
+template class Vector<atrt_host>;
+template class Vector<atrt_process>;
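In main.cpp the (re)start path now stops the management server together with the data nodes, then restarts and reconnects to it before starting the data nodes again. wait_ndb() also no longer gives up on the first failed ndb_mgm_get_status() call: when the error is NDB_MGM_SERVER_NOT_CONNECTED it calls connect_ndb_mgm() again and keeps polling. The shape of that retry loop is sketched below with stand-in functions; pollStatus and reconnect are hypothetical names, not the NDB MGM API.

    #include <cstdio>

    enum PollResult { POLL_OK, POLL_NOT_CONNECTED, POLL_ERROR };

    // Stand-ins for the sketch only (not the real management API).
    static int attempts = 0;
    static PollResult pollStatus() { return ++attempts < 3 ? POLL_NOT_CONNECTED : POLL_OK; }
    static bool reconnect() { return true; }

    // Keep polling; on "not connected" try one reconnect and poll again,
    // on any other failure give up (mirrors the reworked wait_ndb() loop).
    static bool pollUntilStatus() {
      for (;;) {
        PollResult r = pollStatus();
        if (r == POLL_OK)
          return true;
        if (r == POLL_NOT_CONNECTED && reconnect()) {
          std::fprintf(stderr, "Reconnected...\n");
          continue;
        }
        return false;
      }
    }

    int main() { return pollUntilStatus() ? 0 : 1; }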
diff --git a/ndb/test/src/HugoAsynchTransactions.cpp b/ndb/test/src/HugoAsynchTransactions.cpp
index 2af22b5f48d..f75293f5a14 100644
--- a/ndb/test/src/HugoAsynchTransactions.cpp
+++ b/ndb/test/src/HugoAsynchTransactions.cpp
@@ -165,12 +165,13 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
allocRows(trans*operations);
allocTransactions(trans);
+ int a, t, r;
for (int i = 0; i < batch; i++) { // For each batch
while (cRecords < records*batch) {
cTrans = 0;
cReadIndex = 0;
- for (int t = 0; t < trans; t++) { // For each transaction
+ for (t = 0; t < trans; t++) { // For each transaction
transactions[t] = pNdb->startTransaction();
if (transactions[t] == NULL) {
ERR(pNdb->getNdbError());
@@ -187,7 +188,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
// Read
// Define primary keys
check = pOp->readTupleExclusive();
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if (tab.getColumn(a)->getPrimaryKey() == true) {
if (equalForAttr(pOp, a, cReadRecords) != 0){
ERR(transactions[t]->getNdbError());
@@ -197,7 +198,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
}
}
// Define attributes to read
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if ((rows[cReadIndex]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(transactions[t]->getNdbError());
@@ -225,7 +226,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
pNdb->sendPollNdb(3000, 0, 0);
// Verify the data!
- for (int r = 0; r < trans*operations; r++) {
+ for (r = 0; r < trans*operations; r++) {
if (calc.verifyRowValues(rows[r]) != 0) {
g_info << "|- Verify failed..." << endl;
// Close all transactions
@@ -239,7 +240,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
// Update
cTrans = 0;
cIndex = 0;
- for (int t = 0; t < trans; t++) { // For each transaction
+ for (t = 0; t < trans; t++) { // For each transaction
for (int k = 0; k < operations; k++) { // For each operation
NdbOperation* pOp = transactions[t]->getNdbOperation(tab.getName());
if (pOp == NULL) {
@@ -258,7 +259,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
}
// Set search condition for the record
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if (tab.getColumn(a)->getPrimaryKey() == true) {
if (equalForAttr(pOp, a, cRecords) != 0) {
ERR(transactions[t]->getNdbError());
@@ -269,7 +270,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
}
// Update the record
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if (tab.getColumn(a)->getPrimaryKey() == false) {
if (setValueForAttr(pOp, a, cRecords, updates) != 0) {
ERR(transactions[t]->getNdbError());
@@ -298,7 +299,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb,
pNdb->sendPollNdb(3000, 0, 0);
// Close all transactions
- for (int t = 0; t < cTrans; t++) {
+ for (t = 0; t < cTrans; t++) {
pNdb->closeTransaction(transactions[t]);
}
@@ -346,6 +347,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
int cTrans = 0;
int cRecords = 0;
int cIndex = 0;
+ int a,t,r;
transactionsCompleted = 0;
allocTransactions(trans);
@@ -354,7 +356,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
while (cRecords < records*batch) {
cTrans = 0;
cIndex = 0;
- for (int t = 0; t < trans; t++) { // For each transaction
+ for (t = 0; t < trans; t++) { // For each transaction
transactions[t] = pNdb->startTransaction();
if (transactions[t] == NULL) {
ERR(pNdb->getNdbError());
@@ -379,7 +381,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
}
// Set a calculated value for each attribute in this table
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if (setValueForAttr(pOp, a, cRecords, 0 ) != 0) {
ERR(transactions[t]->getNdbError());
pNdb->closeTransaction(transactions[t]);
@@ -394,7 +396,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
case NO_READ:
// Define primary keys
check = pOp->readTuple();
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if (tab.getColumn(a)->getPrimaryKey() == true) {
if (equalForAttr(pOp, a, cRecords) != 0){
ERR(transactions[t]->getNdbError());
@@ -404,7 +406,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
}
}
// Define attributes to read
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if ((rows[cIndex]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(transactions[t]->getNdbError());
@@ -423,7 +425,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
}
// Define primary keys
- for (int a = 0; a < tab.getNoOfColumns(); a++) {
+ for (a = 0; a < tab.getNoOfColumns(); a++) {
if (tab.getColumn(a)->getPrimaryKey() == true){
if (equalForAttr(pOp, a, cRecords) != 0) {
ERR(transactions[t]->getNdbError());
@@ -462,7 +464,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
switch (theOperation) {
case NO_READ:
// Verify the data!
- for (int r = 0; r < trans*operations; r++) {
+ for (r = 0; r < trans*operations; r++) {
if (calc.verifyRowValues(rows[r]) != 0) {
g_info << "|- Verify failed..." << endl;
// Close all transactions
@@ -476,11 +478,11 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb,
case NO_INSERT:
case NO_UPDATE:
case NO_DELETE:
- abort();
+ break;
}
// Close all transactions
- for (int t = 0; t < cTrans; t++) {
+ for (t = 0; t < cTrans; t++) {
pNdb->closeTransaction(transactions[t]);
}
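Much of the remaining churn in this and the following Hugo*/NDBT_* files is mechanical: loop counters such as a, t and r are declared once at the top of the function and the for-initializers reuse them. This is presumably a workaround for compilers with pre-standard for-scope rules, where a second 'for (int a = ...)' in the same function is rejected as a redefinition. A minimal illustration, assuming that motivation:

    // Old style: some pre-standard compilers treat the second declaration of
    // 'a' as a redefinition in the enclosing scope and refuse to compile:
    //
    //   for (int a = 0; a < n; a++) { ... }   // define primary keys
    //   for (int a = 0; a < n; a++) { ... }   // define attributes to read
    //
    // Hoisted style used throughout the patch: declare once, reuse everywhere.
    void defineKeysAndReads(int n) {
      int a;
      for (a = 0; a < n; a++) {
        // define primary key column a
      }
      for (a = 0; a < n; a++) {
        // define attribute a to read
      }
    }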
diff --git a/ndb/test/src/HugoCalculator.cpp b/ndb/test/src/HugoCalculator.cpp
index 55aa96a4909..147c8b104d8 100644
--- a/ndb/test/src/HugoCalculator.cpp
+++ b/ndb/test/src/HugoCalculator.cpp
@@ -28,7 +28,8 @@
HugoCalculator::HugoCalculator(const NdbDictionary::Table& tab) : m_tab(tab) {
// The "id" column of this table is found in the first integer column
- for (int i=0; i<m_tab.getNoOfColumns(); i++){
+ int i;
+ for (i=0; i<m_tab.getNoOfColumns(); i++){
const NdbDictionary::Column* attr = m_tab.getColumn(i);
if (attr->getType() == NdbDictionary::Column::Unsigned){
m_idCol = i;
@@ -37,7 +38,7 @@ HugoCalculator::HugoCalculator(const NdbDictionary::Table& tab) : m_tab(tab) {
}
// The "number of updates" column for this table is found in the last column
- for (int i=m_tab.getNoOfColumns()-1; i>=0; i--){
+ for (i=m_tab.getNoOfColumns()-1; i>=0; i--){
const NdbDictionary::Column* attr = m_tab.getColumn(i);
if (attr->getType() == NdbDictionary::Column::Unsigned){
m_updatesCol = i;
@@ -102,7 +103,8 @@ HugoCalculator::calcValue(int record,
// Fill buf with some pattern so that we can detect
// anomalies in the area that we don't fill with chars
- for (int i = 0; i<attr->getLength(); i++)
+ int i;
+ for (i = 0; i<attr->getLength(); i++)
buf[i] = ((i+2) % 255);
// Calculate length of the string to create. We want the string
@@ -116,7 +118,7 @@ HugoCalculator::calcValue(int record,
else
len++;
}
- for(int i=0; i < len; i++)
+ for(i=0; i < len; i++)
buf[i] = a[((val^i)%25)];
buf[len] = 0;
}
diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp
index 91263aa29b4..7c05cb86a93 100644
--- a/ndb/test/src/HugoOperations.cpp
+++ b/ndb/test/src/HugoOperations.cpp
@@ -40,6 +40,9 @@ int HugoOperations::closeTransaction(Ndb* pNdb){
}
pTrans = NULL;
+ m_result_sets.clear();
+ m_executed_result_sets.clear();
+
return NDBT_OK;
}
@@ -51,7 +54,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
int recordNo,
bool exclusive,
int numRecords){
-
+ int a;
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
@@ -71,7 +74,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -81,7 +84,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[r]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -95,7 +98,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
int HugoOperations::pkDirtyReadRecord(Ndb* pNdb,
int recordNo,
int numRecords){
-
+ int a;
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
@@ -113,7 +116,7 @@ int HugoOperations::pkDirtyReadRecord(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -123,7 +126,7 @@ int HugoOperations::pkDirtyReadRecord(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[r]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -137,7 +140,7 @@ int HugoOperations::pkDirtyReadRecord(Ndb* pNdb,
int HugoOperations::pkSimpleReadRecord(Ndb* pNdb,
int recordNo,
int numRecords){
-
+ int a;
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
@@ -155,7 +158,7 @@ int HugoOperations::pkSimpleReadRecord(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -165,7 +168,7 @@ int HugoOperations::pkSimpleReadRecord(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[r]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -180,7 +183,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb,
int recordNo,
int numRecords,
int updatesValue){
-
+ int a;
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
@@ -197,7 +200,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -207,7 +210,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb,
}
// Define attributes to update
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){
ERR(pTrans->getNdbError());
@@ -224,7 +227,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb,
int numRecords,
int updatesValue){
- int check;
+ int a, check;
for(int r=0; r < numRecords; r++){
NdbOperation* pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
@@ -239,7 +242,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -249,7 +252,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb,
}
// Define attributes to update
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){
ERR(pTrans->getNdbError());
@@ -265,7 +268,7 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb,
int recordNo,
int numRecords){
- int check;
+ int a, check;
for(int r=0; r < numRecords; r++){
NdbOperation* pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
@@ -280,7 +283,7 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -291,110 +294,70 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb,
}
return NDBT_OK;
}
-
-int HugoOperations::scanReadRecords(Ndb* pNdb,
- Uint32 parallelism, ScanLock lock){
-
- NdbConnection * pCon = pNdb->hupp(pTrans);
+#if 0
+NdbResultSet*
+HugoOperations::scanReadRecords(Ndb* pNdb, ScanLock lock){
+
NDBT_ResultRow * m_tmpRow = new NDBT_ResultRow(tab);
- ScanTmp tmp(pCon, m_tmpRow);
- tmp.m_op = ScanTmp::READ;
- NdbOperation* pOp = pCon->getNdbOperation(tab.getName());
+ NdbScanOperation* pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
+ ERR(pTrans->getNdbError());
+ return 0;
}
+
int check = 0;
+ NdbResultSet * rs = 0;
switch(lock){
case SL_ReadHold:
- check = pOp->openScanReadHoldLock(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Read, 1, 1);
break;
case SL_Exclusive:
- check = pOp->openScanExclusive(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Exclusive, 1, 1);
break;
case SL_Read:
default:
- check = pOp->openScanRead(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Dirty, 1, 1);
}
- if( check == -1 ) {
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
+ if( rs == 0) {
+ ERR(pTrans->getNdbError());
+ return 0;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
+ ERR(pTrans->getNdbError());
+ return 0;
}
// Define attributes to read
for(int a = 0; a<tab.getNoOfColumns(); a++){
if((m_tmpRow->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
+ ERR(pTrans->getNdbError());
+ return 0;
}
}
-
- check = tmp.pTrans->executeScan();
- if( check == -1 ) {
- NdbError err = tmp.pTrans->getNdbError();
- ERR(err);
- return err.code;
- }
-
- tmp.m_delete = false;
- m_scans.push_back(tmp);
-
- return 0;
+ return rs;
}
-int HugoOperations::executeScanRead(Ndb* pNdb){
-
- int check = 0;
- for(Uint32 i = 0; i<m_scans.size(); i++){
- ScanTmp & tmp = m_scans[i];
- check = run(tmp);
- if(check != 0){
- return check;
- }
- }
- while(m_scans.size() > 0){
- ScanTmp & tmp = m_scans[m_scans.size() - 1];
- if(tmp.m_op != ScanTmp::DONE)
- abort();
-
- tmp.pTrans->close();
- delete tmp.m_tmpRow;
- m_scans.erase(m_scans.size() - 1);
- }
- if(check != 0){
- return check;
+int
+HugoOperations::readTuples(NdbResultSet* rs){
+ int res = 0;
+ while((res = rs->nextResult()) == 0){
}
-
+ if(res != 1)
+ return NDBT_FAILED;
return NDBT_OK;
}
+#endif
int HugoOperations::execute_Commit(Ndb* pNdb,
AbortOption eao){
int check = 0;
- while(m_scans.size() > 0){
- ScanTmp & tmp = m_scans[m_scans.size() - 1];
- if(tmp.m_op != ScanTmp::DONE)
- abort();
-
- tmp.pTrans->close();
- delete tmp.m_tmpRow;
- m_scans.erase(m_scans.size() - 1);
- }
- if(check != 0){
- return check;
- }
-
check = pTrans->execute(Commit, eao);
if( check == -1 ) {
@@ -409,55 +372,41 @@ int HugoOperations::execute_Commit(Ndb* pNdb,
return NDBT_FAILED;
return err.code;
}
- return NDBT_OK;
-}
-int
-HugoOperations::run(ScanTmp & tmp){
- int count = 0;
- if(tmp.m_op == ScanTmp::DONE)
- abort();
+ for(int i = 0; i<m_result_sets.size(); i++){
+ m_executed_result_sets.push_back(m_result_sets[i]);
- int eof = tmp.pTrans->nextScanResult(true) ;
- while(eof == 0){
- count++;
- switch(tmp.m_op){
- case ScanTmp::READ:
- case ScanTmp::UPDATE:
- case ScanTmp::DELETE:
- break;
- case ScanTmp::DONE:
- abort();
+ int rows = m_result_sets[i].records;
+ NdbResultSet* rs = m_result_sets[i].m_result_set;
+ int res = rs->nextResult();
+ switch(res){
+ case 1:
+ return 626;
+ case -1:
+ const NdbError err = pTrans->getNdbError();
+ ERR(err);
+ return (err.code > 0 ? err.code : NDBT_FAILED);
}
- rows.push_back(tmp.m_tmpRow->clone());
- eof = tmp.pTrans->nextScanResult(false);
- }
- tmp.m_op = ScanTmp::DONE;
- if (eof == -1) {
- deallocRows();
- NdbError err = tmp.pTrans->getNdbError();
- ERR(err);
- return err.code;
- }
+ // A row found
- if(count == 0)
- return 626;
+ switch(rows){
+ case 0:
+ return 4000;
+ default:
+ m_result_sets[i].records--;
+ break;
+ }
+ }
- return 0;
+ m_result_sets.clear();
+
+ return NDBT_OK;
}
int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){
int check;
- for(Uint32 i = 0; i<m_scans.size(); i++){
- ScanTmp & tmp = m_scans[i];
- check = run(tmp);
- if(check != 0){
- return check;
- }
- }
-
check = pTrans->execute(NoCommit, eao);
if( check == -1 ) {
@@ -472,6 +421,35 @@ int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){
return NDBT_FAILED;
return err.code;
}
+
+ for(int i = 0; i<m_result_sets.size(); i++){
+ m_executed_result_sets.push_back(m_result_sets[i]);
+
+ int rows = m_result_sets[i].records;
+ NdbResultSet* rs = m_result_sets[i].m_result_set;
+ int res = rs->nextResult();
+ switch(res){
+ case 1:
+ return 626;
+ case -1:
+ const NdbError err = pTrans->getNdbError();
+ ERR(err);
+ return (err.code > 0 ? err.code : NDBT_FAILED);
+ }
+
+ // A row found
+
+ switch(rows){
+ case 0:
+ return 4000;
+ default:
+ case 1:
+ break;
+ }
+ }
+
+ m_result_sets.clear();
+
return NDBT_OK;
}
@@ -697,16 +675,13 @@ HugoOperations::refresh() {
NdbConnection* t = getTransaction();
if(t)
t->refresh();
- for(Uint32 i = 0; i<m_scans.size(); i++){
- if(m_scans[i].pTrans)
- m_scans[i].pTrans->refresh();
- }
}
int HugoOperations::indexReadRecords(Ndb*, const char * idxName, int recordNo,
bool exclusive,
int numRecords){
+ int a;
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
@@ -726,7 +701,7 @@ int HugoOperations::indexReadRecords(Ndb*, const char * idxName, int recordNo,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -736,7 +711,7 @@ int HugoOperations::indexReadRecords(Ndb*, const char * idxName, int recordNo,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[r]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -753,7 +728,7 @@ HugoOperations::indexUpdateRecord(Ndb*,
int recordNo,
int numRecords,
int updatesValue){
-
+ int a;
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
@@ -770,7 +745,7 @@ HugoOperations::indexUpdateRecord(Ndb*,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+recordNo) != 0){
ERR(pTrans->getNdbError());
@@ -780,7 +755,7 @@ HugoOperations::indexUpdateRecord(Ndb*,
}
// Define attributes to update
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){
ERR(pTrans->getNdbError());
@@ -791,3 +766,35 @@ HugoOperations::indexUpdateRecord(Ndb*,
}
return NDBT_OK;
}
+
+int
+HugoOperations::scanReadRecords(Ndb* pNdb, NdbScanOperation::LockMode lm,
+ int records){
+
+ allocRows(records);
+ NdbScanOperation * pOp = pTrans->getNdbScanOperation(tab.getName());
+
+ if(!pOp)
+ return -1;
+
+ NdbResultSet * rs = pOp->readTuples(lm, 1, 1);
+
+ if(!rs){
+ return -1;
+ }
+
+ for(int a = 0; a<tab.getNoOfColumns(); a++){
+ if((rows[0]->attributeStore(a) =
+ pOp->getValue(tab.getColumn(a)->getName())) == 0) {
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
+ }
+ }
+
+ RsPair p = {rs, records};
+ m_result_sets.push_back(p);
+
+ return 0;
+}
+
+template class Vector<HugoOperations::RsPair>;
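HugoOperations now records every scan it defines as an RsPair (the NdbResultSet together with the number of rows the test expects from it), and execute_Commit()/execute_NoCommit() walk those pairs after the execute: nextResult() reporting no row yields 626, a row arriving when zero rows were expected yields 4000, and closeTransaction() clears both lists. The stand-alone sketch below shows only that bookkeeping, with a plain counter standing in for the real result set; 626 and 4000 are simply the codes returned in the patch.

    #include <cstddef>
    #include <vector>

    // Illustration only: 'rowsLeft' stands in for what NdbResultSet::nextResult()
    // would report; it is not the real NDB API.
    struct FakeResultSet { int rowsLeft; };

    struct RsPair { FakeResultSet* rs; int records; };   // mirrors HugoOperations::RsPair

    // Drain pending scans after an execute, as execute_Commit()/execute_NoCommit()
    // now do: 0 = OK, 626 = expected a row but none was found, 4000 = a row was
    // found although none was expected (the codes used in the patch).
    int checkResultSets(std::vector<RsPair>& pending, std::vector<RsPair>& executed) {
      for (std::size_t i = 0; i < pending.size(); i++) {
        executed.push_back(pending[i]);
        bool gotRow = pending[i].rs->rowsLeft > 0;
        if (gotRow)
          pending[i].rs->rowsLeft--;
        if (!gotRow)
          return 626;
        if (pending[i].records == 0)
          return 4000;
        pending[i].records--;
      }
      pending.clear();               // executed pairs stay until closeTransaction()
      return 0;
    }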
diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp
index 7f12484ddc8..05039562c76 100644
--- a/ndb/test/src/HugoTransactions.cpp
+++ b/ndb/test/src/HugoTransactions.cpp
@@ -46,9 +46,9 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
while (true){
@@ -72,19 +72,18 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- if (committed == true)
- check = pOp->openScanReadCommitted(parallelism);
- else
- check = pOp->openScanRead(parallelism);
+ NdbResultSet * rs;
+ rs = pOp ->readTuples(committed ? NdbScanOperation::LM_CommittedRead :
+ NdbScanOperation::LM_Read);
- if( check == -1 ) {
+ if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -97,7 +96,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -106,7 +105,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
}
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
@@ -130,12 +129,10 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if (abortCount < abortPercent)
abortTrans = true;
}
-
+
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
-
- while(eof == 0){
+ while((eof = rs->nextResult(true)) == 0){
rows++;
if (calc.verifyRowValues(&row) != 0){
pNdb->closeTransaction(pTrans);
@@ -145,22 +142,20 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if (abortCount == rows && abortTrans == true){
ndbout << "Scan is aborted" << endl;
g_info << "Scan is aborted" << endl;
- check = pTrans->stopScan();
+ rs->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
+
pNdb->closeTransaction(pTrans);
return NDBT_OK;
}
-
- eof = pTrans->nextScanResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
-
+
if (err.status == NdbError::TemporaryError){
ERR_INFO(err);
pNdb->closeTransaction(pTrans);
@@ -199,106 +194,6 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
#define RESTART_SCAN 99
-// Take over one record from pOrgOp and update it
-int
-HugoTransactions::takeOverAndUpdateRecord(Ndb* pNdb,
- NdbOperation* pOrgOp){
- int retryAttempt = 0;
- const int retryMax = 10;
- int check;
- NdbConnection *pUpdTrans;
- NdbOperation *pUpdOp;
-
- while (true){
-
- if (retryAttempt >= retryMax){
- g_info << "ERROR: has retried this operation " << retryAttempt
- << " times, failing!" << endl;
- return NDBT_FAILED;
- }
-
- pUpdTrans = pNdb->startTransaction();
- if (pUpdTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
-
- if ((pUpdOp = pOrgOp->takeOverForUpdate(pUpdTrans)) == NULL){
- ERR(pNdb->getNdbError());
- return NDBT_FAILED;
- }
-
- int updates = calc.getUpdatesValue(&row) + 1;
- int id = calc.getIdValue(&row);
-
- // Set a calculated value for each non-PK attribute in this table
- for (int a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == false){
- if(setValueForAttr(pUpdOp, a, id, updates ) != 0){
- ERR(pUpdTrans->getNdbError());
- pNdb->closeTransaction(pUpdTrans);
- return NDBT_FAILED;
- }
- }
- }
- check = pUpdTrans->execute( Commit );
- if(check == -1 ) {
- const NdbError err = pUpdTrans->getNdbError();
- pNdb->closeTransaction(pUpdTrans);
-
- ERR(err);
- if(err.code == 499 || err.code == 250){
- return RESTART_SCAN;
- }
-
- switch(err.status){
- case NdbError::Success:
- g_info << "ERROR: NdbError reports success when transcaction failed"
- << endl;
- return NDBT_FAILED;
- break;
-
- case NdbError::TemporaryError:
- NdbSleep_MilliSleep(50+50*retryAttempt);
- retryAttempt++;
- continue;
- break;
-
- case NdbError::UnknownResult:
- return NDBT_FAILED;
- break;
-
- default:
- case NdbError::PermanentError:
- switch (err.code){
- case 499:
- case 250:
- return NDBT_TEMPORARY;
-
- default:
- return NDBT_FAILED;
- break;
- }
- break;
- }
- }
- else{
- pNdb->closeTransaction(pUpdTrans);
- }
-
- return NDBT_OK;
- }
- return NDBT_FAILED;
-}
-
int
HugoTransactions::scanUpdateRecords(Ndb* pNdb,
int records,
@@ -320,9 +215,12 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
int records,
int abortPercent,
int parallelism){
+#if 1
+ return scanUpdateRecords3(pNdb, records, abortPercent, 1);
+#else
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
NdbOperation *pOp;
@@ -371,7 +269,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
}
// Read all attributes from this table
- for(int a=0; a<tab.getNoOfColumns(); a++){
+ for(a=0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -472,9 +370,9 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
return NDBT_OK;
}
return NDBT_FAILED;
+#endif
}
-
// Scan all records exclusive and update
// them batched by asking nextScanResult to
// give us all cached records before fetching new
@@ -484,9 +382,12 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
int records,
int abortPercent,
int parallelism){
+#if 1
+ return scanUpdateRecords3(pNdb, records, abortPercent, parallelism);
+#else
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
NdbOperation *pOp;
@@ -535,7 +436,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
}
// Read all attributes from this table
- for(int a=0; a<tab.getNoOfColumns(); a++){
+ for(a=0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -642,35 +543,9 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
return NDBT_OK;
}
return NDBT_FAILED;
+#endif
}
-int
-HugoTransactions::addRowToUpdate(Ndb* pNdb,
- NdbConnection* pUpdTrans,
- NdbOperation* pOrgOp){
-
- int updates = calc.getUpdatesValue(&row) + 1;
- int r = calc.getIdValue(&row);
-
- NdbOperation* pUpdOp = pOrgOp->takeOverForUpdate(pUpdTrans);
- if (pUpdOp == NULL){
- ERR(pNdb->getNdbError());
- return NDBT_FAILED;
- }
-
- for(int a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == false){
- if(setValueForAttr(pUpdOp, a, r, updates ) != 0){
- ERR(pUpdTrans->getNdbError());
- pNdb->closeTransaction(pUpdTrans);
- return NDBT_FAILED;
- }
- }
- }
- return NDBT_OK;
-}
-
-
int
HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
int records,
@@ -678,7 +553,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
int parallelism){
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
NdbScanOperation *pOp;
@@ -717,7 +592,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
}
// Read all attributes from this table
- for(int a=0; a<tab.getNoOfColumns(); a++){
+ for(a=0; a<tab.getNoOfColumns(); a++){
if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -759,8 +634,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
}
const int updates = calc.getUpdatesValue(&row) + 1;
const int r = calc.getIdValue(&row);
-
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pUp, a, r, updates ) != 0){
ERR(pTrans->getNdbError());
@@ -780,7 +654,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
if(check != -1){
check = pTrans->execute(Commit);
- pTrans->releaseCompletedOperations();
+ pTrans->restart();
}
const NdbError err = pTrans->getNdbError();
@@ -794,7 +668,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
return NDBT_FAILED;
}
}
-
+
const NdbError err = pTrans->getNdbError();
if( check == -1 ) {
pNdb->closeTransaction(pTrans);
@@ -819,12 +693,14 @@ HugoTransactions::loadTable(Ndb* pNdb,
int records,
int batch,
bool allowConstraintViolation,
- int doSleep){
- int check;
+ int doSleep,
+ bool oneTrans){
+ int check, a;
int retryAttempt = 0;
int retryMax = 5;
NdbConnection *pTrans;
NdbOperation *pOp;
+ bool first_batch = true;
const int org = batch;
const int cols = tab.getNoOfColumns();
@@ -833,7 +709,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
batch = (batch * 256); // -> 512 -> 65536k per commit
batch = batch/bytes; //
batch = batch == 0 ? 1 : batch;
-
+
if(batch != org){
g_info << "batch = " << org << " rowsize = " << bytes
<< " -> rows/commit = " << batch << endl;
@@ -841,7 +717,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
g_info << "|- Inserting records..." << endl;
for (int c=0 ; c<records ; ){
-
+ bool closeTrans;
if (retryAttempt >= retryMax){
g_info << "Record " << c << " could not be inserted, has retried "
<< retryAttempt << " times " << endl;
@@ -852,19 +728,22 @@ HugoTransactions::loadTable(Ndb* pNdb,
if (doSleep > 0)
NdbSleep_MilliSleep(doSleep);
- pTrans = pNdb->startTransaction();
-
- if (pTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
+ if (first_batch || !oneTrans) {
+ first_batch = false;
+ pTrans = pNdb->startTransaction();
+
+ if (pTrans == NULL) {
+ const NdbError err = pNdb->getNdbError();
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ continue;
+ }
+ ERR(err);
+ return NDBT_FAILED;
}
- ERR(err);
- return NDBT_FAILED;
}
for(int b = 0; b < batch && c+b<records; b++){
@@ -884,7 +763,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
}
// Set a calculated value for each attribute in this table
- for (int a = 0; a<tab.getNoOfColumns(); a++){
+ for (a = 0; a<tab.getNoOfColumns(); a++){
if(setValueForAttr(pOp, a, c+b, 0 ) != 0){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -894,7 +773,13 @@ HugoTransactions::loadTable(Ndb* pNdb,
}
// Execute the transaction and insert the record
- check = pTrans->execute( Commit );
+ if (!oneTrans || (c + batch) >= records) {
+ closeTrans = true;
+ check = pTrans->execute( Commit );
+ } else {
+ closeTrans = false;
+ check = pTrans->execute( NoCommit );
+ }
if(check == -1 ) {
const NdbError err = pTrans->getNdbError();
pNdb->closeTransaction(pTrans);
@@ -937,8 +822,10 @@ HugoTransactions::loadTable(Ndb* pNdb,
break;
}
}
- else{
- pNdb->closeTransaction(pTrans);
+ else{
+ if (closeTrans) {
+ pNdb->closeTransaction(pTrans);
+ }
}
// Step to next record
@@ -951,7 +838,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
int
HugoTransactions::fillTable(Ndb* pNdb,
int batch){
- int check;
+ int check, a, b;
int retryAttempt = 0;
int retryMax = 5;
NdbConnection *pTrans;
@@ -982,7 +869,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b = 0; b < batch; b++){
+ for(b = 0; b < batch; b++){
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
@@ -999,7 +886,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
}
// Set a calculated value for each attribute in this table
- for (int a = 0; a<tab.getNoOfColumns(); a++){
+ for (a = 0; a<tab.getNoOfColumns(); a++){
if(setValueForAttr(pOp, a, c+b, 0 ) != 0){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -1138,7 +1025,7 @@ int
HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
int records) {
int myXXXXX = XXXXX++;
-
+ Uint32 i;
const char function[] = "HugoTransactions::eventOperation: ";
struct receivedEvent* recInsertEvent;
NdbAutoObjArrayPtr<struct receivedEvent>
@@ -1155,7 +1042,7 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
stats.n_duplicates = 0;
stats.n_inconsistent_gcis = 0;
- for (int i = 0; i < records; i++) {
+ for (i = 0; i < records; i++) {
recInsertEvent[i].pk = 0xFFFFFFFF;
recInsertEvent[i].count = 0;
recInsertEvent[i].event = 0xFFFFFFFF;
@@ -1263,7 +1150,7 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
}
g_info << "overrun " << overrun << " pk " << pk;
- for (int i = 1; i < noEventColumnName; i++) {
+ for (i = 1; i < noEventColumnName; i++) {
if (recAttr[i]->isNULL() >= 0) { // we have a value
g_info << " post[" << i << "]=";
if (recAttr[i]->isNULL() == 0) // we have a non-null value
@@ -1306,7 +1193,7 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
if (stats.n_updates > 0) {
stats.n_consecutive++;
}
- for (Uint32 i = 0; i < (Uint32)records/3; i++) {
+ for (i = 0; i < (Uint32)records/3; i++) {
if (recInsertEvent[i].pk != i) {
stats.n_consecutive ++;
ndbout << "missing insert pk " << i << endl;
@@ -1345,7 +1232,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
NdbOperation *pOp;
@@ -1397,7 +1284,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -1408,7 +1295,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -1471,13 +1358,13 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a, b;
NdbConnection *pTrans;
NdbOperation *pOp;
allocRows(batch);
- g_info << "|- Updating records..." << endl;
+ g_info << "|- Updating records (batch=" << batch << ")..." << endl;
while (r < records){
if (retryAttempt >= retryMax){
@@ -1503,7 +1390,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b = 0; b<batch && (r+b) < records; b++){
+ for(b = 0; b<batch && (r+b) < records; b++){
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
@@ -1519,7 +1406,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -1530,7 +1417,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -1556,7 +1443,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b = 0; b<batch && (b+r)<records; b++){
+ for(b = 0; b<batch && (b+r)<records; b++){
if (calc.verifyRowValues(rows[b]) != 0){
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -1579,7 +1466,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pUpdOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -1589,7 +1476,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
}
}
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){
ERR(pTrans->getNdbError());
@@ -1639,7 +1526,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
while (r < records){
@@ -1679,7 +1566,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r) != 0){
ERR(pTrans->getNdbError());
@@ -1690,7 +1577,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
// Read update value
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (calc.isUpdateCol(a) == true){
if((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
@@ -1735,7 +1622,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
// PKs
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pUpdOp, a, r) != 0){
ERR(pTrans->getNdbError());
@@ -1746,7 +1633,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
// Update col
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if ((tab.getColumn(a)->getPrimaryKey() == false) &&
(calc.isUpdateCol(a) == true)){
@@ -1763,7 +1650,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
// Remaining attributes
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if ((tab.getColumn(a)->getPrimaryKey() == false) &&
(calc.isUpdateCol(a) == false)){
if(setValueForAttr(pUpdOp, a, r, updates ) != 0){
@@ -1818,7 +1705,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
NdbOperation *pOp;
@@ -1863,7 +1750,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r) != 0){
ERR(pTrans->getNdbError());
@@ -1933,7 +1820,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a, b;
NdbConnection *pTrans;
NdbOperation *pOp;
@@ -1970,7 +1857,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b = 0; (b<lockBatch) && (r+b < records); b++){
+ for(b = 0; (b<lockBatch) && (r+b < records); b++){
pOp = pTrans->getNdbOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
@@ -1986,7 +1873,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -1997,7 +1884,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -2080,10 +1967,10 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans;
NdbOperation *pOp;
- NdbScanOperation *sOp;
+ NdbIndexScanOperation *sOp;
NdbResultSet * rs;
const NdbDictionary::Index* pIndex
@@ -2134,7 +2021,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
}
check = pOp->readTuple();
} else {
- pOp = sOp = pTrans->getNdbScanOperation(idxName, tab.getName());
+ pOp = sOp = pTrans->getNdbIndexScanOperation(idxName, tab.getName());
if (sOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -2152,7 +2039,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -2163,7 +2050,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -2231,7 +2118,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a, b;
NdbConnection *pTrans;
NdbOperation *pOp;
NdbScanOperation * sOp;
@@ -2268,7 +2155,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b = 0; b<batchsize && (b+r)<records; b++){
+ for(b = 0; b<batchsize && (b+r)<records; b++){
if(!ordered){
pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
if (pOp == NULL) {
@@ -2284,7 +2171,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
} else {
- pOp = sOp = pTrans->getNdbScanOperation(idxName, tab.getName());
+ pOp = sOp = pTrans->getNdbIndexScanOperation(idxName, tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -2296,7 +2183,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
// Define primary keys
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -2307,7 +2194,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
// Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((rows[b]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
@@ -2338,7 +2225,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b = 0; b<batchsize && (b+r)<records; b++){
+ for(b = 0; b<batchsize && (b+r)<records; b++){
if (calc.verifyRowValues(rows[b]) != 0){
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -2367,7 +2254,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
if(!ordered){
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == true){
if(equalForAttr(pUpdOp, a, r+b) != 0){
ERR(pTrans->getNdbError());
@@ -2378,7 +2265,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
}
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if (tab.getColumn(a)->getPrimaryKey() == false){
if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){
ERR(pTrans->getNdbError());
@@ -2415,4 +2302,4 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
return NDBT_OK;
}
-
+template class Vector<NDBT_ResultRow*>;
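The HugoTransactions changes move every scan from the old connection-level calls (openScanRead/openScanReadCommitted, executeScan, nextScanResult) to the NdbScanOperation interface: fetch a scan operation from the transaction, call readTuples() with a lock mode to get an NdbResultSet, execute the transaction with NoCommit, then iterate rs->nextResult(true). A condensed sketch of the new shape follows, using only calls that appear in this patch; error handling and column reads are trimmed, and exact readTuples() signatures vary between NDB versions.

    #include <NdbApi.hpp>            // NDB API umbrella header in this tree

    // Count the rows of 'tabName' with the post-4.1 scan interface.
    // Returns the row count, or -1 on any error (error details omitted).
    int scanRowCount(Ndb* pNdb, const char* tabName) {
      NdbConnection* pTrans = pNdb->startTransaction();
      if (pTrans == NULL)
        return -1;

      NdbScanOperation* pOp = pTrans->getNdbScanOperation(tabName);
      if (pOp == NULL) { pNdb->closeTransaction(pTrans); return -1; }

      // Old code: pOp->openScanRead(parallelism) followed by pTrans->executeScan().
      NdbResultSet* rs = pOp->readTuples(NdbScanOperation::LM_Read);
      if (rs == 0) { pNdb->closeTransaction(pTrans); return -1; }

      if (pTrans->execute(NoCommit) == -1) {
        pNdb->closeTransaction(pTrans);
        return -1;
      }

      int rows = 0, eof;
      while ((eof = rs->nextResult(true)) == 0)   // old code: pTrans->nextScanResult()
        rows++;

      pNdb->closeTransaction(pTrans);
      return (eof == -1) ? -1 : rows;
    }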
diff --git a/ndb/test/src/NDBT_Table.cpp b/ndb/test/src/NDBT_Table.cpp
index 485377e690a..d283cdf5912 100644
--- a/ndb/test/src/NDBT_Table.cpp
+++ b/ndb/test/src/NDBT_Table.cpp
@@ -19,35 +19,6 @@
#include <NDBT.hpp>
class NdbOut&
-operator <<(class NdbOut& ndbout, const NDBT_Attribute & attr){
-
- NdbDictionary::Column::Type type = attr.getType();
-
- ndbout << attr.getName() << " " << type;
-
- switch(type){
- case NdbDictionary::Column::Decimal:
- ndbout << "(" << attr.getScale() << ", " << attr.getPrecision() << ")";
- break;
- default:
- break;
- }
-
- if(attr.getLength() != 1)
- ndbout << "[" << attr.getLength() << "]";
-
- if(attr.getNullable())
- ndbout << " NULL";
- else
- ndbout << " NOT NULL";
-
- if(attr.getPrimaryKey())
- ndbout << " PRIMARY KEY";
-
- return ndbout;
-}
-
-class NdbOut&
operator <<(class NdbOut& ndbout, const NDBT_Table & tab)
{
ndbout << "-- " << tab.getName() << " --" << endl;
diff --git a/ndb/test/src/NDBT_Tables.cpp b/ndb/test/src/NDBT_Tables.cpp
index 548e755a3fb..ff6db3e892c 100644
--- a/ndb/test/src/NDBT_Tables.cpp
+++ b/ndb/test/src/NDBT_Tables.cpp
@@ -297,7 +297,7 @@ NDBT_Table T14("T14", sizeof(T14Attribs)/sizeof(NDBT_Attribute), T14Attribs);
*/
static
const
-NDBT_Attribute C2_PORTS_Attribs[] = {
+NDBT_Attribute I1_Cols[] = {
NDBT_Attribute("ID", NdbDictionary::Column::Unsigned, true),
NDBT_Attribute("PORT", NdbDictionary::Column::Char, 16, true),
NDBT_Attribute("ACCESSNODE", NdbDictionary::Column::Char, 16, true),
@@ -310,11 +310,19 @@ NDBT_Attribute C2_PORTS_Attribs[] = {
};
static
-NDBT_Table C2_PORTS("C2_PORTS", sizeof(C2_PORTS_Attribs)/sizeof(NDBT_Attribute), C2_PORTS_Attribs);
+const
+char* I1_Indexes[] = {
+ "UNIQUE", "ID", "PORT", "ACCESSNODE", "POP", "PORTSTATE", 0,
+ 0
+};
+
+static
+NDBT_Table I1("I1", sizeof(I1_Cols)/sizeof(NDBT_Attribute), I1_Cols
+ );// ,I1_Indexes);
static
const
-NDBT_Attribute C2_SERVICES_Attribs[] = {
+NDBT_Attribute I2_Cols[] = {
NDBT_Attribute("ID", NdbDictionary::Column::Unsigned, true),
NDBT_Attribute("PORT", NdbDictionary::Column::Char, 16, true),
NDBT_Attribute("ACCESSNODE", NdbDictionary::Column::Char, 16, true),
@@ -331,12 +339,20 @@ NDBT_Attribute C2_SERVICES_Attribs[] = {
NDBT_Attribute("UPDATES", NdbDictionary::Column::Unsigned)
};
+const
+char* I2_Indexes[] = {
+ "ORDERED", "CUSTOMER_ID", 0,
+ "ORDERED", "NUM_IP", 0,
+ 0
+};
+
static
-NDBT_Table C2_SERVICES("C2_SERVICES", sizeof(C2_SERVICES_Attribs)/sizeof(NDBT_Attribute), C2_SERVICES_Attribs);
+NDBT_Table I2("I2", sizeof(I2_Cols)/sizeof(NDBT_Attribute), I2_Cols
+ );//, I2_Indexes);
static
const
-NDBT_Attribute C2_CLIENTS_Attribs[] = {
+NDBT_Attribute I3_Cols[] = {
NDBT_Attribute("ID", NdbDictionary::Column::Unsigned, true),
NDBT_Attribute("PORT", NdbDictionary::Column::Char, 16), // SI2
NDBT_Attribute("ACCESSNODE", NdbDictionary::Column::Char, 16), // SI2
@@ -355,8 +371,17 @@ NDBT_Attribute C2_CLIENTS_Attribs[] = {
NDBT_Attribute("UPDATES", NdbDictionary::Column::Unsigned)
};
+const
+char* I3_Indexes[] = {
+ "UNIQUE", "ID", 0,
+ "ORDERED", "MAC", 0,
+ "ORDERED", "GW", 0,
+ 0
+};
+
static
-NDBT_Table C2_CLIENTS("C2_CLIENTS", sizeof(C2_CLIENTS_Attribs)/sizeof(NDBT_Attribute), C2_CLIENTS_Attribs);
+NDBT_Table I3("I3", sizeof(I3_Cols)/sizeof(NDBT_Attribute), I3_Cols
+ ); // ,I3_Indexes);
// Define array with pointer to all tables
static
@@ -377,10 +402,23 @@ NDBT_Table *test_tables[]=
&T12,
&T13,
&T14,
- &C2_PORTS,
- &C2_SERVICES,
- &C2_CLIENTS
+ &I1,
+ &I2,
+ &I3
+};
+
+struct NDBT_IndexList {
+ const char * m_table;
+ const char ** m_indexes;
+};
+static
+const
+NDBT_IndexList indexes[] = {
+ "I1", I1_Indexes,
+ "I2", I2_Indexes,
+ "I3", I3_Indexes,
+ 0, 0
};
static
@@ -430,7 +468,7 @@ NDBT_Table F2("F2", sizeof(F2Attribs)/sizeof(NDBT_Attribute), F2Attribs);
/* F3
*
- * Error: Too many primary keys defined, 16 is max?
+ * Error: Too many primary keys defined, 32 is max
*/
static
const
@@ -452,10 +490,26 @@ NDBT_Attribute F3Attribs[] = {
NDBT_Attribute("KOL15", NdbDictionary::Column::Unsigned, 1, true),
NDBT_Attribute("KOL16", NdbDictionary::Column::Unsigned, 1, true),
NDBT_Attribute("KOL17", NdbDictionary::Column::Unsigned, 1, true),
- NDBT_Attribute("KOL20", NdbDictionary::Column::Unsigned),
- NDBT_Attribute("KOL30", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("KOL18", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL19", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL20", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL21", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL22", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL23", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL24", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL25", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL26", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL27", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL28", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL29", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL30", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL31", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL32", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("KOL33", NdbDictionary::Column::Unsigned, 1, true),
NDBT_Attribute("KOL40", NdbDictionary::Column::Unsigned),
- NDBT_Attribute("KOL50", NdbDictionary::Column::Unsigned)
+ NDBT_Attribute("KOL50", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("KOL60", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("KOL70", NdbDictionary::Column::Unsigned)
};
static
@@ -678,17 +732,18 @@ NdbDictionary::Table*
NDBT_Tables::getTable(const char* _nam){
// Search tables list to find a table
NDBT_Table* tab = NULL;
- for (int i=0; i<numTestTables; i++){
+ int i;
+ for (i=0; i<numTestTables; i++){
if (strcmp(test_tables[i]->getName(), _nam) == 0){
return test_tables[i];
}
}
- for (int i=0; i<numFailTables; i++){
+ for (i=0; i<numFailTables; i++){
if (strcmp(fail_tables[i]->getName(), _nam) == 0){
return fail_tables[i];
}
}
- for (int i=0; i<numUtilTables; i++){
+ for (i=0; i<numUtilTables; i++){
if (strcmp(util_tables[i]->getName(), _nam) == 0){
return util_tables[i];
}
@@ -746,26 +801,13 @@ NDBT_Tables::getNumTables(){
int
NDBT_Tables::createAllTables(Ndb* pNdb, bool _temp, bool existsOk){
-
+
for (int i=0; i < NDBT_Tables::getNumTables(); i++){
-
- const NdbDictionary::Table* tab = NDBT_Tables::getTable(i);
- if (tab == NULL){
- return NDBT_ProgramExit(NDBT_FAILED);
- }
-
- // Set temporary table
- NdbDictionary::Table tmpTab(* tab);
- tmpTab.setStoredTable(_temp? 0 : 1);
-
- int r = pNdb->getDictionary()->createTable(tmpTab);
- int err = pNdb->getDictionary()->getNdbError().code;
- if(r == -1){
- if (existsOk && err == 721)
- ;
- else {
- return NDBT_FAILED;
- }
+ pNdb->getDictionary()->dropTable(NDBT_Tables::getTable(i)->getName());
+ int ret= createTable(pNdb,
+ NDBT_Tables::getTable(i)->getName(), _temp, existsOk);
+ if(ret){
+ return ret;
}
}
return NDBT_OK;
@@ -777,7 +819,8 @@ NDBT_Tables::createAllTables(Ndb* pNdb){
}
int
-NDBT_Tables::createTable(Ndb* pNdb, const char* _name, bool _temp){
+NDBT_Tables::createTable(Ndb* pNdb, const char* _name, bool _temp,
+ bool existsOk){
const NdbDictionary::Table* tab = NDBT_Tables::getTable(_name);
if (tab == NULL){
@@ -787,10 +830,61 @@ NDBT_Tables::createTable(Ndb* pNdb, const char* _name, bool _temp){
return NDBT_WRONGARGS;
}
- NdbDictionary::Table tmpTab(* tab);
- tmpTab.setStoredTable(_temp ? 0 : 1);
+ int r = 0;
+ do {
+ NdbDictionary::Table tmpTab(* tab);
+ tmpTab.setStoredTable(_temp ? 0 : 1);
+
+ r = pNdb->getDictionary()->createTable(tmpTab);
+ if(r == -1){
+ if(!existsOk){
+ ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl;
+ break;
+ }
+ if(pNdb->getDictionary()->getNdbError().code != 721){
+ ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl;
+ break;
+ }
+ r = 0;
+ }
+
+ Uint32 i = 0;
+ for(i = 0; indexes[i].m_table != 0; i++){
+ if(strcmp(indexes[i].m_table, _name) != 0)
+ continue;
+ Uint32 j = 0;
+ while(indexes[i].m_indexes[j] != 0){
+ NdbDictionary::Index tmpIndx;
+ BaseString name;
+ name.assfmt("%s$NDBT_IDX%d", _name, j);
+ tmpIndx.setName(name.c_str());
+ tmpIndx.setTable(_name);
+ bool logging = !_temp;
+ if(strcmp(indexes[i].m_indexes[j], "ORDERED") == 0){
+ logging = false;
+ tmpIndx.setType(NdbDictionary::Index::OrderedIndex);
+ } else if(strcmp(indexes[i].m_indexes[j], "UNIQUE") == 0){
+ tmpIndx.setType(NdbDictionary::Index::UniqueHashIndex);
+ } else {
+ ndbout << "Unknown index type";
+ abort();
+ }
+ tmpIndx.setLogging(logging);
+
+ j++;
+ while(indexes[i].m_indexes[j] != 0){
+ tmpIndx.addIndexColumn(indexes[i].m_indexes[j]);
+ j++;
+ }
+ j++;
+ if(pNdb->getDictionary()->createIndex(tmpIndx) != 0){
+ ndbout << pNdb->getDictionary()->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ }
+ }
+ } while(false);
- int r = pNdb->getDictionary()->createTable(tmpTab);
return r;
}
@@ -815,7 +909,7 @@ NDBT_Tables::dropAllTables(Ndb* pNdb){
int
NDBT_Tables::print(const char * _name){
- const NdbDictionary::Table* tab = NDBT_Tables::getTable(_name);
+ const NDBT_Table * tab = (const NDBT_Table*)NDBT_Tables::getTable(_name);
if (tab == NULL){
ndbout << "Could not print table " << _name
<< ", it doesn't exist in list of tables "
@@ -835,7 +929,7 @@ NDBT_Tables::printAll(){
if (tab == NULL){
abort();
}
- ndbout << (* tab) << endl;
+ ndbout << (* (NDBT_Table*)tab) << endl;
}
return NDBT_OK;
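
Note on the hunks above: the new I1/I2/I3 tables carry their index definitions as flat, zero-terminated string arrays. Each entry begins with the index type ("UNIQUE" or "ORDERED"), is followed by the column names, and is closed by a 0; a second trailing 0 ends the whole list, and NDBT_Tables::createTable() walks that layout after the table itself is created. A minimal standalone sketch of the same walk, assuming only the array convention shown above (the array and names here are hypothetical, not part of the commit):

#include <cstdio>
#include <cstring>

// Same layout as I1_Indexes above: type, column..., 0, ..., 0
static const char* Example_Indexes[] = {
  "UNIQUE",  "ID", "PORT", 0,
  "ORDERED", "MAC", 0,
  0
};

int main(){
  unsigned j = 0;
  int ordinal = 0;
  while(Example_Indexes[j] != 0){
    const char* type = Example_Indexes[j++];
    bool ordered = (strcmp(type, "ORDERED") == 0);
    printf("index %d: %s (", ordinal++, ordered ? "ordered" : "unique hash");
    while(Example_Indexes[j] != 0){
      printf("%s%s", Example_Indexes[j], Example_Indexes[j + 1] ? ", " : "");
      j++;
    }
    printf(")\n");
    j++;               // step past the 0 that closed this index entry
  }
  return 0;
}

The double j++ mirrors createTable(): one step consumes the type string, the other skips the 0 that terminates each per-index column list.
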
diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp
index 4cd2c96486b..b1691c379a9 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/ndb/test/src/NDBT_Test.cpp
@@ -132,6 +132,17 @@ void NDBT_Context::setProperty(const char* _name, Uint32 _val){
assert(b == true);
NdbMutex_Unlock(propertyMutexPtr);
}
+void
+NDBT_Context::decProperty(const char * name){
+ NdbMutex_Lock(propertyMutexPtr);
+ Uint32 val = 0;
+ if(props.get(name, &val)){
+ assert(val > 0);
+ props.put(name, (val - 1), true);
+ }
+ NdbCondition_Broadcast(propertyCondPtr);
+ NdbMutex_Unlock(propertyMutexPtr);
+}
void NDBT_Context::setProperty(const char* _name, const char* _val){
NdbMutex_Lock(propertyMutexPtr);
@@ -336,24 +347,24 @@ NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(NDBT_TestSuite* psuite,
NDBT_TestCaseImpl1::~NDBT_TestCaseImpl1(){
NdbCondition_Destroy(waitThreadsCondPtr);
NdbMutex_Destroy(waitThreadsMutexPtr);
-
- for(size_t i = 0; i < initializers.size(); i++)
+ size_t i;
+ for(i = 0; i < initializers.size(); i++)
delete initializers[i];
initializers.clear();
- for(size_t i = 0; i < verifiers.size(); i++)
+ for(i = 0; i < verifiers.size(); i++)
delete verifiers[i];
verifiers.clear();
- for(size_t i = 0; i < finalizers.size(); i++)
+ for(i = 0; i < finalizers.size(); i++)
delete finalizers[i];
finalizers.clear();
- for(size_t i = 0; i < steps.size(); i++)
+ for(i = 0; i < steps.size(); i++)
delete steps[i];
steps.clear();
results.clear();
- for(size_t i = 0; i < testTables.size(); i++)
+ for(i = 0; i < testTables.size(); i++)
delete testTables[i];
testTables.clear();
- for(size_t i = 0; i < testResults.size(); i++)
+ for(i = 0; i < testResults.size(); i++)
delete testResults[i];
testResults.clear();
@@ -487,7 +498,8 @@ void NDBT_TestCaseImpl1::waitSteps(){
waitThreadsMutexPtr);
unsigned completedSteps = 0;
- for(unsigned i=0; i<steps.size(); i++){
+ unsigned i;
+ for(i=0; i<steps.size(); i++){
if (results[i] != NORESULT){
completedSteps++;
if (results[i] == NDBT_OK)
@@ -501,7 +513,7 @@ void NDBT_TestCaseImpl1::waitSteps(){
NdbMutex_Unlock(waitThreadsMutexPtr);
void *status;
- for(unsigned i=0; i<steps.size();i++){
+ for(i=0; i<steps.size();i++){
NdbThread_WaitFor(threads[i], &status);
NdbThread_Destroy(&threads[i]);
}
@@ -633,12 +645,12 @@ int NDBT_TestCaseImpl1::runSteps(NDBT_Context* ctx){
numStepsOk = 0;
numStepsFail = 0;
numStepsCompleted = 0;
-
- for (unsigned i = 0; i < steps.size(); i++)
+ unsigned i;
+ for (i = 0; i < steps.size(); i++)
startStepInThread(i, ctx);
waitSteps();
- for(unsigned i = 0; i < steps.size(); i++)
+ for(i = 0; i < steps.size(); i++)
if (results[i] != NDBT_OK)
res = NDBT_FAILED;
return res;
@@ -808,18 +820,19 @@ void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab,
const NdbDictionary::Table* pTab2 = pDict->getTable(pTab->getName());
if (createTable == true){
- if (pTab2 != 0 && !pTab->equal(* pTab2)){
+ if(pTab2 != 0 && pDict->dropTable(pTab->getName()) != 0){
numTestsFail++;
numTestsExecuted++;
- g_err << "ERROR0: Failed to create table " << pTab->getName() << endl;
+ g_err << "ERROR0: Failed to drop table " << pTab->getName() << endl;
tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
continue;
}
-
- if(pTab2 == 0 && pDict->createTable(* pTab) != 0){
+
+ if(NDBT_Tables::createTable(ndb, pTab->getName()) != 0){
numTestsFail++;
numTestsExecuted++;
- g_err << "ERROR1: Failed to create table " << pTab->getName() << endl;
+ g_err << "ERROR1: Failed to create table " << pTab->getName()
+ << pDict->getNdbError() << endl;
tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
continue;
}
@@ -994,6 +1007,7 @@ int NDBT_TestSuite::execute(int argc, const char** argv){
res = executeAll(_testname);
} else {
testSuiteTimer.doStart();
+ Ndb ndb("TEST_DB"); ndb.init();
for(int i = optind; i<argc; i++){
executeOne(argv[i], _testname);
}
@@ -1091,20 +1105,20 @@ void NDBT_TestCaseImpl1::print(){
abort();
}
}
-
- for(unsigned i=0; i<initializers.size(); i++){
+ unsigned i;
+ for(i=0; i<initializers.size(); i++){
ndbout << "Initializers[" << i << "]: " << endl;
initializers[i]->print();
}
- for(unsigned i=0; i<steps.size(); i++){
+ for(i=0; i<steps.size(); i++){
ndbout << "Step[" << i << "]: " << endl;
steps[i]->print();
}
- for(unsigned i=0; i<verifiers.size(); i++){
+ for(i=0; i<verifiers.size(); i++){
ndbout << "Verifier[" << i << "]: " << endl;
verifiers[i]->print();
}
- for(unsigned i=0; i<finalizers.size(); i++){
+ for(i=0; i<finalizers.size(); i++){
ndbout << "Finalizer[" << i << "]: " << endl;
finalizers[i]->print();
}
@@ -1116,6 +1130,11 @@ void NDBT_Step::print(){
}
-
-
-
+template class Vector<NDBT_TestCase*>;
+template class Vector<NDBT_TestCaseResult*>;
+template class Vector<NDBT_Step*>;
+template class Vector<NdbThread*>;
+template class Vector<NDBT_Verifier*>;
+template class Vector<NDBT_Initializer*>;
+template class Vector<NDBT_Finalizer*>;
+template class Vector<const NdbDictionary::Table*>;
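
The new NDBT_Context::decProperty() above pairs a mutex-protected decrement with a condition broadcast so other test steps can sleep until a shared counter drains. A standalone sketch of the same handshake using plain pthreads; treating NdbMutex/NdbCondition as thin wrappers over these primitives is an assumption, and the helper names are hypothetical:

#include <pthread.h>
#include <assert.h>

// Shared counter guarded the same way NDBT_Context guards its property map.
struct Counter {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  unsigned        val;
};

static Counter g_running = { PTHREAD_MUTEX_INITIALIZER,
                             PTHREAD_COND_INITIALIZER, 0 };

// Mirrors decProperty(): decrement under the lock, then wake all waiters.
static void dec(Counter* c){
  pthread_mutex_lock(&c->mutex);
  assert(c->val > 0);
  c->val--;
  pthread_cond_broadcast(&c->cond);
  pthread_mutex_unlock(&c->mutex);
}

// A consumer sleeps until the counter has drained to zero.
static void wait_zero(Counter* c){
  pthread_mutex_lock(&c->mutex);
  while(c->val != 0)
    pthread_cond_wait(&c->cond, &c->mutex);
  pthread_mutex_unlock(&c->mutex);
}

Usage would be symmetric: each worker step calls dec(&g_running) when it finishes, while a coordinator blocks in wait_zero(&g_running). The explicit `template class Vector<...>` lines added at the end of the file simply force instantiation of the project's Vector template for the types this translation unit uses.
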
diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp
index 169034e0c07..a40c6ba7d7c 100644
--- a/ndb/test/src/NdbBackup.cpp
+++ b/ndb/test/src/NdbBackup.cpp
@@ -69,17 +69,24 @@ NdbBackup::getFileSystemPathForNode(int _node_id){
/**
* Fetch configuration from management server
*/
- ConfigRetriever cr;
+ ConfigRetriever cr(0, NODE_TYPE_API);
+ ndb_mgm_configuration * p = 0;
- ndb_mgm_configuration * p = cr.getConfig(host, port, 0);
- if(p == 0){
- const char * s = cr.getErrorString();
- if(s == 0)
- s = "No error given!";
+ BaseString tmp; tmp.assfmt("%s:%d", host.c_str(), port);
+ NdbMgmHandle handle = ndb_mgm_create_handle();
+ if(handle == 0 || ndb_mgm_connect(handle, tmp.c_str()) != 0 ||
+ (p = ndb_mgm_get_configuration(handle, 0)) == 0){
- ndbout << "Could not fetch configuration" << endl;
- ndbout << s << endl;
- return NULL;
+ const char * s = 0;
+ if(p == 0 && handle != 0){
+ s = ndb_mgm_get_latest_error_msg(handle);
+ if(s == 0)
+ s = "No error given!";
+
+ ndbout << "Could not fetch configuration" << endl;
+ ndbout << s << endl;
+ return NULL;
+ }
}
/**
@@ -90,7 +97,8 @@ NdbBackup::getFileSystemPathForNode(int _node_id){
ndbout << "Invalid configuration fetched, DB missing" << endl;
return NULL;
}
- unsigned int type = 123456;
+
+ unsigned int type = NODE_TYPE_DB + 1;
if(iter.get(CFG_TYPE_OF_SECTION, &type) || type != NODE_TYPE_DB){
ndbout <<"type = " << type << endl;
ndbout <<"Invalid configuration fetched, I'm wrong type of node" << endl;
@@ -141,31 +149,18 @@ NdbBackup::execRestore(bool _restore_data,
ndbout << "res: " << res << endl;
-#if 0
- snprintf(buf, 255, "ndb_restore -c \"nodeid=%d;host=%s\" -n %d -b %d %s %s %s/BACKUP/BACKUP-%d",
- ownNodeId,
- addr,
- _node_id,
- _backup_id,
- _restore_data?"-r":"",
- _restore_meta?"-m":"",
- path,
- _backup_id);
-
+ snprintf(buf, 255, "%sndb_restore -c \"host=%s\" -n %d -b %d %s %s .",
+#if 1
+ "",
+#else
+ "valgrind --leak-check=yes -v "
#endif
-
- snprintf(buf, 255, "ndb_restore -c \"nodeid=%d;host=%s\" -n %d -b %d %s %s .",
- ownNodeId,
- addr,
+ addr.c_str(),
_node_id,
_backup_id,
_restore_data?"-r":"",
_restore_meta?"-m":"");
- // path,
- // _backup_id);
-
-
ndbout << "buf: "<< buf <<endl;
res = system(buf);
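
The NdbBackup change above drops the old ConfigRetriever call in favour of an explicit mgmapi handle. A condensed sketch of that connect-and-fetch sequence, assuming the mgmapi and BaseString headers used elsewhere in this tree and with error reporting trimmed (the helper name is hypothetical):

#include <mgmapi.h>
#include <BaseString.hpp>

// Fetch the cluster configuration from host:port, or return 0 on failure.
ndb_mgm_configuration* fetch_config(const char* host, int port){
  BaseString connectstring;
  connectstring.assfmt("%s:%d", host, port);

  NdbMgmHandle handle = ndb_mgm_create_handle();
  if(handle == 0)
    return 0;

  ndb_mgm_configuration* conf = 0;
  if(ndb_mgm_connect(handle, connectstring.c_str()) == 0)
    conf = ndb_mgm_get_configuration(handle, 0);   // version argument as used above

  // On failure ndb_mgm_get_latest_error_msg(handle) gives the reason, as in
  // the hunk above. ndb_mgm_destroy_handle is assumed to exist in this
  // mgmapi version to release the handle.
  ndb_mgm_destroy_handle(&handle);
  return conf;
}
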
diff --git a/ndb/test/src/NdbRestarter.cpp b/ndb/test/src/NdbRestarter.cpp
index b731cccb259..4d6d3ddc001 100644
--- a/ndb/test/src/NdbRestarter.cpp
+++ b/ndb/test/src/NdbRestarter.cpp
@@ -33,13 +33,11 @@
NdbRestarter::NdbRestarter(const char* _addr):
connected(false),
- addr(_addr),
- host(NULL),
port(-1),
handle(NULL),
m_config(0)
{
- if (addr == NULL){
+ if (_addr == NULL){
LocalConfig lcfg;
if(!lcfg.init()){
lcfg.printError();
@@ -48,32 +46,32 @@ NdbRestarter::NdbRestarter(const char* _addr):
return;
}
- if (lcfg.items == 0){
+ if (lcfg.ids.size() == 0){
g_err << "NdbRestarter - No management servers configured in local config file" << endl;
return;
}
- for (int i = 0; i<lcfg.items; i++){
- MgmtSrvrId * m = lcfg.ids[i];
+ for (int i = 0; i<lcfg.ids.size(); i++){
+ MgmtSrvrId * m = &lcfg.ids[i];
switch(m->type){
case MgmId_TCP:
char buf[255];
- snprintf(buf, 255, "%s:%d", m->data.tcp.remoteHost, m->data.tcp.port);
- addr = strdup(buf);
- host = strdup(m->data.tcp.remoteHost);
- port = m->data.tcp.port;
+ snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port);
+ addr.assign(buf);
+ host.assign(m->name.c_str());
+ port = m->port;
+ return;
break;
case MgmId_File:
break;
default:
break;
}
- if (addr != NULL)
- break;
}
+ } else {
+ addr.assign(_addr);
}
-
}
NdbRestarter::~NdbRestarter(){
@@ -398,10 +396,10 @@ NdbRestarter::connect(){
g_err << "handle == NULL" << endl;
return -1;
}
- g_info << "Connecting to mgmsrv at " << addr << endl;
- if (ndb_mgm_connect(handle, addr) == -1) {
+ g_info << "Connecting to mgmsrv at " << addr.c_str() << endl;
+ if (ndb_mgm_connect(handle, addr.c_str()) == -1) {
MGMERR(handle);
- g_err << "Connection to " << addr << " failed" << endl;
+ g_err << "Connection to " << addr.c_str() << " failed" << endl;
return -1;
}
@@ -672,3 +670,5 @@ NdbRestarter::getConfig(){
m_config = ndb_mgm_get_configuration(handle, 0);
return m_config;
}
+
+template class Vector<ndb_mgm_node_state>;
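
NdbRestarter now resolves its management server from LocalConfig's ids vector and BaseString members instead of the old items/char* pair. A small sketch of the same lookup, assuming the LocalConfig/MgmtSrvrId fields visible in this hunk (ids, type, name, port) and an include path matching the one used in waiter.cpp below; the helper is hypothetical:

#include "../include/mgmcommon/LocalConfig.hpp"
#include <BaseString.hpp>

// Fill "addr" with host:port of the first TCP management server, if any.
static bool first_mgm_address(BaseString& addr){
  LocalConfig lcfg;
  if(!lcfg.init() || lcfg.ids.size() == 0)
    return false;

  for(unsigned i = 0; i < lcfg.ids.size(); i++){
    MgmtSrvrId* m = &lcfg.ids[i];
    if(m->type == MgmId_TCP){
      addr.assfmt("%s:%d", m->name.c_str(), m->port);
      return true;
    }
  }
  return false;
}
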
diff --git a/ndb/test/src/NdbRestarts.cpp b/ndb/test/src/NdbRestarts.cpp
index f6a85d69fc2..b649a60d98b 100644
--- a/ndb/test/src/NdbRestarts.cpp
+++ b/ndb/test/src/NdbRestarts.cpp
@@ -625,9 +625,9 @@ int restartNFDuringNR(NdbRestarter& _restarter,
const NdbRestarts::NdbRestart* _restart){
myRandom48Init(NdbTick_CurrentMillisecond());
-
+ int i;
const int sz = sizeof(NFDuringNR_codes)/sizeof(NFDuringNR_codes[0]);
- for(int i = 0; i<sz; i++){
+ for(i = 0; i<sz; i++){
int randomId = myRandom48(_restarter.getNumDbNodes());
int nodeId = _restarter.getDbNodeId(randomId);
int error = NFDuringNR_codes[i];
@@ -673,7 +673,7 @@ int restartNFDuringNR(NdbRestarter& _restarter,
if(NdbEnv_GetEnv("USER", buf, 256) == 0 || strcmp(buf, "ejonore") != 0)
return NDBT_OK;
- for(int i = 0; i<sz; i++){
+ for(i = 0; i<sz; i++){
const int randomId = myRandom48(_restarter.getNumDbNodes());
int nodeId = _restarter.getDbNodeId(randomId);
const int error = NFDuringNR_codes[i];
@@ -753,14 +753,14 @@ NRDuringLCP_NonMaster_codes[] = {
int restartNodeDuringLCP(NdbRestarter& _restarter,
const NdbRestarts::NdbRestart* _restart) {
-
+ int i;
// Master
int val = DumpStateOrd::DihMinTimeBetweenLCP;
CHECK(_restarter.dumpStateAllNodes(&val, 1) == 0,
"Failed to set LCP to min value"); // Set LCP to min val
int sz = sizeof(NRDuringLCP_Master_codes)/
sizeof(NRDuringLCP_Master_codes[0]);
- for(int i = 0; i<sz; i++) {
+ for(i = 0; i<sz; i++) {
int error = NRDuringLCP_Master_codes[i];
int masterNodeId = _restarter.getMasterNodeId();
@@ -798,7 +798,7 @@ int restartNodeDuringLCP(NdbRestarter& _restarter,
// NON-Master
sz = sizeof(NRDuringLCP_NonMaster_codes)/
sizeof(NRDuringLCP_NonMaster_codes[0]);
- for(int i = 0; i<sz; i++) {
+ for(i = 0; i<sz; i++) {
int error = NRDuringLCP_NonMaster_codes[i];
int nodeId = getRandomNodeId(_restarter);
diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp
index 3ef31a2f535..9f3f1d33587 100644
--- a/ndb/test/src/UtilTransactions.cpp
+++ b/ndb/test/src/UtilTransactions.cpp
@@ -47,10 +47,14 @@ UtilTransactions::clearTable(Ndb* pNdb,
}
}
+
int
UtilTransactions::clearTable1(Ndb* pNdb,
int records,
int parallelism){
+#if 1
+ return clearTable3(pNdb, records, 1);
+#else
// Scan all records exclusive and delete
// them one by one
int retryAttempt = 0;
@@ -191,12 +195,16 @@ UtilTransactions::clearTable1(Ndb* pNdb,
return NDBT_OK;
}
return NDBT_FAILED;
+#endif
}
int
UtilTransactions::clearTable2(Ndb* pNdb,
int records,
int parallelism){
+#if 1
+ return clearTable3(pNdb, records, parallelism);
+#else
// Scan all records exclusive and delete
// them one by one
int retryAttempt = 0;
@@ -336,6 +344,7 @@ UtilTransactions::clearTable2(Ndb* pNdb,
return NDBT_OK;
}
return NDBT_FAILED;
+#endif
}
int
@@ -403,7 +412,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
if(check != -1){
check = pTrans->execute(Commit);
- pTrans->releaseCompletedOperations();
+ pTrans->restart();
}
err = pTrans->getNdbError();
@@ -451,7 +460,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
int parallelism = 240;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
NDBT_ResultRow row(tab);
while (true){
@@ -477,14 +486,15 @@ UtilTransactions::copyTableData(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- check = pOp->openScanRead(parallelism);
+ NdbResultSet* rs = pOp->readTuples(NdbScanOperation::LM_Read,
+ parallelism);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -508,7 +518,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
}
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -516,39 +526,27 @@ UtilTransactions::copyTableData(Ndb* pNdb,
}
int eof;
- NdbConnection* pInsTrans;
-
- while((eof = pTrans->nextScanResult(true)) == 0){
- pInsTrans = pNdb->startTransaction();
- if (pInsTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
- ERR(err);
- pNdb->closeTransaction(pInsTrans);
- return NDBT_FAILED;
- }
+ while((eof = rs->nextResult(true)) == 0){
do {
insertedRows++;
- if (addRowToInsert(pNdb, pInsTrans, row, destName) != 0){
- pNdb->closeTransaction(pInsTrans);
+ if (addRowToInsert(pNdb, pTrans, row, destName) != 0){
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- } while((eof = pTrans->nextScanResult(false)) == 0);
-
- check = pInsTrans->execute(Commit);
+ } while((eof = rs->nextResult(false)) == 0);
+
+ check = pTrans->execute(Commit);
+ pTrans->restart();
if( check == -1 ) {
- const NdbError err = pInsTrans->getNdbError();
+ const NdbError err = pTrans->getNdbError();
ERR(err);
- pNdb->closeTransaction(pInsTrans);
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pInsTrans);
-
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
-
+
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
@@ -562,30 +560,17 @@ UtilTransactions::copyTableData(Ndb* pNdb,
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
+
pNdb->closeTransaction(pTrans);
-
+
g_info << insertedRows << " rows copied" << endl;
-
+
return NDBT_OK;
}
return NDBT_FAILED;
}
int
-UtilTransactions::addRowToDelete(Ndb* pNdb,
- NdbConnection* pDelTrans,
- NdbOperation* pOrgOp){
-
- NdbOperation* pDelOp = pOrgOp->takeOverForDelete(pDelTrans);
- if (pDelOp == NULL){
- ERR(pNdb->getNdbError());
- return NDBT_FAILED;
- }
- return NDBT_OK;
-}
-
-int
UtilTransactions::addRowToInsert(Ndb* pNdb,
NdbConnection* pInsTrans,
NDBT_ResultRow & row,
@@ -621,101 +606,6 @@ UtilTransactions::addRowToInsert(Ndb* pNdb,
return NDBT_OK;
}
-// Take over one record from pOrgOp and delete it
-int
-UtilTransactions::takeOverAndDeleteRecord(Ndb* pNdb,
- NdbOperation* pOrgOp){
-
- int retryAttempt = 0;
- const int retryMax = 10;
- int check;
- NdbConnection *pDelTrans;
- NdbOperation *pDelOp;
-
- while (true){
-
- if (retryAttempt >= retryMax){
- g_info << "ERROR: has retried this operation " << retryAttempt
- << " times, failing!" << endl;
- return NDBT_FAILED;
- }
-
- pDelTrans = pNdb->startTransaction();
- if (pDelTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
-
- if ((pDelOp = pOrgOp->takeOverForDelete(pDelTrans)) == NULL){
- ERR(pNdb->getNdbError());
- return NDBT_FAILED;
- }
-
-#if 0
- // It should not be necessary to call deleteTuple HERE!!!
- check = pDelOp->deleteTuple();
- if( check == -1 ) {
- ERR(pDelTrans->getNdbError());
- pNdb->closeTransaction(pDelTrans);
- return NDBT_FAILED;
- }
-#endif
-
- check = pDelTrans->execute( Commit );
- if(check == -1 ) {
- const NdbError err = pDelTrans->getNdbError();
- pNdb->closeTransaction(pDelTrans);
-
- ERR(err);
- if(err.code == 250 || err.code == 499)
- return RESTART_SCAN;
-
- switch(err.status){
- case NdbError::Success:
- g_info << "ERROR: NdbError reports success when transcaction failed"
- << endl;
- RETURN_FAIL(err);
- break;
-
- case NdbError::TemporaryError:
- NdbSleep_MilliSleep(50+50*retryAttempt);
- retryAttempt++;
- continue;
- break;
-
- case NdbError::UnknownResult:
- RETURN_FAIL(err);
- break;
-
- default:
- case NdbError::PermanentError:
- switch (err.classification){
- default:
- RETURN_FAIL(err);
- break;
- }
- break;
- }
- }
- else{
- pNdb->closeTransaction(pDelTrans);
- }
-
- return NDBT_OK;
- }
- return NDBT_FAILED;
-}
-
-
-
int
UtilTransactions::scanReadRecords(Ndb* pNdb,
@@ -730,7 +620,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
const int retryMax = 100;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
NDBT_ResultRow row(tab);
while (true){
@@ -755,18 +645,18 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- if (exclusive == true)
- check = pOp->openScanExclusive(parallelism);
- else
- check = pOp->openScanRead(parallelism);
- if( check == -1 ) {
+ NdbResultSet * rs = pOp->readTuples(exclusive ?
+ NdbScanOperation::LM_Exclusive :
+ NdbScanOperation::LM_Read,
+ 0, parallelism);
+ if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -778,7 +668,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
+
// Call getValue for all the attributes supplied in attrib_list
// ************************************************
for (int a = 0; a < noAttribs; a++){
@@ -793,8 +683,8 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
}
}
// *************************************************
-
- check = pTrans->executeScan();
+
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
@@ -812,15 +702,14 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
- while(eof == 0){
+
+ while((eof = rs->nextResult()) == 0){
rows++;
// Call callback for each record returned
if(fn != NULL)
fn(&row);
- eof = pTrans->nextScanResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
@@ -856,14 +745,15 @@ UtilTransactions::selectCount(Ndb* pNdb,
int parallelism,
int* count_rows,
ScanLock lock,
- NdbConnection* pBuddyTrans){
+ NdbConnection* pTrans){
int retryAttempt = 0;
const int retryMax = 100;
int check;
- NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
+ if(!pTrans)
+ pTrans = pNdb->startTransaction();
while (true){
if (retryAttempt >= retryMax){
@@ -871,39 +761,27 @@ UtilTransactions::selectCount(Ndb* pNdb,
<< " times, failing!" << endl;
return NDBT_FAILED;
}
-
- pTrans = pNdb->hupp(pBuddyTrans);
- if (pTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
+ NdbResultSet * rs;
switch(lock){
case SL_ReadHold:
- check = pOp->openScanReadHoldLock(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Read);
break;
case SL_Exclusive:
- check = pOp->openScanExclusive(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Exclusive);
break;
case SL_Read:
default:
- check = pOp->openScanRead(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead);
}
- if( check == -1 ) {
+ if( rs == 0) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -922,9 +800,9 @@ UtilTransactions::selectCount(Ndb* pNdb,
return NDBT_FAILED;
}
}
-
-
- check = pTrans->executeScan();
+
+
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -933,15 +811,14 @@ UtilTransactions::selectCount(Ndb* pNdb,
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
+
- while(eof == 0){
+ while((eof = rs->nextResult()) == 0){
rows++;
- eof = pTrans->nextScanResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
-
+
if (err.status == NdbError::TemporaryError){
pNdb->closeTransaction(pTrans);
NdbSleep_MilliSleep(50);
@@ -952,7 +829,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
+
pNdb->closeTransaction(pTrans);
if (count_rows != NULL){
@@ -963,7 +840,6 @@ UtilTransactions::selectCount(Ndb* pNdb,
}
return NDBT_FAILED;
}
-
int
UtilTransactions::verifyIndex(Ndb* pNdb,
@@ -1028,7 +904,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
const int retryMax = 100;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
NDBT_ResultRow row(tab);
parallelism = 1;
@@ -1055,20 +931,21 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(tab.getName());
+ pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
+ NdbResultSet* rs;
if(transactional){
- check = pOp->openScanReadHoldLock(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism);
} else {
- check = pOp->openScanRead(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallelism);
}
-
- if( check == -1 ) {
+
+ if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -1091,10 +968,10 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
}
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
-
+
if (err.status == NdbError::TemporaryError){
ERR(err);
pNdb->closeTransaction(pTrans);
@@ -1109,14 +986,14 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
- while(eof == 0){
+
+ while((eof = rs->nextResult()) == 0){
rows++;
-
+
// ndbout << row.c_str().c_str() << endl;
-
-
+
+
if (readRowFromTableAndIndex(pNdb,
pTrans,
indexName,
@@ -1124,11 +1001,6 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
-
-
-
-
- eof = pTrans->nextScanResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
@@ -1168,7 +1040,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
NdbDictionary::Index::Type indexType= pIndex->getType();
int retryAttempt = 0;
const int retryMax = 100;
- int check;
+ int check, a;
NdbConnection *pTrans1=NULL;
NdbResultSet *cursor= NULL;
NdbOperation *pOp;
@@ -1228,7 +1100,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
#if VERBOSE
printf("PK: ");
#endif
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
const NdbDictionary::Column* attr = tab.getColumn(a);
if (attr->getPrimaryKey() == true){
if (pOp->equal(attr->getName(), row.attributeStore(a)->aRef()) != 0){
@@ -1247,7 +1119,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
#if VERBOSE
printf("Reading %u attributes: ", tab.getNoOfColumns());
#endif
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
if((tabRow.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans1->getNdbError());
@@ -1265,13 +1137,13 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
* Read the record from INDEX_TABLE
*/
NdbIndexOperation* pIndexOp= NULL;
- NdbScanOperation *pScanOp= NULL;
+ NdbIndexScanOperation *pScanOp= NULL;
{
void* pOpCheck= NULL;
if (indexType == NdbDictionary::Index::UniqueHashIndex) {
pOpCheck= pIndexOp= pTrans1->getNdbIndexOperation(indexName, tab.getName());
} else {
- pOpCheck= pScanOp= pTrans1->getNdbScanOperation(indexName, tab.getName());
+ pOpCheck= pScanOp= pTrans1->getNdbIndexScanOperation(indexName, tab.getName());
}
if (pOpCheck == NULL) {
@@ -1298,7 +1170,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
#if VERBOSE
printf("SI: ");
#endif
- for(int a = 0; a<(int)pIndex->getNoOfColumns(); a++){
+ for(a = 0; a<(int)pIndex->getNoOfColumns(); a++){
const NdbDictionary::Column * col = pIndex->getColumn(a);
int r;
@@ -1308,7 +1180,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
// setBound not possible for null attributes
if ( !row.attributeStore(col->getName())->isNULL() ) {
r = pScanOp->setBound(col->getName(),
- NdbOperation::BoundEQ,
+ NdbIndexScanOperation::BoundEQ,
row.attributeStore(col->getName())->aRef());
}
}
@@ -1328,7 +1200,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
#if VERBOSE
printf("Reading %u attributes: ", tab.getNoOfColumns());
#endif
- for(int a = 0; a<tab.getNoOfColumns(); a++){
+ for(a = 0; a<tab.getNoOfColumns(); a++){
void* pCheck;
if (pIndexOp)
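
Throughout UtilTransactions.cpp the old one-shot scan calls (getNdbOperation + openScanRead/openScanExclusive, executeScan, nextScanResult) are replaced by the NdbScanOperation/NdbResultSet interface. A condensed sketch of the new read-scan loop, assuming the same 2004-era API used in the hunks above, with retries and per-column getValue calls trimmed (the function is a hypothetical example, not part of the commit):

#include <NdbApi.hpp>

// Count the rows of a table with a committed-read scan; returns -1 on error.
int scan_count_rows(Ndb* pNdb, const char* tabName){
  NdbConnection* pTrans = pNdb->startTransaction();
  if(pTrans == 0)
    return -1;

  NdbScanOperation* pOp = pTrans->getNdbScanOperation(tabName);
  if(pOp == 0){
    pNdb->closeTransaction(pTrans);
    return -1;
  }

  // LM_CommittedRead, no scan flags, parallelism 240 (as used above).
  NdbResultSet* rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 240);
  // Normally pOp->getValue(...) calls for the wanted columns would go here.
  if(rs == 0 || pTrans->execute(NoCommit) == -1){
    pNdb->closeTransaction(pTrans);
    return -1;
  }

  int rows = 0, eof;
  while((eof = rs->nextResult()) == 0)
    rows++;

  pNdb->closeTransaction(pTrans);
  return (eof == 1) ? rows : -1;   // nextResult(): 0 = row, 1 = end, -1 = error
}

The same pattern explains the clearTable and copyTableData rewrites: one transaction carries both the scan and the follow-up operations, and pTrans->restart() (rather than releaseCompletedOperations) recycles it between committed batches.
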
diff --git a/ndb/test/tools/cpcc.cpp b/ndb/test/tools/cpcc.cpp
index e768d707bbc..e30d458ffee 100644
--- a/ndb/test/tools/cpcc.cpp
+++ b/ndb/test/tools/cpcc.cpp
@@ -192,7 +192,7 @@ main(int argc, const char** argv){
,{ "rm", 0, arg_flag, &rm, "-c rm", "undefine process(es)" }
};
const int num_args = 10;
-
+ int i;
int optind = 0;
char desc[] = "[host:[port]]\n";
@@ -215,13 +215,13 @@ main(int argc, const char** argv){
Expression * m_expr = 0;
- for(int i = optind; i<argc; i++){
+ for(i = optind; i<argc; i++){
add_host(g_hosts, argv[i]);
}
OrExpr * orE = new OrExpr(new Operate(cmd, g_settings), true);
m_expr = orE;
- for(int i = optind; i<argc; i++){
+ for(i = optind; i<argc; i++){
BaseString tmp(argv[i]);
Vector<BaseString> split;
tmp.split(split, ":");
@@ -347,3 +347,5 @@ Operate::evaluate(SimpleCpcClient* c, const SimpleCpcClient::Process & pp){
return true;
}
+template class Vector<Expression*>;
+template class Vector<SimpleCpcClient*>;
diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp
index dc9e6c606d6..f883755ea24 100644
--- a/ndb/test/tools/create_index.cpp
+++ b/ndb/test/tools/create_index.cpp
@@ -29,10 +29,13 @@ main(int argc, const char** argv){
const char* _dbname = "TEST_DB";
int _help = 0;
+ int _ordered, _pk;
struct getargs args[] = {
{ "database", 'd', arg_string, &_dbname, "dbname",
"Name of database table is in"},
+ { "ordered", 'o', arg_flag, &_ordered, "Create ordered index", "" },
+ { "pk", 'p', arg_flag, &_pk, "Create index on primary key", "" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
@@ -73,14 +76,21 @@ main(int argc, const char** argv){
}
NdbDictionary::Index ind;
+ if(_ordered){
+ ind.setType(NdbDictionary::Index::OrderedIndex);
+ ind.setLogging(false);
+ } else {
+ ind.setType(NdbDictionary::Index::UniqueHashIndex);
+ }
char buf[512];
- sprintf(buf, "IND_%s", argv[i]);
+ sprintf(buf, "IND_%s_%s_%c",
+ argv[i], (_pk ? "PK" : "FULL"), (_ordered ? 'O' : 'U'));
ind.setName(buf);
ind.setTable(argv[i]);
- ind.setType(NdbDictionary::Index::UniqueHashIndex);
- for(int c = 0; c<tab->getNoOfColumns(); c++)
- ind.addIndexColumn(tab->getColumn(c)->getName());
-
+ for(int c = 0; c<tab->getNoOfColumns(); c++){
+ if(!_pk || tab->getColumn(c)->getPrimaryKey())
+ ind.addIndexColumn(tab->getColumn(c)->getName());
+ }
ndbout << "creating index " << buf << " on table " << argv[i] << "...";
const int res = dict->createIndex(ind);
if(res != 0)
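
The create_index tool now chooses between an ordered (non-logging) index and a unique hash index, and can restrict the index to primary-key columns. A compact sketch of that decision path, assuming an already-fetched dictionary and table; the standalone helper is hypothetical and error handling is trimmed:

#include <NdbApi.hpp>
#include <stdio.h>

// Create IND_<table>_<PK|FULL>_<O|U> over all columns or only the PK columns.
int create_test_index(NdbDictionary::Dictionary* dict,
                      const NdbDictionary::Table* tab,
                      bool ordered, bool pk_only){
  NdbDictionary::Index ind;
  if(ordered){
    ind.setType(NdbDictionary::Index::OrderedIndex);
    ind.setLogging(false);                 // ordered indexes are not logged
  } else {
    ind.setType(NdbDictionary::Index::UniqueHashIndex);
  }

  char buf[512];
  sprintf(buf, "IND_%s_%s_%c", tab->getName(),
          pk_only ? "PK" : "FULL", ordered ? 'O' : 'U');
  ind.setName(buf);
  ind.setTable(tab->getName());

  for(int c = 0; c < tab->getNoOfColumns(); c++){
    if(!pk_only || tab->getColumn(c)->getPrimaryKey())
      ind.addIndexColumn(tab->getColumn(c)->getName());
  }
  return dict->createIndex(ind);          // 0 on success
}
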
diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/ndb/test/tools/hugoPkReadRecord.cpp
index ac17ffffee8..85f20bd2060 100644
--- a/ndb/test/tools/hugoPkReadRecord.cpp
+++ b/ndb/test/tools/hugoPkReadRecord.cpp
@@ -43,7 +43,7 @@ int main(int argc, const char** argv)
};
int num_args = sizeof(args) / sizeof(args[0]);
- int optind = 0;
+ int optind = 0, i;
if(getarg(args, num_args, argc, argv, &optind) || argv[optind] == NULL) {
arg_printusage(args, num_args, argv[0], "table name\n");
@@ -80,7 +80,7 @@ int main(int argc, const char** argv)
}
op->readTuple();
NdbRecAttr** data = new NdbRecAttr*[table->getNoOfColumns()];
- for (int i = 0; i < table->getNoOfColumns(); i++)
+ for (i = 0; i < table->getNoOfColumns(); i++)
{
const NdbDictionary::Column* c = table->getColumn(i);
if (c->getPrimaryKey())
@@ -93,11 +93,10 @@ int main(int argc, const char** argv)
data[i] = op->getValue(c->getName(), NULL);
}
}
-
if (conn->execute(Commit) == 0)
{
// Print column names
- for (int i = 0; i < table->getNoOfColumns(); i++)
+ for (i = 0; i < table->getNoOfColumns(); i++)
{
const NdbDictionary::Column* c = table->getColumn(i);
@@ -111,7 +110,7 @@ int main(int argc, const char** argv)
{
g_info << hex;
}
- for (int i = 0; i < table->getNoOfColumns(); i++)
+ for (i = 0; i < table->getNoOfColumns(); i++)
{
NdbRecAttr* a = data[i];
switch(a->getType())
diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp
index dabd9a0e8fa..5110947c6a2 100644
--- a/ndb/tools/delete_all.cpp
+++ b/ndb/tools/delete_all.cpp
@@ -143,7 +143,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
if(check != -1){
check = pTrans->execute(Commit);
- pTrans->releaseCompletedOperations();
+ pTrans->restart();
}
err = pTrans->getNdbError();
diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp
index a5ff11edca9..e5b98c4c8e9 100644
--- a/ndb/tools/desc.cpp
+++ b/ndb/tools/desc.cpp
@@ -73,7 +73,8 @@ int main(int argc, const char** argv){
ndbout << "-- Indexes -- " << endl;
ndbout << "PRIMARY KEY(";
- for (unsigned j= 0; j < pTab->getNoOfPrimaryKeys(); j++)
+ unsigned j;
+ for (j= 0; j < pTab->getNoOfPrimaryKeys(); j++)
{
const NdbDictionary::Column * col = pTab->getColumn(j);
ndbout << col->getName();
@@ -82,7 +83,7 @@ int main(int argc, const char** argv){
}
ndbout << ") - UniqueHashIndex" << endl;
- for (unsigned j= 0; j < list.count; j++) {
+ for (j= 0; j < list.count; j++) {
NdbDictionary::Dictionary::List::Element& elt = list.elements[j];
const NdbDictionary::Index *pIdx = dict->getIndex(elt.name, argv[i]);
if (!pIdx){
diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp
index 34f63a095bb..329ed87bc48 100644
--- a/ndb/tools/select_all.cpp
+++ b/ndb/tools/select_all.cpp
@@ -29,11 +29,13 @@
int scanReadRecords(Ndb*,
const NdbDictionary::Table*,
+ const NdbDictionary::Index*,
int parallel,
int lockType,
bool headers,
bool useHexFormat,
- char delim);
+ char delim,
+ bool orderby);
int main(int argc, const char** argv){
int _parallelism = 240;
@@ -44,6 +46,7 @@ int main(int argc, const char** argv){
const char* _dbname = "TEST_DB";
int _help = 0;
int _lock = 0;
+ int _order = 0;
struct getargs args[] = {
{ "database", 'd', arg_string, &_dbname, "dbname",
@@ -57,7 +60,8 @@ int main(int argc, const char** argv){
"delimiter" },
{ "usage", '?', arg_flag, &_help, "Print help", "" },
{ "lock", 'l', arg_integer, &_lock,
- "Read(0), Read-hold(1), Exclusive(2)", "lock"}
+ "Read(0), Read-hold(1), Exclusive(2)", "lock"},
+ { "order", 'o', arg_flag, &_order, "Sort resultset according to index", ""}
};
int num_args = sizeof(args) / sizeof(args[0]);
int optind = 0;
@@ -90,6 +94,11 @@ int main(int argc, const char** argv){
// Check if table exists in db
const NdbDictionary::Table* pTab = NDBT_Table::discoverTableFromDb(&MyNdb, _tabname);
+ const NdbDictionary::Index * pIdx = 0;
+ if(optind+1 < argc){
+ pIdx = MyNdb.getDictionary()->getIndex(argv[optind+1], _tabname);
+ }
+
if(pTab == NULL){
ndbout << " Table " << _tabname << " does not exist!" << endl;
return NDBT_ProgramExit(NDBT_WRONGARGS);
@@ -97,11 +106,12 @@ int main(int argc, const char** argv){
if (scanReadRecords(&MyNdb,
pTab,
+ pIdx,
_parallelism,
_lock,
_header,
_useHexFormat,
- (char)*_delimiter) != 0){
+ (char)*_delimiter, _order) != 0){
return NDBT_ProgramExit(NDBT_FAILED);
}
@@ -111,17 +121,19 @@ int main(int argc, const char** argv){
int scanReadRecords(Ndb* pNdb,
const NdbDictionary::Table* pTab,
+ const NdbDictionary::Index* pIdx,
int parallel,
int _lock,
bool headers,
bool useHexFormat,
- char delimiter){
+ char delimiter, bool order){
int retryAttempt = 0;
const int retryMax = 100;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
+ NdbIndexScanOperation * pIOp;
NDBT_ResultRow * row = new NDBT_ResultRow(*pTab, delimiter);
@@ -146,29 +158,45 @@ int scanReadRecords(Ndb* pNdb,
return -1;
}
- pOp = pTrans->getNdbOperation(pTab->getName());
+
+ pOp = (!pIdx) ? pTrans->getNdbScanOperation(pTab->getName()) :
+ pIOp=pTrans->getNdbIndexScanOperation(pIdx->getName(), pTab->getName());
+
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return -1;
}
- switch(_lock){
+ NdbResultSet * rs;
+ switch(_lock + (3 * order)){
case 1:
- check = pOp->openScanReadHoldLock(parallel);
+ rs = pOp->readTuples(NdbScanOperation::LM_Read, 0, parallel);
break;
case 2:
- check = pOp->openScanExclusive(parallel);
+ rs = pOp->readTuples(NdbScanOperation::LM_Exclusive, 0, parallel);
+ break;
+ case 3:
+ rs = pIOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallel,
+ true);
break;
+ case 4:
+ rs = pIOp->readTuples(NdbScanOperation::LM_Read, 0, parallel, true);
+ break;
+ case 5:
+ rs = pIOp->readTuples(NdbScanOperation::LM_Exclusive, 0, parallel, true);
+ break;
+ case 0:
default:
- check = pOp->openScanRead(parallel);
+ rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallel);
+ break;
}
- if( check == -1 ) {
+ if( rs == 0 ){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return -1;
}
-
+
if(0){
NdbScanFilter sf(pOp);
#if 0
@@ -229,10 +257,10 @@ int scanReadRecords(Ndb* pNdb,
}
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
-
+
if (err.status == NdbError::TemporaryError){
pNdb->closeTransaction(pTrans);
NdbSleep_MilliSleep(50);
@@ -246,11 +274,11 @@ int scanReadRecords(Ndb* pNdb,
if (headers)
row->header(ndbout) << endl;
-
+
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
-
+ eof = rs->nextResult();
+
while(eof == 0){
rows++;
@@ -260,7 +288,7 @@ int scanReadRecords(Ndb* pNdb,
ndbout << (*row) << endl;
}
- eof = pTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
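
With the new --order flag select_all scans through an index and asks readTuples for a sorted result set, which is what the extra `true` argument on the NdbIndexScanOperation calls above requests. A minimal sketch of just that path, assuming the same API and omitting the getValue/output plumbing (the helper is hypothetical):

#include <NdbApi.hpp>

// Read a table in index order; returns the row count or -1 on error.
int ordered_scan(Ndb* pNdb, const char* tabName, const char* idxName){
  NdbConnection* pTrans = pNdb->startTransaction();
  if(pTrans == 0)
    return -1;

  NdbIndexScanOperation* pIOp =
    pTrans->getNdbIndexScanOperation(idxName, tabName);
  if(pIOp == 0){
    pNdb->closeTransaction(pTrans);
    return -1;
  }

  // The last argument requests a sorted (index order) result set.
  NdbResultSet* rs = pIOp->readTuples(NdbScanOperation::LM_CommittedRead,
                                      0, 16, true);
  if(rs == 0 || pTrans->execute(NoCommit) == -1){
    pNdb->closeTransaction(pTrans);
    return -1;
  }

  int rows = 0, eof;
  while((eof = rs->nextResult()) == 0)
    rows++;                     // row data would be fetched via getValue()

  pNdb->closeTransaction(pTrans);
  return (eof == 1) ? rows : -1;
}
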
diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp
index cae91feb378..2c43af20e64 100644
--- a/ndb/tools/select_count.cpp
+++ b/ndb/tools/select_count.cpp
@@ -30,8 +30,7 @@ static int
select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
int parallelism,
int* count_rows,
- UtilTransactions::ScanLock lock,
- NdbConnection* pBuddyTrans=0);
+ UtilTransactions::ScanLock lock);
int main(int argc, const char** argv){
const char* _dbname = "TEST_DB";
@@ -95,14 +94,13 @@ int
select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
int parallelism,
int* count_rows,
- UtilTransactions::ScanLock lock,
- NdbConnection* pBuddyTrans){
+ UtilTransactions::ScanLock lock){
int retryAttempt = 0;
const int retryMax = 100;
int check;
NdbConnection *pTrans;
- NdbOperation *pOp;
+ NdbScanOperation *pOp;
while (true){
@@ -112,7 +110,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
return NDBT_FAILED;
}
- pTrans = pNdb->hupp(pBuddyTrans);
+ pTrans = pNdb->startTransaction();
if (pTrans == NULL) {
const NdbError err = pNdb->getNdbError();
@@ -124,26 +122,27 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
ERR(err);
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(pTab->getName());
+ pOp = pTrans->getNdbScanOperation(pTab->getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
+ NdbResultSet * rs;
switch(lock){
case UtilTransactions::SL_ReadHold:
- check = pOp->openScanReadHoldLock(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism);
break;
case UtilTransactions::SL_Exclusive:
- check = pOp->openScanExclusive(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Exclusive, 0, parallelism);
break;
case UtilTransactions::SL_Read:
default:
- check = pOp->openScanRead(parallelism);
+ rs = pOp->readTuples(NdbScanOperation::LM_Dirty, 0, parallelism);
}
- if( check == -1 ) {
+ if( rs == 0 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -156,7 +155,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
return NDBT_FAILED;
}
- check = pTrans->executeScan();
+ check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -165,11 +164,11 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
int eof;
int rows = 0;
- eof = pTrans->nextScanResult();
+ eof = rs->nextResult();
while(eof == 0){
rows++;
- eof = pTrans->nextScanResult();
+ eof = rs->nextResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp
index 7ce2739a157..86d34066c55 100644
--- a/ndb/tools/waiter.cpp
+++ b/ndb/tools/waiter.cpp
@@ -22,21 +22,26 @@
#include <NdbSleep.h>
#include <getarg.h>
#include <kernel/ndb_limits.h>
-#include "../src/common/mgmcommon/LocalConfig.hpp"
+#include "../include/mgmcommon/LocalConfig.hpp"
#include <NDBT.hpp>
int
-waitClusterStarted(const char* _addr, unsigned int _timeout= 120);
+waitClusterStatus(const char* _addr,
+ ndb_mgm_node_status _status= NDB_MGM_NODE_STATUS_STARTED,
+ unsigned int _timeout= 120);
int main(int argc, const char** argv){
const char* _hostName = NULL;
+ int _no_contact = 0;
int _help = 0;
struct getargs args[] = {
+ { "no-contact", 0, arg_flag, &_no_contact, "Wait for cluster no contact", "" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
+
int num_args = sizeof(args) / sizeof(args[0]);
int optind = 0;
char desc[] =
@@ -62,13 +67,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- for (int i = 0; i<lcfg.items; i++)
+ for (int i = 0; i<lcfg.ids.size();i++)
{
- MgmtSrvrId * m = lcfg.ids[i];
+ MgmtSrvrId * m = &lcfg.ids[i];
switch(m->type){
case MgmId_TCP:
- snprintf(buf, 255, "%s:%d", m->data.tcp.remoteHost, m->data.tcp.port);
+ snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port);
_hostName = buf;
break;
case MgmId_File:
@@ -86,7 +91,10 @@ int main(int argc, const char** argv){
}
}
- if (waitClusterStarted(_hostName) != 0)
+ if (_no_contact) {
+ if (waitClusterStatus(_hostName, NDB_MGM_NODE_STATUS_NO_CONTACT) != 0)
+ return NDBT_ProgramExit(NDBT_FAILED);
+ } else if (waitClusterStatus(_hostName) != 0)
return NDBT_ProgramExit(NDBT_FAILED);
return NDBT_ProgramExit(NDBT_OK);
@@ -121,7 +129,8 @@ getStatus(){
retries++;
continue;
}
- for (int i = 0; i < status->no_of_nodes; i++){
+ int count = status->no_of_nodes;
+ for (int i = 0; i < count; i++){
node = &status->node_states[i];
switch(node->node_type){
case NDB_MGM_NODE_TYPE_NDB:
@@ -142,7 +151,7 @@ getStatus(){
apiNodes.clear();
free(status);
status = NULL;
- i = status->no_of_nodes;
+ count = 0;
ndbout << "kalle"<< endl;
break;
@@ -164,9 +173,10 @@ getStatus(){
}
int
-waitClusterStarted(const char* _addr, unsigned int _timeout)
+waitClusterStatus(const char* _addr,
+ ndb_mgm_node_status _status,
+ unsigned int _timeout)
{
- ndb_mgm_node_status _status = NDB_MGM_NODE_STATUS_STARTED;
int _startphase = -1;
int _nodes[MAX_NDB_NODES];
@@ -290,10 +300,12 @@ waitClusterStarted(const char* _addr, unsigned int _timeout)
allInState = false;
}
}
- g_info << "Waiting for cluster enter state"
+ g_info << "Waiting for cluster enter state "
<< ndb_mgm_get_node_status_string(_status)<< endl;
NdbSleep_SecSleep(1);
attempts++;
}
return 0;
}
+
+template class Vector<ndb_mgm_node_state>;
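
waitClusterStarted() is generalised into waitClusterStatus() above, so the same loop can wait either for STARTED or, with --no-contact, for NO_CONTACT. A rough sketch of the polling core, assuming ndb_mgm_get_status() and the ndb_mgm_node_state fields (node_type, node_status) from this era's mgmapi; the helper is hypothetical and node-id filtering is omitted:

#include <mgmapi.h>
#include <stdlib.h>
#include <NdbSleep.h>

// Poll until every DB node reports the wanted status, or the timeout expires.
// Returns 0 on success, -1 on timeout or error.
int wait_cluster_status(NdbMgmHandle handle,
                        ndb_mgm_node_status wanted,
                        unsigned timeout_s){
  for(unsigned waited = 0; waited < timeout_s; waited++){
    struct ndb_mgm_cluster_state* state = ndb_mgm_get_status(handle);
    if(state == 0)
      return -1;

    bool all = true;
    for(int i = 0; i < state->no_of_nodes; i++){
      const ndb_mgm_node_state* node = &state->node_states[i];
      if(node->node_type == NDB_MGM_NODE_TYPE_NDB &&
         node->node_status != wanted)
        all = false;
    }
    free(state);                 // the status struct is malloc'd by mgmapi

    if(all)
      return 0;
    NdbSleep_SecSleep(1);        // same pacing as the loop above
  }
  return -1;
}
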
diff --git a/netware/mysql_test_run.c b/netware/mysql_test_run.c
index a69c5015968..fd5725a6414 100644
--- a/netware/mysql_test_run.c
+++ b/netware/mysql_test_run.c
@@ -170,7 +170,7 @@ void report_stats()
log_msg("\nThe .out and .err files in %s may give you some\n", result_dir);
log_msg("hint of what when wrong.\n");
log_msg("\nIf you want to report this error, please first read the documentation\n");
- log_msg("at: http://www.mysql.com/doc/M/y/MySQL_test_suite.html\n");
+ log_msg("at: http://www.mysql.com/doc/en/MySQL_test_suite.html\n");
}
log_msg("\n%.02f total minutes elapsed in the test cases\n\n", total_time / 60);
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 9c76c5a51f3..87473d6bd17 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -88,7 +88,7 @@ do
done
for i in COPYING COPYING.LIB README Docs/INSTALL-BINARY \
- MySQLEULA.txt LICENSE.doc README.NW
+ EXCEPTIONS-CLIENT MySQLEULA.txt LICENSE.doc README.NW
do
if [ -f $i ]
then
diff --git a/scripts/make_sharedlib_distribution.sh b/scripts/make_sharedlib_distribution.sh
index 4104a315296..fbc945e445a 100644
--- a/scripts/make_sharedlib_distribution.sh
+++ b/scripts/make_sharedlib_distribution.sh
@@ -45,8 +45,10 @@ fi
mkdir -p $BASE/lib
for i in \
- libmysql/.libs/libmysqlclient.so* \
- libmysql_r/.libs/libmysqlclient_r.so*
+ libmysql/.libs/libmysqlclient.s{l,o}* \
+ libmysql/.libs/libmysqlclient*.dylib \
+ libmysql_r/.libs/libmysqlclient_r.s{l,o}* \
+ libmysql_r/.libs/libmysqlclient_r*.dylib
do
if [ -f $i ]
then
diff --git a/scripts/make_win_src_distribution.sh b/scripts/make_win_src_distribution.sh
index eaaf219afc4..19a5fb7276b 100644
--- a/scripts/make_win_src_distribution.sh
+++ b/scripts/make_win_src_distribution.sh
@@ -275,7 +275,7 @@ touch $BASE/innobase/ib_config.h
#
cd $SOURCE
-for i in COPYING ChangeLog README \
+for i in COPYING ChangeLog README EXCEPTIONS-CLIENT\
INSTALL-SOURCE INSTALL-WIN \
INSTALL-WIN-SOURCE \
Docs/manual_toc.html Docs/manual.html \
diff --git a/scripts/mysql_create_system_tables.sh b/scripts/mysql_create_system_tables.sh
index 99e997d6adc..5a2a45c4b3d 100644
--- a/scripts/mysql_create_system_tables.sh
+++ b/scripts/mysql_create_system_tables.sh
@@ -71,7 +71,8 @@ then
c_d="$c_d Show_view_priv enum('N','Y') DEFAULT 'N' NOT NULL,"
c_d="$c_d PRIMARY KEY Host (Host,Db,User),"
c_d="$c_d KEY User (User)"
- c_d="$c_d )"
+ c_d="$c_d ) engine=MyISAM"
+ c_d="$c_d CHARACTER SET utf8 COLLATE utf8_bin"
c_d="$c_d comment='Database privileges';"
i_d="INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y');
@@ -102,7 +103,8 @@ then
c_h="$c_h Create_view_priv enum('N','Y') DEFAULT 'N' NOT NULL,"
c_h="$c_h Show_view_priv enum('N','Y') DEFAULT 'N' NOT NULL,"
c_h="$c_h PRIMARY KEY Host (Host,Db)"
- c_h="$c_h )"
+ c_h="$c_h ) engine=MyISAM"
+ c_h="$c_h CHARACTER SET utf8 COLLATE utf8_bin"
c_h="$c_h comment='Host privileges; Merged with database privileges';"
fi
@@ -147,7 +149,8 @@ then
c_u="$c_u max_updates int(11) unsigned DEFAULT 0 NOT NULL,"
c_u="$c_u max_connections int(11) unsigned DEFAULT 0 NOT NULL,"
c_u="$c_u PRIMARY KEY Host (Host,User)"
- c_u="$c_u )"
+ c_u="$c_u ) engine=MyISAM"
+ c_u="$c_u CHARACTER SET utf8 COLLATE utf8_bin"
c_u="$c_u comment='Users and global privileges';"
if test "$1" = "test"
@@ -186,7 +189,8 @@ then
c_f="$c_f dl char(128) DEFAULT '' NOT NULL,"
c_f="$c_f type enum ('function','aggregate') NOT NULL,"
c_f="$c_f PRIMARY KEY (name)"
- c_f="$c_f )"
+ c_f="$c_f ) engine=MyISAM"
+ c_f="$c_f CHARACTER SET utf8 COLLATE utf8_bin"
c_f="$c_f comment='User defined functions';"
fi
@@ -207,7 +211,8 @@ then
c_t="$c_t Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,"
c_t="$c_t PRIMARY KEY (Host,Db,User,Table_name),"
c_t="$c_t KEY Grantor (Grantor)"
- c_t="$c_t )"
+ c_t="$c_t ) engine=MyISAM"
+ c_t="$c_t CHARACTER SET utf8 COLLATE utf8_bin"
c_t="$c_t comment='Table privileges';"
fi
@@ -226,7 +231,8 @@ then
c_c="$c_c Timestamp timestamp(14),"
c_c="$c_c Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,"
c_c="$c_c PRIMARY KEY (Host,Db,User,Table_name,Column_name)"
- c_c="$c_c )"
+ c_c="$c_c ) engine=MyISAM"
+ c_c="$c_c CHARACTER SET utf8 COLLATE utf8_bin"
c_c="$c_c comment='Column privileges';"
fi
@@ -245,7 +251,8 @@ then
c_ht="$c_ht url varchar(128) not null,"
c_ht="$c_ht primary key (help_topic_id),"
c_ht="$c_ht unique index (name)"
- c_ht="$c_ht )"
+ c_ht="$c_ht ) engine=MyISAM"
+ c_ht="$c_ht CHARACTER SET utf8"
c_ht="$c_ht comment='help topics';"
fi
@@ -264,7 +271,8 @@ then
c_hc="$c_hc url varchar(128) not null,"
c_hc="$c_hc primary key (help_category_id),"
c_hc="$c_hc unique index (name)"
- c_hc="$c_hc )"
+ c_hc="$c_hc ) engine=MyISAM"
+ c_hc="$c_hc CHARACTER SET utf8"
c_hc="$c_hc comment='help categories';"
fi
@@ -279,7 +287,8 @@ then
c_hk="$c_hk name varchar(64) not null,"
c_hk="$c_hk primary key (help_keyword_id),"
c_hk="$c_hk unique index (name)"
- c_hk="$c_hk )"
+ c_hk="$c_hk ) engine=MyISAM"
+ c_hk="$c_hk CHARACTER SET utf8"
c_hk="$c_hk comment='help keywords';"
fi
@@ -293,7 +302,8 @@ then
c_hr="$c_hr help_topic_id int unsigned not null references help_topic,"
c_hr="$c_hr help_keyword_id int unsigned not null references help_keyword,"
c_hr="$c_hr primary key (help_keyword_id, help_topic_id)"
- c_hr="$c_hr )"
+ c_hr="$c_hr ) engine=MyISAM"
+ c_hr="$c_hr CHARACTER SET utf8"
c_hr="$c_hr comment='keyword-topic relation';"
fi
@@ -307,14 +317,15 @@ then
c_tzn="$c_tzn Name char(64) NOT NULL,"
c_tzn="$c_tzn Time_zone_id int unsigned NOT NULL,"
c_tzn="$c_tzn PRIMARY KEY Name (Name)"
- c_tzn="$c_tzn ) DEFAULT CHARACTER SET latin1"
+ c_tzn="$c_tzn ) engine=MyISAM CHARACTER SET utf8"
c_tzn="$c_tzn comment='Time zone names';"
if test "$1" = "test"
then
i_tzn="$i_tzn INSERT INTO time_zone_name (Name, Time_Zone_id) VALUES"
i_tzn="$i_tzn ('MET', 1), ('UTC', 2), ('Universal', 2), "
- i_tzn="$i_tzn ('Europe/Moscow',3), ('leap/Europe/Moscow',4);"
+ i_tzn="$i_tzn ('Europe/Moscow',3), ('leap/Europe/Moscow',4), "
+ i_tzn="$i_tzn ('Japan', 5);"
fi
fi
@@ -328,13 +339,13 @@ then
c_tz="$c_tz Time_zone_id int unsigned NOT NULL auto_increment,"
c_tz="$c_tz Use_leap_seconds enum('Y','N') DEFAULT 'N' NOT NULL,"
c_tz="$c_tz PRIMARY KEY TzId (Time_zone_id)"
- c_tz="$c_tz ) DEFAULT CHARACTER SET latin1"
+ c_tz="$c_tz ) engine=MyISAM CHARACTER SET utf8"
c_tz="$c_tz comment='Time zones';"
if test "$1" = "test"
then
i_tz="$i_tz INSERT INTO time_zone (Time_zone_id, Use_leap_seconds)"
- i_tz="$i_tz VALUES (1,'N'), (2,'N'), (3,'N'), (4,'Y');"
+ i_tz="$i_tz VALUES (1,'N'), (2,'N'), (3,'N'), (4,'Y'), (5,'N');"
fi
fi
@@ -349,7 +360,7 @@ then
c_tzt="$c_tzt Transition_time bigint signed NOT NULL,"
c_tzt="$c_tzt Transition_type_id int unsigned NOT NULL,"
c_tzt="$c_tzt PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time)"
- c_tzt="$c_tzt ) DEFAULT CHARACTER SET latin1"
+ c_tzt="$c_tzt ) engine=MyISAM CHARACTER SET utf8"
c_tzt="$c_tzt comment='Time zone transitions';"
if test "$1" = "test"
@@ -553,7 +564,8 @@ then
i_tzt="$i_tzt ,(4, 2045689222, 8) ,(4, 2058390022, 9)"
i_tzt="$i_tzt ,(4, 2077138822, 8) ,(4, 2090444422, 9)"
i_tzt="$i_tzt ,(4, 2108588422, 8) ,(4, 2121894022, 9)"
- i_tzt="$i_tzt ,(4, 2140038022, 8);"
+ i_tzt="$i_tzt ,(4, 2140038022, 8)"
+ i_tzt="$i_tzt ,(5, -1009875600, 1);"
fi
fi
@@ -570,7 +582,7 @@ then
c_tztt="$c_tztt Is_DST tinyint unsigned DEFAULT 0 NOT NULL,"
c_tztt="$c_tztt Abbreviation char(8) DEFAULT '' NOT NULL,"
c_tztt="$c_tztt PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id)"
- c_tztt="$c_tztt ) DEFAULT CHARACTER SET latin1"
+ c_tztt="$c_tztt ) engine=MyISAM CHARACTER SET utf8"
c_tztt="$c_tztt comment='Time zone transition types';"
if test "$1" = "test"
@@ -591,7 +603,8 @@ then
i_tztt="$i_tztt ,(4, 4, 10800, 0, 'MSK') ,(4, 5, 14400, 1, 'MSD')"
i_tztt="$i_tztt ,(4, 6, 18000, 1, 'MSD') ,(4, 7, 7200, 0, 'EET')"
i_tztt="$i_tztt ,(4, 8, 10800, 0, 'MSK') ,(4, 9, 14400, 1, 'MSD')"
- i_tztt="$i_tztt ,(4, 10, 10800, 1, 'EEST') ,(4, 11, 7200, 0, 'EET');"
+ i_tztt="$i_tztt ,(4, 10, 10800, 1, 'EEST') ,(4, 11, 7200, 0, 'EET')"
+ i_tztt="$i_tztt ,(5, 0, 32400, 0, 'CJT') ,(5, 1, 32400, 0, 'JST');"
fi
fi
@@ -605,7 +618,7 @@ then
c_tzls="$c_tzls Transition_time bigint signed NOT NULL,"
c_tzls="$c_tzls Correction int signed NOT NULL,"
c_tzls="$c_tzls PRIMARY KEY TranTime (Transition_time)"
- c_tzls="$c_tzls ) DEFAULT CHARACTER SET latin1"
+ c_tzls="$c_tzls ) engine=MyISAM CHARACTER SET utf8"
c_tzls="$c_tzls comment='Leap seconds information for time zones';"
if test "$1" = "test"
diff --git a/scripts/mysql_fix_privilege_tables.sql b/scripts/mysql_fix_privilege_tables.sql
index 3e7a2f12f3e..18dfe14bc45 100644
--- a/scripts/mysql_fix_privilege_tables.sql
+++ b/scripts/mysql_fix_privilege_tables.sql
@@ -9,12 +9,12 @@
-- this sql script.
-- On windows you should do 'mysql --force mysql < mysql_fix_privilege_tables.sql'
-ALTER TABLE user type=MyISAM;
-ALTER TABLE db type=MyISAM;
-ALTER TABLE host type=MyISAM;
-ALTER TABLE func type=MyISAM;
-ALTER TABLE columns_priv type=MyISAM;
-ALTER TABLE tables_priv type=MyISAM;
+ALTER TABLE user type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE db type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE host type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE func type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE columns_priv type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE tables_priv type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
ALTER TABLE user change Password Password char(41) binary not null;
ALTER TABLE user add File_priv enum('N','Y') NOT NULL;
CREATE TABLE IF NOT EXISTS func (
@@ -23,7 +23,7 @@ CREATE TABLE IF NOT EXISTS func (
dl char(128) DEFAULT '' NOT NULL,
type enum ('function','aggregate') NOT NULL,
PRIMARY KEY (name)
-);
+) CHARACTER SET utf8 COLLATE utf8_bin;
-- Detect whether or not we had the Grant_priv column
SET @hadGrantPriv:=0;
@@ -63,7 +63,7 @@ CREATE TABLE IF NOT EXISTS tables_priv (
Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') DEFAULT '' NOT NULL,
Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,
PRIMARY KEY (Host,Db,User,Table_name)
-);
+) CHARACTER SET utf8 COLLATE utf8_bin;
CREATE TABLE IF NOT EXISTS columns_priv (
Host char(60) DEFAULT '' NOT NULL,
@@ -74,7 +74,7 @@ CREATE TABLE IF NOT EXISTS columns_priv (
Timestamp timestamp(14),
Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,
PRIMARY KEY (Host,Db,User,Table_name,Column_name)
-);
+) CHARACTER SET utf8 COLLATE utf8_bin;
--
@@ -169,7 +169,7 @@ description text not null,
example text not null,
url varchar(128) not null,
primary key (help_topic_id), unique index (name)
-) comment='help topics';
+) CHARACTER SET utf8 comment='help topics';
CREATE TABLE IF NOT EXISTS help_category (
help_category_id smallint unsigned not null,
@@ -178,20 +178,20 @@ parent_category_id smallint unsigned null,
url varchar(128) not null,
primary key (help_category_id),
unique index (name)
-) comment='help categories';
+) CHARACTER SET utf8 comment='help categories';
CREATE TABLE IF NOT EXISTS help_relation (
help_topic_id int unsigned not null references help_topic,
help_keyword_id int unsigned not null references help_keyword,
primary key (help_keyword_id, help_topic_id)
-) comment='keyword-topic relation';
+) CHARACTER SET utf8 comment='keyword-topic relation';
CREATE TABLE IF NOT EXISTS help_keyword (
help_keyword_id int unsigned not null,
name varchar(64) not null,
primary key (help_keyword_id),
unique index (name)
-) comment='help keywords';
+) CHARACTER SET utf8 comment='help keywords';
#
# Create missing time zone related tables
@@ -201,20 +201,20 @@ CREATE TABLE IF NOT EXISTS time_zone_name (
Name char(64) NOT NULL,
Time_zone_id int unsigned NOT NULL,
PRIMARY KEY Name (Name)
-) DEFAULT CHARACTER SET latin1 comment='Time zone names';
+) CHARACTER SET utf8 comment='Time zone names';
CREATE TABLE IF NOT EXISTS time_zone (
Time_zone_id int unsigned NOT NULL auto_increment,
Use_leap_seconds enum('Y','N') DEFAULT 'N' NOT NULL,
PRIMARY KEY TzId (Time_zone_id)
-) DEFAULT CHARACTER SET latin1 comment='Time zones';
+) CHARACTER SET utf8 comment='Time zones';
CREATE TABLE IF NOT EXISTS time_zone_transition (
Time_zone_id int unsigned NOT NULL,
Transition_time bigint signed NOT NULL,
Transition_type_id int unsigned NOT NULL,
PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time)
-) DEFAULT CHARACTER SET latin1 comment='Time zone transitions';
+) CHARACTER SET utf8 comment='Time zone transitions';
CREATE TABLE IF NOT EXISTS time_zone_transition_type (
Time_zone_id int unsigned NOT NULL,
@@ -223,13 +223,13 @@ Offset int signed DEFAULT 0 NOT NULL,
Is_DST tinyint unsigned DEFAULT 0 NOT NULL,
Abbreviation char(8) DEFAULT '' NOT NULL,
PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id)
-) DEFAULT CHARACTER SET latin1 comment='Time zone transition types';
+) CHARACTER SET utf8 comment='Time zone transition types';
CREATE TABLE IF NOT EXISTS time_zone_leap_second (
Transition_time bigint signed NOT NULL,
Correction int signed NOT NULL,
PRIMARY KEY TranTime (Transition_time)
-) DEFAULT CHARACTER SET latin1 comment='Leap seconds information for time zones';
+) CHARACTER SET utf8 comment='Leap seconds information for time zones';
#
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index 7b77bf449cd..8ad2ee1df4d 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -311,6 +311,7 @@ do
fi
if test ! -f $pid_file # This is removed if normal shutdown
then
+ echo "STOPPING server from pid file $pid_file"
break
fi
@@ -321,12 +322,24 @@ do
# but should work for the rest of the servers.
# The only thing is ps x => redhat 5 gives warnings when using ps -x.
# kill -9 is used or the process won't react on the kill.
- numofproces=`ps xa | grep -v "grep" | grep -c $ledir/$MYSQLD`
+ if test -n "$mysql_tcp_port"
+ then
+ numofproces=`ps xa | grep -v "grep" | grep $ledir/$MYSQLD| grep -c "port=$mysql_tcp_port"`
+ else
+ numofproces=`ps xa | grep -v "grep" | grep -c $ledir/$MYSQLD`
+ fi
+
echo -e "\nNumber of processes running now: $numofproces" | tee -a $err_log
I=1
while test "$I" -le "$numofproces"
do
- PROC=`ps xa | grep $ledir/$MYSQLD | grep -v "grep" | sed -n '$p'`
+ if test -n "$mysql_tcp_port"
+ then
+ PROC=`ps xa | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "port=$mysql_tcp_port" | sed -n '$p'`
+ else
+ PROC=`ps xa | grep "$ledir/$MYSQLD\>" | grep -v "grep" | sed -n '$p'`
+ fi
+
for T in $PROC
do
break
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index 1f5ba707f05..b0c40102a6b 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -189,7 +189,6 @@ sub new
$self->{'transactions'} = 1; # Transactions enabled
$limits{'max_columns'} = 90; # Max number of columns in table
$limits{'max_tables'} = 32; # No comments
- $limits{'working_blobs'} = 0; # NDB tables can't handle BLOB's
}
if (defined($main::opt_create_options) &&
$main::opt_create_options =~ /type=bdb/i)
diff --git a/sql-common/client.c b/sql-common/client.c
index 3aaa9f23665..41f0bb80844 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -133,6 +133,7 @@ static void mysql_close_free(MYSQL *mysql);
static int wait_for_data(my_socket fd, uint timeout);
#endif
+
/****************************************************************************
A modified version of connect(). my_connect() allows you to specify
a timeout value, in seconds, that we should wait until we
@@ -723,7 +724,7 @@ void set_mysql_error(MYSQL *mysql, int errcode, const char *sqlstate)
Flush result set sent from server
*/
-void flush_use_result(MYSQL *mysql)
+static void cli_flush_use_result(MYSQL *mysql)
{
/* Clear the current execution status */
DBUG_PRINT("warning",("Not all packets read, clearing them"));
@@ -842,7 +843,7 @@ mysql_free_result(MYSQL_RES *result)
mysql->unbuffered_fetch_owner= 0;
if (mysql->status == MYSQL_STATUS_USE_RESULT)
{
- flush_use_result(mysql);
+ (*mysql->methods->flush_use_result)(mysql);
mysql->status=MYSQL_STATUS_READY;
}
}
@@ -1037,7 +1038,7 @@ void mysql_read_default_options(struct st_mysql_options *options,
options->client_flag&= ~CLIENT_LOCAL_FILES;
break;
case 22:
- options->client_flag&= CLIENT_LOCAL_FILES;
+ options->client_flag&= ~CLIENT_LOCAL_FILES;
break;
case 23: /* replication probe */
#ifndef TO_BE_DELETED
@@ -1057,9 +1058,8 @@ void mysql_read_default_options(struct st_mysql_options *options,
options->max_allowed_packet= atoi(opt_arg);
break;
case 28: /* protocol */
- if ((options->protocol = find_type(opt_arg,
- &sql_protocol_typelib,0))
- == ~(ulong) 0)
+ if ((options->protocol= find_type(opt_arg,
+ &sql_protocol_typelib,0)) <= 0)
{
fprintf(stderr, "Unknown option to protocol: %s\n", opt_arg);
exit(1);
@@ -1493,7 +1493,8 @@ static MYSQL_METHODS client_methods=
cli_advanced_command,
cli_read_rows,
cli_use_result,
- cli_fetch_lengths
+ cli_fetch_lengths,
+ cli_flush_use_result
#ifndef MYSQL_SERVER
,cli_list_fields,
cli_read_prepare_result,
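
The mysql_read_default_options() hunk above is a one-character fix with outsized effect: `options->client_flag &= CLIENT_LOCAL_FILES` keeps only that bit and silently drops every other capability flag, while `&= ~CLIENT_LOCAL_FILES` clears just the one bit as intended. A minimal standalone sketch of the difference (the constant below is a hypothetical stand-in, not the real mysql_com.h value):

    // Sketch only: why "flags &= BIT" and "flags &= ~BIT" behave so differently.
    #include <cstdio>

    enum { CLIENT_LOCAL_FILES_DEMO = 1 << 7 };     // hypothetical stand-in value

    int main()
    {
      unsigned long flags= 0xFFFFUL;               // pretend many capabilities are set
      unsigned long kept_only= flags & CLIENT_LOCAL_FILES_DEMO;   // wipes all other bits
      unsigned long cleared=   flags & ~CLIENT_LOCAL_FILES_DEMO;  // clears just this bit
      std::printf("&= : %#lx   &= ~ : %#lx\n", kept_only, cleared);
      return 0;
    }
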
diff --git a/sql-common/my_time.c b/sql-common/my_time.c
index 46c84ac9ba7..4b5daf53bea 100644
--- a/sql-common/my_time.c
+++ b/sql-common/my_time.c
@@ -17,6 +17,8 @@
#include <my_time.h>
#include <m_string.h>
#include <m_ctype.h>
+/* Windows version of localtime_r() is declared in my_pthread.h */
+#include <my_pthread.h>
ulonglong log_10_int[20]=
{
@@ -35,6 +37,16 @@ static uchar internal_format_positions[]=
static char time_separator=':';
+static ulong const days_at_timestart=719528; /* daynr at 1970.01.01 */
+uchar days_in_month[]= {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 0};
+
+/*
+ Offset of system time zone from UTC in seconds used to speed up
+ work of my_system_gmt_sec() function.
+*/
+static long my_time_zone=0;
+
+
/*
Convert a timestamp string to a MYSQL_TIME value.
@@ -559,3 +571,158 @@ fractional:
}
+/*
+ Prepare offset of system time zone from UTC for my_system_gmt_sec() func.
+
+ SYNOPSIS
+ init_time()
+*/
+void init_time(void)
+{
+ time_t seconds;
+ struct tm *l_time,tm_tmp;
+ MYSQL_TIME my_time;
+ bool not_used;
+
+ seconds= (time_t) time((time_t*) 0);
+ localtime_r(&seconds,&tm_tmp);
+ l_time= &tm_tmp;
+ my_time_zone= 3600; /* Comp. for -3600 in my_gmt_sec */
+ my_time.year= (uint) l_time->tm_year+1900;
+ my_time.month= (uint) l_time->tm_mon+1;
+ my_time.day= (uint) l_time->tm_mday;
+ my_time.hour= (uint) l_time->tm_hour;
+ my_time.minute= (uint) l_time->tm_min;
+ my_time.second= (uint) l_time->tm_sec;
+ my_system_gmt_sec(&my_time, &my_time_zone, &not_used); /* Init my_time_zone */
+}
+
+
+ /* Calculate nr of days since year 0 in new date-system (from 1615) */
+
+long calc_daynr(uint year,uint month,uint day)
+{
+ long delsum;
+ int temp;
+ DBUG_ENTER("calc_daynr");
+
+ if (year == 0 && month == 0 && day == 0)
+ DBUG_RETURN(0); /* Skip errors */
+ if (year < 200)
+ {
+ if ((year=year+1900) < 1900+YY_PART_YEAR)
+ year+=100;
+ }
+ delsum= (long) (365L * year+ 31*(month-1) +day);
+ if (month <= 2)
+ year--;
+ else
+ delsum-= (long) (month*4+23)/10;
+ temp=(int) ((year/100+1)*3)/4;
+ DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld",
+ year+(month <= 2),month,day,delsum+year/4-temp));
+ DBUG_RETURN(delsum+(int) year/4-temp);
+} /* calc_daynr */
+
+
+/*
+ Convert time in MYSQL_TIME representation in system time zone to its
+ my_time_t form (number of seconds in UTC since beginning of Unix Epoch).
+
+ SYNOPSIS
+ my_system_gmt_sec()
+ t - time value to be converted
+ my_timezone - pointer to long where offset of system time zone
+ from UTC will be stored for caching
+ in_dst_time_gap - set to true if time falls into spring time-gap
+
+ NOTES
+ The idea is to cache the time zone offset from UTC (including daylight
+ saving time) for the next call to make things faster. But currently we
+ just calculate this offset during startup (by calling init_time()
+ function) and use it all the time.
+ Time value provided should be legal time value (e.g. '2003-01-01 25:00:00'
+ is not allowed).
+
+ RETURN VALUE
+ Time in UTC seconds since Unix Epoch representation.
+*/
+my_time_t
+my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap)
+{
+ uint loop;
+ time_t tmp;
+ struct tm *l_time,tm_tmp;
+ long diff, current_timezone;
+
+ /*
+ Calculate the gmt time based on current time and timezone
+ The -1 on the end is to ensure that if we have a date that exists twice
+ (like 2002-10-27 02:00:00 MET), we will find the initial date.
+
+ By doing -3600 we will have to call localtime_r() several times, but
+ I couldn't come up with a better way to get a repeatable result :(
+
+ We can't use mktime() as it's buggy on many platforms and not thread safe.
+ */
+ tmp=(time_t) (((calc_daynr((uint) t->year,(uint) t->month,(uint) t->day) -
+ (long) days_at_timestart)*86400L + (long) t->hour*3600L +
+ (long) (t->minute*60 + t->second)) + (time_t) my_time_zone -
+ 3600);
+ current_timezone= my_time_zone;
+
+ localtime_r(&tmp,&tm_tmp);
+ l_time=&tm_tmp;
+ for (loop=0;
+ loop < 2 &&
+ (t->hour != (uint) l_time->tm_hour ||
+ t->minute != (uint) l_time->tm_min);
+ loop++)
+ { /* One check should be enough ? */
+ /* Get difference in days */
+ int days= t->day - l_time->tm_mday;
+ if (days < -1)
+ days= 1; /* Month has wrapped */
+ else if (days > 1)
+ days= -1;
+ diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour)) +
+ (long) (60*((int) t->minute - (int) l_time->tm_min)));
+ current_timezone+= diff+3600; /* Compensate for -3600 above */
+ tmp+= (time_t) diff;
+ localtime_r(&tmp,&tm_tmp);
+ l_time=&tm_tmp;
+ }
+ /*
+ If we end up in the non-existent daylight saving time hour,
+ we move to the start of the next real hour.
+ */
+ if (loop == 2 && t->hour != (uint) l_time->tm_hour)
+ {
+ int days= t->day - l_time->tm_mday;
+ if (days < -1)
+ days=1; /* Month has wrapped */
+ else if (days > 1)
+ days= -1;
+ diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour))+
+ (long) (60*((int) t->minute - (int) l_time->tm_min)));
+ if (diff == 3600)
+ tmp+=3600 - t->minute*60 - t->second; /* Move to next hour */
+ else if (diff == -3600)
+ tmp-=t->minute*60 + t->second; /* Move to previous hour */
+
+ *in_dst_time_gap= 1;
+ }
+ *my_timezone= current_timezone;
+
+ return (my_time_t) tmp;
+} /* my_system_gmt_sec */
+
+
+/* Set MYSQL_TIME structure to 0000-00-00 00:00:00.000000 */
+
+void set_zero_time(MYSQL_TIME *tm)
+{
+ bzero((void*) tm, sizeof(*tm));
+ tm->time_type= MYSQL_TIMESTAMP_NONE;
+}
+
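
my_system_gmt_sec() above anchors everything on days_at_timestart (719528), which is exactly what calc_daynr() returns for 1970-01-01. A quick standalone re-check of that day-number arithmetic, assuming a four-digit year is passed so the YY_PART_YEAR two-digit handling can be left out:

    // Standalone re-check of the calc_daynr() arithmetic shown above.
    #include <cstdio>

    static long daynr(unsigned year, unsigned month, unsigned day)
    {
      long delsum= 365L * year + 31L * (month - 1) + day;
      if (month <= 2)
        year--;                                   // Jan/Feb count against the previous year
      else
        delsum-= (long) (month * 4 + 23) / 10;    // correct the 31-days-per-month assumption
      int temp= (int) ((year / 100 + 1) * 3) / 4; // leap days skipped in century years
      return delsum + (long) year / 4 - temp;
    }

    int main()
    {
      // Expect 719528, matching days_at_timestart in my_time.c.
      std::printf("1970-01-01 -> %ld\n", daynr(1970, 1, 1));
      return 0;
    }
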
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 175cc3786cf..7a55367c717 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -19,7 +19,7 @@
MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
-INCLUDES = @MT_INCLUDES@ \
+INCLUDES = @MT_INCLUDES@ @ZLIB_INCLUDES@ \
@bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \
-I$(top_srcdir)/include -I$(top_srcdir)/regex \
-I$(srcdir) $(openssl_includes)
@@ -30,14 +30,14 @@ noinst_PROGRAMS = gen_lex_hash
bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
LDADD = @isam_libs@ \
- ../myisam/libmyisam.a \
- ../myisammrg/libmyisammrg.a \
- ../heap/libheap.a \
- ../vio/libvio.a \
- ../mysys/libmysys.a \
- ../dbug/libdbug.a \
- ../regex/libregex.a \
- ../strings/libmystrings.a
+ $(top_builddir)/myisam/libmyisam.a \
+ $(top_builddir)/myisammrg/libmyisammrg.a \
+ $(top_builddir)/heap/libheap.a \
+ $(top_builddir)/vio/libvio.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/regex/libregex.a \
+ $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@bdb_libs@ @innodb_libs@ @pstack_libs@ \
@@ -59,9 +59,12 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
log_event.h sql_repl.h slave.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h tzfile.h \
- tztime.h examples/ha_example.h examples/ha_archive.h \
+ tztime.h \
sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \
- parse_file.h sql_view.h
+ parse_file.h sql_view.h \
+ examples/ha_example.h examples/ha_archive.h \
+ examples/ha_tina.h
+
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -93,13 +96,14 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sql_olap.cc sql_view.cc \
gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \
tztime.cc my_time.c \
- examples/ha_example.cc examples/ha_archive.cc \
sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \
- sp_cache.cc parse_file.cc
+ sp_cache.cc parse_file.cc \
+ examples/ha_example.cc examples/ha_archive.cc \
+ examples/ha_tina.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc
-mysql_tzinfo_to_sql_LDADD = $(LDADD) $(CXXLDFLAGS)
+mysql_tzinfo_to_sql_LDADD = @MYSQLD_EXTRA_LDFLAGS@ $(LDADD) $(CXXLDFLAGS)
DEFS = -DMYSQL_SERVER \
-DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
@@ -154,8 +158,8 @@ sql_lex.o: lex_hash.h
udf_example.so: udf_example.cc
$(CXXCOMPILE) -shared -o $@ $<
-#distclean:
-# rm -f lex_hash.h
+distclean:
+ rm -f lex_hash.h
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index 9b439087259..c004330932c 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -192,7 +192,7 @@ static int free_share(ARCHIVE_SHARE *share)
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
if (gzclose(share->archive_write) == Z_ERRNO)
- rc= -1;
+ rc= 1;
my_free((gptr) share, MYF(0));
}
pthread_mutex_unlock(&archive_mutex);
@@ -226,7 +226,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
{
(void)free_share(share); //We void since we already have an error
- DBUG_RETURN(-1);
+ DBUG_RETURN(errno ? errno : -1);
}
DBUG_RETURN(0);
@@ -234,56 +234,91 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
/*
- Closes the file. We first close this storage engines file handle to the
- archive and then remove our reference count to the table (and possibly
- free it as well).
- */
+ Closes the file.
+
+ SYNOPSIS
+ close();
+
+ IMPLEMENTATION:
+
+ We first close this storage engine's file handle to the archive and
+ then remove our reference count to the table (and possibly free it
+ as well).
+
+ RETURN
+ 0 ok
+ 1 Error
+*/
+
int ha_archive::close(void)
{
+ int rc= 0;
DBUG_ENTER("ha_archive::close");
- DBUG_RETURN(((gzclose(archive) == Z_ERRNO || free_share(share)) ? -1 : 0));
+
+ /* First close stream */
+ if (gzclose(archive) == Z_ERRNO)
+ rc= 1;
+ /* then also close share */
+ rc|= free_share(share);
+
+ DBUG_RETURN(rc);
}
/*
- We create our data file here. The format is pretty simple. The first bytes in
- any file are the version number. Currently we do nothing with this, but in
- the future this gives us the ability to figure out version if we change the
- format at all. After the version we starting writing our rows. Unlike other
- storage engines we do not "pack" our data. Since we are about to do a general
- compression, packing would just be a waste of CPU time. If the table has blobs
- they are written after the row in the order of creation.
+ We create our data file here. The format is pretty simple. The first
+ bytes in any file are the version number. Currently we do nothing
+ with this, but in the future this gives us the ability to figure out
+ version if we change the format at all. After the version we
+ start writing our rows. Unlike other storage engines we do not
+ "pack" our data. Since we are about to do a general compression,
+ packing would just be a waste of CPU time. If the table has blobs
+ they are written after the row in the order of creation.
+
So to read a row we:
Read the version
Read the record and copy it into buf
Loop through any blobs and read them
- */
-int ha_archive::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info)
+*/
+
+int ha_archive::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
{
File create_file;
char name_buff[FN_REFLEN];
size_t written;
+ int error;
DBUG_ENTER("ha_archive::create");
- if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- DBUG_RETURN(-1);
+ if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ {
+ error= my_errno;
+ goto err;
+ }
if ((archive= gzdopen(create_file, "ab")) == NULL)
{
+ error= errno;
delete_table(name);
- DBUG_RETURN(-1);
+ goto err;
}
version= ARCHIVE_VERSION;
written= gzwrite(archive, &version, sizeof(version));
- if (written != sizeof(version) || gzclose(archive))
+ if (gzclose(archive) || written != sizeof(version))
{
+ error= errno;
delete_table(name);
- DBUG_RETURN(-1);
+ goto err;
}
-
DBUG_RETURN(0);
+
+err:
+ /* Return error number, if we got one */
+ DBUG_RETURN(error ? error : -1);
}
+
/*
Look at ha_archive::open() for an explanation of the row format.
Here we just write out the row.
@@ -298,9 +333,9 @@ int ha_archive::write_row(byte * buf)
if (table->timestamp_default_now)
update_timestamp(buf+table->timestamp_default_now-1);
written= gzwrite(share->archive_write, buf, table->reclength);
- share->dirty= true;
+ share->dirty= TRUE;
if (written != table->reclength)
- DBUG_RETURN(-1);
+ DBUG_RETURN(errno ? errno : -1);
for (Field_blob **field=table->blob_field ; *field ; field++)
{
@@ -310,7 +345,7 @@ int ha_archive::write_row(byte * buf)
(*field)->get_ptr(&ptr);
written= gzwrite(share->archive_write, ptr, (unsigned)size);
if (written != size)
- DBUG_RETURN(-1);
+ DBUG_RETURN(errno ? errno : -1);
}
DBUG_RETURN(0);
@@ -322,6 +357,7 @@ int ha_archive::write_row(byte * buf)
that it is a table scan we rewind the file to the beginning, otherwise
we assume the position will be set.
*/
+
int ha_archive::rnd_init(bool scan)
{
DBUG_ENTER("ha_archive::rnd_init");
@@ -339,10 +375,10 @@ int ha_archive::rnd_init(bool scan)
If dirty, we lock, and then reset/flush the data.
I found that just calling gzflush() doesn't always work.
*/
- if (share->dirty == true)
+ if (share->dirty == TRUE)
{
pthread_mutex_lock(&share->mutex);
- if (share->dirty == true)
+ if (share->dirty == TRUE)
{
/* I was having problems with OSX, but it worked for 10.3 so I am wrapping this with an ifdef */
#ifdef BROKEN_GZFLUSH
@@ -350,12 +386,12 @@ int ha_archive::rnd_init(bool scan)
if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
{
pthread_mutex_unlock(&share->mutex);
- DBUG_RETURN(-1);
+ DBUG_RETURN(errno ? errno : -1);
}
#else
gzflush(share->archive_write, Z_SYNC_FLUSH);
#endif
- share->dirty= false;
+ share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
}
@@ -367,8 +403,8 @@ int ha_archive::rnd_init(bool scan)
if (scan)
{
read= gzread(archive, &version, sizeof(version));
- if (read == 0 || read != sizeof(version))
- DBUG_RETURN(-1);
+ if (read != sizeof(version))
+ DBUG_RETURN(errno ? errno : -1);
}
DBUG_RETURN(0);
@@ -393,7 +429,7 @@ int ha_archive::get_row(byte *buf)
DBUG_RETURN(HA_ERR_END_OF_FILE);
/* If the record is the wrong size, the file is probably damaged */
- if (read != table->reclength)
+ if ((ulong) read != table->reclength)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/* Calculate blob length, we use this for our buffer */
@@ -409,7 +445,7 @@ int ha_archive::get_row(byte *buf)
{
size_t size= (*field)->get_length();
read= gzread(archive, last, size);
- if (read == 0 || read != size)
+ if ((size_t) read != size)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
(*field)->set_ptr(size, last);
last += size;
@@ -417,19 +453,21 @@ int ha_archive::get_row(byte *buf)
DBUG_RETURN(0);
}
+
/*
Called during ORDER BY. Its position is either from being called sequentially
or by having had ha_archive::rnd_pos() called before it is called.
*/
+
int ha_archive::rnd_next(byte *buf)
{
- DBUG_ENTER("ha_archive::rnd_next");
int rc;
+ DBUG_ENTER("ha_archive::rnd_next");
statistic_increment(ha_read_rnd_next_count,&LOCK_status);
current_position= gztell(archive);
rc= get_row(buf);
- if (!(HA_ERR_END_OF_FILE == rc))
+ if (rc != HA_ERR_END_OF_FILE)
records++;
DBUG_RETURN(rc);
@@ -450,10 +488,12 @@ void ha_archive::position(const byte *record)
/*
- This is called after a table scan for each row if the results of the scan need
- to be ordered. It will take *pos and use it to move the cursor in the file so
- that the next row that is called is the correctly ordered row.
+ This is called after a table scan for each row if the results of the
+ scan need to be ordered. It will take *pos and use it to move the
+ cursor in the file so that the next row that is called is the
+ correctly ordered row.
*/
+
int ha_archive::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_archive::rnd_pos");
@@ -568,11 +608,8 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
return to;
}
-ha_rows ha_archive::records_in_range(int inx,
- const byte *start_key,uint start_key_len,
- enum ha_rkey_function start_search_flag,
- const byte *end_key,uint end_key_len,
- enum ha_rkey_function end_search_flag)
+ha_rows ha_archive::records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
{
DBUG_ENTER("ha_archive::records_in_range ");
DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND
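
The reworked ha_archive comments above describe the on-disk layout: a version marker written first, then each fixed-length row appended through gzwrite(), with blob contents following their row. A minimal sketch of that layout using plain zlib calls outside the handler (file name, row size and contents are made up for illustration):

    // Sketch of the ARCHIVE on-disk layout described above: version header, then rows.
    #include <zlib.h>
    #include <cstring>

    int main()
    {
      const char version= 1;                  // stand-in for ARCHIVE_VERSION
      char row[32];                           // stand-in for a fixed-length record buffer
      std::memset(row, 'x', sizeof(row));

      gzFile out= gzopen("demo.ARZ", "wb");   // the handler itself uses "ab"/"rb"
      if (!out)
        return 1;
      if (gzwrite(out, &version, sizeof(version)) != (int) sizeof(version) ||
          gzwrite(out, row, sizeof(row)) != (int) sizeof(row))
      {
        gzclose(out);
        return 1;
      }
      return gzclose(out) == Z_OK ? 0 : 1;
    }
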
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index 2fab80f0598..f08353a5d6c 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -86,7 +86,8 @@ public:
*/
virtual double scan_time() { return (double) (records) / 20.0+10; }
/* The next method will never be called */
- virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
+ virtual double read_time(uint index, uint ranges, ha_rows rows)
+ { return (double) rows / 20.0+1; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
@@ -109,10 +110,7 @@ public:
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);
- ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len,
- enum ha_rkey_function start_search_flag,
- const byte *end_key,uint end_key_len,
- enum ha_rkey_function end_search_flag);
+ ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
new file mode 100644
index 00000000000..06a19e478ae
--- /dev/null
+++ b/sql/examples/ha_tina.cc
@@ -0,0 +1,851 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ Make sure to look at ha_tina.h for more details.
+
+ First off, this is a play thing for me; there are a number of things wrong with it:
+ *) It was designed for csv and therefor its performance is highly questionable.
+ *) Indexes have not been implemented. This is because the files can be traded in
+ and out of the table directory without having to worry about rebuilding anything.
+ *) NULLs and "" are treated equally (like a spreadsheet).
+ *) There was in the beginning no point to anyone seeing this other than me, so there
+ is a good chance that I haven't quite documented it well.
+ *) Less design, more "make it work"
+
+ Now there are a few cool things with it:
+ *) Errors can result in corrupted data files.
+ *) Data files can be read by spreadsheets directly.
+
+TODO:
+ *) Move to a block system for larger files
+ *) Error recovery, it's all there, just need to finish it
+ *) Document how the chains work.
+
+ -Brian
+*/
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include "mysql_priv.h"
+
+#ifdef HAVE_CSV_DB
+
+#include "ha_tina.h"
+#include <sys/mman.h>
+
+/* Stuff for shares */
+pthread_mutex_t tina_mutex;
+static HASH tina_open_tables;
+static int tina_init= 0;
+
+/*****************************************************************************
+ ** TINA tables
+ *****************************************************************************/
+
+/*
+ Used for sorting chains.
+*/
+int sort_set (tina_set *a, tina_set *b)
+{
+ return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) );
+}
+
+static byte* tina_get_key(TINA_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length=share->table_name_length;
+ return (byte*) share->table_name;
+}
+
+/*
+ Reloads the mmap file.
+*/
+int get_mmap(TINA_SHARE *share, int write)
+{
+ DBUG_ENTER("ha_tina::get_mmap");
+ if (share->mapped_file && munmap(share->mapped_file, share->file_stat.st_size))
+ DBUG_RETURN(1);
+
+ if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
+ DBUG_RETURN(1);
+
+ if (share->file_stat.st_size)
+ {
+ if (write)
+ share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED,
+ share->data_file, 0);
+ else
+ share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
+ PROT_READ, MAP_PRIVATE,
+ share->data_file, 0);
+ if ((share->mapped_file ==(caddr_t)-1))
+ {
+ /*
+ Bad idea you think? See the problem is that nothing actually checks
+ the return value of ::rnd_init(), so tossing an error is about
+ it for us.
+ Never going to happen right? :)
+ */
+ my_message(errno, "Woops, blew up opening a mapped file", 0);
+ DBUG_ASSERT(0);
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ share->mapped_file= NULL;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Simple lock controls.
+*/
+static TINA_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ TINA_SHARE *share;
+ char *tmp_name;
+ uint length;
+
+ if (!tina_init)
+ {
+ /* Hijack a mutex for init'ing the storage engine */
+ pthread_mutex_lock(&LOCK_mysql_create_db);
+ if (!tina_init)
+ {
+ tina_init++;
+ VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST));
+ (void) hash_init(&tina_open_tables,system_charset_info,32,0,0,
+ (hash_get_key) tina_get_key,0,0);
+ }
+ pthread_mutex_unlock(&LOCK_mysql_create_db);
+ }
+ pthread_mutex_lock(&tina_mutex);
+ length=(uint) strlen(table_name);
+ if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
+ (byte*) table_name,
+ length)))
+ {
+ char data_file_name[FN_REFLEN];
+ if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_name, length+1,
+ NullS))
+ {
+ pthread_mutex_unlock(&tina_mutex);
+ return NULL;
+ }
+
+ share->use_count=0;
+ share->table_name_length=length;
+ share->table_name=tmp_name;
+ strmov(share->table_name,table_name);
+ fn_format(data_file_name, table_name, "", ".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ if (my_hash_insert(&tina_open_tables, (byte*) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
+
+ if ((share->data_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
+ goto error2;
+
+ /* We only use share->data_file for writing, so we scan to the end to append */
+ if (my_seek(share->data_file, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR)
+ goto error2;
+
+ share->mapped_file= NULL; // We don't know the state since we just allocated it
+ if (get_mmap(share, 0) > 0)
+ goto error3;
+ }
+ share->use_count++;
+ pthread_mutex_unlock(&tina_mutex);
+
+ return share;
+
+error3:
+ my_close(share->data_file,MYF(0));
+error2:
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+error:
+ pthread_mutex_unlock(&tina_mutex);
+ my_free((gptr) share, MYF(0));
+
+ return NULL;
+}
+
+
+/*
+ Free lock controls.
+*/
+static int free_share(TINA_SHARE *share)
+{
+ DBUG_ENTER("ha_tina::free_share");
+ pthread_mutex_lock(&tina_mutex);
+ int result_code= 0;
+ if (!--share->use_count){
+ /* Drop the mapped file */
+ if (share->mapped_file)
+ munmap(share->mapped_file, share->file_stat.st_size);
+ result_code= my_close(share->data_file,MYF(0));
+ hash_delete(&tina_open_tables, (byte*) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&tina_mutex);
+
+ DBUG_RETURN(result_code);
+}
+
+
+/*
+ Finds the end of a line.
+ Currently only supports files written on a UNIX OS.
+*/
+byte * find_eoln(byte *data, off_t begin, off_t end)
+{
+ for (off_t x= begin; x < end; x++)
+ if (data[x] == '\n')
+ return data + x;
+
+ return 0;
+}
+
+/*
+ Encode a buffer into the quoted format.
+*/
+int ha_tina::encode_quote(byte *buf)
+{
+ char attribute_buffer[1024];
+ String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin);
+
+ buffer.length(0);
+ for (Field **field=table->field ; *field ; field++)
+ {
+ const char *ptr;
+ const char *end_ptr;
+
+ (*field)->val_str(&attribute,&attribute);
+ ptr= attribute.ptr();
+ end_ptr= attribute.length() + ptr;
+
+ buffer.append('"');
+
+ while (ptr < end_ptr)
+ {
+ if (*ptr == '"')
+ {
+ buffer.append('\\');
+ buffer.append('"');
+ *ptr++;
+ }
+ else if (*ptr == '\r')
+ {
+ buffer.append('\\');
+ buffer.append('r');
+ *ptr++;
+ }
+ else if (*ptr == '\\')
+ {
+ buffer.append('\\');
+ buffer.append('\\');
+ *ptr++;
+ }
+ else if (*ptr == '\n')
+ {
+ buffer.append('\\');
+ buffer.append('n');
+ *ptr++;
+ }
+ else
+ buffer.append(*ptr++);
+ }
+ buffer.append('"');
+ buffer.append(',');
+ }
+ // Remove the comma, add a line feed
+ buffer.length(buffer.length() - 1);
+ buffer.append('\n');
+ //buffer.replace(buffer.length(), 0, "\n", 1);
+
+ return (buffer.length());
+}
+
+/*
+ chain_append() adds delete positions to the chain that we use to keep track of space.
+*/
+int ha_tina::chain_append()
+{
+ if ( chain_ptr != chain && (chain_ptr -1)->end == current_position)
+ (chain_ptr -1)->end= next_position;
+ else
+ {
+ /* We set up for the next position */
+ if ((off_t)(chain_ptr - chain) == (chain_size -1))
+ {
+ off_t location= chain_ptr - chain;
+ chain_size += DEFAULT_CHAIN_LENGTH;
+ if (chain_alloced)
+ {
+ /* Must cast since my_malloc unlike malloc doesn't have a void ptr */
+ if ((chain= (tina_set *)my_realloc((gptr)chain,chain_size,MYF(MY_WME))) == NULL)
+ return -1;
+ }
+ else
+ {
+ tina_set *ptr= (tina_set *)my_malloc(chain_size * sizeof(tina_set),MYF(MY_WME));
+ memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set));
+ chain= ptr;
+ chain_alloced++;
+ }
+ chain_ptr= chain + location;
+ }
+ chain_ptr->begin= current_position;
+ chain_ptr->end= next_position;
+ chain_ptr++;
+ }
+
+ return 0;
+}
+
+
+/*
+ Scans for a row.
+*/
+int ha_tina::find_current_row(byte *buf)
+{
+ byte *mapped_ptr= (byte *)share->mapped_file + current_position;
+ byte *end_ptr;
+ DBUG_ENTER("ha_tina::find_current_row");
+
+ /* EOF should be counted as new line */
+ if ((end_ptr= find_eoln(share->mapped_file, current_position, share->file_stat.st_size)) == 0)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ for (Field **field=table->field ; *field ; field++)
+ {
+ int x;
+ buffer.length(0);
+ mapped_ptr++; // Increment past the first quote
+ for(;mapped_ptr != end_ptr; mapped_ptr++)
+ {
+ //Need to convert line feeds!
+ if (*mapped_ptr == '"' &&
+ (((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) || (mapped_ptr == end_ptr -1 )))
+ {
+ mapped_ptr += 2; // Move past the , and the "
+ break;
+ }
+ if (*mapped_ptr == '\\' && mapped_ptr != (end_ptr - 1))
+ {
+ mapped_ptr++;
+ if (*mapped_ptr == 'r')
+ buffer.append('\r');
+ else if (*mapped_ptr == 'n' )
+ buffer.append('\n');
+ else if ((*mapped_ptr == '\\') || (*mapped_ptr == '"'))
+ buffer.append(*mapped_ptr);
+ else /* This could only happen with an externally created file */
+ {
+ buffer.append('\\');
+ buffer.append(*mapped_ptr);
+ }
+ }
+ else
+ buffer.append(*mapped_ptr);
+ }
+ (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
+ }
+ next_position= (end_ptr - share->mapped_file)+1;
+ /* Maybe use \N for null? */
+ memset(buf, 0, table->null_bytes); /* We do not implement nulls! */
+
+ DBUG_RETURN(0);
+}
+
+/*
+ If frm_error() is called in table.cc this is called to find out what file
+ extensions exist for this handler.
+*/
+const char **ha_tina::bas_ext() const
+{ static const char *ext[]= { ".CSV", NullS }; return ext; }
+
+
+/*
+ Open a database file. Keep in mind that tables are caches, so
+ this will not be called for every request. Any sort of positions
+ that need to be reset should be kept in the ::extra() call.
+*/
+int ha_tina::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_tina::open");
+
+ if (!(share= get_share(name, table)))
+ DBUG_RETURN(1);
+ thr_lock_data_init(&share->lock,&lock,NULL);
+ ref_length=sizeof(off_t);
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Close a database file. We remove ourselves from the shared structure.
+ If it is empty we destroy it and free the mapped file.
+*/
+int ha_tina::close(void)
+{
+ DBUG_ENTER("ha_tina::close");
+ DBUG_RETURN(free_share(share));
+}
+
+/*
+ This is an INSERT. At the moment this handler just seeks to the end
+ of the file and appends the data. In an error case it really should
+ just truncate to the original position (this is not done yet).
+*/
+int ha_tina::write_row(byte * buf)
+{
+ int size;
+ DBUG_ENTER("ha_tina::write_row");
+
+ statistic_increment(ha_write_count,&LOCK_status);
+
+ if (table->timestamp_default_now)
+ update_timestamp(buf+table->timestamp_default_now-1);
+
+ size= encode_quote(buf);
+
+ if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
+ DBUG_RETURN(-1);
+
+ /*
+ Ok, this means that we will be doing potentially bad things
+ during a bulk insert on some OS'es. What we need is a cleanup
+ call for ::write_row that would let us fix up everything after the bulk
+ insert. The archive handler does this with an extra mutex call, which
+ might be a solution for this.
+ */
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This is called for an update.
+ Make sure you put in code to increment the auto increment, also
+ update any timestamp data. Currently auto increment is not being
+ fixed since autoincrements have yet to be added to this table handler.
+ This will be called in a table scan right before the previous ::rnd_next()
+ call.
+*/
+int ha_tina::update_row(const byte * old_data, byte * new_data)
+{
+ int size;
+ DBUG_ENTER("ha_tina::update_row");
+
+ statistic_increment(ha_update_count,&LOCK_status);
+
+ if (table->timestamp_default_now)
+ update_timestamp(new_data+table->timestamp_default_now-1);
+
+ size= encode_quote(new_data);
+
+ if (chain_append())
+ DBUG_RETURN(-1);
+
+ if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Deletes a row. First the database will find the row, and then call this method.
+ In the case of a table scan, the previous call to this will be the ::rnd_next()
+ that found this row.
+ The exception to this is an ORDER BY. This will cause the table handler to walk
+ the table noting the positions of all rows that match a query. The table will
+ then be deleted/positioned based on the ORDER (so RANDOM, DESC, ASC).
+*/
+int ha_tina::delete_row(const byte * buf)
+{
+ DBUG_ENTER("ha_tina::delete_row");
+ statistic_increment(ha_delete_count,&LOCK_status);
+
+ if (chain_append())
+ DBUG_RETURN(-1);
+
+ --records;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Fill buf with value from key. Put simply, this is used for a single index read
+ with a key.
+*/
+int ha_tina::index_read(byte * buf, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_tina::index_read");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Fill buf with value from key. Put simply, this is used for a single index read
+ with a key.
+ Whatever the current key is we will use it. This is what will be in "index".
+*/
+int ha_tina::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_tina::index_read_idx");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+
+/*
+ Read the next position in the index.
+*/
+int ha_tina::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_next");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Read the previous position in the index.
+*/
+int ha_tina::index_prev(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_prev");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Read the first position in the index
+*/
+int ha_tina::index_first(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_first");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Read the last position in the index
+ With this we don't need to do a filesort() with index.
+ We just read the last row and call previous.
+*/
+int ha_tina::index_last(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_last");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ All table scans call this first.
+ The order of a table scan is:
+
+ ha_tina::store_lock
+ ha_tina::external_lock
+ ha_tina::info
+ ha_tina::rnd_init
+ ha_tina::extra
+ ENUM HA_EXTRA_CACHE Cache record in HA_rrnd()
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::extra
+ ENUM HA_EXTRA_NO_CACHE End caching of records (def)
+ ha_tina::external_lock
+ ha_tina::extra
+ ENUM HA_EXTRA_RESET Reset database to after open
+
+ Each call to ::rnd_next() represents a row returned in the scan. When no more
+ rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE.
+ The ::info() call is just for the optimizer.
+
+*/
+
+int ha_tina::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_tina::rnd_init");
+
+ current_position= next_position= 0;
+ records= 0;
+ chain_ptr= chain;
+ (void)madvise(share->mapped_file,share->file_stat.st_size,MADV_SEQUENTIAL);
+
+ DBUG_RETURN(0);
+}
+
+/*
+ ::rnd_next() does all the heavy lifting for a table scan. You will need to populate *buf
+ with the correct field data. You can walk the field to determine at what position you
+ should store the data (take a look at how ::find_current_row() works). The structure
+ is something like:
+ 0Foo Dog Friend
+ The first offset is for the first attribute. All space before that is reserved for null count.
+ Basically this works as a mask for which rows are nulled (compared to just empty).
+ This table handler doesn't do nulls and does not know the difference between NULL and "". This
+ is ok since this table handler is for spreadsheets and they don't know about them either :)
+*/
+int ha_tina::rnd_next(byte *buf)
+{
+ DBUG_ENTER("ha_tina::rnd_next");
+
+ statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+
+ current_position= next_position;
+ if (!share->mapped_file)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ if (HA_ERR_END_OF_FILE == find_current_row(buf) )
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ records++;
+ DBUG_RETURN(0);
+}
+
+/*
+ In the case of an ORDER BY, rows will need to be sorted.
+ ::position() is called after each call to ::rnd_next(),
+ and the data it stores is a byte array. You can store this
+ data via ha_store_ptr(). ref_length is a variable defined in the
+ class that is the sizeof() of the position being stored. In our case
+ it's just a position. Look at the bdb code if you want to see a case
+ where something other than a number is stored.
+*/
+void ha_tina::position(const byte *record)
+{
+ DBUG_ENTER("ha_tina::position");
+ ha_store_ptr(ref, ref_length, current_position);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Used to fetch a row from a position stored with ::position().
+ ha_get_ptr() retrieves the data for you.
+*/
+
+int ha_tina::rnd_pos(byte * buf, byte *pos)
+{
+ DBUG_ENTER("ha_tina::rnd_pos");
+ statistic_increment(ha_read_rnd_count,&LOCK_status);
+ current_position= ha_get_ptr(pos,ref_length);
+ DBUG_RETURN(find_current_row(buf));
+}
+
+/*
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ really needed. SHOW also makes use of this data
+*/
+void ha_tina::info(uint flag)
+{
+ DBUG_ENTER("ha_tina::info");
+ /* This is a lie, but you don't want the optimizer to see zero or 1 */
+ if (records < 2)
+ records= 2;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Grab bag of flags that are sent to the table handler every so often.
+ HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called.
+ You are not required to implement any of these.
+*/
+int ha_tina::extra(enum ha_extra_function operation)
+{
+ DBUG_ENTER("ha_tina::extra");
+ DBUG_RETURN(0);
+}
+
+/*
+ This is no longer used.
+*/
+int ha_tina::reset(void)
+{
+ DBUG_ENTER("ha_tina::reset");
+ ha_tina::extra(HA_EXTRA_RESET);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Called after deletes, inserts, and updates. This is where we clean up all of
+ the dead space we have collected while writing the file.
+*/
+int ha_tina::rnd_end()
+{
+ DBUG_ENTER("ha_tina::rnd_end");
+
+ /* First position will be truncate position, second will be increment */
+ if ((chain_ptr - chain) > 0)
+ {
+ tina_set *ptr;
+ off_t length;
+
+ /*
+ Setting up writable map, this will contain all of the data after the
+ get_mmap call that we have added to the file.
+ */
+ if (get_mmap(share, 1) > 0)
+ DBUG_RETURN(-1);
+ length= share->file_stat.st_size;
+
+ /*
+ The sort handles updates/deletes with random orders.
+ It also sorts so that we move the final blocks to the
+ beginning so that we move the smallest amount of data possible.
+ */
+ qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), (qsort_cmp)sort_set);
+ for (ptr= chain; ptr < chain_ptr; ptr++)
+ printf("Chain %d, %d\n", (int)ptr->begin, (int)ptr->end);
+ for (ptr= chain; ptr < chain_ptr; ptr++)
+ {
+ //memmove(share->mapped_file + ptr->begin, share->mapped_file
+ //+ ptr->end, length - (size_t)ptr->end);
+ /* We peek ahead to see if this is the last chain */
+ printf("Delete %d, %d, %d\n", (int)ptr->begin, (int)ptr->end, (int)length);
+ if (ptr+1 == chain_ptr)
+ {
+ printf("Shiftina(end) %d(%d) to %d\n", (int)ptr->end, (int)(length - (size_t)ptr->end), (int)ptr->begin);
+ memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
+ length - (size_t)ptr->end);
+ }
+ else
+ {
+ printf("Shifting %d(%d) to %d\n", (int)ptr->end, (int)((ptr++)->begin - (size_t)ptr->end), (int)ptr->begin);
+ memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
+ (size_t)(ptr++)->begin - (size_t)ptr->end);
+ }
+ length= length - (size_t)(ptr->end - ptr->begin);
+ }
+ printf("Buffer %s\n",share->mapped_file);
+
+ /* Truncate the file to the new size */
+ if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
+ DBUG_RETURN(-1);
+
+ if (munmap(share->mapped_file, length))
+ DBUG_RETURN(-1);
+
+ /* We set it to null so that get_mmap() won't try to unmap it */
+ share->mapped_file= NULL;
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+ }
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Truncate table and others of its ilk call this.
+*/
+int ha_tina::delete_all_rows()
+{
+ DBUG_ENTER("ha_tina::delete_all_rows");
+
+ int rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
+
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+
+ DBUG_RETURN(rc);
+}
+
+/*
+ Always called at the start of a transaction (or by "lock tables");
+*/
+int ha_tina::external_lock(THD *thd, int lock_type)
+{
+ DBUG_ENTER("ha_tina::external_lock");
+ DBUG_RETURN(0); // No external locking
+}
+
+/*
+ Called by the database to lock the table. Keep in mind that this
+ is an internal lock.
+*/
+THR_LOCK_DATA **ha_tina::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ lock.type=lock_type;
+ *to++= &lock;
+ return to;
+}
+
+/*
+ Range optimizer calls this.
+ I need to update the information on this.
+*/
+ha_rows ha_tina::records_in_range(int inx,
+ const byte *start_key,uint start_key_len,
+ enum ha_rkey_function start_search_flag,
+ const byte *end_key,uint end_key_len,
+ enum ha_rkey_function end_search_flag)
+{
+ DBUG_ENTER("ha_tina::records_in_range ");
+ DBUG_RETURN(records); // Good guess
+}
+
+
+/*
+ Create a table. You do not want to leave the table open after a call to
+ this (the database will call ::open() if it needs to).
+*/
+
+int ha_tina::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info)
+{
+ char name_buff[FN_REFLEN];
+ File create_file;
+ DBUG_ENTER("ha_tina::create");
+
+ if ((create_file= my_create(fn_format(name_buff,name,"",".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ DBUG_RETURN(-1);
+
+ my_close(create_file,MYF(0));
+
+ DBUG_RETURN(0);
+}
+
+#endif /* enable CSV */
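
The new ha_tina handler stores every column as a double-quoted CSV field, escaping embedded quotes, backslashes and CR/LF with a backslash (see encode_quote() and the matching decode loop in find_current_row() above). A small standalone sketch of that quoting rule — not the handler's code, just the same escape table:

    // Sketch of the quoting scheme encode_quote() applies to each field value.
    #include <string>
    #include <cstdio>

    static std::string tina_quote(const std::string &value)
    {
      std::string out= "\"";
      for (char c : value)
      {
        switch (c)
        {
        case '"':  out+= "\\\""; break;   // embedded quote
        case '\r': out+= "\\r";  break;   // carriage return
        case '\n': out+= "\\n";  break;   // line feed (rows are newline-terminated)
        case '\\': out+= "\\\\"; break;   // backslash itself
        default:   out+= c;
        }
      }
      out+= '"';
      return out;
    }

    int main()
    {
      // Two hypothetical column values joined the way a row would be written.
      std::string row= tina_quote("plain") + "," + tina_quote("has \"quotes\"\n") + "\n";
      std::fputs(row.c_str(), stdout);
      return 0;
    }
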
diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h
new file mode 100644
index 00000000000..67a907fddb6
--- /dev/null
+++ b/sql/examples/ha_tina.h
@@ -0,0 +1,132 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <my_dir.h>
+
+#define DEFAULT_CHAIN_LENGTH 512
+
+typedef struct st_tina_share {
+ char *table_name;
+ byte *mapped_file; /* mapped region of file */
+ uint table_name_length,use_count;
+ MY_STAT file_stat; /* Stat information for the data file */
+ File data_file; /* Current open data file */
+ pthread_mutex_t mutex;
+ THR_LOCK lock;
+} TINA_SHARE;
+
+typedef struct tina_set {
+ off_t begin;
+ off_t end;
+} tina_set;
+
+class ha_tina: public handler
+{
+ THR_LOCK_DATA lock; /* MySQL lock */
+ TINA_SHARE *share; /* Shared lock info */
+ off_t current_position; /* Current position in the file during a file scan */
+ off_t next_position; /* Next position in the file scan */
+ byte byte_buffer[IO_SIZE];
+ String buffer;
+ tina_set chain_buffer[DEFAULT_CHAIN_LENGTH];
+ tina_set *chain;
+ tina_set *chain_ptr;
+ byte chain_alloced;
+ uint32 chain_size;
+
+ public:
+ ha_tina(TABLE *table): handler(table),
+ /*
+ These definitions are found in handler.h
+ These are probably not completely right.
+ */
+ current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
+ {
+ /* Set our original buffers from pre-allocated memory */
+ buffer.set(byte_buffer, IO_SIZE, system_charset_info);
+ chain = chain_buffer;
+ }
+ ~ha_tina()
+ {
+ if (chain_alloced)
+ my_free((gptr)chain,0);
+ }
+ const char *table_type() const { return "CSV"; }
+ const char *index_type(uint inx) { return "NONE"; }
+ const char **bas_ext() const;
+ ulong table_flags() const
+ {
+ return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT |
+ HA_NO_AUTO_INCREMENT );
+ }
+ ulong index_flags(uint idx, uint part, bool all_parts) const
+ {
+ /* We will never have indexes so this will never be called(AKA we return zero) */
+ return 0;
+ }
+ uint max_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_keys() const { return 0; }
+ uint max_key_parts() const { return 0; }
+ uint max_key_length() const { return 0; }
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ /* The next method will never be called */
+ virtual double read_time(ha_rows rows) { DBUG_ASSERT(0); return((double) rows / 20.0+1); }
+ virtual bool fast_key_read() { return 1;}
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+ int write_row(byte * buf);
+ int update_row(const byte * old_data, byte * new_data);
+ int delete_row(const byte * buf);
+ int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_next(byte * buf);
+ int index_prev(byte * buf);
+ int index_first(byte * buf);
+ int index_last(byte * buf);
+ int rnd_init(bool scan=1);
+ int rnd_next(byte *buf);
+ int rnd_pos(byte * buf, byte *pos);
+ int rnd_end();
+ void position(const byte *record);
+ void info(uint);
+ int extra(enum ha_extra_function operation);
+ int reset(void);
+ int external_lock(THD *thd, int lock_type);
+ int delete_all_rows(void);
+ ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len,
+ enum ha_rkey_function start_search_flag,
+ const byte *end_key,uint end_key_len,
+ enum ha_rkey_function end_search_flag);
+// int delete_table(const char *from);
+// int rename_table(const char * from, const char * to);
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type);
+
+ /* The following methods were added just for TINA */
+ int encode_quote(byte *buf);
+ int find_current_row(byte *buf);
+ int chain_append();
+};
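
ha_tina.h's tina_set chain records byte ranges invalidated by UPDATE/DELETE; ha_tina::rnd_end() later sorts that chain and memmove()s the surviving bytes down before truncating the .CSV file. A compact standalone sketch of that idea (not the handler's exact loop, which shifts the whole tail after every hole):

    // Sketch of the hole-compaction idea behind ha_tina::rnd_end(): sorted [begin,end)
    // dead ranges are overwritten by the live bytes after them, then the file shrinks.
    #include <cstring>
    #include <cstdio>
    #include <vector>
    #include <algorithm>

    struct hole { size_t begin, end; };          // mirrors tina_set, size_t for the demo

    static size_t compact(char *data, size_t length, std::vector<hole> holes)
    {
      std::sort(holes.begin(), holes.end(),
                [](const hole &a, const hole &b) { return a.begin < b.begin; });
      size_t shift= 0;
      for (size_t i= 0; i < holes.size(); i++)
      {
        size_t live_end= (i + 1 < holes.size()) ? holes[i + 1].begin : length;
        std::memmove(data + holes[i].begin - shift, data + holes[i].end,
                     live_end - holes[i].end);
        shift+= holes[i].end - holes[i].begin;
      }
      return length - shift;                     // new file length after truncation
    }

    int main()
    {
      char buf[]= "row1\nDEAD\nrow3\n";
      size_t len= compact(buf, sizeof(buf) - 1, {{5, 10}});
      std::fwrite(buf, 1, len, stdout);          // prints "row1" and "row3" lines only
      return 0;
    }
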
diff --git a/sql/field.cc b/sql/field.cc
index c96a5a6d809..5356fbc773a 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -46,6 +46,8 @@ template class List_iterator<create_field>;
uchar Field_null::null[1]={1};
const char field_separator=',';
+#define DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE 320
+
/*****************************************************************************
Static help functions
*****************************************************************************/
@@ -876,7 +878,7 @@ int Field_decimal::store(double nr)
reg4 uint i,length;
char fyllchar,*to;
- char buff[320];
+ char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
fyllchar = zerofill ? (char) '0' : (char) ' ';
#ifdef HAVE_SNPRINTF
@@ -1758,6 +1760,7 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
len-= tmp;
from+= tmp;
my_errno=0;
+
if (unsigned_flag)
{
if (!len || *from == '-')
@@ -1774,6 +1777,34 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
if (error ||
(from+len != end && table->in_use->count_cuted_fields &&
!test_if_int(from,len,end,cs)))
+ error= 1;
+#if SIZEOF_LONG > 4
+ if (unsigned_flag)
+ {
+ if ((ulong) tmp > UINT_MAX32)
+ {
+ tmp= UINT_MAX32;
+ error= 1;
+ my_errno=ERANGE;
+ }
+ }
+ else
+ {
+ if (tmp > INT_MAX32)
+ {
+ tmp= INT_MAX32;
+ error= 1;
+ my_errno=ERANGE;
+ }
+ else if (tmp < INT_MIN32)
+ {
+ tmp= INT_MIN32;
+ error= 1;
+ my_errno=ERANGE;
+ }
+ }
+#endif
+ if (error)
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1);
error= 1;
@@ -2695,7 +2726,7 @@ String *Field_double::val_str(String *val_buffer,
#endif
doubleget(nr,ptr);
- uint to_length=max(field_length,320);
+ uint to_length=max(field_length, DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE);
val_buffer->alloc(to_length);
char *to=(char*) val_buffer->ptr();
@@ -2707,7 +2738,8 @@ String *Field_double::val_str(String *val_buffer,
else
{
#ifdef HAVE_FCONVERT
- char buff[320],*pos=buff;
+ char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
+ char *pos= buff;
int decpt,sign,tmp_dec=dec;
VOID(fconvert(nr,tmp_dec,&decpt,&sign,buff));
@@ -2877,7 +2909,8 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg,
:Field_str(ptr_arg, 19, (uchar*) 0,0,
unireg_check_arg, field_name_arg, table_arg, cs)
{
- flags|=ZEROFILL_FLAG; /* 4.0 MYD compatibility */
+ /* For 4.0 MYD and 4.0 InnoDB compatibility */
+ flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
if (table && !table->timestamp_field &&
unireg_check != NONE)
{
@@ -4231,13 +4264,40 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
}
-int Field_string::store(double nr)
+/*
+ Store double value in Field_string or Field_varstring.
+
+ SYNOPSIS
+ store(double nr)
+ nr number
+
+ DESCRIPTION
+ Pretty prints double number into field_length characters buffer.
+*/
+
+int Field_str::store(double nr)
{
- char buff[MAX_FIELD_WIDTH],*end;
- int width=min(field_length,DBL_DIG+5);
- sprintf(buff,"%-*.*g",width,max(width-5,0),nr);
- end=strcend(buff,' ');
- return Field_string::store(buff,(uint) (end - buff), &my_charset_bin);
+ char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
+ uint length;
+ bool use_scientific_notation= TRUE;
+ if (field_length < 32 && fabs(nr) < log_10[field_length]-1)
+   use_scientific_notation= FALSE;
+ length= (uint) my_sprintf(buff, (buff, "%-.*g",
+ (use_scientific_notation ?
+ max(0, (int)field_length-5) :
+ field_length),
+ nr));
+ /*
+ +1 below is because "precision" in %g above means the
+ max. number of significant digits, not the output width.
+ Thus the width can be larger than number of significant digits by 1
+ (for the decimal point).
+ The test for field_length < 5 is for extreme cases,
+ like inserting 500.0 in char(1)
+ */
+ DBUG_ASSERT(field_length < 5 || length <= field_length+1);
+ return store((const char *)buff, min(length, field_length), charset());
}
@@ -4313,7 +4373,7 @@ void Field_string::sql_type(String &res) const
(field_length > 3 &&
(table->db_options_in_use &
HA_OPTION_PACK_RECORD) ?
- (has_charset() ? "varchar" : "varbinary") :
+ (has_charset() ? "varchar" : "varbinary") :
(has_charset() ? "char" : "binary")),
(int) field_length / charset()->mbmaxlen);
res.length(length);
@@ -4332,6 +4392,21 @@ char *Field_string::pack(char *to, const char *from, uint max_length)
}
+char *Field_string::pack_key(char *to, const char *from, uint max_length)
+{
+ uint length= min(field_length,max_length);
+ uint char_length= max_length/field_charset->mbmaxlen;
+ if (length > char_length)
+ char_length= my_charpos(field_charset, from, from+length, char_length);
+ set_if_smaller(length, char_length);
+ while (length && from[length-1] == ' ')
+ length--;
+ *to= (uchar)length;
+ memcpy(to+1, from, length);
+ return to+1+length;
+}
+
+
const char *Field_string::unpack(char *to, const char *from)
{
uint length= (uint) (uchar) *from++;
@@ -4409,16 +4484,6 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
}
-int Field_varstring::store(double nr)
-{
- char buff[MAX_FIELD_WIDTH],*end;
- int width=min(field_length,DBL_DIG+5);
- sprintf(buff,"%-*.*g",width,max(width-5,0),nr);
- end=strcend(buff,' ');
- return Field_varstring::store(buff,(uint) (end - buff), &my_charset_bin);
-}
-
-
int Field_varstring::store(longlong nr)
{
char buff[64];
@@ -4505,6 +4570,24 @@ char *Field_varstring::pack(char *to, const char *from, uint max_length)
}
+char *Field_varstring::pack_key(char *to, const char *from, uint max_length)
+{
+ uint length=uint2korr(from);
+ uint char_length= (field_charset->mbmaxlen > 1) ?
+ max_length/field_charset->mbmaxlen : max_length;
+ from+=HA_KEY_BLOB_LENGTH;
+ if (length > char_length)
+ char_length= my_charpos(field_charset, from, from+length, char_length);
+ set_if_smaller(length, char_length);
+ *to++= (char) (length & 255);
+ if (max_length > 255)
+ *to++= (char) (length >> 8);
+ if (length)
+ memcpy(to, from, length);
+ return to+length;
+}
+
+
const char *Field_varstring::unpack(char *to, const char *from)
{
uint length;
@@ -5080,16 +5163,17 @@ char *Field_blob::pack_key(char *to, const char *from, uint max_length)
char *save=ptr;
ptr=(char*) from;
uint32 length=get_length(); // Length of from string
- if (length > max_length)
- length=max_length;
+ uint char_length= (field_charset->mbmaxlen > 1) ?
+ max_length/field_charset->mbmaxlen : max_length;
+ if (length)
+ get_ptr((char**) &from);
+ if (length > char_length)
+ char_length= my_charpos(field_charset, from, from+length, char_length);
+ set_if_smaller(length, char_length);
*to++= (uchar) length;
if (max_length > 255) // 2 byte length
*to++= (uchar) (length >> 8);
- if (length)
- {
- get_ptr((char**) &from);
- memcpy(to, from, length);
- }
+ memcpy(to, from, length);
ptr=save; // Restore org row pointer
return to+length;
}
@@ -5666,6 +5750,10 @@ void create_field::create_length_to_internal_length(void)
pack_length= calc_pack_length(sql_type == FIELD_TYPE_VAR_STRING ?
FIELD_TYPE_STRING : sql_type, length);
break;
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ length*= charset->mbmaxlen;
+ break;
default:
/* do nothing */
break;
diff --git a/sql/field.h b/sql/field.h
index 24faee9d314..e12dd60c13b 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -38,7 +38,7 @@ class Field
public:
static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); }
static void operator delete(void *ptr_arg, size_t size) {
-#ifdef PEDANTIC_SAFEMALLOC
+#ifdef SAFEMALLOC
bfill(ptr_arg, size, 0x8F);
#endif
}
@@ -343,18 +343,21 @@ public:
struct st_table *table_arg,CHARSET_INFO *charset)
:Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg)
- {
+ {
field_charset=charset;
if (charset->state & MY_CS_BINSORT)
flags|=BINARY_FLAG;
}
Item_result result_type () const { return STRING_RESULT; }
uint decimals() const { return NOT_FIXED_DEC; }
+ int store(double nr);
+ int store(longlong nr)=0;
+ int store(const char *to,uint length,CHARSET_INFO *cs)=0;
void make_field(Send_field *);
uint size_of() const { return sizeof(*this); }
CHARSET_INFO *charset(void) const { return field_charset; }
void set_charset(CHARSET_INFO *charset) { field_charset=charset; }
- bool binary() const { return field_charset->state & MY_CS_BINSORT ? 1 : 0; }
+ bool binary() const { return field_charset == &my_charset_bin; }
uint32 max_length() { return field_length; }
friend class create_field;
};
@@ -904,8 +907,8 @@ public:
bool zero_pack() const { return 0; }
void reset(void) { charset()->cset->fill(charset(),ptr,field_length,' '); }
int store(const char *to,uint length,CHARSET_INFO *charset);
- int store(double nr);
int store(longlong nr);
+ int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -913,6 +916,7 @@ public:
void sort_string(char *buff,uint length);
void sql_type(String &str) const;
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
+ char *pack_key(char *to, const char *from, uint max_length);
const char *unpack(char* to, const char *from);
int pack_cmp(const char *a,const char *b,uint key_length);
int pack_cmp(const char *b,uint key_length);
@@ -950,8 +954,8 @@ public:
uint32 pack_length() const { return (uint32) field_length+2; }
uint32 key_length() const { return (uint32) field_length; }
int store(const char *to,uint length,CHARSET_INFO *charset);
- int store(double nr);
int store(longlong nr);
+ int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -961,6 +965,7 @@ public:
void set_key_image(char *buff,uint length, CHARSET_INFO *cs);
void sql_type(String &str) const;
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
+ char *pack_key(char *to, const char *from, uint max_length);
const char *unpack(char* to, const char *from);
int pack_cmp(const char *a, const char *b, uint key_length);
int pack_cmp(const char *b, uint key_length);
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index e98068ef974..d7993939092 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -340,8 +340,10 @@ static void do_cut_string(Copy_field *copy)
static void do_expand_string(Copy_field *copy)
{
+ CHARSET_INFO *cs= copy->from_field->charset();
memcpy(copy->to_ptr,copy->from_ptr,copy->from_length);
- bfill(copy->to_ptr+copy->from_length,copy->to_length-copy->from_length,' ');
+ cs->cset->fill(cs, copy->to_ptr+copy->from_length,
+ copy->to_length-copy->from_length, ' ');
}
static void do_varstring(Copy_field *copy)
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 7a445ed8c4d..0bbdf84c8d6 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -362,7 +362,7 @@ static int get_options(int argc, char **argv)
{
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+ if ((ho_error= handle_options(&argc, &argv, my_long_options, get_one_option)))
exit(ho_error);
if (argc >= 1)
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 00df84e3797..b4f07073afa 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -357,9 +357,11 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const
case HA_KEYTYPE_VARTEXT:
/*
As BDB stores only one copy of equal strings, we can't use key read
- on these
+ on these. Binary collations do support key read though.
*/
- flags&= ~HA_KEYREAD_ONLY;
+ if (!(table->key_info[idx].key_part[i].field->charset()->state
+ & MY_CS_BINSORT))
+ flags&= ~HA_KEYREAD_ONLY;
break;
default: // Keep compiler happy
break;
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index cc828b6e6b2..d7327362286 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -360,7 +360,8 @@ THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
int ha_heap::delete_table(const char *name)
{
- int error=heap_delete_table(name);
+ char buff[FN_REFLEN];
+ int error= heap_delete_table(fn_format(buff,name,"","",4+2));
return error == ENOENT ? 0 : error;
}
@@ -429,7 +430,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
{
if (!f_is_packed(flag) &&
f_packtype(flag) == (int) FIELD_TYPE_DECIMAL &&
- !(flag & FIELDFLAG_BINARY))
+ !(field->charset() == &my_charset_bin))
seg->type= (int) HA_KEYTYPE_TEXT;
else
seg->type= (int) HA_KEYTYPE_BINARY;
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 700b8fafe19..3003425a489 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -40,6 +40,7 @@ have disables the InnoDB inlining in this file. */
#include <m_ctype.h>
#include <hash.h>
#include <myisampack.h>
+#include <mysys_err.h>
#define MAX_ULONG_BIT ((ulong) 1 << (sizeof(ulong)*8-1))
@@ -117,6 +118,8 @@ my_bool innobase_log_archive = FALSE;/* unused */
my_bool innobase_use_native_aio = FALSE;
my_bool innobase_fast_shutdown = TRUE;
my_bool innobase_file_per_table = FALSE;
+my_bool innobase_locks_unsafe_for_binlog = FALSE;
+my_bool innobase_create_status_file = FALSE;
static char *internal_innobase_data_file_path = NULL;
@@ -134,6 +137,10 @@ char innodb_dummy_stmt_trx_handle = 'D';
static HASH innobase_open_tables;
+#ifdef __NETWARE__ /* some special cleanup for NetWare */
+bool nw_panic = FALSE;
+#endif
+
static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)));
static INNOBASE_SHARE *get_share(const char *table_name);
@@ -292,8 +299,9 @@ convert_error_code_to_mysql(
} else if (error == (int) DB_CANNOT_DROP_CONSTRAINT) {
- return(HA_ERR_ROW_IS_REFERENCED);
-
+ return(HA_ERR_CANNOT_ADD_FOREIGN); /* TODO: This is a bit
+ misleading, a new MySQL error
+ code should be introduced */
} else if (error == (int) DB_COL_APPEARS_TWICE_IN_INDEX) {
return(HA_ERR_CRASHED);
@@ -419,6 +427,50 @@ innobase_mysql_print_thd(
}
/*************************************************************************
+Creates a temporary file. */
+extern "C"
+int
+innobase_mysql_tmpfile(void)
+/*========================*/
+ /* out: temporary file descriptor, or < 0 on error */
+{
+ char filename[FN_REFLEN];
+ int fd2 = -1;
+ File fd = create_temp_file(filename, NullS, "ib",
+#ifdef __WIN__
+ O_BINARY | O_TRUNC | O_SEQUENTIAL |
+ O_TEMPORARY | O_SHORT_LIVED |
+#endif /* __WIN__ */
+ O_CREAT | O_EXCL | O_RDWR,
+ MYF(MY_WME));
+ if (fd >= 0) {
+#ifndef __WIN__
+ /* On Windows, open files cannot be removed, but files can be
+ created with the O_TEMPORARY flag to the same effect
+ ("delete on close"). */
+ unlink(filename);
+#endif /* !__WIN__ */
+ /* Copy the file descriptor, so that the additional resources
+ allocated by create_temp_file() can be freed by invoking
+ my_close().
+
+ Because the file descriptor returned by this function
+ will be passed to fdopen(), it will be closed by invoking
+ fclose(), which in turn will invoke close() instead of
+ my_close(). */
+ fd2 = dup(fd);
+ if (fd2 < 0) {
+ DBUG_PRINT("error",("Got error %d on dup",fd2));
+ my_errno=errno;
+ my_error(EE_OUT_OF_FILERESOURCES,
+ MYF(ME_BELL+ME_WAITTANG), filename, my_errno);
+ }
+ my_close(fd, MYF(MY_WME));
+ }
+ return(fd2);
+}
+
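For illustration only, not part of this changeset: a minimal sketch of the dup()-before-my_close() idea used above, with hypothetical names. The duplicated descriptor survives closing the original, so it can later be wrapped by fdopen() and released by fclose() without going through my_close().

#include <stdio.h>
#include <unistd.h>

static FILE *stream_from_tracked_fd(int tracked_fd)
{
  int fd2= dup(tracked_fd);        /* independent copy for stdio use   */
  close(tracked_fd);               /* release the original descriptor  */
  if (fd2 < 0)
    return NULL;
  return fdopen(fd2, "w+");        /* caller ends with fclose()        */
}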
+/*************************************************************************
Gets the InnoDB transaction handle for a MySQL handler object, creates
an InnoDB transaction struct if the corresponding MySQL thread struct still
lacks one. */
@@ -910,8 +962,10 @@ innobase_init(void)
srv_fast_shutdown = (ibool) innobase_fast_shutdown;
srv_file_per_table = (ibool) innobase_file_per_table;
+ srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog;
srv_max_n_open_files = (ulint) innobase_open_files;
+ srv_innodb_status = (ibool) innobase_create_status_file;
srv_print_verbose_log = mysql_embedded ? 0 : 1;
@@ -982,6 +1036,11 @@ innobase_end(void)
DBUG_ENTER("innobase_end");
+#ifdef __NETWARE__ /* some special cleanup for NetWare */
+ if (nw_panic) {
+ set_panic_flag_for_netware();
+ }
+#endif
if (innodb_inited)
{
innodb_inited= 0;
@@ -3645,11 +3704,19 @@ ha_innobase::create(
}
if (current_thd->query != NULL) {
-
- error = row_table_add_foreign_constraints(trx,
- current_thd->query, norm_name);
- error = convert_error_code_to_mysql(error, NULL);
+ LEX_STRING q;
+ if (thd->convert_string(&q, system_charset_info,
+ current_thd->query,
+ current_thd->query_length,
+ current_thd->charset())) {
+ error = HA_ERR_OUT_OF_MEM;
+ } else {
+ error = row_table_add_foreign_constraints(trx,
+ q.str, norm_name);
+
+ error = convert_error_code_to_mysql(error, NULL);
+ }
if (error) {
innobase_commit_low(trx);
@@ -4398,7 +4465,7 @@ ha_innobase::update_table_comment(
trx_search_latch_release_if_reserved(prebuilt->trx);
str = NULL;
- if (FILE* file = tmpfile()) {
+ if (FILE* file = os_file_create_tmpfile()) {
long flen;
/* output the data to a temporary file */
@@ -4460,7 +4527,7 @@ ha_innobase::get_foreign_key_create_info(void)
update_thd(current_thd);
- if (FILE* file = tmpfile()) {
+ if (FILE* file = os_file_create_tmpfile()) {
long flen;
prebuilt->trx->op_info = (char*)"getting info on foreign keys";
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index a158321c5df..1bfb86de944 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -190,7 +190,8 @@ extern char *innobase_unix_file_flush_method;
/* The following variables have to be my_bool for SHOW VARIABLES to work */
extern my_bool innobase_log_archive,
innobase_use_native_aio, innobase_fast_shutdown,
- innobase_file_per_table;
+ innobase_file_per_table, innobase_locks_unsafe_for_binlog,
+ innobase_create_status_file;
extern "C" {
extern ulong srv_max_buf_pool_modified_pct;
}
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 79e1d7b463b..89288d6059f 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -958,15 +958,21 @@ int ha_myisam::indexes_are_disabled(void)
start_bulk_insert(rows)
rows Rows to be inserted
0 if we don't know
+
+ NOTICE
+ Do not forget to call end_bulk_insert() later!
*/
void ha_myisam::start_bulk_insert(ha_rows rows)
{
+ DBUG_ENTER("ha_myisam::start_bulk_insert");
THD *thd=current_thd;
ulong size= min(thd->variables.read_buff_size, table->avg_row_length*rows);
+ DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
+ (ulong) rows, size));
/* don't enable row cache if too few rows */
- if (!rows && rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE)
+ if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE))
mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size);
can_enable_indexes= (file->s->state.key_map ==
@@ -990,8 +996,22 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
mi_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows);
}
}
+ DBUG_VOID_RETURN;
}
+/*
+ End the special bulk-insert optimizations
+ that were activated by start_bulk_insert().
+
+ SYNOPSIS
+ end_bulk_insert()
+ no arguments
+
+ RETURN
+ 0 OK
+ != 0 Error
+*/
+
int ha_myisam::end_bulk_insert()
{
mi_end_bulk_insert(file);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 5b36d6d2b55..815aed13ce3 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -41,7 +41,13 @@ static const int parallelism= 240;
// Default value for max number of transactions
// createable against NDB from this handler
-static const int max_transactions = 256;
+static const int max_transactions= 256;
+
+// Default value for prefetch of autoincrement values
+static const ha_rows autoincrement_prefetch= 32;
+
+// connectstring to cluster if given by mysqld
+const char *ndbcluster_connectstring= 0;
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
@@ -138,6 +144,7 @@ static int ndb_to_mysql_error(const NdbError *err)
int ha_ndbcluster::ndb_err(NdbConnection *trans)
{
+ int res;
const NdbError err= trans->getNdbError();
if (!err.code)
return 0; // Don't log things to DBUG log if no error
@@ -155,7 +162,13 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
default:
break;
}
- DBUG_RETURN(ndb_to_mysql_error(&err));
+ res= ndb_to_mysql_error(&err);
+ DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d",
+ err.code, res));
+ if (res == HA_ERR_FOUND_DUPP_KEY)
+ dupkey= table->primary_key;
+
+ DBUG_RETURN(res);
}
@@ -182,6 +195,45 @@ bool ha_ndbcluster::get_error_message(int error,
/*
+ Check if type is supported by NDB.
+ TODO Use this once, not in every operation
+*/
+
+static inline bool ndb_supported_type(enum_field_types type)
+{
+ switch (type) {
+ case MYSQL_TYPE_DECIMAL:
+ case MYSQL_TYPE_TINY:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_INT24:
+ case MYSQL_TYPE_LONGLONG:
+ case MYSQL_TYPE_FLOAT:
+ case MYSQL_TYPE_DOUBLE:
+ case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_YEAR:
+ case MYSQL_TYPE_STRING:
+ case MYSQL_TYPE_VAR_STRING:
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ return true;
+ case MYSQL_TYPE_NULL:
+ case MYSQL_TYPE_GEOMETRY:
+ break;
+ }
+ return false;
+}
+
+
+/*
Instruct NDB to set the value of the hidden primary key
*/
@@ -208,40 +260,15 @@ int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field,
pack_len));
DBUG_DUMP("key", (char*)field_ptr, pack_len);
- switch (field->type()) {
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- case MYSQL_TYPE_TIMESTAMP:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_YEAR:
- case MYSQL_TYPE_NEWDATE:
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- case MYSQL_TYPE_VAR_STRING:
- case MYSQL_TYPE_STRING:
- // Common implementation for most field types
- DBUG_RETURN(ndb_op->equal(fieldnr, (char*) field_ptr, pack_len) != 0);
-
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_NULL:
- case MYSQL_TYPE_GEOMETRY:
- default:
- // Unhandled field types
- DBUG_PRINT("error", ("Field type %d not supported", field->type()));
- DBUG_RETURN(2);
+ if (ndb_supported_type(field->type()))
+ {
+ if (! (field->flags & BLOB_FLAG))
+ // Common implementation for most field types
+ DBUG_RETURN(ndb_op->equal(fieldnr, (char*) field_ptr, pack_len) != 0);
}
- DBUG_RETURN(3);
+ // Unhandled field types
+ DBUG_PRINT("error", ("Field type %d not supported", field->type()));
+ DBUG_RETURN(2);
}
@@ -259,63 +286,197 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
fieldnr, field->field_name, field->type(),
pack_len, field->is_null()?"Y":"N"));
DBUG_DUMP("value", (char*) field_ptr, pack_len);
-
- if (field->is_null())
+
+ if (ndb_supported_type(field->type()))
{
- // Set value to NULL
- DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0));
- }
-
- switch (field->type()) {
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- case MYSQL_TYPE_TIMESTAMP:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_YEAR:
- case MYSQL_TYPE_NEWDATE:
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- case MYSQL_TYPE_VAR_STRING:
- case MYSQL_TYPE_STRING:
- // Common implementation for most field types
- DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)field_ptr, pack_len) != 0);
-
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_NULL:
- case MYSQL_TYPE_GEOMETRY:
- default:
- // Unhandled field types
- DBUG_PRINT("error", ("Field type %d not supported", field->type()));
- DBUG_RETURN(2);
+ if (! (field->flags & BLOB_FLAG))
+ {
+ if (field->is_null())
+ // Set value to NULL
+ DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0));
+ // Common implementation for most field types
+ DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)field_ptr, pack_len) != 0);
+ }
+
+ // Blob type
+ NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr);
+ if (ndb_blob != NULL)
+ {
+ if (field->is_null())
+ DBUG_RETURN(ndb_blob->setNull() != 0);
+
+ Field_blob *field_blob= (Field_blob*)field;
+
+ // Get length and pointer to data
+ uint32 blob_len= field_blob->get_length(field_ptr);
+ char* blob_ptr= NULL;
+ field_blob->get_ptr(&blob_ptr);
+
+ // Looks like NULL blob can also be signaled in this way
+ if (blob_ptr == NULL)
+ DBUG_RETURN(ndb_blob->setNull() != 0);
+
+ DBUG_PRINT("value", ("set blob ptr=%x len=%u",
+ (unsigned)blob_ptr, blob_len));
+ DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
+
+ // No callback needed to write value
+ DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0);
+ }
+ DBUG_RETURN(1);
}
- DBUG_RETURN(3);
+ // Unhandled field types
+ DBUG_PRINT("error", ("Field type %d not supported", field->type()));
+ DBUG_RETURN(2);
+}
+
+
+/*
+ Callback to read all blob values.
+ - not done in unpack_record because unpack_record is valid
+ after execute(Commit) but reading blobs is not
+ - may only generate read operations; they have to be executed
+ somewhere before the data is available
+ - due to single buffer for all blobs, we let the last blob
+ process all blobs (last so that all are active)
+ - null bit is still set in unpack_record
+ - TODO allocate blob part aligned buffers
+*/
+
+NdbBlob::ActiveHook g_get_ndb_blobs_value;
+
+int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg)
+{
+ DBUG_ENTER("g_get_ndb_blobs_value");
+ if (ndb_blob->blobsNextBlob() != NULL)
+ DBUG_RETURN(0);
+ ha_ndbcluster *ha= (ha_ndbcluster *)arg;
+ DBUG_RETURN(ha->get_ndb_blobs_value(ndb_blob));
+}
+
+int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
+{
+ DBUG_ENTER("get_ndb_blobs_value");
+
+ // Field has no field number so cannot use TABLE blob_field
+ // Loop twice, first only counting total buffer size
+ for (int loop= 0; loop <= 1; loop++)
+ {
+ uint32 offset= 0;
+ for (uint i= 0; i < table->fields; i++)
+ {
+ Field *field= table->field[i];
+ NdbValue value= m_value[i];
+ if (value.ptr != NULL && (field->flags & BLOB_FLAG))
+ {
+ Field_blob *field_blob= (Field_blob *)field;
+ NdbBlob *ndb_blob= value.blob;
+ Uint64 blob_len= 0;
+ if (ndb_blob->getLength(blob_len) != 0)
+ DBUG_RETURN(-1);
+ // Align to Uint64
+ uint32 blob_size= blob_len;
+ if (blob_size % 8 != 0)
+ blob_size+= 8 - blob_size % 8;
+ if (loop == 1)
+ {
+ char *buf= blobs_buffer + offset;
+ uint32 len= 0xffffffff; // Max uint32
+ DBUG_PRINT("value", ("read blob ptr=%x len=%u",
+ (uint)buf, (uint)blob_len));
+ if (ndb_blob->readData(buf, len) != 0)
+ DBUG_RETURN(-1);
+ DBUG_ASSERT(len == blob_len);
+ field_blob->set_ptr(len, buf);
+ }
+ offset+= blob_size;
+ }
+ }
+ if (loop == 0 && offset > blobs_buffer_size)
+ {
+ my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ blobs_buffer_size= 0;
+ DBUG_PRINT("value", ("allocate blobs buffer size %u", offset));
+ blobs_buffer= my_malloc(offset, MYF(MY_WME));
+ if (blobs_buffer == NULL)
+ DBUG_RETURN(-1);
+ blobs_buffer_size= offset;
+ }
+ }
+ DBUG_RETURN(0);
}
/*
Instruct NDB to fetch one field
- - data is read directly into buffer provided by field_ptr
- if it's NULL, data is read into memory provided by NDBAPI
+ - data is read directly into buffer provided by field
+ if field is NULL, data is read into memory provided by NDBAPI
*/
-int ha_ndbcluster::get_ndb_value(NdbOperation *op,
- uint field_no, byte *field_ptr)
+int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
+ uint fieldnr)
{
DBUG_ENTER("get_ndb_value");
- DBUG_PRINT("enter", ("field_no: %d", field_no));
- m_value[field_no]= op->getValue(field_no, field_ptr);
- DBUG_RETURN(m_value == NULL);
+ DBUG_PRINT("enter", ("fieldnr: %d flags: %o", fieldnr,
+ (int)(field != NULL ? field->flags : 0)));
+
+ if (field != NULL)
+ {
+ if (ndb_supported_type(field->type()))
+ {
+ DBUG_ASSERT(field->ptr != NULL);
+ if (! (field->flags & BLOB_FLAG))
+ {
+ m_value[fieldnr].rec= ndb_op->getValue(fieldnr, field->ptr);
+ DBUG_RETURN(m_value[fieldnr].rec == NULL);
+ }
+
+ // Blob type
+ NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr);
+ m_value[fieldnr].blob= ndb_blob;
+ if (ndb_blob != NULL)
+ {
+ // Set callback
+ void *arg= (void *)this;
+ DBUG_RETURN(ndb_blob->setActiveHook(g_get_ndb_blobs_value, arg) != 0);
+ }
+ DBUG_RETURN(1);
+ }
+ // Unhandled field types
+ DBUG_PRINT("error", ("Field type %d not supported", field->type()));
+ DBUG_RETURN(2);
+ }
+
+ // Used for hidden key only
+ m_value[fieldnr].rec= ndb_op->getValue(fieldnr, NULL);
+ DBUG_RETURN(m_value[fieldnr].rec == NULL);
+}
+
+
+/*
+ Check if any set or get of blob value in current query.
+*/
+bool ha_ndbcluster::uses_blob_value(bool all_fields)
+{
+ if (table->blob_fields == 0)
+ return false;
+ if (all_fields)
+ return true;
+ {
+ uint no_fields= table->fields;
+ int i;
+ THD *thd= current_thd;
+ // Blobs are always placed at the end of the field list
+ for (i= no_fields - 1; i >= 0; i--)
+ {
+ Field *field= table->field[i];
+ if (thd->query_id == field->query_id)
+ {
+ return true;
+ }
+ }
+ }
+ return false;
}
@@ -391,41 +552,95 @@ int ha_ndbcluster::get_metadata(const char *path)
// All checks OK, lets use the table
m_table= (void*)tab;
- DBUG_RETURN(build_index_list());
+ DBUG_RETURN(build_index_list(table, ILBP_OPEN));
}
-int ha_ndbcluster::build_index_list()
+
+int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
{
+ int error= 0;
char *name;
const char *index_name;
static const char* unique_suffix= "$unique";
uint i, name_len;
+ KEY* key_info= tab->key_info;
+ const char **key_name= tab->keynames.type_names;
+ NdbDictionary::Dictionary *dict= m_ndb->getDictionary();
DBUG_ENTER("build_index_list");
// Save information about all known indexes
- for (uint i= 0; i < table->keys; i++)
+ for (i= 0; i < tab->keys; i++, key_info++, key_name++)
{
+ index_name= *key_name;
NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
- m_indextype[i]= idx_type;
-
+ m_index[i].type= idx_type;
if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
{
- index_name= get_index_name(i);
name_len= strlen(index_name)+strlen(unique_suffix)+1;
// Create name for unique index by appending "$unique";
if (!(name= my_malloc(name_len, MYF(MY_WME))))
DBUG_RETURN(2);
strxnmov(name, name_len, index_name, unique_suffix, NullS);
- m_unique_index_name[i]= name;
+ m_index[i].unique_name= name;
DBUG_PRINT("info", ("Created unique index name: %s for index %d",
name, i));
}
+ // Create secondary indexes if in create phase
+ if (phase == ILBP_CREATE)
+ {
+ DBUG_PRINT("info", ("Creating index %u: %s", i, index_name));
+
+ switch (m_index[i].type){
+
+ case PRIMARY_KEY_INDEX:
+ // Do nothing, already created
+ break;
+ case PRIMARY_KEY_ORDERED_INDEX:
+ error= create_ordered_index(index_name, key_info);
+ break;
+ case UNIQUE_ORDERED_INDEX:
+ if (!(error= create_ordered_index(index_name, key_info)))
+ error= create_unique_index(get_unique_index_name(i), key_info);
+ break;
+ case UNIQUE_INDEX:
+ error= create_unique_index(get_unique_index_name(i), key_info);
+ break;
+ case ORDERED_INDEX:
+ error= create_ordered_index(index_name, key_info);
+ break;
+ default:
+ DBUG_ASSERT(false);
+ break;
+ }
+ if (error)
+ {
+ DBUG_PRINT("error", ("Failed to create index %u", i));
+ drop_table();
+ break;
+ }
+ }
+ // Add handles to index objects
+ DBUG_PRINT("info", ("Trying to add handle to index %s", index_name));
+ if ((m_index[i].type != PRIMARY_KEY_INDEX) &&
+ (m_index[i].type != UNIQUE_INDEX))
+ {
+ const NDBINDEX *index= dict->getIndex(index_name, m_tabname);
+ if (!index) DBUG_RETURN(1);
+ m_index[i].index= (void *) index;
+ }
+ if (m_index[i].unique_name)
+ {
+ const NDBINDEX *index= dict->getIndex(m_index[i].unique_name, m_tabname);
+ if (!index) DBUG_RETURN(1);
+ m_index[i].unique_index= (void *) index;
+ }
+ DBUG_PRINT("info", ("Added handle to index %s", index_name));
}
- DBUG_RETURN(0);
+
+ DBUG_RETURN(error);
}
-
/*
Decode the type of an index from information
provided in table object
@@ -454,18 +669,29 @@ void ha_ndbcluster::release_metadata()
// Release index list
for (i= 0; i < MAX_KEY; i++)
{
- if (m_unique_index_name[i])
- my_free((char*)m_unique_index_name[i], MYF(0));
- m_unique_index_name[i]= NULL;
+ if (m_index[i].unique_name)
+ my_free((char*)m_index[i].unique_name, MYF(0));
+ m_index[i].unique_name= NULL;
+ m_index[i].unique_index= NULL;
+ m_index[i].index= NULL;
}
DBUG_VOID_RETURN;
}
-NdbCursorOperation::LockMode get_ndb_lock_type(enum thr_lock_type type)
+int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
- return (type == TL_WRITE_ALLOW_WRITE) ?
- NdbCursorOperation::LM_Exclusive : NdbCursorOperation::LM_Read;
+ int lm;
+ if (type == TL_WRITE_ALLOW_WRITE)
+ lm= NdbScanOperation::LM_Exclusive;
+ else if (uses_blob_value(retrieve_all_fields))
+ /*
+ TODO use a new scan mode to read + lock + keyinfo
+ */
+ lm= NdbScanOperation::LM_Exclusive;
+ else
+ lm= NdbScanOperation::LM_CommittedRead;
+ return lm;
}
static const ulong index_type_flags[]=
@@ -507,13 +733,13 @@ inline const char* ha_ndbcluster::get_index_name(uint idx_no) const
inline const char* ha_ndbcluster::get_unique_index_name(uint idx_no) const
{
- return m_unique_index_name[idx_no];
+ return m_index[idx_no].unique_name;
}
inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const
{
DBUG_ASSERT(idx_no < MAX_KEY);
- return m_indextype[idx_no];
+ return m_index[idx_no].type;
}
@@ -593,7 +819,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op)
Read one record from NDB using primary key
*/
-int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
+int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
uint no_fields= table->fields, i;
NdbConnection *trans= m_active_trans;
@@ -603,8 +829,9 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
DBUG_PRINT("enter", ("key_len: %u", key_len));
DBUG_DUMP("key", (char*)key, key_len);
- if (!(op= trans->getNdbOperation(m_tabname)) || op->readTuple() != 0)
- goto err;
+ if (!(op= trans->getNdbOperation((NDBTAB *) m_table)) ||
+ op->readTuple() != 0)
+ ERR_RETURN(trans->getNdbError());
if (table->primary_key == MAX_KEY)
{
@@ -612,10 +839,11 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
DBUG_PRINT("info", ("Using hidden key"));
DBUG_DUMP("key", (char*)key, 8);
if (set_hidden_key(op, no_fields, key))
- goto err;
+ ERR_RETURN(trans->getNdbError());
+
// Read key at the same time, for future reference
- if (get_ndb_value(op, no_fields, NULL))
- goto err;
+ if (get_ndb_value(op, NULL, no_fields))
+ ERR_RETURN(trans->getNdbError());
}
else
{
@@ -624,19 +852,20 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
return res;
}
- // Read non-key field(s)
+ // Read the wanted non-key field(s), or all of them if HA_EXTRA_RETRIEVE_ALL_COLS is set
for (i= 0; i < no_fields; i++)
{
Field *field= table->field[i];
- if (thd->query_id == field->query_id)
+ if ((thd->query_id == field->query_id) ||
+ retrieve_all_fields)
{
- if (get_ndb_value(op, i, field->ptr))
- goto err;
+ if (get_ndb_value(op, field, i))
+ ERR_RETURN(trans->getNdbError());
}
else
{
// Attribute was not to be read
- m_value[i]= NULL;
+ m_value[i].ptr= NULL;
}
}
@@ -650,9 +879,55 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
unpack_record(buf);
table->status= 0;
DBUG_RETURN(0);
+}
+
+
+/*
+ Read one complementing record from NDB using primary key from old_data
+*/
+
+int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
+{
+ uint no_fields= table->fields, i;
+ NdbConnection *trans= m_active_trans;
+ NdbOperation *op;
+ THD *thd= current_thd;
+ DBUG_ENTER("complemented_pk_read");
+
+ if (retrieve_all_fields)
+ // We have already retrieved all fields, nothing to complement
+ DBUG_RETURN(0);
+
+ if (!(op= trans->getNdbOperation((NDBTAB *) m_table)) ||
+ op->readTuple() != 0)
+ ERR_RETURN(trans->getNdbError());
+
+ int res;
+ if ((res= set_primary_key_from_old_data(op, old_data)))
+ ERR_RETURN(trans->getNdbError());
+
+ // Read all unreferenced non-key field(s)
+ for (i= 0; i < no_fields; i++)
+ {
+ Field *field= table->field[i];
+ if (!(field->flags & PRI_KEY_FLAG) &&
+ (thd->query_id != field->query_id))
+ {
+ if (get_ndb_value(op, field, i))
+ ERR_RETURN(trans->getNdbError());
+ }
+ }
+
+ if (trans->execute(NoCommit) != 0)
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(ndb_err(trans));
+ }
- err:
- ERR_RETURN(trans->getNdbError());
+ // The values have now been fetched from NDB
+ unpack_record(new_data);
+ table->status= 0;
+ DBUG_RETURN(0);
}
@@ -675,8 +950,9 @@ int ha_ndbcluster::unique_index_read(const byte *key,
DBUG_DUMP("key", (char*)key, key_len);
DBUG_PRINT("enter", ("name: %s", get_unique_index_name(active_index)));
- if (!(op= trans->getNdbIndexOperation(get_unique_index_name(active_index),
- m_tabname)) ||
+ if (!(op= trans->getNdbIndexOperation((NDBINDEX *)
+ m_index[active_index].unique_index,
+ (NDBTAB *) m_table)) ||
op->readTuple() != 0)
ERR_RETURN(trans->getNdbError());
@@ -700,13 +976,13 @@ int ha_ndbcluster::unique_index_read(const byte *key,
if ((thd->query_id == field->query_id) ||
(field->flags & PRI_KEY_FLAG))
{
- if (get_ndb_value(op, i, field->ptr))
+ if (get_ndb_value(op, field, i))
ERR_RETURN(op->getNdbError());
}
else
{
// Attribute was not to be read
- m_value[i]= NULL;
+ m_value[i].ptr= NULL;
}
}
@@ -746,14 +1022,25 @@ inline int ha_ndbcluster::next_result(byte *buf)
If this an update or delete, call nextResult with false
to process any records already cached in NdbApi
*/
- bool contact_ndb = m_lock.type != TL_WRITE_ALLOW_WRITE;
+ bool contact_ndb= m_lock.type != TL_WRITE_ALLOW_WRITE;
do {
DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb));
+ /*
+ We can only handle one tuple with blobs at a time.
+ */
+ if (ops_pending && blobs_pending)
+ {
+ if (trans->execute(NoCommit) != 0)
+ DBUG_RETURN(ndb_err(trans));
+ ops_pending= 0;
+ blobs_pending= false;
+ }
check= cursor->nextResult(contact_ndb);
if (check == 0)
{
// One more record found
DBUG_PRINT("info", ("One more record found"));
+
unpack_record(buf);
table->status= 0;
DBUG_RETURN(0);
@@ -791,15 +1078,17 @@ inline int ha_ndbcluster::next_result(byte *buf)
Set bounds for a ordered index scan, use key_range
*/
-int ha_ndbcluster::set_bounds(NdbOperation *op,
+int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
const key_range *key,
int bound)
{
- uint i, tot_len;
+ uint key_len, key_store_len, tot_len, key_tot_len;
byte *key_ptr;
KEY* key_info= table->key_info + active_index;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
+ Field* field;
+ bool key_nullable, key_null;
DBUG_ENTER("set_bounds");
DBUG_PRINT("enter", ("bound: %d", bound));
@@ -809,29 +1098,37 @@ int ha_ndbcluster::set_bounds(NdbOperation *op,
// Set bounds using key data
tot_len= 0;
- key_ptr= (byte *) key->key;
+ key_ptr= (byte *) key->key;
+ key_tot_len= key->length;
for (; key_part != end; key_part++)
{
- Field* field= key_part->field;
- uint32 field_len= field->pack_length();
- tot_len+= field_len;
+ field= key_part->field;
+ key_len= key_part->length;
+ key_store_len= key_part->store_length;
+ key_nullable= (bool) key_part->null_bit;
+ key_null= (field->maybe_null() && *key_ptr);
+ tot_len+= key_store_len;
const char* bounds[]= {"LE", "LT", "GE", "GT", "EQ"};
DBUG_ASSERT(bound >= 0 && bound <= 4);
- DBUG_PRINT("info", ("Set Bound%s on %s",
+ DBUG_PRINT("info", ("Set Bound%s on %s %s %s %s",
bounds[bound],
- field->field_name));
- DBUG_DUMP("key", (char*)key_ptr, field_len);
-
+ field->field_name,
+ key_nullable ? "NULLABLE" : "",
+ key_null ? "NULL":""));
+ DBUG_PRINT("info", ("Total length %ds", tot_len));
+
+ DBUG_DUMP("key", (char*) key_ptr, key_store_len);
+
if (op->setBound(field->field_name,
bound,
- key_ptr,
- field_len) != 0)
+ key_null ? 0 : (key_nullable ? key_ptr + 1 : key_ptr),
+ key_null ? 0 : key_len) != 0)
ERR_RETURN(op->getNdbError());
- key_ptr+= field_len;
-
- if (tot_len >= key->length)
+ key_ptr+= key_store_len;
+
+ if (tot_len >= key_tot_len)
break;
/*
@@ -839,7 +1136,7 @@ int ha_ndbcluster::set_bounds(NdbOperation *op,
so if this bound was not EQ, bail out and make
a best effort attempt
*/
- if (bound != NdbOperation::BoundEQ)
+ if (bound != NdbIndexScanOperation::BoundEQ)
break;
}
@@ -857,7 +1154,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
{
NdbConnection *trans= m_active_trans;
NdbResultSet *cursor;
- NdbScanOperation *op;
+ NdbIndexScanOperation *op;
const char *index_name;
DBUG_ENTER("ordered_index_scan");
@@ -865,19 +1162,24 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname));
index_name= get_index_name(active_index);
- if (!(op= trans->getNdbScanOperation(index_name, m_tabname)))
+ if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *)
+ m_index[active_index].index,
+ (NDBTAB *) m_table)))
ERR_RETURN(trans->getNdbError());
- if (!(cursor= op->readTuples(parallelism, get_ndb_lock_type(m_lock.type))))
+
+ NdbScanOperation::LockMode lm= (NdbScanOperation::LockMode)
+ get_ndb_lock_type(m_lock.type);
+ if (!(cursor= op->readTuples(lm, 0, parallelism, sorted)))
ERR_RETURN(trans->getNdbError());
m_active_cursor= cursor;
if (start_key &&
set_bounds(op, start_key,
(start_key->flag == HA_READ_KEY_EXACT) ?
- NdbOperation::BoundEQ :
+ NdbIndexScanOperation::BoundEQ :
(start_key->flag == HA_READ_AFTER_KEY) ?
- NdbOperation::BoundLT :
- NdbOperation::BoundLE))
+ NdbIndexScanOperation::BoundLT :
+ NdbIndexScanOperation::BoundLE))
DBUG_RETURN(1);
if (end_key)
@@ -888,8 +1190,8 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
}
else if (set_bounds(op, end_key,
(end_key->flag == HA_READ_AFTER_KEY) ?
- NdbOperation::BoundGE :
- NdbOperation::BoundGT))
+ NdbIndexScanOperation::BoundGE :
+ NdbIndexScanOperation::BoundGT))
DBUG_RETURN(1);
}
DBUG_RETURN(define_read_attrs(buf, op));
@@ -925,12 +1227,14 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len,
DBUG_PRINT("info", ("Starting a new filtered scan on %s",
m_tabname));
- if (!(op= trans->getNdbScanOperation(m_tabname)))
+ if (!(op= trans->getNdbScanOperation((NDBTAB *) m_table)))
ERR_RETURN(trans->getNdbError());
- if (!(cursor= op->readTuples(parallelism, get_ndb_lock_type(m_lock.type))))
+ NdbScanOperation::LockMode lm= (NdbScanOperation::LockMode)
+ get_ndb_lock_type(m_lock.type);
+ if (!(cursor= op->readTuples(lm, 0, parallelism)))
ERR_RETURN(trans->getNdbError());
m_active_cursor= cursor;
-
+
{
// Start scan filter
NdbScanFilter sf(op);
@@ -994,9 +1298,11 @@ int ha_ndbcluster::full_table_scan(byte *buf)
DBUG_ENTER("full_table_scan");
DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
- if (!(op=trans->getNdbScanOperation(m_tabname)))
+ if (!(op=trans->getNdbScanOperation((NDBTAB *) m_table)))
ERR_RETURN(trans->getNdbError());
- if (!(cursor= op->readTuples(parallelism, get_ndb_lock_type(m_lock.type))))
+ NdbScanOperation::LockMode lm= (NdbScanOperation::LockMode)
+ get_ndb_lock_type(m_lock.type);
+ if (!(cursor= op->readTuples(lm, 0, parallelism)))
ERR_RETURN(trans->getNdbError());
m_active_cursor= cursor;
DBUG_RETURN(define_read_attrs(buf, op));
@@ -1020,12 +1326,12 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
(field->flags & PRI_KEY_FLAG) ||
retrieve_all_fields)
{
- if (get_ndb_value(op, i, field->ptr))
+ if (get_ndb_value(op, field, i))
ERR_RETURN(op->getNdbError());
}
else
{
- m_value[i]= NULL;
+ m_value[i].ptr= NULL;
}
}
@@ -1039,7 +1345,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
if (!tab->getColumn(hidden_no))
DBUG_RETURN(1);
#endif
- if (get_ndb_value(op, hidden_no, NULL))
+ if (get_ndb_value(op, NULL, hidden_no))
ERR_RETURN(op->getNdbError());
}
@@ -1056,6 +1362,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
int ha_ndbcluster::write_row(byte *record)
{
+ bool has_auto_increment;
uint i;
NdbConnection *trans= m_active_trans;
NdbOperation *op;
@@ -1065,10 +1372,10 @@ int ha_ndbcluster::write_row(byte *record)
statistic_increment(ha_write_count,&LOCK_status);
if (table->timestamp_default_now)
update_timestamp(record+table->timestamp_default_now-1);
- if (table->next_number_field && record == table->record[0])
- update_auto_increment();
+ has_auto_increment= (table->next_number_field && record == table->record[0]);
+ skip_auto_increment= table->auto_increment_field_not_null;
- if (!(op= trans->getNdbOperation(m_tabname)))
+ if (!(op= trans->getNdbOperation((NDBTAB *) m_table)))
ERR_RETURN(trans->getNdbError());
res= (m_use_write) ? op->writeTuple() :op->insertTuple();
@@ -1078,13 +1385,17 @@ int ha_ndbcluster::write_row(byte *record)
if (table->primary_key == MAX_KEY)
{
// Table has hidden primary key
- Uint64 auto_value= m_ndb->getAutoIncrementValue(m_tabname);
+ Uint64 auto_value= m_ndb->getAutoIncrementValue((NDBTAB *) m_table);
if (set_hidden_key(op, table->fields, (const byte*)&auto_value))
ERR_RETURN(op->getNdbError());
}
else
{
int res;
+
+ if ((has_auto_increment) && (!skip_auto_increment))
+ update_auto_increment();
+
if ((res= set_primary_key(op)))
return res;
}
@@ -1095,7 +1406,10 @@ int ha_ndbcluster::write_row(byte *record)
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
set_ndb_value(op, field, i))
+ {
+ skip_auto_increment= true;
ERR_RETURN(op->getNdbError());
+ }
}
/*
@@ -1106,16 +1420,34 @@ int ha_ndbcluster::write_row(byte *record)
Find out how this is detected!
*/
rows_inserted++;
- if ((rows_inserted == rows_to_insert) ||
- ((rows_inserted % bulk_insert_rows) == 0))
+ bulk_insert_not_flushed= true;
+ if ((rows_to_insert == 1) ||
+ ((rows_inserted % bulk_insert_rows) == 0) ||
+ uses_blob_value(false) != 0)
{
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
- rows_inserted, bulk_insert_rows));
+ (int)rows_inserted, (int)bulk_insert_rows));
+ bulk_insert_not_flushed= false;
if (trans->execute(NoCommit) != 0)
+ {
+ skip_auto_increment= true;
DBUG_RETURN(ndb_err(trans));
+ }
}
+ if ((has_auto_increment) && (skip_auto_increment))
+ {
+ Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
+ DBUG_PRINT("info",
+ ("Trying to set next auto increment value to %lu",
+ (ulong) next_val));
+ if (m_ndb->setAutoIncrementValue((NDBTAB *) m_table, next_val, true))
+ DBUG_PRINT("info",
+ ("Setting next auto increment value to %u", next_val));
+ }
+ skip_auto_increment= true;
+
DBUG_RETURN(0);
}
@@ -1171,11 +1503,40 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (table->timestamp_on_update_now)
update_timestamp(new_data+table->timestamp_on_update_now-1);
- /* Check for update of primary key and return error */
+ /* Check for update of primary key for special handling */
if ((table->primary_key != MAX_KEY) &&
(key_cmp(table->primary_key, old_data, new_data)))
- DBUG_RETURN(HA_ERR_UNSUPPORTED);
-
+ {
+ int read_res, insert_res, delete_res;
+
+ DBUG_PRINT("info", ("primary key update, doing pk read+insert+delete"));
+ // Get all old fields, since we optimize away fields not in query
+ read_res= complemented_pk_read(old_data, new_data);
+ if (read_res)
+ {
+ DBUG_PRINT("info", ("pk read failed"));
+ DBUG_RETURN(read_res);
+ }
+ // Insert new row
+ insert_res= write_row(new_data);
+ if (insert_res)
+ {
+ DBUG_PRINT("info", ("insert failed"));
+ DBUG_RETURN(insert_res);
+ }
+ // Delete old row
+ DBUG_PRINT("info", ("insert succeded"));
+ delete_res= delete_row(old_data);
+ if (delete_res)
+ {
+ DBUG_PRINT("info", ("delete failed"));
+ // Undo write_row(new_data)
+ DBUG_RETURN(delete_row(new_data));
+ }
+ DBUG_PRINT("info", ("insert+delete succeeded"));
+ DBUG_RETURN(0);
+ }
+
if (cursor)
{
/*
@@ -1189,10 +1550,12 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (!(op= cursor->updateTuple()))
ERR_RETURN(trans->getNdbError());
ops_pending++;
+ if (uses_blob_value(false))
+ blobs_pending= true;
}
else
{
- if (!(op= trans->getNdbOperation(m_tabname)) ||
+ if (!(op= trans->getNdbOperation((NDBTAB *) m_table)) ||
op->updateTuple() != 0)
ERR_RETURN(trans->getNdbError());
@@ -1204,7 +1567,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
// Require that the PK for this record has previously been
// read into m_value
uint no_fields= table->fields;
- NdbRecAttr* rec= m_value[no_fields];
+ NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec);
DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH);
@@ -1253,9 +1616,9 @@ int ha_ndbcluster::delete_row(const byte *record)
if (cursor)
{
/*
- We are scanning records and want to update the record
+ We are scanning records and want to delete the record
that was just found, call deleteTuple on the cursor
- to take over the lock to a new update operation
+ to take over the lock to a new delete operation
And thus setting the primary key of the record from
the active record in cursor
*/
@@ -1270,7 +1633,7 @@ int ha_ndbcluster::delete_row(const byte *record)
else
{
- if (!(op=trans->getNdbOperation(m_tabname)) ||
+ if (!(op=trans->getNdbOperation((NDBTAB *) m_table)) ||
op->deleteTuple() != 0)
ERR_RETURN(trans->getNdbError());
@@ -1279,7 +1642,7 @@ int ha_ndbcluster::delete_row(const byte *record)
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
uint no_fields= table->fields;
- NdbRecAttr* rec= m_value[no_fields];
+ NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec != NULL);
if (set_hidden_key(op, no_fields, rec->aRef()))
@@ -1317,7 +1680,7 @@ void ha_ndbcluster::unpack_record(byte* buf)
{
uint row_offset= (uint) (buf - table->record[0]);
Field **field, **end;
- NdbRecAttr **value= m_value;
+ NdbValue *value= m_value;
DBUG_ENTER("unpack_record");
// Set null flag(s)
@@ -1326,8 +1689,23 @@ void ha_ndbcluster::unpack_record(byte* buf)
field < end;
field++, value++)
{
- if (*value && (*value)->isNULL())
- (*field)->set_null(row_offset);
+ if ((*value).ptr)
+ {
+ if (! ((*field)->flags & BLOB_FLAG))
+ {
+ if ((*value).rec->isNULL())
+ (*field)->set_null(row_offset);
+ }
+ else
+ {
+ NdbBlob* ndb_blob= (*value).blob;
+ bool isNull= true;
+ int ret= ndb_blob->getNull(isNull);
+ DBUG_ASSERT(ret == 0);
+ if (isNull)
+ (*field)->set_null(row_offset);
+ }
+ }
}
#ifndef DBUG_OFF
@@ -1338,7 +1716,7 @@ void ha_ndbcluster::unpack_record(byte* buf)
int hidden_no= table->fields;
const NDBTAB *tab= (NDBTAB *) m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
- NdbRecAttr* rec= m_value[hidden_no];
+ NdbRecAttr* rec= m_value[hidden_no].rec;
DBUG_ASSERT(rec);
DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no,
hidden_col->getName(), rec->u_64_value()));
@@ -1348,7 +1726,6 @@ void ha_ndbcluster::unpack_record(byte* buf)
DBUG_VOID_RETURN;
}
-
/*
Utility function to print/dump the fetched field
*/
@@ -1366,9 +1743,9 @@ void ha_ndbcluster::print_results()
{
Field *field;
const NDBCOL *col;
- NdbRecAttr *value;
+ NdbValue value;
- if (!(value= m_value[f]))
+ if (!(value= m_value[f]).ptr)
{
fprintf(DBUG_FILE, "Field %d was not read\n", f);
continue;
@@ -1377,19 +1754,28 @@ void ha_ndbcluster::print_results()
DBUG_DUMP("field->ptr", (char*)field->ptr, field->pack_length());
col= tab->getColumn(f);
fprintf(DBUG_FILE, "%d: %s\t", f, col->getName());
-
- if (value->isNULL())
+
+ NdbBlob *ndb_blob= NULL;
+ if (! (field->flags & BLOB_FLAG))
{
- fprintf(DBUG_FILE, "NULL\n");
- continue;
+ if (value.rec->isNULL())
+ {
+ fprintf(DBUG_FILE, "NULL\n");
+ continue;
+ }
+ }
+ else
+ {
+ ndb_blob= value.blob;
+ bool isNull= true;
+ ndb_blob->getNull(isNull);
+ if (isNull) {
+ fprintf(DBUG_FILE, "NULL\n");
+ continue;
+ }
}
switch (col->getType()) {
- case NdbDictionary::Column::Blob:
- case NdbDictionary::Column::Clob:
- case NdbDictionary::Column::Undefined:
- fprintf(DBUG_FILE, "Unknown type: %d", col->getType());
- break;
case NdbDictionary::Column::Tinyint: {
char value= *field->ptr;
fprintf(DBUG_FILE, "Tinyint\t%d", value);
@@ -1481,6 +1867,21 @@ void ha_ndbcluster::print_results()
fprintf(DBUG_FILE, "Timespec\t%llu", value);
break;
}
+ case NdbDictionary::Column::Blob: {
+ Uint64 len= 0;
+ ndb_blob->getLength(len);
+ fprintf(DBUG_FILE, "Blob\t[len=%u]", (unsigned)len);
+ break;
+ }
+ case NdbDictionary::Column::Text: {
+ Uint64 len= 0;
+ ndb_blob->getLength(len);
+ fprintf(DBUG_FILE, "Text\t[len=%u]", (unsigned)len);
+ break;
+ }
+ case NdbDictionary::Column::Undefined:
+ fprintf(DBUG_FILE, "Unknown type: %d", col->getType());
+ break;
}
fprintf(DBUG_FILE, "\n");
@@ -1537,7 +1938,7 @@ int ha_ndbcluster::index_next(byte *buf)
{
DBUG_ENTER("index_next");
- int error = 1;
+ int error= 1;
statistic_increment(ha_read_next_count,&LOCK_status);
DBUG_RETURN(next_result(buf));
}
@@ -1628,9 +2029,13 @@ int ha_ndbcluster::rnd_init(bool scan)
NdbResultSet *cursor= m_active_cursor;
DBUG_ENTER("rnd_init");
DBUG_PRINT("enter", ("scan: %d", scan));
- // Check that cursor is not defined
+ // Check if scan is to be restarted
if (cursor)
- DBUG_RETURN(1);
+ {
+ if (!scan)
+ DBUG_RETURN(1);
+ cursor->restart();
+ }
index_init(table->primary_key);
DBUG_RETURN(0);
}
@@ -1638,11 +2043,25 @@ int ha_ndbcluster::rnd_init(bool scan)
int ha_ndbcluster::close_scan()
{
NdbResultSet *cursor= m_active_cursor;
+ NdbConnection *trans= m_active_trans;
DBUG_ENTER("close_scan");
if (!cursor)
DBUG_RETURN(1);
+
+ if (ops_pending)
+ {
+ /*
+ Execute any pending operations in the
+ deleting/updating transaction before closing the scan
+ */
+ DBUG_PRINT("info", ("ops_pending: %d", ops_pending));
+ if (trans->execute(NoCommit) != 0)
+ DBUG_RETURN(ndb_err(trans));
+ ops_pending= 0;
+ }
+
cursor->close();
m_active_cursor= NULL;
DBUG_RETURN(0);
@@ -1724,7 +2143,7 @@ void ha_ndbcluster::position(const byte *record)
// No primary key, get hidden key
DBUG_PRINT("info", ("Getting hidden key"));
int hidden_no= table->fields;
- NdbRecAttr* rec= m_value[hidden_no];
+ NdbRecAttr* rec= m_value[hidden_no].rec;
const NDBTAB *tab= (NDBTAB *) m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
DBUG_ASSERT(hidden_col->getPrimaryKey() &&
@@ -1755,7 +2174,10 @@ void ha_ndbcluster::info(uint flag)
if (flag & HA_STATUS_VARIABLE)
DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
if (flag & HA_STATUS_ERRKEY)
+ {
DBUG_PRINT("info", ("HA_STATUS_ERRKEY"));
+ errkey= dupkey;
+ }
if (flag & HA_STATUS_AUTO)
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
DBUG_VOID_RETURN;
@@ -1898,7 +2320,7 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
const NDBTAB *tab= (NDBTAB *) m_table;
DBUG_ENTER("start_bulk_insert");
- DBUG_PRINT("enter", ("rows: %d", rows));
+ DBUG_PRINT("enter", ("rows: %d", (int)rows));
rows_inserted= 0;
rows_to_insert= rows;
@@ -1910,7 +2332,7 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
degrade if too many bytes are inserted, thus it's limited by this
calculation.
*/
- const int bytesperbatch = 8192;
+ const int bytesperbatch= 8192;
bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns();
batch= bytesperbatch/bytes;
batch= batch == 0 ? 1 : batch;
@@ -1925,15 +2347,32 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
*/
int ha_ndbcluster::end_bulk_insert()
{
+ int error= 0;
+
DBUG_ENTER("end_bulk_insert");
- DBUG_RETURN(0);
+ // Check if last inserts need to be flushed
+ if (bulk_insert_not_flushed)
+ {
+ NdbConnection *trans= m_active_trans;
+ // Send rows to NDB
+ DBUG_PRINT("info", ("Sending inserts to NDB, "\
+ "rows_inserted:%d, bulk_insert_rows: %d",
+ (int)rows_inserted, (int)bulk_insert_rows));
+ bulk_insert_not_flushed= false;
+ if (trans->execute(NoCommit) != 0)
+ error= ndb_err(trans);
+ }
+
+ rows_inserted= 0;
+ rows_to_insert= 1;
+ DBUG_RETURN(error);
}
int ha_ndbcluster::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
DBUG_ENTER("extra_opt");
- DBUG_PRINT("enter", ("cache_size: %d", cache_size));
+ DBUG_PRINT("enter", ("cache_size: %lu", cache_size));
DBUG_RETURN(extra(operation));
}
@@ -1947,7 +2386,7 @@ int ha_ndbcluster::reset()
const char **ha_ndbcluster::bas_ext() const
-{ static const char *ext[1] = { NullS }; return ext; }
+{ static const char *ext[1]= { NullS }; return ext; }
/*
@@ -2154,7 +2593,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
NdbConnection *tablock_trans=
(NdbConnection*)thd->transaction.all.ndb_tid;
- DBUG_PRINT("info", ("tablock_trans: %x", tablock_trans));
+ DBUG_PRINT("info", ("tablock_trans: %x", (uint)tablock_trans));
DBUG_ASSERT(tablock_trans); trans= m_ndb->hupp(tablock_trans);
if (trans == NULL)
ERR_RETURN(m_ndb->getNdbError());
@@ -2189,8 +2628,11 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction)
if (trans->execute(Commit) != 0)
{
const NdbError err= trans->getNdbError();
+ const NdbOperation *error_op= trans->getNdbErrorOperation();
ERR_PRINT(err);
res= ndb_to_mysql_error(&err);
+ if (res != -1)
+ ndbcluster_print_error(res, error_op);
}
ndb->closeTransaction(trans);
DBUG_RETURN(res);
@@ -2216,8 +2658,11 @@ int ndbcluster_rollback(THD *thd, void *ndb_transaction)
if (trans->execute(Rollback) != 0)
{
const NdbError err= trans->getNdbError();
+ const NdbOperation *error_op= trans->getNdbErrorOperation();
ERR_PRINT(err);
res= ndb_to_mysql_error(&err);
+ if (res != -1)
+ ndbcluster_print_error(res, error_op);
}
ndb->closeTransaction(trans);
DBUG_RETURN(0);
@@ -2225,71 +2670,184 @@ int ndbcluster_rollback(THD *thd, void *ndb_transaction)
/*
- Map MySQL type to the corresponding NDB type
+ Define NDB column based on Field.
+ Returns 0 or mysql error code.
+ Not member of ha_ndbcluster because NDBCOL cannot be declared.
*/
-inline NdbDictionary::Column::Type
-mysql_to_ndb_type(enum enum_field_types mysql_type, bool unsigned_flg)
+static int create_ndb_column(NDBCOL &col,
+ Field *field,
+ HA_CREATE_INFO *info)
{
- switch(mysql_type) {
+ // Set name
+ col.setName(field->field_name);
+ // Set type and sizes
+ const enum enum_field_types mysql_type= field->real_type();
+ switch (mysql_type) {
+ // Numeric types
case MYSQL_TYPE_DECIMAL:
- return NdbDictionary::Column::Char;
+ col.setType(NDBCOL::Char);
+ col.setLength(field->pack_length());
+ break;
case MYSQL_TYPE_TINY:
- return (unsigned_flg) ?
- NdbDictionary::Column::Tinyunsigned :
- NdbDictionary::Column::Tinyint;
+ if (field->flags & UNSIGNED_FLAG)
+ col.setType(NDBCOL::Tinyunsigned);
+ else
+ col.setType(NDBCOL::Tinyint);
+ col.setLength(1);
+ break;
case MYSQL_TYPE_SHORT:
- return (unsigned_flg) ?
- NdbDictionary::Column::Smallunsigned :
- NdbDictionary::Column::Smallint;
+ if (field->flags & UNSIGNED_FLAG)
+ col.setType(NDBCOL::Smallunsigned);
+ else
+ col.setType(NDBCOL::Smallint);
+ col.setLength(1);
+ break;
case MYSQL_TYPE_LONG:
- return (unsigned_flg) ?
- NdbDictionary::Column::Unsigned :
- NdbDictionary::Column::Int;
- case MYSQL_TYPE_TIMESTAMP:
- return NdbDictionary::Column::Unsigned;
- case MYSQL_TYPE_LONGLONG:
- return (unsigned_flg) ?
- NdbDictionary::Column::Bigunsigned :
- NdbDictionary::Column::Bigint;
+ if (field->flags & UNSIGNED_FLAG)
+ col.setType(NDBCOL::Unsigned);
+ else
+ col.setType(NDBCOL::Int);
+ col.setLength(1);
+ break;
case MYSQL_TYPE_INT24:
- return (unsigned_flg) ?
- NdbDictionary::Column::Mediumunsigned :
- NdbDictionary::Column::Mediumint;
+ if (field->flags & UNSIGNED_FLAG)
+ col.setType(NDBCOL::Mediumunsigned);
+ else
+ col.setType(NDBCOL::Mediumint);
+ col.setLength(1);
+ break;
+ case MYSQL_TYPE_LONGLONG:
+ if (field->flags & UNSIGNED_FLAG)
+ col.setType(NDBCOL::Bigunsigned);
+ else
+ col.setType(NDBCOL::Bigint);
+ col.setLength(1);
break;
case MYSQL_TYPE_FLOAT:
- return NdbDictionary::Column::Float;
+ col.setType(NDBCOL::Float);
+ col.setLength(1);
+ break;
case MYSQL_TYPE_DOUBLE:
- return NdbDictionary::Column::Double;
- case MYSQL_TYPE_DATETIME :
- return NdbDictionary::Column::Datetime;
- case MYSQL_TYPE_DATE :
- case MYSQL_TYPE_NEWDATE :
- case MYSQL_TYPE_TIME :
- case MYSQL_TYPE_YEAR :
- // Missing NDB data types, mapped to char
- return NdbDictionary::Column::Char;
- case MYSQL_TYPE_ENUM :
- return NdbDictionary::Column::Char;
- case MYSQL_TYPE_SET :
- return NdbDictionary::Column::Char;
- case MYSQL_TYPE_TINY_BLOB :
- case MYSQL_TYPE_MEDIUM_BLOB :
- case MYSQL_TYPE_LONG_BLOB :
- case MYSQL_TYPE_BLOB :
- return NdbDictionary::Column::Blob;
- case MYSQL_TYPE_VAR_STRING :
- return NdbDictionary::Column::Varchar;
- case MYSQL_TYPE_STRING :
- return NdbDictionary::Column::Char;
- case MYSQL_TYPE_NULL :
- case MYSQL_TYPE_GEOMETRY :
- return NdbDictionary::Column::Undefined;
- }
- return NdbDictionary::Column::Undefined;
+ col.setType(NDBCOL::Double);
+ col.setLength(1);
+ break;
+ // Date types
+ case MYSQL_TYPE_TIMESTAMP:
+ col.setType(NDBCOL::Unsigned);
+ col.setLength(1);
+ break;
+ case MYSQL_TYPE_DATETIME:
+ col.setType(NDBCOL::Datetime);
+ col.setLength(1);
+ break;
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_YEAR:
+ col.setType(NDBCOL::Char);
+ col.setLength(field->pack_length());
+ break;
+ // Char types
+ case MYSQL_TYPE_STRING:
+ if (field->flags & BINARY_FLAG)
+ col.setType(NDBCOL::Binary);
+ else
+ col.setType(NDBCOL::Char);
+ col.setLength(field->pack_length());
+ break;
+ case MYSQL_TYPE_VAR_STRING:
+ if (field->flags & BINARY_FLAG)
+ col.setType(NDBCOL::Varbinary);
+ else
+ col.setType(NDBCOL::Varchar);
+ col.setLength(field->pack_length());
+ break;
+ // Blob types (all come in as MYSQL_TYPE_BLOB)
+ mysql_type_tiny_blob:
+ case MYSQL_TYPE_TINY_BLOB:
+ if (field->flags & BINARY_FLAG)
+ col.setType(NDBCOL::Blob);
+ else
+ col.setType(NDBCOL::Text);
+ col.setInlineSize(256);
+ // No parts
+ col.setPartSize(0);
+ col.setStripeSize(0);
+ break;
+ mysql_type_blob:
+ case MYSQL_TYPE_BLOB:
+ if (field->flags & BINARY_FLAG)
+ col.setType(NDBCOL::Blob);
+ else
+ col.setType(NDBCOL::Text);
+ // Use "<=" even if "<" is the exact condition
+ if (field->max_length() <= (1 << 8))
+ goto mysql_type_tiny_blob;
+ else if (field->max_length() <= (1 << 16))
+ {
+ col.setInlineSize(256);
+ col.setPartSize(2000);
+ col.setStripeSize(16);
+ }
+ else if (field->max_length() <= (1 << 24))
+ goto mysql_type_medium_blob;
+ else
+ goto mysql_type_long_blob;
+ break;
+ mysql_type_medium_blob:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ if (field->flags & BINARY_FLAG)
+ col.setType(NDBCOL::Blob);
+ else
+ col.setType(NDBCOL::Text);
+ col.setInlineSize(256);
+ col.setPartSize(4000);
+ col.setStripeSize(8);
+ break;
+ mysql_type_long_blob:
+ case MYSQL_TYPE_LONG_BLOB:
+ if (field->flags & BINARY_FLAG)
+ col.setType(NDBCOL::Blob);
+ else
+ col.setType(NDBCOL::Text);
+ col.setInlineSize(256);
+ col.setPartSize(8000);
+ col.setStripeSize(4);
+ break;
+ // Other types
+ case MYSQL_TYPE_ENUM:
+ col.setType(NDBCOL::Char);
+ col.setLength(field->pack_length());
+ break;
+ case MYSQL_TYPE_SET:
+ col.setType(NDBCOL::Char);
+ col.setLength(field->pack_length());
+ break;
+ case MYSQL_TYPE_NULL:
+ case MYSQL_TYPE_GEOMETRY:
+ goto mysql_type_unsupported;
+ mysql_type_unsupported:
+ default:
+ return HA_ERR_UNSUPPORTED;
+ }
+ // Set nullable and pk
+ col.setNullable(field->maybe_null());
+ col.setPrimaryKey(field->flags & PRI_KEY_FLAG);
+ // Set autoincrement
+ if (field->flags & AUTO_INCREMENT_FLAG)
+ {
+ col.setAutoIncrement(TRUE);
+ ulonglong value= info->auto_increment_value ?
+ info->auto_increment_value -1 : (ulonglong) 0;
+ DBUG_PRINT("info", ("Autoincrement key, initial: %llu", value));
+ col.setAutoIncrementInitialValue(value);
+ }
+ else
+ col.setAutoIncrement(false);
+ return 0;
}
-
/*
Create a table in NDB Cluster
*/
@@ -2299,7 +2857,6 @@ int ha_ndbcluster::create(const char *name,
HA_CREATE_INFO *info)
{
NDBTAB tab;
- NdbDictionary::Column::Type ndb_type;
NDBCOL col;
uint pack_length, length, i;
const void *data, *pack_data;
@@ -2330,31 +2887,11 @@ int ha_ndbcluster::create(const char *name,
for (i= 0; i < form->fields; i++)
{
Field *field= form->field[i];
- ndb_type= mysql_to_ndb_type(field->real_type(),
- field->flags & UNSIGNED_FLAG);
DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d",
field->field_name, field->real_type(),
field->pack_length()));
- col.setName(field->field_name);
- col.setType(ndb_type);
- if ((ndb_type == NdbDictionary::Column::Char) ||
- (ndb_type == NdbDictionary::Column::Varchar))
- col.setLength(field->pack_length());
- else
- col.setLength(1);
- col.setNullable(field->maybe_null());
- col.setPrimaryKey(field->flags & PRI_KEY_FLAG);
- if (field->flags & AUTO_INCREMENT_FLAG)
- {
- col.setAutoIncrement(TRUE);
- ulonglong value= info->auto_increment_value ?
- info->auto_increment_value -1 : (ulonglong) 0;
- DBUG_PRINT("info", ("Autoincrement key, initial: %d", value));
- col.setAutoIncrementInitialValue(value);
- }
- else
- col.setAutoIncrement(false);
-
+ if ((my_errno= create_ndb_column(col, field, info)))
+ DBUG_RETURN(my_errno);
tab.addColumn(col);
}
@@ -2389,50 +2926,10 @@ int ha_ndbcluster::create(const char *name,
}
DBUG_PRINT("info", ("Table %s/%s created successfully",
m_dbname, m_tabname));
-
- if ((my_errno= build_index_list()))
- DBUG_RETURN(my_errno);
-
- // Create secondary indexes
- KEY* key_info= form->key_info;
- const char** key_name= key_names;
- for (i= 0; i < form->keys; i++, key_info++, key_name++)
- {
- int error= 0;
- DBUG_PRINT("info", ("Index %u: %s", i, *key_name));
-
- switch (get_index_type_from_table(i)){
- case PRIMARY_KEY_INDEX:
- // Do nothing, already created
- break;
- case PRIMARY_KEY_ORDERED_INDEX:
- error= create_ordered_index(*key_name, key_info);
- break;
- case UNIQUE_ORDERED_INDEX:
- if (!(error= create_ordered_index(*key_name, key_info)))
- error= create_unique_index(get_unique_index_name(i), key_info);
- break;
- case UNIQUE_INDEX:
- error= create_unique_index(get_unique_index_name(i), key_info);
- break;
- case ORDERED_INDEX:
- error= create_ordered_index(*key_name, key_info);
- break;
- default:
- DBUG_ASSERT(false);
- break;
- }
+ // Create secondary indexes
+ my_errno= build_index_list(form, ILBP_CREATE);
- if (error)
- {
- DBUG_PRINT("error", ("Failed to create index %u", i));
- drop_table();
- my_errno= error;
- break;
- }
- }
-
DBUG_RETURN(my_errno);
}
@@ -2468,6 +2965,7 @@ int ha_ndbcluster::create_index(const char *name,
DBUG_ENTER("create_index");
DBUG_PRINT("enter", ("name: %s ", name));
+ // NdbDictionary::Index ndb_index(name);
NdbDictionary::Index ndb_index(name);
if (unique)
ndb_index.setType(NdbDictionary::Index::UniqueHashIndex);
@@ -2601,10 +3099,17 @@ int ndbcluster_drop_database(const char *path)
longlong ha_ndbcluster::get_auto_increment()
{
- int cache_size = rows_to_insert ? rows_to_insert : 32;
+ DBUG_ENTER("get_auto_increment");
+ DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
+ int cache_size=
+ (rows_to_insert > autoincrement_prefetch) ?
+ rows_to_insert
+ : autoincrement_prefetch;
Uint64 auto_value=
- m_ndb->getAutoIncrementValue(m_tabname, cache_size);
- return (longlong)auto_value;
+ (skip_auto_increment) ?
+ m_ndb->readAutoIncrementValue((NDBTAB *) m_table)
+ : m_ndb->getAutoIncrementValue((NDBTAB *) m_table, cache_size);
+ DBUG_RETURN((longlong)auto_value);
}
@@ -2619,15 +3124,20 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_ndb(NULL),
m_table(NULL),
m_table_flags(HA_REC_NOT_IN_SEQ |
+ HA_NULL_IN_KEY |
HA_NOT_EXACT_COUNT |
- HA_NO_PREFIX_CHAR_KEYS |
- HA_NO_BLOBS),
+ HA_NO_PREFIX_CHAR_KEYS),
m_use_write(false),
retrieve_all_fields(FALSE),
- rows_to_insert(0),
+ rows_to_insert(1),
rows_inserted(0),
bulk_insert_rows(1024),
- ops_pending(0)
+ bulk_insert_not_flushed(false),
+ ops_pending(0),
+ skip_auto_increment(true),
+ blobs_buffer(0),
+ blobs_buffer_size(0),
+ dupkey((uint) -1)
{
int i;
@@ -2643,8 +3153,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
for (i= 0; i < MAX_KEY; i++)
{
- m_indextype[i]= UNDEFINED_INDEX;
- m_unique_index_name[i]= NULL;
+ m_index[i].type= UNDEFINED_INDEX;
+ m_index[i].unique_name= NULL;
+ m_index[i].unique_index= NULL;
+ m_index[i].index= NULL;
}
DBUG_VOID_RETURN;
@@ -2660,6 +3172,8 @@ ha_ndbcluster::~ha_ndbcluster()
DBUG_ENTER("~ha_ndbcluster");
release_metadata();
+ my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ blobs_buffer= 0;
// Check for open cursor/transaction
DBUG_ASSERT(m_active_cursor == NULL);
@@ -2888,6 +3402,12 @@ int ndb_discover_tables()
bool ndbcluster_init()
{
DBUG_ENTER("ndbcluster_init");
+ // Set connectstring if specified
+ if (ndbcluster_connectstring != 0)
+ {
+ DBUG_PRINT("connectstring", ("%s", ndbcluster_connectstring));
+ Ndb::setConnectString(ndbcluster_connectstring);
+ }
// Create a Ndb object to open the connection to NDB
g_ndb= new Ndb("sys");
if (g_ndb->init() != 0)
@@ -2921,6 +3441,7 @@ bool ndbcluster_init()
bool ndbcluster_end()
{
DBUG_ENTER("ndbcluster_end");
+
delete g_ndb;
g_ndb= NULL;
if (!ndbcluster_inited)
@@ -2934,13 +3455,22 @@ bool ndbcluster_end()
DBUG_RETURN(0);
}
-void ndbcluster_print_error(int error)
+/*
+ Static error print function called from
+  static handler methods ndbcluster_commit
+ and ndbcluster_rollback
+*/
+
+void ndbcluster_print_error(int error, const NdbOperation *error_op)
{
DBUG_ENTER("ndbcluster_print_error");
TABLE tab;
- tab.table_name = NULL;
+ const char *tab_name= (error_op) ? error_op->getTableName() : "";
+ tab.table_name= (char *) tab_name;
ha_ndbcluster error_handler(&tab);
+ tab.file= &error_handler;
error_handler.print_error(error, MYF(0));
+ DBUG_VOID_RETURN;
}
/*
@@ -2965,7 +3495,7 @@ void ha_ndbcluster::set_tabname(const char *path_name)
ptr= m_tabname;
while (*ptr != '\0') {
- *ptr = tolower(*ptr);
+ *ptr= tolower(*ptr);
ptr++;
}
#endif
@@ -2981,17 +3511,17 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
char *end, *ptr;
/* Scan name from the end */
- end = strend(path_name)-1;
- ptr = end;
+ end= strend(path_name)-1;
+ ptr= end;
while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
ptr--;
}
- uint name_len = end - ptr;
+ uint name_len= end - ptr;
memcpy(tabname, ptr + 1, end - ptr);
- tabname[name_len] = '\0';
+ tabname[name_len]= '\0';
#ifdef __WIN__
/* Put to lower case */
- ptr = tabname;
+ ptr= tabname;
while (*ptr != '\0') {
*ptr= tolower(*ptr);
@@ -3154,7 +3684,7 @@ static int packfrm(const void *data, uint len,
DBUG_PRINT("enter", ("data: %x, len: %d", data, len));
error= 1;
- org_len = len;
+ org_len= len;
if (my_compress((byte*)data, &org_len, &comp_len))
goto err;
@@ -3174,9 +3704,9 @@ static int packfrm(const void *data, uint len,
// Copy frm data into blob, already in machine independent format
memcpy(blob->data, data, org_len);
- *pack_data = blob;
- *pack_len = blob_len;
- error = 0;
+ *pack_data= blob;
+ *pack_len= blob_len;
+ error= 0;
DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len));
err:
@@ -3188,7 +3718,7 @@ err:
static int unpackfrm(const void **unpack_data, uint *unpack_len,
const void *pack_data)
{
- const frm_blob_struct *blob = (frm_blob_struct*)pack_data;
+ const frm_blob_struct *blob= (frm_blob_struct*)pack_data;
byte *data;
ulong complen, orglen, ver;
DBUG_ENTER("unpackfrm");
@@ -3204,7 +3734,7 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len,
if (ver != 1)
DBUG_RETURN(1);
- if (!(data = my_malloc(max(orglen, complen), MYF(MY_WME))))
+ if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
DBUG_RETURN(2);
memcpy(data, blob->data, complen);
@@ -3214,8 +3744,8 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len,
DBUG_RETURN(3);
}
- *unpack_data = data;
- *unpack_len = complen;
+ *unpack_data= data;
+ *unpack_len= complen;
DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
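
For readers skimming the new create_ndb_column() mapping above, the blob handling boils down to a small threshold table keyed on field->max_length(). The sketch below is a standalone illustration only: BlobSizing and blob_sizing() are invented names, and the real code sets the same numbers directly on an NdbDictionary::Column object.

#include <cstdint>
#include <cstdio>

// Stand-in for the inline/part/stripe sizes set on an NDB blob column.
struct BlobSizing { uint32_t inline_size, part_size, stripe_size; };

// Mirrors the thresholds in create_ndb_column(); "<=" is used even where
// "<" would be the exact condition, as noted in the patch.
static BlobSizing blob_sizing(uint32_t max_length)
{
  if (max_length <= (1 << 8))        // TINYBLOB range: inline only, no parts
    return { 256, 0, 0 };
  if (max_length <= (1 << 16))       // BLOB range
    return { 256, 2000, 16 };
  if (max_length <= (1 << 24))       // MEDIUMBLOB range
    return { 256, 4000, 8 };
  return { 256, 8000, 4 };           // LONGBLOB range
}

int main()
{
  const uint32_t lengths[]= { 255, 65535, 1u << 20, 1u << 30 };
  for (uint32_t len : lengths)
  {
    BlobSizing s= blob_sizing(len);
    printf("max_length=%u -> inline=%u part=%u stripe=%u\n",
           len, s.inline_size, s.part_size, s.stripe_size);
  }
  return 0;
}
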
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index f094b79ef35..c49a6078e7a 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -33,6 +33,12 @@ class NdbOperation; // Forward declaration
class NdbConnection; // Forward declaration
class NdbRecAttr; // Forward declaration
class NdbResultSet; // Forward declaration
+class NdbScanOperation;
+class NdbIndexScanOperation;
+class NdbBlob;
+
+// connectstring to cluster if given by mysqld
+extern const char *ndbcluster_connectstring;
typedef enum ndb_index_type {
UNDEFINED_INDEX = 0,
@@ -43,6 +49,12 @@ typedef enum ndb_index_type {
ORDERED_INDEX = 5
} NDB_INDEX_TYPE;
+typedef struct ndb_index_data {
+ NDB_INDEX_TYPE type;
+ void *index;
+ const char * unique_name;
+ void *unique_index;
+} NDB_INDEX_DATA;
typedef struct st_ndbcluster_share {
THR_LOCK lock;
@@ -145,8 +157,9 @@ class ha_ndbcluster: public handler
int create_index(const char *name, KEY *key_info, bool unique);
int create_ordered_index(const char *name, KEY *key_info);
int create_unique_index(const char *name, KEY *key_info);
- int initialize_autoincrement(const void* table);
- int build_index_list();
+ int initialize_autoincrement(const void *table);
+ enum ILBP {ILBP_CREATE = 0, ILBP_OPEN = 1}; // Index List Build Phase
+ int build_index_list(TABLE *tab, enum ILBP phase);
int get_metadata(const char* path);
void release_metadata();
const char* get_index_name(uint idx_no) const;
@@ -154,8 +167,8 @@ class ha_ndbcluster: public handler
NDB_INDEX_TYPE get_index_type(uint idx_no) const;
NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const;
- int pk_read(const byte *key, uint key_len,
- byte *buf);
+ int pk_read(const byte *key, uint key_len, byte *buf);
+ int complemented_pk_read(const byte *old_data, byte *new_data);
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int ordered_index_scan(const key_range *start_key,
@@ -169,6 +182,7 @@ class ha_ndbcluster: public handler
enum ha_rkey_function find_flag);
int close_scan();
void unpack_record(byte *buf);
+ int get_ndb_lock_type(enum thr_lock_type type);
void set_dbname(const char *pathname);
void set_tabname(const char *pathname);
@@ -179,18 +193,20 @@ class ha_ndbcluster: public handler
int set_ndb_key(NdbOperation*, Field *field,
uint fieldnr, const byte* field_ptr);
int set_ndb_value(NdbOperation*, Field *field, uint fieldnr);
- int get_ndb_value(NdbOperation*, uint fieldnr, byte *field_ptr);
+ int get_ndb_value(NdbOperation*, Field *field, uint fieldnr);
+ friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
+ int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
int set_primary_key(NdbOperation *op, const byte *key);
int set_primary_key(NdbOperation *op);
int set_primary_key_from_old_data(NdbOperation *op, const byte *old_data);
- int set_bounds(NdbOperation *ndb_op, const key_range *key,
+ int set_bounds(NdbIndexScanOperation *ndb_op, const key_range *key,
int bound);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
void print_results();
longlong get_auto_increment();
-
int ndb_err(NdbConnection*);
+ bool uses_blob_value(bool all_fields);
private:
int check_ndb_connection();
@@ -205,15 +221,23 @@ class ha_ndbcluster: public handler
ulong m_table_flags;
THR_LOCK_DATA m_lock;
NDB_SHARE *m_share;
- NDB_INDEX_TYPE m_indextype[MAX_KEY];
- const char* m_unique_index_name[MAX_KEY];
- NdbRecAttr *m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
+ NDB_INDEX_DATA m_index[MAX_KEY];
+ // NdbRecAttr has no reference to blob
+ typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
+ NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
bool retrieve_all_fields;
ha_rows rows_to_insert;
ha_rows rows_inserted;
ha_rows bulk_insert_rows;
+ bool bulk_insert_not_flushed;
ha_rows ops_pending;
+ bool skip_auto_increment;
+ bool blobs_pending;
+ // memory for blobs in one tuple
+ char *blobs_buffer;
+ uint32 blobs_buffer_size;
+ uint dupkey;
};
bool ndbcluster_init(void);
@@ -228,11 +252,4 @@ int ndbcluster_discover(const char* dbname, const char* name,
const void** frmblob, uint* frmlen);
int ndbcluster_drop_database(const char* path);
-void ndbcluster_print_error(int error);
-
-
-
-
-
-
-
+void ndbcluster_print_error(int error, const NdbOperation *error_op);
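
The NdbValue union added to ha_ndbcluster.h deserves a short illustration: a fetched column is represented either by an NdbRecAttr or, for blob columns, by an NdbBlob handle, and as the comment notes, NdbRecAttr carries no reference to a blob. The snippet below is only a model; FakeRecAttr and FakeBlob are placeholders for the real NDB API classes.

#include <cstdio>

struct FakeRecAttr { int int_value; };        // stand-in for NdbRecAttr
struct FakeBlob    { const char *data; };     // stand-in for NdbBlob

union NdbValueSketch
{
  FakeRecAttr *rec;
  FakeBlob    *blob;
  void        *ptr;   // generic view, e.g. to test whether anything was fetched
};

int main()
{
  FakeRecAttr a= { 42 };
  FakeBlob    b= { "blob body" };

  NdbValueSketch values[2];
  values[0].rec=  &a;   // non-blob column: NdbRecAttr side of the union
  values[1].blob= &b;   // blob column:     NdbBlob side of the union

  // The real handler decides which member is active from the Field type
  // (blob field or not); there is no separate tag stored in the union.
  printf("col0=%d col1=%s\n", values[0].rec->int_value, values[1].blob->data);
  return 0;
}
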
diff --git a/sql/handler.cc b/sql/handler.cc
index 9342d20ec24..640c4f3710d 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -38,6 +38,9 @@
#ifdef HAVE_ARCHIVE_DB
#include "examples/ha_archive.h"
#endif
+#ifdef HAVE_CSV_DB
+#include "examples/ha_tina.h"
+#endif
#ifdef HAVE_INNOBASE_DB
#include "ha_innodb.h"
#endif
@@ -91,6 +94,8 @@ struct show_table_type_st sys_table_types[]=
"Example storage engine", DB_TYPE_EXAMPLE_DB},
{"ARCHIVE",&have_archive_db,
"Archive storage engine", DB_TYPE_ARCHIVE_DB},
+ {"CSV",&have_csv_db,
+ "CSV storage engine", DB_TYPE_CSV_DB},
{NullS, NULL, NullS, DB_TYPE_UNKNOWN}
};
@@ -196,6 +201,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_ARCHIVE_DB:
return new ha_archive(table);
#endif
+#ifdef HAVE_CSV_DB
+ case DB_TYPE_CSV_DB:
+ return new ha_tina(table);
+#endif
#ifdef HAVE_NDBCLUSTER_DB
case DB_TYPE_NDBCLUSTER:
return new ha_ndbcluster(table);
@@ -463,31 +472,45 @@ int ha_release_temporary_latches(THD *thd)
int ha_commit_trans(THD *thd, THD_TRANS* trans)
{
int error=0;
- DBUG_ENTER("ha_commit");
+ DBUG_ENTER("ha_commit_trans");
#ifdef USING_TRANSACTIONS
if (opt_using_transactions)
{
- bool operation_done= 0;
bool transaction_commited= 0;
+ bool operation_done= 0, need_start_waiters= 0;
- /* Update the binary log if we have cached some queries */
+ /* If transaction has done some updates to tables */
if (trans == &thd->transaction.all && mysql_bin_log.is_open() &&
- my_b_tell(&thd->transaction.trans_log))
+ my_b_tell(&thd->transaction.trans_log))
{
- mysql_bin_log.write(thd, &thd->transaction.trans_log, 1);
- statistic_increment(binlog_cache_use, &LOCK_status);
- if (thd->transaction.trans_log.disk_writes != 0)
+ if (error= wait_if_global_read_lock(thd, 0, 0))
{
- /*
- We have to do this after addition of trans_log to main binlog since
- this operation can cause flushing of end of trans_log to disk.
+ /*
+ Note that ROLLBACK [TO SAVEPOINT] does not have this test; it's
+ because ROLLBACK never updates data, so needn't wait on the lock.
*/
- statistic_increment(binlog_cache_disk_use, &LOCK_status);
- thd->transaction.trans_log.disk_writes= 0;
+ my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
+ error= 1;
+ }
+ else
+ need_start_waiters= 1;
+ if (mysql_bin_log.is_open())
+ {
+ mysql_bin_log.write(thd, &thd->transaction.trans_log, 1);
+ statistic_increment(binlog_cache_use, &LOCK_status);
+ if (thd->transaction.trans_log.disk_writes != 0)
+ {
+ /*
+ We have to do this after addition of trans_log to main binlog since
+ this operation can cause flushing of end of trans_log to disk.
+ */
+ statistic_increment(binlog_cache_disk_use, &LOCK_status);
+ thd->transaction.trans_log.disk_writes= 0;
+ }
+ reinit_io_cache(&thd->transaction.trans_log,
+ WRITE_CACHE, (my_off_t) 0, 0, 1);
+ thd->transaction.trans_log.end_of_file= max_binlog_cache_size;
}
- reinit_io_cache(&thd->transaction.trans_log,
- WRITE_CACHE, (my_off_t) 0, 0, 1);
- thd->transaction.trans_log.end_of_file= max_binlog_cache_size;
}
#ifdef HAVE_NDBCLUSTER_DB
if (trans->ndb_tid)
@@ -495,9 +518,7 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans)
if ((error=ndbcluster_commit(thd,trans->ndb_tid)))
{
if (error == -1)
- my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
- else
- ndbcluster_print_error(error);
+ my_error(ER_ERROR_DURING_COMMIT, MYF(0));
error=1;
}
if (trans == &thd->transaction.all)
@@ -544,6 +565,8 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans)
statistic_increment(ha_commit_count,&LOCK_status);
thd->transaction.cleanup();
}
+ if (need_start_waiters)
+ start_waiting_global_read_lock(thd);
}
#endif // using transactions
DBUG_RETURN(error);
@@ -553,7 +576,7 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans)
int ha_rollback_trans(THD *thd, THD_TRANS *trans)
{
int error=0;
- DBUG_ENTER("ha_rollback");
+ DBUG_ENTER("ha_rollback_trans");
#ifdef USING_TRANSACTIONS
if (opt_using_transactions)
{
@@ -564,9 +587,7 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans)
if ((error=ndbcluster_rollback(thd, trans->ndb_tid)))
{
if (error == -1)
- my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error);
- else
- ndbcluster_print_error(error);
+ my_error(ER_ERROR_DURING_ROLLBACK, MYF(0));
error=1;
}
trans->ndb_tid = 0;
@@ -747,12 +768,12 @@ bool ha_flush_logs()
{
bool result=0;
#ifdef HAVE_BERKELEY_DB
- if ((have_berkeley_db == SHOW_OPTION_YES) &&
+ if ((have_berkeley_db == SHOW_OPTION_YES) &&
berkeley_flush_logs())
result=1;
#endif
#ifdef HAVE_INNOBASE_DB
- if ((have_innodb == SHOW_OPTION_YES) &&
+ if ((have_innodb == SHOW_OPTION_YES) &&
innobase_flush_logs())
result=1;
#endif
@@ -847,7 +868,7 @@ my_off_t ha_get_ptr(byte *ptr, uint pack_length)
int handler::ha_open(const char *name, int mode, int test_if_locked)
{
int error;
- DBUG_ENTER("handler::open");
+ DBUG_ENTER("handler::ha_open");
DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
name, table->db_type, table->db_stat, mode,
test_if_locked));
@@ -946,7 +967,7 @@ void handler::update_auto_increment()
{
longlong nr;
THD *thd;
- DBUG_ENTER("update_auto_increment");
+ DBUG_ENTER("handler::update_auto_increment");
if (table->next_number_field->val_int() != 0 ||
table->auto_increment_field_not_null &&
current_thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
@@ -1004,7 +1025,7 @@ longlong handler::get_auto_increment()
void handler::print_error(int error, myf errflag)
{
- DBUG_ENTER("print_error");
+ DBUG_ENTER("handler::print_error");
DBUG_PRINT("enter",("error: %d",error));
int textno=ER_GET_ERRNO;
@@ -1143,7 +1164,7 @@ bool handler::get_error_message(int error, String* buf)
uint handler::get_dup_key(int error)
{
- DBUG_ENTER("get_dup_key");
+ DBUG_ENTER("handler::get_dup_key");
table->file->errkey = (uint) -1;
if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE)
info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
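
The reworked ha_commit_trans() above changes the order of operations: a transaction with cached binlog data first has to pass the global read lock check, then the cache is flushed and the engines commit, and waiters blocked by FLUSH TABLES WITH READ LOCK are only released at the very end. The sketch below models that control flow with stub functions (all *_stub names are invented); it is a simplification, not the server code.

#include <cstdio>

static bool wait_if_global_read_lock_stub()       { return false; } // false: may proceed
static void start_waiting_global_read_lock_stub() { puts("waiters released"); }
static void flush_binlog_cache_stub()             { puts("binlog cache flushed"); }
static int  engine_commit_stub()                  { puts("engines committed"); return 0; }

static int commit_trans_sketch(bool has_binlog_data)
{
  int  error= 0;
  bool need_start_waiters= false;

  if (has_binlog_data)
  {
    if (wait_if_global_read_lock_stub())
      error= 1;                  // the real code reports ER_ERROR_DURING_COMMIT here
    else
      need_start_waiters= true;  // remember to wake waiters once we are done
    flush_binlog_cache_stub();
  }
  if (engine_commit_stub())
    error= 1;
  if (need_start_waiters)
    start_waiting_global_read_lock_stub();
  return error;
}

int main() { return commit_trans_sketch(true); }
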
diff --git a/sql/handler.h b/sql/handler.h
index 092ea47ef4d..7e5e626f713 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -147,7 +147,7 @@ enum db_type
DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM,
DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB,
DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
- DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB,
+ DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
DB_TYPE_DEFAULT // Must be last
};
@@ -529,7 +529,8 @@ extern TYPELIB tx_isolation_typelib;
#define ha_rollback(thd) (ha_rollback_trans((thd), &((thd)->transaction.all)))
#define ha_supports_generate(T) (T != DB_TYPE_INNODB && \
- T != DB_TYPE_BERKELEY_DB)
+ T != DB_TYPE_BERKELEY_DB && \
+ T != DB_TYPE_NDBCLUSTER)
bool ha_caching_allowed(THD* thd, char* table_key,
uint key_length, uint8 cache_type);
diff --git a/sql/item.cc b/sql/item.cc
index 099e6b977d6..ecbe2d22fa4 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -62,10 +62,10 @@ Item::Item():
*/
if (thd->lex->current_select)
{
- SELECT_LEX_NODE::enum_parsing_place place=
+ enum_parsing_place place=
thd->lex->current_select->parsing_place;
- if (place == SELECT_LEX_NODE::SELECT_LIST ||
- place == SELECT_LEX_NODE::IN_HAVING)
+ if (place == SELECT_LIST ||
+ place == IN_HAVING)
thd->lex->current_select->select_n_having_items++;
}
}
@@ -179,10 +179,17 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
name_length= 0;
return;
}
- while (length && !my_isgraph(cs,*str))
- { // Fix problem with yacc
- length--;
- str++;
+ if (cs->ctype)
+ {
+ /*
+ This will probably need a better implementation in the future:
+ a function in CHARSET_INFO structure.
+ */
+ while (length && !my_isgraph(cs,*str))
+ { // Fix problem with yacc
+ length--;
+ str++;
+ }
}
if (!my_charset_same(cs, system_charset_info))
{
@@ -291,8 +298,9 @@ Item_splocal::type() const
}
-bool DTCollation::aggregate(DTCollation &dt)
+bool DTCollation::aggregate(DTCollation &dt, bool superset_conversion)
{
+ nagg++;
if (!my_charset_same(collation, dt.collation))
{
/*
@@ -306,15 +314,39 @@ bool DTCollation::aggregate(DTCollation &dt)
if (derivation <= dt.derivation)
; // Do nothing
else
- set(dt);
+ {
+ set(dt);
+ strong= nagg;
+ }
}
else if (dt.collation == &my_charset_bin)
{
if (dt.derivation <= derivation)
+ {
set(dt);
+ strong= nagg;
+ }
else
; // Do nothing
}
+ else if (superset_conversion)
+ {
+ if (derivation < dt.derivation &&
+ collation->state & MY_CS_UNICODE)
+ ; // Do nothing
+ else if (dt.derivation < derivation &&
+ dt.collation->state & MY_CS_UNICODE)
+ {
+ set(dt);
+ strong= nagg;
+ }
+ else
+ {
+ // Cannot convert to superset
+ set(0, DERIVATION_NONE);
+ return 1;
+ }
+ }
else
{
set(0, DERIVATION_NONE);
@@ -328,6 +360,7 @@ bool DTCollation::aggregate(DTCollation &dt)
else if (dt.derivation < derivation)
{
set(dt);
+ strong= nagg;
}
else
{
@@ -979,7 +1012,7 @@ double Item_param::val()
This works for example when user says SELECT ?+0.0 and supplies
time value for the placeholder.
*/
- return (double) TIME_to_ulonglong(&value.time);
+ return ulonglong2double(TIME_to_ulonglong(&value.time));
case NULL_VALUE:
return 0.0;
default:
@@ -1312,12 +1345,24 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
table_list= (last= sl)->get_table_list();
if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list)
{
- // it is primary INSERT st_select_lex => skip first table resolving
+ /*
+ it is primary INSERT st_select_lex => skip first table
+ resolving
+ */
table_list= table_list->next_local;
}
Item_subselect *prev_subselect_item= prev_unit->item;
- if ((tmp= find_field_in_tables(thd, this,
+ enum_parsing_place place= prev_subselect_item->parsing_place;
+ /*
+        check table fields only if the subquery is used somewhere outside of
+        HAVING or the SELECT list, or the outer SELECT does not use grouping
+        (i.e. tables are accessible)
+ */
+ if (((place != IN_HAVING &&
+ place != SELECT_LIST) ||
+ (sl->with_sum_func == 0 && sl->group_list.elements == 0)) &&
+ (tmp= find_field_in_tables(thd, this,
table_list, ref,
0, 1)) != not_found_field)
{
@@ -2029,7 +2074,16 @@ bool Item_ref::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference)
// it is primary INSERT st_select_lex => skip first table resolving
table_list= table_list->next_local;
}
- if ((tmp= find_field_in_tables(thd, this,
+ enum_parsing_place place= prev_subselect_item->parsing_place;
+ /*
+        Check table fields only if the subquery is used somewhere outside of
+        HAVING or the SELECT list, or the outer SELECT does not use grouping
+        (i.e. tables are accessible)
+ */
+ if (((place != IN_HAVING &&
+ place != SELECT_LIST) ||
+ (sl->with_sum_func == 0 && sl->group_list.elements == 0)) &&
+ (tmp= find_field_in_tables(thd, this,
table_list, reference,
0, 1)) != not_found_field)
{
diff --git a/sql/item.h b/sql/item.h
index 63463c6cc41..4d3f1736b4e 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -41,16 +41,22 @@ class DTCollation {
public:
CHARSET_INFO *collation;
enum Derivation derivation;
+ uint nagg; // Total number of aggregated collations.
+ uint strong; // Number of the strongest collation.
DTCollation()
{
collation= &my_charset_bin;
derivation= DERIVATION_NONE;
+ nagg= 0;
+ strong= 0;
}
DTCollation(CHARSET_INFO *collation_arg, Derivation derivation_arg)
{
collation= collation_arg;
derivation= derivation_arg;
+ nagg= 0;
+ strong= 0;
}
void set(DTCollation &dt)
{
@@ -66,9 +72,9 @@ public:
{ collation= collation_arg; }
void set(Derivation derivation_arg)
{ derivation= derivation_arg; }
- bool aggregate(DTCollation &dt);
- bool set(DTCollation &dt1, DTCollation &dt2)
- { set(dt1); return aggregate(dt2); }
+ bool aggregate(DTCollation &dt, bool superset_conversion= FALSE);
+ bool set(DTCollation &dt1, DTCollation &dt2, bool superset_conversion= FALSE)
+ { set(dt1); return aggregate(dt2, superset_conversion); }
const char *derivation_name() const
{
switch(derivation)
@@ -243,6 +249,7 @@ public:
virtual void top_level_item() {}
virtual void set_result_field(Field *field) {}
virtual bool is_result_field() { return 0; }
+ virtual bool is_bool_func() { return 0; }
virtual void save_in_result_field(bool no_conversions) {}
virtual void no_rows_in_result() {}
virtual Item *copy_or_same(THD *thd) { return this; }
@@ -275,8 +282,7 @@ public:
virtual void bring_value() {}
Field *tmp_table_field_from_field_type(TABLE *table);
-
- /* Used in sql_select.cc:eliminate_not_funcs() */
+
virtual Item *neg_transformer(THD *thd) { return NULL; }
void delete_self()
{
@@ -1020,7 +1026,7 @@ public:
};
/*
- The following class is used to optimize comparing of date columns
+ The following class is used to optimize comparing of date and bigint columns
We need to save the original item, to be able to set the field to the
original value in 'opt_range'.
*/
@@ -1030,7 +1036,9 @@ class Item_int_with_ref :public Item_int
Item *ref;
public:
Item_int_with_ref(longlong i, Item *ref_arg) :Item_int(i), ref(ref_arg)
- {}
+ {
+ unsigned_flag= ref_arg->unsigned_flag;
+ }
int save_in_field(Field *field, bool no_conversions)
{
return ref->save_in_field(field, no_conversions);
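
To make the new nagg/strong bookkeeping in DTCollation easier to follow: aggregate() now counts how many collations have been folded in and records which argument supplied the winning one, so callers such as Item_bool_func2::fix_length_and_dec() know which side is the weak one to convert. The model below is a loose sketch; MiniColl is an invented type, only the same-character-set derivation rule is shown, and the numeric derivation values are illustrative.

#include <cstdio>
#include <cstring>

// Lower derivation value means a stronger collation source (illustrative values).
enum Derivation { DERIVATION_EXPLICIT= 0, DERIVATION_NONE= 1,
                  DERIVATION_IMPLICIT= 2, DERIVATION_COERCIBLE= 3 };

struct MiniColl
{
  const char *name;
  Derivation  derivation;
  unsigned    nagg;    // how many collations have been aggregated
  unsigned    strong;  // number of the argument that won (0 = the first one)

  void set(const MiniColl &dt) { name= dt.name; derivation= dt.derivation; }

  // Same-charset rule only: the lower derivation wins; a tie between two
  // different collations cannot be aggregated.
  bool aggregate(const MiniColl &dt)
  {
    nagg++;
    if (derivation < dt.derivation)
      ;                         // keep the current collation
    else if (dt.derivation < derivation)
    {
      set(dt);
      strong= nagg;             // remember which argument supplied the winner
    }
    else if (strcmp(name, dt.name) != 0)
      return true;              // conflict: cannot aggregate
    return false;
  }
};

int main()
{
  MiniColl column=   { "latin1_swedish_ci", DERIVATION_IMPLICIT,  0, 0 };
  MiniColl constant= { "latin1_bin",        DERIVATION_COERCIBLE, 0, 0 };

  MiniColl agg= column;               // argument 0 seeds the aggregate
  if (agg.aggregate(constant))
    puts("cannot aggregate");
  else
    // strong == 0 here: argument 0 (the column) won, so the constant is the
    // weak side that a caller would convert to the column's collation.
    printf("winner=%s nagg=%u strong=%u\n", agg.name, agg.nagg, agg.strong);
  return 0;
}
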
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 976cd07cdbe..f6daf0f5ed4 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -188,25 +188,17 @@ void Item_bool_func2::fix_length_and_dec()
{
uint strong= 0;
uint weak= 0;
+ DTCollation coll;
- if ((args[0]->collation.derivation < args[1]->collation.derivation) &&
- !my_charset_same(args[0]->collation.collation,
- args[1]->collation.collation) &&
- (args[0]->collation.collation->state & MY_CS_UNICODE))
- {
- weak= 1;
- }
- else if ((args[1]->collation.derivation < args[0]->collation.derivation) &&
- !my_charset_same(args[0]->collation.collation,
- args[1]->collation.collation) &&
- (args[1]->collation.collation->state & MY_CS_UNICODE))
- {
- strong= 1;
- }
-
- if (strong || weak)
+ if (args[0]->result_type() == STRING_RESULT &&
+ args[1]->result_type() == STRING_RESULT &&
+ !my_charset_same(args[0]->collation.collation,
+ args[1]->collation.collation) &&
+ !coll.set(args[0]->collation, args[1]->collation, TRUE))
{
Item* conv= 0;
+ strong= coll.strong;
+ weak= strong ? 0 : 1;
if (args[weak]->type() == STRING_ITEM)
{
String tmp, cstr;
@@ -219,9 +211,22 @@ void Item_bool_func2::fix_length_and_dec()
}
else
{
- conv= new Item_func_conv_charset(args[weak],args[strong]->collation.collation);
+ THD *thd= current_thd;
+ /*
+ In case we're in statement prepare, create conversion item
+ in its memory: it will be reused on each execute.
+ */
+ Item_arena *arena= thd->current_arena, backup;
+ if (!arena->is_stmt_prepare())
+ arena= 0;
+ else
+ thd->set_n_backup_item_arena(arena, &backup);
+ conv= new Item_func_conv_charset(args[weak],
+ args[strong]->collation.collation);
+ if (arena)
+ thd->restore_backup_item_arena(arena, &backup);
conv->collation.set(args[weak]->collation.derivation);
- conv->fix_fields(current_thd, 0, &conv);
+ conv->fix_fields(thd, 0, &conv);
}
args[weak]= conv ? conv : args[weak];
}
@@ -268,8 +273,8 @@ void Item_bool_func2::fix_length_and_dec()
int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type)
{
owner= item;
- func= comparator_matrix[type][(owner->functype() == Item_func::EQUAL_FUNC)?
- 1:0];
+ func= comparator_matrix[type]
+ [test(owner->functype() == Item_func::EQUAL_FUNC)];
if (type == ROW_RESULT)
{
uint n= (*a)->cols();
@@ -303,10 +308,10 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type)
my_coll_agg_error((*a)->collation, (*b)->collation, owner->func_name());
return 1;
}
- if (my_binary_compare(cmp_collation.collation))
+ if (cmp_collation.collation == &my_charset_bin)
{
/*
- We are using binary collation, change to compare byte by byte,
+ We are using BLOB/BINARY/VARBINARY, change to compare byte by byte,
without removing end space
*/
if (func == &Arg_comparator::compare_string)
@@ -315,6 +320,22 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type)
func= &Arg_comparator::compare_e_binary_string;
}
}
+ else if (type == INT_RESULT)
+ {
+ if (func == &Arg_comparator::compare_int_signed)
+ {
+ if ((*a)->unsigned_flag)
+ func= ((*b)->unsigned_flag)? &Arg_comparator::compare_int_unsigned :
+ &Arg_comparator::compare_int_unsigned_signed;
+ else if ((*b)->unsigned_flag)
+ func= &Arg_comparator::compare_int_signed_unsigned;
+ }
+ else if (func== &Arg_comparator::compare_e_int)
+ {
+ if ((*a)->unsigned_flag ^ (*b)->unsigned_flag)
+ func= &Arg_comparator::compare_e_int_diff_signedness;
+ }
+ }
return 0;
}
@@ -416,7 +437,7 @@ int Arg_comparator::compare_e_real()
return test(val1 == val2);
}
-int Arg_comparator::compare_int()
+int Arg_comparator::compare_int_signed()
{
longlong val1= (*a)->val_int();
if (!(*a)->null_value)
@@ -434,6 +455,82 @@ int Arg_comparator::compare_int()
return -1;
}
+
+/*
+ Compare values as BIGINT UNSIGNED.
+*/
+
+int Arg_comparator::compare_int_unsigned()
+{
+ ulonglong val1= (*a)->val_int();
+ if (!(*a)->null_value)
+ {
+ ulonglong val2= (*b)->val_int();
+ if (!(*b)->null_value)
+ {
+ owner->null_value= 0;
+ if (val1 < val2) return -1;
+ if (val1 == val2) return 0;
+ return 1;
+ }
+ }
+ owner->null_value= 1;
+ return -1;
+}
+
+
+/*
+ Compare signed (*a) with unsigned (*B)
+*/
+
+int Arg_comparator::compare_int_signed_unsigned()
+{
+ longlong sval1= (*a)->val_int();
+ if (!(*a)->null_value)
+ {
+ ulonglong uval2= (ulonglong)(*b)->val_int();
+ if (!(*b)->null_value)
+ {
+ owner->null_value= 0;
+ if (sval1 < 0 || (ulonglong)sval1 < uval2)
+ return -1;
+ if ((ulonglong)sval1 == uval2)
+ return 0;
+ return 1;
+ }
+ }
+ owner->null_value= 1;
+ return -1;
+}
+
+
+/*
+ Compare unsigned (*a) with signed (*B)
+*/
+
+int Arg_comparator::compare_int_unsigned_signed()
+{
+ ulonglong uval1= (ulonglong)(*a)->val_int();
+ if (!(*a)->null_value)
+ {
+ longlong sval2= (*b)->val_int();
+ if (!(*b)->null_value)
+ {
+ owner->null_value= 0;
+ if (sval2 < 0)
+ return 1;
+ if (uval1 < (ulonglong)sval2)
+ return -1;
+ if (uval1 == (ulonglong)sval2)
+ return 0;
+ return 1;
+ }
+ }
+ owner->null_value= 1;
+ return -1;
+}
+
+
int Arg_comparator::compare_e_int()
{
longlong val1= (*a)->val_int();
@@ -443,6 +540,17 @@ int Arg_comparator::compare_e_int()
return test(val1 == val2);
}
+/*
+ Compare unsigned *a with signed *b or signed *a with unsigned *b.
+*/
+int Arg_comparator::compare_e_int_diff_signedness()
+{
+ longlong val1= (*a)->val_int();
+ longlong val2= (*b)->val_int();
+ if ((*a)->null_value || (*b)->null_value)
+ return test((*a)->null_value && (*b)->null_value);
+ return (val1 >= 0) && test(val1 == val2);
+}
int Arg_comparator::compare_row()
{
@@ -1629,8 +1737,8 @@ bool Item_func_in::nulls_in_row()
static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
{
return cs->coll->strnncollsp(cs,
- (unsigned char *) x->ptr(),x->length(),
- (unsigned char *) y->ptr(),y->length());
+ (uchar *) x->ptr(),x->length(),
+ (uchar *) y->ptr(),y->length());
}
@@ -1640,12 +1748,58 @@ void Item_func_in::fix_length_and_dec()
uint const_itm= 1;
agg_cmp_type(&cmp_type, args, arg_count);
- if ((cmp_type == STRING_RESULT) &&
- (agg_arg_collations_for_comparison(cmp_collation, args, arg_count)))
- return;
-
+
for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++)
const_itm&= arg[0]->const_item();
+
+
+ if (cmp_type == STRING_RESULT)
+ {
+ /*
+      We allow character set conversion of constants for
+
+        item IN (const1, const2, const3, ...)
+
+      if item's character set is a superset of all the arguments' ones,
+      and if it is the strong side according to coercibility rules.
+
+      TODO: add conversion for non-constant IN values
+      by creating Item_func_conv_charset().
+ */
+
+ if (agg_arg_collations_for_comparison(cmp_collation,
+ args, arg_count, TRUE))
+ return;
+ if ((!my_charset_same(args[0]->collation.collation,
+ cmp_collation.collation) || !const_itm))
+ {
+ if (agg_arg_collations_for_comparison(cmp_collation,
+ args, arg_count, FALSE))
+ return;
+ }
+ else
+ {
+ /*
+ Conversion is possible:
+ All IN arguments are constants.
+ */
+ for (arg= args+1, arg_end= args+arg_count; arg < arg_end; arg++)
+ {
+ if (!my_charset_same(cmp_collation.collation,
+ arg[0]->collation.collation))
+ {
+ Item_string *conv;
+ String tmp, cstr, *ostr= arg[0]->val_str(&tmp);
+ cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(),
+ cmp_collation.collation);
+ conv= new Item_string(cstr.ptr(),cstr.length(), cstr.charset(),
+ arg[0]->collation.derivation);
+ conv->str_value.copy();
+ arg[0]= conv;
+ }
+ }
+ }
+ }
/*
Row item with NULLs inside can return NULL or FALSE =>
@@ -1927,15 +2081,6 @@ void Item_cond::neg_arguments(THD *thd)
{
if (!(new_item= new Item_func_not(item)))
return; // Fatal OEM error
- /*
- We can use 0 as tables list because Item_func_not do not use it
- on fix_fields and its arguments are already fixed.
-
- We do not check results of fix_fields, because there are not way
- to return error in this functions interface, thd->net.report_error
- will be checked on upper level call.
- */
- new_item->fix_fields(thd, 0, &new_item);
}
VOID(li.replace(new_item));
}
@@ -2354,7 +2499,7 @@ void Item_func_like::turboBM_compute_suffixes(int *suff)
*splm1 = pattern_len;
- if (cs == &my_charset_bin)
+ if (!cs->sort_order)
{
int i;
for (i = pattern_len - 2; i >= 0; i--)
@@ -2457,7 +2602,7 @@ void Item_func_like::turboBM_compute_bad_character_shifts()
for (i = bmBc; i < end; i++)
*i = pattern_len;
- if (cs == &my_charset_bin)
+ if (!cs->sort_order)
{
for (j = 0; j < plm1; j++)
bmBc[(uint) (uchar) pattern[j]] = plm1 - j;
@@ -2488,7 +2633,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
const int tlmpl= text_len - pattern_len;
/* Searching */
- if (cs == &my_charset_bin)
+ if (!cs->sort_order)
{
while (j <= tlmpl)
{
@@ -2613,9 +2758,6 @@ longlong Item_cond_xor::val_int()
IS NULL(a) -> IS NOT NULL(a)
IS NOT NULL(a) -> IS NULL(a)
- NOTE
- This method is used in the eliminate_not_funcs() function.
-
RETURN
New item or
NULL if we cannot apply NOT transformation (see Item::neg_transformer()).
@@ -2623,26 +2765,13 @@ longlong Item_cond_xor::val_int()
Item *Item_func_not::neg_transformer(THD *thd) /* NOT(x) -> x */
{
- // We should apply negation elimination to the argument of the NOT function
- return eliminate_not_funcs(thd, args[0]);
+ return args[0];
}
Item *Item_bool_rowready_func2::neg_transformer(THD *thd)
{
Item *item= negated_item();
- if (item)
- {
- /*
- We can use 0 as tables list because Item_func* family do not use it
- on fix_fields and its arguments are already fixed.
-
- We do not check results of fix_fields, because there are not way
- to return error in this functions interface, thd->net.report_error
- will be checked on upper level call.
- */
- item->fix_fields(thd, 0, &item);
- }
return item;
}
@@ -2651,9 +2780,6 @@ Item *Item_bool_rowready_func2::neg_transformer(THD *thd)
Item *Item_func_isnull::neg_transformer(THD *thd)
{
Item *item= new Item_func_isnotnull(args[0]);
- // see comment before fix_fields in Item_bool_rowready_func2::neg_transformer
- if (item)
- item->fix_fields(thd, 0, &item);
return item;
}
@@ -2662,9 +2788,6 @@ Item *Item_func_isnull::neg_transformer(THD *thd)
Item *Item_func_isnotnull::neg_transformer(THD *thd)
{
Item *item= new Item_func_isnull(args[0]);
- // see comment before fix_fields in Item_bool_rowready_func2::neg_transformer
- if (item)
- item->fix_fields(thd, 0, &item);
return item;
}
@@ -2674,9 +2797,6 @@ Item *Item_cond_and::neg_transformer(THD *thd) /* NOT(a AND b AND ...) -> */
{
neg_arguments(thd);
Item *item= new Item_cond_or(list);
- // see comment before fix_fields in Item_bool_rowready_func2::neg_transformer
- if (item)
- item->fix_fields(thd, 0, &item);
return item;
}
@@ -2686,9 +2806,6 @@ Item *Item_cond_or::neg_transformer(THD *thd) /* NOT(a OR b OR ...) -> */
{
neg_arguments(thd);
Item *item= new Item_cond_and(list);
- // see comment before fix_fields in Item_bool_rowready_func2::neg_transformer
- if (item)
- item->fix_fields(thd, 0, &item);
return item;
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 23ac82e79e6..e7bef18e629 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -65,12 +65,16 @@ public:
int compare_string(); // compare args[0] & args[1]
int compare_binary_string(); // compare args[0] & args[1]
int compare_real(); // compare args[0] & args[1]
- int compare_int(); // compare args[0] & args[1]
+ int compare_int_signed(); // compare args[0] & args[1]
+ int compare_int_signed_unsigned();
+ int compare_int_unsigned_signed();
+ int compare_int_unsigned();
int compare_row(); // compare args[0] & args[1]
int compare_e_string(); // compare args[0] & args[1]
int compare_e_binary_string(); // compare args[0] & args[1]
int compare_e_real(); // compare args[0] & args[1]
int compare_e_int(); // compare args[0] & args[1]
+ int compare_e_int_diff_signedness();
int compare_e_row(); // compare args[0] & args[1]
static arg_cmp_func comparator_matrix [4][2];
@@ -85,6 +89,7 @@ public:
Item_bool_func(Item *a) :Item_int_func(a) {}
Item_bool_func(Item *a,Item *b) :Item_int_func(a,b) {}
Item_bool_func(THD *thd, Item_bool_func *item) :Item_int_func(thd, item) {}
+ bool is_bool_func() { return 1; }
void fix_length_and_dec() { decimals=0; max_length=1; }
};
@@ -197,6 +202,7 @@ public:
bool have_rev_func() const { return rev_functype() != UNKNOWN_FUNC; }
void print(String *str) { Item_func::print_op(str); }
bool is_null() { return test(args[0]->is_null() || args[1]->is_null()); }
+ bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp.cmp_collation.collation; }
friend class Arg_comparator;
@@ -304,7 +310,7 @@ public:
enum Functype rev_functype() const { return EQUAL_FUNC; }
cond_result eq_cmp_result() const { return COND_TRUE; }
const char *func_name() const { return "<=>"; }
- Item* neg_transformer(THD *thd) { return 0; }
+ Item *neg_transformer(THD *thd) { return 0; }
};
@@ -778,6 +784,7 @@ class Item_func_in :public Item_int_func
enum Functype functype() const { return IN_FUNC; }
const char *func_name() const { return " IN "; }
bool nulls_in_row();
+ bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
};
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 02cface827b..e21364045ba 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -86,11 +86,6 @@ Item *create_func_conv(Item* a, Item *b, Item *c)
return new Item_func_conv(a,b,c);
}
-Item *create_func_convert_tz(Item* a, Item *b, Item *c)
-{
- return new Item_func_convert_tz(a,b,c);
-}
-
Item *create_func_cos(Item* a)
{
return new Item_func_cos(a);
diff --git a/sql/item_create.h b/sql/item_create.h
index 18c5b3239f3..d48aed5284a 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -31,7 +31,6 @@ Item *create_func_char_length(Item* a);
Item *create_func_cast(Item *a, Cast_target cast_type, int len, CHARSET_INFO *cs);
Item *create_func_connection_id(void);
Item *create_func_conv(Item* a, Item *b, Item *c);
-Item *create_func_convert_tz(Item* a, Item *b, Item *c);
Item *create_func_cos(Item* a);
Item *create_func_cot(Item* a);
Item *create_func_crc32(Item* a);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index c2c93586af8..7c5b584e645 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -78,13 +78,16 @@ static void my_coll_agg_error(Item** args, uint count, const char *fname)
}
-bool Item_func::agg_arg_collations(DTCollation &c, Item **av, uint count)
+bool Item_func::agg_arg_collations(DTCollation &c, Item **av, uint count,
+ bool allow_superset_conversion)
{
uint i;
+ c.nagg= 0;
+ c.strong= 0;
c.set(av[0]->collation);
for (i= 1; i < count; i++)
{
- if (c.aggregate(av[i]->collation))
+ if (c.aggregate(av[i]->collation, allow_superset_conversion))
{
my_coll_agg_error(av, count, func_name());
return TRUE;
@@ -95,9 +98,10 @@ bool Item_func::agg_arg_collations(DTCollation &c, Item **av, uint count)
bool Item_func::agg_arg_collations_for_comparison(DTCollation &c,
- Item **av, uint count)
+ Item **av, uint count,
+ bool allow_superset_conv)
{
- if (agg_arg_collations(c, av, count))
+ if (agg_arg_collations(c, av, count, allow_superset_conv))
return TRUE;
if (c.derivation == DERIVATION_NONE)
@@ -972,7 +976,7 @@ void Item_func_round::fix_length_and_dec()
if (tmp < 0)
decimals=0;
else
- decimals=tmp;
+ decimals=min(tmp,NOT_FIXED_DEC);
}
}
@@ -1438,30 +1442,43 @@ longlong Item_func_find_in_set::val_int()
int diff;
if ((diff=buffer->length() - find->length()) >= 0)
{
- const char *f_pos=find->ptr();
- const char *f_end=f_pos+find->length();
- const char *str=buffer->ptr();
- const char *end=str+diff+1;
- const char *real_end=str+buffer->length();
- uint position=1;
- do
+ my_wc_t wc;
+ CHARSET_INFO *cs= cmp_collation.collation;
+ const char *str_begin= buffer->ptr();
+ const char *str_end= buffer->ptr();
+ const char *real_end= str_end+buffer->length();
+ const uchar *find_str= (const uchar *) find->ptr();
+ uint find_str_len= find->length();
+ int position= 0;
+ while (1)
{
- const char *pos= f_pos;
- while (pos != f_end)
+ int symbol_len;
+ if ((symbol_len= cs->cset->mb_wc(cs, &wc, (uchar*) str_end,
+ (uchar*) real_end)) > 0)
{
- if (my_toupper(cmp_collation.collation,*str) !=
- my_toupper(cmp_collation.collation,*pos))
- goto not_found;
- str++;
- pos++;
+ const char *substr_end= str_end + symbol_len;
+ bool is_last_item= (substr_end == real_end);
+ if (wc == (my_wc_t) separator || is_last_item)
+ {
+ position++;
+ if (is_last_item)
+ str_end= substr_end;
+ if (!my_strnncoll(cs, (const uchar *) str_begin,
+ str_end - str_begin,
+ find_str, find_str_len))
+ return (longlong) position;
+ else
+ str_begin= substr_end;
+ }
+ str_end= substr_end;
}
- if (str == real_end || str[0] == separator)
- return (longlong) position;
- not_found:
- while (str < end && str[0] != separator)
- str++;
- position++;
- } while (++str <= end);
+ else if (str_end - str_begin == 0 &&
+ find_str_len == 0 &&
+ wc == (my_wc_t) separator)
+ return (longlong) ++position;
+ else
+ return (longlong) 0;
+ }
}
return 0;
}
@@ -1649,7 +1666,7 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func,
func->max_length=min(initid.max_length,MAX_BLOB_WIDTH);
func->maybe_null=initid.maybe_null;
const_item_cache=initid.const_item;
- func->decimals=min(initid.decimals,31);
+ func->decimals=min(initid.decimals,NOT_FIXED_DEC);
}
initialized=1;
if (error)
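
The FIND_IN_SET() rewrite above replaces the old byte-by-byte upper-case scan with a walk over whole characters, comparing each complete list item with the collation's comparison function. The sketch below shows the same item-by-item idea in plain C++ (find_in_set_sketch is an invented name, the walk is byte-based and comparison is plain equality); the server version steps through multibyte characters with cs->cset->mb_wc() and compares items with my_strnncoll().

#include <cstdio>
#include <string>

// Returns the 1-based position of `find` in the comma-separated `set`,
// or 0 if it is not present.
static long find_in_set_sketch(const std::string &find, const std::string &set)
{
  long position= 0;
  std::string::size_type begin= 0;
  while (begin <= set.size())
  {
    std::string::size_type end= set.find(',', begin);
    if (end == std::string::npos)
      end= set.size();
    position++;
    if (set.compare(begin, end - begin, find) == 0)
      return position;                 // found the current list item
    begin= end + 1;                    // skip the separator
  }
  return 0;
}

int main()
{
  printf("%ld\n", find_in_set_sketch("b", "a,b,c"));   // 2
  printf("%ld\n", find_in_set_sketch("d", "a,b,c"));   // 0
  printf("%ld\n", find_in_set_sketch("",  "a,,c"));    // 2 (empty item matches)
  return 0;
}
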
diff --git a/sql/item_func.h b/sql/item_func.h
index 8636bcd6507..76d0346531e 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -142,8 +142,11 @@ public:
Field *tmp_table_field(TABLE *t_arg);
Item *get_tmp_table_item(THD *thd);
- bool agg_arg_collations(DTCollation &c, Item **items, uint nitems);
- bool agg_arg_collations_for_comparison(DTCollation &c, Item **items, uint nitems);
+ bool agg_arg_collations(DTCollation &c, Item **items, uint nitems,
+ bool allow_superset_conversion= FALSE);
+ bool agg_arg_collations_for_comparison(DTCollation &c,
+ Item **items, uint nitems,
+ bool allow_superset_comversion= FALSE);
bool walk(Item_processor processor, byte *arg);
};
@@ -337,7 +340,7 @@ class Item_dec_func :public Item_real_func
Item_dec_func(Item *a,Item *b) :Item_real_func(a,b) {}
void fix_length_and_dec()
{
- decimals=6; max_length=float_length(decimals);
+ decimals=NOT_FIXED_DEC; max_length=float_length(decimals);
maybe_null=1;
}
inline double fix_result(double value)
diff --git a/sql/item_row.cc b/sql/item_row.cc
index c7e4bc0acf4..f6623e80734 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -66,7 +66,8 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref)
// we can't assign 'item' before, because fix_fields() can change arg
Item *item= *arg;
used_tables_cache |= item->used_tables();
- if (const_item_cache&= item->const_item() && !with_null)
+ const_item_cache&= item->const_item() && !with_null;
+ if (const_item_cache)
{
if (item->cols() > 1)
with_null|= item->null_inside();
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index d3493e1fad1..fac73a1a759 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -39,7 +39,8 @@ C_MODE_END
String my_empty_string("",default_charset_info);
-static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname)
+static void my_coll_agg_error(DTCollation &c1, DTCollation &c2,
+ const char *fname)
{
my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0),
c1.collation->name,c1.derivation_name(),
@@ -62,8 +63,9 @@ double Item_str_func::val()
{
DBUG_ASSERT(fixed == 1);
int err;
- String *res;
- res=val_str(&str_value);
+ char buff[64];
+ String *res, tmp(buff,sizeof(buff), &my_charset_bin);
+ res= val_str(&tmp);
return res ? my_strntod(res->charset(), (char*) res->ptr(),res->length(),
NULL, &err) : 0.0;
}
@@ -72,8 +74,9 @@ longlong Item_str_func::val_int()
{
DBUG_ASSERT(fixed == 1);
int err;
- String *res;
- res=val_str(&str_value);
+ char buff[22];
+ String *res, tmp(buff,sizeof(buff), &my_charset_bin);
+ res= val_str(&tmp);
return (res ?
my_strntoll(res->charset(), res->ptr(), res->length(), 10, NULL,
&err) :
@@ -977,14 +980,16 @@ String *Item_func_left::val_str(String *str)
DBUG_ASSERT(fixed == 1);
String *res =args[0]->val_str(str);
long length =(long) args[1]->val_int();
+ uint char_pos;
if ((null_value=args[0]->null_value))
return 0;
if (length <= 0)
return &my_empty_string;
- if (res->length() <= (uint) length)
+ if (res->length() <= (uint) length ||
+ res->length() <= (char_pos= res->charpos(length)))
return res;
- str_value.set(*res, 0, res->charpos(length));
+ str_value.set(*res, 0, char_pos);
return &str_value;
}
@@ -2195,7 +2200,8 @@ String *Item_func_conv_charset::val_str(String *str)
null_value=1;
return 0;
}
- null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(),conv_charset);
+ null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(),
+ conv_charset);
return null_value ? 0 : &str_value;
}
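
The Item_func_left change above is about cutting at a character position rather than a byte position. The following sketch (utf8_charpos is an invented stand-in for String::charpos(), assuming UTF-8 input) shows why: the first character of a multibyte string can span several bytes, so LEFT(str, 1) has to keep the whole sequence.

#include <cstdio>
#include <cstring>

// Byte offset of the first n_chars characters of a UTF-8 string
// (continuation bytes have the form 10xxxxxx).
static size_t utf8_charpos(const char *s, size_t len, size_t n_chars)
{
  size_t bytes= 0;
  while (bytes < len && n_chars > 0)
  {
    bytes++;                                           // first byte of the character
    while (bytes < len && ((unsigned char) s[bytes] & 0xC0) == 0x80)
      bytes++;                                         // skip continuation bytes
    n_chars--;
  }
  return bytes;
}

int main()
{
  const char *s= "\xC3\xA9tat";                        // "etat" with 2-byte 'e acute'
  size_t cut= utf8_charpos(s, strlen(s), 1);
  printf("LEFT(s,1): cut after %zu bytes -> \"%.*s\"\n", cut, (int) cut, s);
  printf("cutting after 1 byte would split the character\n");
  return 0;
}
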
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index c615bcf7e0f..6d86d7d4b2d 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -63,12 +63,21 @@ void Item_subselect::init(st_select_lex *select_lex,
=> we do not copy old_engine here
*/
engine= unit->item->engine;
+ parsing_place= unit->item->parsing_place;
unit->item->engine= 0;
unit->item= this;
engine->change_item(this, result);
}
else
{
+ SELECT_LEX *outer_select= unit->outer_select();
+ /*
+ do not take into account expression inside aggregate functions because
+      do not take into account expressions inside aggregate functions, because
+ */
+ parsing_place= (outer_select->in_sum_expr ?
+ NO_MATTER :
+ outer_select->parsing_place);
if (select_lex->next_select())
engine= new subselect_union_engine(unit, result, this);
else
@@ -76,7 +85,7 @@ void Item_subselect::init(st_select_lex *select_lex,
}
{
SELECT_LEX *upper= unit->outer_select();
- if (upper->parsing_place == SELECT_LEX_NODE::IN_HAVING)
+ if (upper->parsing_place == IN_HAVING)
upper->subquery_in_having= 1;
}
DBUG_VOID_RETURN;
@@ -123,13 +132,13 @@ Item_subselect::select_transformer(JOIN *join)
bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref)
{
+ char const *save_where= thd_param->where;
+ int res;
+
DBUG_ASSERT(fixed == 0);
engine->set_thd((thd= thd_param));
arena= thd->current_arena;
- char const *save_where= thd->where;
- int res;
-
if (check_stack_overrun(thd, (gptr)&res))
return 1;
@@ -306,7 +315,12 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
return RES_OK;
SELECT_LEX *select_lex= join->select_lex;
- Statement backup;
+
+ /* Juggle with current arena only if we're in prepared statement prepare */
+ Item_arena *arena= join->thd->current_arena;
+ Item_arena backup;
+ if (!arena->is_stmt_prepare())
+ arena= 0; // For easier test
if (!select_lex->master_unit()->first_select()->next_select() &&
!select_lex->table_list.elements &&
@@ -632,6 +646,7 @@ Item_subselect::trans_res
Item_in_subselect::single_value_transformer(JOIN *join,
Comp_creator *func)
{
+ Item_subselect::trans_res result= RES_ERROR;
DBUG_ENTER("Item_in_subselect::single_value_transformer");
if (changed)
@@ -640,10 +655,13 @@ Item_in_subselect::single_value_transformer(JOIN *join,
}
SELECT_LEX *select_lex= join->select_lex;
- Statement backup;
+ Item_arena *arena= join->thd->current_arena, backup;
thd->where= "scalar IN/ALL/ANY subquery";
- if (arena)
+
+ if (!arena->is_stmt_prepare())
+ arena= 0; // For easier test
+ else
thd->set_n_backup_item_arena(arena, &backup);
if (select_lex->item_list.elements > 1)
@@ -751,18 +769,17 @@ Item_in_subselect::single_value_transformer(JOIN *join,
}
select_lex->uncacheable|= UNCACHEABLE_DEPENDENT;
- Item *item;
-
- item= (Item*) select_lex->item_list.head();
if (join->having || select_lex->with_sum_func ||
select_lex->group_list.elements)
{
- item= func->create(expr,
- new Item_ref_null_helper(this,
- select_lex->ref_pointer_array,
- (char *)"<ref>",
- this->full_name()));
+ bool tmp;
+ Item *item= func->create(expr,
+ new Item_ref_null_helper(this,
+ select_lex->
+ ref_pointer_array,
+ (char *)"<ref>",
+ this->full_name()));
/*
AND and comparison functions can't be changed during fix_fields()
we can assign select_lex->having here, and pass 0 as last
@@ -770,21 +787,22 @@ Item_in_subselect::single_value_transformer(JOIN *join,
*/
select_lex->having= join->having= and_items(join->having, item);
select_lex->having_fix_field= 1;
- if (join->having->fix_fields(thd, join->tables_list, 0))
- {
- select_lex->having_fix_field= 0;
- goto err;
- }
+ tmp= join->having->fix_fields(thd, join->tables_list, 0);
select_lex->having_fix_field= 0;
+ if (tmp)
+ goto err;
}
else
{
+ Item *item= (Item*) select_lex->item_list.head();
+
select_lex->item_list.empty();
select_lex->item_list.push_back(new Item_int("Not_used",
(longlong) 1, 21));
select_lex->ref_pointer_array[0]= select_lex->item_list.head();
if (select_lex->table_list.elements)
{
+ bool tmp;
Item *having= item, *orig_item= item;
item= func->create(expr, item);
if (!abort_on_null && orig_item->maybe_null)
@@ -800,12 +818,10 @@ Item_in_subselect::single_value_transformer(JOIN *join,
new Item_cond_and(having, join->having) :
having);
select_lex->having_fix_field= 1;
- if (join->having->fix_fields(thd, join->tables_list, 0))
- {
- select_lex->having_fix_field= 0;
+ tmp= join->having->fix_fields(thd, join->tables_list, 0);
+ select_lex->having_fix_field= 0;
+ if (tmp)
goto err;
- }
- select_lex->having_fix_field= 0;
item= new Item_cond_or(item,
new Item_func_isnull(orig_item));
}
@@ -821,6 +837,7 @@ Item_in_subselect::single_value_transformer(JOIN *join,
}
else
{
+ bool tmp;
if (select_lex->master_unit()->first_select()->next_select())
{
/*
@@ -835,13 +852,10 @@ Item_in_subselect::single_value_transformer(JOIN *join,
(char *)"<no matter>",
(char *)"<result>"));
select_lex->having_fix_field= 1;
- if (join->having->fix_fields(thd, join->tables_list,
- 0))
- {
- select_lex->having_fix_field= 0;
+ tmp= join->having->fix_fields(thd, join->tables_list, 0);
+ select_lex->having_fix_field= 0;
+ if (tmp)
goto err;
- }
- select_lex->having_fix_field= 0;
}
else
{
@@ -857,42 +871,40 @@ Item_in_subselect::single_value_transformer(JOIN *join,
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_SELECT_REDUCED, warn_buff);
}
- if (arena)
- thd->restore_backup_item_arena(arena, &backup);
- DBUG_RETURN(RES_REDUCE);
+ result= RES_REDUCE;
+ goto end;
}
}
}
ok:
- if (arena)
- thd->restore_backup_item_arena(arena, &backup);
- DBUG_RETURN(RES_OK);
+ result= RES_OK;
err:
if (arena)
thd->restore_backup_item_arena(arena, &backup);
- DBUG_RETURN(RES_ERROR);
+ DBUG_RETURN(result);
}
Item_subselect::trans_res
Item_in_subselect::row_value_transformer(JOIN *join)
{
+ Item *item= 0;
+ SELECT_LEX *select_lex= join->select_lex;
DBUG_ENTER("Item_in_subselect::row_value_transformer");
if (changed)
{
DBUG_RETURN(RES_OK);
}
- Statement backup;
- Item *item= 0;
-
thd->where= "row IN/ALL/ANY subquery";
- if (arena)
- thd->set_n_backup_item_arena(arena, &backup);
- SELECT_LEX *select_lex= join->select_lex;
+ Item_arena *arena= join->thd->current_arena, backup;
+ if (!arena->is_stmt_prepare())
+ arena= 0;
+ else
+ thd->set_n_backup_item_arena(arena, &backup);
if (select_lex->item_list.elements != left_expr->cols())
{
@@ -1237,29 +1249,31 @@ int subselect_uniquesubquery_engine::exec()
DBUG_ENTER("subselect_uniquesubquery_engine::exec");
int error;
TABLE *table= tab->table;
- if ((tab->ref.key_err= (*tab->ref.key_copy)->copy()))
+ for (store_key **copy=tab->ref.key_copy ; *copy ; copy++)
{
- table->status= STATUS_NOT_FOUND;
- error= -1;
+ if (tab->ref.key_err= (*copy)->copy())
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(1);
+ }
}
+
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
+ error= table->file->index_read(table->record[0],
+ tab->ref.key_buff,
+ tab->ref.key_length,HA_READ_KEY_EXACT);
+ if (error && error != HA_ERR_KEY_NOT_FOUND)
+ error= report_error(table, error);
else
{
- if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
- error= table->file->index_read(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length,HA_READ_KEY_EXACT);
- if (error && error != HA_ERR_KEY_NOT_FOUND)
- error= report_error(table, error);
- else
- {
- error= 0;
- table->null_row= 0;
- ((Item_in_subselect *) item)->value= (!table->status &&
- (!cond || cond->val_int()) ? 1 :
- 0);
- }
+ error= 0;
+ table->null_row= 0;
+ ((Item_in_subselect *) item)->value= (!table->status &&
+ (!cond || cond->val_int()) ? 1 :
+ 0);
}
+
DBUG_RETURN(error != 0);
}
@@ -1287,55 +1301,56 @@ int subselect_indexsubquery_engine::exec()
((Item_in_subselect *) item)->was_null= 0;
}
- if ((*tab->ref.key_copy) && (tab->ref.key_err= (*tab->ref.key_copy)->copy()))
+ for (store_key **copy=tab->ref.key_copy ; *copy ; copy++)
{
- table->status= STATUS_NOT_FOUND;
- error= -1;
+ if (tab->ref.key_err= (*copy)->copy())
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(1);
+ }
}
+
+ if (!table->file->inited)
+ table->file->ha_index_init(tab->ref.key);
+ error= table->file->index_read(table->record[0],
+ tab->ref.key_buff,
+ tab->ref.key_length,HA_READ_KEY_EXACT);
+ if (error && error != HA_ERR_KEY_NOT_FOUND)
+ error= report_error(table, error);
else
{
- if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
- error= table->file->index_read(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length,HA_READ_KEY_EXACT);
- if (error && error != HA_ERR_KEY_NOT_FOUND)
- error= report_error(table, error);
- else
+ for (;;)
{
- for (;;)
+ error= 0;
+ table->null_row= 0;
+ if (!table->status)
{
- error= 0;
- table->null_row= 0;
- if (!table->status)
- {
- if (!cond || cond->val_int())
- {
- if (null_finding)
- ((Item_in_subselect *) item)->was_null= 1;
- else
- ((Item_in_subselect *) item)->value= 1;
- break;
- }
- error= table->file->index_next_same(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length);
- if (error && error != HA_ERR_END_OF_FILE)
- {
- error= report_error(table, error);
- break;
- }
- }
- else
- {
- if (!check_null || null_finding)
- break; /* We don't need to check nulls */
- *tab->ref.null_ref_key= 1;
- null_finding= 1;
- /* Check if there exists a row with a null value in the index */
- if ((error= (safe_index_read(tab) == 1)))
- break;
- }
+ if (!cond || cond->val_int())
+ {
+ if (null_finding)
+ ((Item_in_subselect *) item)->was_null= 1;
+ else
+ ((Item_in_subselect *) item)->value= 1;
+ break;
+ }
+ error= table->file->index_next_same(table->record[0],
+ tab->ref.key_buff,
+ tab->ref.key_length);
+ if (error && error != HA_ERR_END_OF_FILE)
+ {
+ error= report_error(table, error);
+ break;
+ }
+ }
+ else
+ {
+ if (!check_null || null_finding)
+ break; /* We don't need to check nulls */
+ *tab->ref.null_ref_key= 1;
+ null_finding= 1;
+ /* Check if there exists a row with a null value in the index */
+ if ((error= (safe_index_read(tab) == 1)))
+ break;
}
}
}
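
A recurring pattern in the changes above (and in item_cmpfunc.cc) is the Item_arena juggling: items created while transforming a prepared statement during PREPARE must be allocated from the statement's own arena so they survive until later EXECUTEs, while ordinary queries skip the switch. The model below is deliberately loose: Arena, Thd and the pointer-based backup are invented simplifications of Item_arena/THD and set_n_backup_item_arena()/restore_backup_item_arena().

#include <cstdio>
#include <string>
#include <vector>

struct Arena
{
  bool stmt_prepare;                       // true while preparing a statement
  std::vector<std::string> pool;           // stand-in for the arena's mem_root
  bool is_stmt_prepare() const { return stmt_prepare; }
};

struct Thd
{
  Arena *current_arena;                    // arena of what is being executed
  Arena *alloc_arena;                      // where newly created items are placed
  void set_n_backup_item_arena(Arena *set, Arena **backup)
  { *backup= alloc_arena; alloc_arena= set; }
  void restore_backup_item_arena(Arena **backup)
  { alloc_arena= *backup; }
};

// Creates a transformation item; during PREPARE it must land in the
// statement arena so it can be reused on every EXECUTE.
static void create_conversion_item(Thd *thd)
{
  Arena *backup= 0;
  Arena *arena= thd->current_arena;
  if (!arena->is_stmt_prepare())
    arena= 0;                              // ordinary query: no switch needed
  else
    thd->set_n_backup_item_arena(arena, &backup);

  thd->alloc_arena->pool.push_back("Item_func_conv_charset");

  if (arena)
    thd->restore_backup_item_arena(&backup);
}

int main()
{
  Arena runtime=   { false, {} };          // per-execution memory
  Arena statement= { true,  {} };          // lives as long as the prepared statement
  Thd thd= { &statement, &runtime };       // we are inside PREPARE
  create_conversion_item(&thd);
  printf("statement arena items: %zu, runtime items: %zu\n",
         statement.pool.size(), runtime.pool.size());
  return 0;
}
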
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index e5defe24756..e870feddedf 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -50,6 +50,8 @@ protected:
table_map used_tables_cache;
/* allowed number of columns (1 for single value subqueries) */
uint max_columns;
+ /* where subquery is placed */
+ enum_parsing_place parsing_place;
/* work with 'substitution' */
bool have_to_be_excluded;
/* cache of constant state */
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index b92a7f2ba80..13b6329daae 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -64,20 +64,20 @@ Item_sum::Item_sum(THD *thd, Item_sum *item):
/*
- Save copy of arguments if we are prepare prepared statement
+ Save copy of arguments if we prepare prepared statement
(arguments can be rewritten in get_tmp_table_item())
SYNOPSIS
- Item_sum::save_args_for_prepared_statements()
+ Item_sum::save_args_for_prepared_statement()
thd - thread handler
RETURN
0 - OK
1 - Error
*/
-bool Item_sum::save_args_for_prepared_statements(THD *thd)
+bool Item_sum::save_args_for_prepared_statement(THD *thd)
{
- if (thd->current_arena && args_copy == 0)
+ if (thd->current_arena->is_stmt_prepare() && args_copy == 0)
return save_args(thd->current_arena);
return 0;
}
@@ -214,7 +214,7 @@ Item_sum_num::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
{
DBUG_ASSERT(fixed == 0);
- if (save_args_for_prepared_statements(thd))
+ if (save_args_for_prepared_statement(thd))
return 1;
if (!thd->allow_sum_func)
@@ -248,7 +248,7 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
{
DBUG_ASSERT(fixed == 0);
- if (save_args_for_prepared_statements(thd))
+ if (save_args_for_prepared_statement(thd))
return 1;
Item *item= args[0];
@@ -2056,13 +2056,12 @@ void Item_func_group_concat::reset_field()
bool
Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
{
+ uint i; /* for loop variable */
DBUG_ASSERT(fixed == 0);
- if (save_args_for_prepared_statements(thd))
+ if (save_args_for_prepared_statement(thd))
return 1;
- uint i; /* for loop variable */
-
if (!thd->allow_sum_func)
{
my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0));
@@ -2077,12 +2076,12 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
Fix fields for select list and ORDER clause
*/
- for (i= 0 ; i < arg_count ; i++)
+ for (i=0 ; i < arg_count ; i++)
{
if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1))
return 1;
- if (i < arg_count_field && args[i]->maybe_null)
- maybe_null= 0;
+ if (i < arg_count_field)
+ maybe_null|= args[i]->maybe_null;
}
result_field= 0;
@@ -2153,10 +2152,14 @@ bool Item_func_group_concat::setup(THD *thd)
Note that in the table, we first have the ORDER BY fields, then the
field list.
+
+    We need to set set_sum_field to true so that the blob value is stored in
+    the record buffer itself instead of as a pointer to it.
*/
- if (!(table=create_tmp_table(thd, tmp_table_param, all_fields, 0,
- 0, 0, 0,select_lex->options | thd->options,
- (char *) "")))
+ if (!(table=create_tmp_table(thd, tmp_table_param, all_fields,
+ (ORDER*) 0, 0, TRUE,
+ select_lex->options | thd->options,
+ HA_POS_ERROR,(char *) "")))
DBUG_RETURN(1);
table->file->extra(HA_EXTRA_NO_ROWS);
table->no_rows= 1;
@@ -2263,7 +2266,7 @@ void Item_func_group_concat::print(String *str)
(*order[i]->item)->print(str);
}
}
- str->append(" seperator \'", 12);
+ str->append(" separator \'", 12);
str->append(*separator);
str->append("\')", 2);
}
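
The maybe_null change in fix_fields() above reverses the old logic: GROUP_CONCAT can return NULL as soon as any of its field arguments can be NULL, so the flags are OR-ed together instead of being cleared. A standalone sketch of just that aggregation, with a stripped-down Item stand-in:

#include <cstdio>

/* Hypothetical stand-in for Item: only the maybe_null flag matters here. */
struct Item { bool maybe_null; };

/*
  The fixed logic: the result can be NULL if ANY field argument can be NULL,
  so the flags are OR-ed instead of being cleared.
*/
static bool group_concat_maybe_null(Item *args[], unsigned arg_count_field)
{
  bool maybe_null= false;
  for (unsigned i= 0; i < arg_count_field; i++)
    maybe_null|= args[i]->maybe_null;
  return maybe_null;
}

int main()
{
  Item not_null= {false}, nullable= {true};
  Item *args[]= {&not_null, &nullable};
  std::printf("maybe_null: %d\n", group_concat_maybe_null(args, 2));
  return 0;
}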
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 2dde6f73425..9046a215c86 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -94,8 +94,8 @@ public:
virtual bool setup(THD *thd) {return 0;}
virtual void make_unique() {}
Item *get_tmp_table_item(THD *thd);
- bool save_args_for_prepared_statements(THD *);
- bool save_args(Item_arena* stmt);
+ bool save_args_for_prepared_statement(THD *);
+ bool save_args(Item_arena *arena);
bool walk (Item_processor processor, byte *argument);
};
@@ -317,7 +317,11 @@ public:
class Item_sum_avg :public Item_sum_num
{
- void fix_length_and_dec() { decimals+=4; maybe_null=1; }
+ void fix_length_and_dec()
+ {
+ decimals=min(decimals+4, NOT_FIXED_DEC);
+ maybe_null=1;
+ }
double sum;
ulonglong count;
@@ -372,7 +376,11 @@ class Item_sum_variance : public Item_sum_num
{
double sum, sum_sqr;
ulonglong count;
- void fix_length_and_dec() { decimals+=4; maybe_null=1; }
+ void fix_length_and_dec()
+ {
+ decimals=min(decimals+4, NOT_FIXED_DEC);
+ maybe_null=1;
+ }
public:
Item_sum_variance(Item *item_par) :Item_sum_num(item_par),count(0) {}
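
Both fix_length_and_dec() bodies above clamp the result precision: AVG() and VARIANCE() add four decimals to the argument's precision but must not go past NOT_FIXED_DEC, the sentinel for a floating number of decimals. A standalone sketch of the clamping (the sentinel value 31 is used here only for illustration):

#include <algorithm>
#include <cstdio>

/* Illustrative stand-in for the server's NOT_FIXED_DEC sentinel. */
static const unsigned NOT_FIXED_DEC_SKETCH= 31;

/*
  AVG()/VARIANCE() add 4 decimals to the argument's precision, but must not
  exceed the sentinel, otherwise the value would be treated as "not fixed".
*/
static unsigned avg_decimals(unsigned arg_decimals)
{
  return std::min(arg_decimals + 4, NOT_FIXED_DEC_SKETCH);
}

int main()
{
  std::printf("%u %u\n", avg_decimals(2), avg_decimals(30));   /* 6 31 */
  return 0;
}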
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 5aa14010058..c558c935090 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -114,6 +114,17 @@ static bool make_datetime(date_time_format_types format, TIME *ltime,
/*
+ Date formats corresponding to compound %r and %T conversion specifiers
+
+  Note: We should initialize at least the first element of the "positions"
+        array (the first member) or the hpux11 compiler will die horribly.
+*/
+static DATE_TIME_FORMAT time_ampm_format= {{0}, '\0', 0,
+ {(char *)"%I:%i:%S %p", 11}};
+static DATE_TIME_FORMAT time_24hrs_format= {{0}, '\0', 0,
+ {(char *)"%H:%i:%S", 8}};
+
+/*
Extract datetime value to TIME struct from string value
according to format string.
@@ -126,6 +137,17 @@ static bool make_datetime(date_time_format_types format, TIME *ltime,
cached_timestamp_type
                       It is used to get an appropriate warning
                       when the value is truncated.
+    sub_pattern_end    if non-zero, we are parsing a string that should
+                       match a compound specifier (like %T or %r), and this
+                       parameter points to the place where a pointer to the
+                       end of the string matching that specifier should be
+                       stored.
+  NOTE
+    Parsing strings that match patterns equivalent to compound specifiers is
+    mainly intended for use from inside this function itself, to handle the
+    %T and %r conversion specifiers, so the number of conversion specifiers
+    allowed in such sub-patterns is limited. Also, most checks are skipped in
+    this case.
RETURN
0 ok
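
The new sub_pattern_end parameter is what lets extract_date_time() handle the compound %r and %T specifiers by calling itself with a fixed sub-format and learning where that sub-parse stopped. A toy standalone illustration of the same out-parameter pattern (the format mini-language here is invented for the example, not the server's):

#include <cctype>
#include <cstdio>

static bool parse(const char *fmt, const char *val, const char **sub_pattern_end);

/* A compound token is parsed by a recursive call with a fixed sub-format;
   the callee reports back where it stopped through sub_pattern_end. */
static bool parse_compound(const char *sub_fmt, const char *val, const char **end)
{
  return parse(sub_fmt, val, end);
}

static bool parse(const char *fmt, const char *val, const char **sub_pattern_end)
{
  for (; *fmt; fmt++)
  {
    if (*fmt == 'T')                    /* compound "digits:digits:digits" token */
    {
      if (parse_compound("d:d:d", val, &val))
        return true;
      continue;
    }
    if (*fmt == 'd')
    {
      while (std::isdigit((unsigned char) *val))
        val++;
      continue;
    }
    if (*fmt != *val++)                 /* literal character must match */
      return true;
  }
  if (sub_pattern_end)                  /* sub-parse: report where we stopped */
    *sub_pattern_end= val;
  return false;
}

int main()
{
  std::printf("error: %d\n", parse("T x", "12:34:56 x", 0));   /* error: 0 */
  return 0;
}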
@@ -134,14 +156,18 @@ static bool make_datetime(date_time_format_types format, TIME *ltime,
static bool extract_date_time(DATE_TIME_FORMAT *format,
const char *val, uint length, TIME *l_time,
- timestamp_type cached_timestamp_type)
+ timestamp_type cached_timestamp_type,
+ const char **sub_pattern_end)
{
int weekday= 0, yearday= 0, daypart= 0;
int week_number= -1;
CHARSET_INFO *cs= &my_charset_bin;
int error= 0;
bool usa_time= 0;
- bool sunday_first= 0;
+ bool sunday_first_n_first_week_non_iso;
+ bool strict_week_number;
+ int strict_week_number_year= -1;
+ bool strict_week_number_year_type;
int frac_part;
const char *val_begin= val;
const char *val_end= val + length;
@@ -149,7 +175,12 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
const char *end= ptr + format->format.length;
DBUG_ENTER("extract_date_time");
- bzero((char*) l_time, sizeof(*l_time));
+ LINT_INIT(sunday_first_n_first_week_non_iso);
+ LINT_INIT(strict_week_number);
+ LINT_INIT(strict_week_number_year_type);
+
+ if (!sub_pattern_end)
+ bzero((char*) l_time, sizeof(*l_time));
for (; ptr != end && val != val_end; ptr++)
{
@@ -160,7 +191,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
char *tmp;
/* Skip pre-space between each argument */
- while (my_isspace(cs, *val) && val != val_end)
+ while (val != val_end && my_isspace(cs, *val))
val++;
val_len= (uint) (val_end - val);
@@ -268,9 +299,12 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
break;
case 'w':
tmp= (char*) val + 1;
- if ((weekday= (int) my_strtoll10(val, &tmp, &error)) <= 0 ||
+ if ((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
weekday >= 7)
goto err;
+ /* We should use the same 1 - 7 scale for %w as for %W */
+ if (!weekday)
+ weekday= 7;
val= tmp;
break;
case 'j':
@@ -279,15 +313,45 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
val= tmp;
break;
+ /* Week numbers */
+ case 'V':
case 'U':
- sunday_first= 1;
- /* Fall through */
+ case 'v':
case 'u':
+ sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V');
+ strict_week_number= (*ptr=='V' || *ptr=='v');
tmp= (char*) val + min(val_len, 2);
- week_number= (int) my_strtoll10(val, &tmp, &error);
+ if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
+ strict_week_number && !week_number ||
+ week_number > 53)
+ goto err;
val= tmp;
break;
+ /* Year used with 'strict' %V and %v week numbers */
+ case 'X':
+ case 'x':
+ strict_week_number_year_type= (*ptr=='X');
+ tmp= (char*) val + min(4, val_len);
+ strict_week_number_year= (int) my_strtoll10(val, &tmp, &error);
+ val= tmp;
+ break;
+
+ /* Time in AM/PM notation */
+ case 'r':
+ error= extract_date_time(&time_ampm_format, val,
+ (uint)(val_end - val), l_time,
+ cached_timestamp_type, &val);
+ break;
+
+ /* Time in 24-hour notation */
+ case 'T':
+ error= extract_date_time(&time_24hrs_format, val,
+ (uint)(val_end - val), l_time,
+ cached_timestamp_type, &val);
+ break;
+
+ /* Conversion specifiers that match classes of characters */
case '.':
while (my_ispunct(cs, *val) && val != val_end)
val++;
@@ -320,6 +384,16 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
l_time->hour= l_time->hour%12+daypart;
}
+ /*
+ If we are recursively called for parsing string matching compound
+ specifiers we are already done.
+ */
+ if (sub_pattern_end)
+ {
+ *sub_pattern_end= val;
+ DBUG_RETURN(0);
+ }
+
if (yearday > 0)
{
uint days= calc_daynr(l_time->year,1,1) + yearday - 1;
@@ -330,34 +404,45 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
if (week_number >= 0 && weekday)
{
- int days= calc_daynr(l_time->year,1,1);
+ int days;
uint weekday_b;
-
- if (weekday > 7 || weekday < 0)
- goto err;
- if (sunday_first)
- weekday = weekday%7;
- if (week_number == 53)
- {
- days+= (week_number - 1)*7;
- weekday_b= calc_weekday(days, sunday_first);
- weekday = weekday - weekday_b - !sunday_first;
- days+= weekday;
- }
- else if (week_number == 0)
+ /*
+      %V,%v require %X,%x respectively,
+ %U,%u should be used with %Y and not %X or %x
+ */
+ if (strict_week_number &&
+ (strict_week_number_year < 0 ||
+ strict_week_number_year_type != sunday_first_n_first_week_non_iso) ||
+ !strict_week_number && strict_week_number_year >= 0)
+ goto err;
+
+ /* Number of days since year 0 till 1st Jan of this year */
+ days= calc_daynr((strict_week_number ? strict_week_number_year :
+ l_time->year),
+ 1, 1);
+ /* Which day of week is 1st Jan of this year */
+ weekday_b= calc_weekday(days, sunday_first_n_first_week_non_iso);
+
+ /*
+ Below we are going to sum:
+ 1) number of days since year 0 till 1st day of 1st week of this year
+ 2) number of days between 1st week and our week
+ 3) and position of our day in the week
+ */
+ if (sunday_first_n_first_week_non_iso)
{
- weekday_b= calc_weekday(days, sunday_first);
- weekday = weekday - weekday_b - !sunday_first;
- days+= weekday;
+ days+= ((weekday_b == 0) ? 0 : 7) - weekday_b +
+ (week_number - 1) * 7 +
+ weekday % 7;
}
else
{
- days+= (week_number - !sunday_first)*7;
- weekday_b= calc_weekday(days, sunday_first);
- weekday =weekday - weekday_b - !sunday_first;
- days+= weekday;
+ days+= ((weekday_b <= 3) ? 0 : 7) - weekday_b +
+ (week_number - 1) * 7 +
+ (weekday - 1);
}
+
if (days <= 0 || days >= MAX_DAY_NUMBER)
goto err;
get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day);
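
The two branches above differ only in where "week 1" starts: with %U/%V (sunday_first_n_first_week_non_iso) it begins on the first Sunday of the year, while with %u/%v (ISO) it is the week containing Jan 1 when Jan 1 falls on Monday..Thursday, and the following week otherwise. A standalone sketch of just that first-week offset; the full conversion above additionally adds calc_daynr() of Jan 1, (week_number - 1) * 7 and the position of the weekday:

#include <cstdio>

/*
  Offset, in days relative to Jan 1, of the first day of "week 1" as computed
  by the branches above. weekday_b is the weekday of Jan 1: 0=Monday..6=Sunday
  in ISO mode (%v/%x), 0=Sunday..6=Saturday in %U/%V mode.
*/
static int first_week_offset(int weekday_b, bool iso)
{
  return ((weekday_b <= (iso ? 3 : 0)) ? 0 : 7) - weekday_b;
}

int main()
{
  for (int wd= 0; wd < 7; wd++)
    std::printf("weekday_b=%d  iso=%+d  non_iso=%+d\n",
                wd, first_week_offset(wd, true), first_week_offset(wd, false));
  return 0;
}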
@@ -1574,19 +1659,29 @@ bool Item_func_from_unixtime::get_date(TIME *ltime,
void Item_func_convert_tz::fix_length_and_dec()
-{
- String str;
-
- thd= current_thd;
+{
collation.set(&my_charset_bin);
decimals= 0;
max_length= MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
+}
+
+
+bool
+Item_func_convert_tz::fix_fields(THD *thd_arg, TABLE_LIST *tables_arg, Item **ref)
+{
+ String str;
+ if (Item_date_func::fix_fields(thd_arg, tables_arg, ref))
+ return 1;
+
+ tz_tables= thd_arg->lex->time_zone_tables_used;
if (args[1]->const_item())
- from_tz= my_tz_find(thd, args[1]->val_str(&str));
-
+ from_tz= my_tz_find(args[1]->val_str(&str), tz_tables);
+
if (args[2]->const_item())
- to_tz= my_tz_find(thd, args[2]->val_str(&str));
+ to_tz= my_tz_find(args[2]->val_str(&str), tz_tables);
+
+ return 0;
}
@@ -1627,10 +1722,10 @@ bool Item_func_convert_tz::get_date(TIME *ltime,
String str;
if (!args[1]->const_item())
- from_tz= my_tz_find(thd, args[1]->val_str(&str));
+ from_tz= my_tz_find(args[1]->val_str(&str), tz_tables);
if (!args[2]->const_item())
- to_tz= my_tz_find(thd, args[2]->val_str(&str));
+ to_tz= my_tz_find(args[2]->val_str(&str), tz_tables);
if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, 0))
{
@@ -2825,7 +2920,7 @@ bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date)
date_time_format.format.str= (char*) format->ptr();
date_time_format.format.length= format->length();
if (extract_date_time(&date_time_format, val->ptr(), val->length(),
- ltime, cached_timestamp_type))
+ ltime, cached_timestamp_type, 0))
goto null_date;
if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day)
{
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index fe72ac6193b..5f71045ef27 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -531,9 +531,22 @@ class Item_func_from_unixtime :public Item_date_func
*/
class Time_zone;
+/*
+  This class represents the CONVERT_TZ() function.
+  The important fact about this function is that it is handled in a special
+  way. When such a function appears in an expression, the time zone system
+  tables are added to the global list of tables to open, so that later these
+  already opened and locked tables can be used while evaluating the function
+  to load time zone descriptions.
+*/
class Item_func_convert_tz :public Item_date_func
{
- THD *thd;
+ /* Cached pointer to list of pre-opened time zone tables. */
+ TABLE_LIST *tz_tables;
+ /*
+    If the time zone parameters are constants, we cache the objects that
+    represent them.
+ */
Time_zone *from_tz, *to_tz;
public:
Item_func_convert_tz(Item *a, Item *b, Item *c):
@@ -542,6 +555,7 @@ class Item_func_convert_tz :public Item_date_func
double val() { return (double) val_int(); }
String *val_str(String *str);
const char *func_name() const { return "convert_tz"; }
+ bool fix_fields(THD *, struct st_table_list *, Item **);
void fix_length_and_dec();
bool get_date(TIME *res, uint fuzzy_date);
};
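
The fix_fields() declaration added above is where the new design does its caching: the pre-opened time zone tables are remembered in tz_tables, and constant time zone arguments are resolved once there instead of on every row. A standalone sketch of that cache-if-constant pattern, with illustrative names and a std::map standing in for the pre-opened time zone tables:

#include <cstdio>
#include <map>
#include <string>

/* Illustrative stand-ins: a std::map plays the role of the pre-opened and
   locked time zone tables, TimeZone the role of the server's Time_zone. */
struct TimeZone { std::string name; };
typedef std::map<std::string, TimeZone> TzTables;

static TimeZone *tz_find(const std::string &name, TzTables &tz_tables)
{
  TzTables::iterator it= tz_tables.find(name);
  return it == tz_tables.end() ? 0 : &it->second;
}

struct ConvertTz
{
  bool arg_const;               /* is the tz argument a constant item? */
  std::string arg_name;
  TimeZone *cached;

  /* fix_fields(): resolve a constant argument once, reuse it for every row. */
  void fix_fields(TzTables &tz_tables)
  {
    if (arg_const)
      cached= tz_find(arg_name, tz_tables);
  }
  /* get_date()-time lookup: only non-constant arguments hit the tables again. */
  TimeZone *per_row(TzTables &tz_tables, const std::string &row_value)
  {
    return arg_const ? cached : tz_find(row_value, tz_tables);
  }
};

int main()
{
  TzTables tz_tables;
  tz_tables["UTC"].name= "UTC";
  tz_tables["MET"].name= "MET";
  ConvertTz c= {true, "UTC", 0};
  c.fix_fields(tz_tables);
  std::printf("%s\n", c.per_row(tz_tables, "ignored")->name.c_str());
  return 0;
}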
diff --git a/sql/key.cc b/sql/key.cc
index 9425a368669..b1f4c9533a9 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -211,10 +211,17 @@ bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length)
if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+
FIELDFLAG_PACK)))
{
- if (my_strnncoll(key_part->field->charset(),
- (const uchar*) key, length,
- (const uchar*) table->record[0]+key_part->offset,
- length))
+ CHARSET_INFO *cs= key_part->field->charset();
+ uint char_length= key_part->length / cs->mbmaxlen;
+ const byte *pos= table->record[0] + key_part->offset;
+ if (length > char_length)
+ {
+ char_length= my_charpos(cs, pos, pos + length, char_length);
+ set_if_smaller(char_length, length);
+ }
+ if (cs->coll->strnncollsp(cs,
+ (const uchar*) key, length,
+ (const uchar*) pos, char_length))
return 1;
}
else if (memcmp(key,table->record[0]+key_part->offset,length))
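
The new comparison above has to honour multi-byte character sets: key_part->length is a byte count, so the number of characters in the key is length / mbmaxlen, and my_charpos() converts that character count back into a byte length within the stored value before strnncollsp() compares the two strings space-padded. A toy UTF-8-only stand-in for the my_charpos() step (the server's version is charset-generic, and the hunk above additionally clamps the result with set_if_smaller()):

#include <cstdio>
#include <cstring>

/*
  Toy UTF-8 version of the my_charpos() idea: byte offset of the first `nchars`
  characters inside [pos, end). UTF-8 continuation bytes look like 10xxxxxx.
*/
static size_t charpos_utf8(const unsigned char *pos, const unsigned char *end,
                           size_t nchars)
{
  const unsigned char *p= pos;
  while (nchars-- && p < end)
  {
    p++;
    while (p < end && (*p & 0xC0) == 0x80)    /* skip continuation bytes */
      p++;
  }
  return (size_t) (p - pos);
}

int main()
{
  const char s[]= "h\xC3\xA9llo";             /* "héllo": 6 bytes, 5 characters */
  size_t bytes= charpos_utf8((const unsigned char *) s,
                             (const unsigned char *) s + std::strlen(s), 4);
  std::printf("bytes for 4 chars: %u\n", (unsigned) bytes);   /* 5 */
  return 0;
}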
diff --git a/sql/lex.h b/sql/lex.h
index 957aa3159e7..4c44d53d5b1 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -553,7 +553,7 @@ static SYMBOL sql_functions[] = {
{ "CONNECTION_ID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_connection_id)},
{ "CONTAINS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_contains)},
{ "CONV", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_conv)},
- { "CONVERT_TZ", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_convert_tz)},
+ { "CONVERT_TZ", SYM(CONVERT_TZ_SYM)},
{ "COUNT", SYM(COUNT_SYM)},
{ "COS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cos)},
{ "COT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_cot)},
diff --git a/sql/lock.cc b/sql/lock.cc
index bcf61fde1a6..debfb900c23 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -99,7 +99,7 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd,TABLE **tables,uint count)
Someone has issued LOCK ALL TABLES FOR READ and we want a write lock
Wait until the lock is gone
*/
- if (wait_if_global_read_lock(thd, 1))
+ if (wait_if_global_read_lock(thd, 1, 1))
{
my_free((gptr) sql_lock,MYF(0));
sql_lock=0;
@@ -474,7 +474,7 @@ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list)
int error= -1;
DBUG_ENTER("lock_and_wait_for_table_name");
- if (wait_if_global_read_lock(thd,0))
+ if (wait_if_global_read_lock(thd, 0, 1))
DBUG_RETURN(1);
VOID(pthread_mutex_lock(&LOCK_open));
if ((lock_retcode = lock_table_name(thd, table_list)) < 0)
@@ -709,14 +709,23 @@ static void print_lock_error(int error)
The global locks are handled through the global variables:
global_read_lock
+ global_read_lock_blocks_commit
waiting_for_read_lock
protect_against_global_read_lock
+
+  Taking the global read lock is a TWO-step process (the 2nd step is
+  optional; without it, COMMIT of existing transactions will still be
+  allowed):
+  lock_global_read_lock() THEN make_global_read_lock_block_commit().
****************************************************************************/
volatile uint global_read_lock=0;
+volatile uint global_read_lock_blocks_commit=0;
static volatile uint protect_against_global_read_lock=0;
static volatile uint waiting_for_read_lock=0;
+#define GOT_GLOBAL_READ_LOCK 1
+#define MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT 2
+
bool lock_global_read_lock(THD *thd)
{
DBUG_ENTER("lock_global_read_lock");
@@ -734,40 +743,54 @@ bool lock_global_read_lock(THD *thd)
while (protect_against_global_read_lock && !thd->killed)
pthread_cond_wait(&COND_refresh, &LOCK_open);
waiting_for_read_lock--;
- thd->exit_cond(old_message);
if (thd->killed)
{
- (void) pthread_mutex_unlock(&LOCK_open);
+ thd->exit_cond(old_message);
DBUG_RETURN(1);
}
- thd->global_read_lock=1;
+ thd->global_read_lock= GOT_GLOBAL_READ_LOCK;
global_read_lock++;
- (void) pthread_mutex_unlock(&LOCK_open);
+ thd->exit_cond(old_message);
}
+ /*
+    We DON'T set global_read_lock_blocks_commit now; it will be set after
+    tables are flushed (as the present function serves FLUSH TABLES WITH
+    READ LOCK only). Doing things in this order is necessary to avoid
+    deadlocks (we must allow COMMIT until all tables are closed; if we
+    forbade it earlier, we could get a 3-thread deadlock when two threads do
+    SELECT FOR UPDATE and one does FLUSH TABLES WITH READ LOCK).
+ */
DBUG_RETURN(0);
}
void unlock_global_read_lock(THD *thd)
{
uint tmp;
- thd->global_read_lock=0;
pthread_mutex_lock(&LOCK_open);
tmp= --global_read_lock;
+ if (thd->global_read_lock == MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT)
+ --global_read_lock_blocks_commit;
pthread_mutex_unlock(&LOCK_open);
/* Send the signal outside the mutex to avoid a context switch */
if (!tmp)
pthread_cond_broadcast(&COND_refresh);
+ thd->global_read_lock= 0;
}
+#define must_wait (global_read_lock && \
+ (is_not_commit || \
+ global_read_lock_blocks_commit))
-bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh)
+bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh,
+ bool is_not_commit)
{
const char *old_message;
- bool result=0;
+ bool result= 0, need_exit_cond;
DBUG_ENTER("wait_if_global_read_lock");
+ LINT_INIT(old_message);
(void) pthread_mutex_lock(&LOCK_open);
- if (global_read_lock)
+ if ((need_exit_cond= must_wait))
{
if (thd->global_read_lock) // This thread had the read locks
{
@@ -777,16 +800,22 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh)
}
old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
"Waiting for release of readlock");
- while (global_read_lock && ! thd->killed &&
+ while (must_wait && ! thd->killed &&
(!abort_on_refresh || thd->version == refresh_version))
(void) pthread_cond_wait(&COND_refresh,&LOCK_open);
if (thd->killed)
result=1;
- thd->exit_cond(old_message);
}
if (!abort_on_refresh && !result)
protect_against_global_read_lock++;
- pthread_mutex_unlock(&LOCK_open);
+ /*
+    The following is only true in the case of a global read lock (which is
+    rare), and if old_message is set
+ */
+ if (unlikely(need_exit_cond))
+ thd->exit_cond(old_message);
+ else
+ pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(result);
}
@@ -802,3 +831,21 @@ void start_waiting_global_read_lock(THD *thd)
pthread_cond_broadcast(&COND_refresh);
DBUG_VOID_RETURN;
}
+
+
+void make_global_read_lock_block_commit(THD *thd)
+{
+ /*
+    If we did not succeed with lock_global_read_lock(), or if we already
+    succeeded with make_global_read_lock_block_commit(), do nothing.
+ */
+ if (thd->global_read_lock != GOT_GLOBAL_READ_LOCK)
+ return;
+ pthread_mutex_lock(&LOCK_open);
+ /* increment this BEFORE waiting on cond (otherwise race cond) */
+ global_read_lock_blocks_commit++;
+ while (protect_against_global_read_lock)
+ pthread_cond_wait(&COND_refresh, &LOCK_open);
+ pthread_mutex_unlock(&LOCK_open);
+ thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT;
+}
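
lock_global_read_lock() and make_global_read_lock_block_commit() above are meant to be called in that order by FLUSH TABLES WITH READ LOCK, with the table flush in between, so that COMMIT stays possible until all tables are closed. A standalone model of just the counters and per-THD state involved (the real code does all of this under LOCK_open with COND_refresh; the shortened names are illustrative):

#include <cstdio>

enum thd_grl_state { GRL_NONE= 0, GOT_GRL= 1, MADE_GRL_BLOCK_COMMIT= 2 };

struct Globals { unsigned global_read_lock, global_read_lock_blocks_commit; };

static void lock_grl(Globals &g, thd_grl_state &thd)
{
  g.global_read_lock++;                       /* step 1: new write locks wait   */
  thd= GOT_GRL;                               /* COMMIT is still allowed        */
}

static void block_commit(Globals &g, thd_grl_state &thd)
{
  if (thd != GOT_GRL)                         /* only after a successful step 1 */
    return;
  g.global_read_lock_blocks_commit++;         /* step 2: now COMMIT waits too   */
  thd= MADE_GRL_BLOCK_COMMIT;
}

static void unlock_grl(Globals &g, thd_grl_state &thd)
{
  g.global_read_lock--;
  if (thd == MADE_GRL_BLOCK_COMMIT)
    g.global_read_lock_blocks_commit--;
  thd= GRL_NONE;
}

int main()
{
  Globals g= {0, 0};
  thd_grl_state thd= GRL_NONE;
  lock_grl(g, thd);       /* FLUSH TABLES WITH READ LOCK, step 1           */
  /* ... flush and close tables here; COMMIT is still possible ...         */
  block_commit(g, thd);   /* step 2, taken only after the flush            */
  std::printf("grl=%u blocks_commit=%u\n",
              g.global_read_lock, g.global_read_lock_blocks_commit);
  unlock_grl(g, thd);
  return 0;
}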
diff --git a/sql/log.cc b/sql/log.cc
index 44c8ce59aaf..16381c8e26c 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1,15 +1,15 @@
/* Copyright (C) 2000-2003 MySQL AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
@@ -31,12 +31,55 @@
#include <stdarg.h>
#include <m_ctype.h> // For test_if_number
+#ifdef __NT__
+#include "message.h"
+#endif
+
MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log;
ulong sync_binlog_counter= 0;
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
+#ifdef __NT__
+static int eventSource = 0;
+
+void setup_windows_event_source()
+{
+ HKEY hRegKey= NULL;
+ DWORD dwError= 0;
+ TCHAR szPath[MAX_PATH];
+ DWORD dwTypes;
+
+ if (eventSource) // Ensure that we are only called once
+ return;
+ eventSource= 1;
+
+ // Create the event source registry key
+ dwError= RegCreateKey(HKEY_LOCAL_MACHINE,
+ "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\MySQL",
+ &hRegKey);
+
+ /* Name of the PE module that contains the message resource */
+ GetModuleFileName(NULL, szPath, MAX_PATH);
+
+ /* Register EventMessageFile */
+ dwError = RegSetValueEx(hRegKey, "EventMessageFile", 0, REG_EXPAND_SZ,
+ (PBYTE) szPath, strlen(szPath)+1);
+
+
+ /* Register supported event types */
+ dwTypes= (EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE |
+ EVENTLOG_INFORMATION_TYPE);
+ dwError= RegSetValueEx(hRegKey, "TypesSupported", 0, REG_DWORD,
+ (LPBYTE) &dwTypes, sizeof dwTypes);
+
+ RegCloseKey(hRegKey);
+}
+
+#endif /* __NT__ */
+
+
/****************************************************************************
** Find a uniq filename for 'filename.#'.
** Set # to a number as low as possible
@@ -234,7 +277,7 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
"started with:\nTcp port: %d Unix socket: %s\n",
my_progname,server_version,mysqld_port,mysqld_unix_port
#endif
- );
+ );
end=strnmov(buff+len,"Time Id Command Argument\n",
sizeof(buff)-len);
if (my_b_write(&log_file, (byte*) buff,(uint) (end-buff)) ||
@@ -1405,29 +1448,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
goto err;
}
#endif
-
-#if MYSQL_VERSION_ID < 50000
- /*
- In 5.0 this is not needed anymore as we store the value of
- FOREIGN_KEY_CHECKS in a binary way in the Query event's header.
- The code below was enabled in 4.0 and 4.1.
- */
- /*
- If the user has set FOREIGN_KEY_CHECKS=0 we wrap every SQL
- command in the binlog inside:
- SET FOREIGN_KEY_CHECKS=0;
- <command>;
- SET FOREIGN_KEY_CHECKS=1;
- */
-
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS)
- {
- Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0);
- e.set_log_pos(this);
- if (e.write(file))
- goto err;
- }
-#endif
}
/* Write the SQL command */
@@ -1436,18 +1456,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
if (event_info->write(file))
goto err;
- /* Write log events to reset the 'run environment' of the SQL command */
-
-#if MYSQL_VERSION_ID < 50000
- if (thd && thd->options & OPTION_NO_FOREIGN_KEY_CHECKS)
- {
- Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0);
- e.set_log_pos(this);
- if (e.write(file))
- goto err;
- }
-#endif
-
/*
Tell for transactional table handlers up to which position in the
binlog file we wrote. The table handler can store this info, and
@@ -1842,17 +1850,12 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
NOTES
One must have a lock on LOCK_log before calling this function.
- This lock will be freed before return!
-
- The reason for the above is that for enter_cond() / exit_cond() to
- work the mutex must be got before enter_cond() but releases before
- exit_cond().
- If you don't do it this way, you will get a deadlock in THD::awake()
+ This lock will be freed before return! That's required by
+ THD::enter_cond() (see NOTES in sql_class.h).
*/
void MYSQL_LOG:: wait_for_update(THD* thd, bool master_or_slave)
{
- safe_mutex_assert_owner(&LOCK_log);
const char* old_msg = thd->enter_cond(&update_cond, &LOCK_log,
master_or_slave ?
"Has read all relay log; waiting for \
@@ -1860,7 +1863,6 @@ the slave I/O thread to update it" :
"Has sent all binlog to slave; \
waiting for binlog to be updated");
pthread_cond_wait(&update_cond, &LOCK_log);
- pthread_mutex_unlock(&LOCK_log); // See NOTES
thd->exit_cond(old_msg);
}
@@ -1944,6 +1946,19 @@ void MYSQL_LOG::set_max_size(ulong max_size_arg)
}
+Disable_binlog::Disable_binlog(THD *thd_arg) :
+ thd(thd_arg), save_options(thd_arg->options)
+{
+ thd_arg->options&= ~OPTION_BIN_LOG;
+}
+
+
+Disable_binlog::~Disable_binlog()
+{
+ thd->options= save_options;
+}
+
+
/*
Check if a string is a valid number
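
Disable_binlog above is a small scope guard: the constructor saves thd->options and clears the binlog bit, the destructor restores the saved value, so binary logging is re-enabled on every exit path of the enclosing block. A standalone sketch of the same pattern (the option bit value here is illustrative only):

#include <cstdio>

static const unsigned long OPTION_BIN_LOG_SKETCH= 1UL << 0;  /* illustrative bit */

struct Thd { unsigned long options; };

class Disable_binlog_sketch
{
  Thd *thd;
  unsigned long save_options;
public:
  Disable_binlog_sketch(Thd *thd_arg)
    : thd(thd_arg), save_options(thd_arg->options)
  { thd_arg->options&= ~OPTION_BIN_LOG_SKETCH; }             /* binlogging off */
  ~Disable_binlog_sketch() { thd->options= save_options; }   /* restored       */
};

int main()
{
  Thd thd= {OPTION_BIN_LOG_SKETCH};
  {
    Disable_binlog_sketch guard(&thd);
    std::printf("inside:  %lu\n", thd.options);        /* 0            */
  }
  std::printf("outside: %lu\n", thd.options);          /* 1: restored  */
  return 0;
}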
@@ -1994,39 +2009,31 @@ static bool test_if_number(register const char *str,
} /* test_if_number */
-void sql_print_error(const char *format,...)
+void print_buffer_to_file(enum loglevel level, const char *buffer)
{
- va_list args;
time_t skr;
struct tm tm_tmp;
struct tm *start;
- va_start(args,format);
- DBUG_ENTER("sql_print_error");
+ DBUG_ENTER("print_buffer_to_file");
+ DBUG_PRINT("enter",("buffer: %s", buffer));
VOID(pthread_mutex_lock(&LOCK_error_log));
-#ifndef DBUG_OFF
- {
- char buff[1024];
- my_vsnprintf(buff,sizeof(buff)-1,format,args);
- DBUG_PRINT("error",("%s",buff));
- va_end(args);
- va_start(args,format);
- }
-#endif
+
skr=time(NULL);
- localtime_r(&skr,&tm_tmp);
+ localtime_r(&skr, &tm_tmp);
start=&tm_tmp;
- fprintf(stderr,"%02d%02d%02d %2d:%02d:%02d ",
- start->tm_year % 100,
- start->tm_mon+1,
+ fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n",
+ start->tm_year % 100,
+ start->tm_mon+1,
start->tm_mday,
start->tm_hour,
start->tm_min,
- start->tm_sec);
- (void) vfprintf(stderr,format,args);
- (void) fputc('\n',stderr);
+ start->tm_sec,
+ (level == ERROR_LEVEL ? "ERROR" : level == WARNING_LEVEL ?
+ "WARNING" : "INFORMATION"),
+ buffer);
+
fflush(stderr);
- va_end(args);
VOID(pthread_mutex_unlock(&LOCK_error_log));
DBUG_VOID_RETURN;
@@ -2042,6 +2049,7 @@ void sql_perror(const char *message)
#endif
}
+
bool flush_error_log()
{
bool result=0;
@@ -2225,3 +2233,126 @@ void MYSQL_LOG::report_pos_in_innodb()
#endif
DBUG_VOID_RETURN;
}
+
+#ifdef __NT__
+void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
+ uint length, int buffLen)
+{
+ HANDLE event;
+ char *buffptr;
+ LPCSTR *buffmsgptr;
+ DBUG_ENTER("print_buffer_to_nt_eventlog");
+
+ buffptr= buff;
+ if (length > (uint)(buffLen-4))
+ {
+ char *newBuff= new char[length + 4];
+ strcpy(newBuff, buff);
+ buffptr= newBuff;
+ }
+ strmov(buffptr+length, "\r\n\r\n");
+ buffmsgptr= (LPCSTR*) &buffptr; // Keep windows happy
+
+ setup_windows_event_source();
+ if ((event= RegisterEventSource(NULL,"MySQL")))
+ {
+ switch (level) {
+ case ERROR_LEVEL:
+ ReportEvent(event, EVENTLOG_ERROR_TYPE, 0, MSG_DEFAULT, NULL, 1, 0,
+ buffmsgptr, NULL);
+ break;
+ case WARNING_LEVEL:
+ ReportEvent(event, EVENTLOG_WARNING_TYPE, 0, MSG_DEFAULT, NULL, 1, 0,
+ buffmsgptr, NULL);
+ break;
+ case INFORMATION_LEVEL:
+ ReportEvent(event, EVENTLOG_INFORMATION_TYPE, 0, MSG_DEFAULT, NULL, 1,
+ 0, buffmsgptr, NULL);
+ break;
+ }
+ DeregisterEventSource(event);
+ }
+
+ /* if we created a string buffer, then delete it */
+ if (buffptr != buff)
+ delete[] buffptr;
+
+ DBUG_VOID_RETURN;
+}
+#endif /* __NT__ */
+
+
+/*
+ Prints a printf style message to the error log and, under NT, to the
+ Windows event log.
+
+ SYNOPSIS
+ vprint_msg_to_log()
+      level           Type of event to write (Error, Warning, or Info)
+ format Printf style format of message
+ args va_list list of arguments for the message
+
+ NOTE
+
+ IMPLEMENTATION
+ This function prints the message into a buffer and then sends that buffer
+ to other functions to write that message to other logging sources.
+
+ RETURN VALUES
+ void
+*/
+
+void vprint_msg_to_log(enum loglevel level, const char *format, va_list args)
+{
+ char buff[1024];
+ uint length;
+ DBUG_ENTER("vprint_msg_to_log");
+
+ length= my_vsnprintf(buff, sizeof(buff)-5, format, args);
+ print_buffer_to_file(level, buff);
+
+#ifdef __NT__
+ print_buffer_to_nt_eventlog(level, buff, length, sizeof(buff));
+#endif
+
+ DBUG_VOID_RETURN;
+}
+
+
+void sql_print_error(const char *format, ...)
+{
+ va_list args;
+ DBUG_ENTER("sql_print_error");
+
+ va_start(args, format);
+ vprint_msg_to_log(ERROR_LEVEL, format, args);
+ va_end(args);
+
+ DBUG_VOID_RETURN;
+}
+
+
+void sql_print_warning(const char *format, ...)
+{
+ va_list args;
+ DBUG_ENTER("sql_print_warning");
+
+ va_start(args, format);
+ vprint_msg_to_log(WARNING_LEVEL, format, args);
+ va_end(args);
+
+ DBUG_VOID_RETURN;
+}
+
+
+void sql_print_information(const char *format, ...)
+{
+ va_list args;
+ DBUG_ENTER("sql_print_information");
+
+ va_start(args, format);
+ vprint_msg_to_log(INFORMATION_LEVEL, format, args);
+ va_end(args);
+
+ DBUG_VOID_RETURN;
+}
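
sql_print_error(), sql_print_warning() and sql_print_information() above are thin wrappers that differ only in the level they pass to vprint_msg_to_log(), which stamps the line with a timestamp and an [ERROR]/[WARNING]/[INFORMATION] tag (and, under NT, also forwards it to the event log). A standalone sketch of the same funnel, with renamed, illustrative identifiers:

#include <cstdarg>
#include <cstdio>
#include <ctime>

enum loglevel_sketch { ERROR_LEVEL_S, WARNING_LEVEL_S, INFORMATION_LEVEL_S };

/* One sink adds the timestamp and the level tag, whatever the wrapper was. */
static void print_buffer(loglevel_sketch level, const char *buffer)
{
  char ts[32];
  std::time_t now= std::time(0);
  std::strftime(ts, sizeof(ts), "%y%m%d %H:%M:%S", std::localtime(&now));
  std::fprintf(stderr, "%s [%s] %s\n", ts,
               level == ERROR_LEVEL_S ? "ERROR" :
               level == WARNING_LEVEL_S ? "WARNING" : "INFORMATION", buffer);
}

static void vprint_msg(loglevel_sketch level, const char *format, va_list args)
{
  char buff[1024];
  std::vsnprintf(buff, sizeof(buff), format, args);
  print_buffer(level, buff);
}

/* Each sql_print_* style wrapper only chooses the level. */
static void print_warning(const char *format, ...)
{
  va_list args;
  va_start(args, format);
  vprint_msg(WARNING_LEVEL_S, format, args);
  va_end(args);
}

int main()
{
  print_warning("Changed limits: max_open_files: %u", 1024u);
  return 0;
}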
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 2f3471fee17..97e041774f8 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1540,10 +1540,6 @@ end:
thd->query= 0; // just to be sure
thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
- // assume no convert for next query unless set explictly
-#ifdef TO_BE_REMOVED
- thd->variables.convert_set = 0;
-#endif
close_thread_tables(thd);
free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
return (thd->query_error ? thd->query_error : Log_event::exec_event(rli));
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index a100aa0cd3a..c668e152df5 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -333,9 +333,17 @@ void debug_sync_point(const char* lock_name, uint lock_timeout);
*/
#define MAX_DATE_REP_LENGTH 30
+enum enum_parsing_place
+{
+ NO_MATTER,
+ IN_HAVING,
+ SELECT_LIST,
+ IN_WHERE
+};
+
struct st_table;
class THD;
-class Statement;
+class Item_arena;
/* Struct to handle simple linked lists */
@@ -414,6 +422,7 @@ int delete_precheck(THD *thd, TABLE_LIST *tables);
int insert_precheck(THD *thd, TABLE_LIST *tables, bool update);
int create_table_precheck(THD *thd, TABLE_LIST *tables,
TABLE_LIST *create_table);
+Item *negate_expression(THD *thd, Item *expr);
#include "sql_class.h"
#include "opt_range.h"
@@ -563,7 +572,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
int mysql_create_table(THD *thd,const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &fields, List<Key> &keys,
- bool tmp_table, bool no_log, uint select_field_count);
+ bool tmp_table, uint select_field_count);
+
TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
TABLE_LIST *create_table,
List<create_field> *extra_fields,
@@ -611,7 +621,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order,
ha_rows rows, ulong options);
-int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok=0);
+int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok);
TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update);
TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem,
bool *refresh);
@@ -841,8 +851,14 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
bool init_errmessage(void);
void sql_perror(const char *message);
-void sql_print_error(const char *format,...)
- __attribute__ ((format (printf, 1, 2)));
+
+void vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
+void sql_print_error(const char *format, ...);
+void sql_print_warning(const char *format, ...);
+void sql_print_information(const char *format, ...);
+
+
+
bool fn_format_relative_to_data_home(my_string to, const char *name,
const char *dir, const char *extension);
bool open_log(MYSQL_LOG *log, const char *hostname,
@@ -894,7 +910,6 @@ extern Gt_creator gt_creator;
extern Lt_creator lt_creator;
extern Ge_creator ge_creator;
extern Le_creator le_creator;
-extern uchar *days_in_month;
extern char language[LIBLEN],reg_ext[FN_EXTLEN];
extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN];
extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file;
@@ -998,7 +1013,7 @@ extern struct my_option my_long_options[];
/* optional things, have_* variables */
extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db;
-extern SHOW_COMP_OPTION have_example_db, have_archive_db;
+extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db;
extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink;
extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
@@ -1023,8 +1038,9 @@ void mysql_lock_abort_for_thread(THD *thd, TABLE *table);
MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b);
bool lock_global_read_lock(THD *thd);
void unlock_global_read_lock(THD *thd);
-bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh);
+bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commit);
void start_waiting_global_read_lock(THD *thd);
+void make_global_read_lock_block_commit(THD *thd);
/* Lock based on name */
int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list);
@@ -1062,12 +1078,9 @@ void free_blobs(TABLE *table);
int set_zone(int nr,int min_zone,int max_zone);
ulong convert_period_to_month(ulong period);
ulong convert_month_to_period(ulong month);
-long calc_daynr(uint year,uint month,uint day);
uint calc_days_in_year(uint year);
void get_date_from_daynr(long daynr,uint *year, uint *month,
uint *day);
-void init_time(void);
-my_time_t my_system_gmt_sec(const TIME *, long *current_timezone, bool *not_exist);
my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist);
bool str_to_time_with_warn(const char *str,uint length,TIME *l_time);
timestamp_type str_to_datetime_with_warn(const char *str, uint length,
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index ac275f0d765..81651862255 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -92,7 +92,7 @@ extern "C" { // Because of SCO 3.2V4.2
#if defined(OS2)
# include <sys/un.h>
-#elif !defined( __WIN__)
+#elif !defined(__WIN__)
# ifndef __NETWARE__
#include <sys/resource.h>
# endif /* __NETWARE__ */
@@ -240,10 +240,10 @@ bool opt_help= 0;
bool opt_verbose= 0;
arg_cmp_func Arg_comparator::comparator_matrix[4][2] =
-{{&Arg_comparator::compare_string, &Arg_comparator::compare_e_string},
- {&Arg_comparator::compare_real, &Arg_comparator::compare_e_real},
- {&Arg_comparator::compare_int, &Arg_comparator::compare_e_int},
- {&Arg_comparator::compare_row, &Arg_comparator::compare_e_row}};
+{{&Arg_comparator::compare_string, &Arg_comparator::compare_e_string},
+ {&Arg_comparator::compare_real, &Arg_comparator::compare_e_real},
+ {&Arg_comparator::compare_int_signed, &Arg_comparator::compare_e_int},
+ {&Arg_comparator::compare_row, &Arg_comparator::compare_e_row}};
/* Global variables */
@@ -370,7 +370,7 @@ CHARSET_INFO *system_charset_info, *files_charset_info ;
CHARSET_INFO *national_charset_info, *table_alias_charset;
SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
- have_example_db, have_archive_db;
+ have_example_db, have_archive_db, have_csv_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
@@ -569,7 +569,7 @@ static void close_connections(void)
/* Abort listening to new connections */
DBUG_PRINT("quit",("Closing sockets"));
- if ( !opt_disable_networking )
+ if (!opt_disable_networking )
{
if (ip_sock != INVALID_SOCKET)
{
@@ -582,7 +582,7 @@ static void close_connections(void)
if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe)
{
HANDLE temp;
- DBUG_PRINT( "quit", ("Closing named pipes") );
+ DBUG_PRINT("quit", ("Closing named pipes") );
/* Create connection to the handle named pipe handler to break the loop */
if ((temp = CreateFile(pipe_name,
@@ -748,7 +748,7 @@ void kill_mysql(void)
}
#endif
#elif defined(OS2)
- pthread_cond_signal( &eventShutdown); // post semaphore
+ pthread_cond_signal(&eventShutdown); // post semaphore
#elif defined(HAVE_PTHREAD_KILL)
if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL))
{
@@ -766,7 +766,7 @@ void kill_mysql(void)
abort_loop=1;
if (pthread_create(&tmp,&connection_attrib, kill_server_thread,
(void*) 0))
- sql_print_error("Error: Can't create thread to kill server");
+ sql_print_error("Can't create thread to kill server");
}
#endif
DBUG_VOID_RETURN;
@@ -795,7 +795,7 @@ static void __cdecl kill_server(int sig_ptr)
abort_loop=1; // This should be set
signal(sig,SIG_IGN);
if (sig == MYSQL_KILL_SIGNAL || sig == 0)
- sql_print_error(ER(ER_NORMAL_SHUTDOWN),my_progname);
+ sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname);
else
sql_print_error(ER(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
@@ -811,7 +811,7 @@ static void __cdecl kill_server(int sig_ptr)
#ifdef __NETWARE__
pthread_join(select_thread, NULL); // wait for main thread
#endif /* __NETWARE__ */
-
+
pthread_exit(0); /* purecov: deadcode */
#endif /* EMBEDDED_LIBRARY */
@@ -839,7 +839,7 @@ extern "C" sig_handler print_signal_warning(int sig)
if (!DBUG_IN_USE)
{
if (global_system_variables.log_warnings)
- sql_print_error("Warning: Got signal %d from thread %d",
+ sql_print_warning("Got signal %d from thread %d",
sig,my_thread_id());
}
#ifdef DONT_REMEMBER_SIGNAL
@@ -965,7 +965,7 @@ void clean_up(bool print_message)
#endif
if (print_message && errmesg)
- sql_print_error(ER(ER_SHUTDOWN_COMPLETE),my_progname);
+ sql_print_information(ER(ER_SHUTDOWN_COMPLETE),my_progname);
#if !defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
if (!opt_bootstrap)
(void) my_delete(pidfile_name,MYF(0)); // This may not always exist
@@ -1066,8 +1066,8 @@ static void set_user(const char *user)
struct passwd *user_info= getpwnam(user);
if ((!user_info || user_id != user_info->pw_uid) &&
global_system_variables.log_warnings)
- fprintf(stderr,
- "Warning: One can only use the --user switch if running as root\n");
+ sql_print_warning(
+ "One can only use the --user switch if running as root\n");
}
return;
}
@@ -1141,7 +1141,7 @@ static void server_init(void)
DBUG_ENTER("server_init");
#ifdef __WIN__
- if ( !opt_disable_networking )
+ if (!opt_disable_networking)
{
WSADATA WsaData;
if (SOCKET_ERROR == WSAStartup (0x0101, &WsaData))
@@ -1187,7 +1187,7 @@ static void server_init(void)
if (listen(ip_sock,(int) back_log) < 0)
{
sql_perror("Can't start server: listen() on TCP/IP port");
- sql_print_error("Error: listen() on TCP/IP failed with error %d",
+ sql_print_error("listen() on TCP/IP failed with error %d",
socket_errno);
unireg_abort(1);
}
@@ -1216,7 +1216,7 @@ static void server_init(void)
sql_perror("Can't start server : Set security descriptor");
unireg_abort(1);
}
- saPipeSecurity.nLength = sizeof( SECURITY_ATTRIBUTES );
+ saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES);
saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor;
saPipeSecurity.bInheritHandle = FALSE;
if ((hPipe= CreateNamedPipe(pipe_name,
@@ -1236,9 +1236,9 @@ static void server_init(void)
FORMAT_MESSAGE_FROM_SYSTEM,
NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &lpMsgBuf, 0, NULL );
- MessageBox( NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe",
- MB_OK|MB_ICONINFORMATION );
- LocalFree( lpMsgBuf );
+ MessageBox(NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe",
+ MB_OK|MB_ICONINFORMATION);
+ LocalFree(lpMsgBuf);
unireg_abort(1);
}
}
@@ -1282,7 +1282,7 @@ static void server_init(void)
(void) chmod(mysqld_unix_port,S_IFSOCK); /* Fix solaris 2.6 bug */
#endif
if (listen(unix_sock,(int) back_log) < 0)
- sql_print_error("Warning: listen() on Unix socket failed with error %d",
+ sql_print_warning("listen() on Unix socket failed with error %d",
socket_errno);
}
#endif
@@ -1459,7 +1459,7 @@ static void init_signals(void)
{
int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT } ;
for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++)
- signal( signals[i], kill_server) ;
+ signal(signals[i], kill_server) ;
#if defined(__WIN__)
signal(SIGBREAK,SIG_IGN); //ignore SIGBREAK for NT
#else
@@ -1594,6 +1594,8 @@ static void registerwithneb()
ulong neb_event_callback(struct EventBlock *eblock)
{
EventChangeVolStateEnter_s *voldata;
+ extern bool nw_panic;
+
voldata= (EventChangeVolStateEnter_s *)eblock->EBEventData;
/* Deactivation of a volume */
@@ -1606,6 +1608,7 @@ ulong neb_event_callback(struct EventBlock *eblock)
if (!memcmp(&voldata->volID, &datavolid, sizeof(VolumeID_t)))
{
consoleprintf("MySQL data volume is deactivated, shutting down MySQL Server \n");
+ nw_panic = TRUE;
kill_server(0);
}
}
@@ -1792,7 +1795,7 @@ bytes of memory\n", ((ulong) sql_key_cache->key_cache_mem_size +
You seem to be running 32-bit Linux and have %d concurrent connections.\n\
If you have not changed STACK_SIZE in LinuxThreads and built the binary \n\
yourself, LinuxThreads is quite likely to steal a part of the global heap for\n\
-the thread stack. Please read http://www.mysql.com/doc/L/i/Linux.html\n\n",
+the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n",
thread_count);
}
#endif /* HAVE_LINUXTHREADS */
@@ -1871,7 +1874,7 @@ static void init_signals(void)
struct rlimit rl;
rl.rlim_cur = rl.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &rl) && global_system_variables.log_warnings)
- sql_print_error("Warning: setrlimit could not change the size of core files to 'infinity'; We may not be able to generate a core file on signals");
+ sql_print_warning("setrlimit could not change the size of core files to 'infinity'; We may not be able to generate a core file on signals");
}
#endif
(void) sigemptyset(&set);
@@ -1883,9 +1886,11 @@ static void init_signals(void)
sigaddset(&set,SIGPIPE);
#endif
sigaddset(&set,SIGINT);
+#ifndef IGNORE_SIGHUP_SIGQUIT
sigaddset(&set,SIGQUIT);
- sigaddset(&set,SIGTERM);
sigaddset(&set,SIGHUP);
+#endif
+ sigaddset(&set,SIGTERM);
/* Fix signals if blocked by parents (can happen on Mac OS X) */
sigemptyset(&sa.sa_mask);
@@ -1969,11 +1974,13 @@ extern "C" void *signal_hand(void *arg __attribute__((unused)))
#ifdef USE_ONE_SIGNAL_HAND
(void) sigaddset(&set,THR_SERVER_ALARM); // For alarms
#endif
+#ifndef IGNORE_SIGHUP_SIGQUIT
(void) sigaddset(&set,SIGQUIT);
- (void) sigaddset(&set,SIGTERM);
#if THR_CLIENT_ALARM != SIGHUP
(void) sigaddset(&set,SIGHUP);
#endif
+#endif
+ (void) sigaddset(&set,SIGTERM);
(void) sigaddset(&set,SIGTSTP);
/* Save pid to this process (or thread on Linux) */
@@ -2021,7 +2028,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused)))
case SIGQUIT:
case SIGKILL:
#ifdef EXTRA_DEBUG
- sql_print_error("Got signal %d to shutdown mysqld",sig);
+ sql_print_information("Got signal %d to shutdown mysqld",sig);
#endif
DBUG_PRINT("info",("Got signal: %d abort_loop: %d",sig,abort_loop));
if (!abort_loop)
@@ -2033,7 +2040,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused)))
my_pthread_attr_setprio(&connection_attrib,INTERRUPT_PRIOR);
if (pthread_create(&tmp,&connection_attrib, kill_server_thread,
(void*) sig))
- sql_print_error("Error: Can't create thread to kill server");
+ sql_print_error("Can't create thread to kill server");
#else
kill_server((void*) sig); // MIT THREAD has a alarm thread
#endif
@@ -2057,7 +2064,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused)))
#endif
default:
#ifdef EXTRA_DEBUG
- sql_print_error("Warning: Got signal: %d error: %d",sig,error); /* purecov: tested */
+ sql_print_warning("Got signal: %d error: %d",sig,error); /* purecov: tested */
#endif
break; /* purecov: tested */
}
@@ -2165,10 +2172,10 @@ extern "C" pthread_handler_decl(handle_shutdown,arg)
my_thread_init();
// wait semaphore
- pthread_cond_wait( &eventShutdown, NULL);
+ pthread_cond_wait(&eventShutdown, NULL);
// close semaphore and kill server
- pthread_cond_destroy( &eventShutdown);
+ pthread_cond_destroy(&eventShutdown);
/*
Exit main loop on main thread, so kill will be done from
@@ -2339,11 +2346,11 @@ static int init_common_variables(const char *conf_file_name, int argc,
("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld",
files, max_connections, table_cache_size));
if (global_system_variables.log_warnings)
- sql_print_error("Warning: Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld",
+ sql_print_warning("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld",
files, max_connections, table_cache_size);
}
else if (global_system_variables.log_warnings)
- sql_print_error("Warning: Could not increase number of max_open_files to more than %u (request: %u)", files, wanted_files);
+ sql_print_warning("Could not increase number of max_open_files to more than %u (request: %u)", files, wanted_files);
}
open_files_limit= files;
}
@@ -2521,8 +2528,8 @@ static int init_server_components()
does nothing, and we don't take into account if he used this option or
not; but internally we give this variable a value to have the behaviour we
want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not).
- As sql-bin-update-same, log-update and log-bin cannot be changed by the user
- after starting the server (they are not variables), the user will not
+ As sql-bin-update-same, log-update and log-bin cannot be changed by the
+ user after starting the server (they are not variables), the user will not
later interfere with the settings we do here.
*/
if (opt_bin_log)
@@ -2570,8 +2577,8 @@ with --log-bin instead.");
}
else if (opt_log_slave_updates)
{
- sql_print_error("\
-Warning: you need to use --log-bin to make --log-slave-updates work. \
+ sql_print_warning("\
+you need to use --log-bin to make --log-slave-updates work. \
Now disabling --log-slave-updates.");
}
@@ -2579,7 +2586,7 @@ Now disabling --log-slave-updates.");
if (opt_log_slave_updates && replicate_same_server_id)
{
sql_print_error("\
-Error: using --replicate-same-server-id in conjunction with \
+using --replicate-same-server-id in conjunction with \
--log-slave-updates is impossible, it would lead to infinite loops in this \
server.");
unireg_abort(1);
@@ -2589,7 +2596,8 @@ server.");
if (opt_error_log)
{
if (!log_error_file_ptr[0])
- fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", 0);
+ fn_format(log_error_file, glob_hostname, mysql_data_home, ".err",
+ MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */
else
fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
MY_UNPACK_FILENAME | MY_SAFE_PATH);
@@ -2607,12 +2615,12 @@ server.");
if (opt_innodb_safe_binlog)
{
if (have_innodb != SHOW_OPTION_YES)
- sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
+ sql_print_warning("--innodb-safe-binlog is meaningful only if "
"the InnoDB storage engine is enabled in the server.");
#ifdef HAVE_INNOBASE_DB
if (innobase_flush_log_at_trx_commit != 1)
{
- sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
+ sql_print_warning("--innodb-safe-binlog is meaningful only if "
"innodb_flush_log_at_trx_commit is 1; now setting it "
"to 1.");
innobase_flush_log_at_trx_commit= 1;
@@ -2624,14 +2632,14 @@ server.");
good (especially "littlesync", and on Windows... see
srv/srv0start.c).
*/
- sql_print_error("Warning: --innodb-safe-binlog requires that "
+ sql_print_warning("--innodb-safe-binlog requires that "
"the innodb_flush_method actually synchronizes the "
"InnoDB log to disk; it is your responsibility "
"to verify that the method you chose does it.");
}
if (sync_binlog_period != 1)
{
- sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
+ sql_print_warning("--innodb-safe-binlog is meaningful only if "
"the global sync_binlog variable is 1; now setting it "
"to 1.");
sync_binlog_period= 1;
@@ -2670,7 +2678,7 @@ server.");
if (mlockall(MCL_CURRENT))
{
if (global_system_variables.log_warnings)
- sql_print_error("Warning: Failed to lock memory. Errno: %d\n",errno);
+ sql_print_warning("Failed to lock memory. Errno: %d\n",errno);
locked_in_memory= 0;
}
}
@@ -2696,7 +2704,7 @@ static void create_maintenance_thread()
{
pthread_t hThread;
if (pthread_create(&hThread,&connection_attrib,handle_manager,0))
- sql_print_error("Warning: Can't create thread to manage maintenance");
+ sql_print_warning("Can't create thread to manage maintenance");
}
}
@@ -2708,7 +2716,7 @@ static void create_shutdown_thread()
hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name);
pthread_t hThread;
if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0))
- sql_print_error("Warning: Can't create thread to handle shutdown requests");
+ sql_print_warning("Can't create thread to handle shutdown requests");
// On "Stop Service" we have to do regular shutdown
Service.SetShutdownEvent(hEventShutdown);
@@ -2717,7 +2725,7 @@ static void create_shutdown_thread()
pthread_cond_init(&eventShutdown, NULL);
pthread_t hThread;
if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0))
- sql_print_error("Warning: Can't create thread to handle shutdown requests");
+ sql_print_warning("Can't create thread to handle shutdown requests");
#endif
#endif // EMBEDDED_LIBRARY
}
@@ -2733,7 +2741,7 @@ static void handle_connections_methods()
(!have_tcpip || opt_disable_networking) &&
!opt_enable_shared_memory)
{
- sql_print_error("TCP/IP,--shared-memory or --named-pipe should be configured on NT OS");
+ sql_print_error("TCP/IP, --shared-memory, or --named-pipe should be configured on NT OS");
unireg_abort(1); // Will not return
}
#endif
@@ -2748,7 +2756,7 @@ static void handle_connections_methods()
if (pthread_create(&hThread,&connection_attrib,
handle_connections_namedpipes, 0))
{
- sql_print_error("Warning: Can't create thread to handle named pipes");
+ sql_print_warning("Can't create thread to handle named pipes");
handler_count--;
}
}
@@ -2759,7 +2767,7 @@ static void handle_connections_methods()
if (pthread_create(&hThread,&connection_attrib,
handle_connections_sockets, 0))
{
- sql_print_error("Warning: Can't create thread to handle TCP/IP");
+ sql_print_warning("Can't create thread to handle TCP/IP");
handler_count--;
}
}
@@ -2770,7 +2778,7 @@ static void handle_connections_methods()
if (pthread_create(&hThread,&connection_attrib,
handle_connections_shared_memory, 0))
{
- sql_print_error("Warning: Can't create thread to handle shared memory");
+ sql_print_warning("Can't create thread to handle shared memory");
handler_count--;
}
}
@@ -2809,7 +2817,7 @@ int main(int argc, char **argv)
if (_cust_check_startup())
{
/ * _cust_check_startup will report startup failure error * /
- exit( 1 );
+ exit(1);
}
#endif
@@ -2830,7 +2838,7 @@ int main(int argc, char **argv)
if (stack_size && stack_size < thread_stack)
{
if (global_system_variables.log_warnings)
- sql_print_error("Warning: Asked for %ld thread stack, but got %ld",
+ sql_print_warning("Asked for %ld thread stack, but got %ld",
thread_stack, stack_size);
thread_stack= stack_size;
}
@@ -2853,8 +2861,8 @@ int main(int argc, char **argv)
if (lower_case_table_names_used)
{
if (global_system_variables.log_warnings)
- sql_print_error("\
-Warning: You have forced lower_case_table_names to 0 through a command-line \
+ sql_print_warning("\
+You have forced lower_case_table_names to 0 through a command-line \
option, even though your file system '%s' is case insensitive. This means \
that you can corrupt a MyISAM table by accessing it with different cases. \
You should consider changing lower_case_table_names to 1 or 2",
@@ -2863,7 +2871,7 @@ You should consider changing lower_case_table_names to 1 or 2",
else
{
if (global_system_variables.log_warnings)
- sql_print_error("Warning: Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home);
+ sql_print_warning("Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home);
lower_case_table_names= 2;
}
}
@@ -2896,14 +2904,14 @@ You should consider changing lower_case_table_names to 1 or 2",
#ifdef EXTRA_DEBUG
switch (server_id) {
case 1:
- sql_print_error("\
-Warning: You have enabled the binary log, but you haven't set server-id to \
+ sql_print_warning("\
+You have enabled the binary log, but you haven't set server-id to \
a non-zero value: we force server id to 1; updates will be logged to the \
binary log, but connections from slaves will not be accepted.");
break;
case 2:
- sql_print_error("\
-Warning: You should set server-id to a non-0 value if master_host is set; \
+ sql_print_warning("\
+You should set server-id to a non-0 value if master_host is set; \
we force server id to 2, but this MySQL server will not act as a slave.");
break;
}
@@ -2978,11 +2986,21 @@ we force server id to 2, but this MySQL server will not act as a slave.");
printf(ER(ER_READY),my_progname,server_version,
((unix_sock == INVALID_SOCKET) ? (char*) "" : mysqld_unix_port),
mysqld_port);
+ if (MYSQL_COMPILATION_COMMENT[0] != '\0')
+ fputs(" " MYSQL_COMPILATION_COMMENT, stdout);
+ putchar('\n');
fflush(stdout);
#if defined(__NT__) || defined(HAVE_SMEM)
handle_connections_methods();
#else
+#ifdef __WIN__
+ if (!have_tcpip || opt_disable_networking)
+ {
+ sql_print_error("TCP/IP unavailable or disabled with --skip-networking; no available interfaces");
+ unireg_abort(1);
+ }
+#endif
handle_connections_sockets(0);
#endif /* __NT__ */
@@ -3265,7 +3283,7 @@ static int bootstrap(FILE *file)
if (pthread_create(&thd->real_id,&connection_attrib,handle_bootstrap,
(void*) thd))
{
- sql_print_error("Warning: Can't create thread to handle bootstrap");
+ sql_print_warning("Can't create thread to handle bootstrap");
DBUG_RETURN(-1);
}
/* Wait for thread to die */
@@ -3637,25 +3655,27 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg)
while (!abort_loop)
{
/* wait for named pipe connection */
- fConnected = ConnectNamedPipe( hPipe, NULL );
+ fConnected = ConnectNamedPipe(hPipe, NULL);
if (abort_loop)
break;
if (!fConnected)
fConnected = GetLastError() == ERROR_PIPE_CONNECTED;
if (!fConnected)
{
- CloseHandle( hPipe );
- if ((hPipe = CreateNamedPipe(pipe_name,
- PIPE_ACCESS_DUPLEX,
- PIPE_TYPE_BYTE |
- PIPE_READMODE_BYTE |
- PIPE_WAIT,
- PIPE_UNLIMITED_INSTANCES,
- (int) global_system_variables.net_buffer_length,
- (int) global_system_variables.net_buffer_length,
- NMPWAIT_USE_DEFAULT_WAIT,
- &saPipeSecurity )) ==
- INVALID_HANDLE_VALUE )
+ CloseHandle(hPipe);
+ if ((hPipe= CreateNamedPipe(pipe_name,
+ PIPE_ACCESS_DUPLEX,
+ PIPE_TYPE_BYTE |
+ PIPE_READMODE_BYTE |
+ PIPE_WAIT,
+ PIPE_UNLIMITED_INSTANCES,
+ (int) global_system_variables.
+ net_buffer_length,
+ (int) global_system_variables.
+ net_buffer_length,
+ NMPWAIT_USE_DEFAULT_WAIT,
+ &saPipeSecurity)) ==
+ INVALID_HANDLE_VALUE)
{
sql_perror("Can't create new named pipe!");
break; // Abort
@@ -3682,8 +3702,8 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg)
if (!(thd = new THD))
{
- DisconnectNamedPipe( hConnectedPipe );
- CloseHandle( hConnectedPipe );
+ DisconnectNamedPipe(hConnectedPipe);
+ CloseHandle(hConnectedPipe);
continue;
}
if (!(thd->net.vio = vio_new_win32pipe(hConnectedPipe)) ||
@@ -3959,8 +3979,9 @@ enum options_mysqld
OPT_INNODB_FLUSH_METHOD,
OPT_INNODB_FAST_SHUTDOWN,
OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB,
+ OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG,
OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG,
- OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_SKIP_SAFEMALLOC,
+ OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
@@ -4019,6 +4040,7 @@ enum options_mysqld
OPT_INNODB_LOCK_WAIT_TIMEOUT,
OPT_INNODB_THREAD_CONCURRENCY,
OPT_INNODB_FORCE_RECOVERY,
+ OPT_INNODB_STATUS_FILE,
OPT_INNODB_MAX_DIRTY_PAGES_PCT,
OPT_INNODB_OPEN_FILES,
OPT_BDB_CACHE_SIZE,
@@ -4054,12 +4076,25 @@ enum options_mysqld
struct my_option my_long_options[] =
{
+ {"help", '?', "Display this help and exit.",
+ (gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
+ 0, 0},
+#ifdef HAVE_REPLICATION
+ {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT,
+ "Option used by mysql-test for debugging and testing of replication.",
+ (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count,
+ 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#endif /* HAVE_REPLICATION */
{"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"basedir", 'b',
"Path to installation directory. All paths are usually resolved relative to this.",
(gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
+ {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
+Disable with --skip-bdb (will save memory).",
+ (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ 0, 0, 0},
#ifdef HAVE_BERKELEY_DB
{"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
(gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
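/*
  Illustrative sketch only: a simplified stand-in for the option-table
  pattern used in my_long_options[] above. The struct and field names below
  are assumptions chosen to mirror the positional layout visible in this
  diff (name, id, help text, value pointer, default, min, max); this is not
  the real my_option definition.
*/
#include <stdio.h>

struct demo_option
{
  const char *name;        /* long option name, e.g. "bdb-home"            */
  int         id;          /* short option char or OPT_* enum value        */
  const char *comment;     /* text printed by --help                       */
  long       *value;       /* where the parsed value is stored             */
  long        def_value;   /* default used when the option is not given    */
  long        min_value;   /* lower bound enforced by the option parser    */
  long        max_value;   /* upper bound enforced by the option parser    */
};

static long demo_connect_timeout;

static struct demo_option demo_options[]=
{
  {"connect-timeout", 'T', "Seconds to wait for a connect packet.",
   &demo_connect_timeout, 5, 2, 3600},
  {0, 0, 0, 0, 0, 0, 0}
};

int main(void)
{
  struct demo_option *opt;
  for (opt= demo_options; opt->name; opt++)
    printf("--%s (default %ld, range %ld..%ld): %s\n",
           opt->name, opt->def_value, opt->min_value, opt->max_value,
           opt->comment);
  return 0;
}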
@@ -4076,10 +4111,6 @@ struct my_option my_long_options[] =
"Disable synchronously flushing logs. This option is deprecated, use --skip-sync-bdb-logs or sync-bdb-logs=0 instead",
// (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"sync-bdb-logs", OPT_BDB_SYNC,
- "Synchronously flush logs. Enabled by default",
- (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
- NO_ARG, 1, 0, 0, 0, 0, 0},
{"bdb-shared-data", OPT_BDB_SHARED,
"Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
0, 0, 0, 0, 0},
@@ -4087,77 +4118,58 @@ struct my_option my_long_options[] =
(gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif /* HAVE_BERKELEY_DB */
- {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default",
- (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
- 0, 0, 0, 0},
- {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
-Disable with --skip-bdb (will save memory).",
- (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, 1, 0, 0,
- 0, 0, 0},
{"big-tables", OPT_BIG_TABLES,
"Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.",
+ (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"binlog-do-db", OPT_BINLOG_DO_DB,
"Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"binlog-ignore-db", OPT_BINLOG_IGNORE_DB,
"Tells the master that updates to the given database should not be logged tothe binary log.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.",
- (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"character_set_server", 'C', "Set the default character set.",
+ {"character-set-server", 'C', "Set the default character set.",
(gptr*) &default_character_set_name, (gptr*) &default_character_set_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
- {"collation_server", OPT_DEFAULT_COLLATION, "Set the default collation.",
+ {"character-sets-dir", OPT_CHARSETS_DIR,
+ "Directory where character sets are.", (gptr*) &charsets_dir,
+ (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"chroot", 'r', "Chroot mysqld daemon during startup.",
+ (gptr*) &mysqld_chroot, (gptr*) &mysqld_chroot, 0, GET_STR, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+ {"collation-server", OPT_DEFAULT_COLLATION, "Set the default collation.",
(gptr*) &default_collation_name, (gptr*) &default_collation_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ {"concurrent-insert", OPT_CONCURRENT_INSERT,
+ "Use concurrent insert with MyISAM. Disable with --skip-concurrent-insert.",
+ (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert,
+ 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.",
(gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
-#ifdef __WIN__
- {"standalone", OPT_STANDALONE,
- "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG,
- NO_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
- {"chroot", 'r', "Chroot mysqld daemon during startup.",
- (gptr*) &mysqld_chroot, (gptr*) &mysqld_chroot, 0, GET_STR, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
- {"character-sets-dir", OPT_CHARSETS_DIR,
- "Directory where character sets are.", (gptr*) &charsets_dir,
- (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"datadir", 'h', "Path to the database root.", (gptr*) &mysql_data_home,
(gptr*) &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifndef DBUG_OFF
{"debug", '#', "Debug log.", (gptr*) &default_dbug_option,
(gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef SAFEMALLOC
- {"skip-safemalloc", OPT_SKIP_SAFEMALLOC,
- "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG,
- 0, 0, 0, 0, 0, 0},
#endif
-#endif
-#ifdef HAVE_OPENSSL
- {"des-key-file", OPT_DES_KEY_FILE,
- "Load keys for des_encrypt() and des_encrypt from given file.",
- (gptr*) &des_key_file, (gptr*) &des_key_file, 0, GET_STR, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
-#endif /* HAVE_OPENSSL */
- {"default-character-set", 'C', "Set the default character set (Deprecated option, use character_set_server instead).",
+ {"default-character-set", 'C', "Set the default character set (deprecated option, use --character-set-server instead).",
(gptr*) &default_character_set_name, (gptr*) &default_character_set_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
- {"default-collation", OPT_DEFAULT_COLLATION, "Set the default collation (Deprecated option, use character_set_server instead).",
+ {"default-collation", OPT_DEFAULT_COLLATION, "Set the default collation (deprecated option, use --collation-server instead).",
(gptr*) &default_collation_name, (gptr*) &default_collation_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{"default-storage-engine", OPT_STORAGE_ENGINE,
"Set the default storage engine (table tyoe) for tables.", 0, 0,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"default-table-type", OPT_STORAGE_ENGINE,
- "(deprecated) Use default-storage-engine.", 0, 0,
+ "(deprecated) Use --default-storage-engine.", 0, 0,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.",
(gptr*) &default_tz_name, (gptr*) &default_tz_name,
@@ -4167,6 +4179,19 @@ Disable with --skip-bdb (will save memory).",
{"delay-key-write-for-all-tables", OPT_DELAY_KEY_WRITE_ALL,
"Don't flush key buffers between writes for any MyISAM table (Deprecated option, use --delay-key-write=all instead).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef HAVE_OPENSSL
+ {"des-key-file", OPT_DES_KEY_FILE,
+ "Load keys for des_encrypt() and des_encrypt from given file.",
+ (gptr*) &des_key_file, (gptr*) &des_key_file, 0, GET_STR, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+#endif /* HAVE_OPENSSL */
+#ifdef HAVE_REPLICATION
+ {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT,
+ "Option used by mysql-test for debugging and testing of replication.",
+ (gptr*) &disconnect_slave_event_count,
+ (gptr*) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0,
+ 0, 0, 0},
+#endif /* HAVE_REPLICATION */
{"enable-locking", OPT_ENABLE_LOCK,
"Deprecated option, use --external-locking instead.",
(gptr*) &opt_external_locking, (gptr*) &opt_external_locking,
@@ -4179,46 +4204,49 @@ Disable with --skip-bdb (will save memory).",
{"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.",
(gptr*) &opt_do_pstack, (gptr*) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0,
0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory", OPT_ENABLE_SHARED_MEMORY,
- "Enable the shared memory.",(gptr*) &opt_enable_shared_memory, (gptr*) &opt_enable_shared_memory,
- 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0,
GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0},
+ {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.",
+ (gptr*) &opt_external_locking, (gptr*) &opt_external_locking,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN,
- "The maximum length of the result of function group_concat.",
- (gptr*) &global_system_variables.group_concat_max_len,
- (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
- REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0},
/* We must always support the next option to make scripts like mysqltest
easier to do */
{"gdb", OPT_DEBUGGING,
"Set up signals usable for debugging",
(gptr*) &opt_debugging, (gptr*) &opt_debugging,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection",
+ (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.",
+ (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
{"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role.", 0, 0, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed when a slave connects to this master",
+ (gptr*) &opt_init_slave, (gptr*) &opt_init_slave, 0, GET_STR_ALLOC,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb", OPT_INNODB, "Enable InnoDB (if this version of MySQL supports it). \
+Disable with --skip-innodb (will save memory).",
+ (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ 0, 0, 0},
{"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH,
"Path to individual files and their sizes.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_INNOBASE_DB
{"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR,
- "The common part for Innodb table spaces.", (gptr*) &innobase_data_home_dir,
+ "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir,
(gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0,
0},
- {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR,
- "Path to innodb log files.", (gptr*) &innobase_log_group_home_dir,
- (gptr*) &innobase_log_group_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0,
- 0, 0},
- {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR,
- "Where full logs should be archived.", (gptr*) &innobase_log_arch_dir,
- (gptr*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"innodb_log_archive", OPT_INNODB_LOG_ARCHIVE,
- "Set to 1 if you want to have logs archived.", 0, 0, 0, GET_LONG, OPT_ARG,
- 0, 0, 0, 0, 0, 0},
+ {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN,
+ "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown,
+ (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+ {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE,
+ "Stores each InnoDB table to an .ibd file in the database dir.",
+ (gptr*) &innobase_file_per_table,
+ (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"innodb_flush_log_at_trx_commit", OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
"Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second).",
(gptr*) &innobase_flush_log_at_trx_commit,
@@ -4228,34 +4256,32 @@ Disable with --skip-bdb (will save memory).",
"With which method to flush data.", (gptr*) &innobase_unix_file_flush_method,
(gptr*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
0, 0, 0},
- {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN,
- "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown,
- (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+ {"innodb_locks_unsafe_for_binlog", OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG,
+ "Force InnoDB not to use next-key locking. Instead use only row-level locking",
+ (gptr*) &innobase_locks_unsafe_for_binlog,
+ (gptr*) &innobase_locks_unsafe_for_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR,
+ "Where full logs should be archived.", (gptr*) &innobase_log_arch_dir,
+ (gptr*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_log_archive", OPT_INNODB_LOG_ARCHIVE,
+ "Set to 1 if you want to have logs archived.", 0, 0, 0, GET_LONG, OPT_ARG,
+ 0, 0, 0, 0, 0, 0},
+ {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR,
+ "Path to InnoDB log files.", (gptr*) &innobase_log_group_home_dir,
+ (gptr*) &innobase_log_group_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0,
+ 0, 0},
{"innodb_max_dirty_pages_pct", OPT_INNODB_MAX_DIRTY_PAGES_PCT,
"Percentage of dirty pages allowed in bufferpool.", (gptr*) &srv_max_buf_pool_modified_pct,
(gptr*) &srv_max_buf_pool_modified_pct, 0, GET_ULONG, REQUIRED_ARG, 90, 0, 100, 0, 0, 0},
- {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE,
- "Stores each InnoDB table to an .ibd file in the database dir.",
- (gptr*) &innobase_file_per_table,
- (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_status_file", OPT_INNODB_STATUS_FILE,
+ "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file",
+ (gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file,
+ 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
#endif /* End HAVE_INNOBASE_DB */
- {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection",
- (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed when a slave connects to this master",
- (gptr*) &opt_init_slave, (gptr*) &opt_init_slave, 0, GET_STR_ALLOC,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"help", '?', "Display this help and exit.",
- (gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
- 0, 0},
- {"verbose", 'v', "Used with --help option for detailed help",
- (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
- 0, 0},
- {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.",
- (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
- {"log", 'l', "Log connections and queries to file.", (gptr*) &opt_logname,
- (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+ {"isam", OPT_ISAM, "Enable ISAM (if this version of MySQL supports it). \
+Disable with --skip-isam.",
+ (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ 0, 0, 0},
{"language", 'L',
"Client error messages in given language. May be given as a full path.",
(gptr*) &language_ptr, (gptr*) &language_ptr, 0, GET_STR, REQUIRED_ARG,
@@ -4265,6 +4291,8 @@ Disable with --skip-bdb (will save memory).",
(gptr*) &opt_local_infile,
(gptr*) &opt_local_infile, 0, GET_BOOL, OPT_ARG,
1, 0, 0, 0, 0, 0},
+ {"log", 'l', "Log connections and queries to file.", (gptr*) &opt_logname,
+ (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"log-bin", OPT_BIN_LOG,
"Log update queries in binary format.",
(gptr*) &opt_bin_logname, (gptr*) &opt_bin_logname, 0, GET_STR_ALLOC,
@@ -4273,46 +4301,54 @@ Disable with --skip-bdb (will save memory).",
"File that holds the names for last binary log files.",
(gptr*) &opt_binlog_index_name, (gptr*) &opt_binlog_index_name, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"log-error", OPT_ERROR_LOG_FILE, "Log error file.",
+ (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR,
+ OPT_ARG, 0, 0, 0, 0, 0, 0},
{"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.",
(gptr*) &myisam_log_filename, (gptr*) &myisam_log_filename, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
- {"log-update", OPT_UPDATE_LOG,
- "The update log is deprecated since version 5.0, is replaced by the binary \
-log and this option justs turns on --log-bin instead.",
- (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR,
- OPT_ARG, 0, 0, 0, 0, 0, 0},
- {"log-slow-queries", OPT_SLOW_QUERY_LOG,
- "Log slow queries to this log file. Defaults logging to hostname-slow.log file.",
- (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG,
- 0, 0, 0, 0, 0, 0},
{"log-long-format", '0',
"Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"log-short-format", OPT_SHORT_LOG_FORMAT,
- "Don't log extra information to update and slow-query logs.",
- (gptr*) &opt_short_log_format, (gptr*) &opt_short_log_format,
- 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES,
"Log queries that are executed without benefit of any index.",
(gptr*) &opt_log_queries_not_using_indexes, (gptr*) &opt_log_queries_not_using_indexes,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"log-short-format", OPT_SHORT_LOG_FORMAT,
+ "Don't log extra information to update and slow-query logs.",
+ (gptr*) &opt_short_log_format, (gptr*) &opt_short_log_format,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"log-slave-updates", OPT_LOG_SLAVE_UPDATES,
"Tells the slave to log the updates from the slave thread to the binary log. You will need to turn it on if you plan to daisy-chain the slaves.",
(gptr*) &opt_log_slave_updates, (gptr*) &opt_log_slave_updates, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"log-slow-queries", OPT_SLOW_QUERY_LOG,
+ "Log slow queries to this log file. Defaults logging to hostname-slow.log file.",
+ (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG,
+ 0, 0, 0, 0, 0, 0},
+ {"log-update", OPT_UPDATE_LOG,
+ "The update log is deprecated since version 5.0, is replaced by the binary \
+log and this option justs turns on --log-bin instead.",
+ (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR,
+ OPT_ARG, 0, 0, 0, 0, 0, 0},
{"low-priority-updates", OPT_LOW_PRIORITY_UPDATES,
"INSERT/DELETE/UPDATE has lower priority than selects.",
(gptr*) &global_system_variables.low_priority_updates,
(gptr*) &max_system_variables.low_priority_updates,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"master-connect-retry", OPT_MASTER_CONNECT_RETRY,
+ "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.",
+ (gptr*) &master_connect_retry, (gptr*) &master_connect_retry, 0, GET_UINT,
+ REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
{"master-host", OPT_MASTER_HOST,
"Master hostname or IP address for replication. If not set, the slave thread will not be started. Note that the setting of master-host will be ignored if there exists a valid master.info file.",
(gptr*) &master_host, (gptr*) &master_host, 0, GET_STR, REQUIRED_ARG, 0, 0,
0, 0, 0, 0},
- {"master-user", OPT_MASTER_USER,
- "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. The value in master.info will take precedence if it can be read.",
- (gptr*) &master_user, (gptr*) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0,
- 0, 0, 0, 0},
+ {"master-info-file", OPT_MASTER_INFO_FILE,
+ "The location and name of the file that remembers the master and where the I/O replication \
+thread is in the master's binlogs.",
+ (gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"master-password", OPT_MASTER_PASSWORD,
"The password the slave thread will authenticate with when connecting to the master. If not set, an empty password is assumed.The value in master.info will take precedence if it can be read.",
(gptr*)&master_password, (gptr*)&master_password, 0,
@@ -4321,32 +4357,14 @@ log and this option justs turns on --log-bin instead.",
"The port the master is listening on. If not set, the compiled setting of MYSQL_PORT is assumed. If you have not tinkered with configure options, this should be 3306. The value in master.info will take precedence if it can be read.",
(gptr*) &master_port, (gptr*) &master_port, 0, GET_UINT, REQUIRED_ARG,
MYSQL_PORT, 0, 0, 0, 0, 0},
- {"master-connect-retry", OPT_MASTER_CONNECT_RETRY,
- "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.",
- (gptr*) &master_connect_retry, (gptr*) &master_connect_retry, 0, GET_UINT,
- REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
{"master-retry-count", OPT_MASTER_RETRY_COUNT,
"The number of tries the slave will make to connect to the master before giving up.",
(gptr*) &master_retry_count, (gptr*) &master_retry_count, 0, GET_ULONG,
REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0},
- {"master-info-file", OPT_MASTER_INFO_FILE,
- "The location and name of the file that remembers the master and where the I/O replication \
-thread is in the master's binlogs.",
- (gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"master-ssl", OPT_MASTER_SSL,
"Enable the slave to connect to the master using SSL.",
(gptr*) &master_ssl, (gptr*) &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
- {"master-ssl-key", OPT_MASTER_SSL_KEY,
- "Master SSL keyfile name. Only applies if you have enabled master-ssl.",
- (gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG,
- 0, 0, 0, 0, 0, 0},
- {"master-ssl-cert", OPT_MASTER_SSL_CERT,
- "Master SSL certificate file name. Only applies if you have enabled \
-master-ssl",
- (gptr*) &master_ssl_cert, (gptr*) &master_ssl_cert, 0, GET_STR, OPT_ARG,
- 0, 0, 0, 0, 0, 0},
{"master-ssl-ca", OPT_MASTER_SSL_CA,
"Master SSL CA file. Only applies if you have enabled master-ssl.",
(gptr*) &master_ssl_ca, (gptr*) &master_ssl_ca, 0, GET_STR, OPT_ARG,
@@ -4355,39 +4373,44 @@ master-ssl",
"Master SSL CA path. Only applies if you have enabled master-ssl.",
(gptr*) &master_ssl_capath, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
+ {"master-ssl-cert", OPT_MASTER_SSL_CERT,
+ "Master SSL certificate file name. Only applies if you have enabled \
+master-ssl",
+ (gptr*) &master_ssl_cert, (gptr*) &master_ssl_cert, 0, GET_STR, OPT_ARG,
+ 0, 0, 0, 0, 0, 0},
{"master-ssl-cipher", OPT_MASTER_SSL_CIPHER,
"Master SSL cipher. Only applies if you have enabled master-ssl.",
(gptr*) &master_ssl_cipher, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
- {"myisam-recover", OPT_MYISAM_RECOVER,
- "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.",
- (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0,
- GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
- {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (gptr*) &locked_in_memory,
- (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"master-ssl-key", OPT_MASTER_SSL_KEY,
+ "Master SSL keyfile name. Only applies if you have enabled master-ssl.",
+ (gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG,
+ 0, 0, 0, 0, 0, 0},
+ {"master-user", OPT_MASTER_USER,
+ "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. The value in master.info will take precedence if it can be read.",
+ (gptr*) &master_user, (gptr*) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0,
+ 0, 0, 0, 0},
#ifdef HAVE_REPLICATION
- {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT,
- "Option used by mysql-test for debugging and testing of replication.",
- (gptr*) &disconnect_slave_event_count,
- (gptr*) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0,
- 0, 0, 0},
- {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT,
- "Option used by mysql-test for debugging and testing of replication.",
- (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count,
- 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"max-binlog-dump-events", OPT_MAX_BINLOG_DUMP_EVENTS,
"Option used by mysql-test for debugging and testing of replication.",
(gptr*) &max_binlog_dump_events, (gptr*) &max_binlog_dump_events, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL,
- "Option used by mysql-test for debugging and testing of replication.",
- (gptr*) &opt_sporadic_binlog_dump_fail,
- (gptr*) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
- 0},
#endif /* HAVE_REPLICATION */
- {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT,
- "Simulate memory shortage when compiled with the --with-debug=full option.",
- 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (gptr*) &locked_in_memory,
+ (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"myisam-recover", OPT_MYISAM_RECOVER,
+ "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.",
+ (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+ {"ndbcluster", OPT_NDBCLUSTER, "Enable NDB Cluster (if this version of MySQL supports it). \
+Disable with --skip-ndbcluster (will save memory).",
+ (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ 0, 0, 0},
+#ifdef HAVE_NDBCLUSTER_DB
+ {"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.",
+ (gptr*) &ndbcluster_connectstring, (gptr*) &ndbcluster_connectstring, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#endif
{"new", 'n', "Use very new possible 'unsafe' functions.",
(gptr*) &global_system_variables.new_mode,
(gptr*) &max_system_variables.new_mode,
@@ -4409,32 +4432,43 @@ master-ssl",
{"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.",
(gptr*) &pidfile_name_ptr, (gptr*) &pidfile_name_ptr, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"log-error", OPT_ERROR_LOG_FILE, "Log error file.",
- (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR,
- OPT_ARG, 0, 0, 0, 0, 0, 0},
{"port", 'P', "Port number to use for connection.", (gptr*) &mysqld_port,
(gptr*) &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"relay-log", OPT_RELAY_LOG,
+ "The location and name to use for relay logs.",
+ (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"relay-log-index", OPT_RELAY_LOG_INDEX,
+ "The location and name to use for the file that keeps a list of the last \
+relay logs.",
+ (gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE,
+ "The location and name of the file that remembers where the SQL replication \
+thread is in the relay logs.",
+ (gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-do-db", OPT_REPLICATE_DO_DB,
"Tells the slave thread to restrict replication to the specified database. To specify more than one database, use the directive multiple times, once for each database. Note that this will only work if you do not use cross-database queries such as UPDATE some_db.some_table SET foo='bar' while having selected a different or no database. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-do-table=db_name.%.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-do-table", OPT_REPLICATE_DO_TABLE,
"Tells the slave thread to restrict replication to the specified table. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates, in contrast to replicate-do-db.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE,
- "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-ignore-db", OPT_REPLICATE_IGNORE_DB,
"Tells the slave thread to not replicate to the specified database. To specify more than one database to ignore, use the directive multiple times, once for each database. This option will not work if you use cross database updates. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-ignore-table=db_name.%. ",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE,
"Tells the slave thread to not replicate to the specified table. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-datbase updates, in contrast to replicate-ignore-db.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE,
- "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB,
"Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE,
+ "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.",
+ 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE,
+ "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.",
+ 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_REPLICATION
{"replicate-same-server-id", OPT_REPLICATE_SAME_SERVER_ID,
"In replication, if set to 1, do not skip events having our server id. \
@@ -4449,8 +4483,6 @@ Can't be set to 1 if --log-slave-updates is used.",
"Hostname or IP of the slave to be reported to to the master during slave registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset if you do not want the slave to register itself with the master. Note that it is not sufficient for the master to simply read the IP of the slave off the socket once the slave connects. Due to NAT and other routing issues, that IP may not be valid for connecting to the slave from the master or other hosts.",
(gptr*) &report_host, (gptr*) &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0,
0, 0, 0, 0},
- {"report-user", OPT_REPORT_USER, "Undocumented.", (gptr*) &report_user,
- (gptr*) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"report-password", OPT_REPORT_PASSWORD, "Undocumented.",
(gptr*) &report_password, (gptr*) &report_password, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -4458,29 +4490,25 @@ Can't be set to 1 if --log-slave-updates is used.",
"Port for connecting to slave reported to the master during slave registration. Set it only if the slave is listening on a non-default port or if you have a special tunnel from the master or other clients to the slave. If not sure, leave this option unset.",
(gptr*) &report_port, (gptr*) &report_port, 0, GET_UINT, REQUIRED_ARG,
MYSQL_PORT, 0, 0, 0, 0, 0},
+ {"report-user", OPT_REPORT_USER, "Undocumented.", (gptr*) &report_user,
+ (gptr*) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented.",
(gptr*) &rpl_recovery_rank, (gptr*) &rpl_recovery_rank, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"relay-log", OPT_RELAY_LOG,
- "The location and name to use for relay logs.",
- (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0,
- GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"relay-log-index", OPT_RELAY_LOG_INDEX,
- "The location and name to use for the file that keeps a list of the last \
-relay logs.",
- (gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0,
- GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifndef TO_BE_DELETED
{"safe-show-database", OPT_SAFE_SHOW_DB,
- "Deprecated option; One should use GRANT SHOW DATABASES instead...",
+ "Deprecated option; use GRANT SHOW DATABASES instead...",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"safe-user-create", OPT_SAFE_USER_CREATE,
"Don't allow new user creation by the user who has no write privileges to the mysql.user table.",
(gptr*) &opt_safe_user_create, (gptr*) &opt_safe_user_create, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT,
+ "Simulate memory shortage when compiled with the --with-debug=full option.",
+ 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.",
(gptr*) &opt_secure_auth, (gptr*) &opt_secure_auth, 0, GET_BOOL, NO_ARG,
my_bool(0), 0, 0, 0, 0, 0},
@@ -4492,7 +4520,12 @@ relay logs.",
"Change the value of a variable. Please note that this option is deprecated;you can set variables directly with --variable-name=value.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
- {"shared_memory_base_name",OPT_SHARED_MEMORY_BASE_NAME,
+ {"shared-memory", OPT_ENABLE_SHARED_MEMORY,
+ "Enable the shared memory.",(gptr*) &opt_enable_shared_memory, (gptr*) &opt_enable_shared_memory,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+#endif
+#ifdef HAVE_SMEM
+ {"shared-memory-base-name",OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
@@ -4500,31 +4533,15 @@ relay logs.",
"Show user and password in SHOW SLAVE HOSTS on this master",
(gptr*) &opt_show_slave_auth_info, (gptr*) &opt_show_slave_auth_info, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"concurrent-insert", OPT_CONCURRENT_INSERT,
- "Use concurrent insert with MyISAM. Disable with --skip-concurrent-insert.",
- (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert,
- 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{"skip-grant-tables", OPT_SKIP_GRANT,
"Start without grant tables. This gives all users FULL ACCESS to all tables!",
(gptr*) &opt_noacl, (gptr*) &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
- {"innodb", OPT_INNODB, "Enable InnoDB (if this version of MySQL supports it). \
-Disable with --skip-innodb (will save memory).",
- (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, 1, 0, 0,
- 0, 0, 0},
- {"isam", OPT_ISAM, "Enable isam (if this version of MySQL supports it). \
-Disable with --skip-isam.",
- (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 1, 0, 0,
- 0, 0, 0},
- {"ndbcluster", OPT_NDBCLUSTER, "Enable NDB Cluster (if this version of MySQL supports it). \
-Disable with --skip-ndbcluster (will save memory).",
- (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0,
- 0, 0, 0},
+ {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0,
+ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip-locking", OPT_SKIP_LOCK,
"Deprecated option, use --skip-external-locking instead.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0,
- GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip-name-resolve", OPT_SKIP_RESOLVE,
"Don't resolve hostnames. All hostnames are IP's or 'localhost'.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -4533,6 +4550,13 @@ Disable with --skip-ndbcluster (will save memory).",
0, 0, 0},
{"skip-new", OPT_SKIP_NEW, "Don't use new, possible wrong routines.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+#ifndef DBUG_OFF
+#ifdef SAFEMALLOC
+ {"skip-safemalloc", OPT_SKIP_SAFEMALLOC,
+ "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
+#endif
+#endif
{"skip-show-database", OPT_SKIP_SHOW_DB,
"Don't allow 'SHOW DATABASE' commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0,
0, 0, 0, 0},
@@ -4547,11 +4571,6 @@ Disable with --skip-ndbcluster (will save memory).",
{"skip-thread-priority", OPT_SKIP_PRIOR,
"Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
0, 0, 0, 0, 0},
- {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE,
- "The location and name of the file that remembers where the SQL replication \
-thread is in the relay logs.",
- (gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_REPLICATION
{"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR,
"The location where the slave should put its temporary files when \
@@ -4565,6 +4584,13 @@ replicating a LOAD DATA INFILE command.",
{"socket", OPT_SOCKET, "Socket file to use for connection.",
(gptr*) &mysqld_unix_port, (gptr*) &mysqld_unix_port, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef HAVE_REPLICATION
+ {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL,
+ "Option used by mysql-test for debugging and testing of replication.",
+ (gptr*) &opt_sporadic_binlog_dump_fail,
+ (gptr*) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
+ 0},
+#endif /* HAVE_REPLICATION */
{"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME,
"The update log is deprecated since version 5.0, is replaced by the binary \
log and this option does nothing anymore.",
@@ -4576,13 +4602,21 @@ log and this option does nothing anymore.",
#ifdef HAVE_OPENSSL
#include "sslopt-longopts.h"
#endif
+#ifdef __WIN__
+ {"standalone", OPT_STANDALONE,
+ "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG,
+ NO_ARG, 0, 0, 0, 0, 0, 0},
+#endif
+ {"symbolic-links", 's', "Enable symbolic link support.",
+ (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG,
+ IF_PURIFY(0,1), 0, 0, 0, 0, 0},
{"temp-pool", OPT_TEMP_POOL,
"Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.",
(gptr*) &use_temp_pool, (gptr*) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1,
0, 0, 0, 0, 0},
{"tmpdir", 't',
"Path for temporary files. Several paths may be specified, separated by a "
-#if defined( __WIN__) || defined(OS2) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__)
"semicolon (;)"
#else
"colon (:)"
@@ -4593,26 +4627,19 @@ log and this option does nothing anymore.",
{"transaction-isolation", OPT_TX_ISOLATION,
"Default transaction isolation level.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0,
0, 0, 0, 0, 0},
- {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.",
- (gptr*) &opt_external_locking, (gptr*) &opt_external_locking,
- 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"use-symbolic-links", 's', "Enable symbolic link support. Deprecated option; Use --symbolic-links instead.",
- (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG,
- IF_PURIFY(0,1), 0, 0, 0, 0, 0},
- {"symbolic-links", 's', "Enable symbolic link support.",
+ {"use-symbolic-links", 's', "Enable symbolic link support. Deprecated option; use --symbolic-links instead.",
(gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG,
IF_PURIFY(0,1), 0, 0, 0, 0, 0},
{"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
+ {"verbose", 'v', "Used with --help option for detailed help",
+ (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
+ 0, 0},
{"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
- {"log-warnings", 'W', "Log some not critical warnings to the log file.",
- (gptr*) &global_system_variables.log_warnings,
- (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0,
- 0, 0, 0},
- {"warnings", 'W', "Deprecated ; Use --log-warnings instead.",
+ {"warnings", 'W', "Deprecated; use --log-warnings instead.",
(gptr*) &global_system_variables.log_warnings,
- (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0,
+ (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L,
0, 0, 0},
{ "back_log", OPT_BACK_LOG,
"The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.",
@@ -4623,6 +4650,10 @@ log and this option does nothing anymore.",
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG,
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0},
+ /* QQ: The following should be removed soon! (bdb_max_lock preferred) */
+ {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
+ (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
+ REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
{"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
@@ -4631,15 +4662,16 @@ log and this option does nothing anymore.",
"The maximum number of locks you can have active on a BDB table.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
- /* QQ: The following should be removed soon! */
- {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
- (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
- REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
#endif /* HAVE_BERKELEY_DB */
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024L, IO_SIZE, ~0L, 0, IO_SIZE, 0},
+ {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
+ "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
+ (gptr*) &global_system_variables.bulk_insert_buff_size,
+ (gptr*) &max_system_variables.bulk_insert_buff_size,
+ 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0},
{"connect_timeout", OPT_CONNECT_TIMEOUT,
"The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.",
(gptr*) &connect_timeout, (gptr*) &connect_timeout,
@@ -4650,18 +4682,38 @@ log and this option does nothing anymore.",
(gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb,
0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0},
#endif
- {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
- "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
- (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
- GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
+ { "date_format", OPT_DATE_FORMAT,
+ "The DATE format (For future).",
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "datetime_format", OPT_DATETIME_FORMAT,
+ "The DATETIME/TIMESTAMP format (for future).",
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "default_week_format", OPT_DEFAULT_WEEK_FORMAT,
+ "The default week format used by WEEK() functions.",
+ (gptr*) &global_system_variables.default_week_format,
+ (gptr*) &max_system_variables.default_week_format,
+ 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
{"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT,
"After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.",
(gptr*) &delayed_insert_limit, (gptr*) &delayed_insert_limit, 0, GET_ULONG,
REQUIRED_ARG, DELAYED_LIMIT, 1, ~0L, 0, 1, 0},
+ {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
+ "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
+ (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
+ GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
{ "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE,
"What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.",
(gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG,
REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0},
+ {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
+ "Binary logs will be rotated after expire-log-days days ",
+ (gptr*) &expire_logs_days,
+ (gptr*) &expire_logs_days, 0, GET_ULONG,
+ REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
{ "flush_time", OPT_FLUSH_TIME,
"A dedicated thread is created to flush all tables at the given interval.",
(gptr*) &flush_time, (gptr*) &flush_time, 0, GET_ULONG, REQUIRED_ARG,
@@ -4670,14 +4722,14 @@ log and this option does nothing anymore.",
"List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE)",
0, 0, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- { "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
- "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
- (gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG,
- REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
{ "ft_max_word_len", OPT_FT_MAX_WORD_LEN,
"The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
(gptr*) &ft_max_word_len, (gptr*) &ft_max_word_len, 0, GET_ULONG,
REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0},
+ { "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
+ "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.",
+ (gptr*) &ft_min_word_len, (gptr*) &ft_min_word_len, 0, GET_ULONG,
+ REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
{ "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT,
"Number of best matches to use for query expansion",
(gptr*) &ft_query_expansion_limit, (gptr*) &ft_query_expansion_limit, 0, GET_ULONG,
@@ -4686,49 +4738,58 @@ log and this option does nothing anymore.",
"Use stopwords from this file instead of built-in list.",
(gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN,
+ "The maximum length of the result of function group_concat.",
+ (gptr*) &global_system_variables.group_concat_max_len,
+ (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG,
+ REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0},
#ifdef HAVE_INNOBASE_DB
- {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS,
- "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
- (gptr*) &innobase_mirrored_log_groups,
- (gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10,
- 0, 1, 0},
- {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
- "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
- (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group,
- 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
- {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
- "Size of each log file in a log group in megabytes.",
- (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0,
- GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0},
- {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
- "The size of the buffer which InnoDB uses to write log to the log files on disk.",
- (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0,
- GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0},
- {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
- "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
- (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
- GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0},
- {"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
- "If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
- (gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
- GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
{"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
"Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
(gptr*) &innobase_additional_mem_pool_size,
(gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG,
1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0},
+ {"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
+ "If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
+ (gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
+ GET_LONG, REQUIRED_ARG, 0, 0, 63000, 0, 1, 0},
+ {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
+ "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
+ (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0,
+ GET_LONG, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0},
{"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS,
"Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads,
(gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0,
1, 0},
- {"innodb_open_files", OPT_INNODB_OPEN_FILES,
- "How many files at the maximum InnoDB keeps open at the same time.",
- (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
- GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
+ {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
+ "Helps to save your data in case the disk image of the database becomes corrupt.",
+ (gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0,
+ GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
{"innodb_lock_wait_timeout", OPT_INNODB_LOCK_WAIT_TIMEOUT,
"Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.",
(gptr*) &innobase_lock_wait_timeout, (gptr*) &innobase_lock_wait_timeout,
0, GET_LONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
+ {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
+ "The size of the buffer which InnoDB uses to write log to the log files on disk.",
+ (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0,
+ GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0},
+ {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
+ "Size of each log file in a log group in megabytes.",
+ (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0,
+ GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0},
+ {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
+ "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
+ (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group,
+ 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
+ {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS,
+ "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
+ (gptr*) &innobase_mirrored_log_groups,
+ (gptr*) &innobase_mirrored_log_groups, 0, GET_LONG, REQUIRED_ARG, 1, 1, 10,
+ 0, 1, 0},
+ {"innodb_open_files", OPT_INNODB_OPEN_FILES,
+ "How many files at the maximum InnoDB keeps open at the same time.",
+ (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
+ GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
#ifdef HAVE_REPLICATION
/*
Disabled for the 4.1.3 release. Disabling just this paragraph of code is
@@ -4753,10 +4814,6 @@ log and this option does nothing anymore.",
"Helps in performance tuning in heavily concurrent environments.",
(gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency,
0, GET_LONG, REQUIRED_ARG, 8, 1, 1000, 0, 1, 0},
- {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
- "Helps to save your data in case the disk image of the database becomes corrupt.",
- (gptr*) &innobase_force_recovery, (gptr*) &innobase_force_recovery, 0,
- GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
#endif /* HAVE_INNOBASE_DB */
{"interactive_timeout", OPT_INTERACTIVE_TIMEOUT,
"The number of seconds the server waits for activity on an interactive connection before closing it.",
@@ -4776,6 +4833,12 @@ log and this option does nothing anymore.",
0, (GET_ULL | GET_ASK_ADDR),
REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
IO_SIZE, 0},
+ {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
+ "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
+ (gptr*) &dflt_key_cache_var.param_age_threshold,
+ (gptr*) 0,
+ 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
+ 300, 100, ~0L, 0, 100, 0},
{"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
"The default size of key cache blocks",
(gptr*) &dflt_key_cache_var.param_block_size,
@@ -4788,12 +4851,6 @@ log and this option does nothing anymore.",
(gptr*) 0,
0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100,
1, 100, 0, 1, 0},
- {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
- "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
- (gptr*) &dflt_key_cache_var.param_age_threshold,
- (gptr*) 0,
- 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG,
- 300, 100, ~0L, 0, 100, 0},
{"long_query_time", OPT_LONG_QUERY_TIME,
"Log all queries that have taken more than long_query_time seconds to execute to file.",
(gptr*) &global_system_variables.long_query_time,
@@ -4824,14 +4881,14 @@ value. Will also apply to relay logs if max_relay_log_size is 0. \
The minimum value for this variable is 4096.",
(gptr*) &max_binlog_size, (gptr*) &max_binlog_size, 0, GET_ULONG,
REQUIRED_ARG, 1024*1024L*1024L, IO_SIZE, 1024*1024L*1024L, 0, IO_SIZE, 0},
- {"max_connections", OPT_MAX_CONNECTIONS,
- "The number of simultaneous clients allowed.", (gptr*) &max_connections,
- (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
- 0},
{"max_connect_errors", OPT_MAX_CONNECT_ERRORS,
"If there is more than this number of interrupted connections from a host this host will be blocked from further connections.",
(gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG,
REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0},
+ {"max_connections", OPT_MAX_CONNECTIONS,
+ "The number of simultaneous clients allowed.", (gptr*) &max_connections,
+ (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1,
+ 0},
{"max_delayed_threads", OPT_MAX_DELAYED_THREADS,
"Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero, which means INSERT DELAYED is not used.",
(gptr*) &global_system_variables.max_insert_delayed_threads,
@@ -4884,11 +4941,6 @@ The minimum value for this variable is 4096.",
"After this many write locks, allow some read locks to run in between.",
(gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG,
REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0},
- {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
- "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!",
- (gptr*) &global_system_variables.bulk_insert_buff_size,
- (gptr*) &max_system_variables.bulk_insert_buff_size,
- 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0},
{"myisam_block_size", OPT_MYISAM_BLOCK_SIZE,
"Block size to be used for MyISAM index pages.",
(gptr*) &opt_myisam_block_size,
@@ -4927,16 +4979,16 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.net_buffer_length,
(gptr*) &max_system_variables.net_buffer_length, 0, GET_ULONG,
REQUIRED_ARG, 16384, 1024, 1024*1024L, 0, 1024, 0},
- {"net_retry_count", OPT_NET_RETRY_COUNT,
- "If a read on a communication port is interrupted, retry this many times before giving up.",
- (gptr*) &global_system_variables.net_retry_count,
- (gptr*) &max_system_variables.net_retry_count,0,
- GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0},
{"net_read_timeout", OPT_NET_READ_TIMEOUT,
"Number of seconds to wait for more data from a connection before aborting the read.",
(gptr*) &global_system_variables.net_read_timeout,
(gptr*) &max_system_variables.net_read_timeout, 0, GET_ULONG,
REQUIRED_ARG, NET_READ_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
+ {"net_retry_count", OPT_NET_RETRY_COUNT,
+ "If a read on a communication port is interrupted, retry this many times before giving up.",
+ (gptr*) &global_system_variables.net_retry_count,
+ (gptr*) &max_system_variables.net_retry_count,0,
+ GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0},
{"net_write_timeout", OPT_NET_WRITE_TIMEOUT,
"Number of seconds to wait for a block to be written to a connection before aborting the write.",
(gptr*) &global_system_variables.net_write_timeout,
@@ -4998,11 +5050,21 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.query_prealloc_size,
(gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG,
REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0},
+ {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
+ "Allocation block size for storing ranges during optimization",
+ (gptr*) &global_system_variables.range_alloc_block_size,
+ (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
+ REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
{"read_buffer_size", OPT_RECORD_BUFFER,
"Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.",
(gptr*) &global_system_variables.read_buff_size,
(gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG,
128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0},
+ {"read_only", OPT_READONLY,
+ "Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege",
+ (gptr*) &opt_readonly,
+ (gptr*) &opt_readonly,
+ 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
{"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER,
"When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks. If not set, then it's set to the value of record_buffer.",
(gptr*) &global_system_variables.read_rnd_buff_size,
@@ -5035,16 +5097,6 @@ The minimum value for this variable is 4096.",
(gptr*) &slave_net_timeout, (gptr*) &slave_net_timeout, 0,
GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
#endif /* HAVE_REPLICATION */
- {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
- "Allocation block size for storing ranges during optimization",
- (gptr*) &global_system_variables.range_alloc_block_size,
- (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
- REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0},
- {"read-only", OPT_READONLY,
- "Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege",
- (gptr*) &opt_readonly,
- (gptr*) &opt_readonly,
- 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
{"slow_launch_time", OPT_SLOW_LAUNCH_TIME,
"If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented.",
(gptr*) &slow_launch_time, (gptr*) &slow_launch_time, 0, GET_ULONG,
@@ -5055,33 +5107,52 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
1, 0},
+ {"sql_updatable_view_key", OPT_SQL_UPDATABLE_VIEW_KEY,
+ "0 = NO = Don't check presence of key in updatable VIEW. 1 = YES = Prohibit update of VIEW which does not contain key of underlying table. 2 = LIMIT1 = Same as YES but prohibited only operation with LIMIT 1 (usually get from GUI tools).",
+ (gptr*) &global_system_variables.sql_updatable_view_key,
+ (gptr*) &max_system_variables.sql_updatable_view_key,
+ 0, GET_ULONG, REQUIRED_ARG, 1, 0, 2, 0, 1, 0},
+#ifdef HAVE_BERKELEY_DB
+ {"sync-bdb-logs", OPT_BDB_SYNC,
+ "Synchronously flush logs. Enabled by default",
+ (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
+ NO_ARG, 1, 0, 0, 0, 0, 0},
+#endif /* HAVE_BERKELEY_DB */
{"sync-binlog", OPT_SYNC_BINLOG,
"Sync the binlog to disk after every #th event. \
#=0 (the default) does no sync. Syncing slows MySQL down",
(gptr*) &sync_binlog_period,
(gptr*) &sync_binlog_period, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1,
0},
+ {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default",
+ (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
+ 0, 0, 0, 0},
{"table_cache", OPT_TABLE_CACHE,
"The number of open tables for all threads.", (gptr*) &table_cache_size,
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
0, 1, 0},
- {"thread_concurrency", OPT_THREAD_CONCURRENCY,
- "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.",
- (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
- DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
{"thread_cache_size", OPT_THREAD_CACHE_SIZE,
"How many threads we should keep in a cache for reuse.",
(gptr*) &thread_cache_size, (gptr*) &thread_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, 16384, 0, 1, 0},
+ {"thread_concurrency", OPT_THREAD_CONCURRENCY,
+ "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.",
+ (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG,
+ DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
+ {"thread_stack", OPT_THREAD_STACK,
+ "The stack size for each thread.", (gptr*) &thread_stack,
+ (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
+ 1024L*128L, ~0L, 0, 1024, 0},
+ { "time_format", OPT_TIME_FORMAT,
+ "The TIME format (for future).",
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
+ (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"tmp_table_size", OPT_TMP_TABLE_SIZE,
"If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.",
(gptr*) &global_system_variables.tmp_table_size,
(gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG,
REQUIRED_ARG, 32*1024*1024L, 1024, ~0L, 0, 1, 0},
- {"thread_stack", OPT_THREAD_STACK,
- "The stack size for each thread.", (gptr*) &thread_stack,
- (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
- 1024L*128L, ~0L, 0, 1024, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
"Allocation block size for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_alloc_block_size,
@@ -5098,36 +5169,6 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.net_wait_timeout, 0, GET_ULONG,
REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT),
0, 1, 0},
- {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
- "Binary logs will be rotated after expire-log-days days ",
- (gptr*) &expire_logs_days,
- (gptr*) &expire_logs_days, 0, GET_ULONG,
- REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
- { "default-week-format", OPT_DEFAULT_WEEK_FORMAT,
- "The default week format used by WEEK() functions.",
- (gptr*) &global_system_variables.default_week_format,
- (gptr*) &max_system_variables.default_week_format,
- 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
- { "date-format", OPT_DATE_FORMAT,
- "The DATE format (For future).",
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- { "datetime-format", OPT_DATETIME_FORMAT,
- "The DATETIME/TIMESTAMP format (for future).",
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- { "time-format", OPT_TIME_FORMAT,
- "The TIME format (for future).",
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
- (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"sql_updatable_view_key", OPT_SQL_UPDATABLE_VIEW_KEY,
- "0 = NO = Don't check presence of key in updatable VIEW. 1 = YES = Prohibit update of VIEW which does not contain key of underlying table. 2 = LIMIT1 = Same as YES but prohibited only operation with LIMIT 1 (usually get from GUI tools).",
- (gptr*) &global_system_variables.sql_updatable_view_key,
- (gptr*) &max_system_variables.sql_updatable_view_key,
- 0, GET_ULONG, REQUIRED_ARG, 1, 0, 2, 0, 1, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
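
Each row in the my_long_options array above is positional: option name, option id, help text, pointer to the variable that receives the parsed value, pointer to the per-connection maximum (or 0), typelib, value type (GET_LONG, GET_ULONG, GET_BOOL, ...), whether an argument is required, then default, minimum, maximum, subtract value, block size and application type. The reduced sketch below only mirrors that ordering for a single entry; the struct and field names are illustrative stand-ins, not the real my_getopt declaration.

/* Illustrative stand-in for one my_long_options row; field names here are
   assumptions, not the real my_getopt declaration. */
#include <stdio.h>

struct option_sketch
{
  const char    *name;        /* long option name                         */
  const char    *comment;     /* help text shown by --help                */
  unsigned long *value;       /* variable the parsed value is stored into */
  long long      def_value;   /* default when the option is not given     */
  long long      min_value;   /* values below this are clamped            */
  long long      max_value;   /* values above this are clamped            */
  long           block_size;  /* result is rounded to a multiple of this  */
};

static unsigned long innobase_open_files_sketch;

static struct option_sketch row=
{
  "innodb_open_files",
  "The maximum number of files InnoDB keeps open at the same time.",
  &innobase_open_files_sketch, 300, 10, ~0L, 1
};

int main(void)
{
  *row.value= (unsigned long) row.def_value;  /* what REQUIRED_ARG parsing does by default */
  printf("--%s defaults to %lu\n", row.name, *row.value);
  return 0;
}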
@@ -5154,6 +5195,8 @@ struct show_var_st status_vars[]= {
{"Com_create_function", (char*) (com_stat+(uint) SQLCOM_CREATE_FUNCTION),SHOW_LONG},
{"Com_create_index", (char*) (com_stat+(uint) SQLCOM_CREATE_INDEX),SHOW_LONG},
{"Com_create_table", (char*) (com_stat+(uint) SQLCOM_CREATE_TABLE),SHOW_LONG},
+ {"Com_dealloc_sql", (char*) (com_stat+(uint)
+ SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG},
{"Com_delete", (char*) (com_stat+(uint) SQLCOM_DELETE),SHOW_LONG},
{"Com_delete_multi", (char*) (com_stat+(uint) SQLCOM_DELETE_MULTI),SHOW_LONG},
{"Com_do", (char*) (com_stat+(uint) SQLCOM_DO),SHOW_LONG},
@@ -5162,6 +5205,8 @@ struct show_var_st status_vars[]= {
{"Com_drop_index", (char*) (com_stat+(uint) SQLCOM_DROP_INDEX),SHOW_LONG},
{"Com_drop_table", (char*) (com_stat+(uint) SQLCOM_DROP_TABLE),SHOW_LONG},
{"Com_drop_user", (char*) (com_stat+(uint) SQLCOM_DROP_USER),SHOW_LONG},
+ {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE),
+ SHOW_LONG},
{"Com_flush", (char*) (com_stat+(uint) SQLCOM_FLUSH),SHOW_LONG},
{"Com_grant", (char*) (com_stat+(uint) SQLCOM_GRANT),SHOW_LONG},
{"Com_ha_close", (char*) (com_stat+(uint) SQLCOM_HA_CLOSE),SHOW_LONG},
@@ -5177,6 +5222,8 @@ struct show_var_st status_vars[]= {
{"Com_lock_tables", (char*) (com_stat+(uint) SQLCOM_LOCK_TABLES),SHOW_LONG},
{"Com_optimize", (char*) (com_stat+(uint) SQLCOM_OPTIMIZE),SHOW_LONG},
{"Com_preload_keys", (char*) (com_stat+(uint) SQLCOM_PRELOAD_KEYS),SHOW_LONG},
+ {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE),
+ SHOW_LONG},
{"Com_purge", (char*) (com_stat+(uint) SQLCOM_PURGE),SHOW_LONG},
{"Com_purge_before_date", (char*) (com_stat+(uint) SQLCOM_PURGE_BEFORE),SHOW_LONG},
{"Com_rename_table", (char*) (com_stat+(uint) SQLCOM_RENAME_TABLE),SHOW_LONG},
@@ -5223,12 +5270,6 @@ struct show_var_st status_vars[]= {
{"Com_unlock_tables", (char*) (com_stat+(uint) SQLCOM_UNLOCK_TABLES),SHOW_LONG},
{"Com_update", (char*) (com_stat+(uint) SQLCOM_UPDATE),SHOW_LONG},
{"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_UPDATE_MULTI),SHOW_LONG},
- {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE),
- SHOW_LONG},
- {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE),
- SHOW_LONG},
- {"Com_dealloc_sql", (char*) (com_stat+(uint)
- SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG},
{"Connections", (char*) &thread_id, SHOW_LONG_CONST},
{"Created_tmp_disk_tables", (char*) &created_tmp_disk_tables,SHOW_LONG},
{"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG},
@@ -5239,6 +5280,7 @@ struct show_var_st status_vars[]= {
{"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST},
{"Handler_commit", (char*) &ha_commit_count, SHOW_LONG},
{"Handler_delete", (char*) &ha_delete_count, SHOW_LONG},
+ {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG},
{"Handler_read_first", (char*) &ha_read_first_count, SHOW_LONG},
{"Handler_read_key", (char*) &ha_read_key_count, SHOW_LONG},
{"Handler_read_next", (char*) &ha_read_next_count, SHOW_LONG},
@@ -5248,13 +5290,12 @@ struct show_var_st status_vars[]= {
{"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG},
{"Handler_update", (char*) &ha_update_count, SHOW_LONG},
{"Handler_write", (char*) &ha_write_count, SHOW_LONG},
- {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG},
{"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed,
SHOW_KEY_CACHE_LONG},
- {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used,
- SHOW_KEY_CACHE_CONST_LONG},
{"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused,
SHOW_KEY_CACHE_CONST_LONG},
+ {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used,
+ SHOW_KEY_CACHE_CONST_LONG},
{"Key_read_requests", (char*) &dflt_key_cache_var.global_cache_r_requests,
SHOW_KEY_CACHE_LONG},
{"Key_reads", (char*) &dflt_key_cache_var.global_cache_read,
@@ -5555,6 +5596,11 @@ static void mysql_init_variables(void)
#else
have_archive_db= SHOW_OPTION_NO;
#endif
+#ifdef HAVE_CSV_DB
+ have_csv_db= SHOW_OPTION_YES;
+#else
+ have_csv_db= SHOW_OPTION_NO;
+#endif
#ifdef HAVE_NDBCLUSTER_DB
have_ndbcluster=SHOW_OPTION_DISABLED;
#else
@@ -5664,7 +5710,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
if (!mysqld_user || !strcmp(mysqld_user, argument))
mysqld_user= argument;
else
- fprintf(stderr, "Warning: Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user);
+ sql_print_warning("Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user);
break;
case 'L':
strmake(language, argument, sizeof(language)-1);
@@ -5682,9 +5728,11 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
#ifdef EMBEDDED_LIBRARY
case OPT_MAX_ALLOWED_PACKET:
max_allowed_packet= atoi(argument);
+ global_system_variables.max_allowed_packet= max_allowed_packet;
break;
case OPT_NET_BUFFER_LENGTH:
net_buffer_length= atoi(argument);
+ global_system_variables.net_buffer_length= net_buffer_length;
break;
#endif
#include <sslopt-case.h>
@@ -6033,15 +6081,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
case OPT_BDB_SHARED:
berkeley_init_flags&= ~(DB_PRIVATE);
- berkeley_shared_data=1;
+ berkeley_shared_data= 1;
break;
#endif /* HAVE_BERKELEY_DB */
case OPT_BDB:
#ifdef HAVE_BERKELEY_DB
if (opt_bdb)
- have_berkeley_db=SHOW_OPTION_YES;
+ have_berkeley_db= SHOW_OPTION_YES;
else
- have_berkeley_db=SHOW_OPTION_DISABLED;
+ have_berkeley_db= SHOW_OPTION_DISABLED;
#endif
break;
case OPT_ISAM:
@@ -6055,22 +6103,22 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case OPT_NDBCLUSTER:
#ifdef HAVE_NDBCLUSTER_DB
if (opt_ndbcluster)
- have_ndbcluster=SHOW_OPTION_YES;
+ have_ndbcluster= SHOW_OPTION_YES;
else
- have_ndbcluster=SHOW_OPTION_DISABLED;
+ have_ndbcluster= SHOW_OPTION_DISABLED;
#endif
break;
case OPT_INNODB:
#ifdef HAVE_INNOBASE_DB
if (opt_innodb)
- have_innodb=SHOW_OPTION_YES;
+ have_innodb= SHOW_OPTION_YES;
else
- have_innodb=SHOW_OPTION_DISABLED;
+ have_innodb= SHOW_OPTION_DISABLED;
#endif
break;
case OPT_INNODB_DATA_FILE_PATH:
#ifdef HAVE_INNOBASE_DB
- innobase_data_file_path=argument;
+ innobase_data_file_path= argument;
#endif
break;
#ifdef HAVE_INNOBASE_DB
@@ -6134,7 +6182,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
}
return 0;
}
-
+ /* Initiates DEBUG - but no debugging here ! */
extern "C" gptr *
mysql_getopt_value(const char *keyname, uint key_length,
@@ -6165,6 +6213,15 @@ mysql_getopt_value(const char *keyname, uint key_length,
}
+void option_error_reporter(enum loglevel level, const char *format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vprint_msg_to_log(level, format, args);
+ va_end(args);
+}
+
+
static void get_options(int argc,char **argv)
{
int ho_error;
@@ -6172,12 +6229,15 @@ static void get_options(int argc,char **argv)
my_getopt_register_get_addr(mysql_getopt_value);
strmake(def_ft_boolean_syntax, ft_boolean_syntax,
sizeof(ft_boolean_syntax)-1);
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+ my_getopt_error_reporter= option_error_reporter;
+ if ((ho_error= handle_options(&argc, &argv, my_long_options,
+ get_one_option)))
exit(ho_error);
if (argc > 0)
{
fprintf(stderr, "%s: Too many arguments (first extra is '%s').\nUse --help to get a list of available options\n", my_progname, *argv);
- exit(ho_error);
+ /* FIXME add EXIT_TOO_MANY_ARGUMENTS to "mysys_err.h" and return that code? */
+ exit(1);
}
if (opt_help)
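
The option_error_reporter() added above is installed as my_getopt_error_reporter before handle_options() runs, so malformed server options are reported through the error log rather than bare stderr. Below is a reduced sketch of that hook pattern, with stand-in names and stderr in place of vprint_msg_to_log().

/* Hook-pointer sketch; the loglevel values and reporter names are stand-ins. */
#include <stdarg.h>
#include <stdio.h>

enum loglevel_sketch { ERROR_LEVEL_S, WARNING_LEVEL_S, INFORMATION_LEVEL_S };

typedef void (*error_reporter_t)(enum loglevel_sketch level,
                                 const char *format, ...);

static void stderr_reporter(enum loglevel_sketch level, const char *format, ...)
{
  va_list args;
  (void) level;
  va_start(args, format);
  vfprintf(stderr, format, args);   /* the real hook calls vprint_msg_to_log() */
  va_end(args);
}

/* the getopt library calls through this pointer when it hits a bad option */
static error_reporter_t getopt_error_reporter_sketch= stderr_reporter;

int main(void)
{
  getopt_error_reporter_sketch(WARNING_LEVEL_S,
                               "unknown option '--%s'\n", "no-such-flag");
  return 0;
}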
@@ -6439,7 +6499,7 @@ static int test_if_case_insensitive(const char *dir_name)
(void) my_delete(buff2, MYF(0));
if ((file= my_create(buff, 0666, O_RDWR, MYF(0))) < 0)
{
- sql_print_error("Warning: Can't create test file %s", buff);
+ sql_print_warning("Can't create test file %s", buff);
DBUG_RETURN(-1);
}
my_close(file, MYF(0));
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index d2190c35bc6..e0e2b5c8045 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -3666,7 +3666,7 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
if (*key2 && !(*key2)->simple_key())
flag|=CLONE_KEY2_MAYBE;
*key1=key_and(*key1,*key2,flag);
- if ((*key1)->type == SEL_ARG::IMPOSSIBLE)
+ if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE)
{
tree1->type= SEL_TREE::IMPOSSIBLE;
DBUG_RETURN(tree1);
@@ -3904,6 +3904,13 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
return key1;
}
+ if ((key1->min_flag | key2->min_flag) & GEOM_FLAG)
+ {
+ key1->free_tree();
+ key2->free_tree();
+ return 0; // Can't optimize this
+ }
+
key1->use_count--;
key2->use_count--;
SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0;
@@ -3986,7 +3993,8 @@ key_or(SEL_ARG *key1,SEL_ARG *key2)
key1->use_count--;
key2->use_count--;
- if (key1->part != key2->part)
+ if (key1->part != key2->part ||
+ (key1->min_flag | key2->min_flag) & GEOM_FLAG)
{
key1->free_tree();
key2->free_tree();
diff --git a/sql/protocol.cc b/sql/protocol.cc
index e14eafc86a2..bceac780037 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -213,11 +213,13 @@ net_printf(THD *thd, uint errcode, ...)
2+SQLSTATE_LENGTH+1 : 2) : 0);
#ifndef EMBEDDED_LIBRARY
text_pos=(char*) net->buff + head_length + offset + 1;
+ length= (uint) ((char*)net->buff_end - text_pos);
+#else
+ length=sizeof(text_pos)-1;
#endif
- (void) vsprintf(my_const_cast(char*) (text_pos),format,args);
- length=(uint) strlen((char*) text_pos);
- if (length >= sizeof(net->last_error))
- length=sizeof(net->last_error)-1; /* purecov: inspected */
+ length=my_vsnprintf(my_const_cast(char*) (text_pos),
+ min(length, sizeof(net->last_error)),
+ format,args);
va_end(args);
#ifndef EMBEDDED_LIBRARY
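
The net_printf() change above replaces an unbounded vsprintf() with my_vsnprintf() limited to the smaller of the remaining network buffer and sizeof(net->last_error), so an oversized error message is truncated instead of overrunning the buffer. Here is a reduced sketch of the same idea using the C library vsnprintf(); the buffer sizes are illustrative.

/* Bounded formatting sketch; sizes and names are illustrative. */
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static unsigned long format_error(char *buf, size_t buf_room,
                                  size_t last_error_size,
                                  const char *format, ...)
{
  size_t limit= buf_room < last_error_size ? buf_room : last_error_size;
  va_list args;
  va_start(args, format);
  vsnprintf(buf, limit, format, args);  /* never writes past 'limit' bytes */
  va_end(args);
  return (unsigned long) strlen(buf);
}

int main(void)
{
  char buf[32];
  unsigned long len= format_error(buf, sizeof(buf), 512,
                                  "Unknown column '%s' in '%s'",
                                  "c1", "field list");
  printf("%lu: %s\n", len, buf);
  return 0;
}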
diff --git a/sql/records.cc b/sql/records.cc
index 37fbc7570ed..8dd4f548093 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -101,6 +101,9 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
else if (select && select->quick)
{
DBUG_PRINT("info",("using rr_quick"));
+
+ if (!table->file->inited)
+ table->file->ha_index_init(select->quick->index);
info->read_record=rr_quick;
}
else if (table->sort.record_pointers)
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index bb70b793d3b..10ff5fa3596 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -590,6 +590,8 @@ pthread_handler_decl(handle_failsafe_rpl,arg)
THD *thd = new THD;
thd->thread_stack = (char*)&thd;
MYSQL* recovery_captain = 0;
+ const char* msg;
+
pthread_detach_this_thread();
if (init_failsafe_rpl_thread(thd) || !(recovery_captain=mysql_init(0)))
{
@@ -597,11 +599,11 @@ pthread_handler_decl(handle_failsafe_rpl,arg)
goto err;
}
pthread_mutex_lock(&LOCK_rpl_status);
+ msg= thd->enter_cond(&COND_rpl_status,
+ &LOCK_rpl_status, "Waiting for request");
while (!thd->killed && !abort_loop)
{
bool break_req_chain = 0;
- const char* msg = thd->enter_cond(&COND_rpl_status,
- &LOCK_rpl_status, "Waiting for request");
pthread_cond_wait(&COND_rpl_status, &LOCK_rpl_status);
thd->proc_info="Processing request";
while (!break_req_chain)
@@ -619,9 +621,8 @@ pthread_handler_decl(handle_failsafe_rpl,arg)
break;
}
}
- thd->exit_cond(msg);
}
- pthread_mutex_unlock(&LOCK_rpl_status);
+ thd->exit_cond(msg);
err:
if (recovery_captain)
mysql_close(recovery_captain);
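
In handle_failsafe_rpl() above, enter_cond()/exit_cond() now bracket the whole wait loop instead of being re-entered on every iteration, and exit_cond(), which also releases LOCK_rpl_status, replaces the separate pthread_mutex_unlock(). A reduced pthread sketch of that pairing; the enter/exit helpers are stand-ins for the THD methods.

/* enter_cond()/exit_cond() pairing, reduced to plain pthreads. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond= PTHREAD_COND_INITIALIZER;
static int done= 0;

static const char *enter_cond(const char *msg)
{ printf("state: %s\n", msg); return "previous state"; }

static void exit_cond(const char *old_msg)
{ pthread_mutex_unlock(&lock); printf("state: %s\n", old_msg); }

static void *waiter(void *arg)
{
  pthread_mutex_lock(&lock);
  /* register the wait state once, before the loop ... */
  const char *old= enter_cond("Waiting for request");
  while (!done)
    pthread_cond_wait(&cond, &lock);
  /* ... and leave it once, after the loop; exit_cond() also unlocks */
  exit_cond(old);
  return arg;
}

int main(void)
{
  pthread_t t;
  pthread_create(&t, NULL, waiter, NULL);
  pthread_mutex_lock(&lock);
  done= 1;
  pthread_cond_signal(&cond);
  pthread_mutex_unlock(&lock);
  pthread_join(t, NULL);
  return 0;
}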
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 458f1a3b08e..d92d5eb42b2 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -34,6 +34,12 @@
- If the variable should show up in 'show variables' add it to the
init_vars[] struct in this file
+ NOTES:
+ - Be careful with var->save_result: sys_var::check() only updates
+ ulonglong_value; so other members of the union are garbage then; to use
+ them you must first assign a value to them (in specific ::check() for
+ example).
+
TODO:
- Add full support for the variable character_set (for 4.1)
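
The NOTES added above describe a convention worth spelling out: sys_var::check() fills only save_result.ulonglong_value, so any variable whose update() reads another union member (a string, a time zone pointer, and so on) must have its own check() assign that member first, which is exactly what the sys_var_thd_time_zone and sync_binlog changes later in this file rely on. A reduced sketch of that convention; the union and methods are stand-ins for the real sys_var classes.

/* save_result convention sketch; the union and methods are stand-ins. */
#include <stdio.h>

union save_result_sketch
{
  unsigned long long ulonglong_value;  /* filled by the generic check()        */
  const char        *plain_value;      /* garbage unless a specific check()    */
};                                     /* assigns it first                     */

struct var_sketch
{
  union save_result_sketch save_result;

  int check(unsigned long long v)      /* generic path: numeric value only     */
  { save_result.ulonglong_value= v; return 0; }

  int check_str(const char *s)         /* specific path: must set its member   */
  { save_result.plain_value= s; return 0; }
};

int main(void)
{
  var_sketch v;
  v.check(4096ULL);
  printf("numeric update sees %llu\n", v.save_result.ulonglong_value);
  v.check_str("Europe/Helsinki");
  printf("string update sees %s\n", v.save_result.plain_value);
  return 0;
}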
@@ -623,8 +629,8 @@ struct show_var_st init_vars[]= {
#ifdef HAVE_BERKELEY_DB
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG},
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
- {"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
{"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG},
+ {"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
{"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG},
{"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL},
{"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
@@ -664,9 +670,11 @@ struct show_var_st init_vars[]= {
{"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
{"have_compress", (char*) &have_compress, SHOW_HAVE},
{"have_crypt", (char*) &have_crypt, SHOW_HAVE},
+ {"have_csv", (char*) &have_csv_db, SHOW_HAVE},
+ {"have_example_engine", (char*) &have_example_db, SHOW_HAVE},
+ {"have_geometry", (char*) &have_geometry, SHOW_HAVE},
{"have_innodb", (char*) &have_innodb, SHOW_HAVE},
{"have_isam", (char*) &have_isam, SHOW_HAVE},
- {"have_geometry", (char*) &have_geometry, SHOW_HAVE},
{"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE},
{"have_openssl", (char*) &have_openssl, SHOW_HAVE},
{"have_query_cache", (char*) &have_query_cache, SHOW_HAVE},
@@ -685,6 +693,7 @@ struct show_var_st init_vars[]= {
{"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL},
{"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG },
{"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL},
+ {"innodb_locks_unsafe_for_binlog", (char*) &innobase_locks_unsafe_for_binlog, SHOW_MY_BOOL},
{"innodb_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_INT},
{"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
{"innodb_force_recovery", (char*) &innobase_force_recovery, SHOW_LONG },
@@ -784,6 +793,8 @@ struct show_var_st init_vars[]= {
SHOW_SYS},
{sys_query_cache_size.name, (char*) &sys_query_cache_size, SHOW_SYS},
{sys_query_cache_type.name, (char*) &sys_query_cache_type, SHOW_SYS},
+ {sys_query_cache_wlock_invalidate.name,
+ (char *) &sys_query_cache_wlock_invalidate, SHOW_SYS},
#endif /* HAVE_QUERY_CACHE */
{sys_query_prealloc_size.name, (char*) &sys_query_prealloc_size, SHOW_SYS},
{sys_range_alloc_block_size.name, (char*) &sys_range_alloc_block_size,
@@ -1147,8 +1158,10 @@ static int check_max_delayed_threads(THD *thd, set_var *var)
static void fix_max_connections(THD *thd, enum_var_type type)
{
+#ifndef EMBEDDED_LIBRARY
resize_thr_alarm(max_connections +
global_system_variables.max_insert_delayed_threads + 10);
+#endif
}
@@ -2347,7 +2360,7 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var)
bool sys_var_sync_binlog_period::update(THD *thd, set_var *var)
{
pthread_mutex_t *lock_log= mysql_bin_log.get_log_lock();
- sync_binlog_period= var->save_result.ulong_value;
+ sync_binlog_period= (ulong) var->save_result.ulonglong_value;
/*
Must reset the counter otherwise it may already be beyond the new period
and so the new period will not be taken into account. Need mutex otherwise
@@ -2389,8 +2402,9 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var)
return 1;
}
#endif
-
- if (!(var->save_result.time_zone= my_tz_find(thd, res)))
+
+ if (!(var->save_result.time_zone=
+ my_tz_find(res, thd->lex->time_zone_tables_used)))
{
my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL");
return 1;
@@ -2435,7 +2449,8 @@ void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type)
if (default_tz_name)
{
String str(default_tz_name, &my_charset_latin1);
- global_system_variables.time_zone= my_tz_find(thd, &str);
+ global_system_variables.time_zone=
+ my_tz_find(&str, thd->lex->time_zone_tables_used);
}
else
global_system_variables.time_zone= my_tz_SYSTEM;
@@ -2892,8 +2907,9 @@ int set_var_password::check(THD *thd)
if (!user->host.str)
user->host.str= (char*) thd->host_or_ip;
/* Returns 1 as the function sends error to client */
- return check_change_password(thd, user->host.str, user->user.str) ? 1 : 0;
-#else
+ return check_change_password(thd, user->host.str, user->user.str, password) ?
+ 1 : 0;
+#else
return 0;
#endif
}
@@ -2902,8 +2918,8 @@ int set_var_password::update(THD *thd)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* Returns 1 as the function sends error to client */
- return (change_password(thd, user->host.str, user->user.str, password) ?
- 1 : 0);
+ return change_password(thd, user->host.str, user->user.str, password) ?
+ 1 : 0;
#else
return 0;
#endif
diff --git a/sql/set_var.h b/sql/set_var.h
index 1374492526e..c34ebbca4b2 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -892,6 +892,7 @@ ulong fix_sql_mode(ulong sql_mode);
extern sys_var_str sys_charset_system;
extern sys_var_str sys_init_connect;
extern sys_var_str sys_init_slave;
+extern sys_var_thd_time_zone sys_time_zone;
CHARSET_INFO *get_old_charset_by_name(const char *old_name);
gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length,
NAMED_LIST **found);
diff --git a/sql/share/charsets/cp852.xml b/sql/share/charsets/cp852.xml
index ee434859233..958587d0399 100644
--- a/sql/share/charsets/cp852.xml
+++ b/sql/share/charsets/cp852.xml
@@ -114,6 +114,8 @@
</map>
</collation>
+<collation name="cp852_bin" flag="binary"/>
+
</charset>
</charsets>
diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt
index 9d1c429d5b6..587a8be7ac8 100644
--- a/sql/share/czech/errmsg.txt
+++ b/sql/share/czech/errmsg.txt
@@ -1,3 +1,19 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
/*
Modifikoval Petr -Bnajdr, snajdr@pvt.net, snajdr@cpress.cz v.0.01
ISO LATIN-8852-2
@@ -88,7 +104,7 @@ character-set=latin2
"Blob sloupec '%-.64s' nem-Be bt pouit jako kl",
"P-Bli velk dlka sloupce '%-.64s' (nejvce %d). Pouijte BLOB",
"M-Bete mt pouze jedno AUTO pole a to mus bt definovno jako kl",
-"%s: p-Bipraven na spojen\n",
+"%s: p-Bipraven na spojen",
"%s: norm-Bln ukonen\n",
"%s: p-Bijat signal %d, konm\n",
"%s: ukon-Ben prce hotovo\n",
@@ -313,7 +329,8 @@ character-set=latin2
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt
index b2d2fdf4d77..8d6e23d449e 100644
--- a/sql/share/danish/errmsg.txt
+++ b/sql/share/danish/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Knud Riishjgrd knudriis@post.tele.dk 99 &&
Carsten H. Pedersen, carsten.pedersen@bitbybit.dk oct. 1999 / aug. 2001. */
@@ -82,7 +95,7 @@ character-set=latin1
"BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks",
"For stor feltlngde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet",
"Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal vre indekseret",
-"%s: klar til tilslutninger\n",
+"%s: klar til tilslutninger",
"%s: Normal nedlukning\n",
"%s: Fangede signal %d. Afslutter!!\n",
"%s: Server lukket\n",
@@ -307,7 +320,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt
index 85c0443869f..0c60c63de64 100644
--- a/sql/share/dutch/errmsg.txt
+++ b/sql/share/dutch/errmsg.txt
@@ -1,6 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind
+/* Copyright (C) 2003 MySQL AB
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Dutch error messages (share/dutch/errmsg.txt)
2001-08-02 - Arjen Lentz (agl@bitbike.com)
Completed earlier partial translation; worked on consistency and spelling.
@@ -90,7 +104,7 @@ character-set=latin1
"BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie",
"Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB",
"Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd.",
-"%s: klaar voor verbindingen\n",
+"%s: klaar voor verbindingen",
"%s: Normaal afgesloten \n",
"%s: Signaal %d. Systeem breekt af!\n",
"%s: Afsluiten afgerond\n",
@@ -315,7 +329,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt
index 641f267d67e..b850c2d36d0 100644
--- a/sql/share/english/errmsg.txt
+++ b/sql/share/english/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
character-set=latin1
@@ -79,7 +92,7 @@ character-set=latin1
"BLOB column '%-.64s' can't be used in key specification with the used table type",
"Column length too big for column '%-.64s' (max = %d); use BLOB instead",
"Incorrect table definition; there can be only one auto column and it must be defined as a key",
-"%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d\n",
+"%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d",
"%s: Normal shutdown\n",
"%s: Got signal %d. Aborting!\n",
"%s: Shutdown complete\n",
@@ -304,7 +317,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt
index e64c0c17e74..ef958921b5e 100644
--- a/sql/share/estonian/errmsg.txt
+++ b/sql/share/estonian/errmsg.txt
@@ -1,9 +1,22 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
/*
- Copyright Abandoned 1997 MySQL AB
- This file is public domain and comes with NO WARRANTY of any kind
Esialgne tlge: Tnu Samuel (tonu@spam.ee)
Parandanud ja tiendanud: Indrek Siitan (tfr@mysql.com)
-
*/
character-set=latin7
@@ -84,7 +97,7 @@ character-set=latin7
"BLOB-tpi tulpa '%-.64s' ei saa kasutada vtmena",
"Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB vljatpi",
"Vigane tabelikirjeldus; Tabelis tohib olla ks auto_increment tpi tulp ning see peab olema defineeritud vtmena",
-"%s: ootab hendusi\n",
+"%s: ootab hendusi",
"%s: MySQL lpetas\n",
"%s: sain signaali %d. Lpetan!\n",
"%s: Lpp\n",
@@ -309,7 +322,8 @@ character-set=latin7
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt
index f3443457346..8723919ab47 100644
--- a/sql/share/french/errmsg.txt
+++ b/sql/share/french/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
character-set=latin1
@@ -79,7 +92,7 @@ character-set=latin1
"Champ BLOB '%-.64s' ne peut tre utilis dans une cl",
"Champ '%-.64s' trop long (max = %d). Utilisez un BLOB",
"Un seul champ automatique est permis et il doit tre index",
-"%s: Prt pour des connections\n",
+"%s: Prt pour des connections",
"%s: Arrt normal du serveur\n",
"%s: Reu le signal %d. Abandonne!\n",
"%s: Arrt du serveur termin\n",
@@ -304,7 +317,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt
index 498a230faed..74274ebfc8c 100644
--- a/sql/share/german/errmsg.txt
+++ b/sql/share/german/errmsg.txt
@@ -1,6 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind
+/* Copyright (C) 2003 MySQL AB
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Dirk Munzinger (dmun@4t2.com)
2001-06-07
@@ -11,7 +25,7 @@
Stefan Hinz (stefan@mysql.com)
2003-10-01
- */
+*/
character-set=latin1
@@ -91,7 +105,7 @@ character-set=latin1
"BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlssel verwendet werden",
"Feldlnge fr Feld '%-.64s' zu gro (maximal %d). BLOB-Feld verwenden!",
"Falsche Tabellendefinition. Es darf nur ein Auto-Feld geben und dieses muss als Schlssel definiert werden",
-"%-.64s: Bereit fr Verbindungen\n",
+"%-.64s: Bereit fr Verbindungen",
"%-.64s: Normal heruntergefahren\n",
"%-.64s: Signal %d erhalten. Abbruch!\n",
"%-.64s: Heruntergefahren (shutdown)\n",
@@ -316,7 +330,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt
index 2827517ba9a..ea148999fc4 100644
--- a/sql/share/greek/errmsg.txt
+++ b/sql/share/greek/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
character-set=greek
@@ -79,7 +92,7 @@ character-set=greek
" Blob '%-.64s' (key specification)",
" '%-.64s' (max = %d). BLOB",
" auto field key",
-"%s: \n",
+"%s: ",
"%s: shutdown\n",
"%s: %d. !\n",
"%s: Shutdown \n",
@@ -304,7 +317,8 @@ character-set=greek
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt
index fab0b156322..ede873f21c0 100644
--- a/sql/share/hungarian/errmsg.txt
+++ b/sql/share/hungarian/errmsg.txt
@@ -1,7 +1,23 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Translated by Feher Peter. Forditotta Feher Peter (feherp@mail.matav.hu) 1998
Updated May, 2000
- This file is public domain and comes with NO WARRANTY of any kind */
+*/
character-set=latin2
@@ -81,7 +97,7 @@ character-set=latin2
"Blob objektum '%-.64s' nem hasznalhato kulcskent",
"A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb.",
"Csak egy auto mezo lehetseges, es azt kulcskent kell definialni.",
-"%s: kapcsolatra kesz\n",
+"%s: kapcsolatra kesz",
"%s: Normal leallitas\n",
"%s: %d jelzes. Megszakitva!\n",
"%s: A leallitas kesz\n",
@@ -306,7 +322,8 @@ character-set=latin2
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt
index 24634514a23..7898503dc03 100644
--- a/sql/share/italian/errmsg.txt
+++ b/sql/share/italian/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
character-set=latin1
@@ -304,7 +317,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt
index 68f2857aeca..f73eca2a183 100644
--- a/sql/share/japanese/errmsg.txt
+++ b/sql/share/japanese/errmsg.txt
@@ -1,5 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
3.22.10-beta euc-japanese (ujis) text
*/
@@ -81,7 +96,7 @@ character-set=ujis
"BLOB column '%-.64s' can't be used in key specification with the used table type",
"column '%-.64s' ,ݤ column 礭¿ޤ. ( %d ޤ). BLOB 򤫤˻ѤƤ.",
"ơ֥㤤ޤ; there can be only one auto column and it must be defined as a key",
-"%s: λ\n",
+"%s: λ",
"%s: Normal shutdown\n",
"%s: Got signal %d. !\n",
"%s: Shutdown λ\n",
@@ -306,7 +321,8 @@ character-set=ujis
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt
index 70267c82364..6c8a5c2661a 100644
--- a/sql/share/korean/errmsg.txt
+++ b/sql/share/korean/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This ȭ is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
character-set=euckr
@@ -79,7 +92,7 @@ character-set=euckr
"BLOB Į '%-.64s' Ű ǿ ϴ.",
"Į '%-.64s' Į ̰ ʹ ϴ (ִ = %d). ſ BLOB ϼ.",
"Ȯ ̺ ; ̺ ϳ auto Į ϰ Ű ǵǾ մϴ.",
-"%s: غԴϴ.\n",
+"%s: غԴϴ",
"%s: shutdown\n",
"%s: %d ȣ . !\n",
"%s: Shutdown Ϸ!\n",
@@ -304,7 +317,8 @@ character-set=euckr
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt
index 1d84a3a5e5a..48ade24703b 100644
--- a/sql/share/norwegian-ny/errmsg.txt
+++ b/sql/share/norwegian-ny/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Roy-Magne Mo rmo@www.hivolda.no 97 */
@@ -81,7 +94,7 @@ character-set=latin1
"Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar",
"For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor",
"Bare eitt auto felt kan vre definert som nkkel.",
-"%s: klar for tilkoblingar\n",
+"%s: klar for tilkoblingar",
"%s: Normal nedkopling\n",
"%s: Oppdaga signal %d. Avsluttar!\n",
"%s: Nedkopling komplett\n",
@@ -306,7 +319,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt
index be881d54473..e217155a4df 100644
--- a/sql/share/norwegian/errmsg.txt
+++ b/sql/share/norwegian/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Roy-Magne Mo rmo@www.hivolda.no 97 */
@@ -81,7 +94,7 @@ character-set=latin1
"Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nkler",
"For stor nkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor",
"Bare ett auto felt kan vre definert som nkkel.",
-"%s: klar for tilkoblinger\n",
+"%s: klar for tilkoblinger",
"%s: Normal avslutning\n",
"%s: Oppdaget signal %d. Avslutter!\n",
"%s: Avslutning komplett\n",
@@ -306,7 +319,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt
index 8a576b5bf82..c514581ec1c 100644
--- a/sql/share/polish/errmsg.txt
+++ b/sql/share/polish/errmsg.txt
@@ -1,6 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind
+/* Copyright (C) 2003 MySQL AB
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Changed by Jaroslaw Lewandowski <jotel@itnet.com.pl>
Charset ISO-8859-2
*/
@@ -83,7 +97,7 @@ character-set=latin2
"Kolumna typu Blob '%-.64s' nie moe by uyta w specyfikacji klucza",
"Zbyt dua dugo? kolumny '%-.64s' (maks. = %d). W zamian uyj typu BLOB",
"W tabeli moe by tylko jedno pole auto i musi ono by zdefiniowane jako klucz",
-"%s: gotowe do po?czenia\n",
+"%s: gotowe do po?czenia",
"%s: Standardowe zakoczenie dziaania\n",
"%s: Otrzymano sygna %d. Koczenie dziaania!\n",
"%s: Zakoczenie dziaania wykonane\n",
@@ -308,7 +322,8 @@ character-set=latin2
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt
index 6794db726cc..ca6cccb7471 100644
--- a/sql/share/portuguese/errmsg.txt
+++ b/sql/share/portuguese/errmsg.txt
@@ -1,5 +1,19 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
/* Updated by Thiago Delgado Pinto - thiagodp@ieg.com.br - 06.07.2002 */
character-set=latin1
@@ -80,7 +94,7 @@ character-set=latin1
"Coluna BLOB '%-.64s' no pode ser utilizada na especificao de chave para o tipo de tabela usado",
"Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar",
"Definio incorreta de tabela. Somente permitido um nico campo auto-incrementado e ele tem que ser definido como chave",
-"%s: Pronto para conexes\n",
+"%s: Pronto para conexes",
"%s: 'Shutdown' normal\n",
"%s: Obteve sinal %d. Abortando!\n",
"%s: 'Shutdown' completo\n",
@@ -305,7 +319,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt
index 9eaa4860b64..3c69e33a222 100644
--- a/sql/share/romanian/errmsg.txt
+++ b/sql/share/romanian/errmsg.txt
@@ -1,6 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind
+/* Copyright (C) 2003 MySQL AB
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Translated into Romanian by Stefan Saroiu
e-mail: tzoompy@cs.washington.edu
*/
@@ -83,7 +97,7 @@ character-set=latin2
"Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit",
"Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine",
"Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie",
-"%s: sint gata pentru conectii\n",
+"%s: sint gata pentru conectii",
"%s: Terminare normala\n",
"%s: Semnal %d obtinut. Aborting!\n",
"%s: Terminare completa\n",
@@ -308,7 +322,8 @@ character-set=latin2
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt
index eec85d611fc..13c7a410443 100644
--- a/sql/share/russian/errmsg.txt
+++ b/sql/share/russian/errmsg.txt
@@ -1,6 +1,22 @@
-/* Copyright 2003 MySQL AB
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Translation done in 2003 by Egor Egorov; Ensita.NET, http://www.ensita.net/
- This file is public domain and comes with NO WARRANTY of any kind */
+*/
/* charset: KOI8-R */
character-set=koi8r
@@ -81,7 +97,7 @@ character-set=koi8r
" BLOB '%-.64s' ",
" '%-.64s' ( = %d). BLOB ",
" : , ",
-"%s: .\n: '%s' : '%s' : %d\n",
+"%s: .\n: '%s' : '%s' : %d",
"%s: \n",
"%s: %d. !\n",
"%s: \n",
@@ -306,7 +322,8 @@ character-set=koi8r
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/serbian/errmsg.txt b/sql/share/serbian/errmsg.txt
index 4039268f446..b1fa4d86b54 100644
--- a/sql/share/serbian/errmsg.txt
+++ b/sql/share/serbian/errmsg.txt
@@ -310,7 +310,8 @@ character-set=cp1250
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt
index 9be5ce01d6a..9570ba1fef6 100644
--- a/sql/share/slovak/errmsg.txt
+++ b/sql/share/slovak/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
Translated from both E n g l i s h & C z e c h error messages
@@ -87,7 +100,7 @@ character-set=latin2
"Blob pole '%-.64s' neme by pouit ako k",
"Prli vek dka pre pole '%-.64s' (maximum = %d). Pouite BLOB",
"Mete ma iba jedno AUTO pole a to mus by definovan ako k",
-"%s: pripraven na spojenie\n",
+"%s: pripraven na spojenie",
"%s: normlne ukonenie\n",
"%s: prijat signl %d, ukonenie (Abort)!\n",
"%s: prca ukonen\n",
@@ -312,7 +325,8 @@ character-set=latin2
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt
index dc15f8b8d5e..4058fcc7c4e 100644
--- a/sql/share/spanish/errmsg.txt
+++ b/sql/share/spanish/errmsg.txt
@@ -1,5 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
Traduccion por Miguel Angel Fernandez Roiz -- LoboCom Sistemas, s.l.
From June 28, 2001 translated by Miguel Solorzano miguel@mysql.com */
@@ -81,7 +96,7 @@ character-set=latin1
"La columna Blob '%-.64s' no puede ser usada en una declaracion de clave",
"Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar",
"Puede ser solamente un campo automatico y este debe ser definido como una clave",
-"%s: preparado para conexiones\n",
+"%s: preparado para conexiones",
"%s: Apagado normal\n",
"%s: Recibiendo signal %d. Abortando!\n",
"%s: Apagado completado\n",
@@ -306,7 +321,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD
deleted file mode 100644
index 3dd14c8b613..00000000000
--- a/sql/share/swedish/errmsg.OLD
+++ /dev/null
@@ -1,221 +0,0 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
-
-"hashchk",
-"isamchk",
-"NO",
-"YES",
-"Kan inte skapa filen: '%-.64s' (Felkod: %d)",
-"Kan inte skapa tabellen: '%-.64s' (Felkod: %d)",
-"Kan inte skapa databasen '%-.64s'. (Felkod: %d)",
-"Databasen '%-.64s' existerar redan",
-"Kan inte radera databasen '%-.64s'. Databasen finns inte",
-"Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)",
-"Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)",
-"Kan inte radera filen: '%-.64s' (Felkod: %d)",
-"Hittar inte posten i systemregistret",
-"Kan inte lsa filinformationen (stat) frn '%-.64s' (Felkod: %d)",
-"Kan inte inte lsa aktivt bibliotek. (Felkod: %d)",
-"Kan inte lsa filen. (Felkod: %d)",
-"Kan inte anvnda: '%-.64s'. (Felkod: %d)",
-"Hittar inte filen: '%-.64s'. (Felkod: %d)",
-"Kan inte lsa frn bibliotek '%-.64s'. (Felkod: %d)",
-"Kan inte byta till: '%-.64s'. (Felkod: %d)",
-"Posten har frndrats sedan den lstes i register '%-.64s'",
-"Disken r full (%s). Vntar tills det finns ledigt utrymme....",
-"Kan inte skriva, dubbel sknyckel i register '%-.64s'",
-"Fick fel vid stngning av '%-.64s' (Felkod: %d)",
-"Fick fel vid lsning av '%-.64s' (Felkod %d)",
-"Kan inte byta namn frn '%-.64s' till '%-.64s' (Felkod: %d)",
-"Fick fel vid skrivning till '%-.64s' (Felkod %d)",
-"'%-.64s' r lst mot anvndning",
-"Sorteringen avbruten",
-"Formulr '%-.64s' finns inte i '%-.64s'",
-"Fick felkod %d frn databashanteraren",
-"Registrets databas har inte denna facilitet",
-"Hittar inte posten",
-"Felaktig fil: '%-.64s'",
-"Fatalt fel vid hantering av register '%-.64s'. Kr en reparation",
-"Gammal nyckelfil '%-.64s'; Reparera registret",
-"'%-.64s' r skyddad mot frndring",
-"Ovntat slut p minnet, starta om programmet och frsk p nytt (Behvde %d bytes)",
-"Sorteringsbufferten rcker inte till. Kontrollera startparametrarna",
-"Ovntat filslut vid lsning frn '%-.64s' (Felkod: %d)",
-"Fr mnga anslutningar",
-"Fick slut p minnet. Kontrollera ifall mysqld eller ngon annan process anvnder allt tillgngligt minne. Ifall inte, frsk anvnda 'ulimit' eller allokera mera swap",
-"Kan inte hitta 'hostname' fr din adress",
-"Fel vid initiering av kommunikationen med klienten",
-"Anvndare '%-.32s@%-.64s' r ej berttigad att anvnda databasen %-.64s",
-"Anvndare '%-.32s@%-.64s' r ej berttigad att logga in (Anvnder lsen: %s)",
-"Ingen databas i anvndning",
-"Oknt commando",
-"Kolumn '%-.64s' fr inte vara NULL",
-"Oknd database '%-.64s'",
-"Tabellen '%-.64s' finns redan",
-"Oknd tabell '%-.64s'",
-"Kolumn: '%-.64s' i %s r inte unik",
-"Servern gr nu ned",
-"Oknd kolumn '%-.64s' i %s",
-"'%-.64s' finns inte i GROUP BY",
-"Kan inte anvnda GROUP BY med '%-.64s'",
-"Kommandot har bde sum functions och enkla funktioner",
-"Antalet kolumner motsvarar inte antalet vrden",
-"Kolumn namn '%-.64s' r fr lngt",
-"Kolumn namn '%-64s finns flera gnger",
-"Nyckel namn '%-.64s' finns flera gnger",
-"Dubbel nyckel '%-.64s' fr nyckel: %d",
-"Felaktigt kolumn typ fr kolumn: '%-.64s'",
-"%s nra '%-.64s' p rad %d",
-"Frgan var tom",
-"Icke unikt tabell/alias: '%-.64s'",
-"Ogiltigt DEFAULT vrde fr '%-.64s'",
-"Flera PRIMARY KEY anvnda",
-"Fr mnga nycklar anvnda. Man fr ha hgst %d nycklar",
-"Fr mnga nyckel delar anvnda. Man fr ha hgst %d nyckeldelar",
-"Fr lng nyckel. Hgsta tilltna nyckellngd r %d",
-"Nyckel kolumn '%-.64s' finns inte",
-"En BLOB '%-.64s' kan inte vara nyckel med den anvnda tabellen typen",
-"Fr stor kolumnlngd angiven fr '%-.64s' (max= %d). Anvnd en BLOB instllet",
-"Det fr finnas endast ett AUTO_INCREMENT flt och detta mste vara en nyckel",
-"%s: klar att ta emot klienter\n",
-"%s: Normal avslutning\n",
-"%s: Fick signal %d. Avslutar!\n",
-"%s: Avslutning klar\n",
-"%s: Stnger av trd %ld anvndare: '%-.64s'\n",
-"Kan inte skapa IP socket",
-"Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen",
-"Flt separatorerna r inte emotsgande eller fr lnga. Kontrollera mot manualen",
-"Man kan inte anvnda fast radlngd med blobs. Anvnd 'fields terminated by'."
-"Textfilen '%' mste finnas i databas biblioteket eller vara lsbar fr alla",
-"Filen '%-.64s' existerar redan",
-"Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld",
-"Rader: %ld Dubletter: %ld",
-"Felaktig delnyckel. Nyckeldelen r inte en strng eller den angivna lngden r lngre n kolumnlngden",
-"Man kan inte radera alla flt med ALTER TABLE. Anvnd DROP TABLE istllet",
-"Kan inte ta bort '%-.64s'. Kontrollera att fltet/nyckel finns",
-"Rader: %ld Dubletter: %ld Varningar: %ld",
-"INSERT table '%-.64s' fr inte finnas i FROM tabell-listan",
-"Finns inget thread med id %lu",
-"Du r inte gare till thread %lu",
-"Inga tabeller angivna",
-"Fr mnga alternativ till kolumn %s fr SET",
-"Kan inte generera ett unikt filnamn %s.(1-999)\n",
-"Tabell '%-.64s' kan inte uppdateras emedan den r lst fr lsning",
-"Tabell '%-.64s' r inte lst med LOCK TABLES",
-"BLOB flt '%-.64s' kan inte ha ett DEFAULT vrde"
-"Felaktigt databas namn '%-.64s'",
-"Felaktigt tabell namn '%-.64s'",
-"Den angivna frgan skulle troligen ta mycket long tid! Kontrollar din WHERE och anvnd SET OPTION SQL_BIG_SELECTS=1 ifall du vill hantera stora joins",
-"Oidentifierat fel",
-"Oknd procedur: %s",
-"Felaktigt antal parametrar till procedur %s",
-"Felaktiga parametrar till procedur %s",
-"Oknd tabell '%-.64s' i '%-.64s'",
-"Flt '%-.64s' r redan anvnt",
-"Felaktig anvndning av SQL grupp function",
-"Tabell '%-.64s' har en extension som inte finns i denna version av MySQL",
-"Tabeller mste ha minst 1 kolumn",
-"Tabellen '%-.64s' r full",
-"Oknt karaktrset: '%-.64s'",
-"Fr mnga tabeller. MySQL can ha hgst %d tabeller i en och samma join"
-"Fr mnga flt",
-"Fr stor total rad lngd. Den hgst tilltna rad-lngden, frutom BLOBs, r %d. ndra ngra av dina flt till BLOB",
-"Trd-stacken tog slut: Har anvnt %ld av %ld bytes. Anvnd 'mysqld -O thread_stack=#' ifall du behver en strre stack",
-"Felaktigt referens i OUTER JOIN. Kontrollera ON uttrycket",
-"Kolumn '%-.32s' r anvnd med UNIQUE eller INDEX men r inte definerad med NOT NULL",
-"Kan inte ladda funktionen '%-.64s'",
-"Kan inte initialisera funktionen '%-.64s'; '%-.80s'",
-"Man fr inte ange skvg fr dynamiska bibliotek",
-"Funktionen '%-.64s' finns redan",
-"Kan inte ppna det dynamiska biblioteket '%-.64s' (Felkod: %d %s)",
-"Hittar inte funktionen '%-.64s' in det dynamiska biblioteket",
-"Funktionen '%-.64s' r inte definierad",
-"Denna dator '%-.64s' r blockerad pga mnga felaktig paket. Gr 'mysqladmin flush-hosts' fr att ta bort alla blockeringarna",
-"Denna dator '%-.64s' har inte privileger att anvnda denna MySQL server",
-"Du anvnder MySQL som en anonym anvndare och som sdan fr du inte ndra ditt lsenord",
-"Fr att ndra lsenord fr andra mste du ha rttigheter att uppdatera mysql databasen",
-"Hittade inte anvndaren i 'user' tabellen",
-"Rader: %ld Uppdaterade: %ld Varningar: %ld",
-"Kan inte skapa en ny trd (errno %d)"
-"Antalet kolumner motsvarar inte antalet vrden p rad: %ld",
-"Kunde inte stnga och ppna tabell: '%-.64s',
-"Felaktig anvnding av NULL",
-"Fix fel '%-.64s' frn REGEXP",
-"Man fr ha bde GROUP kolumner (MIN(),MAX(),COUNT()...) och flt i en frga om man inte har en GROUP BY del",
-"Det finns inget privilegium definierat fr anvndare '%-.32s' p '%-.64s'",
-"%-.16s ej tilltet fr '%-.32s@%-.64s' fr tabell '%-.64s'",
-"%-.16s ej tilltet fr '%-.32s@%-.64s'\n fr kolumn '%-.64s' i tabell '%-.64s'",
-"Felaktigt GRANT privilegium anvnt",
-"Felaktigt maskinnamn eller anvndarnamn anvnt med GRANT",
-"Det finns ingen tabell som heter '%-64s.%s'"
-"Det finns inget privilegium definierat fr anvndare '%-.32s' p '%-.64s' fr tabell '%-.64s'",
-"Du kan inte anvnda detta kommando med denna MySQL version",
-"Du har ngot fel i din syntax",
-"DELAYED INSERT trden kunde inte lsa tabell '%-.64s'",
-"Det finns redan 'max_delayed_threads' trdar i anvnding",
-"Avbrt lnken fr trd %ld till db: '%-.64s' anvndare: '%-.64s' (%s)",
-"Kommunkationspaketet r strre n 'max_allowed_packet'",
-"Fick lsfel frn klienten vid lsning frn 'PIPE'",
-"Fick fatalt fel frn 'fcntl()'",
-"Kommunikationspaketen kom i fel ordning",
-"Kunde inte packa up kommunikationspaketet",
-"Fick ett fel vid lsning frn klienten",
-"Fick 'timeout' vid lsning frn klienten",
-"Fick ett fel vid skrivning till klienten",
-"Fick 'timeout' vid skrivning till klienten",
-"Resultat strngen r lngre n max_allowed_packet",
-"Den anvnda tabell typen kan inte hantera BLOB/TEXT kolumner",
-"Den anvnda tabell typen kan inte hantera AUTO_INCREMENT kolumner",
-"INSERT DELAYED kan inte anvndas med tabell '%-.64s', emedan den r lst med LOCK TABLES",
-"Felaktigt column namn '%-.100s'",
-"Den anvnda tabell typen kan inte indexera kolumn '%-.64s'",
-"Tabellerna i MERGE tabellen r inte identiskt definierade",
-"Kan inte skriva till tabell '%-.64s'; UNIQUE test",
-"Du har inte angett en nyckel lngd fr BLOB '%-.64s'",
-"Alla delar av en PRIMARY KEY mste vara NOT NULL; Om du vill ha en nyckel med NULL, anvnd UNIQUE istllet",
-"Resultet bestod av mera n en rad",
-"Denna tabell typ krver en PRIMARY KEY",
-"Denna version av MySQL r inte kompilerad med RAID",
-"Du anvnder 'sker uppdaterings mod' och frskte uppdatera en table utan en WHERE sats som anvnder sig av en nyckel",
-"Nyckel '%-.64s' finns inte in tabell '%-.64s'",
-"Kan inte ppna tabellen",
-"Tabellhanteraren fr denna tabell kan inte gra check/repair",
-"Du fr inte utfra detta kommando i en transaktion",
-"Fick fel %d vid COMMIT",
-"Fick fel %d vid ROLLBACK",
-"Fick fel %d vid FLUSH_LOGS",
-"Fick fel %d vid CHECKPOINT",
-"Avbrt lnken fr trd %ld till db: '%-.64s' anvndare: '%-.32s' Host: '%-.64s' (%.-64s)",
-"Tabellhanteraren klarar inte en binr kopiering av tabellen",
-"Binrloggen stngdes medan vi gjorde FLUSH MASTER",
-"Failed rebuilding the index of dumped table '%-.64s'",
-"Fick en master: '%-.64s'",
-"Fick ntverksfel vid lsning frn master",
-"Fick ntverksfel vid skrivning till master",
-"Hittar inte ett FULLTEXT index i kolumnlistan",
-"Kan inte exekvera kommandot emedan du har en lst tabell eller an aktiv transaktion",
-"Oknd system variabel '%-.64'",
-"Tabell '%-.64s' r crashad och br repareras med REPAIR TABLE",
-"Tabell '%-.64s' r crashad och senast (automatiska?) reparation misslyckades",
-"Warning: Ngra icke transaktionella tabeller kunde inte terstllas vid ROLLBACK",
-"Transaktionen krvde mera n 'max_binlog_cache_size' minne. Utka denna mysqld variabel och frsk p nytt",
-"Denna operation kan inte gras under replikering; Gr SLAVE STOP frst",
-"Denna operation kan endast gras under replikering; Konfigurera slaven och gr SLAVE START",
-"Servern r inte konfigurerade som en replikations slav. ndra konfigurationsfilen eller gr CHANGE MASTER TO",
-"Kunde inte initializera replications-strukturerna. Kontrollera privilegerna fr 'master.info'",
-"Kunde inte starta en trd fr replikering",
-"Anvndare '%-.64s' har redan 'max_user_connections' aktiva inloggningar",
-"Man kan endast anvnda konstant-uttryck med SET",
-"Fick inte ett ls i tid",
-"Antal ls verskrider antalet reserverade ls",
-"Updaterings-ls kan inte gras nr man anvnder READ UNCOMMITTED",
-"DROP DATABASE r inte tilltet nr man har ett globalt ls-ls",
-"CREATE DATABASE r inte tilltet nr man har ett globalt ls-ls",
-"Felaktiga argument till %s",
-"%-.32s@%-.64s har inte rttigheter att skapa nya anvndare",
-"Fick fel vid anslutning till master: %-.128s",
-"Fick fel vid utfrande av command p mastern: %-.128s",
-"Fick fel vid utfrande av %s: %-.128s",
-"Felaktig anvnding av %s and %s",
-"SELECT kommandona har olika antal kolumner"
-"Kan inte utfra kommandot emedan du har ett READ ls",
diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt
index ee5436c3b80..e03af70d03c 100644
--- a/sql/share/swedish/errmsg.txt
+++ b/sql/share/swedish/errmsg.txt
@@ -1,5 +1,18 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- This file is public domain and comes with NO WARRANTY of any kind */
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
character-set=latin1
@@ -79,7 +92,7 @@ character-set=latin1
"En BLOB '%-.64s' kan inte vara nyckel med den anvnda tabelltypen",
"Fr stor kolumnlngd angiven fr '%-.64s' (max= %d). Anvnd en BLOB instllet",
"Det fr finnas endast ett AUTO_INCREMENT-flt och detta mste vara en nyckel",
-"%s: klar att ta emot klienter\n",
+"%s: klar att ta emot klienter",
"%s: Normal avslutning\n",
"%s: Fick signal %d. Avslutar!\n",
"%s: Avslutning klar\n",
@@ -304,7 +317,8 @@ character-set=latin1
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/share/ukrainian/errmsg.txt b/sql/share/ukrainian/errmsg.txt
index cb1d99e2fec..a919422a6cf 100644
--- a/sql/share/ukrainian/errmsg.txt
+++ b/sql/share/ukrainian/errmsg.txt
@@ -1,6 +1,20 @@
-/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
- * This is public domain and comes with NO WARRANTY of any kind
- *
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
* Ukrainian translation by Roman Festchook <roma@orta.zt.ua>
* Encoding: KOI8-U
* Version: 13/09/2001 mysql-3.23.41
@@ -84,7 +98,7 @@ character-set=koi8u
"BLOB '%-.64s' Φ Ц æ",
" '%-.64s' (max = %d). BLOB",
"צ æ; , ",
-"%s: '!\n",
+"%s: '!",
"%s: \n",
"%s: %d. !\n",
"%s: \n",
@@ -309,7 +323,8 @@ character-set=koi8u
"Unknown or incorrect time zone: '%-.64s'",
"Invalid TIMESTAMP value in column '%s' at row %ld",
"Invalid %s character string: '%.64s'",
-"Result of %s() was larger than max_allowed_packet (%d) - truncated"
+"Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+"Conflicting declarations: '%s%s' and '%s%s'"
"Can't create a %s from within another stored routine"
"%s %s already exists"
"%s %s does not exist"
diff --git a/sql/slave.cc b/sql/slave.cc
index 0defbe35163..d7b60107096 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -749,7 +749,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
pthread_mutex_unlock(start_lock);
DBUG_RETURN(ER_SLAVE_THREAD);
}
- if (start_cond && cond_lock)
+ if (start_cond && cond_lock) // caller has cond_lock
{
THD* thd = current_thd;
while (start_id == *slave_run_id)
@@ -759,11 +759,9 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
"Waiting for slave thread to start");
pthread_cond_wait(start_cond,cond_lock);
thd->exit_cond(old_msg);
+ pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released
if (thd->killed)
- {
- pthread_mutex_unlock(cond_lock);
DBUG_RETURN(thd->killed_errno());
- }
}
}
if (start_lock)
@@ -1424,6 +1422,12 @@ not always make sense; please check the manual before using it).";
/*
Check that the master's global character_set_server and ours are the same.
Not fatal if query fails (old master?).
+ Note that we don't check for equality of global character_set_client and
+ collation_connection (neither do we prevent their setting in
+ set_var.cc). That's because from what I (Guilhem) have tested, the global
+ values of these 2 are never used (new connections don't use them).
+ We don't test equality of global collation_database either as it is
+ going to be deprecated (made read-only) in 4.1 very soon.
*/
if (!mysql_real_query(mysql, "SELECT @@GLOBAL.COLLATION_SERVER", 32) &&
(master_res= mysql_store_result(mysql)))
@@ -1897,7 +1901,6 @@ Waiting for the slave SQL thread to free enough relay log space");
!rli->ignore_log_space_limit)
pthread_cond_wait(&rli->log_space_cond, &rli->log_space_lock);
thd->exit_cond(save_proc_info);
- pthread_mutex_unlock(&rli->log_space_lock);
DBUG_RETURN(slave_killed);
}
@@ -2574,6 +2577,9 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
(long) timeout));
pthread_mutex_lock(&data_lock);
+ const char *msg= thd->enter_cond(&data_cond, &data_lock,
+ "Waiting for the slave SQL thread to "
+ "advance position");
/*
This function will abort when it notices that some CHANGE MASTER or
RESET MASTER has changed the master info.
@@ -2675,9 +2681,6 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
//wait for master update, with optional timeout.
DBUG_PRINT("info",("Waiting for master update"));
- const char* msg = thd->enter_cond(&data_cond, &data_lock,
- "Waiting for the slave SQL thread to \
-advance position");
/*
We are going to pthread_cond_(timed)wait(); if the SQL thread stops it
will wake us up.
@@ -2699,8 +2702,7 @@ advance position");
}
else
pthread_cond_wait(&data_cond, &data_lock);
- DBUG_PRINT("info",("Got signal of master update"));
- thd->exit_cond(msg);
+ DBUG_PRINT("info",("Got signal of master update or timed out"));
if (error == ETIMEDOUT || error == ETIME)
{
error= -1;
@@ -2712,7 +2714,7 @@ advance position");
}
err:
- pthread_mutex_unlock(&data_lock);
+ thd->exit_cond(msg);
DBUG_PRINT("exit",("killed: %d abort: %d slave_running: %d \
improper_arguments: %d timed_out: %d",
thd->killed_errno(),
@@ -3305,6 +3307,9 @@ dump");
}
thd->proc_info= "Waiting to reconnect after a failed binlog dump request";
+#ifdef SIGNAL_WITH_VIO_CLOSE
+ thd->clear_active_vio();
+#endif
end_server(mysql);
/*
First time retry immediately, assuming that we can recover
@@ -3378,6 +3383,9 @@ max_allowed_packet",
goto err;
}
thd->proc_info = "Waiting to reconnect after a failed master event read";
+#ifdef SIGNAL_WITH_VIO_CLOSE
+ thd->clear_active_vio();
+#endif
end_server(mysql);
if (retry_count++)
{
@@ -4825,4 +4833,5 @@ template class I_List_iterator<i_string>;
template class I_List_iterator<i_string_pair>;
#endif
+
#endif /* HAVE_REPLICATION */
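
A minimal sketch of the waiting pattern the slave.cc hunks above converge on, assuming the revised THD::enter_cond()/exit_cond() contract from the sql_class.h change further down (exit_cond() now releases current_mutex itself, which is why the explicit pthread_mutex_unlock() calls disappear here). Names are illustrative, not actual slave.cc code:

    // Assumes the server's THD class and <pthread.h>; sketch only.
    static int wait_until_started(THD *thd, pthread_cond_t *cond,
                                  pthread_mutex_t *cond_lock,
                                  volatile bool *started)
    {
      pthread_mutex_lock(cond_lock);                    // mutex must be held first
      const char *old_msg= thd->enter_cond(cond, cond_lock,
                                           "Waiting for slave thread to start");
      while (!*started && !thd->killed)
        pthread_cond_wait(cond, cond_lock);
      thd->exit_cond(old_msg);                          // unlocks cond_lock itself
      return thd->killed ? thd->killed_errno() : 0;     // no extra unlock needed
    }

If the caller still needs the mutex after the wait (as start_slave_thread() does, to keep looping), it simply re-locks it, which is what the added pthread_mutex_lock(cond_lock) line above does.
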
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 0a970640e32..e7b4772c3ab 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -251,9 +251,9 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables)
{
global_system_variables.old_passwords= 1;
pthread_mutex_unlock(&LOCK_global_system_variables);
- sql_print_error("mysql.user table is not updated to new password format; "
- "Disabling new password usage until "
- "mysql_fix_privilege_tables is run");
+ sql_print_warning("mysql.user table is not updated to new password format; "
+ "Disabling new password usage until "
+ "mysql_fix_privilege_tables is run");
}
thd->variables.old_passwords= 1;
}
@@ -543,22 +543,28 @@ static ulong get_sort(uint count,...)
va_start(args,count);
ulong sort=0;
+ /* Should not use this function with more than 4 arguments for compare. */
+ DBUG_ASSERT(count <= 4);
+
while (count--)
{
- char *str=va_arg(args,char*);
- uint chars=0,wild=0;
+ char *start, *str= va_arg(args,char*);
+ uint chars= 0;
+ uint wild_pos= 0; /* first wildcard position */
- if (str)
+ if ((start= str))
{
for (; *str ; str++)
{
if (*str == wild_many || *str == wild_one || *str == wild_prefix)
- wild++;
- else
- chars++;
+ {
+ wild_pos= (uint) (str - start) + 1;
+ break;
+ }
+ chars= 128; // Marker that chars existed
}
}
- sort= (sort << 8) + (wild ? 1 : chars ? 2 : 0);
+ sort= (sort << 8) + (wild_pos ? min(wild_pos, 127) : chars);
}
va_end(args);
return sort;
@@ -1208,13 +1214,14 @@ bool acl_check_host(const char *host, const char *ip)
1 ERROR ; In this case the error is sent to the client.
*/
-bool check_change_password(THD *thd, const char *host, const char *user)
+bool check_change_password(THD *thd, const char *host, const char *user,
+ char *new_password)
{
if (!initialized)
{
net_printf(thd,ER_OPTION_PREVENTS_STATEMENT,
- "--skip-grant-tables"); /* purecov: inspected */
- return(1); /* purecov: inspected */
+ "--skip-grant-tables");
+ return(1);
}
if (!thd->slave_thread &&
(strcmp(thd->user,user) ||
@@ -1228,6 +1235,15 @@ bool check_change_password(THD *thd, const char *host, const char *user)
send_error(thd, ER_PASSWORD_ANONYMOUS_USER);
return(1);
}
+ uint len=strlen(new_password);
+ if (len && len != SCRAMBLED_PASSWORD_CHAR_LENGTH &&
+ len != SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ {
+ net_printf(thd, 0,
+ "Password hash should be a %d-digit hexadecimal number",
+ SCRAMBLED_PASSWORD_CHAR_LENGTH);
+ return -1;
+ }
return(0);
}
@@ -1255,7 +1271,7 @@ bool change_password(THD *thd, const char *host, const char *user,
host,user,new_password));
DBUG_ASSERT(host != 0); // Ensured by parent
- if (check_change_password(thd, host, user))
+ if (check_change_password(thd, host, user, new_password))
DBUG_RETURN(1);
VOID(pthread_mutex_lock(&acl_cache->lock));
@@ -1513,7 +1529,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
if (combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH &&
combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
{
- my_printf_error(ER_PASSWORD_NO_MATCH,
+ my_printf_error(ER_UNKNOWN_ERROR,
"Password hash should be a %d-digit hexadecimal number",
MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH);
DBUG_RETURN(-1);
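
The get_sort() hunk above switches the per-argument weight from a count of wildcard characters to the position of the first wildcard, so a host or user pattern that stays literal for longer counts as more specific. A standalone sketch of that weight, assuming the usual wildcard characters ('%', '_' and the '\' escape prefix) that wild_many/wild_one/wild_prefix stand for:

    #include <algorithm>
    // Sketch only; mirrors the weight computed per argument inside get_sort().
    static unsigned sort_weight(const char *str)
    {
      if (!str)
        return 0;                                        // NULL pattern: least specific
      unsigned chars= 0;
      for (const char *p= str; *p; p++)
      {
        if (*p == '%' || *p == '_' || *p == '\\')        // first wildcard found
          return std::min((unsigned) (p - str) + 1, 127u);
        chars= 128;                                      // literal characters seen
      }
      return chars;                                      // 128 if non-empty and wildcard-free
    }

get_sort() then packs one such byte per argument into 'sort', so plain literals (128) count as more specific than any wildcarded pattern (1..127), and empty patterns (0) are the least specific.
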
diff --git a/sql/sql_acl.h b/sql/sql_acl.h
index 77702cc375a..390106c1546 100644
--- a/sql/sql_acl.h
+++ b/sql/sql_acl.h
@@ -154,7 +154,8 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, const char *passwd,
uint passwd_len);
int acl_getroot_no_password(THD *thd);
bool acl_check_host(const char *host, const char *ip);
-bool check_change_password(THD *thd, const char *host, const char *user);
+bool check_change_password(THD *thd, const char *host, const char *user,
+ char *password);
bool change_password(THD *thd, const char *host, const char *user,
char *password);
int mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list,
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index 68f7d45e81c..3f75dadb6f0 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -34,9 +34,6 @@
#define MAX_TREEMEM 8192
#define MAX_TREE_ELEMENTS 256
-#define UINT_MAX16 0xffff
-#define UINT_MAX24 0xffffff
-#define UINT_MAX32 0xffffffff
int sortcmp2(void* cmp_arg __attribute__((unused)),
const String *a,const String *b)
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 61cbe432909..0aee91af0da 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1719,6 +1719,26 @@ int open_and_lock_tables(THD *thd, TABLE_LIST *tables)
uint counter;
if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter))
DBUG_RETURN(thd->net.report_error ? -1 : 1); /* purecov: inspected */
+ /*
+ Let us propagate pointers to open tables from global table list
+ to table lists in particular selects if needed.
+ */
+ if (thd->lex->all_selects_list->next_select_in_list() ||
+ thd->lex->time_zone_tables_used)
+ {
+ for (SELECT_LEX *sl= thd->lex->all_selects_list;
+ sl;
+ sl= sl->next_select_in_list())
+ {
+ for (TABLE_LIST *cursor= (TABLE_LIST *) sl->table_list.first;
+ cursor;
+ cursor=cursor->next)
+ {
+ if (cursor->table_list)
+ cursor->table= cursor->table_list->table;
+ }
+ }
+ }
DBUG_RETURN(mysql_handle_derived(thd->lex));
}
@@ -1929,7 +1949,9 @@ find_field_in_table(THD *thd, TABLE_LIST *table_list,
else
{
Item_arena *arena= thd->current_arena, backup;
- if (arena)
+ if (!arena->is_stmt_prepare())
+ arena= 0;
+ else
thd->set_n_backup_item_arena(arena, &backup);
*ref= new Item_ref(trans + i, 0, table_list->view_name.str,
item_name);
@@ -2261,10 +2283,12 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
find_item_error_report_type report_error)
{
List_iterator<Item> li(items);
- Item **found=0,*item;
+ Item **found=0, **found_unaliased= 0, *item;
const char *db_name=0;
const char *field_name=0;
const char *table_name=0;
+ bool found_unaliased_non_uniq= 0;
+ uint unaliased_counter;
if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM)
{
field_name= ((Item_ident*) find)->field_name;
@@ -2277,42 +2301,93 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
if (field_name && item->type() == Item::FIELD_ITEM)
{
Item_field *item_field= (Item_field*) item;
+
/*
In case of group_concat() with ORDER BY condition in the QUERY
item_field can be field of temporary table without item name
(if this field created from expression argument of group_concat()),
=> we have to check presence of name before compare
*/
- if (item_field->name &&
- (!my_strcasecmp(system_charset_info, item_field->name, field_name) ||
- !my_strcasecmp(system_charset_info,
- item_field->field_name, field_name)))
+ if (!item_field->name)
+ continue;
+
+ if (table_name)
{
- if (!table_name)
- {
- if (found)
- {
- if ((*found)->eq(item,0))
- continue; // Same field twice (Access?)
- if (report_error != IGNORE_ERRORS)
- my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0),
- find->full_name(), current_thd->where);
- return (Item**) 0;
- }
- found= li.ref();
- *counter= i;
- }
- else
- {
- if (!strcmp(item_field->table_name,table_name) &&
- (!db_name || (db_name && item_field->db_name &&
- !strcmp(item_field->db_name, db_name))))
- {
- found= li.ref();
- *counter= i;
- break;
- }
- }
+ /*
+ If table name is specified we should find field 'field_name' in
+ table 'table_name'. According to the SQL standard we should ignore
+ aliases in this case. Note that we should prefer fields from the
+ select list over other fields from the tables participating in
+ this select in case of ambiguity.
+
+ We use strcmp for table names and database names as these may be
+ case sensitive.
+ In cases where they are not case sensitive, they are always in lower
+ case.
+ */
+ if (!my_strcasecmp(system_charset_info, item_field->field_name,
+ field_name) &&
+ !strcmp(item_field->table_name, table_name) &&
+ (!db_name || (item_field->db_name &&
+ !strcmp(item_field->db_name, db_name))))
+ {
+ if (found)
+ {
+ if ((*found)->eq(item, 0))
+ continue; // Same field twice
+ if (report_error != IGNORE_ERRORS)
+ my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR),
+ MYF(0), find->full_name(), current_thd->where);
+ return (Item**) 0;
+ }
+ found= li.ref();
+ *counter= i;
+ if (db_name)
+ break; // Perfect match
+ }
+ }
+ else if (!my_strcasecmp(system_charset_info, item_field->name,
+ field_name))
+ {
+ /*
+ If no table name was given we should scan through aliases
+ (or non-aliased fields) first. We also check the unaliased
+ name of the field in the next else-if, to be able to instantly
+ find a field (hidden by an alias) if no suitable alias (or
+ non-aliased field) was found.
+ */
+ if (found)
+ {
+ if ((*found)->eq(item, 0))
+ continue; // Same field twice
+ if (report_error != IGNORE_ERRORS)
+ my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR),
+ MYF(0), find->full_name(), current_thd->where);
+ return (Item**) 0;
+ }
+ found= li.ref();
+ *counter= i;
+ }
+ else if (!my_strcasecmp(system_charset_info, item_field->field_name,
+ field_name))
+ {
+ /*
+ We will use the un-aliased field, or react to such ambiguities,
+ only if we are unable to find an aliased field.
+ Again, if there is an ambiguity with a field outside of the select
+ list, we should prefer fields from the select list.
+ */
+ if (found_unaliased)
+ {
+ if ((*found_unaliased)->eq(item, 0))
+ continue; // Same field twice
+ found_unaliased_non_uniq= 1;
+ }
+ else
+ {
+ found_unaliased= li.ref();
+ unaliased_counter= i;
+ }
}
}
else if (!table_name && (item->eq(find,0) ||
@@ -2325,9 +2400,24 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
break;
}
}
+ if (!found)
+ {
+ if (found_unaliased_non_uniq)
+ {
+ if (report_error != IGNORE_ERRORS)
+ my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), MYF(0),
+ find->full_name(), current_thd->where);
+ return (Item **) 0;
+ }
+ if (found_unaliased)
+ {
+ found= found_unaliased;
+ *counter= unaliased_counter;
+ }
+ }
if (found)
return found;
- else if (report_error != REPORT_EXCEPT_NOT_FOUND)
+ if (report_error != REPORT_EXCEPT_NOT_FOUND)
{
if (report_error == REPORT_ALL_ERRORS)
my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0),
@@ -2346,9 +2436,12 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list,
uint wild_num)
{
+ Item *item;
if (!wild_num)
return 0;
Item_arena *arena= thd->current_arena, backup;
+ if (!arena->is_stmt_prepare())
+ arena= 0; // For easier test
/*
If we are in preparing prepared statement phase then we have change
@@ -2356,9 +2449,9 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
*/
if (arena)
thd->set_n_backup_item_arena(arena, &backup);
- reg2 Item *item;
+
List_iterator<Item> it(fields);
- while ( wild_num && (item= it++))
+ while (wild_num && (item= it++))
{
if (item->type() == Item::FIELD_ITEM && ((Item_field*) item)->field_name &&
((Item_field*) item)->field_name[0] == '*' &&
@@ -2580,8 +2673,21 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name,
Field_iterator_table table_iter;
Field_iterator_view view_iter;
uint found;
+ char name_buff[NAME_LEN+1];
DBUG_ENTER("insert_fields");
+ if (db_name && lower_case_table_names)
+ {
+ /*
+ convert database to lower case for comparison
+ We can't do this in Item_field as this would change the
+ 'name' of the item which may be used in the select list
+ */
+ strmake(name_buff, db_name, sizeof(name_buff)-1);
+ my_casedn_str(files_charset_info, name_buff);
+ db_name= name_buff;
+ }
+
found= 0;
for (; tables; tables= tables->next_local)
{
@@ -2721,7 +2827,7 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name,
field->query_id=thd->query_id;
table->used_keys.intersect(field->part_of_key);
}
- else if (thd->current_arena &&
+ else if (thd->current_arena->is_stmt_prepare() &&
thd->lex->current_select->first_execution)
{
Item_field *item= new Item_field(thd->strdup(tables->view_db.str),
@@ -2761,13 +2867,14 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
{
table_map not_null_tables= 0;
SELECT_LEX *select_lex= thd->lex->current_select;
- Item_arena *arena= ((thd->current_arena &&
- !select_lex->conds_processed_with_permanent_arena) ?
- thd->current_arena :
- 0);
+ Item_arena *arena= thd->current_arena;
Item_arena backup;
DBUG_ENTER("setup_conds");
+ if (select_lex->conds_processed_with_permanent_arena ||
+ !arena->is_stmt_prepare())
+ arena= 0; // For easier test
+
thd->set_query_id=1;
select_lex->cond_count= 0;
@@ -2779,7 +2886,6 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
DBUG_RETURN(1);
}
-
/* Check if we are using outer joins */
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
@@ -2900,31 +3006,34 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
// to prevent natural join processing during PS re-execution
embedding->natural_join= 0;
- COND *on_expr= cond_and;
- on_expr->fix_fields(thd, 0, &on_expr);
- if (!embedded->outer_join) // Not left join
- {
- *conds= and_conds(*conds, cond_and);
- // fix_fields() should be made with temporary memory pool
- if (arena)
- thd->restore_backup_item_arena(arena, &backup);
- if (*conds && !(*conds)->fixed)
- {
- if ((*conds)->fix_fields(thd, tables, conds))
- DBUG_RETURN(1);
- }
- }
- else
+ if (cond_and->list.elements)
{
- embedded->on_expr= and_conds(embedded->on_expr, cond_and);
- // fix_fields() should be made with temporary memory pool
- if (arena)
- thd->restore_backup_item_arena(arena, &backup);
- if (embedded->on_expr && !embedded->on_expr->fixed)
- {
- if (embedded->on_expr->fix_fields(thd, tables, &table->on_expr))
- DBUG_RETURN(1);
- }
+ COND *on_expr= cond_and;
+ on_expr->fix_fields(thd, 0, &on_expr);
+ if (!embedded->outer_join) // Not left join
+ {
+ *conds= and_conds(*conds, cond_and);
+ // fix_fields() should be made with temporary memory pool
+ if (arena)
+ thd->restore_backup_item_arena(arena, &backup);
+ if (*conds && !(*conds)->fixed)
+ {
+ if ((*conds)->fix_fields(thd, tables, conds))
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ {
+ embedded->on_expr= and_conds(embedded->on_expr, cond_and);
+ // fix_fields() should be made with temporary memory pool
+ if (arena)
+ thd->restore_backup_item_arena(arena, &backup);
+ if (embedded->on_expr && !embedded->on_expr->fixed)
+ {
+ if (embedded->on_expr->fix_fields(thd, tables, &table->on_expr))
+ DBUG_RETURN(1);
+ }
+ }
}
}
embedding= embedded->embedding;
@@ -2972,7 +3081,7 @@ fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors)
Field *rfield= field->field;
TABLE *table= rfield->table;
if (rfield == table->next_number_field)
- table->auto_increment_field_not_null= true;
+ table->auto_increment_field_not_null= TRUE;
if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors)
DBUG_RETURN(1);
}
@@ -2993,7 +3102,7 @@ fill_record(Field **ptr,List<Item> &values, bool ignore_errors)
value=v++;
TABLE *table= field->table;
if (field == table->next_number_field)
- table->auto_increment_field_not_null= true;
+ table->auto_increment_field_not_null= TRUE;
if ((value->save_in_field(field, 0) < 0) && !ignore_errors)
DBUG_RETURN(1);
}
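
The rewritten find_item_in_list() above gives unqualified column references a two-level resolution: a match on the select-list name (the alias, or the field name when there is no alias) wins outright, a match on the underlying field name of an aliased item is only remembered as a fallback, and ambiguity is reported only at the level that finally wins. A toy model of just that precedence, with Items reduced to name/field string pairs and the REPORT_* error modes elided:

    #include <strings.h>   // strcasecmp
    // Toy model only; the real code works on Item lists and Item_ident names.
    struct Col { const char *name; const char *field; };  // name: alias, or field name if none

    // >= 0: index of the match, -1: not found, -2: ambiguous (ER_NON_UNIQ_ERROR)
    static int resolve_unqualified(const char *want, const Col *cols, int n)
    {
      int by_name= -1, by_field= -1;
      bool field_dup= false;
      for (int i= 0; i < n; i++)
      {
        if (strcasecmp(cols[i].name, want) == 0)
        {
          if (by_name >= 0)
            return -2;                      // duplicate select-list name: error
          by_name= i;
        }
        else if (strcasecmp(cols[i].field, want) == 0)
        {
          if (by_field >= 0)
            field_dup= true;                // deferred: an alias match may still win
          else
            by_field= i;
        }
      }
      if (by_name >= 0)
        return by_name;                     // select-list name preferred
      if (field_dup)
        return -2;                          // ambiguity only matters if no alias won
      return by_field;                      // field hidden behind an alias
    }
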
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index da64479abf2..456b58ee95e 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -158,12 +158,13 @@ bool foreign_key_prefix(Key *a, Key *b)
** Thread specific functions
****************************************************************************/
-THD::THD():user_time(0), current_arena(0), is_fatal_error(0),
- last_insert_id_used(0),
- insert_id_used(0), rand_used(0), time_zone_used(0),
- in_lock_tables(0), global_read_lock(0), bootstrap(0),
- spcont(NULL)
+THD::THD()
+ :user_time(0), global_read_lock(0), is_fatal_error(0),
+ last_insert_id_used(0),
+ insert_id_used(0), rand_used(0), time_zone_used(0),
+ in_lock_tables(0), bootstrap(0), spcont(NULL)
{
+ current_arena= this;
host= user= priv_user= db= ip= 0;
catalog= (char*)"std"; // the only catalog we have for now
host_or_ip= "connecting host";
@@ -450,8 +451,21 @@ void THD::awake(THD::killed_state state_to_set)
exits the cond in the time between read and broadcast, but that is
ok since all we want to do is to make the victim thread get out
of waiting on current_cond.
+ If we see a non-zero current_cond: it cannot be an old value (because
+ then exit_cond() should have run and it can't because we have mutex); so
+ it is the true value but maybe current_mutex is not yet non-zero (we're
+ in the middle of enter_cond() and there is a "memory order
+ inversion"). So we test the mutex too to not lock 0.
+
+ Note that there is a small chance we fail to kill. If victim has locked
+ current_mutex, but hasn't yet entered enter_cond() (which means that
+ current_cond and current_mutex are 0), then the victim will not get
+ a signal and it may wait "forever" on the cond (until
+ we issue a second KILL or the status it's waiting for happens).
+ It's true that we have set its thd->killed but it may not
+ see it immediately and so may have time to reach the cond_wait().
*/
- if (mysys_var->current_cond)
+ if (mysys_var->current_cond && mysys_var->current_mutex)
{
pthread_mutex_lock(mysys_var->current_mutex);
pthread_cond_broadcast(mysys_var->current_cond);
@@ -710,6 +724,12 @@ void select_result::send_error(uint errcode,const char *err)
::send_error(thd, errcode, err);
}
+
+void select_result::cleanup()
+{
+ /* do nothing */
+}
+
static String default_line_term("\n",default_charset_info);
static String default_escaped("\\",default_charset_info);
static String default_field_term("\t",default_charset_info);
@@ -815,6 +835,32 @@ void select_to_file::send_error(uint errcode,const char *err)
}
+bool select_to_file::send_eof()
+{
+ int error= test(end_io_cache(&cache));
+ if (my_close(file,MYF(MY_WME)))
+ error= 1;
+ if (!error)
+ ::send_ok(thd,row_count);
+ file= -1;
+ return error;
+}
+
+
+void select_to_file::cleanup()
+{
+ /* In case of error send_eof() may not be called: close the file here. */
+ if (file >= 0)
+ {
+ (void) end_io_cache(&cache);
+ (void) my_close(file,MYF(0));
+ file= -1;
+ }
+ path[0]= '\0';
+ row_count= 0;
+}
+
+
select_to_file::~select_to_file()
{
if (file >= 0)
@@ -860,8 +906,10 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange,
#ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS
option|= MY_REPLACE_DIR; // Force use of db directory
#endif
- (void) fn_format(path, exchange->file_name, thd->db ? thd->db : "", "",
- option);
+
+ strxnmov(path, FN_REFLEN, mysql_real_data_home, thd->db ? thd->db : "",
+ NullS);
+ (void) fn_format(path, exchange->file_name, path, "", option);
if (!access(path, F_OK))
{
my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name);
@@ -1065,18 +1113,6 @@ err:
}
-bool select_export::send_eof()
-{
- int error=test(end_io_cache(&cache));
- if (my_close(file,MYF(MY_WME)))
- error=1;
- if (!error)
- ::send_ok(thd,row_count);
- file= -1;
- return error;
-}
-
-
/***************************************************************************
** Dump of select to a binary file
***************************************************************************/
@@ -1130,18 +1166,6 @@ err:
}
-bool select_dump::send_eof()
-{
- int error=test(end_io_cache(&cache));
- if (my_close(file,MYF(MY_WME)))
- error=1;
- if (!error)
- ::send_ok(thd,row_count);
- file= -1;
- return error;
-}
-
-
select_subselect::select_subselect(Item_subselect *item_arg)
{
item= item_arg;
@@ -1316,8 +1340,16 @@ int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
}
+void select_dumpvar::cleanup()
+{
+ vars.empty();
+ row_count=0;
+}
+
+
Item_arena::Item_arena(THD* thd)
- :free_list(0)
+ :free_list(0),
+ state(INITIALIZED)
{
init_sql_alloc(&mem_root,
thd->variables.query_alloc_block_size,
@@ -1325,18 +1357,28 @@ Item_arena::Item_arena(THD* thd)
}
+/* This constructor is called when Item_arena is a subobject of THD */
+
Item_arena::Item_arena()
- :free_list(0)
+ :free_list(0),
+ state(CONVENTIONAL_EXECUTION)
{
- bzero((char *) &mem_root, sizeof(mem_root));
+ clear_alloc_root(&mem_root);
}
Item_arena::Item_arena(bool init_mem_root)
- :free_list(0)
+ :free_list(0),
+ state(INITIALIZED)
{
if (init_mem_root)
- bzero((char *) &mem_root, sizeof(mem_root));
+ clear_alloc_root(&mem_root);
+}
+
+Item_arena::Type Item_arena::type() const
+{
+ DBUG_ASSERT(0); /* Should never be called */
+ return STATEMENT;
}
@@ -1380,7 +1422,7 @@ Statement::Statement()
}
-Statement::Type Statement::type() const
+Item_arena::Type Statement::type() const
{
return STATEMENT;
}
@@ -1398,6 +1440,36 @@ void Statement::set_statement(Statement *stmt)
}
+void
+Statement::set_n_backup_statement(Statement *stmt, Statement *backup)
+{
+ backup->set_statement(this);
+ set_statement(stmt);
+}
+
+
+void Statement::restore_backup_statement(Statement *stmt, Statement *backup)
+{
+ stmt->set_statement(this);
+ set_statement(backup);
+}
+
+
+void Statement::end_statement()
+{
+ /* Clean up SQL processing state to reuse this statement in the next query. */
+ lex_end(lex);
+ delete lex->result;
+ lex->result= 0;
+ free_items(free_list);
+ free_list= 0;
+ /*
+ Don't free mem_root, as mem_root is freed at the end of dispatch_command
+ (once for any command).
+ */
+}
+
+
void Item_arena::set_n_backup_item_arena(Item_arena *set, Item_arena *backup)
{
backup->set_item_arena(this);
@@ -1417,6 +1489,7 @@ void Item_arena::set_item_arena(Item_arena *set)
{
mem_root= set->mem_root;
free_list= set->free_list;
+ state= set->state;
}
Statement::~Statement()
@@ -1460,7 +1533,7 @@ Statement_map::Statement_map() :
hash_init(&st_hash, default_charset_info, START_STMT_HASH_SIZE, 0, 0,
get_statement_id_as_hash_key,
delete_statement_as_hash_key, MYF(0));
- hash_init(&names_hash, &my_charset_bin, START_NAME_HASH_SIZE, 0, 0,
+ hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
(hash_get_key) get_stmt_name_hash_key,
NULL,MYF(0));
}
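
The new Statement::set_n_backup_statement() / restore_backup_statement() pair above mirrors set_n_backup_item_arena(): save the THD's current execution context into a backup, adopt another statement's context, and later write the (possibly updated) context back into that statement before restoring the THD. A sketch of the intended call pattern; the caller name is illustrative:

    // Assumes the server's THD and Statement classes; sketch only.
    void run_as_substatement(THD *thd, Statement *sub_stmt)
    {
      Statement backup;
      thd->set_n_backup_statement(sub_stmt, &backup);    // save THD state, adopt sub_stmt's
      /* ... execute using sub_stmt's lex, free_list and mem_root ... */
      thd->restore_backup_statement(sub_stmt, &backup);  // write state back, restore THD
    }
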
diff --git a/sql/sql_class.h b/sql/sql_class.h
index eccaf072008..2b941b317f6 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -454,12 +454,28 @@ public:
*/
Item *free_list;
MEM_ROOT mem_root;
+ enum enum_state
+ {
+ INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, CONVENTIONAL_EXECUTION= 2,
+ ERROR= -1
+ };
+ enum_state state;
+
+ /* We build without RTTI, so dynamic_cast can't be used. */
+ enum Type
+ {
+ STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE
+ };
+
Item_arena(THD *thd);
Item_arena();
Item_arena(bool init_mem_root);
- ~Item_arena();
+ virtual Type type() const;
+ virtual ~Item_arena();
+ inline bool is_stmt_prepare() const { return (int)state < (int)PREPARED; }
+ inline bool is_first_stmt_execute() const { return state == PREPARED; }
inline gptr alloc(unsigned int size) { return alloc_root(&mem_root,size); }
inline gptr calloc(unsigned int size)
{
@@ -549,12 +565,6 @@ public:
Cursor *cursor;
public:
- /* We build without RTTI, so dynamic_cast can't be used. */
- enum Type
- {
- STATEMENT,
- PREPARED_STATEMENT
- };
/*
This constructor is called when statement is a subobject of THD:
@@ -567,9 +577,16 @@ public:
/* Assign execution context (note: not all members) of given stmt to self */
void set_statement(Statement *stmt);
+ void set_n_backup_statement(Statement *stmt, Statement *backup);
+ void restore_backup_statement(Statement *stmt, Statement *backup);
/* return class type */
virtual Type type() const;
+ /*
+ Cleanup statement parse state (parse tree, lex) after execution of
+ a non-prepared SQL statement.
+ */
+ void end_statement();
};
@@ -862,7 +879,7 @@ public:
ulong row_count; // Row counter, mainly for errors and warnings
long dbug_thread_id;
pthread_t real_id;
- uint current_tablenr,tmp_table;
+ uint current_tablenr,tmp_table,global_read_lock;
uint server_status,open_options,system_thread;
uint32 db_length;
uint select_number; //number of select (used for EXPLAIN)
@@ -881,7 +898,7 @@ public:
bool no_errors, password, is_fatal_error;
bool query_start_used,last_insert_id_used,insert_id_used,rand_used;
bool time_zone_used;
- bool in_lock_tables,global_read_lock;
+ bool in_lock_tables;
bool query_error, bootstrap, cleanup_done;
enum killed_state { NOT_KILLED=0, KILL_CONNECTION=ER_SERVER_SHUTDOWN, KILL_QUERY=ER_QUERY_INTERRUPTED };
@@ -951,6 +968,12 @@ public:
void close_active_vio();
#endif
void awake(THD::killed_state state_to_set);
+ /*
+ For enter_cond() / exit_cond() to work, the mutex must be acquired before
+ enter_cond() (in 4.1 an assertion will soon ensure this); this mutex is
+ then released by exit_cond(). Usage must be:
+ lock mutex; enter_cond(); your code; exit_cond().
+ */
inline const char* enter_cond(pthread_cond_t *cond, pthread_mutex_t* mutex,
const char* msg)
{
@@ -962,6 +985,13 @@ public:
}
inline void exit_cond(const char* old_msg)
{
+ /*
+ Putting the mutex unlock in exit_cond() ensures that
+ mysys_var->current_mutex is always unlocked _before_ mysys_var->mutex is
+ locked (if that would not be the case, you'll get a deadlock if someone
+ does a THD::awake() on you).
+ */
+ pthread_mutex_unlock(mysys_var->current_mutex);
pthread_mutex_lock(&mysys_var->mutex);
mysys_var->current_mutex = 0;
mysys_var->current_cond = 0;
@@ -1036,38 +1066,6 @@ public:
}
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
-
- inline void allocate_temporary_memory_pool_for_ps_preparing()
- {
- DBUG_ASSERT(current_arena!=0);
- /*
- We do not want to have in PS memory all that junk,
- which will be created by preparation => substitute memory
- from original thread pool.
-
- We know that PS memory pool is now copied to THD, we move it back
- to allow some code use it.
- */
- current_arena->set_item_arena(this);
- init_sql_alloc(&mem_root,
- variables.query_alloc_block_size,
- variables.query_prealloc_size);
- free_list= 0;
- }
- inline void free_temporary_memory_pool_for_ps_preparing()
- {
- DBUG_ASSERT(current_arena!=0);
- cleanup_items(current_arena->free_list);
- /* no need to reset free_list as it won't be used anymore */
- free_items(free_list);
- close_thread_tables(this); // to close derived tables
- free_root(&mem_root, MYF(0));
- set_item_arena(current_arena);
- }
- inline bool only_prepare()
- {
- return command == COM_PREPARE;
- }
};
/* Flags for the THD::system_thread (bitmap) variable */
@@ -1076,9 +1074,32 @@ public:
#define SYSTEM_THREAD_SLAVE_SQL 4
/*
+ Disables binary logging for one thread, and resets it back to what it was
+ before being disabled.
+ Some functions (like the internal mysql_create_table() when it's called by
+ mysql_alter_table()) must NOT write to the binlog (binlogging is done
+ at a later stage of the command already, and must be, for locking reasons);
+ so we internally disable it temporarily by creating the Disable_binlog
+ object and reset the state by destroying the object (don't forget that! or
+ write code so that the object gets automatically destroyed when leaving a
+ block, see example in sql_table.cc).
+*/
+class Disable_binlog {
+private:
+ THD *thd;
+ ulong save_options;
+public:
+ Disable_binlog(THD *thd_arg);
+ ~Disable_binlog();
+};
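The constructor and destructor bodies of Disable_binlog are not part of this header hunk; presumably they save and restore the binlog bit in thd->options, but that is an assumption here, not something this hunk shows. The standalone sketch below only illustrates the RAII shape with made-up names (Session, Scoped_binlog_off, OPT_BINLOG):

/*
  Illustrative RAII sketch with invented types; not the real Disable_binlog
  implementation.
*/
#include <cstdio>

struct Session                      /* stand-in for THD */
{
  unsigned long options;
};

const unsigned long OPT_BINLOG= 1UL << 0;   /* stand-in for the binlog bit */

class Scoped_binlog_off
{
  Session *sess;
  unsigned long saved_options;
public:
  explicit Scoped_binlog_off(Session *s)
    : sess(s), saved_options(s->options)
  {
    sess->options&= ~OPT_BINLOG;    /* binlogging off inside the scope */
  }
  ~Scoped_binlog_off()
  {
    sess->options= saved_options;   /* restored even on early return */
  }
};

int main()
{
  Session s= { OPT_BINLOG };
  {
    Scoped_binlog_off guard(&s);
    std::printf("inside scope, binlog bit: %lu\n", s.options & OPT_BINLOG);
  }
  std::printf("after scope,  binlog bit: %lu\n", s.options & OPT_BINLOG);
  return 0;
}

The RAII form is what the comment's warning is about: with a block-scoped object the state is restored even on an early return, so callers cannot forget to re-enable binlogging.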
+
+
+/*
Used to hold information about file and file structure in exchange
via non-DB file (...INTO OUTFILE..., ...LOAD DATA...)
+ XXX: We never call the destructor for objects of this class.
*/
+
class sql_exchange :public Sql_alloc
{
public:
@@ -1088,7 +1109,6 @@ public:
bool dumpfile;
ulong skip_lines;
sql_exchange(char *name,bool dumpfile_flag);
- ~sql_exchange() {}
};
#include "log_event.h"
@@ -1119,6 +1139,11 @@ public:
virtual void send_error(uint errcode,const char *err);
virtual bool send_eof()=0;
virtual void abort() {}
+ /*
+ Cleanup instance of this class for next execution of a prepared
+ statement/stored procedure.
+ */
+ virtual void cleanup();
};
@@ -1145,6 +1170,8 @@ public:
~select_to_file();
bool send_fields(List<Item> &list, uint flags) { return 0; }
void send_error(uint errcode,const char *err);
+ bool send_eof();
+ void cleanup();
};
@@ -1157,7 +1184,6 @@ public:
~select_export();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
- bool send_eof();
};
@@ -1166,7 +1192,6 @@ public:
select_dump(sql_exchange *ex) :select_to_file(ex) {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
- bool send_eof();
};
@@ -1194,6 +1219,8 @@ class select_insert :public select_result {
bool send_data(List<Item> &items);
void send_error(uint errcode,const char *err);
bool send_eof();
+ /* not implemented: select_insert is never re-used in prepared statements */
+ void cleanup();
};
@@ -1520,4 +1547,5 @@ public:
bool send_fields(List<Item> &list, uint flags) { return 0; }
bool send_data(List<Item> &items);
bool send_eof();
+ void cleanup();
};
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index dad39da271b..f41e03b0602 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -322,10 +322,17 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
{
if (!strncmp(buf,"default-character-set", (pos-buf)))
{
+ /*
+ Try character set name, and if it fails
+ try collation name, probably it's an old
+ 4.1.0 db.opt file, which didn't have
+ separate default-character-set and
+ default-collation commands.
+ */
if (!(create->default_table_charset=
- get_charset_by_csname(pos+1,
- MY_CS_PRIMARY,
- MYF(0))))
+ get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) &&
+ !(create->default_table_charset=
+ get_charset_by_name(pos+1, MYF(0))))
{
sql_print_error("Error while loading database options: '%s':",path);
sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1);
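To make the fallback order concrete, here is a toy standalone lookup (invented tables and function names, not the server's charset registry) that first treats the db.opt value as a character set name and only then as a collation name, which is what a 4.1.0 file would contain:

/* Toy model of the two-stage lookup; the table below is illustrative. */
#include <cstdio>
#include <cstring>

struct charset_entry { const char *csname; const char *collation; };

static const charset_entry known[]=
{
  { "latin1", "latin1_swedish_ci" },
  { "utf8",   "utf8_general_ci"   },
};
static const int n_known= sizeof(known) / sizeof(known[0]);

static const charset_entry *by_csname(const char *name)
{
  for (int i= 0; i < n_known; i++)
    if (!strcmp(known[i].csname, name))
      return &known[i];
  return 0;
}

static const charset_entry *by_collation(const char *name)
{
  for (int i= 0; i < n_known; i++)
    if (!strcmp(known[i].collation, name))
      return &known[i];
  return 0;
}

int main()
{
  /* A 4.1.0 db.opt may hold a collation where a charset name is expected. */
  const char *value= "latin1_swedish_ci";
  const charset_entry *cs= by_csname(value);
  if (!cs)
    cs= by_collation(value);           /* fallback for old db.opt files */
  std::printf("%s -> %s\n", value, cs ? cs->csname : "unknown");
  return 0;
}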
@@ -391,7 +398,7 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
// do not create database if another thread is holding read lock
- if (wait_if_global_read_lock(thd,0))
+ if (wait_if_global_read_lock(thd, 0, 1))
{
error= -1;
goto exit2;
@@ -492,7 +499,7 @@ int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
// do not alter database if another thread is holding read lock
- if ((error=wait_if_global_read_lock(thd,0)))
+ if ((error=wait_if_global_read_lock(thd,0,1)))
goto exit2;
/* Check directory */
@@ -557,7 +564,7 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
// do not drop database if another thread is holding read lock
- if (wait_if_global_read_lock(thd,0))
+ if (wait_if_global_read_lock(thd, 0, 1))
{
error= -1;
goto exit2;
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d9d60be162a..05b2de8adfd 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -693,17 +693,13 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
int error;
DBUG_ENTER("mysql_truncate");
+ bzero((char*) &create_info,sizeof(create_info));
/* If it is a temporary table, close and regenerate it */
if (!dont_send_ok && (table_ptr=find_temporary_table(thd,table_list->db,
table_list->real_name)))
{
TABLE *table= *table_ptr;
- HA_CREATE_INFO create_info;
table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
- bzero((char*) &create_info,sizeof(create_info));
- create_info.auto_increment_value= table->file->auto_increment_value;
- create_info.default_table_charset= table->table_charset;
-
db_type table_type=table->db_type;
strmov(path,table->path);
*table_ptr= table->next; // Unlink table from list
@@ -745,8 +741,6 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
DBUG_RETURN(-1);
}
- bzero((char*) &create_info,sizeof(create_info));
-
*fn_ext(path)=0; // Remove the .frm extension
error= ha_create_table(path,&create_info,1) ? -1 : 0;
query_cache_invalidate3(thd, table_list, 0);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index b7940e2d9d0..0e4f803536d 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1627,6 +1627,13 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_RETURN(0);
}
+
+void select_insert::cleanup()
+{
+ /* select_insert/select_create are never re-used in prepared statements */
+ DBUG_ASSERT(0);
+}
+
select_insert::~select_insert()
{
if (table)
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index d650f696d14..b0707955522 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -24,6 +24,16 @@
#include "sp.h"
#include "sp_head.h"
+
+/*
+ A fake table list object; a pointer to it is used as a special value for
+ st_lex::time_zone_tables_used to indicate that this statement implicitly
+ uses time zone tables but the real table list has not been created yet.
+ A pointer to it is also returned by my_tz_get_tables_list() as an
+ indication of a transient error.
+*/
+TABLE_LIST fake_time_zone_tables_list;
+
/* Macros to look like lex */
#define yyGet() *(lex->ptr++)
@@ -1376,7 +1386,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
We have to create array in prepared statement memory if it is
prepared statement
*/
- Item_arena *arena= thd->current_arena ? thd->current_arena : thd;
+ Item_arena *arena= thd->current_arena;
return (ref_pointer_array=
(Item **)arena->alloc(sizeof(Item*) *
(item_list.elements +
@@ -1649,6 +1659,11 @@ void st_select_lex_unit::set_limit(SELECT_LEX *values,
}
+st_lex::st_lex()
+ :result(0)
+{}
+
+
/*
Unlink first table from global table list and first table from outer select
list (lex->select_lex)
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 84b5cf3454b..de0d5d90f16 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -249,12 +249,6 @@ protected:
*master, *slave, /* vertical links */
*link_next, **link_prev; /* list of whole SELECT_LEX */
public:
- enum enum_parsing_place
- {
- NO_MATTER,
- IN_HAVING,
- SELECT_LIST
- };
ulong options;
/*
@@ -700,6 +694,12 @@ typedef struct st_lex
bool prepared_stmt_code_is_varref;
/* Names of user variables holding parameters (in EXECUTE) */
List<LEX_STRING> prepared_stmt_params;
+ /*
+ If this points to fake_time_zone_tables_list, time zone tables are
+ implicitly used by the statement; this member is also used to hold the
+ list of those tables after they are opened.
+ */
+ TABLE_LIST *time_zone_tables_used;
sp_head *sphead;
sp_name *spname;
bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */
@@ -753,6 +753,7 @@ typedef struct st_lex
bool only_view_structure();
} LEX;
+extern TABLE_LIST fake_time_zone_tables_list;
struct st_lex_local: public st_lex
{
static void *operator new(size_t size)
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 6dee38e9192..a4379b74c17 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -19,9 +19,9 @@
#pragma interface /* gcc class implementation */
#endif
-/* mysql standard class memoryallocator */
+/* mysql standard class memory allocator */
-#ifdef PEDANTIC_SAFEMALLOC
+#ifdef SAFEMALLOC
#define TRASH(XX,YY) bfill((XX), (YY), 0x8F)
#else
#define TRASH(XX,YY) /* no-op */
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index fa3adf236fe..44c6f71c9bd 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -190,26 +190,16 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
ex->file_name+=dirname_length(ex->file_name);
#endif
if (!dirname_length(ex->file_name) &&
- strlen(ex->file_name)+strlen(mysql_data_home)+strlen(tdb)+3 <
+ strlen(ex->file_name)+strlen(mysql_real_data_home)+strlen(tdb)+3 <
FN_REFLEN)
{
- (void) sprintf(name,"%s/%s/%s",mysql_data_home,tdb,ex->file_name);
+ (void) sprintf(name,"%s%s/%s",mysql_real_data_home,tdb,ex->file_name);
unpack_filename(name,name); /* Convert to system format */
}
else
{
-#ifdef EMBEDDED_LIBRARY
- char *chk_name= ex->file_name;
- while ((*chk_name == ' ') || (*chk_name == 't'))
- chk_name++;
- if (*chk_name == FN_CURLIB)
- {
- sprintf(name, "%s%s", mysql_data_home, ex->file_name);
- unpack_filename(name, name);
- }
- else
-#endif /*EMBEDDED_LIBRARY*/
- unpack_filename(name,ex->file_name);
+ my_load_path(name, ex->file_name, mysql_real_data_home);
+ unpack_filename(name, name);
#if !defined(__WIN__) && !defined(OS2) && ! defined(__NETWARE__)
MY_STAT stat_info;
if (!my_stat(name,&stat_info,MYF(MY_WME)))
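The net effect of the my_load_path() call here is to resolve the LOAD DATA file name against the data home directory (bare names were already handled above by placing them in the database directory). A rough standalone approximation of that rule, with invented names and without my_load_path()'s handling of home directories and drive letters, looks like this:

/* Rough approximation only; the real my_load_path() does more. */
#include <cstdio>
#include <string>

static std::string resolve_load_path(const std::string &file,
                                     const std::string &data_home, /* ends with '/' */
                                     const std::string &db)
{
  if (!file.empty() && file[0] == '/')        /* absolute: keep as is      */
    return file;
  if (file.find('/') == std::string::npos)    /* bare name: db directory   */
    return data_home + db + "/" + file;
  return data_home + file;                    /* relative: under data home */
}

int main()
{
  std::printf("%s\n",
              resolve_load_path("t1.txt", "/var/lib/mysql/", "test").c_str());
  std::printf("%s\n",
              resolve_load_path("/tmp/t1.txt", "/var/lib/mysql/", "test").c_str());
  return 0;
}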
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 98f1810adc3..59a3e4b545b 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1572,7 +1572,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
mysqld_list_fields(thd,&table_list,fields);
free_items(thd->free_list);
- thd->free_list=0; /* free_list should never point to garbage */
+ thd->free_list= 0; /* free_list should never point to garbage */
break;
}
#endif
@@ -1673,8 +1673,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
SHUTDOWN_DEFAULT is 0. If client is >= 4.1.3, the shutdown level is in
packet[0].
*/
- enum enum_shutdown_level level=
- (enum enum_shutdown_level) (uchar) packet[0];
+ enum mysql_enum_shutdown_level level=
+ (enum mysql_enum_shutdown_level) (uchar) packet[0];
DBUG_PRINT("quit",("Got shutdown command for level %u", level));
if (level == SHUTDOWN_DEFAULT)
level= SHUTDOWN_WAIT_ALL_BUFFERS; // soon default will be configurable
@@ -2011,8 +2011,6 @@ mysql_execute_command(THD *thd)
else
thd->send_explain_fields(result);
res= mysql_explain_union(thd, &thd->lex->unit, result);
- MYSQL_LOCK *save_lock= thd->lock;
- thd->lock= (MYSQL_LOCK *)0;
if (lex->describe & DESCRIBE_EXTENDED)
{
char buff[1024];
@@ -2024,20 +2022,19 @@ mysql_execute_command(THD *thd)
ER_YES, str.ptr());
}
result->send_eof();
- thd->lock= save_lock;
+ delete result;
}
else
{
- if (!result)
+ if (!result && !(result= new select_send()))
{
- if (!(result=new select_send()))
- {
- res= -1;
- break;
- }
+ res= -1;
+ break;
}
query_cache_store_query(thd, all_tables);
- res=handle_select(thd, lex, result);
+ res= handle_select(thd, lex, result);
+ if (result != lex->result)
+ delete result;
}
}
break;
@@ -2393,6 +2390,21 @@ mysql_execute_command(THD *thd)
net_printf(thd, ER_UPDATE_TABLE_USED, create_table->real_name);
goto create_error;
}
+ if (lex->create_info.used_fields & HA_CREATE_USED_UNION)
+ {
+ TABLE_LIST *tab;
+ for (tab= select_tables; tab; tab= tab->next)
+ {
+ if (find_real_table_in_list((TABLE_LIST*) lex->create_info.
+ merge_list.first,
+ select_tables->db, tab->real_name))
+ {
+ net_printf(thd, ER_UPDATE_TABLE_USED, tab->real_name);
+ goto create_error;
+ }
+ }
+ }
+
if (select_tables &&
check_table_access(thd, SELECT_ACL, select_tables, 0))
goto create_error; // Error message is given
@@ -2433,7 +2445,7 @@ mysql_execute_command(THD *thd)
res= mysql_create_table(thd, create_table->db,
create_table->real_name, &lex->create_info,
lex->create_list,
- lex->key_list, 0, 0, 0); // do logging
+ lex->key_list, 0, 0);
}
if (!res)
send_ok(thd);
@@ -2759,31 +2771,30 @@ unsent_create_error:
// is table which we are changing used somewhere in other parts of query
if (find_table_in_global_list(all_tables->next_global,
- first_table->db, first_table->real_name))
+ first_table->db, first_table->real_name))
{
/* Using same table for INSERT and SELECT */
select_lex->options |= OPTION_BUFFER_RESULT;
}
- if (!(res= open_and_lock_tables(thd, all_tables)))
+ if (!(res= open_and_lock_tables(thd, all_tables)) &&
+ !(res= mysql_insert_select_prepare(thd)) &&
+ (result= new select_insert(first_table, first_table->table,
+ &lex->field_list, lex->duplicates)))
{
- if ((res= mysql_insert_select_prepare(thd)))
- break;
- if ((result= new select_insert(first_table, first_table->table,
- &lex->field_list, lex->duplicates)))
- /* Skip first table, which is the table we are inserting in */
- lex->select_lex.table_list.first= (byte*) first_table->next_local;
- /*
- insert/replace from SELECT give its SELECT_LEX for SELECT,
- and item_list belong to SELECT
- */
- lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE;
- res= handle_select(thd, lex, result);
- /* revert changes for SP */
- lex->select_lex.table_list.first= (byte*) first_table;
- lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE;
+ /* Skip first table, which is the table we are inserting in */
+ lex->select_lex.table_list.first= (byte*) first_table->next_local;
+ /*
+ insert/replace from SELECT give its SELECT_LEX for SELECT,
+ and item_list belong to SELECT
+ */
+ lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE;
+ res= handle_select(thd, lex, result);
+ lex->select_lex.table_list.first= (byte*) first_table;
+ lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE;
+ delete result;
if (thd->net.report_error)
- res= -1;
+ res= -1;
}
else
res= -1;
@@ -2807,7 +2818,7 @@ unsent_create_error:
goto error;
}
- res= mysql_truncate(thd, first_table);
+ res= mysql_truncate(thd, first_table, 0);
break;
case SQLCOM_DELETE:
{
@@ -4290,6 +4301,7 @@ mysql_init_query(THD *thd, uchar *buf, uint length, bool lexonly)
lex->lock_option= TL_READ;
lex->found_colon= 0;
lex->safe_to_cache_query= 1;
+ lex->time_zone_tables_used= 0;
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
lex->variables_used= 0;
@@ -4324,8 +4336,8 @@ mysql_init_select(LEX *lex)
select_lex->select_limit= HA_POS_ERROR;
if (select_lex == &lex->select_lex)
{
+ DBUG_ASSERT(lex->result == 0);
lex->exchange= 0;
- lex->result= 0;
lex->proc_list.first= 0;
}
}
@@ -4494,9 +4506,7 @@ void mysql_parse(THD *thd, char *inBuf, uint length)
}
}
thd->proc_info="freeing items";
- free_items(thd->free_list); /* Free strings used by items */
- thd->free_list= 0; /* free_list should never point to garbage */
- lex_end(lex);
+ thd->end_statement();
}
DBUG_VOID_RETURN;
}
@@ -4522,10 +4532,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length)
if (!yyparse((void*) thd) && ! thd->is_fatal_error &&
all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first))
error= 1; /* Ignore question */
- free_items(thd->free_list); /* Free strings used by items */
- thd->free_list= 0; /* free_list should never point to garbage */
- lex_end(lex);
-
+ thd->end_statement();
DBUG_RETURN(error);
}
#endif
@@ -4770,8 +4777,12 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
case FIELD_TYPE_TIMESTAMP:
if (!length)
new_field->length= 14; // Full date YYYYMMDDHHMMSS
- else
+ else if (new_field->length != 19)
{
+ /*
+ We support only even TIMESTAMP lengths less than or equal to 14,
+ plus 19 as the length of the 4.1-compatible representation.
+ */
new_field->length=((new_field->length+1)/2)*2; /* purecov: inspected */
new_field->length= min(new_field->length,14); /* purecov: inspected */
}
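The rule in the new comment reduces to a small piece of arithmetic; the standalone restatement below (not server code) applies the normalization to a few sample display lengths:

/* Standalone restatement of the TIMESTAMP length normalization above. */
#include <cstdio>

static unsigned normalize_timestamp_length(unsigned length)
{
  if (length == 0)
    return 14;                      /* full YYYYMMDDHHMMSS               */
  if (length == 19)
    return 19;                      /* 4.1 'YYYY-MM-DD HH:MM:SS' form    */
  length= ((length + 1) / 2) * 2;   /* round odd lengths up to even      */
  return length > 14 ? 14 : length; /* and cap at 14                     */
}

int main()
{
  const unsigned in[]= { 0, 7, 8, 12, 19, 25 };
  for (unsigned i= 0; i < sizeof(in) / sizeof(in[0]); i++)
    std::printf("%u -> %u\n", in[i], normalize_timestamp_length(in[i]));
  return 0;
}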
@@ -4832,7 +4843,10 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
new_field->length=0;
for (const char **pos=interval->type_names; *pos ; pos++)
{
- new_field->length+=(uint) strip_sp((char*) *pos)+1;
+ uint length= (uint) strip_sp((char*) *pos)+1;
+ CHARSET_INFO *cs= thd->variables.character_set_client;
+ length= cs->cset->numchars(cs, *pos, *pos+length);
+ new_field->length+= length;
}
new_field->length--;
set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1);
@@ -4862,8 +4876,10 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
new_field->length=(uint) strip_sp((char*) interval->type_names[0]);
for (const char **pos=interval->type_names+1; *pos ; pos++)
{
- uint length=(uint) strip_sp((char*) *pos);
- set_if_bigger(new_field->length,length);
+ uint length=(uint) strip_sp((char*) *pos);
+ CHARSET_INFO *cs= thd->variables.character_set_client;
+ length= cs->cset->numchars(cs, *pos, *pos+length);
+ set_if_bigger(new_field->length,length);
}
set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1);
if (default_value)
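The switch from the raw strip_sp() byte length to cs->cset->numchars() in the two ENUM/SET hunks above matters as soon as the client character set is multi-byte. The toy counter below (an illustration of the idea, not the server's numchars handler) shows how the byte count and the character count of a UTF-8 value diverge:

/* Continuation bytes (10xxxxxx) are not counted as characters. */
#include <cstdio>
#include <cstring>

static unsigned utf8_numchars(const char *s, const char *end)
{
  unsigned chars= 0;
  for (; s < end; s++)
    if (((unsigned char) *s & 0xC0) != 0x80)
      chars++;
  return chars;
}

int main()
{
  const char *value= "caf\xC3\xA9";                   /* "café" in UTF-8 */
  unsigned bytes= (unsigned) strlen(value);
  unsigned chars= utf8_numchars(value, value + bytes);
  std::printf("bytes=%u chars=%u\n", bytes, chars);   /* bytes=5 chars=4 */
  return 0;
}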
@@ -5493,9 +5509,13 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
tmp_write_to_binlog= 0;
if (lock_global_read_lock(thd))
return 1;
+ result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1,
+ tables);
+ make_global_read_lock_block_commit(thd);
}
+ else
+ result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables);
my_dbopt_cleanup();
- result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables);
}
if (options & REFRESH_HOSTS)
hostname_cache_refresh();
@@ -6056,3 +6076,39 @@ int create_table_precheck(THD *thd, TABLE_LIST *tables,
check_grant(thd, want_priv, create_table, 0, UINT_MAX, 0)) ?
1 : 0);
}
+
+
+/*
+ negate given expression
+
+ SYNOPSIS
+ negate_expression()
+ thd thread handler
+ expr expression for negation
+
+ RETURN
+ negated expression
+*/
+
+Item *negate_expression(THD *thd, Item *expr)
+{
+ Item *negated;
+ if (expr->type() == Item::FUNC_ITEM &&
+ ((Item_func *) expr)->functype() == Item_func::NOT_FUNC)
+ {
+ /* it is NOT(NOT( ... )) */
+ Item *arg= ((Item_func *) expr)->arguments()[0];
+ enum_parsing_place place= thd->lex->current_select->parsing_place;
+ if (arg->is_bool_func() || place == IN_WHERE || place == IN_HAVING)
+ return arg;
+ /*
+ If it is not a boolean function then we have to emulate the value of
+ NOT(NOT(a)); it will be a != 0.
+ */
+ return new Item_func_ne(arg, new Item_int((char*) "0", 0, 1));
+ }
+
+ if ((negated= expr->neg_transformer(thd)) != 0)
+ return negated;
+ return new Item_func_not(expr);
+}
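For readers who do not want to trace the Item classes, the decision procedure of negate_expression() can be restated on a toy expression type. The Expr/context names below are invented, and the neg_transformer() branch is folded into the generic NOT(...) wrap, so this is only a sketch of the control flow:

/* Toy restatement of the cases handled by negate_expression(). */
#include <cstdio>
#include <string>

enum context { CTX_OTHER, CTX_WHERE, CTX_HAVING };

struct Expr
{
  std::string text;      /* whole expression, e.g. "NOT(a)"      */
  std::string inner;     /* argument if is_not, otherwise unused */
  bool is_not;           /* expression is NOT(inner)             */
  bool inner_is_bool;    /* inner expression is already boolean  */
};

/* Returns the textual form the negated expression would take. */
static std::string negate(const Expr &e, context ctx)
{
  if (e.is_not)                                      /* NOT(NOT(inner))    */
  {
    if (e.inner_is_bool || ctx == CTX_WHERE || ctx == CTX_HAVING)
      return e.inner;                                /* double negation    */
    return e.inner + " <> 0";                        /* emulate its truth  */
  }
  return "NOT(" + e.text + ")";                      /* generic wrap       */
}

int main()
{
  Expr not_a= { "NOT(a)", "a", true, false };
  std::printf("%s\n", negate(not_a, CTX_OTHER).c_str());  /* a <> 0 */
  std::printf("%s\n", negate(not_a, CTX_WHERE).c_str());  /* a      */
  return 0;
}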
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 2cc2d881cca..5fccdd624de 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -91,7 +91,6 @@ public:
uint param_count;
uint last_errno;
char last_error[MYSQL_ERRMSG_SIZE];
- bool get_longdata_error;
#ifndef EMBEDDED_LIBRARY
bool (*set_params)(Prepared_statement *st, uchar *data, uchar *data_end,
uchar *read_pos, String *expanded_query);
@@ -105,7 +104,7 @@ public:
Prepared_statement(THD *thd_arg);
virtual ~Prepared_statement();
void setup_set_params();
- virtual Statement::Type type() const;
+ virtual Item_arena::Type type() const;
};
static void execute_stmt(THD *thd, Prepared_statement *stmt,
@@ -136,7 +135,7 @@ find_prepared_statement(THD *thd, ulong id, const char *where,
{
Statement *stmt= thd->stmt_map.find(id);
- if (stmt == 0 || stmt->type() != Statement::PREPARED_STATEMENT)
+ if (stmt == 0 || stmt->type() != Item_arena::PREPARED_STATEMENT)
{
char llbuf[22];
my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), 22, llstr(id, llbuf), where);
@@ -334,15 +333,22 @@ static void set_param_double(Item_param *param, uchar **pos, ulong len)
}
#ifndef EMBEDDED_LIBRARY
+
+/*
+ Read date/time/datetime parameter values from network (binary
+ protocol). See writing counterparts of these functions in
+ libmysql.c (store_param_{time,date,datetime}).
+*/
+
static void set_param_time(Item_param *param, uchar **pos, ulong len)
{
- ulong length;
- uint day;
+ MYSQL_TIME tm;
+ ulong length= get_param_length(pos, len);
- if ((length= get_param_length(pos, len)) >= 8)
+ if (length >= 8)
{
uchar *to= *pos;
- TIME tm;
+ uint day;
tm.neg= (bool) to[0];
day= (uint) sint4korr(to+1);
@@ -364,21 +370,22 @@ static void set_param_time(Item_param *param, uchar **pos, ulong len)
tm.second= 59;
}
tm.day= tm.year= tm.month= 0;
-
- param->set_time(&tm, MYSQL_TIMESTAMP_TIME,
- MAX_TIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
}
+ else
+ set_zero_time(&tm);
+ param->set_time(&tm, MYSQL_TIMESTAMP_TIME,
+ MAX_TIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
*pos+= length;
}
static void set_param_datetime(Item_param *param, uchar **pos, ulong len)
{
- uint length;
+ MYSQL_TIME tm;
+ ulong length= get_param_length(pos, len);
- if ((length= get_param_length(pos, len)) >= 4)
+ if (length >= 4)
{
uchar *to= *pos;
- TIME tm;
tm.neg= 0;
tm.year= (uint) sint2korr(to);
@@ -399,21 +406,22 @@ static void set_param_datetime(Item_param *param, uchar **pos, ulong len)
tm.hour= tm.minute= tm.second= 0;
tm.second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0;
-
- param->set_time(&tm, MYSQL_TIMESTAMP_DATETIME,
- MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
}
+ else
+ set_zero_time(&tm);
+ param->set_time(&tm, MYSQL_TIMESTAMP_DATETIME,
+ MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
*pos+= length;
}
static void set_param_date(Item_param *param, uchar **pos, ulong len)
{
- ulong length;
-
- if ((length= get_param_length(pos, len)) >= 4)
+ MYSQL_TIME tm;
+ ulong length= get_param_length(pos, len);
+
+ if (length >= 4)
{
uchar *to= *pos;
- TIME tm;
/*
Note, that though ranges of hour, minute and second are not checked
here we rely on them being < 256: otherwise
@@ -426,10 +434,11 @@ static void set_param_date(Item_param *param, uchar **pos, ulong len)
tm.hour= tm.minute= tm.second= 0;
tm.second_part= 0;
tm.neg= 0;
-
- param->set_time(&tm, MYSQL_TIMESTAMP_DATE,
- MAX_DATE_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
}
+ else
+ set_zero_time(&tm);
+ param->set_time(&tm, MYSQL_TIMESTAMP_DATE,
+ MAX_DATE_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
*pos+= length;
}
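The write side of these decoders lives in libmysql.c, as the new comment says. The TIME payload layout being read can be restated in a standalone decoder; the sign byte and the 4-byte day count are visible in the hunk above, while the remaining offsets follow the usual binary-protocol layout and should be read as a summary, not as part of this patch:

/*
  Standalone decoder for the binary TIME payload: sign byte, 4-byte day
  count, hour, minute, second, optional 4-byte microseconds.  Field and
  function names are local to this example.
*/
#include <cstdio>

struct time_value
{
  bool neg;
  unsigned long day, second_part;
  unsigned hour, minute, second;
};

static unsigned long uint4(const unsigned char *p)
{
  return (unsigned long) p[0] | ((unsigned long) p[1] << 8) |
         ((unsigned long) p[2] << 16) | ((unsigned long) p[3] << 24);
}

static time_value decode_time(const unsigned char *to, unsigned long length)
{
  time_value tm= { false, 0, 0, 0, 0, 0 };
  if (length >= 8)
  {
    tm.neg=    to[0] != 0;
    tm.day=    uint4(to + 1);
    tm.hour=   to[5];
    tm.minute= to[6];
    tm.second= to[7];
    tm.second_part= (length > 8) ? uint4(to + 8) : 0;
  }
  return tm;                           /* length < 8 means a zero time */
}

int main()
{
  const unsigned char buf[8]= { 0, 2, 0, 0, 0, 3, 4, 5 }; /* 2 days 03:04:05 */
  time_value tm= decode_time(buf, sizeof(buf));
  std::printf("%s%lud %02u:%02u:%02u\n",
              tm.neg ? "-" : "", tm.day, tm.hour, tm.minute, tm.second);
  return 0;
}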
@@ -700,6 +709,7 @@ static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query)
else
{
uchar *buff= (uchar*) client_param->buffer;
+ param->unsigned_flag= client_param->is_unsigned;
param->set_param_func(param, &buff,
client_param->length ?
*client_param->length :
@@ -740,6 +750,7 @@ static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query)
else
{
uchar *buff= (uchar*)client_param->buffer;
+ param->unsigned_flag= client_param->is_unsigned;
param->set_param_func(param, &buff,
client_param->length ?
*client_param->length :
@@ -896,10 +907,8 @@ static int mysql_test_insert(Prepared_statement *stmt,
open temporary memory pool for temporary data allocated by derived
tables & preparation procedure
*/
- thd->allocate_temporary_memory_pool_for_ps_preparing();
if (open_and_lock_tables(thd, table_list))
{
- thd->free_temporary_memory_pool_for_ps_preparing();
DBUG_RETURN(-1);
}
@@ -934,7 +943,6 @@ static int mysql_test_insert(Prepared_statement *stmt,
res= 0;
error:
lex->unit.cleanup();
- thd->free_temporary_memory_pool_for_ps_preparing();
DBUG_RETURN(res);
}
@@ -963,12 +971,6 @@ static int mysql_test_update(Prepared_statement *stmt,
if ((res= update_precheck(thd, table_list)))
DBUG_RETURN(res);
- /*
- open temporary memory pool for temporary data allocated by derived
- tables & preparation procedure
- */
- thd->allocate_temporary_memory_pool_for_ps_preparing();
-
if (open_and_lock_tables(thd, table_list))
res= -1;
else
@@ -994,7 +996,6 @@ static int mysql_test_update(Prepared_statement *stmt,
}
stmt->lex->unit.cleanup();
}
- thd->free_temporary_memory_pool_for_ps_preparing();
/* TODO: here we should send types of placeholders to the client. */
DBUG_RETURN(res);
}
@@ -1024,12 +1025,6 @@ static int mysql_test_delete(Prepared_statement *stmt,
if ((res= delete_precheck(thd, table_list)))
DBUG_RETURN(res);
- /*
- open temporary memory pool for temporary data allocated by derived
- tables & preparation procedure
- */
- thd->allocate_temporary_memory_pool_for_ps_preparing();
-
if (open_and_lock_tables(thd, table_list))
res= -1;
else
@@ -1037,7 +1032,6 @@ static int mysql_test_delete(Prepared_statement *stmt,
res= mysql_prepare_delete(thd, table_list, &lex->select_lex.where);
lex->unit.cleanup();
}
- thd->free_temporary_memory_pool_for_ps_preparing();
/* TODO: here we should send types of placeholders to the client. */
DBUG_RETURN(res);
}
@@ -1065,7 +1059,7 @@ static int mysql_test_select(Prepared_statement *stmt,
THD *thd= stmt->thd;
LEX *lex= stmt->lex;
SELECT_LEX_UNIT *unit= &lex->unit;
-
+ int result= 1;
DBUG_ENTER("mysql_test_select");
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1079,11 +1073,6 @@ static int mysql_test_select(Prepared_statement *stmt,
DBUG_RETURN(1);
#endif
- /*
- open temporary memory pool for temporary data allocated by derived
- tables & preparation procedure
- */
- thd->allocate_temporary_memory_pool_for_ps_preparing();
if (open_and_lock_tables(thd, tables))
{
send_error(thd);
@@ -1098,15 +1087,14 @@ static int mysql_test_select(Prepared_statement *stmt,
send_error(thd);
goto err_prep;
}
- if (lex->describe)
+ if (!text_protocol)
{
- if (!text_protocol && send_prep_stmt(stmt, 0))
- goto err_prep;
- unit->cleanup();
- }
- else
- {
- if (!text_protocol)
+ if (lex->describe)
+ {
+ if (send_prep_stmt(stmt, 0))
+ goto err_prep;
+ }
+ else
{
if (send_prep_stmt(stmt, lex->select_lex.item_list.elements) ||
thd->protocol_simple.send_fields(&lex->select_lex.item_list,
@@ -1117,16 +1105,13 @@ static int mysql_test_select(Prepared_statement *stmt,
)
goto err_prep;
}
- unit->cleanup();
}
- thd->free_temporary_memory_pool_for_ps_preparing();
- DBUG_RETURN(0);
+ result= 0; // ok
err_prep:
unit->cleanup();
err:
- thd->free_temporary_memory_pool_for_ps_preparing();
- DBUG_RETURN(1);
+ DBUG_RETURN(result);
}
@@ -1154,19 +1139,13 @@ static int mysql_test_do_fields(Prepared_statement *stmt,
int res= 0;
if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0)))
DBUG_RETURN(res);
- /*
- open temporary memory pool for temporary data allocated by derived
- tables & preparation procedure
- */
- thd->allocate_temporary_memory_pool_for_ps_preparing();
+
if (tables && (res= open_and_lock_tables(thd, tables)))
{
- thd->free_temporary_memory_pool_for_ps_preparing();
DBUG_RETURN(res);
}
res= setup_fields(thd, 0, 0, *values, 0, 0, 0);
stmt->lex->unit.cleanup();
- thd->free_temporary_memory_pool_for_ps_preparing();
if (res)
DBUG_RETURN(-1);
DBUG_RETURN(0);
@@ -1199,11 +1178,7 @@ static int mysql_test_set_fields(Prepared_statement *stmt,
if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0)))
DBUG_RETURN(res);
- /*
- open temporary memory pool for temporary data allocated by derived
- tables & preparation procedure
- */
- thd->allocate_temporary_memory_pool_for_ps_preparing();
+
if (tables && (res= open_and_lock_tables(thd, tables)))
goto error;
while ((var= it++))
@@ -1217,7 +1192,6 @@ static int mysql_test_set_fields(Prepared_statement *stmt,
}
error:
stmt->lex->unit.cleanup();
- thd->free_temporary_memory_pool_for_ps_preparing();
DBUG_RETURN(res);
}
@@ -1244,11 +1218,7 @@ static int select_like_statement_test(Prepared_statement *stmt,
THD *thd= stmt->thd;
LEX *lex= stmt->lex;
int res= 0;
- /*
- open temporary memory pool for temporary data allocated by derived
- tables & preparation procedure
- */
- thd->allocate_temporary_memory_pool_for_ps_preparing();
+
if (tables && (res= open_and_lock_tables(thd, tables)))
goto end;
@@ -1264,7 +1234,6 @@ static int select_like_statement_test(Prepared_statement *stmt,
}
end:
lex->unit.cleanup();
- thd->free_temporary_memory_pool_for_ps_preparing();
DBUG_RETURN(res);
}
@@ -1609,17 +1578,13 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
DBUG_RETURN(1);
}
- thd->stmt_backup.set_statement(thd);
- thd->stmt_backup.set_item_arena(thd);
- thd->set_statement(stmt);
- thd->set_item_arena(stmt);
+ thd->set_n_backup_statement(stmt, &thd->stmt_backup);
+ thd->set_n_backup_item_arena(stmt, &thd->stmt_backup);
if (alloc_query(thd, packet, packet_length))
{
- stmt->set_statement(thd);
- stmt->set_item_arena(thd);
- thd->set_statement(&thd->stmt_backup);
- thd->set_item_arena(&thd->stmt_backup);
+ thd->restore_backup_statement(stmt, &thd->stmt_backup);
+ thd->restore_backup_item_arena(stmt, &thd->stmt_backup);
/* Statement map deletes statement on erase */
thd->stmt_map.erase(stmt);
send_error(thd, ER_OUT_OF_RESOURCES);
@@ -1634,8 +1599,19 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
lex->safe_to_cache_query= 0;
error= yyparse((void *)thd) || thd->is_fatal_error ||
- init_param_array(stmt) ||
- send_prepare_results(stmt, test(name));
+ init_param_array(stmt);
+ /*
+ While doing context analysis of the query (in send_prepare_results) we
+ allocate a lot of additional memory: for open tables, JOINs, derived
+ tables, etc. Let's save a snapshot of the current parse tree in the
+ statement and restore the original THD. In cases where some tree
+ transformation can be reused on execute, we set thd->mem_root again from
+ stmt->mem_root (see setup_wild for one place where we do that).
+ */
+ thd->restore_backup_item_arena(stmt, &thd->stmt_backup);
+
+ if (!error)
+ error= send_prepare_results(stmt, test(name));
/* restore to WAIT_PRIOR: QUERY_PRIOR is set inside alloc_query */
if (!(specialflag & SPECIAL_NO_PRIOR))
@@ -1648,11 +1624,12 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
thd->lex->sphead= NULL;
}
lex_end(lex);
- stmt->set_statement(thd);
- stmt->set_item_arena(thd);
- thd->set_statement(&thd->stmt_backup);
- thd->set_item_arena(&thd->stmt_backup);
- thd->current_arena= 0;
+ thd->restore_backup_statement(stmt, &thd->stmt_backup);
+ cleanup_items(stmt->free_list);
+ close_thread_tables(thd);
+ free_items(thd->free_list);
+ thd->free_list= 0;
+ thd->current_arena= thd;
if (error)
{
@@ -1671,6 +1648,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
*/
for (; sl; sl= sl->next_select_in_list())
sl->prep_where= sl->where;
+ stmt->state= Item_arena::PREPARED;
}
DBUG_RETURN(!stmt);
}
@@ -1740,6 +1718,8 @@ void reset_stmt_for_execute(THD *thd, LEX *lex)
tables->table= 0;
}
lex->current_select= &lex->select_lex;
+ if (lex->result)
+ lex->result->cleanup();
}
@@ -1795,7 +1775,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
DBUG_PRINT("exec_query:", ("%s", stmt->query));
/* Check if we got an error when sending long data */
- if (stmt->get_longdata_error)
+ if (stmt->state == Item_arena::ERROR)
{
send_error(thd, stmt->last_errno, stmt->last_error);
DBUG_VOID_RETURN;
@@ -1841,6 +1821,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
if (stmt->param_count && stmt->set_params_data(stmt, &expanded_query))
goto set_params_data_err;
#endif
+ DBUG_ASSERT(thd->free_list == NULL);
thd->stmt_backup.set_statement(thd);
thd->set_statement(stmt);
thd->current_arena= stmt;
@@ -1925,8 +1906,9 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name)
DBUG_VOID_RETURN;
}
- thd->stmt_backup.set_statement(thd);
- thd->set_statement(stmt);
+ DBUG_ASSERT(thd->free_list == NULL);
+
+ thd->set_n_backup_statement(stmt, &thd->stmt_backup);
if (stmt->set_params_from_vars(stmt,
thd->stmt_backup.lex->prepared_stmt_params,
&expanded_query))
@@ -1958,6 +1940,8 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt,
String *expanded_query)
{
DBUG_ENTER("execute_stmt");
+
+ thd->set_n_backup_statement(stmt, &thd->stmt_backup);
reset_stmt_for_execute(thd, stmt->lex);
if (expanded_query->length() &&
@@ -1967,6 +1951,13 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt,
my_error(ER_OUTOFMEMORY, 0, expanded_query->length());
DBUG_VOID_RETURN;
}
+ /*
+ At the first execution of a prepared statement we perform logical
+ transformations of the query tree (i.e. negation elimination).
+ This should be done permanently on the parse tree of this statement.
+ */
+ if (stmt->state == Item_arena::PREPARED)
+ thd->current_arena= stmt;
if (!(specialflag & SPECIAL_NO_PRIOR))
my_pthread_setprio(pthread_self(),QUERY_PRIOR);
@@ -1986,6 +1977,11 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt,
never points to garbage: keep this predicate true.
*/
thd->free_list= 0;
+ if (stmt->state == Item_arena::PREPARED)
+ {
+ thd->current_arena= thd;
+ stmt->state= Item_arena::EXECUTED;
+ }
DBUG_VOID_RETURN;
}
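Several hunks in this file replace the old get_longdata_error flag with an Item_arena state; the lifecycle they implement is small enough to restate as a toy state machine. The transitions below are only a summary of these hunks (the real states live in Item_arena and the set shown here may be incomplete):

/* Toy summary of the statement states used in sql_prepare.cc. */
#include <cstdio>

enum stmt_state { INITIALIZED, PREPARED, EXECUTED, ERROR_STATE };

struct statement { stmt_state state; };

static bool execute(statement *stmt)
{
  if (stmt->state == ERROR_STATE)       /* failed mysql_stmt_send_long_data */
    return false;                       /* the error is reported at execute */
  if (stmt->state == PREPARED)
  {
    /* First execution: permanent tree transformations go to the statement
       arena, so thd->current_arena points at the statement for this run. */
    stmt->state= EXECUTED;
  }
  /* Later executions: nothing new is written into the statement arena. */
  return true;
}

int main()
{
  statement stmt= { PREPARED };
  std::printf("first run ok: %d, now EXECUTED: %d\n",
              (int) execute(&stmt), (int) (stmt.state == EXECUTED));
  return 0;
}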
@@ -2070,7 +2066,7 @@ void mysql_stmt_reset(THD *thd, char *packet)
SEND_ERROR)))
DBUG_VOID_RETURN;
- stmt->get_longdata_error= 0;
+ stmt->state= Item_arena::PREPARED;
/*
Clear parameters from data which could be set by
@@ -2158,7 +2154,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
if (param_number >= stmt->param_count)
{
/* Error will be sent in execute call */
- stmt->get_longdata_error= 1;
+ stmt->state= Item_arena::ERROR;
stmt->last_errno= ER_WRONG_ARGUMENTS;
sprintf(stmt->last_error, ER(ER_WRONG_ARGUMENTS),
"mysql_stmt_send_long_data");
@@ -2169,10 +2165,15 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
param= stmt->param_array[param_number];
#ifndef EMBEDDED_LIBRARY
- param->set_longdata(packet, (ulong) (packet_end - packet));
+ if (param->set_longdata(packet, (ulong) (packet_end - packet)))
#else
- param->set_longdata(thd->extra_data, thd->extra_length);
+ if (param->set_longdata(thd->extra_data, thd->extra_length))
#endif
+ {
+ stmt->state= Item_arena::ERROR;
+ stmt->last_errno= ER_OUTOFMEMORY;
+ sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0);
+ }
DBUG_VOID_RETURN;
}
@@ -2182,8 +2183,7 @@ Prepared_statement::Prepared_statement(THD *thd_arg)
thd(thd_arg),
param_array(0),
param_count(0),
- last_errno(0),
- get_longdata_error(0)
+ last_errno(0)
{
*last_error= '\0';
}
@@ -2217,10 +2217,11 @@ Prepared_statement::~Prepared_statement()
if (cursor)
cursor->Cursor::~Cursor();
free_items(free_list);
+ delete lex->result;
}
-Statement::Type Prepared_statement::type() const
+Item_arena::Type Prepared_statement::type() const
{
return PREPARED_STATEMENT;
}
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index 97c4e39874c..3298eb68a91 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -48,7 +48,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
DBUG_RETURN(1);
}
- if (wait_if_global_read_lock(thd,0))
+ if (wait_if_global_read_lock(thd,0,1))
DBUG_RETURN(1);
VOID(pthread_mutex_lock(&LOCK_open));
if (lock_table_names(thd, table_list))
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5bd7cb62667..db3a0c90141 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -227,14 +227,9 @@ int handle_select(THD *thd, LEX *lex, select_result *result)
res= 1;
if (unlikely(res))
{
- if (result)
- {
- if (res > 0)
- result->send_error(0, NullS);
- result->abort();
- }
- else if (res > 0)
- send_error(thd, 0, NullS);
+ if (res > 0)
+ result->send_error(0, NullS);
+ result->abort();
res= 1; // Error sent to client
}
if (result != lex->result)
@@ -772,6 +767,10 @@ JOIN::optimize()
(select_lex->ftfunc_list->elements ?
SELECT_NO_JOIN_CACHE : 0));
+ /* Perform FULLTEXT search before all regular searches */
+ if (!(select_options & SELECT_DESCRIBE))
+ init_ftfuncs(thd, select_lex, test(order));
+
/*
is this simple IN subquery?
*/
@@ -827,7 +826,7 @@ JOIN::optimize()
join_tab->info= "Using index; Using where";
else
join_tab->info= "Using index";
-
+
DBUG_RETURN(unit->item->
change_engine(new subselect_indexsubquery_engine(thd,
join_tab,
@@ -864,7 +863,7 @@ JOIN::optimize()
as in other cases the join is done before the sort.
*/
if (const_tables != tables &&
- (order || group_list) &&
+ (order || group_list) &&
join_tab[const_tables].type != JT_ALL &&
join_tab[const_tables].type != JT_FT &&
join_tab[const_tables].type != JT_REF_OR_NULL &&
@@ -878,8 +877,7 @@ JOIN::optimize()
((group_list && const_tables != tables &&
(!simple_group ||
!test_if_skip_sort_order(&join_tab[const_tables], group_list,
- unit->select_limit_cnt,
- 0))) ||
+ unit->select_limit_cnt, 0))) ||
select_distinct) &&
tmp_table_param.quick_group && !procedure)
{
@@ -894,8 +892,6 @@ JOIN::optimize()
}
having= 0;
- /* Perform FULLTEXT search before all regular searches */
- init_ftfuncs(thd, select_lex, test(order));
/* Create a tmp table if distinct or if the sort is too complicated */
if (need_tmp)
{
@@ -903,7 +899,7 @@ JOIN::optimize()
thd->proc_info="Creating tmp table";
init_items_ref_array();
-
+
tmp_table_param.hidden_field_count= (all_fields.elements -
fields_list.elements);
if (!(exec_tmp_table1 =
@@ -2432,7 +2428,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end,
}
else if (old->eq_func && new_fields->eq_func &&
old->val->eq(new_fields->val, old->field->binary()))
-
+
{
old->level= and_level;
old->optimize= ((old->optimize & new_fields->optimize &
@@ -2491,7 +2487,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end,
field Field used in comparision
eq_func True if we used =, <=> or IS NULL
value Value used for comparison with field
- Is NULL for BETWEEN and IN
+ Is NULL for BETWEEN and IN
usable_tables Tables which can be used for key optimization
NOTES
@@ -2565,22 +2561,32 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond,
number. cmp_type() is checked to allow compare of dates to numbers.
eq_func is NEVER true when num_values > 1
*/
- if (!eq_func ||
- field->result_type() == STRING_RESULT &&
- (*value)->result_type() != STRING_RESULT &&
- field->cmp_type() != (*value)->result_type())
- return;
-
- /*
- We can't use indexes if the effective collation
- of the operation differ from the field collation.
- */
- if (field->result_type() == STRING_RESULT &&
- (*value)->result_type() == STRING_RESULT &&
- field->cmp_type() == STRING_RESULT &&
- ((Field_str*)field)->charset() != cond->compare_collation())
- return;
+ if (!eq_func)
+ return;
+ if (field->result_type() == STRING_RESULT)
+ {
+ if ((*value)->result_type() != STRING_RESULT)
+ {
+ if (field->cmp_type() != (*value)->result_type())
+ return;
+ }
+ else
+ {
+ /*
+ We can't use indexes if the effective collation
+ of the operation differs from the field collation.
+ We also cannot use an index on a text column, as the column may
+ contain 'x', 'x\t' and 'x ', and 'read_next_same' will stop after
+ 'x' when searching for WHERE col='x '.
+ */
+ if (field->cmp_type() == STRING_RESULT &&
+ (((Field_str*)field)->charset() != cond->compare_collation() ||
+ ((*value)->type() != Item::NULL_ITEM &&
+ (field->flags & BLOB_FLAG) && !field->binary())))
+ return;
+ }
+ }
}
}
DBUG_ASSERT(num_values == 1);
@@ -2683,7 +2689,7 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level,
!(cond_func->used_tables() & OUTER_REF_TABLE_BIT))
{
Item *tmp=new Item_null;
- if (!tmp) // Should never be true
+ if (unlikely(!tmp)) // Should never be true
return;
add_key_field(key_fields,*and_level,cond_func,
((Item_field*) (cond_func->arguments()[0])->real_item())
@@ -4055,7 +4061,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
rec= keyuse->ref_table_rows;
/*
If there is one 'key_column IS NULL' expression, we can
- use this ref_or_null optimsation of this field
+ use this ref_or_null optimisation of this field
*/
found_ref_or_null|= (keyuse->optimize &
KEY_OPTIMIZE_REF_OR_NULL);
@@ -4572,6 +4578,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
store_key **ref_key= j->ref.key_copy;
byte *key_buff=j->ref.key_buff, *null_ref_key= 0;
+ bool keyuse_uses_no_tables= TRUE;
if (ftkey)
{
j->ref.items[0]=((Item_func*)(keyuse->val))->key_item();
@@ -4591,6 +4598,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
uint maybe_null= test(keyinfo->key_part[i].null_bit);
j->ref.items[i]=keyuse->val; // Save for cond removal
+ keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
if (!keyuse->used_tables &&
!(join->select_options & SELECT_DESCRIBE))
{ // Compare against constant
@@ -4630,7 +4638,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF;
j->ref.null_ref_key= null_ref_key;
}
- else if (ref_key == j->ref.key_copy)
+ else if (keyuse_uses_no_tables)
{
/*
This happen if we are using a constant expression in the ON part
@@ -5425,6 +5433,10 @@ JOIN::join_free(bool full)
if (full)
{
group_fields.delete_elements();
+ /*
+ We can't call delete_elements() on copy_funcs, as this would cause
+ problems in free_elements() because some of the elements would already be deleted.
+ */
tmp_table_param.copy_funcs.empty();
tmp_table_param.cleanup();
}
@@ -5609,7 +5621,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order)
}
if ((ref=order_tables & (not_const_tables ^ first_table)))
{
- if (only_eq_ref_tables(join,first_order,ref))
+ if (!(order_tables & first_table) && only_eq_ref_tables(join,first_order,ref))
{
DBUG_PRINT("info",("removing: %s", order->item[0]->full_name()));
continue;
@@ -5732,7 +5744,10 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father,
Item *right_item= func->arguments()[1];
Item_func::Functype functype= func->functype();
- if (right_item->eq(field,0) && left_item != value)
+ if (right_item->eq(field,0) && left_item != value &&
+ (left_item->result_type() != STRING_RESULT ||
+ value->result_type() != STRING_RESULT ||
+ left_item->collation.collation == value->collation.collation))
{
Item *tmp=value->new_item();
if (tmp)
@@ -5750,7 +5765,10 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father,
func->set_cmp_func();
}
}
- else if (left_item->eq(field,0) && right_item != value)
+ else if (left_item->eq(field,0) && right_item != value &&
+ (right_item->result_type() != STRING_RESULT ||
+ value->result_type() != STRING_RESULT ||
+ right_item->collation.collation == value->collation.collation))
{
Item *tmp=value->new_item();
if (tmp)
@@ -5871,62 +5889,6 @@ propagate_cond_constants(I_List<COND_CMP> *save_list,COND *and_father,
/*
- Eliminate NOT functions from the condition tree.
-
- SYNPOSIS
- eliminate_not_funcs()
- thd thread handler
- cond condition tree
-
- DESCRIPTION
- Eliminate NOT functions from the condition tree where it's possible.
- Recursively traverse condition tree to find all NOT functions.
- Call neg_transformer() method for negated arguments.
-
- NOTE
- If neg_transformer() returned a new condition we call fix_fields().
- We don't delete any items as it's not needed. They will be deleted
- later at once.
-
- RETURN
- New condition tree
-*/
-
-COND *eliminate_not_funcs(THD *thd, COND *cond)
-{
- DBUG_ENTER("eliminate_not_funcs");
-
- if (!cond)
- DBUG_RETURN(cond);
- if (cond->type() == Item::COND_ITEM) /* OR or AND */
- {
- List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
- Item *item;
- while ((item= li++))
- {
- Item *new_item= eliminate_not_funcs(thd, item);
- if (item != new_item)
- VOID(li.replace(new_item)); /* replace item with a new condition */
- }
- }
- else if (cond->type() == Item::FUNC_ITEM && /* 'NOT' operation? */
- ((Item_func*) cond)->functype() == Item_func::NOT_FUNC)
- {
- COND *new_cond= ((Item_func*) cond)->arguments()[0]->neg_transformer(thd);
- if (new_cond)
- {
- /*
- Here we can delete the NOT function. Something like: delete cond;
- But we don't need to do it. All items will be deleted later at once.
- */
- cond= new_cond;
- }
- }
- DBUG_RETURN(cond);
-}
-
-
-/*
Simplify joins replacing outer joins by inner joins whenever it's possible
SYNOPSIS
@@ -5990,9 +5952,9 @@ COND *eliminate_not_funcs(THD *thd, COND *cond)
The function removes all unnecessary braces from the expression
produced by the conversions.
- E.g. SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a t3.b=t1.b
+ E.g. SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
finally is converted to:
- SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a t3.b=t1.b
+ SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
It also will remove braces from the following queries:
SELECT * from (t1 LEFT JOIN t2 ON t2.a=t1.a) LEFT JOIN t3 ON t3.b=t2.b
@@ -6162,27 +6124,26 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
}
return conds;
}
-
+
+
static COND *
optimize_cond(JOIN *join, COND *conds, Item::cond_result *cond_value)
{
+ SELECT_LEX *select= thd->lex->current_select;
DBUG_ENTER("optimize_cond");
THD *thd= join->thd;
SELECT_LEX *select= thd->lex->current_select;
if (select->first_cond_optimization)
{
- Item_arena *arena= thd->current_arena;
- Item_arena backup;
- if (arena)
- thd->set_n_backup_item_arena(arena, &backup);
+ Item_arena *arena, backup;
+ select->first_cond_optimization= 0;
- if (conds)
- {
- DBUG_EXECUTE("where",print_where(conds,"original"););
- /* eliminate NOT operators */
- conds= eliminate_not_funcs(thd, conds);
- }
+ arena= thd->current_arena;
+ if (!arena->is_stmt_prepare())
+ arena= 0;
+ else
+ thd->set_n_backup_item_arena(arena, &backup);
/* Convert all outer joins to inner joins if possible */
conds= simplify_joins(join, join->join_list, conds, TRUE);
@@ -6196,19 +6157,21 @@ optimize_cond(JOIN *join, COND *conds, Item::cond_result *cond_value)
if (!conds)
{
*cond_value= Item::COND_TRUE;
- DBUG_RETURN(conds);
+ select->prep_where= 0;
+ }
+ else
+ {
+ DBUG_EXECUTE("where", print_where(conds, "after negation elimination"););
+ /* change field = field to field = const for each found field = const */
+ propagate_cond_constants((I_List<COND_CMP> *) 0,conds,conds);
+ /*
+ Remove all instances of item == item
+ Remove all and-levels where CONST item != CONST item
+ */
+ DBUG_EXECUTE("where",print_where(conds,"after const change"););
+ conds= remove_eq_conds(thd, conds, cond_value) ;
+ DBUG_EXECUTE("info",print_where(conds,"after remove"););
}
-
- DBUG_EXECUTE("where", print_where(conds, "after negation elimination"););
- /* change field = field to field = const for each found field = const */
- propagate_cond_constants((I_List<COND_CMP> *) 0,conds,conds);
- /*
- Remove all instances of item == item
- Remove all and-levels where CONST item != CONST item
- */
- DBUG_EXECUTE("where",print_where(conds,"after const change"););
- conds= remove_eq_conds(thd, conds, cond_value) ;
- DBUG_EXECUTE("info",print_where(conds,"after remove"););
DBUG_RETURN(conds);
}
@@ -6536,7 +6499,7 @@ static Field* create_tmp_field_from_item(THD *thd,
copy_func If set and item is a function, store copy of item
in this array
from_field if field will be created using other field as example,
- pointer example field will be written here
+ pointer example field will be written here
group 1 if we are going to do a relative group by on result
modify_item 1 if item->result_field should point to new item.
This is relevent for how fill_record() is going to
@@ -6545,7 +6508,7 @@ static Field* create_tmp_field_from_item(THD *thd,
the record in the original table.
If modify_item is 0 then fill_record() will update
the temporary table
-
+
RETURN
0 on error
new_created field
@@ -6569,13 +6532,13 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
return new Field_double(item_sum->max_length,maybe_null,
item->name, table, item_sum->decimals);
case Item_sum::VARIANCE_FUNC: /* Place for sum & count */
- case Item_sum::STD_FUNC:
+ case Item_sum::STD_FUNC:
if (group)
return new Field_string(sizeof(double)*2+sizeof(longlong),
0, item->name,table,&my_charset_bin);
else
return new Field_double(item_sum->max_length, maybe_null,
- item->name,table,item_sum->decimals);
+ item->name,table,item_sum->decimals);
case Item_sum::UNIQUE_USERS_FUNC:
return new Field_long(9,maybe_null,item->name,table,1);
default:
@@ -6683,7 +6646,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
else // if we run out of slots or we are not using tempool
sprintf(path,"%s%s%lx_%lx_%x",mysql_tmpdir,tmp_file_prefix,current_pid,
thd->thread_id, thd->tmp_table++);
-
+
if (lower_case_table_names)
my_casedn_str(files_charset_info, path);
@@ -6799,14 +6762,21 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
tmp_from_field++;
*(reg_field++)= new_field;
reclength+=new_field->pack_length();
- if (!(new_field->flags & NOT_NULL_FLAG))
- null_count++;
if (new_field->flags & BLOB_FLAG)
{
*blob_field++= new_field;
blob_count++;
}
((Item_sum*) item)->args[i]= new Item_field(new_field);
+ if (!(new_field->flags & NOT_NULL_FLAG))
+ {
+ null_count++;
+ /*
+ new_field->maybe_null() is still false; it will be
+ changed below. But we have to set up the Item_field correctly.
+ */
+ ((Item_sum*) item)->args[i]->maybe_null=1;
+ }
}
}
}
@@ -7339,6 +7309,18 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
new_table.no_rows=1;
}
+#ifdef TO_BE_DONE_LATER_IN_4_1
+ /*
+ To use start_bulk_insert() (which is new in 4.1) we need to find
+ all places where a corresponding end_bulk_insert() should be put.
+ */
+ table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */
+ new_table.file->start_bulk_insert(table->file->records);
+#else
+ /* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
+ new_table.file->extra(HA_EXTRA_WRITE_CACHE);
+#endif
+
/* copy all old rows */
while (!table->file->rnd_next(new_table.record[1]))
{
@@ -9275,9 +9257,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
keys.merge(table->used_keys);
/*
- We are adding here also the index speified in FORCE INDEX clause,
+ We are adding here also the index specified in FORCE INDEX clause,
if any.
- This is to allow users to use index in ORDER BY.
+ This is to allow users to use index in ORDER BY.
*/
if (table->force_index)
keys.merge(table->keys_in_use_for_query);
@@ -10054,7 +10036,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array,
Item *itemptr=*order->item;
if (itemptr->type() == Item::INT_ITEM)
{ /* Order by position */
- uint count= itemptr->val_int();
+ uint count= (uint) itemptr->val_int();
if (!count || count > fields.elements)
{
my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),
@@ -10067,13 +10049,18 @@ find_order_in_list(THD *thd, Item **ref_pointer_array,
return 0;
}
uint counter;
- Item **item= find_item_in_list(itemptr, fields, &counter, IGNORE_ERRORS);
- if (item)
+ Item **item= find_item_in_list(itemptr, fields, &counter,
+ REPORT_EXCEPT_NOT_FOUND);
+ if (!item)
+ return 1;
+
+ if (item != (Item **)not_found_item)
{
order->item= ref_pointer_array + counter;
order->in_field_list=1;
return 0;
}
+
order->in_field_list=0;
Item *it= *order->item;
/*
@@ -10536,7 +10523,16 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
{
if (!(pos= new Item_copy_string(pos)))
goto err;
- if (param->copy_funcs.push_back(pos))
+ /*
+ Item_copy_string::copy for a function can call
+ Item_copy_string::val_int for a blob via Item_ref.
+ But if Item_copy_string::copy for the blob isn't called before,
+ its value will be wrong,
+ so let's insert the Item_copy_string for blobs at the beginning of
+ copy_funcs
+ (for the full test case, see having.test, BUG #4358).
+ */
+ if (param->copy_funcs.push_front(pos))
goto err;
}
else
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 284e4315917..a1487693b79 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -506,4 +506,3 @@ bool cp_buffer_from_ref(TABLE_REF *ref);
bool error_if_full_join(JOIN *join);
int report_error(TABLE *table, int error);
int safe_index_read(JOIN_TAB *tab);
-COND *eliminate_not_funcs(THD *thd, COND *cond);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 9e26a22cfa0..6322d99582d 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -693,6 +693,7 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild,
TABLE *table;
handler *file;
char tmp[MAX_FIELD_WIDTH];
+ char tmp1[MAX_FIELD_WIDTH];
Item *item;
Protocol *protocol= thd->protocol;
DBUG_ENTER("mysqld_show_fields");
@@ -779,9 +780,24 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild,
else if (field->unireg_check != Field::NEXT_NUMBER &&
!field->is_null())
{ // Not null by default
+ /*
+ Note: we have to convert the default value into
+ system_charset_info before sending.
+ This is necessary for "SET NAMES binary":
+ If the client character set is binary, we want to
+ send metadata in UTF8 rather than in the column's
+ character set.
+ This conversion also makes "SHOW COLUMNS" and
+ "SHOW CREATE TABLE" output consistent. Without
+ this conversion the default values were displayed
+ differently.
+ */
+ String def(tmp1,sizeof(tmp1), system_charset_info);
type.set(tmp, sizeof(tmp), field->charset());
field->val_str(&type);
- protocol->store(type.ptr(),type.length(),type.charset());
+ def.copy(type.ptr(), type.length(), type.charset(),
+ system_charset_info);
+ protocol->store(def.ptr(), def.length(), def.charset());
}
else if (field->unireg_check == Field::NEXT_NUMBER ||
field->maybe_null())
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 79365b7481b..ad32305e9b4 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -95,9 +95,18 @@ public:
Ptr[str_length]=0;
return Ptr;
}
+ inline char *c_ptr_safe()
+ {
+ if (Ptr && str_length < Alloced_length)
+ Ptr[str_length]=0;
+ else
+ (void) realloc(str_length);
+ return Ptr;
+ }
void set(String &str,uint32 offset,uint32 arg_length)
{
+ DBUG_ASSERT(&str != this);
free();
Ptr=(char*) str.ptr()+offset; str_length=arg_length; alloced=0;
if (str.Alloced_length)
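c_ptr_safe() differs from c_ptr() in that it writes the terminating '\0' only when the allocation has room for it and reallocates otherwise. A minimal standalone buffer type (not the real String class) showing that distinction:

/* Minimal illustration of the c_ptr_safe() idea; not the real String. */
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct buf
{
  char  *ptr;
  size_t length;     /* used bytes, without terminator */
  size_t alloced;    /* allocated bytes                */

  char *c_ptr_safe()
  {
    if (ptr && length < alloced)
      ptr[length]= '\0';          /* room for the terminator: just write it */
    else
    {
      char *p= (char*) std::realloc(ptr, length + 1);
      if (!p)
        return ptr;               /* out of memory: caller sees old pointer */
      ptr= p;
      alloced= length + 1;
      ptr[length]= '\0';
    }
    return ptr;
  }
};

int main()
{
  buf b;
  b.length= 5;
  b.alloced= 5;                              /* exactly full, no room for \0 */
  b.ptr= (char*) std::malloc(b.alloced);
  std::memcpy(b.ptr, "hello", 5);
  std::printf("%s\n", b.c_ptr_safe());       /* terminator added safely */
  std::free(b.ptr);
  return 0;
}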
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 93fb7930da7..03c20be198e 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1089,7 +1089,6 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
keys List of keys to create
tmp_table Set to 1 if this is an internal temporary table
(From ALTER TABLE)
- no_log Don't log the query to binary log.
DESCRIPTION
If one creates a temporary table, this is automaticly opened
@@ -1107,7 +1106,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
int mysql_create_table(THD *thd,const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &fields,
- List<Key> &keys,bool tmp_table,bool no_log,
+ List<Key> &keys,bool tmp_table,
uint select_field_count)
{
char path[FN_REFLEN];
@@ -1206,7 +1205,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
DBUG_RETURN(-1);
}
- if (wait_if_global_read_lock(thd, 0))
+ if (wait_if_global_read_lock(thd, 0, 1))
DBUG_RETURN(error);
VOID(pthread_mutex_lock(&LOCK_open));
if (!tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
@@ -1276,7 +1275,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
}
thd->tmp_table_used= 1;
}
- if (!tmp_table && !no_log && mysql_bin_log.is_open())
+ if (!tmp_table && mysql_bin_log.is_open())
{
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
@@ -1346,6 +1345,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
TABLE *table;
tmp_table.table_name=0;
uint select_field_count= items->elements;
+ Disable_binlog disable_binlog(thd);
DBUG_ENTER("create_table_from_items");
/* Add selected items to field list */
@@ -1377,9 +1377,16 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
/* create and lock table */
/* QQ: This should be done atomic ! */
if (mysql_create_table(thd, create_table->db, create_table->real_name,
- create_info, *extra_fields, *keys, 0, 1,
- select_field_count)) // no logging
+ create_info, *extra_fields, *keys, 0,
+ select_field_count))
DBUG_RETURN(0);
+ /*
+ If this is a HEAP table, the automatic DELETE FROM, which is written to the
+ binlog when a HEAP table is opened for the first time since startup, must
+ not be written: 1) it would be wrong (imagine we're in CREATE SELECT: we
+ don't want to delete from it); 2) it would be written before the CREATE
+ TABLE, which is the wrong order. So we keep binary logging disabled.
+ */
if (!(table= open_table(thd, create_table, 0, (bool*) 0)))
{
quick_rm_table(create_info->db_type, create_table->db,
@@ -1398,6 +1405,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
}
table->file->extra(HA_EXTRA_WRITE_CACHE);
DBUG_RETURN(table);
+ /* Note that leaving the function resets binlogging properties */
}
@@ -1567,7 +1575,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table,
reg_ext))
DBUG_RETURN(-1); // protect buffer overflow
- my_snprintf(dst_path, sizeof(dst_path), "%s/%s/%s",
+ my_snprintf(dst_path, sizeof(dst_path), "%s%s/%s",
mysql_real_data_home, db, table_name);
if (lock_and_wait_for_table_name(thd,table))
@@ -1814,7 +1822,6 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables,
dropping_tables--;
}
thd->exit_cond(old_message);
- pthread_mutex_unlock(&LOCK_open);
if (thd->killed)
goto err;
open_for_modify=0;
@@ -2688,7 +2695,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (do_send_ok)
send_ok(thd);
}
- else
+ else if (error > 0)
{
table->file->print_error(error, MYF(0));
error= -1;
@@ -3006,12 +3013,14 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
else
create_info->data_file_name=create_info->index_file_name=0;
-
- if ((error=mysql_create_table(thd, new_db, tmp_name,
- create_info,
- create_list,key_list,1,1,0))) // no logging
- DBUG_RETURN(error);
-
+ {
+ /* We don't log the statement; it will be logged later */
+ Disable_binlog disable_binlog(thd);
+ if ((error=mysql_create_table(thd, new_db, tmp_name,
+ create_info,
+ create_list,key_list,1,0)))
+ DBUG_RETURN(error);
+ }
if (table->tmp_table)
{
TABLE_LIST tbl;
@@ -3278,11 +3287,11 @@ copy_data_between_tables(TABLE *from,TABLE *to,
uint length;
SORT_FIELD *sortorder;
READ_RECORD info;
- Field *next_field;
TABLE_LIST tables;
List<Item> fields;
List<Item> all_fields;
ha_rows examined_rows;
+ bool auto_increment_field_copied= 0;
DBUG_ENTER("copy_data_between_tables");
if (!(copy= new Copy_field[to->fields]))
@@ -3299,7 +3308,12 @@ copy_data_between_tables(TABLE *from,TABLE *to,
{
def=it++;
if (def->field)
+ {
+ if (*ptr == to->next_number_field)
+ auto_increment_field_copied= TRUE;
(copy_end++)->set(*ptr,def->field,0);
+ }
+
}
found_count=delete_count=0;
@@ -3335,7 +3349,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
error= 1;
goto err;
}
-
+
/* Handler must be told explicitly to retrieve all columns, because
this function does not set field->query_id in the columns to the
current query id */
@@ -3344,7 +3358,6 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (handle_duplicates == DUP_IGNORE ||
handle_duplicates == DUP_REPLACE)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- next_field=to->next_number_field;
thd->row_count= 0;
while (!(error=info.read_record(&info)))
{
@@ -3355,10 +3368,17 @@ copy_data_between_tables(TABLE *from,TABLE *to,
break;
}
thd->row_count++;
- if (next_field)
- next_field->reset();
+ if (to->next_number_field)
+ {
+ if (auto_increment_field_copied)
+ to->auto_increment_field_not_null= TRUE;
+ else
+ to->next_number_field->reset();
+ }
for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++)
+ {
copy_ptr->do_copy(copy_ptr);
+ }
if ((error=to->file->write_row((byte*) to->record[0])))
{
if ((handle_duplicates != DUP_IGNORE &&
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index d6b776571f2..827d75a9848 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -292,7 +292,9 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
{
Item_arena *arena= thd->current_arena;
Item_arena backup;
- if (arena)
+ if (!arena->is_stmt_prepare())
+ arena= 0;
+ else
thd->set_n_backup_item_arena(arena, &backup);
Field **field;
for (field= table->field; *field; field++)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index ba3a36f2c34..19b6aa4b385 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -503,6 +503,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CASE_SYM
%token CONCAT
%token CONCAT_WS
+%token CONVERT_TZ_SYM
%token CURDATE
%token CURTIME
%token DATABASE
@@ -1116,7 +1117,10 @@ create:
lex->col_list.empty();
}
| CREATE DATABASE opt_if_not_exists ident
- { Lex->create_info.default_table_charset=NULL; }
+ {
+ Lex->create_info.default_table_charset= NULL;
+ Lex->create_info.used_fields= 0;
+ }
opt_create_database_options
{
LEX *lex=Lex;
@@ -2027,7 +2031,8 @@ sp_fetch_list:
YYABORT;
}
else
- { /* An SP local variable */
+ {
+ /* An SP local variable */
sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction();
i->add_to_varlist(spv);
@@ -2048,7 +2053,8 @@ sp_fetch_list:
YYABORT;
}
else
- { /* An SP local variable */
+ {
+ /* An SP local variable */
sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction();
i->add_to_varlist(spv);
@@ -2323,11 +2329,11 @@ create_select:
*/
lex->current_select->table_list.save_and_clear(&lex->save_list);
mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LEX_NODE::SELECT_LIST;
+ lex->current_select->parsing_place= SELECT_LIST;
}
select_options select_item_list
{
- Select->parsing_place= SELECT_LEX_NODE::NO_MATTER;
+ Select->parsing_place= NO_MATTER;
}
opt_select_from
{
@@ -2352,11 +2358,8 @@ create_database_options:
| create_database_options create_database_option {};
create_database_option:
- opt_default COLLATE_SYM collation_name_or_default
- { Lex->create_info.default_table_charset=$3; }
- | opt_default charset charset_name_or_default
- { Lex->create_info.default_table_charset=$3; }
- ;
+ default_collation {}
+ | default_charset {};
opt_table_options:
/* empty */ { $$= 0; }
@@ -2418,21 +2421,46 @@ create_table_option:
table_list->next_local= 0;
lex->create_info.used_fields|= HA_CREATE_USED_UNION;
}
- | opt_default charset opt_equal charset_name_or_default
- {
- Lex->create_info.default_table_charset= $4;
- Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
- }
- | opt_default COLLATE_SYM opt_equal collation_name_or_default
- {
- Lex->create_info.default_table_charset= $4;
- Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
- }
+ | default_charset
+ | default_collation
| INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;}
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->create_info.data_file_name= $4.str; }
| INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; };
+default_charset:
+ opt_default charset opt_equal charset_name_or_default
+ {
+ HA_CREATE_INFO *cinfo= &Lex->create_info;
+ if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) &&
+ cinfo->default_table_charset && $4 &&
+ !my_charset_same(cinfo->default_table_charset,$4))
+ {
+ net_printf(YYTHD, ER_CONFLICTING_DECLARATIONS,
+ "CHARACTER SET ", cinfo->default_table_charset->csname,
+ "CHARACTER SET ", $4->csname);
+ YYABORT;
+ }
+ Lex->create_info.default_table_charset= $4;
+ Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
+ };
+
+default_collation:
+ opt_default COLLATE_SYM opt_equal collation_name_or_default
+ {
+ HA_CREATE_INFO *cinfo= &Lex->create_info;
+ if ((cinfo->used_fields & HA_CREATE_USED_DEFAULT_CHARSET) &&
+ cinfo->default_table_charset && $4 &&
+ !my_charset_same(cinfo->default_table_charset,$4))
+ {
+ net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH,
+ $4->name, cinfo->default_table_charset->csname);
+ YYABORT;
+ }
+ Lex->create_info.default_table_charset= $4;
+ Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
+ };
+
storage_engines:
ident_or_text
{
@@ -3042,7 +3070,12 @@ alter:
}
alter_list
{}
- | ALTER DATABASE ident opt_create_database_options
+ | ALTER DATABASE ident
+ {
+ Lex->create_info.default_table_charset= NULL;
+ Lex->create_info.used_fields= 0;
+ }
+ opt_create_database_options
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_ALTER_DB;
@@ -3193,6 +3226,12 @@ alter_list_item:
LEX *lex=Lex;
lex->select_lex.db=$3->db.str;
lex->name= $3->table.str;
+ if (check_table_name($3->table.str,$3->table.length) ||
+ $3->db.str && check_db_name($3->db.str))
+ {
+ net_printf(lex->thd,ER_WRONG_TABLE_NAME,$3->table.str);
+ YYABORT;
+ }
lex->alter_info.flags|= ALTER_RENAME;
}
| CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
@@ -3631,11 +3670,11 @@ select_part2:
lex->lock_option= TL_READ;
if (sel->linkage != UNION_TYPE)
mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LEX_NODE::SELECT_LIST;
+ lex->current_select->parsing_place= SELECT_LIST;
}
select_options select_item_list
{
- Select->parsing_place= SELECT_LEX_NODE::NO_MATTER;
+ Select->parsing_place= NO_MATTER;
}
select_into select_lock_type;
@@ -3986,8 +4025,14 @@ simple_expr:
| '+' expr %prec NEG { $$= $2; }
| '-' expr %prec NEG { $$= new Item_func_neg($2); }
| '~' expr %prec NEG { $$= new Item_func_bit_neg($2); }
- | NOT expr %prec NEG { $$= new Item_func_not($2); }
- | '!' expr %prec NEG { $$= new Item_func_not($2); }
+ | NOT expr %prec NEG
+ {
+ $$= negate_expression(YYTHD, $2);
+ }
+ | '!' expr %prec NEG
+ {
+ $$= negate_expression(YYTHD, $2);
+ }
| '(' expr ')' { $$= $2; }
| '(' expr ',' expr_list ')'
{
@@ -4009,8 +4054,7 @@ simple_expr:
| ASCII_SYM '(' expr ')' { $$= new Item_func_ascii($3); }
| BINARY expr %prec NEG
{
- $$= new Item_func_set_collation($2,new Item_string(binary_keyword,
- 6, &my_charset_latin1));
+ $$= create_func_cast($2, ITEM_CAST_CHAR, -1, &my_charset_bin);
}
| CAST_SYM '(' expr AS cast_type ')'
{
@@ -4098,6 +4142,11 @@ simple_expr:
{ $$= new Item_func_concat(* $3); }
| CONCAT_WS '(' expr ',' expr_list ')'
{ $$= new Item_func_concat_ws($3, *$5); }
+ | CONVERT_TZ_SYM '(' expr ',' expr ',' expr ')'
+ {
+ Lex->time_zone_tables_used= &fake_time_zone_tables_list;
+ $$= new Item_func_convert_tz($3, $5, $7);
+ }
| CURDATE optional_braces
{ $$= new Item_func_curdate_local(); Lex->safe_to_cache_query=0; }
| CURTIME optional_braces
@@ -4833,11 +4882,11 @@ select_derived:
YYABORT;
mysql_init_select(lex);
lex->current_select->linkage= DERIVED_TABLE_TYPE;
- lex->current_select->parsing_place= SELECT_LEX_NODE::SELECT_LIST;
+ lex->current_select->parsing_place= SELECT_LIST;
}
select_options select_item_list
{
- Select->parsing_place= SELECT_LEX_NODE::NO_MATTER;
+ Select->parsing_place= NO_MATTER;
}
opt_select_from union_opt
;
@@ -4934,12 +4983,15 @@ interval_time_st:
| MONTH_SYM { $$=INTERVAL_MONTH; }
| QUARTER_SYM { $$=INTERVAL_QUARTER; }
| SECOND_SYM { $$=INTERVAL_SECOND; }
- | YEAR_SYM { $$=INTERVAL_YEAR; };
+ | YEAR_SYM { $$=INTERVAL_YEAR; }
+ ;
date_time_type:
- DATE_SYM {$$=MYSQL_TIMESTAMP_DATE;}
- | TIME_SYM {$$=MYSQL_TIMESTAMP_TIME;}
- | DATETIME {$$=MYSQL_TIMESTAMP_DATETIME;};
+ DATE_SYM {$$=MYSQL_TIMESTAMP_DATE;}
+ | TIME_SYM {$$=MYSQL_TIMESTAMP_TIME;}
+ | DATETIME {$$=MYSQL_TIMESTAMP_DATETIME;}
+ | TIMESTAMP {$$=MYSQL_TIMESTAMP_DATETIME;}
+ ;
table_alias:
/* empty */
@@ -4958,11 +5010,17 @@ opt_all:
where_clause:
/* empty */ { Select->where= 0; }
- | WHERE expr
+ | WHERE
+ {
+ Select->parsing_place= IN_WHERE;
+ }
+ expr
{
- Select->where= $2;
- if ($2)
- $2->top_level_item();
+ SELECT_LEX *select= Select;
+ select->where= $3;
+ select->parsing_place= NO_MATTER;
+ if ($3)
+ $3->top_level_item();
}
;
@@ -4970,13 +5028,13 @@ having_clause:
/* empty */
| HAVING
{
- Select->parsing_place= SELECT_LEX_NODE::IN_HAVING;
+ Select->parsing_place= IN_HAVING;
}
expr
{
SELECT_LEX *sel= Select;
sel->having= $3;
- sel->parsing_place= SELECT_LEX_NODE::NO_MATTER;
+ sel->parsing_place= NO_MATTER;
if ($3)
$3->top_level_item();
}
@@ -5730,7 +5788,7 @@ show_param:
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS;
} opt_limit_clause_init
- | keys_or_index FROM table_ident opt_db
+ | keys_or_index from_or_in table_ident opt_db
{
Lex->sql_command= SQLCOM_SHOW_KEYS;
if ($4)
@@ -6312,7 +6370,7 @@ simple_ident:
else
{
SELECT_LEX *sel=Select;
- $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING ||
+ $$= (sel->parsing_place != IN_HAVING ||
sel->get_in_sum_expr() > 0) ?
(Item*) new Item_field(NullS,NullS,$1.str) :
(Item*) new Item_ref(0,0, NullS,NullS,$1.str);
@@ -6325,7 +6383,7 @@ simple_ident_nospvar:
ident
{
SELECT_LEX *sel=Select;
- $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING ||
+ $$= (sel->parsing_place != IN_HAVING ||
sel->get_in_sum_expr() > 0) ?
(Item*) new Item_field(NullS,NullS,$1.str) :
(Item*) new Item_ref(0,0,NullS,NullS,$1.str);
@@ -6345,7 +6403,7 @@ simple_ident_q:
ER(ER_TABLENAME_NOT_ALLOWED_HERE),
MYF(0), $1.str, thd->where);
}
- $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING ||
+ $$= (sel->parsing_place != IN_HAVING ||
sel->get_in_sum_expr() > 0) ?
(Item*) new Item_field(NullS,$1.str,$3.str) :
(Item*) new Item_ref(0,0,NullS,$1.str,$3.str);
@@ -6361,7 +6419,7 @@ simple_ident_q:
ER(ER_TABLENAME_NOT_ALLOWED_HERE),
MYF(0), $2.str, thd->where);
}
- $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING ||
+ $$= (sel->parsing_place != IN_HAVING ||
sel->get_in_sum_expr() > 0) ?
(Item*) new Item_field(NullS,$2.str,$4.str) :
(Item*) new Item_ref(0,0,NullS,$2.str,$4.str);
@@ -6377,7 +6435,7 @@ simple_ident_q:
ER(ER_TABLENAME_NOT_ALLOWED_HERE),
MYF(0), $3.str, thd->where);
}
- $$= (sel->parsing_place != SELECT_LEX_NODE::IN_HAVING ||
+ $$= (sel->parsing_place != IN_HAVING ||
sel->get_in_sum_expr() > 0) ?
(Item*) new Item_field((YYTHD->client_capabilities &
CLIENT_NO_SCHEMA ? NullS : $1.str),
@@ -6791,7 +6849,8 @@ option_value:
&$1.base_name, $3));
}
else
- { /* An SP local variable */
+ {
+ /* An SP local variable */
sp_pcontext *ctx= lex->spcont;
sp_pvar_t *spv;
sp_instr_set *i;
@@ -6875,16 +6934,24 @@ internal_variable_name:
/* We have to lookup here since local vars can shadow sysvars */
if (!spc || !(spv = spc->find_pvar(&$1)))
- { /* Not an SP local variable */
+ {
+ /* Not an SP local variable */
sys_var *tmp=find_sys_var($1.str, $1.length);
if (!tmp)
YYABORT;
$$.var= tmp;
$$.base_name.str=0;
$$.base_name.length=0;
+ /*
+ If this is time_zone variable we should open time zone
+ describing tables
+ */
+ if (tmp == &sys_time_zone)
+ Lex->time_zone_tables_used= &fake_time_zone_tables_list;
}
else
- { /* An SP local variable */
+ {
+ /* An SP local variable */
$$.var= NULL;
$$.base_name= $1;
}
diff --git a/sql/table.cc b/sql/table.cc
index 1764df75a7e..5d0c60718d3 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1111,6 +1111,17 @@ void append_unescaped(String *res, const char *pos, uint length)
for (; pos != end ; pos++)
{
+#if defined(USE_MB) && MYSQL_VERSION_ID < 40100
+ uint mblen;
+ if (use_mb(default_charset_info) &&
+ (mblen= my_ismbchar(default_charset_info, pos, end)))
+ {
+ res->append(pos, mblen);
+ pos+= mblen;
+ continue;
+ }
+#endif
+
switch (*pos) {
case 0: /* Must be escaped for 'mysql' */
res->append('\\');
diff --git a/sql/time.cc b/sql/time.cc
index 132612e53c5..4421b6aa00f 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -20,166 +20,9 @@
#include "mysql_priv.h"
#include <m_ctype.h>
-static ulong const days_at_timestart=719528; /* daynr at 1970.01.01 */
-uchar *days_in_month= (uchar*) "\037\034\037\036\037\036\037\037\036\037\036\037";
-
-
-/*
- Offset of system time zone from UTC in seconds used to speed up
- work of my_system_gmt_sec() function.
-*/
-static long my_time_zone=0;
-
-
-/*
- Prepare offset of system time zone from UTC for my_system_gmt_sec() func.
-
- SYNOPSIS
- init_time()
-*/
-void init_time(void)
-{
- time_t seconds;
- struct tm *l_time,tm_tmp;;
- TIME my_time;
- bool not_used;
-
- seconds= (time_t) time((time_t*) 0);
- localtime_r(&seconds,&tm_tmp);
- l_time= &tm_tmp;
- my_time_zone= 3600; /* Comp. for -3600 in my_gmt_sec */
- my_time.year= (uint) l_time->tm_year+1900;
- my_time.month= (uint) l_time->tm_mon+1;
- my_time.day= (uint) l_time->tm_mday;
- my_time.hour= (uint) l_time->tm_hour;
- my_time.minute= (uint) l_time->tm_min;
- my_time.second= (uint) l_time->tm_sec;
- my_system_gmt_sec(&my_time, &my_time_zone, &not_used); /* Init my_time_zone */
-}
-
-
-/*
- Convert time in TIME representation in system time zone to its
- my_time_t form (number of seconds in UTC since begginning of Unix Epoch).
-
- SYNOPSIS
- my_system_gmt_sec()
- t - time value to be converted
- my_timezone - pointer to long where offset of system time zone
- from UTC will be stored for caching
- in_dst_time_gap - set to true if time falls into spring time-gap
-
- NOTES
- The idea is to cache the time zone offset from UTC (including daylight
- saving time) for the next call to make things faster. But currently we
- just calculate this offset during startup (by calling init_time()
- function) and use it all the time.
- Time value provided should be legal time value (e.g. '2003-01-01 25:00:00'
- is not allowed).
-
- RETURN VALUE
- Time in UTC seconds since Unix Epoch representation.
-*/
-my_time_t
-my_system_gmt_sec(const TIME *t, long *my_timezone, bool *in_dst_time_gap)
-{
- uint loop;
- time_t tmp;
- struct tm *l_time,tm_tmp;
- long diff, current_timezone;
-
- /*
- Calculate the gmt time based on current time and timezone
- The -1 on the end is to ensure that if have a date that exists twice
- (like 2002-10-27 02:00:0 MET), we will find the initial date.
-
- By doing -3600 we will have to call localtime_r() several times, but
- I couldn't come up with a better way to get a repeatable result :(
-
- We can't use mktime() as it's buggy on many platforms and not thread safe.
- */
- tmp=(time_t) (((calc_daynr((uint) t->year,(uint) t->month,(uint) t->day) -
- (long) days_at_timestart)*86400L + (long) t->hour*3600L +
- (long) (t->minute*60 + t->second)) + (time_t) my_time_zone -
- 3600);
- current_timezone= my_time_zone;
-
- localtime_r(&tmp,&tm_tmp);
- l_time=&tm_tmp;
- for (loop=0;
- loop < 2 &&
- (t->hour != (uint) l_time->tm_hour ||
- t->minute != (uint) l_time->tm_min);
- loop++)
- { /* One check should be enough ? */
- /* Get difference in days */
- int days= t->day - l_time->tm_mday;
- if (days < -1)
- days= 1; // Month has wrapped
- else if (days > 1)
- days= -1;
- diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour)) +
- (long) (60*((int) t->minute - (int) l_time->tm_min)));
- current_timezone+= diff+3600; // Compensate for -3600 above
- tmp+= (time_t) diff;
- localtime_r(&tmp,&tm_tmp);
- l_time=&tm_tmp;
- }
- /*
- Fix that if we are in the not existing daylight saving time hour
- we move the start of the next real hour
- */
- if (loop == 2 && t->hour != (uint) l_time->tm_hour)
- {
- int days= t->day - l_time->tm_mday;
- if (days < -1)
- days=1; // Month has wrapped
- else if (days > 1)
- days= -1;
- diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour))+
- (long) (60*((int) t->minute - (int) l_time->tm_min)));
- if (diff == 3600)
- tmp+=3600 - t->minute*60 - t->second; // Move to next hour
- else if (diff == -3600)
- tmp-=t->minute*60 + t->second; // Move to previous hour
-
- *in_dst_time_gap= 1;
- }
- *my_timezone= current_timezone;
-
- return (my_time_t) tmp;
-} /* my_system_gmt_sec */
-
/* Some functions to calculate dates */
- /* Calculate nr of day since year 0 in new date-system (from 1615) */
-
-long calc_daynr(uint year,uint month,uint day)
-{
- long delsum;
- int temp;
- DBUG_ENTER("calc_daynr");
-
- if (year == 0 && month == 0 && day == 0)
- DBUG_RETURN(0); /* Skip errors */
- if (year < 200)
- {
- if ((year=year+1900) < 1900+YY_PART_YEAR)
- year+=100;
- }
- delsum= (long) (365L * year+ 31*(month-1) +day);
- if (month <= 2)
- year--;
- else
- delsum-= (long) (month*4+23)/10;
- temp=(int) ((year/100+1)*3)/4;
- DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld",
- year+(month <= 2),month,day,delsum+year/4-temp));
- DBUG_RETURN(delsum+(int) year/4-temp);
-} /* calc_daynr */
-
-
#ifndef TESTTIME
/* Calc weekday from daynr */
/* Returns 0 for monday, 1 for tuesday .... */
diff --git a/sql/tztime.cc b/sql/tztime.cc
index f2d20634ec5..af9af530fec 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -32,6 +32,7 @@
#include "mysql_priv.h"
#else
#include <my_global.h>
+#include <my_time.h>
#include "tztime.h"
#include <my_sys.h>
#endif
@@ -1358,6 +1359,13 @@ static bool tz_inited= 0;
static uint tz_leapcnt= 0;
static LS_INFO *tz_lsis= 0;
+/*
+ Shows whether we found the time zone tables during start-up.
+ Used to avoid putting those tables into the global table list
+ for queries that use time zone info.
+*/
+static bool time_zone_tables_exist= 1;
+
typedef struct st_tz_names_entry: public Sql_alloc
{
@@ -1387,6 +1395,68 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length,
/*
+ Prepare table list with time zone related tables from preallocated array.
+
+ SYNOPSIS
+ tz_init_table_list()
+ tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects.
+
+ DESCRIPTION
+ This function prepares list of TABLE_LIST objects which can be used
+ for opening of time zone tables from preallocated array.
+*/
+
+void
+tz_init_table_list(TABLE_LIST *tz_tabs)
+{
+ bzero(tz_tabs, sizeof(TABLE_LIST) * 4);
+ tz_tabs[0].alias= tz_tabs[0].real_name= (char*)"time_zone_name";
+ tz_tabs[1].alias= tz_tabs[1].real_name= (char*)"time_zone";
+ tz_tabs[2].alias= tz_tabs[2].real_name= (char*)"time_zone_transition_type";
+ tz_tabs[3].alias= tz_tabs[3].real_name= (char*)"time_zone_transition";
+ tz_tabs[0].next= tz_tabs+1;
+ tz_tabs[1].next= tz_tabs+2;
+ tz_tabs[2].next= tz_tabs+3;
+ tz_tabs[0].lock_type= tz_tabs[1].lock_type= tz_tabs[2].lock_type=
+ tz_tabs[3].lock_type= TL_READ;
+ tz_tabs[0].db= tz_tabs[1].db= tz_tabs[2].db= tz_tabs[3].db= (char *)"mysql";
+}
+
+
+/*
+ Create table list with time zone related tables.
+
+ SYNOPSIS
+ my_tz_get_table_list()
+ thd - current thread object
+
+ DESCRIPTION
+ This function creates list of TABLE_LIST objects allocated in thd's
+ memroot, which can be used for opening of time zone tables.
+
+ RETURN VALUES
+ Returns a pointer to the first TABLE_LIST object (could be 0 if the time
+ zone tables don't exist), or &fake_time_zone_tables_list in case of error.
+*/
+
+TABLE_LIST *
+my_tz_get_table_list(THD *thd)
+{
+ TABLE_LIST *tz_tabs;
+
+ if (!time_zone_tables_exist)
+ return 0;
+
+ if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * 4)))
+ return &fake_time_zone_tables_list;
+
+ tz_init_table_list(tz_tabs);
+
+ return tz_tabs;
+}
+
+
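
tz_init_table_list() and my_tz_get_table_list() above only wire four pre-allocated entries into a fixed-order singly linked list, so that all time zone tables can later be opened and locked in one pass. A stand-alone sketch of that wiring, using a simplified stand-in struct rather than the real TABLE_LIST:

#include <cstdio>
#include <cstring>

struct TzTable                        /* illustrative stand-in for TABLE_LIST */
{
  const char *name;
  TzTable *next;
};

static void tz_init_list(TzTable *t)
{
  static const char *names[4]=
  { "time_zone_name", "time_zone", "time_zone_transition_type",
    "time_zone_transition" };
  std::memset(t, 0, sizeof(TzTable) * 4);
  for (int i= 0; i < 4; i++)
  {
    t[i].name= names[i];
    t[i].next= (i < 3) ? &t[i + 1] : 0;   /* last node terminates the list */
  }
}

int main()
{
  TzTable tabs[4];
  tz_init_list(tabs);
  for (TzTable *p= tabs; p; p= p->next)
    std::printf("mysql.%s\n", p->name);
  return 0;
}
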
+/*
Initialize time zone support infrastructure.
SYNOPSIS
@@ -1398,13 +1468,13 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length,
DESCRIPTION
This function will init memory structures needed for time zone support,
it will register mandatory SYSTEM time zone in them. It will try to open
- mysql.time_zone_leap_seconds table and and load information which further
- will be shared among all time zones loaded. It will also try to load
- information about default time zone. If system tables with time zone
- descriptions don't exist it won't fail (unless default_tzname is time zone
- from tables). If bootstrap parameter is true then this routine assumes that
- we are in bootstrap mode and won't load time zone descriptions unless someone
- specifies default time zone which is supposedly stored in those tables.
+ mysql.time_zone* tables and load information about default time zone and
+ information which further will be shared among all time zones loaded.
+ If system tables with time zone descriptions don't exist it won't fail
+ (unless default_tzname is time zone from tables). If bootstrap parameter
+ is true then this routine assumes that we are in bootstrap mode and won't
+ load time zone descriptions unless someone specifies default time zone
+ which is supposedly stored in those tables.
It'll also set default time zone if it is specified.
RETURN VALUES
@@ -1415,14 +1485,13 @@ my_bool
my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
{
THD *thd;
- TABLE_LIST tables;
+ TABLE_LIST *tables= 0;
+ TABLE_LIST tables_buff[5];
TABLE *table;
- TABLE *lock_ptr;
- MYSQL_LOCK *lock;
TZ_NAMES_ENTRY *tmp_tzname;
my_bool return_val= 1;
int res;
- uint not_used;
+ uint counter;
DBUG_ENTER("my_tz_init");
/*
@@ -1467,7 +1536,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
if (bootstrap)
{
/* If we are in bootstrap mode we should not load time zone tables */
- return_val= 0;
+ return_val= time_zone_tables_exist= 0;
goto end_with_setting_default_tz;
}
@@ -1479,28 +1548,25 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
thd->db= my_strdup("mysql",MYF(0));
thd->db_length= 5; // Safety
- bzero((char*) &tables,sizeof(tables));
- tables.alias= tables.real_name= (char*)"time_zone_leap_second";
- tables.lock_type= TL_READ;
- tables.db= thd->db;
-
- if (open_tables(thd, &tables, &not_used))
+ bzero((char*) &tables_buff, sizeof(TABLE_LIST));
+ tables_buff[0].alias= tables_buff[0].real_name=
+ (char*)"time_zone_leap_second";
+ tables_buff[0].lock_type= TL_READ;
+ tables_buff[0].db= thd->db;
+ tables_buff[0].next= tables_buff + 1;
+ /* Fill TABLE_LIST for rest of the time zone describing tables */
+ tz_init_table_list(tables_buff + 1);
+
+ if (open_tables(thd, tables_buff, &counter) ||
+ lock_tables(thd, tables_buff, counter))
{
- sql_print_error("Warning: Can't open time zone table: %s "
- "trying to live without them", thd->net.last_error);
+ sql_print_warning("Can't open and lock time zone table: %s "
+ "trying to live without them", thd->net.last_error);
/* We will try emulate that everything is ok */
- return_val= 0;
+ return_val= time_zone_tables_exist= 0;
goto end_with_setting_default_tz;
}
-
- lock_ptr= tables.table;
- if (!(lock= mysql_lock_tables(thd, &lock_ptr, 1)))
- {
- sql_print_error("Fatal error: Can't lock time zone table: %s",
- thd->net.last_error);
- goto end_with_close;
- }
-
+ tables= tables_buff + 1;
/*
Now we are going to load leap seconds descriptions that are shared
@@ -1513,11 +1579,16 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
{
sql_print_error("Fatal error: Out of memory while loading "
"mysql.time_zone_leap_second table");
- goto end_with_unlock;
+ goto end_with_close;
}
- table= tables.table;
- table->file->ha_index_init(0);
+ table= tables_buff[0].table;
+ /*
+ It is OK to ignore ha_index_init()/ha_index_end() return values since
+ mysql.time_zone* tables are MyISAM and these operations always succeed
+ for MyISAM.
+ */
+ (void)table->file->ha_index_init(0);
tz_leapcnt= 0;
res= table->file->index_first(table->record[0]);
@@ -1529,7 +1600,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
sql_print_error("Fatal error: While loading mysql.time_zone_leap_second"
" table: too much leaps");
table->file->ha_index_end();
- goto end_with_unlock;
+ goto end_with_close;
}
tz_lsis[tz_leapcnt].ls_trans= (my_time_t)table->field[0]->val_int();
@@ -1545,13 +1616,13 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
res= table->file->index_next(table->record[0]);
}
- table->file->ha_index_end();
+ (void)table->file->ha_index_end();
if (res != HA_ERR_END_OF_FILE)
{
sql_print_error("Fatal error: Error while loading "
"mysql.time_zone_leap_second table");
- goto end_with_unlock;
+ goto end_with_close;
}
/*
@@ -1561,19 +1632,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
return_val= 0;
-end_with_unlock:
- mysql_unlock_tables(thd, lock);
-
-end_with_close:
- close_thread_tables(thd);
- thd->version--; /* Force close to free memory */
-
end_with_setting_default_tz:
- /* If not an error and have default time zone try to load it */
- if (!return_val && default_tzname)
+ /* If we have default time zone try to load it */
+ if (default_tzname)
{
String tzname(default_tzname, &my_charset_latin1);
- if (!(global_system_variables.time_zone= my_tz_find(thd, &tzname)))
+ if (!(global_system_variables.time_zone= my_tz_find(&tzname, tables)))
{
sql_print_error("Fatal error: Illegal or unknown default time zone '%s'",
default_tzname);
@@ -1581,6 +1645,10 @@ end_with_setting_default_tz:
}
}
+end_with_close:
+ thd->version--; /* Force close to free memory */
+ close_thread_tables(thd);
+
end_with_cleanup:
/* if there were error free time zone describing structs */
@@ -1624,29 +1692,27 @@ void my_tz_free()
Load time zone description from system tables.
SYNOPSIS
- tz_load_from_db()
- thd - current thread object
- tz_name - name of time zone that should be loaded.
+ tz_load_from_open_tables()
+ tz_name - name of time zone that should be loaded.
+ tz_tables - list of tables from which time zone description
+ should be loaded
DESCRIPTION
- This function will try to open system tables describing time zones
- and to load information about time zone specified. It will also update
- information in hash used for time zones lookup.
+ This function will try to load information about time zone specified
+ from the list of the already opened and locked tables (first table in
+ tz_tables should be time_zone_name, next time_zone, then
+ time_zone_transition_type and time_zone_transition should be last).
+ It will also update information in hash used for time zones lookup.
RETURN VALUES
Returns pointer to newly created Time_zone object or 0 in case of error.
*/
+
static Time_zone*
-tz_load_from_db(THD *thd, const String *tz_name)
+tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
{
- TABLE_LIST tables[4];
TABLE *table= 0;
- TABLE *lock_ptr[4];
- MYSQL_LOCK *lock;
- char system_db_name[]= "mysql";
- char *db_save;
- uint db_length_save;
TIME_ZONE_INFO *tz_info;
TZ_NAMES_ENTRY *tmp_tzname;
Time_zone *return_val= 0;
@@ -1666,17 +1732,15 @@ tz_load_from_db(THD *thd, const String *tz_name)
#ifdef ABBR_ARE_USED
char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
#endif
- uint not_used;
- DBUG_ENTER("tz_load_from_db");
+ DBUG_ENTER("tz_load_from_open_tables");
/* Prepare tz_info for loading also let us make copy of time zone name */
if (!(alloc_buff= alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
tz_name->length() + 1)))
{
- sql_print_error("Error: Out of memory while loading time zone "
- "description");
+ sql_print_error("Out of memory while loading time zone description");
return 0;
}
tz_info= (TIME_ZONE_INFO *)alloc_buff;
@@ -1689,76 +1753,45 @@ tz_load_from_db(THD *thd, const String *tz_name)
strmake(tz_name_buff, tz_name->ptr(), tz_name->length());
/*
- Open and lock time zone description tables
- */
- db_save= thd->db;
- db_length_save= thd->db_length;
- thd->db= system_db_name;
- thd->db_length= 5;
-
- bzero((char*) &tables,sizeof(tables));
- tables[0].alias= tables[0].real_name= (char*)"time_zone_name";
- tables[1].alias= tables[1].real_name= (char*)"time_zone";
- tables[2].alias= tables[2].real_name= (char*)"time_zone_transition";
- tables[3].alias= tables[3].real_name= (char*)"time_zone_transition_type";
- tables[0].next_local= tables[0].next_global= tables+1;
- tables[1].next_local= tables[1].next_global= tables+2;
- tables[2].next_local= tables[2].next_global= tables+3;
- tables[0].lock_type= tables[1].lock_type= tables[2].lock_type=
- tables[3].lock_type= TL_READ;
- tables[0].db= tables[1].db= tables[2].db= tables[3].db= thd->db;
- if (open_tables(thd, tables, &not_used))
- {
- sql_print_error("Error: Can't open time zone tables: %s",
- thd->net.last_error);
- goto end;
- }
-
- lock_ptr[0]= tables[0].table;
- lock_ptr[1]= tables[1].table;
- lock_ptr[2]= tables[2].table;
- lock_ptr[3]= tables[3].table;
- if (!(lock= mysql_lock_tables(thd, lock_ptr, 4)))
- {
- sql_print_error("Error: Can't lock time zone tables: %s",
- thd->net.last_error);
- goto end_with_close;
- }
-
- /*
Let us find out time zone id by its name (there is only one index
and it is specifically for this purpose).
*/
- table= tables[0].table;
-
+ table= tz_tables->table;
+ tz_tables= tz_tables->next;
table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1);
- table->file->ha_index_init(0);
+ /*
+ It is OK to ignore ha_index_init()/ha_index_end() return values since
+ mysql.time_zone* tables are MyISAM and these operations always succeed
+ for MyISAM.
+ */
+ (void)table->file->ha_index_init(0);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
{
- sql_print_error("Error: Can't find description of time zone.");
- goto end_with_unlock;
+ sql_print_error("Can't find description of time zone.");
+ goto end;
}
tzid= (uint)table->field[1]->val_int();
- table->file->ha_index_end();
+ (void)table->file->ha_index_end();
/*
Now we need to lookup record in mysql.time_zone table in order to
understand whenever this timezone uses leap seconds (again we are
using the only index in this table).
*/
- table= tables[1].table;
+ table= tz_tables->table;
+ tz_tables= tz_tables->next;
table->field[0]->store((longlong)tzid);
- table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0);
if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
0, HA_READ_KEY_EXACT))
{
- sql_print_error("Error: Can't find description of time zone.");
- goto end_with_unlock;
+ sql_print_error("Can't find description of time zone.");
+ goto end;
}
/* If Uses_leap_seconds == 'Y' */
@@ -1768,7 +1801,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
tz_info->lsis= tz_lsis;
}
- table->file->ha_index_end();
+ (void)table->file->ha_index_end();
/*
Now we will iterate through records for out time zone in
@@ -1776,9 +1809,10 @@ tz_load_from_db(THD *thd, const String *tz_name)
only for our time zone guess what are we doing?
Right - using special index.
*/
- table= tables[3].table;
+ table= tz_tables->table;
+ tz_tables= tz_tables->next;
table->field[0]->store((longlong)tzid);
- table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
@@ -1792,7 +1826,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
sql_print_error("Error while loading time zone description from "
"mysql.time_zone_transition_type table: too big "
"transition type id");
- goto end_with_unlock;
+ goto end;
}
ttis[ttid].tt_gmtoff= (long)table->field[2]->val_int();
@@ -1806,7 +1840,7 @@ tz_load_from_db(THD *thd, const String *tz_name)
sql_print_error("Error while loading time zone description from "
"mysql.time_zone_transition_type table: not enough "
"room for abbreviations");
- goto end_with_unlock;
+ goto end;
}
ttis[ttid].tt_abbrind= tz_info->charcnt;
memcpy(chars + tz_info->charcnt, abbr.ptr(), abbr.length());
@@ -1837,10 +1871,10 @@ tz_load_from_db(THD *thd, const String *tz_name)
{
sql_print_error("Error while loading time zone description from "
"mysql.time_zone_transition_type table");
- goto end_with_unlock;
+ goto end;
}
- table->file->ha_index_end();
+ (void)table->file->ha_index_end();
/*
@@ -1848,9 +1882,9 @@ tz_load_from_db(THD *thd, const String *tz_name)
mysql.time_zone_transition table. Here we additionaly need records
in ascending order by index scan also satisfies us.
*/
- table= tables[2].table;
+ table= tz_tables->table;
table->field[0]->store((longlong)tzid);
- table->file->ha_index_init(0);
+ (void)table->file->ha_index_init(0);
// FIXME Is there any better approach than explicitly specifying 4 ???
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr,
@@ -1865,14 +1899,14 @@ tz_load_from_db(THD *thd, const String *tz_name)
sql_print_error("Error while loading time zone description from "
"mysql.time_zone_transition table: "
"too much transitions");
- goto end_with_unlock;
+ goto end;
}
if (ttid + 1 > tz_info->typecnt)
{
sql_print_error("Error while loading time zone description from "
"mysql.time_zone_transition table: "
"bad transition type id");
- goto end_with_unlock;
+ goto end;
}
ats[tz_info->timecnt]= ttime;
@@ -1895,10 +1929,10 @@ tz_load_from_db(THD *thd, const String *tz_name)
{
sql_print_error("Error while loading time zone description from "
"mysql.time_zone_transition table");
- goto end_with_unlock;
+ goto end;
}
- table->file->ha_index_end();
+ (void)table->file->ha_index_end();
table= 0;
/*
@@ -1913,9 +1947,8 @@ tz_load_from_db(THD *thd, const String *tz_name)
#endif
sizeof(TRAN_TYPE_INFO) * tz_info->typecnt)))
{
- sql_print_error("Error: Out of memory while loading time zone "
- "description");
- goto end_with_unlock;
+ sql_print_error("Out of memory while loading time zone description");
+ goto end;
}
@@ -1939,13 +1972,13 @@ tz_load_from_db(THD *thd, const String *tz_name)
*/
if (tz_info->typecnt < 1)
{
- sql_print_error("Error: loading time zone without transition types");
- goto end_with_unlock;
+ sql_print_error("loading time zone without transition types");
+ goto end;
}
if (prepare_tz_info(tz_info, &tz_storage))
{
- sql_print_error("Error: Unable to build mktime map for time zone");
- goto end_with_unlock;
+ sql_print_error("Unable to build mktime map for time zone");
+ goto end;
}
@@ -1956,8 +1989,8 @@ tz_load_from_db(THD *thd, const String *tz_name)
&my_charset_latin1),
my_hash_insert(&tz_names, (const byte *)tmp_tzname)))
{
- sql_print_error("Error: Out of memory while loading time zone");
- goto end_with_unlock;
+ sql_print_error("Out of memory while loading time zone");
+ goto end;
}
/*
@@ -1965,19 +1998,11 @@ tz_load_from_db(THD *thd, const String *tz_name)
*/
return_val= tmp_tzname->tz;
-end_with_unlock:
+end:
if (table)
- table->file->ha_index_end();
-
- mysql_unlock_tables(thd, lock);
+ (void)table->file->ha_index_end();
-end_with_close:
- close_thread_tables(thd);
-
-end:
- thd->db= db_save;
- thd->db_length= db_length_save;
DBUG_RETURN(return_val);
}
@@ -2067,8 +2092,8 @@ str_to_offset(const char *str, uint length, long *offset)
SYNOPSIS
my_tz_find()
- thd - current thread
name - time zone specification
+ tz_tables - list of opened'n'locked time zone describing tables
DESCRIPTION
This function checks if name is one of time zones described in db,
@@ -2090,7 +2115,11 @@ str_to_offset(const char *str, uint length, long *offset)
values as parameter without additional external check and this property
is used by @@time_zone variable handling code).
- It will perform lookup in system tables (mysql.time_zone*) if needed.
+ It will perform lookup in system tables (mysql.time_zone*) if needed
+ using tz_tables as list of already opened tables (for info about this
+ list look at tz_load_from_open_tables() description). It won't perform
+ such lookup if no time zone describing tables were found during server
+ start up.
RETURN VALUE
Pointer to corresponding Time_zone object. 0 - in case of bad time zone
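
Besides named zones, my_tz_find() accepts specifications given as an offset from UTC, handled by str_to_offset() in the surrounding code. A simplified stand-alone sketch of that kind of parsing, assuming only the '+HH:MM'/'-HH:MM' form; the real function applies stricter range checks:

#include <cstdio>
#include <cctype>

static bool parse_tz_offset(const char *s, long *offset)
{
  int sign= 1;
  if (*s == '+')
    s++;
  else if (*s == '-')
  {
    sign= -1;
    s++;
  }
  else
    return false;

  long hours= 0, minutes= 0;
  if (!std::isdigit((unsigned char) *s))
    return false;
  while (std::isdigit((unsigned char) *s))
    hours= hours * 10 + (*s++ - '0');
  if (*s++ != ':')
    return false;
  if (!std::isdigit((unsigned char) *s))
    return false;
  while (std::isdigit((unsigned char) *s))
    minutes= minutes * 10 + (*s++ - '0');
  if (*s != '\0' || minutes > 59)
    return false;

  *offset= sign * (hours * 3600 + minutes * 60);
  return true;
}

int main()
{
  long off;
  if (parse_tz_offset("+10:00", &off))
    std::printf("%ld seconds east of UTC\n", off);     /* 36000 */
  return 0;
}
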
@@ -2098,7 +2127,7 @@ str_to_offset(const char *str, uint length, long *offset)
*/
Time_zone *
-my_tz_find(THD *thd, const String * name)
+my_tz_find(const String * name, TABLE_LIST *tz_tables)
{
TZ_NAMES_ENTRY *tmp_tzname;
Time_zone *result_tz= 0;
@@ -2108,6 +2137,8 @@ my_tz_find(THD *thd, const String * name)
DBUG_PRINT("enter", ("time zone name='%s'",
name ? ((String *)name)->c_ptr() : "NULL"));
+ DBUG_ASSERT(!time_zone_tables_exist || tz_tables);
+
if (!name)
DBUG_RETURN(0);
@@ -2125,18 +2156,21 @@ my_tz_find(THD *thd, const String * name)
if (!(result_tz= new (&tz_storage) Time_zone_offset(offset)) ||
my_hash_insert(&offset_tzs, (const byte *) result_tz))
{
+ result_tz= 0;
sql_print_error("Fatal error: Out of memory "
"while setting new time zone");
- result_tz= 0;
}
}
- } else {
+ }
+ else
+ {
+ result_tz= 0;
if ((tmp_tzname= (TZ_NAMES_ENTRY *)hash_search(&tz_names,
(const byte *)name->ptr(),
name->length())))
result_tz= tmp_tzname->tz;
- else
- result_tz= tz_load_from_db(thd, name);
+ else if (time_zone_tables_exist)
+ result_tz= tz_load_from_open_tables(name, tz_tables);
}
VOID(pthread_mutex_unlock(&tz_LOCK));
diff --git a/sql/tztime.h b/sql/tztime.h
index 334b14f4fc4..aabec260ec7 100644
--- a/sql/tztime.h
+++ b/sql/tztime.h
@@ -19,15 +19,8 @@
#pragma interface /* gcc class interface */
#endif
-/*
- Portable time_t replacement.
- Should be signed and hold seconds for 1902-2038 range.
-*/
-typedef long my_time_t;
-#define MY_TIME_T_MAX LONG_MAX
-#define MY_TIME_T_MIN LONG_MIN
-
#if !defined(TESTTIME) && !defined(TZINFO2SQL)
+
/*
This class represents abstract time zone and provides
basic interface for TIME <-> my_time_t conversion.
@@ -66,7 +59,8 @@ public:
extern Time_zone * my_tz_UTC;
extern Time_zone * my_tz_SYSTEM;
-extern Time_zone * my_tz_find(THD *thd, const String *name);
+extern TABLE_LIST * my_tz_get_table_list(THD *thd);
+extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables);
extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap);
extern void my_tz_free();
diff --git a/sql/unireg.cc b/sql/unireg.cc
index b5f6c3546a4..c82fcc4abef 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -75,7 +75,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
uchar fileinfo[64],forminfo[288],*keybuff;
TYPELIB formnames;
uchar *screen_buff;
- DBUG_ENTER("rea_create_table");
+ DBUG_ENTER("mysql_create_frm");
formnames.type_names=0;
if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0)))
diff --git a/strings/ctype-big5.c b/strings/ctype-big5.c
index ff53f61c053..3f35f7504ac 100644
--- a/strings/ctype-big5.c
+++ b/strings/ctype-big5.c
@@ -6290,6 +6290,7 @@ static MY_CHARSET_HANDLER my_charset_big5_handler=
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_mb_wc_big5, /* mb_wc */
my_wc_mb_big5, /* wc_mb */
my_caseup_str_mb,
@@ -6347,7 +6348,7 @@ CHARSET_INFO my_charset_big5_bin=
ctype_big5,
to_lower_big5,
to_upper_big5,
- sort_order_big5,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c
index cc83471f264..42dc0ab086d 100644
--- a/strings/ctype-bin.c
+++ b/strings/ctype-bin.c
@@ -68,11 +68,22 @@ static uchar bin_char_array[] =
+static int my_strnncoll_binary(CHARSET_INFO * cs __attribute__((unused)),
+ const uchar *s, uint slen,
+ const uchar *t, uint tlen,
+ my_bool t_is_prefix)
+{
+ uint len=min(slen,tlen);
+ int cmp= memcmp(s,t,len);
+ return cmp ? cmp : (int)((t_is_prefix ? len : slen) - tlen);
+}
+
+
/*
Compare two strings. Result is sign(first_argument - second_argument)
SYNOPSIS
- my_strnncoll_binary()
+ my_strnncollsp_binary()
cs Chararacter set
s String to compare
slen Length of 's'
@@ -80,8 +91,9 @@ static uchar bin_char_array[] =
tlen Length of 't'
NOTE
- This is used also when comparing with end space removal, as end space
- is significant for binary strings
+ This function is used for real binary strings, i.e. for
+ BLOB, BINARY(N) and VARBINARY(N).
+ It compares trailing spaces as spaces.
RETURN
< 0 s < t
@@ -89,10 +101,18 @@ static uchar bin_char_array[] =
> 0 s > t
*/
-static int my_strnncoll_binary(CHARSET_INFO * cs __attribute__((unused)),
- const uchar *s, uint slen,
- const uchar *t, uint tlen,
- my_bool t_is_prefix)
+static int my_strnncollsp_binary(CHARSET_INFO * cs __attribute__((unused)),
+ const uchar *s, uint slen,
+ const uchar *t, uint tlen)
+{
+ return my_strnncoll_binary(cs,s,slen,t,tlen,0);
+}
+
+
+static int my_strnncoll_8bit_bin(CHARSET_INFO * cs __attribute__((unused)),
+ const uchar *s, uint slen,
+ const uchar *t, uint tlen,
+ my_bool t_is_prefix)
{
uint len=min(slen,tlen);
int cmp= memcmp(s,t,len);
@@ -100,11 +120,62 @@ static int my_strnncoll_binary(CHARSET_INFO * cs __attribute__((unused)),
}
-static int my_strnncollsp_binary(CHARSET_INFO * cs __attribute__((unused)),
- const uchar *s, uint slen,
- const uchar *t, uint tlen)
+/*
+ Compare two strings. Result is sign(first_argument - second_argument)
+
+ SYNOPSIS
+ my_strnncollsp_8bit_bin()
+ cs Character set
+ s String to compare
+ slen Length of 's'
+ t String to compare
+ tlen Length of 't'
+
+ NOTE
+ This function is used for character strings with binary collations.
+ The shorter string is extended with end space to be as long as the longer
+ one.
+
+ RETURN
+ < 0 s < t
+ 0 s == t
+ > 0 s > t
+*/
+
+static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)),
+ const uchar *a, uint a_length,
+ const uchar *b, uint b_length)
{
- return my_strnncoll_binary(cs,s,slen,t,tlen,0);
+ const uchar *end;
+ uint length;
+
+ end= a + (length= min(a_length, b_length));
+ while (a < end)
+ {
+ if (*a++ != *b++)
+ return ((int) a[-1] - (int) b[-1]);
+ }
+ if (a_length != b_length)
+ {
+ int swap= 0;
+ /*
+ Check the next non-space character of the longer key. If it's < ' ',
+ then it's smaller than the other key.
+ */
+ if (a_length < b_length)
+ {
+ /* let a point at the longer key */
+ a_length= b_length;
+ a= b;
+ swap= -1; /* swap sign of result */
+ }
+ for (end= a + a_length-length; a < end ; a++)
+ {
+ if (*a != ' ')
+ return ((int) *a - (int) ' ') ^ swap;
+ }
+ }
+ return 0;
}
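
The NOTEs above distinguish the two binary comparison rules: my_strnncollsp_binary() keeps trailing spaces significant, while my_strnncollsp_8bit_bin() pads the shorter key with spaces before comparing. A stand-alone sketch of the padded rule, using sign multiplication instead of the XOR trick for clarity; this is plain library code, not the server's implementation:

#include <cstdio>
#include <cstring>

/* Compare a and b as if the shorter one were padded with spaces. */
static int cmp_space_padded(const unsigned char *a, size_t alen,
                            const unsigned char *b, size_t blen)
{
  size_t common= alen < blen ? alen : blen;
  int res= std::memcmp(a, b, common);
  if (res || alen == blen)
    return res;
  const unsigned char *tail= (alen > blen ? a : b) + common;
  size_t tail_len= (alen > blen ? alen : blen) - common;
  int sign= alen > blen ? 1 : -1;          /* which side owns the tail */
  for (size_t i= 0; i < tail_len; i++)
    if (tail[i] != ' ')
      return sign * ((int) tail[i] - (int) ' ');
  return 0;
}

int main()
{
  /* "abc  " equals "abc" under the padded rule */
  std::printf("%d\n", cmp_space_padded((const unsigned char*) "abc  ", 5,
                                       (const unsigned char*) "abc", 3));
  /* a tail character below ' ' sorts before the shorter key */
  std::printf("%d\n", cmp_space_padded((const unsigned char*) "abc\t", 4,
                                       (const unsigned char*) "abc", 3));
  return 0;
}
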
@@ -344,6 +415,20 @@ skip:
MY_COLLATION_HANDLER my_collation_8bit_bin_handler =
{
NULL, /* init */
+ my_strnncoll_8bit_bin,
+ my_strnncollsp_8bit_bin,
+ my_strnxfrm_bin,
+ my_like_range_simple,
+ my_wildcmp_bin,
+ my_strcasecmp_bin,
+ my_instr_bin,
+ my_hash_sort_bin
+};
+
+
+static MY_COLLATION_HANDLER my_collation_binary_handler =
+{
+ NULL, /* init */
my_strnncoll_binary,
my_strnncollsp_binary,
my_strnxfrm_bin,
@@ -364,6 +449,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_8bit,
my_well_formed_len_8bit,
my_lengthsp_8bit,
+ my_numcells_8bit,
my_mb_wc_bin,
my_wc_mb_bin,
my_case_str_bin,
@@ -394,7 +480,7 @@ CHARSET_INFO my_charset_bin =
ctype_bin, /* ctype */
bin_char_array, /* to_lower */
bin_char_array, /* to_upper */
- bin_char_array, /* sort_order */
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
@@ -407,5 +493,5 @@ CHARSET_INFO my_charset_bin =
0, /* min_sort_char */
255, /* max_sort_char */
&my_charset_handler,
- &my_collation_8bit_bin_handler
+ &my_collation_binary_handler
};
diff --git a/strings/ctype-euc_kr.c b/strings/ctype-euc_kr.c
index fd8659a181c..43a50b0dfbe 100644
--- a/strings/ctype-euc_kr.c
+++ b/strings/ctype-euc_kr.c
@@ -8657,6 +8657,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_mb_wc_euc_kr, /* mb_wc */
my_wc_mb_euc_kr, /* wc_mb */
my_caseup_str_mb,
@@ -8715,7 +8716,7 @@ CHARSET_INFO my_charset_euckr_bin=
ctype_euc_kr,
to_lower_euc_kr,
to_upper_euc_kr,
- sort_order_euc_kr,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-gb2312.c b/strings/ctype-gb2312.c
index b9f61256717..8d97ac9ca1d 100644
--- a/strings/ctype-gb2312.c
+++ b/strings/ctype-gb2312.c
@@ -5708,6 +5708,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_mb_wc_gb2312, /* mb_wc */
my_wc_mb_gb2312, /* wc_mb */
my_caseup_str_mb,
@@ -5765,7 +5766,7 @@ CHARSET_INFO my_charset_gb2312_bin=
ctype_gb2312,
to_lower_gb2312,
to_upper_gb2312,
- sort_order_gb2312,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-gbk.c b/strings/ctype-gbk.c
index 2ef75e27d9a..9400fb08f2b 100644
--- a/strings/ctype-gbk.c
+++ b/strings/ctype-gbk.c
@@ -9939,6 +9939,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_mb_wc_gbk,
my_wc_mb_gbk,
my_caseup_str_mb,
@@ -9996,7 +9997,7 @@ CHARSET_INFO my_charset_gbk_bin=
ctype_gbk,
to_lower_gbk,
to_upper_gbk,
- sort_order_gbk,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-latin1.c b/strings/ctype-latin1.c
index 652794fa84d..aea517811ab 100644
--- a/strings/ctype-latin1.c
+++ b/strings/ctype-latin1.c
@@ -387,6 +387,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_8bit,
my_well_formed_len_8bit,
my_lengthsp_8bit,
+ my_numcells_8bit,
my_mb_wc_latin1,
my_wc_mb_latin1,
my_caseup_str_8bit,
@@ -728,7 +729,7 @@ CHARSET_INFO my_charset_latin1_bin=
ctype_latin1,
to_lower_latin1,
to_upper_latin1,
- sort_order_latin1_de,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
cs_to_uni, /* tab_to_uni */
diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c
index 7b0dadcfa19..2548a68ab19 100644
--- a/strings/ctype-mb.c
+++ b/strings/ctype-mb.c
@@ -237,7 +237,8 @@ int my_wildcmp_mb(CHARSET_INFO *cs,
if (str++ == str_end) return (-1);
}
{
- int tmp=my_wildcmp_mb(cs,str,str_end,wildstr,wildend,escape,w_one,w_many);
+ int tmp=my_wildcmp_mb(cs,str,str_end,wildstr,wildend,escape,w_one,
+ w_many);
if (tmp <= 0)
return (tmp);
}
@@ -248,41 +249,46 @@ int my_wildcmp_mb(CHARSET_INFO *cs,
return (str != str_end ? 1 : 0);
}
+
uint my_numchars_mb(CHARSET_INFO *cs __attribute__((unused)),
- const char *b, const char *e)
+ const char *pos, const char *end)
{
- register uint32 n=0,mblen;
- while (b < e)
+ register uint32 count=0;
+ while (pos < end)
{
- b+= (mblen= my_ismbchar(cs,b,e)) ? mblen : 1;
- ++n;
+ uint mblen;
+ pos+= (mblen= my_ismbchar(cs,pos,end)) ? mblen : 1;
+ count++;
}
- return n;
+ return count;
}
+
uint my_charpos_mb(CHARSET_INFO *cs __attribute__((unused)),
- const char *b, const char *e, uint pos)
+ const char *pos, const char *end, uint length)
{
- uint mblen;
- const char *b0=b;
+ const char *start= pos;
- while (pos && b<e)
+ while (length && pos < end)
{
- b+= (mblen= my_ismbchar(cs,b,e)) ? mblen : 1;
- pos--;
+ uint mblen;
+ pos+= (mblen= my_ismbchar(cs, pos, end)) ? mblen : 1;
+ length--;
}
- return pos ? e+2-b0 : b-b0;
+ return length ? end+2-start : pos-start;
}
+
uint my_well_formed_len_mb(CHARSET_INFO *cs,
const char *b, const char *e, uint pos)
{
- my_wc_t wc;
- int mblen;
const char *b_start= b;
while (pos)
{
+ my_wc_t wc;
+ int mblen;
+
if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) b, (uchar*) e)) <0)
break;
b+= mblen;
@@ -360,11 +366,63 @@ static int my_strnncoll_mb_bin(CHARSET_INFO * cs __attribute__((unused)),
return cmp ? cmp : (int) ((t_is_prefix ? len : slen) - tlen);
}
+
+/*
+ Compare two strings.
+
+ SYNOPSIS
+ my_strnncollsp_mb_bin()
+ cs Character set
+ s String to compare
+ slen Length of 's'
+ t String to compare
+ tlen Length of 't'
+
+ NOTE
+ This function is used for character strings with binary collations.
+ The shorter string is extended with end space to be as long as the longer
+ one.
+
+ RETURN
+ A negative number if s < t
+ A positive number if s > t
+ 0 if strings are equal
+*/
+
static int my_strnncollsp_mb_bin(CHARSET_INFO * cs __attribute__((unused)),
- const uchar *s, uint slen,
- const uchar *t, uint tlen)
+ const uchar *a, uint a_length,
+ const uchar *b, uint b_length)
{
- return my_strnncoll_mb_bin(cs,s,slen,t,tlen,0);
+ const uchar *end;
+ uint length;
+
+ end= a + (length= min(a_length, b_length));
+ while (a < end)
+ {
+ if (*a++ != *b++)
+ return ((int) a[-1] - (int) b[-1]);
+ }
+ if (a_length != b_length)
+ {
+ int swap= 0;
+ /*
+ Check the next non-space character of the longer key. If it's < ' ',
+ then it's smaller than the other key.
+ */
+ if (a_length < b_length)
+ {
+ /* let a point at the longer key */
+ a_length= b_length;
+ a= b;
+ swap= -1; /* swap sign of result */
+ }
+ for (end= a + a_length-length; a < end ; a++)
+ {
+ if (*a != ' ')
+ return ((int) *a - (int) ' ') ^ swap;
+ }
+ }
+ return 0;
}
@@ -519,6 +577,238 @@ static int my_wildcmp_mb_bin(CHARSET_INFO *cs,
}
+/*
+ Data was produced from EastAsianWidth.txt
+ using utt11-dump utility.
+*/
+static char pg11[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pg23[256]=
+{
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pg2E[256]=
+{
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pg2F[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0
+};
+
+static char pg30[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
+0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
+};
+
+static char pg31[256]=
+{
+0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
+};
+
+static char pg32[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0
+};
+
+static char pg4D[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pg9F[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pgA4[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pgD7[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pgFA[256]=
+{
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pgFE[256]=
+{
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static char pgFF[256]=
+{
+0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static struct {int page; char *p;} utr11_data[256]=
+{
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,pg11},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,pg23},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,pg2E},{0,pg2F},
+{0,pg30},{0,pg31},{0,pg32},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{0,pg4D},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{0,pg9F},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{0,pgA4},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},
+{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{1,NULL},{0,pgD7},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},{0,NULL},
+{0,NULL},{1,NULL},{0,pgFA},{0,NULL},{0,NULL},{0,NULL},{0,pgFE},{0,pgFF}
+};
+
+uint my_numcells_mb(CHARSET_INFO *cs, const char *b, const char *e)
+{
+ my_wc_t wc;
+ int clen= 0;
+
+ while (b < e)
+ {
+ int mblen;
+ uint pg;
+ if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) b, (uchar*) e)) <= 0)
+ {
+ mblen= 1; /* Assume a wrong sequence takes 1 display cell */
+ b++;
+ continue;
+ }
+ b+= mblen;
+ pg= (wc >> 8) & 0xFF;
+ clen+= utr11_data[pg].p ? utr11_data[pg].p[wc & 0xFF] : utr11_data[pg].page;
+ clen++;
+ }
+ return clen;
+}
+
+
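A minimal sketch of how one code point maps to a display-cell count with these tables (not part of the patch; it simply mirrors the lookup inside my_numcells_mb and assumes the utr11_data/pgXX arrays above are in scope):

    static int cells_for_codepoint(my_wc_t wc)
    {
      uint pg= (wc >> 8) & 0xFF;
      /* Pages that are uniformly wide or narrow carry the extra cell count
         in .page; mixed pages point at a 256-entry per-character table. */
      return 1 + (utr11_data[pg].p ? utr11_data[pg].p[wc & 0xFF]
                                   : utr11_data[pg].page);
    }

So a narrow character costs one cell and a wide (East Asian) character costs two, which is what my_numcells_mb accumulates over the whole string.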
MY_COLLATION_HANDLER my_collation_mb_bin_handler =
{
NULL, /* init */
diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c
index 8e295b9e13e..84bfcb0b171 100644
--- a/strings/ctype-simple.c
+++ b/strings/ctype-simple.c
@@ -1056,6 +1056,13 @@ uint my_numchars_8bit(CHARSET_INFO *cs __attribute__((unused)),
}
+uint my_numcells_8bit(CHARSET_INFO *cs __attribute__((unused)),
+ const char *b, const char *e)
+{
+ return e-b;
+}
+
+
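For single-byte character sets every byte occupies exactly one display cell, so the cell count is simply the byte length. A usage sketch (the cs argument is unused by this function, so NULL is passed here only for illustration):

    const char *s= "abc";
    uint cells= my_numcells_8bit(NULL, s, s + 3);   /* cells == 3 */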
uint my_charpos_8bit(CHARSET_INFO *cs __attribute__((unused)),
const char *b __attribute__((unused)),
const char *e __attribute__((unused)),
@@ -1171,6 +1178,15 @@ static my_bool create_fromuni(CHARSET_INFO *cs, void *(*alloc)(uint))
uni_idx idx[PLANE_NUM];
int i,n;
+ /*
+ Check that the Unicode map is loaded.
+ It may not be loaded when the collation is
+ listed in Index.xml but not specified
+ in the character-set-specific XML file.
+ */
+ if (!cs->tab_to_uni)
+ return TRUE;
+
/* Clear plane statistics */
bzero(idx,sizeof(idx));
@@ -1278,6 +1294,7 @@ MY_CHARSET_HANDLER my_charset_8bit_handler=
my_charpos_8bit,
my_well_formed_len_8bit,
my_lengthsp_8bit,
+ my_numcells_8bit,
my_mb_wc_8bit,
my_wc_mb_8bit,
my_caseup_str_8bit,
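With the new slot wired into the handler table, callers can ask a character set how many terminal columns a string occupies. A hedged usage sketch — the MY_CHARSET_HANDLER member name is not visible in these hunks, so 'numcells' below is an assumption:

    CHARSET_INFO *cs= &my_charset_utf8_general_ci;
    const char *s= "abc";
    uint cells= cs->cset->numcells(cs, s, s + strlen(s));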
diff --git a/strings/ctype-sjis.c b/strings/ctype-sjis.c
index 5fd005f842e..b4cfee0f24a 100644
--- a/strings/ctype-sjis.c
+++ b/strings/ctype-sjis.c
@@ -4558,6 +4558,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_mb_wc_sjis, /* mb_wc */
my_wc_mb_sjis, /* wc_mb */
my_caseup_str_8bit,
@@ -4615,7 +4616,7 @@ CHARSET_INFO my_charset_sjis_bin=
ctype_sjis,
to_lower_sjis,
to_upper_sjis,
- sort_order_sjis,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c
index c7d859a6ead..420c5b5582e 100644
--- a/strings/ctype-tis620.c
+++ b/strings/ctype-tis620.c
@@ -930,6 +930,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_8bit,
my_well_formed_len_8bit,
my_lengthsp_8bit,
+ my_numcells_8bit,
my_mb_wc_tis620, /* mb_wc */
my_wc_mb_tis620, /* wc_mb */
my_caseup_str_8bit,
@@ -988,7 +989,7 @@ CHARSET_INFO my_charset_tis620_bin=
ctype_tis620,
to_lower_tis620,
to_upper_tis620,
- sort_order_tis620,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c
index 1b49abd0fbb..cecc3be5045 100644
--- a/strings/ctype-uca.c
+++ b/strings/ctype-uca.c
@@ -6645,7 +6645,7 @@ static const char slovak[]=
"& H < ch <<< Ch <<< CH"
"& O < \\u00F4 <<< \\u00D4"
"& S < \\u0161 <<< \\u0160"
- "& Z < \\u017E <<< \\017D";
+ "& Z < \\u017E <<< \\u017D";
static const char spanish2[]= /* Also good for Asturian and Galician */
"&C < ch <<< Ch <<< CH"
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index 20a5ff58d3a..c6e55ee8f0e 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -1423,6 +1423,7 @@ MY_CHARSET_HANDLER my_charset_ucs2_handler=
my_charpos_ucs2,
my_well_formed_len_ucs2,
my_lengthsp_ucs2,
+ my_numcells_mb,
my_ucs2_uni, /* mb_wc */
my_uni_ucs2, /* wc_mb */
my_caseup_str_ucs2,
@@ -1480,7 +1481,7 @@ CHARSET_INFO my_charset_ucs2_bin=
ctype_ucs2, /* ctype */
to_lower_ucs2, /* to_lower */
to_upper_ucs2, /* to_upper */
- to_upper_ucs2, /* sort_order */
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-ujis.c b/strings/ctype-ujis.c
index 3f53a07f527..37c26a3bbc4 100644
--- a/strings/ctype-ujis.c
+++ b/strings/ctype-ujis.c
@@ -8443,6 +8443,7 @@ static MY_CHARSET_HANDLER my_charset_handler=
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_mb_wc_euc_jp, /* mb_wc */
my_wc_mb_euc_jp, /* wc_mb */
my_caseup_str_mb,
@@ -8502,7 +8503,7 @@ CHARSET_INFO my_charset_ujis_bin=
ctype_ujis,
to_lower_ujis,
to_upper_ujis,
- sort_order_ujis,
+ NULL, /* sort_order */
NULL, /* contractions */
NULL, /* sort_order_big*/
NULL, /* tab_to_uni */
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index bf2d8a17fb4..5e339725b1a 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -1,15 +1,15 @@
/* Copyright (C) 2000 MySQL AB
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
-
+
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
-
+
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
@@ -1524,7 +1524,7 @@ MY_UNICASE_INFO *uni_plane[256]={
#ifdef HAVE_CHARSET_utf8
-/*
+/*
We consider bytes with code more than 127 as a letter.
This garantees that word boundaries work fine with regular
expressions. Note, there is no need to mark byte 255 as a
@@ -1590,99 +1590,108 @@ static uchar to_upper_utf8[] = {
240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255
};
+static inline int bincmp(const uchar *s, const uchar *se,
+ const uchar *t, const uchar *te)
+{
+ int slen=se-s, tlen=te-t;
+ int len=min(slen,tlen);
+ int cmp= memcmp(s,t,len);
+ return cmp ? cmp : slen-tlen;
+}
+
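A worked example of the bincmp() fallback above, using hypothetical buffers (not part of the patch): the common prefix is compared first, and only if it is equal does the length difference decide:

    const uchar a[]= { 0xE4, 0xB8 };        /* truncated multi-byte sequence */
    const uchar b[]= { 0xE4, 0xB8, 0xAD };  /* complete 3-byte sequence      */
    /* bincmp(a, a + 2, b, b + 3) < 0: memcmp over the 2-byte prefix is 0,
       so the result is the length difference 2 - 3. */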
static int my_utf8_uni(CHARSET_INFO *cs __attribute__((unused)),
- my_wc_t * pwc, const uchar *s, const uchar *e)
+ my_wc_t * pwc, const uchar *s, const uchar *e)
{
unsigned char c;
-
+
if (s >= e)
return MY_CS_TOOFEW(0);
c= s[0];
- if (c < 0x80)
+ if (c < 0x80)
{
*pwc = c;
return 1;
- }
- else if (c < 0xc2)
+ }
+ else if (c < 0xc2)
return MY_CS_ILSEQ;
- else if (c < 0xe0)
+ else if (c < 0xe0)
{
- if (s+2 > e) /* We need 2 characters */
+ if (s+2 > e) /* We need 2 characters */
return MY_CS_TOOFEW(0);
-
+
if (!((s[1] ^ 0x80) < 0x40))
return MY_CS_ILSEQ;
-
+
*pwc = ((my_wc_t) (c & 0x1f) << 6) | (my_wc_t) (s[1] ^ 0x80);
return 2;
- }
- else if (c < 0xf0)
+ }
+ else if (c < 0xf0)
{
if (s+3 > e) /* We need 3 characters */
return MY_CS_TOOFEW(0);
-
+
if (!((s[1] ^ 0x80) < 0x40 && (s[2] ^ 0x80) < 0x40 && (c >= 0xe1 || s[1] >= 0xa0)))
return MY_CS_ILSEQ;
-
- *pwc = ((my_wc_t) (c & 0x0f) << 12) |
- ((my_wc_t) (s[1] ^ 0x80) << 6) |
+
+ *pwc = ((my_wc_t) (c & 0x0f) << 12) |
+ ((my_wc_t) (s[1] ^ 0x80) << 6) |
(my_wc_t) (s[2] ^ 0x80);
-
+
return 3;
- }
+ }
#ifdef UNICODE_32BIT
- else if (c < 0xf8 && sizeof(my_wc_t)*8 >= 32)
+ else if (c < 0xf8 && sizeof(my_wc_t)*8 >= 32)
{
if (s+4 > e) /* We need 4 characters */
return MY_CS_TOOFEW(0);
-
- if (!((s[1] ^ 0x80) < 0x40 &&
- (s[2] ^ 0x80) < 0x40 &&
- (s[3] ^ 0x80) < 0x40 &&
+
+ if (!((s[1] ^ 0x80) < 0x40 &&
+ (s[2] ^ 0x80) < 0x40 &&
+ (s[3] ^ 0x80) < 0x40 &&
(c >= 0xf1 || s[1] >= 0x90)))
return MY_CS_ILSEQ;
-
- *pwc = ((my_wc_t) (c & 0x07) << 18) |
- ((my_wc_t) (s[1] ^ 0x80) << 12) |
- ((my_wc_t) (s[2] ^ 0x80) << 6) |
+
+ *pwc = ((my_wc_t) (c & 0x07) << 18) |
+ ((my_wc_t) (s[1] ^ 0x80) << 12) |
+ ((my_wc_t) (s[2] ^ 0x80) << 6) |
(my_wc_t) (s[3] ^ 0x80);
-
+
return 4;
}
- else if (c < 0xfc && sizeof(my_wc_t)*8 >= 32)
+ else if (c < 0xfc && sizeof(my_wc_t)*8 >= 32)
{
if (s+5 >e) /* We need 5 characters */
return MY_CS_TOOFEW(0);
-
- if (!((s[1] ^ 0x80) < 0x40 &&
- (s[2] ^ 0x80) < 0x40 &&
- (s[3] ^ 0x80) < 0x40 &&
- (s[4] ^ 0x80) < 0x40 &&
+
+ if (!((s[1] ^ 0x80) < 0x40 &&
+ (s[2] ^ 0x80) < 0x40 &&
+ (s[3] ^ 0x80) < 0x40 &&
+ (s[4] ^ 0x80) < 0x40 &&
(c >= 0xf9 || s[1] >= 0x88)))
return MY_CS_ILSEQ;
-
- *pwc = ((my_wc_t) (c & 0x03) << 24) |
+
+ *pwc = ((my_wc_t) (c & 0x03) << 24) |
((my_wc_t) (s[1] ^ 0x80) << 18) |
((my_wc_t) (s[2] ^ 0x80) << 12) |
((my_wc_t) (s[3] ^ 0x80) << 6) |
(my_wc_t) (s[4] ^ 0x80);
return 5;
- }
- else if (c < 0xfe && sizeof(my_wc_t)*8 >= 32)
+ }
+ else if (c < 0xfe && sizeof(my_wc_t)*8 >= 32)
{
if ( s+6 >e ) /* We need 6 characters */
return MY_CS_TOOFEW(0);
-
- if (!((s[1] ^ 0x80) < 0x40 &&
- (s[2] ^ 0x80) < 0x40 &&
- (s[3] ^ 0x80) < 0x40 &&
- (s[4] ^ 0x80) < 0x40 &&
- (s[5] ^ 0x80) < 0x40 &&
+
+ if (!((s[1] ^ 0x80) < 0x40 &&
+ (s[2] ^ 0x80) < 0x40 &&
+ (s[3] ^ 0x80) < 0x40 &&
+ (s[4] ^ 0x80) < 0x40 &&
+ (s[5] ^ 0x80) < 0x40 &&
(c >= 0xfd || s[1] >= 0x84)))
return MY_CS_ILSEQ;
-
+
*pwc = ((my_wc_t) (c & 0x01) << 30)
| ((my_wc_t) (s[1] ^ 0x80) << 24)
| ((my_wc_t) (s[2] ^ 0x80) << 18)
@@ -1702,12 +1711,12 @@ static int my_uni_utf8 (CHARSET_INFO *cs __attribute__((unused)) ,
if (r >= e)
return MY_CS_TOOSMALL;
-
- if (wc < 0x80)
+
+ if (wc < 0x80)
count = 1;
- else if (wc < 0x800)
+ else if (wc < 0x800)
count = 2;
- else if (wc < 0x10000)
+ else if (wc < 0x10000)
count = 3;
#ifdef UNICODE_32BIT
else if (wc < 0x200000)
@@ -1718,15 +1727,15 @@ static int my_uni_utf8 (CHARSET_INFO *cs __attribute__((unused)) ,
count = 6;
#endif
else return MY_CS_ILUNI;
-
- /*
- e is a character after the string r, not the last character of it.
+
+ /*
+ e is a character after the string r, not the last character of it.
Because of it (r+count > e), not (r+count-1 >e )
*/
- if ( r+count > e )
+ if ( r+count > e )
return MY_CS_TOOSMALL;
-
- switch (count) {
+
+ switch (count) {
/* Fall through all cases!!! */
#ifdef UNICODE_32BIT
case 6: r[5] = (uchar) (0x80 | (wc & 0x3f)); wc = wc >> 6; wc |= 0x4000000;
@@ -1806,8 +1815,8 @@ static void my_casedn_str_utf8(CHARSET_INFO *cs, char * s)
}
-static int my_strnncoll_utf8(CHARSET_INFO *cs,
- const uchar *s, uint slen,
+static int my_strnncoll_utf8(CHARSET_INFO *cs,
+ const uchar *s, uint slen,
const uchar *t, uint tlen,
my_bool t_is_prefix)
{
@@ -1821,13 +1830,13 @@ static int my_strnncoll_utf8(CHARSET_INFO *cs,
int plane;
s_res=my_utf8_uni(cs,&s_wc, s, se);
t_res=my_utf8_uni(cs,&t_wc, t, te);
-
+
if ( s_res <= 0 || t_res <= 0 )
{
- /* Incorrect string, compare by char value */
- return ((int)s[0]-(int)t[0]);
+ /* Incorrect string, fall back to byte-by-byte comparison */
+ return bincmp(s, se, t, te);
}
-
+
plane=(s_wc>>8) & 0xFF;
s_wc = uni_plane[plane] ? uni_plane[plane][s_wc & 0xFF].sort : s_wc;
plane=(t_wc>>8) & 0xFF;
@@ -1836,7 +1845,7 @@ static int my_strnncoll_utf8(CHARSET_INFO *cs,
{
return ((int) s_wc) - ((int) t_wc);
}
-
+
s+=s_res;
t+=t_res;
}
@@ -1850,11 +1859,11 @@ static int my_strnncoll_utf8(CHARSET_INFO *cs,
SYNOPSIS
my_strnncollsp_utf8()
- cs character set handler
- a First string to compare
- a_length Length of 'a'
- b Second string to compare
- b_length Length of 'b'
+ cs character set handler
+ a First string to compare
+ a_length Length of 'a'
+ b Second string to compare
+ b_length Length of 'b'
IMPLEMENTATION
If one string is shorter as the other, then we space extend the other
@@ -1867,32 +1876,32 @@ static int my_strnncoll_utf8(CHARSET_INFO *cs,
"a\0" < "a "
RETURN
- < 0 a < b
- = 0 a == b
- > 0 a > b
+ < 0 a < b
+ = 0 a == b
+ > 0 a > b
*/
-static int my_strnncollsp_utf8(CHARSET_INFO *cs,
- const uchar *s, uint slen,
- const uchar *t, uint tlen)
+static int my_strnncollsp_utf8(CHARSET_INFO *cs,
+ const uchar *s, uint slen,
+ const uchar *t, uint tlen)
{
int s_res,t_res;
my_wc_t s_wc,t_wc;
const uchar *se= s+slen;
const uchar *te= t+tlen;
-
+
while ( s < se && t < te )
{
int plane;
s_res=my_utf8_uni(cs,&s_wc, s, se);
t_res=my_utf8_uni(cs,&t_wc, t, te);
-
+
if ( s_res <= 0 || t_res <= 0 )
{
- /* Incorrect string, compare by char value */
- return ((int)s[0]-(int)t[0]);
+ /* Incorrect string, fall back to byte-by-byte comparison */
+ return bincmp(s, se, t, te);
}
-
+
plane=(s_wc>>8) & 0xFF;
s_wc = uni_plane[plane] ? uni_plane[plane][s_wc & 0xFF].sort : s_wc;
plane=(t_wc>>8) & 0xFF;
@@ -1901,14 +1910,14 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs,
{
return ((int) s_wc) - ((int) t_wc);
}
-
+
s+=s_res;
t+=t_res;
}
-
+
slen= se-s;
tlen= te-t;
-
+
if (slen != tlen)
{
int swap= 0;
@@ -1940,35 +1949,35 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs,
static int my_strncasecmp_utf8(CHARSET_INFO *cs,
- const char *s, const char *t, uint len)
+ const char *s, const char *t, uint len)
{
int s_res,t_res;
my_wc_t s_wc,t_wc;
const char *se=s+len;
const char *te=t+len;
-
+
while ( s < se && t < te )
{
int plane;
-
+
s_res=my_utf8_uni(cs,&s_wc, (const uchar*)s, (const uchar*)se);
t_res=my_utf8_uni(cs,&t_wc, (const uchar*)t, (const uchar*)te);
-
+
if ( s_res <= 0 || t_res <= 0 )
{
- /* Incorrect string, compare by char value */
- return ((int)s[0]-(int)t[0]);
+ /* Incorrect string, fall back to byte-by-byte comparison */
+ return bincmp(s, se, t, te);
}
-
+
plane=(s_wc>>8) & 0xFF;
s_wc = uni_plane[plane] ? uni_plane[plane][s_wc & 0xFF].tolower : s_wc;
plane=(t_wc>>8) & 0xFF;
t_wc = uni_plane[plane] ? uni_plane[plane][t_wc & 0xFF].tolower : t_wc;
-
+
if ( s_wc != t_wc )
return ((int) s_wc) - ((int) t_wc);
-
+
s+=s_res;
t+=t_res;
}
@@ -1983,9 +1992,9 @@ static int my_strcasecmp_utf8(CHARSET_INFO *cs, const char *s, const char *t)
return my_strncasecmp_utf8(cs, s, t, len);
}
-static int my_strnxfrm_utf8(CHARSET_INFO *cs,
- uchar *dst, uint dstlen,
- const uchar *src, uint srclen)
+static int my_strnxfrm_utf8(CHARSET_INFO *cs,
+ uchar *dst, uint dstlen,
+ const uchar *src, uint srclen)
{
my_wc_t wc;
int res;
@@ -2002,10 +2011,10 @@ static int my_strnxfrm_utf8(CHARSET_INFO *cs,
}
src+=res;
srclen-=res;
-
+
plane=(wc>>8) & 0xFF;
wc = uni_plane[plane] ? uni_plane[plane][wc & 0xFF].sort : wc;
-
+
if ((res=my_uni_utf8(cs,wc,dst,de)) <0)
{
break;
@@ -2026,18 +2035,18 @@ static int my_mbcharlen_utf8(CHARSET_INFO *cs __attribute__((unused)) , uint c)
{
if (c < 0x80)
return 1;
- else if (c < 0xc2)
+ else if (c < 0xc2)
return 0; /* Illegal mb head */
- else if (c < 0xe0)
+ else if (c < 0xe0)
return 2;
- else if (c < 0xf0)
+ else if (c < 0xf0)
return 3;
#ifdef UNICODE_32BIT
- else if (c < 0xf8)
+ else if (c < 0xf8)
return 4;
- else if (c < 0xfc)
+ else if (c < 0xfc)
return 5;
- else if (c < 0xfe)
+ else if (c < 0xfe)
return 6;
#endif
return 0; /* Illegal mb head */;
@@ -2046,7 +2055,7 @@ static int my_mbcharlen_utf8(CHARSET_INFO *cs __attribute__((unused)) , uint c)
static MY_COLLATION_HANDLER my_collation_ci_handler =
{
- NULL, /* init */
+ NULL, /* init */
my_strnncoll_utf8,
my_strnncollsp_utf8,
my_strnxfrm_utf8,
@@ -2059,13 +2068,14 @@ static MY_COLLATION_HANDLER my_collation_ci_handler =
MY_CHARSET_HANDLER my_charset_utf8_handler=
{
- NULL, /* init */
+ NULL, /* init */
my_ismbchar_utf8,
my_mbcharlen_utf8,
my_numchars_mb,
my_charpos_mb,
my_well_formed_len_mb,
my_lengthsp_8bit,
+ my_numcells_mb,
my_utf8_uni,
my_uni_utf8,
my_caseup_str_utf8,
@@ -2088,27 +2098,27 @@ MY_CHARSET_HANDLER my_charset_utf8_handler=
CHARSET_INFO my_charset_utf8_general_ci=
{
- 33,0,0, /* number */
- MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM|MY_CS_UNICODE, /* state */
- "utf8", /* cs name */
- "utf8_general_ci", /* name */
- "", /* comment */
- NULL, /* tailoring */
- ctype_utf8, /* ctype */
- to_lower_utf8, /* to_lower */
- to_upper_utf8, /* to_upper */
- to_upper_utf8, /* sort_order */
- NULL, /* contractions */
- NULL, /* sort_order_big*/
- NULL, /* tab_to_uni */
- NULL, /* tab_from_uni */
- NULL, /* state_map */
- NULL, /* ident_map */
- 1, /* strxfrm_multiply */
- 1, /* mbminlen */
- 3, /* mbmaxlen */
- 0, /* min_sort_char */
- 255, /* max_sort_char */
+ 33,0,0, /* number */
+ MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM|MY_CS_UNICODE, /* state */
+ "utf8", /* cs name */
+ "utf8_general_ci", /* name */
+ "", /* comment */
+ NULL, /* tailoring */
+ ctype_utf8, /* ctype */
+ to_lower_utf8, /* to_lower */
+ to_upper_utf8, /* to_upper */
+ to_upper_utf8, /* sort_order */
+ NULL, /* contractions */
+ NULL, /* sort_order_big*/
+ NULL, /* tab_to_uni */
+ NULL, /* tab_from_uni */
+ NULL, /* state_map */
+ NULL, /* ident_map */
+ 1, /* strxfrm_multiply */
+ 1, /* mbminlen */
+ 3, /* mbmaxlen */
+ 0, /* min_sort_char */
+ 255, /* max_sort_char */
&my_charset_utf8_handler,
&my_collation_ci_handler
};
@@ -2116,27 +2126,27 @@ CHARSET_INFO my_charset_utf8_general_ci=
CHARSET_INFO my_charset_utf8_bin=
{
- 83,0,0, /* number */
- MY_CS_COMPILED|MY_CS_BINSORT|MY_CS_UNICODE, /* state */
- "utf8", /* cs name */
- "utf8_bin", /* name */
- "", /* comment */
- NULL, /* tailoring */
- ctype_utf8, /* ctype */
- to_lower_utf8, /* to_lower */
- to_upper_utf8, /* to_upper */
- to_upper_utf8, /* sort_order */
- NULL, /* contractions */
- NULL, /* sort_order_big*/
- NULL, /* tab_to_uni */
- NULL, /* tab_from_uni */
- NULL, /* state_map */
- NULL, /* ident_map */
- 1, /* strxfrm_multiply */
- 1, /* mbminlen */
- 3, /* mbmaxlen */
- 0, /* min_sort_char */
- 255, /* max_sort_char */
+ 83,0,0, /* number */
+ MY_CS_COMPILED|MY_CS_BINSORT|MY_CS_UNICODE, /* state */
+ "utf8", /* cs name */
+ "utf8_bin", /* name */
+ "", /* comment */
+ NULL, /* tailoring */
+ ctype_utf8, /* ctype */
+ to_lower_utf8, /* to_lower */
+ to_upper_utf8, /* to_upper */
+ NULL, /* sort_order */
+ NULL, /* contractions */
+ NULL, /* sort_order_big*/
+ NULL, /* tab_to_uni */
+ NULL, /* tab_from_uni */
+ NULL, /* state_map */
+ NULL, /* ident_map */
+ 1, /* strxfrm_multiply */
+ 1, /* mbminlen */
+ 3, /* mbmaxlen */
+ 0, /* min_sort_char */
+ 255, /* max_sort_char */
&my_charset_utf8_handler,
&my_collation_mb_bin_handler
};
@@ -2154,8 +2164,8 @@ static void test_mb(CHARSET_INFO *cs, uchar *s)
int len=my_mbcharlen_utf8(cs,*s);
while(len--)
{
- printf("%c",*s);
- s++;
+ printf("%c",*s);
+ s++;
}
printf("\n");
}
@@ -2171,23 +2181,23 @@ int main()
{
char str[1024]=" utf8 test проба ПЕРА по-РУССКИ";
CHARSET_INFO *cs;
-
+
test_mb(cs,(uchar*)str);
-
+
printf("orig :'%s'\n",str);
-
+
my_caseup_utf8(cs,str,15);
printf("caseup :'%s'\n",str);
-
+
my_caseup_str_utf8(cs,str);
printf("caseup_str:'%s'\n",str);
-
+
my_casedn_utf8(cs,str,15);
printf("casedn :'%s'\n",str);
-
+
my_casedn_str_utf8(cs,str);
printf("casedn_str:'%s'\n",str);
-
+
return 0;
}
diff --git a/strings/my_vsnprintf.c b/strings/my_vsnprintf.c
index 784c4762724..268f7d18f2a 100644
--- a/strings/my_vsnprintf.c
+++ b/strings/my_vsnprintf.c
@@ -27,27 +27,16 @@
%#[l]d
%#[l]u
%#[l]x
- %#.#s Note #.# is skiped
+ %#.#s Note first # is ignored
RETURN
length of result string
*/
-int my_snprintf(char* to, size_t n, const char* fmt, ...)
-{
- int result;
- va_list args;
- va_start(args,fmt);
- result= my_vsnprintf(to, n, fmt, args);
- va_end(args);
- return result;
-}
-
-
int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
{
char *start=to, *end=to+n-1;
- uint length, num_state, pre_zero, have_long;
+ uint length, width, pre_zero, have_long;
for (; *fmt ; fmt++)
{
@@ -62,23 +51,18 @@ int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
/* Read max fill size (only used with %d and %u) */
if (*fmt == '-')
fmt++;
- length= num_state= pre_zero= have_long= 0;
- for (;; fmt++)
+ length= width= pre_zero= have_long= 0;
+ for (;my_isdigit(&my_charset_latin1,*fmt); fmt++)
{
- if (my_isdigit(&my_charset_latin1,*fmt))
- {
- if (!num_state)
- {
- length=length*10+ (uint) (*fmt-'0');
- if (!length)
- pre_zero= 1; /* first digit was 0 */
- }
- continue;
- }
- if (*fmt != '.' || num_state)
- break;
- num_state= 1;
+ length=length*10+ (uint) (*fmt-'0');
+ if (!length)
+ pre_zero= 1; /* first digit was 0 */
}
+ if (*fmt == '.')
+ for (fmt++;my_isdigit(&my_charset_latin1,*fmt); fmt++)
+ width=width*10+ (uint) (*fmt-'0');
+ else
+ width= ~0;
if (*fmt == 'l')
{
fmt++;
@@ -90,6 +74,7 @@ int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
uint plen,left_len = (uint)(end-to)+1;
if (!par) par = (char*)"(null)";
plen = (uint) strlen(par);
+ set_if_smaller(plen,width);
if (left_len <= plen)
plen = left_len - 1;
to=strnmov(to,par,plen);
@@ -145,6 +130,15 @@ int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
return (uint) (to - start);
}
+int my_snprintf(char* to, size_t n, const char* fmt, ...)
+{
+ int result;
+ va_list args;
+ va_start(args,fmt);
+ result= my_vsnprintf(to, n, fmt, args);
+ va_end(args);
+ return result;
+}
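A small usage sketch of the reworked precision handling (a hypothetical call; my_snprintf is the wrapper defined just above): with "%.3s" at most three bytes of the string argument are copied, which is what the new set_if_smaller(plen, width) clamp implements:

    char buf[16];
    my_snprintf(buf, sizeof(buf), "[%.3s]", "abcdef");
    /* buf now holds "[abc]" */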
#ifdef MAIN
#define OVERRUN_SENTRY 250
diff --git a/strings/strto.c b/strings/strto.c
index 52efec6e087..9e10b935834 100644
--- a/strings/strto.c
+++ b/strings/strto.c
@@ -35,8 +35,12 @@
it can be compiled with the UNSIGNED and/or LONGLONG flag set
*/
-#include <my_global.h>
-#include "m_string.h"
+
+#if !defined(_global_h) || !defined(_m_string_h)
+# error Calling file must include 'my_global.h' and 'm_string.h'
+ /* see 'strtoll.c' and 'strtoull.c' for the reasons */
+#endif
+
#include "m_ctype.h"
#include "my_sys.h" /* defines errno */
#include <errno.h>
diff --git a/strings/strtol.c b/strings/strtol.c
index 10d7f8f9da6..ed4ca86c846 100644
--- a/strings/strtol.c
+++ b/strings/strtol.c
@@ -14,9 +14,16 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* This defines strtol() if neaded */
+/* This implements strtol() if needed */
+/*
+ These includes are mandatory because they check for type sizes and
+ functions; in particular, they handle the Tru64 case where 'long' is
+ already 64 bits and our 'longlong' is just a 'long'.
+ */
#include <my_global.h>
+#include <m_string.h>
+
#if !defined(MSDOS) && !defined(HAVE_STRTOL) && !defined(__WIN__)
#include "strto.c"
#endif
diff --git a/strings/strtoll.c b/strings/strtoll.c
index b0b4ef328fc..45352ffd360 100644
--- a/strings/strtoll.c
+++ b/strings/strtoll.c
@@ -14,11 +14,20 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* This is defines strtoll() if neaded */
+/* This implements strtoll() if needed */
-#define strtoll glob_strtoll /* Fix for True64 */
+/*
+ These includes are mandatory because they check for type sizes and
+ functions; in particular, they handle the Tru64 case where 'long' is
+ already 64 bits and our 'longlong' is just a 'long'.
+ This solves a problem on Tru64 where the C99 compiler has a prototype
+ for 'strtoll()' but no implementation, see "6.1 New C99 library functions"
+ in file '/usr/share/doclib/cc.dtk/release_notes.txt'.
+ */
#include <my_global.h>
+#include <m_string.h>
+
#if !defined(HAVE_STRTOLL) && defined(HAVE_LONG_LONG)
#define USE_LONGLONG
#include "strto.c"
diff --git a/strings/strtoul.c b/strings/strtoul.c
index 00e1f820942..32a7bc62298 100644
--- a/strings/strtoul.c
+++ b/strings/strtoul.c
@@ -14,9 +14,16 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* This is defines strtoul() if neaded */
+/* This implements strtoul() if needed */
+/*
+ These includes are mandatory because they check for type sizes and
+ functions; in particular, they handle the Tru64 case where 'long' is
+ already 64 bits and our 'longlong' is just a 'long'.
+ */
#include <my_global.h>
+#include <m_string.h>
+
#if !defined(MSDOS) && !defined(HAVE_STRTOUL)
#define USE_UNSIGNED
#include "strto.c"
diff --git a/strings/strtoull.c b/strings/strtoull.c
index f4f3ce19bf7..0c2788bc188 100644
--- a/strings/strtoull.c
+++ b/strings/strtoull.c
@@ -14,9 +14,20 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* This is defines strtoull() */
+/* This implements strtoull() if needed */
+
+/*
+ These includes are mandatory because they check for type sizes and
+ functions; in particular, they handle the Tru64 case where 'long' is
+ already 64 bits and our 'longlong' is just a 'long'.
+ This solves a problem on Tru64 where the C99 compiler has a prototype
+ for 'strtoull()' but no implementation, see "6.1 New C99 library functions"
+ in file '/usr/share/doclib/cc.dtk/release_notes.txt'.
+ */
#include <my_global.h>
+#include <m_string.h>
+
#if !defined(HAVE_STRTOULL) && defined(HAVE_LONG_LONG)
#define USE_UNSIGNED
#define USE_LONGLONG
diff --git a/strings/utr11-dump.c b/strings/utr11-dump.c
new file mode 100644
index 00000000000..c1b5a923946
--- /dev/null
+++ b/strings/utr11-dump.c
@@ -0,0 +1,112 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+/*
+ Dump character-width tables built from an EastAsianWidth.txt file.
+ See http://www.unicode.org/reports/tr11/ for details.
+ Character types:
+ F - Full width = 1
+ H - Half width = 0
+ W - Wide = 1
+ Na - Narrow = 0
+ A - Ambiguous = 0
+ N - Neutral = 0
+*/
+
+
+int main(int ac, char **av)
+{
+ char str[128];
+ int errors= 0;
+ int plane[0x10000];
+ int page[256];
+ int i;
+
+ memset(plane, 0, sizeof(plane));
+ memset(page, 0, sizeof(page));
+
+ while (fgets(str, sizeof(str), stdin))
+ {
+ int code1, code2, width;
+ char *end;
+
+ if (str[0] == '#')
+ continue;
+ code1= strtol(str, &end, 16);
+ if (code1 < 0 || code1 > 0xFFFF)
+ continue;
+ if (end[0] == ';') /* One character */
+ {
+ code2= code1;
+ }
+ else if (end[0] == '.' && end[1] == '.') /* Range */
+ {
+ end+= 2;
+ code2= strtol(end, &end, 16);
+ if (code2 < 0 || code2 > 0xFFFF)
+ continue;
+ if (end[0] != ';')
+ {
+ errors++;
+ fprintf(stderr, "error: %s", str);
+ continue;
+ }
+ }
+ else
+ {
+ errors++;
+ fprintf(stderr, "error: %s", str);
+ continue;
+ }
+
+ end++;
+ width= (end[0] == 'F' || end[0] == 'W') ? 1 : 0;
+
+ for ( ; code1 <= code2; code1++)
+ {
+ plane[code1]= width;
+ }
+ }
+
+ if (errors)
+ return 1;
+
+ for (i=0; i < 256; i++)
+ {
+ int j;
+ int *p= plane + 256 * i;
+ page[i]= 0;
+ for (j=0; j < 256; j++)
+ {
+ page[i]+= p[j];
+ }
+ if (page[i] != 0 && page[i] != 256)
+ {
+ printf("static char pg%02X[256]=\n{\n", i);
+ for (j=0; j < 256; j++)
+ {
+ printf("%d%s%s", p[j], j < 255 ? "," : "", (j + 1) % 32 ? "" : "\n");
+ }
+ printf("};\n\n");
+ }
+ }
+
+ printf("static struct {int page; char *p;} utr11_data[256]=\n{\n");
+ for (i=0; i < 256; i++)
+ {
+ if (page[i] == 0 || page[i] == 256)
+ {
+ int width= (page[i] == 256) ? 1 : 0;
+ printf("{%d,NULL}", width);
+ }
+ else
+ {
+ printf("{0,pg%02X}", i);
+ }
+ printf("%s%s", i < 255 ? "," : "", (i+1) % 8 ? "" : "\n");
+ }
+ printf("};\n");
+ return 0;
+}
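The parser above expects lines in the EastAsianWidth.txt format: a code point or a code-point range, a semicolon, and a width class, with '#' starting a comment. Illustrative input lines and the width the tool assigns to them:

    0041;Na          # single code point, Narrow     -> width 0
    1100..115F;W     # code-point range, Wide        -> width 1
    3000;F           # single code point, Fullwidth  -> width 1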
diff --git a/support-files/Makefile.am b/support-files/Makefile.am
index 196da3d8744..7ae1071f9ec 100644
--- a/support-files/Makefile.am
+++ b/support-files/Makefile.am
@@ -22,6 +22,7 @@ EXTRA_DIST = mysql.spec.sh \
my-medium.cnf.sh \
my-large.cnf.sh \
my-huge.cnf.sh \
+ my-innodb-heavy-4G.cnf.sh \
mysql-log-rotate.sh \
mysql.server.sh \
binary-configure.sh \
@@ -34,6 +35,7 @@ pkgdata_DATA = my-small.cnf \
my-medium.cnf \
my-large.cnf \
my-huge.cnf \
+ my-innodb-heavy-4G.cnf \
mysql-log-rotate \
mysql-@VERSION@.spec \
MySQL-shared-compat.spec
@@ -44,6 +46,7 @@ CLEANFILES = my-small.cnf \
my-medium.cnf \
my-large.cnf \
my-huge.cnf \
+ my-innodb-heavy-4G.cnf \
mysql.spec \
mysql-@VERSION@.spec \
mysql-log-rotate \
diff --git a/support-files/MySQL-shared-compat.spec.sh b/support-files/MySQL-shared-compat.spec.sh
index 2a257a601a1..068daadab58 100644
--- a/support-files/MySQL-shared-compat.spec.sh
+++ b/support-files/MySQL-shared-compat.spec.sh
@@ -26,8 +26,8 @@
#
# Change this to match the version of the shared libs you want to include
#
-%define version4 @VERSION@
-%define version3 3.23.56
+%define version4 @MYSQL_NO_DASH_VERSION@
+%define version3 3.23.58
Name: MySQL-shared-compat
Packager: Lenz Grimmer <build@mysql.com>
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index d5c43e61f9d..e7f8a035a15 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -1,5 +1,9 @@
%define mysql_version @VERSION@
-%define release 0
+%ifarch i386
+%define release 0
+%else
+%define release 0.glibc23
+%endif
%define mysqld_user mysql
%define server_suffix -standard
@@ -77,9 +81,8 @@ The MySQL web site (http://www.mysql.com/) provides the latest
news and information about the MySQL software. Also please see the
documentation and the manual for more information.
-This package includes the MySQL server binary (statically linked,
-compiled with InnoDB support) as well as related utilities to run
-and administrate a MySQL server.
+This package includes the MySQL server binary (incl. InnoDB) as well
+as related utilities to run and administrate a MySQL server.
If you want to access and work with the database, you have to install
package "MySQL-client" as well!
@@ -149,15 +152,15 @@ languages and applications need to dynamically load and use MySQL.
%package Max
Release: %{release}
-Summary: MySQL - server with Berkeley DB, OpenSSL, RAID and UDF support
+Summary: MySQL - server with Berkeley DB, RAID and UDF support
Group: Applications/Databases
Provides: mysql-Max
Obsoletes: mysql-Max
-Requires: MySQL >= 4.0
+Requires: MySQL-server >= 4.0
%description Max
Optional MySQL server binary that supports additional features like
-Berkeley DB, OpenSSL, RAID and User Defined Functions (UDFs).
+Berkeley DB, RAID and User Defined Functions (UDFs).
To activate this binary, just install this package in addition to
the standard MySQL package.
@@ -189,9 +192,6 @@ client/server version.
%setup -n mysql-%{mysql_version}
%build
-# The all-static flag is to make the RPM work on different
-# distributions. This version tries to put shared mysqlclient libraries
-# in a separate package.
BuildMySQL() {
# The --enable-assembler simply does nothing on systems that does not
@@ -269,7 +269,7 @@ then
fi
BuildMySQL "--enable-shared \
- --with-openssl \
+ --without-openssl \
--with-berkeley-db \
--with-innodb \
--with-raid \
@@ -284,7 +284,18 @@ mv sql/mysqld sql/mysqld-max
nm --numeric-sort sql/mysqld-max > sql/mysqld-max.sym
# Install embedded server library in the build root
-install -m 644 libmysqld/libmysqld.a $RBR%{_libdir}/mysql
+install -m 644 libmysqld/libmysqld.a $RBR%{_libdir}/mysql/
+
+# Include libgcc.a in the devel subpackage (BUG 4921)
+if [ "$CC" = gcc ]
+then
+ libgcc=`$CC --print-libgcc-file`
+ if [ -f $libgcc ]
+ then
+ %define have_libgcc 1
+ install -m 644 $libgcc $RBR%{_libdir}/mysql/libmygcc.a
+ fi
+fi
# Save libraries
(cd libmysql/.libs; tar cf $RBR/shared-libs.tar *.so*)
@@ -295,15 +306,17 @@ mv Docs/manual.ps Docs/manual.ps.save
make clean
mv Docs/manual.ps.save Docs/manual.ps
-# RPM:s destroys Makefile.in files, so we generate them here
-# aclocal; autoheader; aclocal; automake; autoconf
-# (cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
-
-# Now build the statically linked 4.0 binary (which includes InnoDB)
+#
+# Only link statically on our i386 build host (which has a specially
+# patched static glibc installed) - ia64 and x86_64 run glibc-2.3 (unpatched)
+# so don't link statically there
+#
BuildMySQL "--disable-shared \
+%ifarch i386
--with-mysqld-ldflags='-all-static' \
--with-client-ldflags='-all-static' \
$USE_OTHER_LIBC_DIR \
+%endif
--with-server-suffix='%{server_suffix}' \
--without-embedded-server \
--without-berkeley-db \
@@ -448,7 +461,7 @@ fi
%files server
%defattr(-,root,root,0755)
-%doc COPYING README
+%doc COPYING README
%doc Docs/manual.{html,ps,texi,txt}
%doc Docs/manual_toc.html
%doc support-files/my-*.cnf
@@ -535,6 +548,7 @@ fi
%files devel
%defattr(-, root, root, 0755)
+%doc EXCEPTIONS-CLIENT
%attr(755, root, root) %{_bindir}/comp_err
%attr(755, root, root) %{_bindir}/mysql_config
%dir %attr(755, root, root) %{_includedir}/mysql
@@ -543,6 +557,9 @@ fi
%{_libdir}/mysql/libdbug.a
%{_libdir}/mysql/libheap.a
%{_libdir}/mysql/libmerge.a
+%if %{have_libgcc}
+%{_libdir}/mysql/libmygcc.a
+%endif
%{_libdir}/mysql/libmyisam.a
%{_libdir}/mysql/libmyisammrg.a
%{_libdir}/mysql/libmysqlclient.a
@@ -577,8 +594,32 @@ fi
%attr(644, root, root) %{_libdir}/mysql/libmysqld.a
# The spec file changelog only includes changes made to the spec file
-# itself
+# itself - note that they must be ordered by date (important when
+# merging BK trees)
%changelog
+* Thu Aug 26 2004 Lenz Grimmer <lenz@mysql.com>
+
+- MySQL-Max now requires MySQL-server instead of MySQL (BUG 3860)
+
+* Fri Aug 20 2004 Lenz Grimmer <lenz@mysql.com>
+
+- do not link statically on IA64/AMD64 as these systems do not have
+ a patched glibc installed
+
+* Tue Aug 10 2004 Lenz Grimmer <lenz@mysql.com>
+
+- Added libmygcc.a to the devel subpackage (required to link applications
+ against the embedded server libmysqld.a) (BUG 4921)
+
+* Mon Aug 09 2004 Lenz Grimmer <lenz@mysql.com>
+
+- Added EXCEPTIONS-CLIENT to the "devel" package
+
+* Thu Jul 29 2004 Lenz Grimmer <lenz@mysql.com>
+
+- disabled OpenSSL in the Max binaries again (the RPM packages were the
+ only exception to this anyway) (BUG 1043)
+
* Wed Jun 30 2004 Lenz Grimmer <lenz@mysql.com>
- fixed server postinstall (mysql_install_db was called with the wrong
diff --git a/tests/client_test.c b/tests/client_test.c
index 08ea345a355..73295050ceb 100644
--- a/tests/client_test.c
+++ b/tests/client_test.c
@@ -1991,7 +1991,7 @@ static void test_ps_conj_select()
MYSQL_STMT *stmt;
int rc;
MYSQL_BIND bind[2];
- long int int_data;
+ int32 int_data;
char str_data[32];
unsigned long str_length;
myheader("test_ps_conj_select");
@@ -3428,7 +3428,7 @@ static void bind_fetch(int row_count)
{
MYSQL_STMT *stmt;
int rc, i, count= row_count;
- long data[10];
+ int32 data[10];
int8 i8_data;
int16 i16_data;
int32 i32_data;
@@ -4747,7 +4747,7 @@ static void test_multi_stmt()
MYSQL_STMT *stmt, *stmt1, *stmt2;
int rc;
- ulong id;
+ uint32 id;
char name[50];
MYSQL_BIND bind[2];
ulong length[2];
@@ -4806,7 +4806,7 @@ static void test_multi_stmt()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n int_data: %lu(%lu)", id, length[0]);
+ fprintf(stdout, "\n int_data: %lu(%lu)", (ulong) id, length[0]);
fprintf(stdout, "\n str_data: %s(%lu)", name, length[1]);
assert(id == 10);
assert(strcmp(name, "mysql") == 0);
@@ -4835,7 +4835,7 @@ static void test_multi_stmt()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n int_data: %lu(%lu)", id, length[0]);
+ fprintf(stdout, "\n int_data: %lu(%lu)", (ulong) id, length[0]);
fprintf(stdout, "\n str_data: %s(%lu)", name, length[1]);
assert(id == 10);
assert(strcmp(name, "updated") == 0);
@@ -5243,7 +5243,7 @@ static void test_store_result()
{
MYSQL_STMT *stmt;
int rc;
- long nData;
+ int32 nData;
char szData[100];
MYSQL_BIND bind[2];
ulong length, length1;
@@ -5295,7 +5295,7 @@ static void test_store_result()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 1: %ld, %s(%lu)", nData, szData, length1);
+ fprintf(stdout, "\n row 1: %ld, %s(%lu)", (long) nData, szData, length1);
assert(nData == 10);
assert(strcmp(szData, "venu") == 0);
assert(length1 == 4);
@@ -5303,7 +5303,7 @@ static void test_store_result()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 2: %ld, %s(%lu)", nData, szData, length1);
+ fprintf(stdout, "\n row 2: %ld, %s(%lu)", (long) nData, szData, length1);
assert(nData == 20);
assert(strcmp(szData, "mysql") == 0);
assert(length1 == 5);
@@ -5330,7 +5330,7 @@ static void test_store_result()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 1: %ld, %s(%lu)", nData, szData, length1);
+ fprintf(stdout, "\n row 1: %ld, %s(%lu)", (long) nData, szData, length1);
assert(nData == 10);
assert(strcmp(szData, "venu") == 0);
assert(length1 == 4);
@@ -5338,7 +5338,7 @@ static void test_store_result()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 2: %ld, %s(%lu)", nData, szData, length1);
+ fprintf(stdout, "\n row 2: %ld, %s(%lu)", (long) nData, szData, length1);
assert(nData == 20);
assert(strcmp(szData, "mysql") == 0);
assert(length1 == 5);
@@ -6187,7 +6187,7 @@ static void test_ushort_bug()
MYSQL_STMT *stmt;
MYSQL_BIND bind[4];
ushort short_value;
- ulong long_value;
+ uint32 long_value;
ulong s_length, l_length, ll_length, t_length;
ulonglong longlong_value;
int rc;
@@ -6241,7 +6241,7 @@ static void test_ushort_bug()
check_execute(stmt, rc);
fprintf(stdout, "\n ushort : %d (%ld)", short_value, s_length);
- fprintf(stdout, "\n ulong : %ld (%ld)", long_value, l_length);
+ fprintf(stdout, "\n ulong : %lu (%ld)", (ulong) long_value, l_length);
fprintf(stdout, "\n longlong : %lld (%ld)", longlong_value, ll_length);
fprintf(stdout, "\n tinyint : %d (%ld)", tiny_value, t_length);
@@ -6271,7 +6271,7 @@ static void test_sshort_bug()
MYSQL_STMT *stmt;
MYSQL_BIND bind[4];
short short_value;
- long long_value;
+ int32 long_value;
ulong s_length, l_length, ll_length, t_length;
ulonglong longlong_value;
int rc;
@@ -6325,7 +6325,7 @@ static void test_sshort_bug()
check_execute(stmt, rc);
fprintf(stdout, "\n sshort : %d (%ld)", short_value, s_length);
- fprintf(stdout, "\n slong : %ld (%ld)", long_value, l_length);
+ fprintf(stdout, "\n slong : %ld (%ld)", (long) long_value, l_length);
fprintf(stdout, "\n longlong : %lld (%ld)", longlong_value, ll_length);
fprintf(stdout, "\n tinyint : %d (%ld)", tiny_value, t_length);
@@ -6355,7 +6355,7 @@ static void test_stiny_bug()
MYSQL_STMT *stmt;
MYSQL_BIND bind[4];
short short_value;
- long long_value;
+ int32 long_value;
ulong s_length, l_length, ll_length, t_length;
ulonglong longlong_value;
int rc;
@@ -6409,7 +6409,7 @@ static void test_stiny_bug()
check_execute(stmt, rc);
fprintf(stdout, "\n sshort : %d (%ld)", short_value, s_length);
- fprintf(stdout, "\n slong : %ld (%ld)", long_value, l_length);
+ fprintf(stdout, "\n slong : %ld (%ld)", (long) long_value, l_length);
fprintf(stdout, "\n longlong : %lld (%ld)", longlong_value, ll_length);
fprintf(stdout, "\n tinyint : %d (%ld)", tiny_value, t_length);
@@ -7454,7 +7454,7 @@ static void test_fetch_seek()
MYSQL_BIND bind[3];
MYSQL_ROW_OFFSET row;
int rc;
- long c1;
+ int32 c1;
char c2[11], c3[20];
myheader("test_fetch_seek");
@@ -7499,7 +7499,7 @@ static void test_fetch_seek()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 0: %ld, %s, %s", c1, c2, c3);
+ fprintf(stdout, "\n row 0: %ld, %s, %s", (long) c1, c2, c3);
row= mysql_stmt_row_tell(stmt);
@@ -7508,21 +7508,21 @@ static void test_fetch_seek()
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 2: %ld, %s, %s", c1, c2, c3);
+ fprintf(stdout, "\n row 2: %ld, %s, %s", (long) c1, c2, c3);
row= mysql_stmt_row_seek(stmt, row);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 2: %ld, %s, %s", c1, c2, c3);
+ fprintf(stdout, "\n row 2: %ld, %s, %s", (long) c1, c2, c3);
mysql_stmt_data_seek(stmt, 0);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
- fprintf(stdout, "\n row 0: %ld, %s, %s", c1, c2, c3);
+ fprintf(stdout, "\n row 0: %ld, %s, %s", (long) c1, c2, c3);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
@@ -8253,7 +8253,7 @@ static void test_bug1500()
MYSQL_STMT *stmt;
MYSQL_BIND bind[3];
int rc;
- long int_data[3]= {2, 3, 4};
+ int32 int_data[3]= {2, 3, 4};
const char *data;
myheader("test_bug1500");
@@ -9039,7 +9039,7 @@ static void test_multi()
char *query;
MYSQL_BIND bind[1];
int rc, i;
- long param= 1;
+ int32 param= 1;
ulong length= 1;
myheader("test_multi");
@@ -9091,11 +9091,11 @@ static void test_multi()
rc= mysql_stmt_execute(stmt_update);
check_execute(stmt_update, rc);
- fprintf(stdout, "update %ld\n", param);
+ fprintf(stdout, "update %ld\n", (long) param);
rc= mysql_stmt_execute(stmt_delete);
check_execute(stmt_delete, rc);
- fprintf(stdout, "delete %ld\n", param);
+ fprintf(stdout, "delete %ld\n", (long) param);
rc= mysql_stmt_execute(stmt_select1);
check_execute(stmt_select1, rc);
@@ -9169,9 +9169,9 @@ static void test_bind_nagative()
char *query;
int rc;
MYSQL_BIND bind[1];
- long my_val= 0L;
+ int32 my_val= 0;
ulong my_length= 0L;
- long my_null= 0L;
+ my_bool my_null= FALSE;
myheader("test_insert_select");
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
@@ -9213,9 +9213,9 @@ static void test_derived()
MYSQL_STMT *stmt;
int rc, i;
MYSQL_BIND bind[1];
- long my_val= 0L;
+ int32 my_val= 0;
ulong my_length= 0L;
- long my_null= 0L;
+ my_bool my_null= FALSE;
const char *query=
"select count(1) from (select f.id from t1 f where f.id=?) as x";
@@ -9723,7 +9723,7 @@ static void test_union_param()
MYSQL_BIND bind[2];
char my_val[4];
ulong my_length= 3L;
- long my_null= 0L;
+ my_bool my_null= FALSE;
myheader("test_union_param");
strcpy(my_val, "abc");
@@ -10065,11 +10065,17 @@ static void test_bug4026()
time_in.minute= 59;
time_in.second= 59;
time_in.second_part= 123456;
+ /*
+ Not strictly necessary; it just makes the assert below work: this field
+ is filled in when the time is received from the server.
+ */
+ time_in.time_type= MYSQL_TIMESTAMP_TIME;
datetime_in= time_in;
datetime_in.year= 2003;
datetime_in.month= 12;
datetime_in.day= 31;
+ datetime_in.time_type= MYSQL_TIMESTAMP_DATETIME;
mysql_stmt_bind_param(stmt, bind);
@@ -10101,7 +10107,7 @@ static void test_bug4079()
MYSQL_STMT *stmt;
MYSQL_BIND bind[1];
const char *stmt_text;
- unsigned long res;
+ uint32 res;
int rc;
myheader("test_bug4079");
@@ -10593,6 +10599,125 @@ static void test_view_insert_fields()
}
+static void test_bug5126()
+{
+ MYSQL_STMT *stmt;
+ MYSQL_BIND bind[2];
+ int32 c1, c2;
+ const char *stmt_text;
+ int rc;
+
+ myheader("test_bug5126");
+
+ stmt_text= "DROP TABLE IF EXISTS t1";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+
+ stmt_text= "CREATE TABLE t1 (a mediumint, b int)";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+
+ stmt_text= "INSERT INTO t1 VALUES (8386608, 1)";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+
+ stmt= mysql_stmt_init(mysql);
+ stmt_text= "SELECT a, b FROM t1";
+ rc= mysql_stmt_prepare(stmt, stmt_text, strlen(stmt_text));
+ check_execute(stmt, rc);
+ rc= mysql_stmt_execute(stmt);
+ check_execute(stmt, rc);
+
+ /* Bind output buffers */
+ bzero(bind, sizeof(bind));
+
+ bind[0].buffer_type= MYSQL_TYPE_LONG;
+ bind[0].buffer= &c1;
+ bind[1].buffer_type= MYSQL_TYPE_LONG;
+ bind[1].buffer= &c2;
+
+ mysql_stmt_bind_result(stmt, bind);
+
+ rc= mysql_stmt_fetch(stmt);
+ assert(rc == 0);
+ assert(c1 == 8386608 && c2 == 1);
+ printf("%ld, %ld\n", (long) c1, (long) c2);
+ mysql_stmt_close(stmt);
+}
+
+
+static void test_bug4231()
+{
+ MYSQL_STMT *stmt;
+ MYSQL_BIND bind[2];
+ MYSQL_TIME tm[2];
+ const char *stmt_text;
+ int rc;
+
+ myheader("test_bug4231");
+
+ stmt_text= "DROP TABLE IF EXISTS t1";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+
+ stmt_text= "CREATE TABLE t1 (a int)";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+
+ stmt_text= "INSERT INTO t1 VALUES (1)";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+
+ stmt= mysql_stmt_init(mysql);
+ stmt_text= "SELECT a FROM t1 WHERE ? = ?";
+ rc= mysql_stmt_prepare(stmt, stmt_text, strlen(stmt_text));
+ check_execute(stmt, rc);
+
+ /* Bind input buffers */
+ bzero(bind, sizeof(bind));
+ bzero(tm, sizeof(tm));
+
+ bind[0].buffer_type= MYSQL_TYPE_TIME;
+ bind[0].buffer= (void*) tm;
+ bind[1].buffer_type= MYSQL_TYPE_TIME;
+ bind[1].buffer= (void*) (tm+1);
+
+ mysql_stmt_bind_param(stmt, bind);
+ check_execute(stmt, rc);
+
+ /*
+ First set server-side params to some non-zero non-equal values:
+ then we will check that they are not used when the client sends
+ new (zero) times.
+ */
+ tm[0].time_type = MYSQL_TIMESTAMP_DATE;
+ tm[0].year = 2000;
+ tm[0].month = 1;
+ tm[0].day = 1;
+ tm[1]= tm[0];
+ --tm[1].year; /* tm[0] != tm[1] */
+
+ rc= mysql_stmt_execute(stmt);
+ check_execute(stmt, rc);
+
+ rc= mysql_stmt_fetch(stmt);
+
+ /* binds are unequal, no rows should be returned */
+ DBUG_ASSERT(rc == MYSQL_NO_DATA);
+
+ /* Set one of the dates to zero */
+ tm[0].year= tm[0].month= tm[0].day= 0;
+ tm[1]= tm[0];
+ mysql_stmt_execute(stmt);
+ rc= mysql_stmt_fetch(stmt);
+ DBUG_ASSERT(rc == 0);
+
+ mysql_stmt_close(stmt);
+ stmt_text= "DROP TABLE t1";
+ rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
+ myquery(rc);
+}
+
static void test_basic_cursors()
{
@@ -10944,6 +11069,9 @@ int main(int argc, char **argv)
test_bug4236(); /* init -> execute */
test_bug4030(); /* test conversion string -> time types in
libmysql */
+ test_bug5126(); /* support for mediumint type in libmysql */
+ test_bug4231(); /* proper handling of all-zero times and
+ dates in the server */
test_view(); /* Test of VIEWS with prepared statements */
test_view_where(); /* VIEW with WHERE clause & merge algorithm */
test_view_2where(); /* VIEW with WHERE * SELECt with WHERE */
diff --git a/tools/Makefile.am b/tools/Makefile.am
index 0dc0b90c60e..5528df4dd68 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -1,5 +1,23 @@
-INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes)
-LDADD= @CLIENT_EXTRA_LDFLAGS@ ../libmysql_r/libmysqlclient_r.la @openssl_libs@
+# Copyright (C) 2004 MySQL AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# Process this file with automake to create Makefile.in
+
+INCLUDES=@MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes)
+LDADD= @CLIENT_EXTRA_LDFLAGS@ @openssl_libs@ \
+ $(top_builddir)/libmysql_r/libmysqlclient_r.la @ZLIB_LIBS@
bin_PROGRAMS= mysqlmanager
mysqlmanager_SOURCES= mysqlmanager.c
mysqlmanager_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
diff --git a/zlib/Makefile.am b/zlib/Makefile.am
new file mode 100644
index 00000000000..58d3811cd7c
--- /dev/null
+++ b/zlib/Makefile.am
@@ -0,0 +1,29 @@
+# Copyright (C) 2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# Process this file with automake to create Makefile.in
+
+noinst_LTLIBRARIES=libz.la
+
+noinst_HEADERS= crc32.h deflate.h inffast.h inffixed.h inflate.h \
+ inftrees.h trees.h zconf.h zlib.h zutil.h
+
+libz_la_SOURCES= adler32.c compress.c crc32.c deflate.c gzio.c \
+ infback.c inffast.c inflate.c inftrees.c trees.c \
+ uncompr.c zutil.c
+
+EXTRA_DIST= README FAQ INDEX ChangeLog algorithm.txt zlib.3
+